author     Linus Torvalds <torvalds@linux-foundation.org>   2008-04-30 11:45:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-30 11:45:48 -0400
commit     95dfec6ae1cb8c03406aac612a5642cbddb676b3 (patch)
tree       978de715f45de94a8e79eb08a08ca5fb9dfd9dea /drivers
parent     ae3a0064e6d69068b1c9fd075095da062430bda9 (diff)
parent     159131149c2f56c1da5ae5e23ab9d5acef4916d1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (53 commits)
  tcp: Overflow bug in Vegas
  [IPv4] UFO: prevent generation of chained skb destined to UFO device
  iwlwifi: move the selects to the tristate drivers
  ipv4: annotate a few functions __init in ipconfig.c
  atm: ambassador: vcc_sf semaphore to mutex
  MAINTAINERS: The socketcan-core list is subscribers-only.
  netfilter: nf_conntrack: padding breaks conntrack hash on ARM
  ipv4: Update MTU to all related cache entries in ip_rt_frag_needed()
  sch_sfq: use del_timer_sync() in sfq_destroy()
  net: Add compat support for getsockopt (MCAST_MSFILTER)
  net: Several cleanups for the setsockopt compat support.
  ipvs: fix oops in backup for fwmark conn templates
  bridge: kernel panic when unloading bridge module
  bridge: fix error handling in br_add_if()
  netfilter: {nfnetlink,ip,ip6}_queue: fix skb_over_panic when enlarging packets
  netfilter: x_tables: fix net namespace leak when reading /proc/net/xxx_tables_names
  netfilter: xt_TCPOPTSTRIP: signed tcphoff for ipv6_skip_exthdr() retval
  tcp: Limit cwnd growth when deferring for GSO
  tcp: Allow send-limited cwnd to grow up to max_burst when gso disabled
  [netdrvr] gianfar: Determine TBIPA value dynamically
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/ambassador.c  19
-rw-r--r--  drivers/atm/ambassador.h  2
-rw-r--r--  drivers/net/3c505.c  30
-rw-r--r--  drivers/net/3c505.h  1
-rw-r--r--  drivers/net/3c509.c  47
-rw-r--r--  drivers/net/3c515.c  64
-rw-r--r--  drivers/net/Kconfig  1
-rw-r--r--  drivers/net/Makefile  2
-rw-r--r--  drivers/net/arm/Kconfig  8
-rw-r--r--  drivers/net/arm/Makefile  1
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c  1265
-rw-r--r--  drivers/net/bfin_mac.c  296
-rw-r--r--  drivers/net/bfin_mac.h  2
-rw-r--r--  drivers/net/eepro.c  2
-rw-r--r--  drivers/net/gianfar.c  27
-rw-r--r--  drivers/net/gianfar.h  1
-rw-r--r--  drivers/net/gianfar_mii.c  38
-rw-r--r--  drivers/net/gianfar_mii.h  3
-rw-r--r--  drivers/net/phy/Kconfig  2
-rw-r--r--  drivers/net/phy/phy_device.c  2
-rw-r--r--  drivers/net/phy/smsc.c  83
-rw-r--r--  drivers/net/r8169.c  8
-rw-r--r--  drivers/net/s2io.c  337
-rw-r--r--  drivers/net/s2io.h  82
-rw-r--r--  drivers/net/sfc/Kconfig  12
-rw-r--r--  drivers/net/sfc/Makefile  5
-rw-r--r--  drivers/net/sfc/bitfield.h  508
-rw-r--r--  drivers/net/sfc/boards.c  167
-rw-r--r--  drivers/net/sfc/boards.h  26
-rw-r--r--  drivers/net/sfc/efx.c  2208
-rw-r--r--  drivers/net/sfc/efx.h  67
-rw-r--r--  drivers/net/sfc/enum.h  50
-rw-r--r--  drivers/net/sfc/ethtool.c  460
-rw-r--r--  drivers/net/sfc/ethtool.h  27
-rw-r--r--  drivers/net/sfc/falcon.c  2722
-rw-r--r--  drivers/net/sfc/falcon.h  130
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h  1135
-rw-r--r--  drivers/net/sfc/falcon_io.h  243
-rw-r--r--  drivers/net/sfc/falcon_xmac.c  585
-rw-r--r--  drivers/net/sfc/gmii.h  195
-rw-r--r--  drivers/net/sfc/i2c-direct.c  381
-rw-r--r--  drivers/net/sfc/i2c-direct.h  91
-rw-r--r--  drivers/net/sfc/mac.h  33
-rw-r--r--  drivers/net/sfc/mdio_10g.c  282
-rw-r--r--  drivers/net/sfc/mdio_10g.h  232
-rw-r--r--  drivers/net/sfc/net_driver.h  883
-rw-r--r--  drivers/net/sfc/phy.h  48
-rw-r--r--  drivers/net/sfc/rx.c  875
-rw-r--r--  drivers/net/sfc/rx.h  29
-rw-r--r--  drivers/net/sfc/sfe4001.c  252
-rw-r--r--  drivers/net/sfc/spi.h  71
-rw-r--r--  drivers/net/sfc/tenxpress.c  434
-rw-r--r--  drivers/net/sfc/tx.c  452
-rw-r--r--  drivers/net/sfc/tx.h  24
-rw-r--r--  drivers/net/sfc/workarounds.h  56
-rw-r--r--  drivers/net/sfc/xenpack.h  62
-rw-r--r--  drivers/net/sfc/xfp_phy.c  132
-rw-r--r--  drivers/net/sis190.c  136
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig  12
-rw-r--r--  drivers/s390/cio/ccwgroup.c  96
-rw-r--r--  drivers/s390/net/cu3088.c  20
-rw-r--r--  drivers/s390/net/lcs.c  3
-rw-r--r--  drivers/s390/net/netiucv.c  3
-rw-r--r--  drivers/s390/net/qeth_core.h  50
-rw-r--r--  drivers/s390/net/qeth_core_main.c  200
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  30
-rw-r--r--  drivers/s390/net/qeth_l3.h  3
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  30
68 files changed, 15086 insertions, 697 deletions
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 5aa12b011a9a..6adb72a2f876 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -33,6 +33,7 @@
 #include <linux/interrupt.h>
 #include <linux/poison.h>
 #include <linux/bitrev.h>
+#include <linux/mutex.h>

 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -1177,7 +1178,7 @@ static int amb_open (struct atm_vcc * atm_vcc)

 vcc->tx_frame_bits = tx_frame_bits;

-down (&dev->vcc_sf);
+mutex_lock(&dev->vcc_sf);
 if (dev->rxer[vci]) {
 // RXer on the channel already, just modify rate...
 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1203,7 +1204,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 schedule();
 }
 dev->txer[vci].tx_present = 1;
-up (&dev->vcc_sf);
+mutex_unlock(&dev->vcc_sf);
 }

 if (rxtp->traffic_class != ATM_NONE) {
@@ -1211,7 +1212,7 @@ static int amb_open (struct atm_vcc * atm_vcc)

 vcc->rx_info.pool = pool;

-down (&dev->vcc_sf);
+mutex_lock(&dev->vcc_sf);
 /* grow RX buffer pool */
 if (!dev->rxq[pool].buffers_wanted)
 dev->rxq[pool].buffers_wanted = rx_lats;
@@ -1237,7 +1238,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 schedule();
 // this link allows RX frames through
 dev->rxer[vci] = atm_vcc;
-up (&dev->vcc_sf);
+mutex_unlock(&dev->vcc_sf);
 }

 // indicate readiness
@@ -1262,7 +1263,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
 if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
 command cmd;

-down (&dev->vcc_sf);
+mutex_lock(&dev->vcc_sf);
 if (dev->rxer[vci]) {
 // RXer still on the channel, just modify rate... XXX not really needed
 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1277,7 +1278,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
 dev->txer[vci].tx_present = 0;
 while (command_do (dev, &cmd))
 schedule();
-up (&dev->vcc_sf);
+mutex_unlock(&dev->vcc_sf);
 }

 // disable RXing
@@ -1287,7 +1288,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
 // this is (the?) one reason why we need the amb_vcc struct
 unsigned char pool = vcc->rx_info.pool;

-down (&dev->vcc_sf);
+mutex_lock(&dev->vcc_sf);
 if (dev->txer[vci].tx_present) {
 // TXer still on the channel, just go to pool zero XXX not really needed
 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
@@ -1314,7 +1315,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
 dev->rxq[pool].buffers_wanted = 0;
 drain_rx_pool (dev, pool);
 }
-up (&dev->vcc_sf);
+mutex_unlock(&dev->vcc_sf);
 }

 // free our structure
@@ -2188,7 +2189,7 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)

 // semaphore for txer/rxer modifications - we cannot use a
 // spinlock as the critical region needs to switch processes
-init_MUTEX (&dev->vcc_sf);
+mutex_init(&dev->vcc_sf);
 // queue manipulation spinlocks; we want atomic reads and
 // writes to the queue descriptors (handles IRQ and SMP)
 // consider replacing "int pending" -> "atomic_t available"
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
index ff2a303cbe00..df55fa8387dc 100644
--- a/drivers/atm/ambassador.h
+++ b/drivers/atm/ambassador.h
@@ -638,7 +638,7 @@ struct amb_dev {
 amb_txq txq;
 amb_rxq rxq[NUM_RX_POOLS];

-struct semaphore vcc_sf;
+struct mutex vcc_sf;
 amb_tx_info txer[NUM_VCS];
 struct atm_vcc * rxer[NUM_VCS];
 unsigned int tx_avail;
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9c6573419f5a..fdfb2b2cb734 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -670,7 +670,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
 memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
 }
 skb->protocol = eth_type_trans(skb,dev);
-adapter->stats.rx_bytes += skb->len;
+dev->stats.rx_bytes += skb->len;
 netif_rx(skb);
 dev->last_rx = jiffies;
 }
@@ -773,12 +773,12 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
 * received board statistics
 */
 case CMD_NETWORK_STATISTICS_RESPONSE:
-adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
-adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
-adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
-adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
-adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
-adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
 adapter->got[CMD_NETWORK_STATISTICS] = 1;
 if (elp_debug >= 3)
 printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name);
@@ -794,11 +794,11 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
 break;
 switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
 case 0xffff:
-adapter->stats.tx_aborted_errors++;
+dev->stats.tx_aborted_errors++;
 printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
 break;
 case 0xfffe:
-adapter->stats.tx_fifo_errors++;
+dev->stats.tx_fifo_errors++;
 printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
 break;
 }
@@ -986,7 +986,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
 return false;
 }

-adapter->stats.tx_bytes += nlen;
+dev->stats.tx_bytes += nlen;

 /*
 * send the adapter a transmit packet command. Ignore segment and offset
@@ -1041,7 +1041,6 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)

 static void elp_timeout(struct net_device *dev)
 {
-elp_device *adapter = dev->priv;
 int stat;

 stat = inb_status(dev->base_addr);
@@ -1049,7 +1048,7 @@ static void elp_timeout(struct net_device *dev)
 if (elp_debug >= 1)
 printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat);
 dev->trans_start = jiffies;
-adapter->stats.tx_dropped++;
+dev->stats.tx_dropped++;
 netif_wake_queue(dev);
 }

@@ -1113,7 +1112,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
 /* If the device is closed, just return the latest stats we have,
 - we cannot ask from the adapter without interrupts */
 if (!netif_running(dev))
-return &adapter->stats;
+return &dev->stats;

 /* send a get statistics command to the board */
 adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
@@ -1126,12 +1125,12 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
 while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
 if (time_after_eq(jiffies, timeout)) {
 TIMEOUT_MSG(__LINE__);
-return &adapter->stats;
+return &dev->stats;
 }
 }

 /* statistics are now up to date */
-return &adapter->stats;
+return &dev->stats;
 }


@@ -1571,7 +1570,6 @@ static int __init elplus_setup(struct net_device *dev)
 dev->set_multicast_list = elp_set_mc_list; /* local */
 dev->ethtool_ops = &netdev_ethtool_ops; /* local */

-memset(&(adapter->stats), 0, sizeof(struct net_device_stats));
 dev->mem_start = dev->mem_end = 0;

 err = register_netdev(dev);
diff --git a/drivers/net/3c505.h b/drivers/net/3c505.h
index 1910cb1dc787..04df2a9002b6 100644
--- a/drivers/net/3c505.h
+++ b/drivers/net/3c505.h
@@ -264,7 +264,6 @@ typedef struct {
 pcb_struct rx_pcb; /* PCB for foreground receiving */
 pcb_struct itx_pcb; /* PCB for background sending */
 pcb_struct irx_pcb; /* PCB for background receiving */
-struct net_device_stats stats;

 void *dma_buffer;

diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 54dac0696d91..e6c545fe5f58 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -167,7 +167,6 @@ enum RxFilter {
 enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };

 struct el3_private {
-struct net_device_stats stats;
 spinlock_t lock;
 /* skb send-queue */
 int head, size;
@@ -794,7 +793,6 @@ el3_open(struct net_device *dev)
 static void
 el3_tx_timeout (struct net_device *dev)
 {
-struct el3_private *lp = netdev_priv(dev);
 int ioaddr = dev->base_addr;

 /* Transmitter timeout, serious problems. */
@@ -802,7 +800,7 @@ el3_tx_timeout (struct net_device *dev)
 "Tx FIFO room %d.\n",
 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
 inw(ioaddr + TX_FREE));
-lp->stats.tx_errors++;
+dev->stats.tx_errors++;
 dev->trans_start = jiffies;
 /* Issue TX_RESET and TX_START commands. */
 outw(TxReset, ioaddr + EL3_CMD);
@@ -820,7 +818,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)

 netif_stop_queue (dev);

-lp->stats.tx_bytes += skb->len;
+dev->stats.tx_bytes += skb->len;

 if (el3_debug > 4) {
 printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
@@ -881,7 +879,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 int i = 4;

 while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
-if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -931,12 +929,11 @@ el3_interrupt(int irq, void *dev_id)
 outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
 }
 if (status & TxComplete) { /* Really Tx error. */
-struct el3_private *lp = netdev_priv(dev);
 short tx_status;
 int i = 4;

 while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
-if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -1002,7 +999,7 @@ el3_get_stats(struct net_device *dev)
 spin_lock_irqsave(&lp->lock, flags);
 update_stats(dev);
 spin_unlock_irqrestore(&lp->lock, flags);
-return &lp->stats;
+return &dev->stats;
 }

 /* Update statistics. We change to register window 6, so this should be run
@@ -1012,7 +1009,6 @@ el3_get_stats(struct net_device *dev)
 */
 static void update_stats(struct net_device *dev)
 {
-struct el3_private *lp = netdev_priv(dev);
 int ioaddr = dev->base_addr;

 if (el3_debug > 5)
@@ -1021,13 +1017,13 @@ static void update_stats(struct net_device *dev)
 outw(StatsDisable, ioaddr + EL3_CMD);
 /* Switch to the stats window, and read everything. */
 EL3WINDOW(6);
-lp->stats.tx_carrier_errors += inb(ioaddr + 0);
-lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
 /* Multiple collisions. */ inb(ioaddr + 2);
-lp->stats.collisions += inb(ioaddr + 3);
-lp->stats.tx_window_errors += inb(ioaddr + 4);
-lp->stats.rx_fifo_errors += inb(ioaddr + 5);
-lp->stats.tx_packets += inb(ioaddr + 6);
+dev->stats.collisions += inb(ioaddr + 3);
+dev->stats.tx_window_errors += inb(ioaddr + 4);
+dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+dev->stats.tx_packets += inb(ioaddr + 6);
 /* Rx packets */ inb(ioaddr + 7);
 /* Tx deferrals */ inb(ioaddr + 8);
 inw(ioaddr + 10); /* Total Rx and Tx octets. */
@@ -1042,7 +1038,6 @@ static void update_stats(struct net_device *dev)
 static int
 el3_rx(struct net_device *dev)
 {
-struct el3_private *lp = netdev_priv(dev);
 int ioaddr = dev->base_addr;
 short rx_status;

@@ -1054,21 +1049,21 @@ el3_rx(struct net_device *dev)
 short error = rx_status & 0x3800;

 outw(RxDiscard, ioaddr + EL3_CMD);
-lp->stats.rx_errors++;
+dev->stats.rx_errors++;
 switch (error) {
-case 0x0000: lp->stats.rx_over_errors++; break;
-case 0x0800: lp->stats.rx_length_errors++; break;
-case 0x1000: lp->stats.rx_frame_errors++; break;
-case 0x1800: lp->stats.rx_length_errors++; break;
-case 0x2000: lp->stats.rx_frame_errors++; break;
-case 0x2800: lp->stats.rx_crc_errors++; break;
+case 0x0000: dev->stats.rx_over_errors++; break;
+case 0x0800: dev->stats.rx_length_errors++; break;
+case 0x1000: dev->stats.rx_frame_errors++; break;
+case 0x1800: dev->stats.rx_length_errors++; break;
+case 0x2000: dev->stats.rx_frame_errors++; break;
+case 0x2800: dev->stats.rx_crc_errors++; break;
 }
 } else {
 short pkt_len = rx_status & 0x7ff;
 struct sk_buff *skb;

 skb = dev_alloc_skb(pkt_len+5);
-lp->stats.rx_bytes += pkt_len;
+dev->stats.rx_bytes += pkt_len;
 if (el3_debug > 4)
 printk("Receiving packet size %d status %4.4x.\n",
 pkt_len, rx_status);
@@ -1083,11 +1078,11 @@ el3_rx(struct net_device *dev)
 skb->protocol = eth_type_trans(skb,dev);
 netif_rx(skb);
 dev->last_rx = jiffies;
-lp->stats.rx_packets++;
+dev->stats.rx_packets++;
 continue;
 }
 outw(RxDiscard, ioaddr + EL3_CMD);
-lp->stats.rx_dropped++;
+dev->stats.rx_dropped++;
 if (el3_debug)
 printk("%s: Couldn't allocate a sk_buff of size %d.\n",
 dev->name, pkt_len);
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 6ab84b661d70..105a8c7ca7e9 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -310,7 +310,6 @@ struct corkscrew_private {
 struct sk_buff *tx_skbuff[TX_RING_SIZE];
 unsigned int cur_rx, cur_tx; /* The next free ring entry */
 unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */
-struct net_device_stats stats;
 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
 struct timer_list timer; /* Media selection timer. */
 int capabilities ; /* Adapter capabilities word. */
@@ -983,8 +982,8 @@ static void corkscrew_timeout(struct net_device *dev)
 break;
 outw(TxEnable, ioaddr + EL3_CMD);
 dev->trans_start = jiffies;
-vp->stats.tx_errors++;
-vp->stats.tx_dropped++;
+dev->stats.tx_errors++;
+dev->stats.tx_dropped++;
 netif_wake_queue(dev);
 }

@@ -1050,7 +1049,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
 }
 /* Put out the doubleword header... */
 outl(skb->len, ioaddr + TX_FIFO);
-vp->stats.tx_bytes += skb->len;
+dev->stats.tx_bytes += skb->len;
 #ifdef VORTEX_BUS_MASTER
 if (vp->bus_master) {
 /* Set the bus-master controller to transfer the packet. */
@@ -1094,9 +1093,9 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
 printk("%s: Tx error, status %2.2x.\n",
 dev->name, tx_status);
 if (tx_status & 0x04)
-vp->stats.tx_fifo_errors++;
+dev->stats.tx_fifo_errors++;
 if (tx_status & 0x38)
-vp->stats.tx_aborted_errors++;
+dev->stats.tx_aborted_errors++;
 if (tx_status & 0x30) {
 int j;
 outw(TxReset, ioaddr + EL3_CMD);
@@ -1257,7 +1256,6 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)

 static int corkscrew_rx(struct net_device *dev)
 {
-struct corkscrew_private *vp = netdev_priv(dev);
 int ioaddr = dev->base_addr;
 int i;
 short rx_status;
@@ -1271,17 +1269,17 @@ static int corkscrew_rx(struct net_device *dev)
 if (corkscrew_debug > 2)
 printk(" Rx error: status %2.2x.\n",
 rx_error);
-vp->stats.rx_errors++;
+dev->stats.rx_errors++;
 if (rx_error & 0x01)
-vp->stats.rx_over_errors++;
+dev->stats.rx_over_errors++;
 if (rx_error & 0x02)
-vp->stats.rx_length_errors++;
+dev->stats.rx_length_errors++;
 if (rx_error & 0x04)
-vp->stats.rx_frame_errors++;
+dev->stats.rx_frame_errors++;
 if (rx_error & 0x08)
-vp->stats.rx_crc_errors++;
+dev->stats.rx_crc_errors++;
 if (rx_error & 0x10)
-vp->stats.rx_length_errors++;
+dev->stats.rx_length_errors++;
 } else {
 /* The packet length: up to 4.5K!. */
 short pkt_len = rx_status & 0x1fff;
@@ -1301,8 +1299,8 @@ static int corkscrew_rx(struct net_device *dev)
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);
 dev->last_rx = jiffies;
-vp->stats.rx_packets++;
-vp->stats.rx_bytes += pkt_len;
+dev->stats.rx_packets++;
+dev->stats.rx_bytes += pkt_len;
 /* Wait a limited time to go to next packet. */
 for (i = 200; i >= 0; i--)
 if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1312,7 +1310,7 @@ static int corkscrew_rx(struct net_device *dev)
 printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
 }
 outw(RxDiscard, ioaddr + EL3_CMD);
-vp->stats.rx_dropped++;
+dev->stats.rx_dropped++;
 /* Wait a limited time to skip this packet. */
 for (i = 200; i >= 0; i--)
 if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1337,23 +1335,23 @@ static int boomerang_rx(struct net_device *dev)
 if (corkscrew_debug > 2)
 printk(" Rx error: status %2.2x.\n",
 rx_error);
-vp->stats.rx_errors++;
+dev->stats.rx_errors++;
 if (rx_error & 0x01)
-vp->stats.rx_over_errors++;
+dev->stats.rx_over_errors++;
 if (rx_error & 0x02)
-vp->stats.rx_length_errors++;
+dev->stats.rx_length_errors++;
 if (rx_error & 0x04)
-vp->stats.rx_frame_errors++;
+dev->stats.rx_frame_errors++;
 if (rx_error & 0x08)
-vp->stats.rx_crc_errors++;
+dev->stats.rx_crc_errors++;
 if (rx_error & 0x10)
-vp->stats.rx_length_errors++;
+dev->stats.rx_length_errors++;
 } else {
 /* The packet length: up to 4.5K!. */
 short pkt_len = rx_status & 0x1fff;
 struct sk_buff *skb;

-vp->stats.rx_bytes += pkt_len;
+dev->stats.rx_bytes += pkt_len;
 if (corkscrew_debug > 4)
 printk("Receiving packet size %d status %4.4x.\n",
 pkt_len, rx_status);
@@ -1388,7 +1386,7 @@ static int boomerang_rx(struct net_device *dev)
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);
 dev->last_rx = jiffies;
-vp->stats.rx_packets++;
+dev->stats.rx_packets++;
 }
 entry = (++vp->cur_rx) % RX_RING_SIZE;
 }
@@ -1475,7 +1473,7 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
 update_stats(dev->base_addr, dev);
 spin_unlock_irqrestore(&vp->lock, flags);
 }
-return &vp->stats;
+return &dev->stats;
 }

 /* Update statistics.
@@ -1487,19 +1485,17 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
 */
 static void update_stats(int ioaddr, struct net_device *dev)
 {
-struct corkscrew_private *vp = netdev_priv(dev);
-
 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
 /* Switch to the stats window, and read everything. */
 EL3WINDOW(6);
-vp->stats.tx_carrier_errors += inb(ioaddr + 0);
-vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
 /* Multiple collisions. */ inb(ioaddr + 2);
-vp->stats.collisions += inb(ioaddr + 3);
-vp->stats.tx_window_errors += inb(ioaddr + 4);
-vp->stats.rx_fifo_errors += inb(ioaddr + 5);
-vp->stats.tx_packets += inb(ioaddr + 6);
-vp->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
+dev->stats.collisions += inb(ioaddr + 3);
+dev->stats.tx_window_errors += inb(ioaddr + 4);
+dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+dev->stats.tx_packets += inb(ioaddr + 6);
+dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
 /* Rx packets */ inb(ioaddr + 7);
 /* Must read to clear */
 /* Tx deferrals */ inb(ioaddr + 8);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f90a86ba7e2f..af46341827f2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2593,6 +2593,7 @@ config BNX2X
 To compile this driver as a module, choose M here: the module
 will be called bnx2x. This is recommended.

+source "drivers/net/sfc/Kconfig"

 endif # NETDEV_10000

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 2f1f3f2739fd..dcbfe8421154 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -253,3 +253,5 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_SFC) += sfc/
+
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index f9cc2b621fe2..8eda6eeb43b7 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -47,3 +47,11 @@ config EP93XX_ETH
 help
 This is a driver for the ethernet hardware included in EP93xx CPUs.
 Say Y if you are building a kernel for EP93xx based devices.
+
+config IXP4XX_ETH
+tristate "Intel IXP4xx Ethernet support"
+depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+select MII
+help
+Say Y here if you want to use built-in Ethernet ports
+on IXP4xx processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index a4c868278e11..7c812ac2b6a5 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ARM_ETHER3) += ether3.o
 obj-$(CONFIG_ARM_ETHER1) += ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
 obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
new file mode 100644
index 000000000000..c617b64c288e
--- /dev/null
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -0,0 +1,1265 @@
1/*
2 * Intel IXP4xx Ethernet driver for Linux
3 *
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Ethernet port config (0x00 is not present on IXP42X):
11 *
12 * logical port 0x00 0x10 0x20
13 * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
14 * physical PortId 2 0 1
15 * TX queue 23 24 25
16 * RX-free queue 26 27 28
17 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
18 *
19 *
20 * Queue entries:
21 * bits 0 -> 1 - NPE ID (RX and TX-done)
22 * bits 0 -> 2 - priority (TX, per 802.1D)
23 * bits 3 -> 4 - port ID (user-set?)
24 * bits 5 -> 31 - physical descriptor address
25 */
26
27#include <linux/delay.h>
28#include <linux/dma-mapping.h>
29#include <linux/dmapool.h>
30#include <linux/etherdevice.h>
31#include <linux/io.h>
32#include <linux/kernel.h>
33#include <linux/mii.h>
34#include <linux/platform_device.h>
35#include <asm/arch/npe.h>
36#include <asm/arch/qmgr.h>
37
38#define DEBUG_QUEUES 0
39#define DEBUG_DESC 0
40#define DEBUG_RX 0
41#define DEBUG_TX 0
42#define DEBUG_PKT_BYTES 0
43#define DEBUG_MDIO 0
44#define DEBUG_CLOSE 0
45
46#define DRV_NAME "ixp4xx_eth"
47
48#define MAX_NPES 3
49
50#define RX_DESCS 64 /* also length of all RX queues */
51#define TX_DESCS 16 /* also length of all TX queues */
52#define TXDONE_QUEUE_LEN 64 /* dwords */
53
54#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
55#define REGS_SIZE 0x1000
56#define MAX_MRU 1536 /* 0x600 */
57#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
58
59#define NAPI_WEIGHT 16
60#define MDIO_INTERVAL (3 * HZ)
61#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
62#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
63#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
64
65#define NPE_ID(port_id) ((port_id) >> 4)
66#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
67#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
68#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
69#define TXDONE_QUEUE 31
70
71/* TX Control Registers */
72#define TX_CNTRL0_TX_EN 0x01
73#define TX_CNTRL0_HALFDUPLEX 0x02
74#define TX_CNTRL0_RETRY 0x04
75#define TX_CNTRL0_PAD_EN 0x08
76#define TX_CNTRL0_APPEND_FCS 0x10
77#define TX_CNTRL0_2DEFER 0x20
78#define TX_CNTRL0_RMII 0x40 /* reduced MII */
79#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
80
81/* RX Control Registers */
82#define RX_CNTRL0_RX_EN 0x01
83#define RX_CNTRL0_PADSTRIP_EN 0x02
84#define RX_CNTRL0_SEND_FCS 0x04
85#define RX_CNTRL0_PAUSE_EN 0x08
86#define RX_CNTRL0_LOOP_EN 0x10
87#define RX_CNTRL0_ADDR_FLTR_EN 0x20
88#define RX_CNTRL0_RX_RUNT_EN 0x40
89#define RX_CNTRL0_BCAST_DIS 0x80
90#define RX_CNTRL1_DEFER_EN 0x01
91
92/* Core Control Register */
93#define CORE_RESET 0x01
94#define CORE_RX_FIFO_FLUSH 0x02
95#define CORE_TX_FIFO_FLUSH 0x04
96#define CORE_SEND_JAM 0x08
97#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
98
99#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
100 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
101 TX_CNTRL0_2DEFER)
102#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
103#define DEFAULT_CORE_CNTRL CORE_MDC_EN
104
105
106/* NPE message codes */
107#define NPE_GETSTATUS 0x00
108#define NPE_EDB_SETPORTADDRESS 0x01
109#define NPE_EDB_GETMACADDRESSDATABASE 0x02
110#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
111#define NPE_GETSTATS 0x04
112#define NPE_RESETSTATS 0x05
113#define NPE_SETMAXFRAMELENGTHS 0x06
114#define NPE_VLAN_SETRXTAGMODE 0x07
115#define NPE_VLAN_SETDEFAULTRXVID 0x08
116#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
117#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
118#define NPE_VLAN_SETRXQOSENTRY 0x0B
119#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
120#define NPE_STP_SETBLOCKINGSTATE 0x0D
121#define NPE_FW_SETFIREWALLMODE 0x0E
122#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
123#define NPE_PC_SETAPMACTABLE 0x11
124#define NPE_SETLOOPBACK_MODE 0x12
125#define NPE_PC_SETBSSIDTABLE 0x13
126#define NPE_ADDRESS_FILTER_CONFIG 0x14
127#define NPE_APPENDFCSCONFIG 0x15
128#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
129#define NPE_MAC_RECOVERY_START 0x17
130
131
132#ifdef __ARMEB__
133typedef struct sk_buff buffer_t;
134#define free_buffer dev_kfree_skb
135#define free_buffer_irq dev_kfree_skb_irq
136#else
137typedef void buffer_t;
138#define free_buffer kfree
139#define free_buffer_irq kfree
140#endif
141
142struct eth_regs {
143 u32 tx_control[2], __res1[2]; /* 000 */
144 u32 rx_control[2], __res2[2]; /* 010 */
145 u32 random_seed, __res3[3]; /* 020 */
146 u32 partial_empty_threshold, __res4; /* 030 */
147 u32 partial_full_threshold, __res5; /* 038 */
148 u32 tx_start_bytes, __res6[3]; /* 040 */
149 u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
150 u32 tx_2part_deferral[2], __res8[2]; /* 060 */
151 u32 slot_time, __res9[3]; /* 070 */
152 u32 mdio_command[4]; /* 080 */
153 u32 mdio_status[4]; /* 090 */
154 u32 mcast_mask[6], __res10[2]; /* 0A0 */
155 u32 mcast_addr[6], __res11[2]; /* 0C0 */
156 u32 int_clock_threshold, __res12[3]; /* 0E0 */
157 u32 hw_addr[6], __res13[61]; /* 0F0 */
158 u32 core_control; /* 1FC */
159};
160
161struct port {
162 struct resource *mem_res;
163 struct eth_regs __iomem *regs;
164 struct npe *npe;
165 struct net_device *netdev;
166 struct napi_struct napi;
167 struct net_device_stats stat;
168 struct mii_if_info mii;
169 struct delayed_work mdio_thread;
170 struct eth_plat_info *plat;
171 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
172 struct desc *desc_tab; /* coherent */
173 u32 desc_tab_phys;
174 int id; /* logical port ID */
175 u16 mii_bmcr;
176};
177
178/* NPE message structure */
179struct msg {
180#ifdef __ARMEB__
181 u8 cmd, eth_id, byte2, byte3;
182 u8 byte4, byte5, byte6, byte7;
183#else
184 u8 byte3, byte2, eth_id, cmd;
185 u8 byte7, byte6, byte5, byte4;
186#endif
187};
188
189/* Ethernet packet descriptor */
190struct desc {
191 u32 next; /* pointer to next buffer, unused */
192
193#ifdef __ARMEB__
194 u16 buf_len; /* buffer length */
195 u16 pkt_len; /* packet length */
196 u32 data; /* pointer to data buffer in RAM */
197 u8 dest_id;
198 u8 src_id;
199 u16 flags;
200 u8 qos;
201 u8 padlen;
202 u16 vlan_tci;
203#else
204 u16 pkt_len; /* packet length */
205 u16 buf_len; /* buffer length */
206 u32 data; /* pointer to data buffer in RAM */
207 u16 flags;
208 u8 src_id;
209 u8 dest_id;
210 u16 vlan_tci;
211 u8 padlen;
212 u8 qos;
213#endif
214
215#ifdef __ARMEB__
216 u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
217 u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
218 u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
219#else
220 u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
221 u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
222 u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
223#endif
224};
225
226
227#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
228 (n) * sizeof(struct desc))
229#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
230
231#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
232 ((n) + RX_DESCS) * sizeof(struct desc))
233#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
234
235#ifndef __ARMEB__
236static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
237{
238 int i;
239 for (i = 0; i < cnt; i++)
240 dest[i] = swab32(src[i]);
241}
242#endif
243
244static spinlock_t mdio_lock;
245static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
246static int ports_open;
247static struct port *npe_port_tab[MAX_NPES];
248static struct dma_pool *dma_pool;
249
250
251static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
252 int write, u16 cmd)
253{
254 int cycles = 0;
255
256 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
257 printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
258 return 0;
259 }
260
261 if (write) {
262 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
263 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
264 }
265 __raw_writel(((phy_id << 5) | location) & 0xFF,
266 &mdio_regs->mdio_command[2]);
267 __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
268 &mdio_regs->mdio_command[3]);
269
270 while ((cycles < MAX_MDIO_RETRIES) &&
271 (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
272 udelay(1);
273 cycles++;
274 }
275
276 if (cycles == MAX_MDIO_RETRIES) {
277 printk(KERN_ERR "%s: MII write failed\n", dev->name);
278 return 0;
279 }
280
281#if DEBUG_MDIO
282 printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
283 cycles);
284#endif
285
286 if (write)
287 return 0;
288
289 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
290 printk(KERN_ERR "%s: MII read failed\n", dev->name);
291 return 0;
292 }
293
294 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
295 (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
296}
297
298static int mdio_read(struct net_device *dev, int phy_id, int location)
299{
300 unsigned long flags;
301 u16 val;
302
303 spin_lock_irqsave(&mdio_lock, flags);
304 val = mdio_cmd(dev, phy_id, location, 0, 0);
305 spin_unlock_irqrestore(&mdio_lock, flags);
306 return val;
307}
308
309static void mdio_write(struct net_device *dev, int phy_id, int location,
310 int val)
311{
312 unsigned long flags;
313
314 spin_lock_irqsave(&mdio_lock, flags);
315 mdio_cmd(dev, phy_id, location, 1, val);
316 spin_unlock_irqrestore(&mdio_lock, flags);
317}
318
319static void phy_reset(struct net_device *dev, int phy_id)
320{
321 struct port *port = netdev_priv(dev);
322 int cycles = 0;
323
324 mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
325
326 while (cycles < MAX_MII_RESET_RETRIES) {
327 if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
328#if DEBUG_MDIO
329 printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
330 dev->name, cycles);
331#endif
332 return;
333 }
334 udelay(1);
335 cycles++;
336 }
337
338 printk(KERN_ERR "%s: MII reset failed\n", dev->name);
339}
340
341static void eth_set_duplex(struct port *port)
342{
343 if (port->mii.full_duplex)
344 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
345 &port->regs->tx_control[0]);
346 else
347 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
348 &port->regs->tx_control[0]);
349}
350
351
352static void phy_check_media(struct port *port, int init)
353{
354 if (mii_check_media(&port->mii, 1, init))
355 eth_set_duplex(port);
356 if (port->mii.force_media) { /* mii_check_media() doesn't work */
357 struct net_device *dev = port->netdev;
358 int cur_link = mii_link_ok(&port->mii);
359 int prev_link = netif_carrier_ok(dev);
360
361 if (!prev_link && cur_link) {
362 printk(KERN_INFO "%s: link up\n", dev->name);
363 netif_carrier_on(dev);
364 } else if (prev_link && !cur_link) {
365 printk(KERN_INFO "%s: link down\n", dev->name);
366 netif_carrier_off(dev);
367 }
368 }
369}
370
371
372static void mdio_thread(struct work_struct *work)
373{
374 struct port *port = container_of(work, struct port, mdio_thread.work);
375
376 phy_check_media(port, 0);
377 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
378}
379
380
381static inline void debug_pkt(struct net_device *dev, const char *func,
382 u8 *data, int len)
383{
384#if DEBUG_PKT_BYTES
385 int i;
386
387 printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
388 for (i = 0; i < len; i++) {
389 if (i >= DEBUG_PKT_BYTES)
390 break;
391 printk("%s%02X",
392 ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
393 data[i]);
394 }
395 printk("\n");
396#endif
397}
398
399
400static inline void debug_desc(u32 phys, struct desc *desc)
401{
402#if DEBUG_DESC
403 printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
404 " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
405 phys, desc->next, desc->buf_len, desc->pkt_len,
406 desc->data, desc->dest_id, desc->src_id, desc->flags,
407 desc->qos, desc->padlen, desc->vlan_tci,
408 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
409 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
410 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
411 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
412#endif
413}
414
415static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
416{
417#if DEBUG_QUEUES
418 static struct {
419 int queue;
420 char *name;
421 } names[] = {
422 { TX_QUEUE(0x10), "TX#0 " },
423 { TX_QUEUE(0x20), "TX#1 " },
424 { TX_QUEUE(0x00), "TX#2 " },
425 { RXFREE_QUEUE(0x10), "RX-free#0 " },
426 { RXFREE_QUEUE(0x20), "RX-free#1 " },
427 { RXFREE_QUEUE(0x00), "RX-free#2 " },
428 { TXDONE_QUEUE, "TX-done " },
429 };
430 int i;
431
432 for (i = 0; i < ARRAY_SIZE(names); i++)
433 if (names[i].queue == queue)
434 break;
435
436 printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
437 i < ARRAY_SIZE(names) ? names[i].name : "",
438 is_get ? "->" : "<-", phys);
439#endif
440}
441
442static inline u32 queue_get_entry(unsigned int queue)
443{
444 u32 phys = qmgr_get_entry(queue);
445 debug_queue(queue, 1, phys);
446 return phys;
447}
448
449static inline int queue_get_desc(unsigned int queue, struct port *port,
450 int is_tx)
451{
452 u32 phys, tab_phys, n_desc;
453 struct desc *tab;
454
455 if (!(phys = queue_get_entry(queue)))
456 return -1;
457
458 phys &= ~0x1F; /* mask out non-address bits */
459 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
460 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
461 n_desc = (phys - tab_phys) / sizeof(struct desc);
462 BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
463 debug_desc(phys, &tab[n_desc]);
464 BUG_ON(tab[n_desc].next);
465 return n_desc;
466}
467
468static inline void queue_put_desc(unsigned int queue, u32 phys,
469 struct desc *desc)
470{
471 debug_queue(queue, 0, phys);
472 debug_desc(phys, desc);
473 BUG_ON(phys & 0x1F);
474 qmgr_put_entry(queue, phys);
475 BUG_ON(qmgr_stat_overflow(queue));
476}
477
478
479static inline void dma_unmap_tx(struct port *port, struct desc *desc)
480{
481#ifdef __ARMEB__
482 dma_unmap_single(&port->netdev->dev, desc->data,
483 desc->buf_len, DMA_TO_DEVICE);
484#else
485 dma_unmap_single(&port->netdev->dev, desc->data & ~3,
486 ALIGN((desc->data & 3) + desc->buf_len, 4),
487 DMA_TO_DEVICE);
488#endif
489}
490
491
492static void eth_rx_irq(void *pdev)
493{
494 struct net_device *dev = pdev;
495 struct port *port = netdev_priv(dev);
496
497#if DEBUG_RX
498 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
499#endif
500 qmgr_disable_irq(port->plat->rxq);
501 netif_rx_schedule(dev, &port->napi);
502}
503
504static int eth_poll(struct napi_struct *napi, int budget)
505{
506 struct port *port = container_of(napi, struct port, napi);
507 struct net_device *dev = port->netdev;
508 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
509 int received = 0;
510
511#if DEBUG_RX
512 printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
513#endif
514
515 while (received < budget) {
516 struct sk_buff *skb;
517 struct desc *desc;
518 int n;
519#ifdef __ARMEB__
520 struct sk_buff *temp;
521 u32 phys;
522#endif
523
524 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
525 received = 0; /* No packet received */
526#if DEBUG_RX
527 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
528 dev->name);
529#endif
530 netif_rx_complete(dev, napi);
531 qmgr_enable_irq(rxq);
532 if (!qmgr_stat_empty(rxq) &&
533 netif_rx_reschedule(dev, napi)) {
534#if DEBUG_RX
535 printk(KERN_DEBUG "%s: eth_poll"
536 " netif_rx_reschedule successed\n",
537 dev->name);
538#endif
539 qmgr_disable_irq(rxq);
540 continue;
541 }
542#if DEBUG_RX
543 printk(KERN_DEBUG "%s: eth_poll all done\n",
544 dev->name);
545#endif
546 return 0; /* all work done */
547 }
548
549 desc = rx_desc_ptr(port, n);
550
551#ifdef __ARMEB__
552 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
553 phys = dma_map_single(&dev->dev, skb->data,
554 RX_BUFF_SIZE, DMA_FROM_DEVICE);
555 if (dma_mapping_error(phys)) {
556 dev_kfree_skb(skb);
557 skb = NULL;
558 }
559 }
560#else
561 skb = netdev_alloc_skb(dev,
562 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
563#endif
564
565 if (!skb) {
566 port->stat.rx_dropped++;
567 /* put the desc back on RX-ready queue */
568 desc->buf_len = MAX_MRU;
569 desc->pkt_len = 0;
570 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
571 continue;
572 }
573
574 /* process received frame */
575#ifdef __ARMEB__
576 temp = skb;
577 skb = port->rx_buff_tab[n];
578 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
579 RX_BUFF_SIZE, DMA_FROM_DEVICE);
580#else
581 dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
582 RX_BUFF_SIZE, DMA_FROM_DEVICE);
583 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
584 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
585#endif
586 skb_reserve(skb, NET_IP_ALIGN);
587 skb_put(skb, desc->pkt_len);
588
589 debug_pkt(dev, "eth_poll", skb->data, skb->len);
590
591 skb->protocol = eth_type_trans(skb, dev);
592 dev->last_rx = jiffies;
593 port->stat.rx_packets++;
594 port->stat.rx_bytes += skb->len;
595 netif_receive_skb(skb);
596
597 /* put the new buffer on RX-free queue */
598#ifdef __ARMEB__
599 port->rx_buff_tab[n] = temp;
600 desc->data = phys + NET_IP_ALIGN;
601#endif
602 desc->buf_len = MAX_MRU;
603 desc->pkt_len = 0;
604 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
605 received++;
606 }
607
608#if DEBUG_RX
609 printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
610#endif
611 return received; /* not all work done */
612}
613
614
615static void eth_txdone_irq(void *unused)
616{
617 u32 phys;
618
619#if DEBUG_TX
620 printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
621#endif
622 while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
623 u32 npe_id, n_desc;
624 struct port *port;
625 struct desc *desc;
626 int start;
627
628 npe_id = phys & 3;
629 BUG_ON(npe_id >= MAX_NPES);
630 port = npe_port_tab[npe_id];
631 BUG_ON(!port);
632 phys &= ~0x1F; /* mask out non-address bits */
633 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
634 BUG_ON(n_desc >= TX_DESCS);
635 desc = tx_desc_ptr(port, n_desc);
636 debug_desc(phys, desc);
637
638 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
639 port->stat.tx_packets++;
640 port->stat.tx_bytes += desc->pkt_len;
641
642 dma_unmap_tx(port, desc);
643#if DEBUG_TX
644 printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
645 port->netdev->name, port->tx_buff_tab[n_desc]);
646#endif
647 free_buffer_irq(port->tx_buff_tab[n_desc]);
648 port->tx_buff_tab[n_desc] = NULL;
649 }
650
651 start = qmgr_stat_empty(port->plat->txreadyq);
652 queue_put_desc(port->plat->txreadyq, phys, desc);
653 if (start) {
654#if DEBUG_TX
655 printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
656 port->netdev->name);
657#endif
658 netif_wake_queue(port->netdev);
659 }
660 }
661}
662
663static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
664{
665 struct port *port = netdev_priv(dev);
666 unsigned int txreadyq = port->plat->txreadyq;
667 int len, offset, bytes, n;
668 void *mem;
669 u32 phys;
670 struct desc *desc;
671
672#if DEBUG_TX
673 printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
674#endif
675
676 if (unlikely(skb->len > MAX_MRU)) {
677 dev_kfree_skb(skb);
678 port->stat.tx_errors++;
679 return NETDEV_TX_OK;
680 }
681
682 debug_pkt(dev, "eth_xmit", skb->data, skb->len);
683
684 len = skb->len;
685#ifdef __ARMEB__
686 offset = 0; /* no need to keep alignment */
687 bytes = len;
688 mem = skb->data;
689#else
690 offset = (int)skb->data & 3; /* keep 32-bit alignment */
691 bytes = ALIGN(offset + len, 4);
692 if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
693 dev_kfree_skb(skb);
694 port->stat.tx_dropped++;
695 return NETDEV_TX_OK;
696 }
697 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
698 dev_kfree_skb(skb);
699#endif
700
701 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
702 if (dma_mapping_error(phys)) {
703#ifdef __ARMEB__
704 dev_kfree_skb(skb);
705#else
706 kfree(mem);
707#endif
708 port->stat.tx_dropped++;
709 return NETDEV_TX_OK;
710 }
711
712 n = queue_get_desc(txreadyq, port, 1);
713 BUG_ON(n < 0);
714 desc = tx_desc_ptr(port, n);
715
716#ifdef __ARMEB__
717 port->tx_buff_tab[n] = skb;
718#else
719 port->tx_buff_tab[n] = mem;
720#endif
721 desc->data = phys + offset;
722 desc->buf_len = desc->pkt_len = len;
723
724 /* NPE firmware pads short frames with zeros internally */
725 wmb();
726 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
727 dev->trans_start = jiffies;
728
729 if (qmgr_stat_empty(txreadyq)) {
730#if DEBUG_TX
731 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
732#endif
733 netif_stop_queue(dev);
734 /* a TX-ready wakeup may have raced with stopping the queue; re-check */
735 if (!qmgr_stat_empty(txreadyq)) {
736#if DEBUG_TX
737 printk(KERN_DEBUG "%s: eth_xmit ready again\n",
738 dev->name);
739#endif
740 netif_wake_queue(dev);
741 }
742 }
743
744#if DEBUG_TX
745 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
746#endif
747 return NETDEV_TX_OK;
748}
749
750
751static struct net_device_stats *eth_stats(struct net_device *dev)
752{
753 struct port *port = netdev_priv(dev);
754 return &port->stat;
755}
756
757static void eth_set_mcast_list(struct net_device *dev)
758{
759 struct port *port = netdev_priv(dev);
760 struct dev_mc_list *mclist = dev->mc_list;
761 u8 diffs[ETH_ALEN], *addr;
762 int cnt = dev->mc_count, i;
763
764 if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
765 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
766 &port->regs->rx_control[0]);
767 return;
768 }
769
770 memset(diffs, 0, ETH_ALEN);
771 addr = mclist->dmi_addr; /* first MAC address */
772
773 while (--cnt && (mclist = mclist->next))
774 for (i = 0; i < ETH_ALEN; i++)
775 diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
776
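/* program a single address/mask pair: the mask clears every bit that
 * differs across the list, so the MAC should accept any destination
 * matching the listed addresses on the remaining bits (and possibly
 * a few extra addresses as well) */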
777 for (i = 0; i < ETH_ALEN; i++) {
778 __raw_writel(addr[i], &port->regs->mcast_addr[i]);
779 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
780 }
781
782 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
783 &port->regs->rx_control[0]);
784}
785
786
787static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
788{
789 struct port *port = netdev_priv(dev);
790 unsigned int duplex_chg;
791 int err;
792
793 if (!netif_running(dev))
794 return -EINVAL;
795 err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
796 if (duplex_chg)
797 eth_set_duplex(port);
798 return err;
799}
800
801
802static int request_queues(struct port *port)
803{
804 int err;
805
806 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
807 if (err)
808 return err;
809
810 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
811 if (err)
812 goto rel_rxfree;
813
814 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
815 if (err)
816 goto rel_rx;
817
818 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
819 if (err)
820 goto rel_tx;
821
822 /* TX-done queue handles skbs sent out by the NPEs */
823 if (!ports_open) {
824 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
825 if (err)
826 goto rel_txready;
827 }
828 return 0;
829
830rel_txready:
831 qmgr_release_queue(port->plat->txreadyq);
832rel_tx:
833 qmgr_release_queue(TX_QUEUE(port->id));
834rel_rx:
835 qmgr_release_queue(port->plat->rxq);
836rel_rxfree:
837 qmgr_release_queue(RXFREE_QUEUE(port->id));
838 printk(KERN_DEBUG "%s: unable to request hardware queues\n",
839 port->netdev->name);
840 return err;
841}
842
843static void release_queues(struct port *port)
844{
845 qmgr_release_queue(RXFREE_QUEUE(port->id));
846 qmgr_release_queue(port->plat->rxq);
847 qmgr_release_queue(TX_QUEUE(port->id));
848 qmgr_release_queue(port->plat->txreadyq);
849
850 if (!ports_open)
851 qmgr_release_queue(TXDONE_QUEUE);
852}
853
854static int init_queues(struct port *port)
855{
856 int i;
857
858 if (!ports_open)
859 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
860 POOL_ALLOC_SIZE, 32, 0)))
861 return -ENOMEM;
862
863 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
864 &port->desc_tab_phys)))
865 return -ENOMEM;
866 memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
867 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* clear buffer tables */
868 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
869
870 /* Setup RX buffers */
871 for (i = 0; i < RX_DESCS; i++) {
872 struct desc *desc = rx_desc_ptr(port, i);
873 buffer_t *buff; /* skb or kmalloc()ated memory */
874 void *data;
875#ifdef __ARMEB__
876 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
877 return -ENOMEM;
878 data = buff->data;
879#else
880 if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
881 return -ENOMEM;
882 data = buff;
883#endif
884 desc->buf_len = MAX_MRU;
885 desc->data = dma_map_single(&port->netdev->dev, data,
886 RX_BUFF_SIZE, DMA_FROM_DEVICE);
887 if (dma_mapping_error(desc->data)) {
888 free_buffer(buff);
889 return -EIO;
890 }
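/* start DMA NET_IP_ALIGN bytes into the buffer so the IP header
 * ends up 32-bit aligned after the Ethernet header */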
891 desc->data += NET_IP_ALIGN;
892 port->rx_buff_tab[i] = buff;
893 }
894
895 return 0;
896}
897
898static void destroy_queues(struct port *port)
899{
900 int i;
901
902 if (port->desc_tab) {
903 for (i = 0; i < RX_DESCS; i++) {
904 struct desc *desc = rx_desc_ptr(port, i);
905 buffer_t *buff = port->rx_buff_tab[i];
906 if (buff) {
907 dma_unmap_single(&port->netdev->dev,
908 desc->data - NET_IP_ALIGN,
909 RX_BUFF_SIZE, DMA_FROM_DEVICE);
910 free_buffer(buff);
911 }
912 }
913 for (i = 0; i < TX_DESCS; i++) {
914 struct desc *desc = tx_desc_ptr(port, i);
915 buffer_t *buff = port->tx_buff_tab[i];
916 if (buff) {
917 dma_unmap_tx(port, desc);
918 free_buffer(buff);
919 }
920 }
921 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
922 port->desc_tab = NULL;
923 }
924
925 if (!ports_open && dma_pool) {
926 dma_pool_destroy(dma_pool);
927 dma_pool = NULL;
928 }
929}
930
931static int eth_open(struct net_device *dev)
932{
933 struct port *port = netdev_priv(dev);
934 struct npe *npe = port->npe;
935 struct msg msg;
936 int i, err;
937
938 if (!npe_running(npe)) {
939 err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
940 if (err)
941 return err;
942
943 if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
944 printk(KERN_ERR "%s: %s not responding\n", dev->name,
945 npe_name(npe));
946 return -EIO;
947 }
948 }
949
950 mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
951
952 memset(&msg, 0, sizeof(msg));
953 msg.cmd = NPE_VLAN_SETRXQOSENTRY;
954 msg.eth_id = port->id;
955 msg.byte5 = port->plat->rxq | 0x80;
956 msg.byte7 = port->plat->rxq << 4;
957 for (i = 0; i < 8; i++) {
958 msg.byte3 = i;
959 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
960 return -EIO;
961 }
962
963 msg.cmd = NPE_EDB_SETPORTADDRESS;
964 msg.eth_id = PHYSICAL_ID(port->id);
965 msg.byte2 = dev->dev_addr[0];
966 msg.byte3 = dev->dev_addr[1];
967 msg.byte4 = dev->dev_addr[2];
968 msg.byte5 = dev->dev_addr[3];
969 msg.byte6 = dev->dev_addr[4];
970 msg.byte7 = dev->dev_addr[5];
971 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
972 return -EIO;
973
974 memset(&msg, 0, sizeof(msg));
975 msg.cmd = NPE_FW_SETFIREWALLMODE;
976 msg.eth_id = port->id;
977 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
978 return -EIO;
979
980 if ((err = request_queues(port)) != 0)
981 return err;
982
983 if ((err = init_queues(port)) != 0) {
984 destroy_queues(port);
985 release_queues(port);
986 return err;
987 }
988
989 for (i = 0; i < ETH_ALEN; i++)
990 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
991 __raw_writel(0x08, &port->regs->random_seed);
992 __raw_writel(0x12, &port->regs->partial_empty_threshold);
993 __raw_writel(0x30, &port->regs->partial_full_threshold);
994 __raw_writel(0x08, &port->regs->tx_start_bytes);
995 __raw_writel(0x15, &port->regs->tx_deferral);
996 __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
997 __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
998 __raw_writel(0x80, &port->regs->slot_time);
999 __raw_writel(0x01, &port->regs->int_clock_threshold);
1000
1001 /* Populate queues with buffers, no failure after this point */
1002 for (i = 0; i < TX_DESCS; i++)
1003 queue_put_desc(port->plat->txreadyq,
1004 tx_desc_phys(port, i), tx_desc_ptr(port, i));
1005
1006 for (i = 0; i < RX_DESCS; i++)
1007 queue_put_desc(RXFREE_QUEUE(port->id),
1008 rx_desc_phys(port, i), rx_desc_ptr(port, i));
1009
1010 __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
1011 __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
1012 __raw_writel(0, &port->regs->rx_control[1]);
1013 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1014
1015 napi_enable(&port->napi);
1016 phy_check_media(port, 1);
1017 eth_set_mcast_list(dev);
1018 netif_start_queue(dev);
1019 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
1020
1021 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1022 eth_rx_irq, dev);
1023 if (!ports_open) {
1024 qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
1025 eth_txdone_irq, NULL);
1026 qmgr_enable_irq(TXDONE_QUEUE);
1027 }
1028 ports_open++;
1029 /* we may already have RX data; the scheduled poll will enable the RX IRQ */
1030 netif_rx_schedule(dev, &port->napi);
1031 return 0;
1032}
1033
1034static int eth_close(struct net_device *dev)
1035{
1036 struct port *port = netdev_priv(dev);
1037 struct msg msg;
1038 int buffs = RX_DESCS; /* allocated RX buffers */
1039 int i;
1040
1041 ports_open--;
1042 qmgr_disable_irq(port->plat->rxq);
1043 napi_disable(&port->napi);
1044 netif_stop_queue(dev);
1045
1046 while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
1047 buffs--;
1048
1049 memset(&msg, 0, sizeof(msg));
1050 msg.cmd = NPE_SETLOOPBACK_MODE;
1051 msg.eth_id = port->id;
1052 msg.byte3 = 1;
1053 if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
1054 printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
1055
1056 i = 0;
1057 do { /* drain RX buffers */
1058 while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
1059 buffs--;
1060 if (!buffs)
1061 break;
1062 if (qmgr_stat_empty(TX_QUEUE(port->id))) {
1063 /* TX queue empty: inject a dummy frame so loopback keeps returning RX buffers */
1064 struct desc *desc;
1065 u32 phys;
1066 int n = queue_get_desc(port->plat->txreadyq, port, 1);
1067 BUG_ON(n < 0);
1068 desc = tx_desc_ptr(port, n);
1069 phys = tx_desc_phys(port, n);
1070 desc->buf_len = desc->pkt_len = 1;
1071 wmb();
1072 queue_put_desc(TX_QUEUE(port->id), phys, desc);
1073 }
1074 udelay(1);
1075 } while (++i < MAX_CLOSE_WAIT);
1076
1077 if (buffs)
1078 printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
1079 " left in NPE\n", dev->name, buffs);
1080#if DEBUG_CLOSE
1081 if (!buffs)
1082 printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
1083#endif
1084
1085 buffs = TX_DESCS;
1086 while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
1087 buffs--; /* cancel TX */
1088
1089 i = 0;
1090 do {
1091 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1092 buffs--;
1093 if (!buffs)
1094 break;
1095 } while (++i < MAX_CLOSE_WAIT);
1096
1097 if (buffs)
1098 printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
1099 "left in NPE\n", dev->name, buffs);
1100#if DEBUG_CLOSE
1101 if (!buffs)
1102 printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
1103#endif
1104
1105 msg.byte3 = 0;
1106 if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
1107 printk(KERN_CRIT "%s: unable to disable loopback\n",
1108 dev->name);
1109
1110 port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
1111 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
1112 mdio_write(dev, port->plat->phy, MII_BMCR,
1113 port->mii_bmcr | BMCR_PDOWN);
1114
1115 if (!ports_open)
1116 qmgr_disable_irq(TXDONE_QUEUE);
1117 cancel_rearming_delayed_work(&port->mdio_thread);
1118 destroy_queues(port);
1119 release_queues(port);
1120 return 0;
1121}
1122
1123static int __devinit eth_init_one(struct platform_device *pdev)
1124{
1125 struct port *port;
1126 struct net_device *dev;
1127 struct eth_plat_info *plat = pdev->dev.platform_data;
1128 u32 regs_phys;
1129 int err;
1130
1131 if (!(dev = alloc_etherdev(sizeof(struct port))))
1132 return -ENOMEM;
1133
1134 SET_NETDEV_DEV(dev, &pdev->dev);
1135 port = netdev_priv(dev);
1136 port->netdev = dev;
1137 port->id = pdev->id;
1138
1139 switch (port->id) {
1140 case IXP4XX_ETH_NPEA:
1141 port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
1142 regs_phys = IXP4XX_EthA_BASE_PHYS;
1143 break;
1144 case IXP4XX_ETH_NPEB:
1145 port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1146 regs_phys = IXP4XX_EthB_BASE_PHYS;
1147 break;
1148 case IXP4XX_ETH_NPEC:
1149 port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
1150 regs_phys = IXP4XX_EthC_BASE_PHYS;
1151 break;
1152 default:
1153 err = -ENOSYS;
1154 goto err_free;
1155 }
1156
1157 dev->open = eth_open;
1158 dev->hard_start_xmit = eth_xmit;
1159 dev->stop = eth_close;
1160 dev->get_stats = eth_stats;
1161 dev->do_ioctl = eth_ioctl;
1162 dev->set_multicast_list = eth_set_mcast_list;
1163 dev->tx_queue_len = 100;
1164
1165 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
1166
1167 if (!(port->npe = npe_request(NPE_ID(port->id)))) {
1168 err = -EIO;
1169 goto err_free;
1170 }
1171
1172 if (register_netdev(dev)) {
1173 err = -EIO;
1174 goto err_npe_rel;
1175 }
1176
1177 port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
1178 if (!port->mem_res) {
1179 err = -EBUSY;
1180 goto err_unreg;
1181 }
1182
1183 port->plat = plat;
1184 npe_port_tab[NPE_ID(port->id)] = port;
1185 memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
1186
1187 platform_set_drvdata(pdev, dev);
1188
1189 __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
1190 &port->regs->core_control);
1191 udelay(50);
1192 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1193 udelay(50);
1194
1195 port->mii.dev = dev;
1196 port->mii.mdio_read = mdio_read;
1197 port->mii.mdio_write = mdio_write;
1198 port->mii.phy_id = plat->phy;
1199 port->mii.phy_id_mask = 0x1F;
1200 port->mii.reg_num_mask = 0x1F;
1201
1202 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
1203 npe_name(port->npe));
1204
1205 phy_reset(dev, plat->phy);
1206 port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
1207 ~(BMCR_RESET | BMCR_PDOWN);
1208 mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
1209
1210 INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
1211 return 0;
1212
1213err_unreg:
1214 unregister_netdev(dev);
1215err_npe_rel:
1216 npe_release(port->npe);
1217err_free:
1218 free_netdev(dev);
1219 return err;
1220}
1221
1222static int __devexit eth_remove_one(struct platform_device *pdev)
1223{
1224 struct net_device *dev = platform_get_drvdata(pdev);
1225 struct port *port = netdev_priv(dev);
1226
1227 unregister_netdev(dev);
1228 npe_port_tab[NPE_ID(port->id)] = NULL;
1229 platform_set_drvdata(pdev, NULL);
1230 npe_release(port->npe);
1231 release_resource(port->mem_res);
1232 free_netdev(dev);
1233 return 0;
1234}
1235
1236static struct platform_driver drv = {
1237 .driver.name = DRV_NAME,
1238 .probe = eth_init_one,
1239 .remove = eth_remove_one,
1240};
1241
1242static int __init eth_init_module(void)
1243{
1244 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
1245 return -ENOSYS;
1246
1247 /* All MII PHY accesses use NPE-B Ethernet registers */
1248 spin_lock_init(&mdio_lock);
1249 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1250 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
1251
1252 return platform_driver_register(&drv);
1253}
1254
1255static void __exit eth_cleanup_module(void)
1256{
1257 platform_driver_unregister(&drv);
1258}
1259
1260MODULE_AUTHOR("Krzysztof Halasa");
1261MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
1262MODULE_LICENSE("GPL v2");
1263MODULE_ALIAS("platform:ixp4xx_eth");
1264module_init(eth_init_module);
1265module_exit(eth_cleanup_module);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 4fec8581bfd7..89c0018132ec 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -27,6 +27,7 @@
27#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
30#include <linux/skbuff.h> 31#include <linux/skbuff.h>
31#include <linux/platform_device.h> 32#include <linux/platform_device.h>
32 33
@@ -42,7 +43,7 @@
42#define DRV_NAME "bfin_mac" 43#define DRV_NAME "bfin_mac"
43#define DRV_VERSION "1.1" 44#define DRV_VERSION "1.1"
44#define DRV_AUTHOR "Bryan Wu, Luke Yang" 45#define DRV_AUTHOR "Bryan Wu, Luke Yang"
45#define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver" 46#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
46 47
47MODULE_AUTHOR(DRV_AUTHOR); 48MODULE_AUTHOR(DRV_AUTHOR);
48MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
@@ -73,8 +74,14 @@ static struct net_dma_desc_tx *current_tx_ptr;
73static struct net_dma_desc_tx *tx_desc; 74static struct net_dma_desc_tx *tx_desc;
74static struct net_dma_desc_rx *rx_desc; 75static struct net_dma_desc_rx *rx_desc;
75 76
76static void bf537mac_disable(void); 77#if defined(CONFIG_BFIN_MAC_RMII)
77static void bf537mac_enable(void); 78static u16 pin_req[] = P_RMII0;
79#else
80static u16 pin_req[] = P_MII0;
81#endif
82
83static void bfin_mac_disable(void);
84static void bfin_mac_enable(void);
78 85
79static void desc_list_free(void) 86static void desc_list_free(void)
80{ 87{
@@ -243,27 +250,6 @@ init_error:
243 250
244/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ 251/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
245 252
246/* Set FER regs to MUX in Ethernet pins */
247static int setup_pin_mux(int action)
248{
249#if defined(CONFIG_BFIN_MAC_RMII)
250 u16 pin_req[] = P_RMII0;
251#else
252 u16 pin_req[] = P_MII0;
253#endif
254
255 if (action) {
256 if (peripheral_request_list(pin_req, DRV_NAME)) {
257 printk(KERN_ERR DRV_NAME
258 ": Requesting Peripherals failed\n");
259 return -EFAULT;
260 }
261 } else
262 peripheral_free_list(pin_req);
263
264 return 0;
265}
266
267/* 253/*
268 * MII operations 254 * MII operations
269 */ 255 */
@@ -322,9 +308,9 @@ static int mdiobus_reset(struct mii_bus *bus)
322 return 0; 308 return 0;
323} 309}
324 310
325static void bf537_adjust_link(struct net_device *dev) 311static void bfin_mac_adjust_link(struct net_device *dev)
326{ 312{
327 struct bf537mac_local *lp = netdev_priv(dev); 313 struct bfin_mac_local *lp = netdev_priv(dev);
328 struct phy_device *phydev = lp->phydev; 314 struct phy_device *phydev = lp->phydev;
329 unsigned long flags; 315 unsigned long flags;
330 int new_state = 0; 316 int new_state = 0;
@@ -395,7 +381,7 @@ static void bf537_adjust_link(struct net_device *dev)
395 381
396static int mii_probe(struct net_device *dev) 382static int mii_probe(struct net_device *dev)
397{ 383{
398 struct bf537mac_local *lp = netdev_priv(dev); 384 struct bfin_mac_local *lp = netdev_priv(dev);
399 struct phy_device *phydev = NULL; 385 struct phy_device *phydev = NULL;
400 unsigned short sysctl; 386 unsigned short sysctl;
401 int i; 387 int i;
@@ -431,10 +417,10 @@ static int mii_probe(struct net_device *dev)
431 } 417 }
432 418
433#if defined(CONFIG_BFIN_MAC_RMII) 419#if defined(CONFIG_BFIN_MAC_RMII)
434 phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, 420 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
435 PHY_INTERFACE_MODE_RMII); 421 PHY_INTERFACE_MODE_RMII);
436#else 422#else
437 phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, 423 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
438 PHY_INTERFACE_MODE_MII); 424 PHY_INTERFACE_MODE_MII);
439#endif 425#endif
440 426
@@ -469,6 +455,51 @@ static int mii_probe(struct net_device *dev)
469 return 0; 455 return 0;
470} 456}
471 457
458/*
459 * Ethtool support
460 */
461
462static int
463bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
464{
465 struct bfin_mac_local *lp = netdev_priv(dev);
466
467 if (lp->phydev)
468 return phy_ethtool_gset(lp->phydev, cmd);
469
470 return -EINVAL;
471}
472
473static int
474bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
475{
476 struct bfin_mac_local *lp = netdev_priv(dev);
477
478 if (!capable(CAP_NET_ADMIN))
479 return -EPERM;
480
481 if (lp->phydev)
482 return phy_ethtool_sset(lp->phydev, cmd);
483
484 return -EINVAL;
485}
486
487static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
488 struct ethtool_drvinfo *info)
489{
490 strcpy(info->driver, DRV_NAME);
491 strcpy(info->version, DRV_VERSION);
492 strcpy(info->fw_version, "N/A");
493 strcpy(info->bus_info, dev->dev.bus_id);
494}
495
496static struct ethtool_ops bfin_mac_ethtool_ops = {
497 .get_settings = bfin_mac_ethtool_getsettings,
498 .set_settings = bfin_mac_ethtool_setsettings,
499 .get_link = ethtool_op_get_link,
500 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
501};
502
472/**************************************************************************/ 503/**************************************************************************/
473void setup_system_regs(struct net_device *dev) 504void setup_system_regs(struct net_device *dev)
474{ 505{
@@ -511,7 +542,7 @@ static void setup_mac_addr(u8 *mac_addr)
511 bfin_write_EMAC_ADDRHI(addr_hi); 542 bfin_write_EMAC_ADDRHI(addr_hi);
512} 543}
513 544
514static int bf537mac_set_mac_address(struct net_device *dev, void *p) 545static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
515{ 546{
516 struct sockaddr *addr = p; 547 struct sockaddr *addr = p;
517 if (netif_running(dev)) 548 if (netif_running(dev))
@@ -573,7 +604,7 @@ adjust_head:
573 604
574} 605}
575 606
576static int bf537mac_hard_start_xmit(struct sk_buff *skb, 607static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
577 struct net_device *dev) 608 struct net_device *dev)
578{ 609{
579 unsigned int data; 610 unsigned int data;
@@ -631,7 +662,7 @@ out:
631 return 0; 662 return 0;
632} 663}
633 664
634static void bf537mac_rx(struct net_device *dev) 665static void bfin_mac_rx(struct net_device *dev)
635{ 666{
636 struct sk_buff *skb, *new_skb; 667 struct sk_buff *skb, *new_skb;
637 unsigned short len; 668 unsigned short len;
@@ -680,7 +711,7 @@ out:
680} 711}
681 712
682/* interrupt routine to handle rx and error signal */ 713/* interrupt routine to handle rx and error signal */
683static irqreturn_t bf537mac_interrupt(int irq, void *dev_id) 714static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
684{ 715{
685 struct net_device *dev = dev_id; 716 struct net_device *dev = dev_id;
686 int number = 0; 717 int number = 0;
@@ -700,21 +731,21 @@ get_one_packet:
700 } 731 }
701 732
702real_rx: 733real_rx:
703 bf537mac_rx(dev); 734 bfin_mac_rx(dev);
704 number++; 735 number++;
705 goto get_one_packet; 736 goto get_one_packet;
706} 737}
707 738
708#ifdef CONFIG_NET_POLL_CONTROLLER 739#ifdef CONFIG_NET_POLL_CONTROLLER
709static void bf537mac_poll(struct net_device *dev) 740static void bfin_mac_poll(struct net_device *dev)
710{ 741{
711 disable_irq(IRQ_MAC_RX); 742 disable_irq(IRQ_MAC_RX);
712 bf537mac_interrupt(IRQ_MAC_RX, dev); 743 bfin_mac_interrupt(IRQ_MAC_RX, dev);
713 enable_irq(IRQ_MAC_RX); 744 enable_irq(IRQ_MAC_RX);
714} 745}
715#endif /* CONFIG_NET_POLL_CONTROLLER */ 746#endif /* CONFIG_NET_POLL_CONTROLLER */
716 747
717static void bf537mac_disable(void) 748static void bfin_mac_disable(void)
718{ 749{
719 unsigned int opmode; 750 unsigned int opmode;
720 751
@@ -728,7 +759,7 @@ static void bf537mac_disable(void)
728/* 759/*
729 * Enable Interrupts, Receive, and Transmit 760 * Enable Interrupts, Receive, and Transmit
730 */ 761 */
731static void bf537mac_enable(void) 762static void bfin_mac_enable(void)
732{ 763{
733 u32 opmode; 764 u32 opmode;
734 765
@@ -766,23 +797,23 @@ static void bf537mac_enable(void)
766} 797}
767 798
768/* Our watchdog timed out. Called by the networking layer */ 799/* Our watchdog timed out. Called by the networking layer */
769static void bf537mac_timeout(struct net_device *dev) 800static void bfin_mac_timeout(struct net_device *dev)
770{ 801{
771 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 802 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
772 803
773 bf537mac_disable(); 804 bfin_mac_disable();
774 805
775 /* reset tx queue */ 806 /* reset tx queue */
776 tx_list_tail = tx_list_head->next; 807 tx_list_tail = tx_list_head->next;
777 808
778 bf537mac_enable(); 809 bfin_mac_enable();
779 810
780 /* We can accept TX packets again */ 811 /* We can accept TX packets again */
781 dev->trans_start = jiffies; 812 dev->trans_start = jiffies;
782 netif_wake_queue(dev); 813 netif_wake_queue(dev);
783} 814}
784 815
785static void bf537mac_multicast_hash(struct net_device *dev) 816static void bfin_mac_multicast_hash(struct net_device *dev)
786{ 817{
787 u32 emac_hashhi, emac_hashlo; 818 u32 emac_hashhi, emac_hashlo;
788 struct dev_mc_list *dmi = dev->mc_list; 819 struct dev_mc_list *dmi = dev->mc_list;
@@ -821,7 +852,7 @@ static void bf537mac_multicast_hash(struct net_device *dev)
821 * promiscuous mode (for TCPDUMP and cousins) or accept 852 * promiscuous mode (for TCPDUMP and cousins) or accept
822 * a select set of multicast packets 853 * a select set of multicast packets
823 */ 854 */
824static void bf537mac_set_multicast_list(struct net_device *dev) 855static void bfin_mac_set_multicast_list(struct net_device *dev)
825{ 856{
826 u32 sysctl; 857 u32 sysctl;
827 858
@@ -840,7 +871,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
840 sysctl = bfin_read_EMAC_OPMODE(); 871 sysctl = bfin_read_EMAC_OPMODE();
841 sysctl |= HM; 872 sysctl |= HM;
842 bfin_write_EMAC_OPMODE(sysctl); 873 bfin_write_EMAC_OPMODE(sysctl);
843 bf537mac_multicast_hash(dev); 874 bfin_mac_multicast_hash(dev);
844 } else { 875 } else {
845 /* clear promisc or multicast mode */ 876 /* clear promisc or multicast mode */
846 sysctl = bfin_read_EMAC_OPMODE(); 877 sysctl = bfin_read_EMAC_OPMODE();
@@ -852,7 +883,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
852/* 883/*
853 * this puts the device in an inactive state 884 * this puts the device in an inactive state
854 */ 885 */
855static void bf537mac_shutdown(struct net_device *dev) 886static void bfin_mac_shutdown(struct net_device *dev)
856{ 887{
857 /* Turn off the EMAC */ 888 /* Turn off the EMAC */
858 bfin_write_EMAC_OPMODE(0x00000000); 889 bfin_write_EMAC_OPMODE(0x00000000);
@@ -866,9 +897,9 @@ static void bf537mac_shutdown(struct net_device *dev)
866 * 897 *
867 * Set up everything, reset the card, etc.. 898 * Set up everything, reset the card, etc..
868 */ 899 */
869static int bf537mac_open(struct net_device *dev) 900static int bfin_mac_open(struct net_device *dev)
870{ 901{
871 struct bf537mac_local *lp = netdev_priv(dev); 902 struct bfin_mac_local *lp = netdev_priv(dev);
872 int retval; 903 int retval;
873 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 904 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
874 905
@@ -891,8 +922,8 @@ static int bf537mac_open(struct net_device *dev)
891 phy_start(lp->phydev); 922 phy_start(lp->phydev);
892 phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 923 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
893 setup_system_regs(dev); 924 setup_system_regs(dev);
894 bf537mac_disable(); 925 bfin_mac_disable();
895 bf537mac_enable(); 926 bfin_mac_enable();
896 pr_debug("hardware init finished\n"); 927 pr_debug("hardware init finished\n");
897 netif_start_queue(dev); 928 netif_start_queue(dev);
898 netif_carrier_on(dev); 929 netif_carrier_on(dev);
@@ -906,9 +937,9 @@ static int bf537mac_open(struct net_device *dev)
906 * and not talk to the outside world. Caused by 937 * and not talk to the outside world. Caused by
907 * an 'ifconfig ethX down' 938 * an 'ifconfig ethX down'
908 */ 939 */
909static int bf537mac_close(struct net_device *dev) 940static int bfin_mac_close(struct net_device *dev)
910{ 941{
911 struct bf537mac_local *lp = netdev_priv(dev); 942 struct bfin_mac_local *lp = netdev_priv(dev);
912 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 943 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
913 944
914 netif_stop_queue(dev); 945 netif_stop_queue(dev);
@@ -918,7 +949,7 @@ static int bf537mac_close(struct net_device *dev)
918 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); 949 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
919 950
920 /* clear everything */ 951 /* clear everything */
921 bf537mac_shutdown(dev); 952 bfin_mac_shutdown(dev);
922 953
923 /* free the rx/tx buffers */ 954 /* free the rx/tx buffers */
924 desc_list_free(); 955 desc_list_free();
@@ -926,46 +957,59 @@ static int bf537mac_close(struct net_device *dev)
926 return 0; 957 return 0;
927} 958}
928 959
929static int __init bf537mac_probe(struct net_device *dev) 960static int __init bfin_mac_probe(struct platform_device *pdev)
930{ 961{
931 struct bf537mac_local *lp = netdev_priv(dev); 962 struct net_device *ndev;
932 int retval; 963 struct bfin_mac_local *lp;
933 int i; 964 int rc, i;
965
966 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
967 if (!ndev) {
968 dev_err(&pdev->dev, "Cannot allocate net device!\n");
969 return -ENOMEM;
970 }
971
972 SET_NETDEV_DEV(ndev, &pdev->dev);
973 platform_set_drvdata(pdev, ndev);
974 lp = netdev_priv(ndev);
934 975
935 /* Grab the MAC address in the MAC */ 976 /* Grab the MAC address in the MAC */
936 *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); 977 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
937 *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); 978 *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
938 979
939 /* probe mac */ 980 /* probe mac */
940 /*todo: how to proble? which is revision_register */ 981 /*todo: how to proble? which is revision_register */
941 bfin_write_EMAC_ADDRLO(0x12345678); 982 bfin_write_EMAC_ADDRLO(0x12345678);
942 if (bfin_read_EMAC_ADDRLO() != 0x12345678) { 983 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
943 pr_debug("can't detect bf537 mac!\n"); 984 dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
944 retval = -ENODEV; 985 rc = -ENODEV;
945 goto err_out; 986 goto out_err_probe_mac;
946 } 987 }
947 988
948 /* set the GPIO pins to Ethernet mode */ 989 /* set the GPIO pins to Ethernet mode */
949 retval = setup_pin_mux(1); 990 rc = peripheral_request_list(pin_req, DRV_NAME);
950 if (retval) 991 if (rc) {
951 return retval; 992 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
952 993 rc = -EFAULT;
953 /*Is it valid? (Did bootloader initialize it?) */ 994 goto out_err_setup_pin_mux;
954 if (!is_valid_ether_addr(dev->dev_addr)) {
955 /* Grab the MAC from the board somehow - this is done in the
956 arch/blackfin/mach-bf537/boards/eth_mac.c */
957 bfin_get_ether_addr(dev->dev_addr);
958 } 995 }
959 996
997 /*
998 * Is it valid? (Did bootloader initialize it?)
999 * Grab the MAC from the board somehow
1000 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1001 */
1002 if (!is_valid_ether_addr(ndev->dev_addr))
1003 bfin_get_ether_addr(ndev->dev_addr);
1004
960 /* If still not valid, get a random one */ 1005 /* If still not valid, get a random one */
961 if (!is_valid_ether_addr(dev->dev_addr)) { 1006 if (!is_valid_ether_addr(ndev->dev_addr))
962 random_ether_addr(dev->dev_addr); 1007 random_ether_addr(ndev->dev_addr);
963 }
964 1008
965 setup_mac_addr(dev->dev_addr); 1009 setup_mac_addr(ndev->dev_addr);
966 1010
967 /* MDIO bus initial */ 1011 /* MDIO bus initial */
968 lp->mii_bus.priv = dev; 1012 lp->mii_bus.priv = ndev;
969 lp->mii_bus.read = mdiobus_read; 1013 lp->mii_bus.read = mdiobus_read;
970 lp->mii_bus.write = mdiobus_write; 1014 lp->mii_bus.write = mdiobus_write;
971 lp->mii_bus.reset = mdiobus_reset; 1015 lp->mii_bus.reset = mdiobus_reset;
@@ -975,86 +1019,86 @@ static int __init bf537mac_probe(struct net_device *dev)
975 for (i = 0; i < PHY_MAX_ADDR; ++i) 1019 for (i = 0; i < PHY_MAX_ADDR; ++i)
976 lp->mii_bus.irq[i] = PHY_POLL; 1020 lp->mii_bus.irq[i] = PHY_POLL;
977 1021
978 mdiobus_register(&lp->mii_bus); 1022 rc = mdiobus_register(&lp->mii_bus);
1023 if (rc) {
1024 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1025 goto out_err_mdiobus_register;
1026 }
979 1027
980 retval = mii_probe(dev); 1028 rc = mii_probe(ndev);
981 if (retval) 1029 if (rc) {
982 return retval; 1030 dev_err(&pdev->dev, "MII Probe failed!\n");
1031 goto out_err_mii_probe;
1032 }
983 1033
984 /* Fill in the fields of the device structure with ethernet values. */ 1034 /* Fill in the fields of the device structure with ethernet values. */
985 ether_setup(dev); 1035 ether_setup(ndev);
986 1036
987 dev->open = bf537mac_open; 1037 ndev->open = bfin_mac_open;
988 dev->stop = bf537mac_close; 1038 ndev->stop = bfin_mac_close;
989 dev->hard_start_xmit = bf537mac_hard_start_xmit; 1039 ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
990 dev->set_mac_address = bf537mac_set_mac_address; 1040 ndev->set_mac_address = bfin_mac_set_mac_address;
991 dev->tx_timeout = bf537mac_timeout; 1041 ndev->tx_timeout = bfin_mac_timeout;
992 dev->set_multicast_list = bf537mac_set_multicast_list; 1042 ndev->set_multicast_list = bfin_mac_set_multicast_list;
993#ifdef CONFIG_NET_POLL_CONTROLLER 1043#ifdef CONFIG_NET_POLL_CONTROLLER
994 dev->poll_controller = bf537mac_poll; 1044 ndev->poll_controller = bfin_mac_poll;
995#endif 1045#endif
1046 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
996 1047
997 spin_lock_init(&lp->lock); 1048 spin_lock_init(&lp->lock);
998 1049
999 /* now, enable interrupts */ 1050 /* now, enable interrupts */
1000 /* register irq handler */ 1051 /* register irq handler */
1001 if (request_irq 1052 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1002 (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, 1053 IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
1003 "EMAC_RX", dev)) { 1054 if (rc) {
1004 printk(KERN_WARNING DRV_NAME 1055 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1005 ": Unable to attach BlackFin MAC RX interrupt\n"); 1056 rc = -EBUSY;
1006 return -EBUSY; 1057 goto out_err_request_irq;
1007 } 1058 }
1008 1059
1009 1060 rc = register_netdev(ndev);
1010 retval = register_netdev(dev); 1061 if (rc) {
1011 if (retval == 0) { 1062 dev_err(&pdev->dev, "Cannot register net device!\n");
1012 /* now, print out the card info, in a short format.. */ 1063 goto out_err_reg_ndev;
1013 printk(KERN_INFO "%s: Version %s, %s\n",
1014 DRV_NAME, DRV_VERSION, DRV_DESC);
1015 }
1016
1017err_out:
1018 return retval;
1019}
1020
1021static int bfin_mac_probe(struct platform_device *pdev)
1022{
1023 struct net_device *ndev;
1024
1025 ndev = alloc_etherdev(sizeof(struct bf537mac_local));
1026 if (!ndev) {
1027 printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
1028 return -ENOMEM;
1029 } 1064 }
1030 1065
1031 SET_NETDEV_DEV(ndev, &pdev->dev); 1066 /* now, print out the card info, in a short format.. */
1067 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1032 1068
1033 platform_set_drvdata(pdev, ndev); 1069 return 0;
1034 1070
1035 if (bf537mac_probe(ndev) != 0) { 1071out_err_reg_ndev:
1036 platform_set_drvdata(pdev, NULL); 1072 free_irq(IRQ_MAC_RX, ndev);
1037 free_netdev(ndev); 1073out_err_request_irq:
1038 printk(KERN_WARNING DRV_NAME ": not found\n"); 1074out_err_mii_probe:
1039 return -ENODEV; 1075 mdiobus_unregister(&lp->mii_bus);
1040 } 1076out_err_mdiobus_register:
1077 peripheral_free_list(pin_req);
1078out_err_setup_pin_mux:
1079out_err_probe_mac:
1080 platform_set_drvdata(pdev, NULL);
1081 free_netdev(ndev);
1041 1082
1042 return 0; 1083 return rc;
1043} 1084}
1044 1085
1045static int bfin_mac_remove(struct platform_device *pdev) 1086static int bfin_mac_remove(struct platform_device *pdev)
1046{ 1087{
1047 struct net_device *ndev = platform_get_drvdata(pdev); 1088 struct net_device *ndev = platform_get_drvdata(pdev);
1089 struct bfin_mac_local *lp = netdev_priv(ndev);
1048 1090
1049 platform_set_drvdata(pdev, NULL); 1091 platform_set_drvdata(pdev, NULL);
1050 1092
1093 mdiobus_unregister(&lp->mii_bus);
1094
1051 unregister_netdev(ndev); 1095 unregister_netdev(ndev);
1052 1096
1053 free_irq(IRQ_MAC_RX, ndev); 1097 free_irq(IRQ_MAC_RX, ndev);
1054 1098
1055 free_netdev(ndev); 1099 free_netdev(ndev);
1056 1100
1057 setup_pin_mux(0); 1101 peripheral_free_list(pin_req);
1058 1102
1059 return 0; 1103 return 0;
1060} 1104}
@@ -1065,7 +1109,7 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1065 struct net_device *net_dev = platform_get_drvdata(pdev); 1109 struct net_device *net_dev = platform_get_drvdata(pdev);
1066 1110
1067 if (netif_running(net_dev)) 1111 if (netif_running(net_dev))
1068 bf537mac_close(net_dev); 1112 bfin_mac_close(net_dev);
1069 1113
1070 return 0; 1114 return 0;
1071} 1115}
@@ -1075,7 +1119,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
1075 struct net_device *net_dev = platform_get_drvdata(pdev); 1119 struct net_device *net_dev = platform_get_drvdata(pdev);
1076 1120
1077 if (netif_running(net_dev)) 1121 if (netif_running(net_dev))
1078 bf537mac_open(net_dev); 1122 bfin_mac_open(net_dev);
1079 1123
1080 return 0; 1124 return 0;
1081} 1125}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index f774d5a36942..beff51064ff4 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -49,7 +49,7 @@ struct net_dma_desc_tx {
49 struct status_area_tx status; 49 struct status_area_tx status;
50}; 50};
51 51
52struct bf537mac_local { 52struct bfin_mac_local {
53 /* 53 /*
54 * these are things that the kernel wants me to keep, so users 54 * these are things that the kernel wants me to keep, so users
55 * can find out semi-useless statistics of how well the card is 55 * can find out semi-useless statistics of how well the card is
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 83bda6ccde98..56f50491a453 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -633,7 +633,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
633 printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); 633 printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
634 printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); 634 printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
635 printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); 635 printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
636 printk(KERN_DEBUG " AutoPort: %d\n", GetBit(!Word,ee_Jabber)); 636 printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
637 printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); 637 printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
638 } 638 }
639 639
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 99a4b990939f..587afe7be689 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -131,8 +131,6 @@ static void free_skb_resources(struct gfar_private *priv);
131static void gfar_set_multi(struct net_device *dev); 131static void gfar_set_multi(struct net_device *dev);
132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
133static void gfar_configure_serdes(struct net_device *dev); 133static void gfar_configure_serdes(struct net_device *dev);
134extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
135extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
136#ifdef CONFIG_GFAR_NAPI 134#ifdef CONFIG_GFAR_NAPI
137static int gfar_poll(struct napi_struct *napi, int budget); 135static int gfar_poll(struct napi_struct *napi, int budget);
138#endif 136#endif
@@ -477,24 +475,30 @@ static int init_phy(struct net_device *dev)
477 return 0; 475 return 0;
478} 476}
479 477
478/*
479 * Initialize TBI PHY interface for communicating with the
480 * SERDES lynx PHY on the chip. We communicate with this PHY
481 * through the MDIO bus on each controller, treating it as a
482 * "normal" PHY at the address found in the TBIPA register. We assume
483 * that the TBIPA register is valid. Either the MDIO bus code will set
484 * it to a value that doesn't conflict with other PHYs on the bus, or the
485 * value doesn't matter, as there are no other PHYs on the bus.
486 */
480static void gfar_configure_serdes(struct net_device *dev) 487static void gfar_configure_serdes(struct net_device *dev)
481{ 488{
482 struct gfar_private *priv = netdev_priv(dev); 489 struct gfar_private *priv = netdev_priv(dev);
483 struct gfar_mii __iomem *regs = 490 struct gfar_mii __iomem *regs =
484 (void __iomem *)&priv->regs->gfar_mii_regs; 491 (void __iomem *)&priv->regs->gfar_mii_regs;
492 int tbipa = gfar_read(&priv->regs->tbipa);
485 493
486 /* Initialise TBI i/f to communicate with serdes (lynx phy) */ 494 /* Single clk mode, mii mode off(for serdes communication) */
495 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
487 496
488 /* Single clk mode, mii mode off(for aerdes communication) */ 497 gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
489 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
490
491 /* Supported pause and full-duplex, no half-duplex */
492 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
493 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 498 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
494 ADVERTISE_1000XPSE_ASYM); 499 ADVERTISE_1000XPSE_ASYM);
495 500
496 /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */ 501 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
497 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
498 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 502 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
499} 503}
500 504
@@ -541,9 +545,6 @@ static void init_registers(struct net_device *dev)
541 545
542 /* Initialize the Minimum Frame Length Register */ 546 /* Initialize the Minimum Frame Length Register */
543 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 547 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
544
545 /* Assign the TBI an address which won't conflict with the PHYs */
546 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
547} 548}
548 549
549 550
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 0d0883609469..fd487be3993e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -137,7 +137,6 @@ extern const char gfar_driver_version[];
137#define DEFAULT_RXCOUNT 0 137#define DEFAULT_RXCOUNT 0
138#endif /* CONFIG_GFAR_NAPI */ 138#endif /* CONFIG_GFAR_NAPI */
139 139
140#define TBIPA_VALUE 0x1f
141#define MIIMCFG_INIT_VALUE 0x00000007 140#define MIIMCFG_INIT_VALUE 0x00000007
142#define MIIMCFG_RESET 0x80000000 141#define MIIMCFG_RESET 0x80000000
143#define MIIMIND_BUSY 0x00000001 142#define MIIMIND_BUSY 0x00000001
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index b8898927236a..ebcfb27a904e 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -78,7 +78,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
78 * same as system mdio bus, used for controlling the external PHYs, for eg. 78 * same as system mdio bus, used for controlling the external PHYs, for eg.
79 */ 79 */
80int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) 80int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
81
82{ 81{
83 u16 value; 82 u16 value;
84 83
@@ -122,7 +121,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
122} 121}
123 122
124/* Reset the MIIM registers, and wait for the bus to free */ 123/* Reset the MIIM registers, and wait for the bus to free */
125int gfar_mdio_reset(struct mii_bus *bus) 124static int gfar_mdio_reset(struct mii_bus *bus)
126{ 125{
127 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 126 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
128 unsigned int timeout = PHY_INIT_TIMEOUT; 127 unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -152,14 +151,15 @@ int gfar_mdio_reset(struct mii_bus *bus)
152} 151}
153 152
154 153
155int gfar_mdio_probe(struct device *dev) 154static int gfar_mdio_probe(struct device *dev)
156{ 155{
157 struct platform_device *pdev = to_platform_device(dev); 156 struct platform_device *pdev = to_platform_device(dev);
158 struct gianfar_mdio_data *pdata; 157 struct gianfar_mdio_data *pdata;
159 struct gfar_mii __iomem *regs; 158 struct gfar_mii __iomem *regs;
159 struct gfar __iomem *enet_regs;
160 struct mii_bus *new_bus; 160 struct mii_bus *new_bus;
161 struct resource *r; 161 struct resource *r;
162 int err = 0; 162 int i, err = 0;
163 163
164 if (NULL == dev) 164 if (NULL == dev)
165 return -EINVAL; 165 return -EINVAL;
@@ -199,6 +199,34 @@ int gfar_mdio_probe(struct device *dev)
199 new_bus->dev = dev; 199 new_bus->dev = dev;
200 dev_set_drvdata(dev, new_bus); 200 dev_set_drvdata(dev, new_bus);
201 201
202 /*
203 * This is mildly evil, but so is our hardware for doing this.
204 * Also, we have to cast back to struct gfar_mii because of
205 * definition weirdness done in gianfar.h.
206 */
207 enet_regs = (struct gfar __iomem *)
208 ((char *)regs - offsetof(struct gfar, gfar_mii_regs));
209
210 /* Scan the bus, looking for an empty spot for TBIPA */
211 gfar_write(&enet_regs->tbipa, 0);
212 for (i = PHY_MAX_ADDR; i > 0; i--) {
213 u32 phy_id;
214 int r;
215
216 r = get_phy_id(new_bus, i, &phy_id);
217 if (r)
218 return r;
219
220 if (phy_id == 0xffffffff)
221 break;
222 }
223
224 /* The bus is full. We don't support using 31 PHYs, sorry */
225 if (i == 0)
226 return -EBUSY;
227
228 gfar_write(&enet_regs->tbipa, i);
229
202 err = mdiobus_register(new_bus); 230 err = mdiobus_register(new_bus);
203 231
204 if (0 != err) { 232 if (0 != err) {
@@ -218,7 +246,7 @@ reg_map_fail:
218} 246}
219 247
220 248
221int gfar_mdio_remove(struct device *dev) 249static int gfar_mdio_remove(struct device *dev)
222{ 250{
223 struct mii_bus *bus = dev_get_drvdata(dev); 251 struct mii_bus *bus = dev_get_drvdata(dev);
224 252
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index b373091c7031..2af28b16a0e2 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -41,6 +41,9 @@ struct gfar_mii {
41 41
42int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 42int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
43int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 43int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
45 int regnum, u16 value);
46int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
44int __init gfar_mdio_init(void); 47int __init gfar_mdio_init(void);
45void gfar_mdio_exit(void); 48void gfar_mdio_exit(void);
46#endif /* GIANFAR_PHY_H */ 49#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3ac8529bb92c..6bf9e76b0a00 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -48,7 +48,7 @@ config VITESSE_PHY
48config SMSC_PHY 48config SMSC_PHY
49 tristate "Drivers for SMSC PHYs" 49 tristate "Drivers for SMSC PHYs"
50 ---help--- 50 ---help---
51 Currently supports the LAN83C185 PHY 51 Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
52 52
53config BROADCOM_PHY 53config BROADCOM_PHY
54 tristate "Drivers for Broadcom PHYs" 54 tristate "Drivers for Broadcom PHYs"
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ddf8d51832a6..ac3c01d28fdf 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -256,7 +256,7 @@ void phy_prepare_link(struct phy_device *phydev,
256/** 256/**
257 * phy_connect - connect an ethernet device to a PHY device 257 * phy_connect - connect an ethernet device to a PHY device
258 * @dev: the network device to connect 258 * @dev: the network device to connect
259 * @phy_id: the PHY device to connect 259 * @bus_id: the id string of the PHY device to connect
260 * @handler: callback function for state change notifications 260 * @handler: callback function for state change notifications
261 * @flags: PHY device's dev_flags 261 * @flags: PHY device's dev_flags
262 * @interface: PHY device's interface 262 * @interface: PHY device's interface
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b1d8ed40ad98..73baa7a3bb0e 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -12,6 +12,8 @@
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com
16 *
15 */ 17 */
16 18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -38,7 +40,7 @@
38 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) 40 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
39 41
40 42
41static int lan83c185_config_intr(struct phy_device *phydev) 43static int smsc_phy_config_intr(struct phy_device *phydev)
42{ 44{
43 int rc = phy_write (phydev, MII_LAN83C185_IM, 45 int rc = phy_write (phydev, MII_LAN83C185_IM,
44 ((PHY_INTERRUPT_ENABLED == phydev->interrupts) 46 ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
@@ -48,16 +50,16 @@ static int lan83c185_config_intr(struct phy_device *phydev)
48 return rc < 0 ? rc : 0; 50 return rc < 0 ? rc : 0;
49} 51}
50 52
51static int lan83c185_ack_interrupt(struct phy_device *phydev) 53static int smsc_phy_ack_interrupt(struct phy_device *phydev)
52{ 54{
53 int rc = phy_read (phydev, MII_LAN83C185_ISF); 55 int rc = phy_read (phydev, MII_LAN83C185_ISF);
54 56
55 return rc < 0 ? rc : 0; 57 return rc < 0 ? rc : 0;
56} 58}
57 59
58static int lan83c185_config_init(struct phy_device *phydev) 60static int smsc_phy_config_init(struct phy_device *phydev)
59{ 61{
60 return lan83c185_ack_interrupt (phydev); 62 return smsc_phy_ack_interrupt (phydev);
61} 63}
62 64
63 65
@@ -73,22 +75,87 @@ static struct phy_driver lan83c185_driver = {
73 /* basic functions */ 75 /* basic functions */
74 .config_aneg = genphy_config_aneg, 76 .config_aneg = genphy_config_aneg,
75 .read_status = genphy_read_status, 77 .read_status = genphy_read_status,
76 .config_init = lan83c185_config_init, 78 .config_init = smsc_phy_config_init,
77 79
78 /* IRQ related */ 80 /* IRQ related */
79 .ack_interrupt = lan83c185_ack_interrupt, 81 .ack_interrupt = smsc_phy_ack_interrupt,
80 .config_intr = lan83c185_config_intr, 82 .config_intr = smsc_phy_config_intr,
83
84 .driver = { .owner = THIS_MODULE, }
85};
86
87static struct phy_driver lan8187_driver = {
88 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
89 .phy_id_mask = 0xfffffff0,
90 .name = "SMSC LAN8187",
91
92 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
93 | SUPPORTED_Asym_Pause),
94 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
95
96 /* basic functions */
97 .config_aneg = genphy_config_aneg,
98 .read_status = genphy_read_status,
99 .config_init = smsc_phy_config_init,
100
101 /* IRQ related */
102 .ack_interrupt = smsc_phy_ack_interrupt,
103 .config_intr = smsc_phy_config_intr,
104
105 .driver = { .owner = THIS_MODULE, }
106};
107
108static struct phy_driver lan8700_driver = {
109 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
110 .phy_id_mask = 0xfffffff0,
111 .name = "SMSC LAN8700",
112
113 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
114 | SUPPORTED_Asym_Pause),
115 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
116
117 /* basic functions */
118 .config_aneg = genphy_config_aneg,
119 .read_status = genphy_read_status,
120 .config_init = smsc_phy_config_init,
121
122 /* IRQ related */
123 .ack_interrupt = smsc_phy_ack_interrupt,
124 .config_intr = smsc_phy_config_intr,
81 125
82 .driver = { .owner = THIS_MODULE, } 126 .driver = { .owner = THIS_MODULE, }
83}; 127};
84 128
85static int __init smsc_init(void) 129static int __init smsc_init(void)
86{ 130{
87 return phy_driver_register (&lan83c185_driver); 131 int ret;
132
133 ret = phy_driver_register (&lan83c185_driver);
134 if (ret)
135 goto err1;
136
137 ret = phy_driver_register (&lan8187_driver);
138 if (ret)
139 goto err2;
140
141 ret = phy_driver_register (&lan8700_driver);
142 if (ret)
143 goto err3;
144
145 return 0;
146
147err3:
148 phy_driver_unregister (&lan8187_driver);
149err2:
150 phy_driver_unregister (&lan83c185_driver);
151err1:
152 return ret;
88} 153}
89 154
90static void __exit smsc_exit(void) 155static void __exit smsc_exit(void)
91{ 156{
157 phy_driver_unregister (&lan8700_driver);
158 phy_driver_unregister (&lan8187_driver);
92 phy_driver_unregister (&lan83c185_driver); 159 phy_driver_unregister (&lan83c185_driver);
93} 160}
94 161
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 3acfeeabdee1..657242504621 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1617,6 +1617,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1617 SET_NETDEV_DEV(dev, &pdev->dev); 1617 SET_NETDEV_DEV(dev, &pdev->dev);
1618 tp = netdev_priv(dev); 1618 tp = netdev_priv(dev);
1619 tp->dev = dev; 1619 tp->dev = dev;
1620 tp->pci_dev = pdev;
1620 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1621 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1621 1622
1622 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1623 /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1705,18 +1706,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1705 1706
1706 rtl8169_print_mac_version(tp); 1707 rtl8169_print_mac_version(tp);
1707 1708
1708 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { 1709 for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
1709 if (tp->mac_version == rtl_chip_info[i].mac_version) 1710 if (tp->mac_version == rtl_chip_info[i].mac_version)
1710 break; 1711 break;
1711 } 1712 }
1712 if (i < 0) { 1713 if (i == ARRAY_SIZE(rtl_chip_info)) {
1713 /* Unknown chip: assume array element #0, original RTL-8169 */ 1714 /* Unknown chip: assume array element #0, original RTL-8169 */
1714 if (netif_msg_probe(tp)) { 1715 if (netif_msg_probe(tp)) {
1715 dev_printk(KERN_DEBUG, &pdev->dev, 1716 dev_printk(KERN_DEBUG, &pdev->dev,
1716 "unknown chip version, assuming %s\n", 1717 "unknown chip version, assuming %s\n",
1717 rtl_chip_info[0].name); 1718 rtl_chip_info[0].name);
1718 } 1719 }
1719 i++; 1720 i = 0;
1720 } 1721 }
1721 tp->chipset = i; 1722 tp->chipset = i;
1722 1723
@@ -1777,7 +1778,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1777#endif 1778#endif
1778 1779
1779 tp->intr_mask = 0xffff; 1780 tp->intr_mask = 0xffff;
1780 tp->pci_dev = pdev;
1781 tp->mmio_addr = ioaddr; 1781 tp->mmio_addr = ioaddr;
1782 tp->align = cfg->align; 1782 tp->align = cfg->align;
1783 tp->hw_start = cfg->hw_start; 1783 tp->hw_start = cfg->hw_start;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 157fd932e951..523478ebfd69 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.22" 89#define DRV_VERSION "2.0.26.23"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -809,6 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic)
809 config->rx_cfg[i].num_rxd - 1; 809 config->rx_cfg[i].num_rxd - 1;
810 mac_control->rings[i].nic = nic; 810 mac_control->rings[i].nic = nic;
811 mac_control->rings[i].ring_no = i; 811 mac_control->rings[i].ring_no = i;
812 mac_control->rings[i].lro = lro_enable;
812 813
813 blk_cnt = config->rx_cfg[i].num_rxd / 814 blk_cnt = config->rx_cfg[i].num_rxd /
814 (rxd_count[nic->rxd_mode] + 1); 815 (rxd_count[nic->rxd_mode] + 1);
@@ -1560,113 +1561,112 @@ static int init_nic(struct s2io_nic *nic)
1560 writeq(val64, &bar0->tx_fifo_partition_0); 1561 writeq(val64, &bar0->tx_fifo_partition_0);
1561 1562
1562 /* Filling the Rx round robin registers as per the 1563 /* Filling the Rx round robin registers as per the
1563 * number of Rings and steering based on QoS. 1564 * number of Rings and steering based on QoS with
1564 */ 1565 * equal priority.
1566 */
1565 switch (config->rx_ring_num) { 1567 switch (config->rx_ring_num) {
1566 case 1: 1568 case 1:
1569 val64 = 0x0;
1570 writeq(val64, &bar0->rx_w_round_robin_0);
1571 writeq(val64, &bar0->rx_w_round_robin_1);
1572 writeq(val64, &bar0->rx_w_round_robin_2);
1573 writeq(val64, &bar0->rx_w_round_robin_3);
1574 writeq(val64, &bar0->rx_w_round_robin_4);
1575
1567 val64 = 0x8080808080808080ULL; 1576 val64 = 0x8080808080808080ULL;
1568 writeq(val64, &bar0->rts_qos_steering); 1577 writeq(val64, &bar0->rts_qos_steering);
1569 break; 1578 break;
1570 case 2: 1579 case 2:
1571 val64 = 0x0000010000010000ULL; 1580 val64 = 0x0001000100010001ULL;
1572 writeq(val64, &bar0->rx_w_round_robin_0); 1581 writeq(val64, &bar0->rx_w_round_robin_0);
1573 val64 = 0x0100000100000100ULL;
1574 writeq(val64, &bar0->rx_w_round_robin_1); 1582 writeq(val64, &bar0->rx_w_round_robin_1);
1575 val64 = 0x0001000001000001ULL;
1576 writeq(val64, &bar0->rx_w_round_robin_2); 1583 writeq(val64, &bar0->rx_w_round_robin_2);
1577 val64 = 0x0000010000010000ULL;
1578 writeq(val64, &bar0->rx_w_round_robin_3); 1584 writeq(val64, &bar0->rx_w_round_robin_3);
1579 val64 = 0x0100000000000000ULL; 1585 val64 = 0x0001000100000000ULL;
1580 writeq(val64, &bar0->rx_w_round_robin_4); 1586 writeq(val64, &bar0->rx_w_round_robin_4);
1581 1587
1582 val64 = 0x8080808040404040ULL; 1588 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering); 1589 writeq(val64, &bar0->rts_qos_steering);
1584 break; 1590 break;
1585 case 3: 1591 case 3:
1586 val64 = 0x0001000102000001ULL; 1592 val64 = 0x0001020001020001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_0); 1593 writeq(val64, &bar0->rx_w_round_robin_0);
1588 val64 = 0x0001020000010001ULL; 1594 val64 = 0x0200010200010200ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_1); 1595 writeq(val64, &bar0->rx_w_round_robin_1);
1590 val64 = 0x0200000100010200ULL; 1596 val64 = 0x0102000102000102ULL;
1591 writeq(val64, &bar0->rx_w_round_robin_2); 1597 writeq(val64, &bar0->rx_w_round_robin_2);
1592 val64 = 0x0001000102000001ULL; 1598 val64 = 0x0001020001020001ULL;
1593 writeq(val64, &bar0->rx_w_round_robin_3); 1599 writeq(val64, &bar0->rx_w_round_robin_3);
1594 val64 = 0x0001020000000000ULL; 1600 val64 = 0x0200010200000000ULL;
1595 writeq(val64, &bar0->rx_w_round_robin_4); 1601 writeq(val64, &bar0->rx_w_round_robin_4);
1596 1602
1597 val64 = 0x8080804040402020ULL; 1603 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering); 1604 writeq(val64, &bar0->rts_qos_steering);
1599 break; 1605 break;
1600 case 4: 1606 case 4:
1601 val64 = 0x0001020300010200ULL; 1607 val64 = 0x0001020300010203ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_0); 1608 writeq(val64, &bar0->rx_w_round_robin_0);
1603 val64 = 0x0100000102030001ULL;
1604 writeq(val64, &bar0->rx_w_round_robin_1); 1609 writeq(val64, &bar0->rx_w_round_robin_1);
1605 val64 = 0x0200010000010203ULL;
1606 writeq(val64, &bar0->rx_w_round_robin_2); 1610 writeq(val64, &bar0->rx_w_round_robin_2);
1607 val64 = 0x0001020001000001ULL;
1608 writeq(val64, &bar0->rx_w_round_robin_3); 1611 writeq(val64, &bar0->rx_w_round_robin_3);
1609 val64 = 0x0203000100000000ULL; 1612 val64 = 0x0001020300000000ULL;
1610 writeq(val64, &bar0->rx_w_round_robin_4); 1613 writeq(val64, &bar0->rx_w_round_robin_4);
1611 1614
1612 val64 = 0x8080404020201010ULL; 1615 val64 = 0x8080404020201010ULL;
1613 writeq(val64, &bar0->rts_qos_steering); 1616 writeq(val64, &bar0->rts_qos_steering);
1614 break; 1617 break;
1615 case 5: 1618 case 5:
1616 val64 = 0x0001000203000102ULL; 1619 val64 = 0x0001020304000102ULL;
1617 writeq(val64, &bar0->rx_w_round_robin_0); 1620 writeq(val64, &bar0->rx_w_round_robin_0);
1618 val64 = 0x0001020001030004ULL; 1621 val64 = 0x0304000102030400ULL;
1619 writeq(val64, &bar0->rx_w_round_robin_1); 1622 writeq(val64, &bar0->rx_w_round_robin_1);
1620 val64 = 0x0001000203000102ULL; 1623 val64 = 0x0102030400010203ULL;
1621 writeq(val64, &bar0->rx_w_round_robin_2); 1624 writeq(val64, &bar0->rx_w_round_robin_2);
1622 val64 = 0x0001020001030004ULL; 1625 val64 = 0x0400010203040001ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_3); 1626 writeq(val64, &bar0->rx_w_round_robin_3);
1624 val64 = 0x0001000000000000ULL; 1627 val64 = 0x0203040000000000ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_4); 1628 writeq(val64, &bar0->rx_w_round_robin_4);
1626 1629
1627 val64 = 0x8080404020201008ULL; 1630 val64 = 0x8080404020201008ULL;
1628 writeq(val64, &bar0->rts_qos_steering); 1631 writeq(val64, &bar0->rts_qos_steering);
1629 break; 1632 break;
1630 case 6: 1633 case 6:
1631 val64 = 0x0001020304000102ULL; 1634 val64 = 0x0001020304050001ULL;
1632 writeq(val64, &bar0->rx_w_round_robin_0); 1635 writeq(val64, &bar0->rx_w_round_robin_0);
1633 val64 = 0x0304050001020001ULL; 1636 val64 = 0x0203040500010203ULL;
1634 writeq(val64, &bar0->rx_w_round_robin_1); 1637 writeq(val64, &bar0->rx_w_round_robin_1);
1635 val64 = 0x0203000100000102ULL; 1638 val64 = 0x0405000102030405ULL;
1636 writeq(val64, &bar0->rx_w_round_robin_2); 1639 writeq(val64, &bar0->rx_w_round_robin_2);
1637 val64 = 0x0304000102030405ULL; 1640 val64 = 0x0001020304050001ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_3); 1641 writeq(val64, &bar0->rx_w_round_robin_3);
1639 val64 = 0x0001000200000000ULL; 1642 val64 = 0x0203040500000000ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_4); 1643 writeq(val64, &bar0->rx_w_round_robin_4);
1641 1644
1642 val64 = 0x8080404020100804ULL; 1645 val64 = 0x8080404020100804ULL;
1643 writeq(val64, &bar0->rts_qos_steering); 1646 writeq(val64, &bar0->rts_qos_steering);
1644 break; 1647 break;
1645 case 7: 1648 case 7:
1646 val64 = 0x0001020001020300ULL; 1649 val64 = 0x0001020304050600ULL;
1647 writeq(val64, &bar0->rx_w_round_robin_0); 1650 writeq(val64, &bar0->rx_w_round_robin_0);
1648 val64 = 0x0102030400010203ULL; 1651 val64 = 0x0102030405060001ULL;
1649 writeq(val64, &bar0->rx_w_round_robin_1); 1652 writeq(val64, &bar0->rx_w_round_robin_1);
1650 val64 = 0x0405060001020001ULL; 1653 val64 = 0x0203040506000102ULL;
1651 writeq(val64, &bar0->rx_w_round_robin_2); 1654 writeq(val64, &bar0->rx_w_round_robin_2);
1652 val64 = 0x0304050000010200ULL; 1655 val64 = 0x0304050600010203ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_3); 1656 writeq(val64, &bar0->rx_w_round_robin_3);
1654 val64 = 0x0102030000000000ULL; 1657 val64 = 0x0405060000000000ULL;
1655 writeq(val64, &bar0->rx_w_round_robin_4); 1658 writeq(val64, &bar0->rx_w_round_robin_4);
1656 1659
1657 val64 = 0x8080402010080402ULL; 1660 val64 = 0x8080402010080402ULL;
1658 writeq(val64, &bar0->rts_qos_steering); 1661 writeq(val64, &bar0->rts_qos_steering);
1659 break; 1662 break;
1660 case 8: 1663 case 8:
1661 val64 = 0x0001020300040105ULL; 1664 val64 = 0x0001020304050607ULL;
1662 writeq(val64, &bar0->rx_w_round_robin_0); 1665 writeq(val64, &bar0->rx_w_round_robin_0);
1663 val64 = 0x0200030106000204ULL;
1664 writeq(val64, &bar0->rx_w_round_robin_1); 1666 writeq(val64, &bar0->rx_w_round_robin_1);
1665 val64 = 0x0103000502010007ULL;
1666 writeq(val64, &bar0->rx_w_round_robin_2); 1667 writeq(val64, &bar0->rx_w_round_robin_2);
1667 val64 = 0x0304010002060500ULL;
1668 writeq(val64, &bar0->rx_w_round_robin_3); 1668 writeq(val64, &bar0->rx_w_round_robin_3);
1669 val64 = 0x0103020400000000ULL; 1669 val64 = 0x0001020300000000ULL;
1670 writeq(val64, &bar0->rx_w_round_robin_4); 1670 writeq(val64, &bar0->rx_w_round_robin_4);
1671 1671
1672 val64 = 0x8040201008040201ULL; 1672 val64 = 0x8040201008040201ULL;
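The new rx_w_round_robin constants above all follow one rule: the 40 byte-wide priority slots spread across the five registers are filled with the repeating sequence 0..N-1 for N rings, and the last four slots of rx_w_round_robin_4 are left at zero. A small userspace sketch (illustrative only, not part of the patch; show_round_robin() is a made-up name) that reproduces the constants for any ring count:

#include <stdio.h>
#include <stdint.h>

/* Fill slots 0..35 with (slot % ring_num); slots 36..39 stay zero,
 * matching the values the patch writes for every supported ring count. */
static void show_round_robin(unsigned int ring_num)
{
	uint64_t reg[5] = { 0, 0, 0, 0, 0 };
	unsigned int s;

	for (s = 0; s < 36; s++)
		reg[s / 8] |= (uint64_t)(s % ring_num) << (8 * (7 - s % 8));

	for (s = 0; s < 5; s++)
		printf("rx_w_round_robin_%u = 0x%016llx\n",
		       s, (unsigned long long)reg[s]);
}

int main(void)
{
	show_round_robin(5);	/* reproduces the "case 5" values above */
	return 0;
}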
@@ -2499,8 +2499,7 @@ static void stop_nic(struct s2io_nic *nic)
2499 2499
2500/** 2500/**
2501 * fill_rx_buffers - Allocates the Rx side skbs 2501 * fill_rx_buffers - Allocates the Rx side skbs
2502 * @nic: device private variable 2502 * @ring_info: per ring structure
2503 * @ring_no: ring number
2504 * Description: 2503 * Description:
2505 * The function allocates Rx side skbs and puts the physical 2504 * The function allocates Rx side skbs and puts the physical
2506 * address of these buffers into the RxD buffer pointers, so that the NIC 2505 * address of these buffers into the RxD buffer pointers, so that the NIC
@@ -2518,103 +2517,94 @@ static void stop_nic(struct s2io_nic *nic)
2518 * SUCCESS on success or an appropriate -ve value on failure. 2517 * SUCCESS on success or an appropriate -ve value on failure.
2519 */ 2518 */
2520 2519
2521static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2520static int fill_rx_buffers(struct ring_info *ring)
2522{ 2521{
2523 struct net_device *dev = nic->dev;
2524 struct sk_buff *skb; 2522 struct sk_buff *skb;
2525 struct RxD_t *rxdp; 2523 struct RxD_t *rxdp;
2526 int off, off1, size, block_no, block_no1; 2524 int off, size, block_no, block_no1;
2527 u32 alloc_tab = 0; 2525 u32 alloc_tab = 0;
2528 u32 alloc_cnt; 2526 u32 alloc_cnt;
2529 struct mac_info *mac_control;
2530 struct config_param *config;
2531 u64 tmp; 2527 u64 tmp;
2532 struct buffAdd *ba; 2528 struct buffAdd *ba;
2533 struct RxD_t *first_rxdp = NULL; 2529 struct RxD_t *first_rxdp = NULL;
2534 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2530 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2531 int rxd_index = 0;
2535 struct RxD1 *rxdp1; 2532 struct RxD1 *rxdp1;
2536 struct RxD3 *rxdp3; 2533 struct RxD3 *rxdp3;
2537 struct swStat *stats = &nic->mac_control.stats_info->sw_stat; 2534 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2538 2535
2539 mac_control = &nic->mac_control; 2536 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2540 config = &nic->config;
2541 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2542 atomic_read(&nic->rx_bufs_left[ring_no]);
2543 2537
2544 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; 2538 block_no1 = ring->rx_curr_get_info.block_index;
2545 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2546 while (alloc_tab < alloc_cnt) { 2539 while (alloc_tab < alloc_cnt) {
2547 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2540 block_no = ring->rx_curr_put_info.block_index;
2548 block_index;
2549 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2550 2541
2551 rxdp = mac_control->rings[ring_no]. 2542 off = ring->rx_curr_put_info.offset;
2552 rx_blocks[block_no].rxds[off].virt_addr; 2543
2544 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2545
2546 rxd_index = off + 1;
2547 if (block_no)
2548 rxd_index += (block_no * ring->rxd_count);
2553 2549
2554 if ((block_no == block_no1) && (off == off1) && 2550 if ((block_no == block_no1) &&
2555 (rxdp->Host_Control)) { 2551 (off == ring->rx_curr_get_info.offset) &&
2552 (rxdp->Host_Control)) {
2556 DBG_PRINT(INTR_DBG, "%s: Get and Put", 2553 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2557 dev->name); 2554 ring->dev->name);
2558 DBG_PRINT(INTR_DBG, " info equated\n"); 2555 DBG_PRINT(INTR_DBG, " info equated\n");
2559 goto end; 2556 goto end;
2560 } 2557 }
2561 if (off && (off == rxd_count[nic->rxd_mode])) { 2558 if (off && (off == ring->rxd_count)) {
2562 mac_control->rings[ring_no].rx_curr_put_info. 2559 ring->rx_curr_put_info.block_index++;
2563 block_index++; 2560 if (ring->rx_curr_put_info.block_index ==
2564 if (mac_control->rings[ring_no].rx_curr_put_info. 2561 ring->block_count)
2565 block_index == mac_control->rings[ring_no]. 2562 ring->rx_curr_put_info.block_index = 0;
2566 block_count) 2563 block_no = ring->rx_curr_put_info.block_index;
2567 mac_control->rings[ring_no].rx_curr_put_info. 2564 off = 0;
2568 block_index = 0; 2565 ring->rx_curr_put_info.offset = off;
2569 block_no = mac_control->rings[ring_no]. 2566 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2570 rx_curr_put_info.block_index;
2571 if (off == rxd_count[nic->rxd_mode])
2572 off = 0;
2573 mac_control->rings[ring_no].rx_curr_put_info.
2574 offset = off;
2575 rxdp = mac_control->rings[ring_no].
2576 rx_blocks[block_no].block_virt_addr;
2577 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2567 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2578 dev->name, rxdp); 2568 ring->dev->name, rxdp);
2569
2579 } 2570 }
2580 2571
2581 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2572 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2582 ((nic->rxd_mode == RXD_MODE_3B) && 2573 ((ring->rxd_mode == RXD_MODE_3B) &&
2583 (rxdp->Control_2 & s2BIT(0)))) { 2574 (rxdp->Control_2 & s2BIT(0)))) {
2584 mac_control->rings[ring_no].rx_curr_put_info. 2575 ring->rx_curr_put_info.offset = off;
2585 offset = off;
2586 goto end; 2576 goto end;
2587 } 2577 }
2588 /* calculate size of skb based on ring mode */ 2578 /* calculate size of skb based on ring mode */
2589 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2579 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2590 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2580 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2591 if (nic->rxd_mode == RXD_MODE_1) 2581 if (ring->rxd_mode == RXD_MODE_1)
2592 size += NET_IP_ALIGN; 2582 size += NET_IP_ALIGN;
2593 else 2583 else
2594 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2584 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2595 2585
2596 /* allocate skb */ 2586 /* allocate skb */
2597 skb = dev_alloc_skb(size); 2587 skb = dev_alloc_skb(size);
2598 if(!skb) { 2588 if(!skb) {
2599 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); 2589 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2600 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); 2590 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2601 if (first_rxdp) { 2591 if (first_rxdp) {
2602 wmb(); 2592 wmb();
2603 first_rxdp->Control_1 |= RXD_OWN_XENA; 2593 first_rxdp->Control_1 |= RXD_OWN_XENA;
2604 } 2594 }
2605 nic->mac_control.stats_info->sw_stat. \ 2595 stats->mem_alloc_fail_cnt++;
2606 mem_alloc_fail_cnt++; 2596
2607 return -ENOMEM ; 2597 return -ENOMEM ;
2608 } 2598 }
2609 nic->mac_control.stats_info->sw_stat.mem_allocated 2599 stats->mem_allocated += skb->truesize;
2610 += skb->truesize; 2600
2611 if (nic->rxd_mode == RXD_MODE_1) { 2601 if (ring->rxd_mode == RXD_MODE_1) {
2612 /* 1 buffer mode - normal operation mode */ 2602 /* 1 buffer mode - normal operation mode */
2613 rxdp1 = (struct RxD1*)rxdp; 2603 rxdp1 = (struct RxD1*)rxdp;
2614 memset(rxdp, 0, sizeof(struct RxD1)); 2604 memset(rxdp, 0, sizeof(struct RxD1));
2615 skb_reserve(skb, NET_IP_ALIGN); 2605 skb_reserve(skb, NET_IP_ALIGN);
2616 rxdp1->Buffer0_ptr = pci_map_single 2606 rxdp1->Buffer0_ptr = pci_map_single
2617 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2607 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2618 PCI_DMA_FROMDEVICE); 2608 PCI_DMA_FROMDEVICE);
2619 if( (rxdp1->Buffer0_ptr == 0) || 2609 if( (rxdp1->Buffer0_ptr == 0) ||
2620 (rxdp1->Buffer0_ptr == 2610 (rxdp1->Buffer0_ptr ==
@@ -2623,8 +2613,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2623 2613
2624 rxdp->Control_2 = 2614 rxdp->Control_2 =
2625 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2615 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2626 2616 rxdp->Host_Control = (unsigned long) (skb);
2627 } else if (nic->rxd_mode == RXD_MODE_3B) { 2617 } else if (ring->rxd_mode == RXD_MODE_3B) {
2628 /* 2618 /*
2629 * 2 buffer mode - 2619 * 2 buffer mode -
2630 * 2 buffer mode provides 128 2620 * 2 buffer mode provides 128
@@ -2640,7 +2630,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2640 rxdp3->Buffer0_ptr = Buffer0_ptr; 2630 rxdp3->Buffer0_ptr = Buffer0_ptr;
2641 rxdp3->Buffer1_ptr = Buffer1_ptr; 2631 rxdp3->Buffer1_ptr = Buffer1_ptr;
2642 2632
2643 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2633 ba = &ring->ba[block_no][off];
2644 skb_reserve(skb, BUF0_LEN); 2634 skb_reserve(skb, BUF0_LEN);
2645 tmp = (u64)(unsigned long) skb->data; 2635 tmp = (u64)(unsigned long) skb->data;
2646 tmp += ALIGN_SIZE; 2636 tmp += ALIGN_SIZE;
@@ -2650,10 +2640,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2650 2640
2651 if (!(rxdp3->Buffer0_ptr)) 2641 if (!(rxdp3->Buffer0_ptr))
2652 rxdp3->Buffer0_ptr = 2642 rxdp3->Buffer0_ptr =
2653 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2643 pci_map_single(ring->pdev, ba->ba_0,
2654 PCI_DMA_FROMDEVICE); 2644 BUF0_LEN, PCI_DMA_FROMDEVICE);
2655 else 2645 else
2656 pci_dma_sync_single_for_device(nic->pdev, 2646 pci_dma_sync_single_for_device(ring->pdev,
2657 (dma_addr_t) rxdp3->Buffer0_ptr, 2647 (dma_addr_t) rxdp3->Buffer0_ptr,
2658 BUF0_LEN, PCI_DMA_FROMDEVICE); 2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
2659 if( (rxdp3->Buffer0_ptr == 0) || 2649 if( (rxdp3->Buffer0_ptr == 0) ||
@@ -2661,7 +2651,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2661 goto pci_map_failed; 2651 goto pci_map_failed;
2662 2652
2663 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2653 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2664 if (nic->rxd_mode == RXD_MODE_3B) { 2654 if (ring->rxd_mode == RXD_MODE_3B) {
2665 /* Two buffer mode */ 2655 /* Two buffer mode */
2666 2656
2667 /* 2657 /*
@@ -2669,39 +2659,42 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2669 * L4 payload 2659 * L4 payload
2670 */ 2660 */
2671 rxdp3->Buffer2_ptr = pci_map_single 2661 rxdp3->Buffer2_ptr = pci_map_single
2672 (nic->pdev, skb->data, dev->mtu + 4, 2662 (ring->pdev, skb->data, ring->mtu + 4,
2673 PCI_DMA_FROMDEVICE); 2663 PCI_DMA_FROMDEVICE);
2674 2664
2675 if( (rxdp3->Buffer2_ptr == 0) || 2665 if( (rxdp3->Buffer2_ptr == 0) ||
2676 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) 2666 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2677 goto pci_map_failed; 2667 goto pci_map_failed;
2678 2668
2679 rxdp3->Buffer1_ptr = 2669 if (!rxdp3->Buffer1_ptr)
2680 pci_map_single(nic->pdev, 2670 rxdp3->Buffer1_ptr =
2671 pci_map_single(ring->pdev,
2681 ba->ba_1, BUF1_LEN, 2672 ba->ba_1, BUF1_LEN,
2682 PCI_DMA_FROMDEVICE); 2673 PCI_DMA_FROMDEVICE);
2674
2683 if( (rxdp3->Buffer1_ptr == 0) || 2675 if( (rxdp3->Buffer1_ptr == 0) ||
2684 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { 2676 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2685 pci_unmap_single 2677 pci_unmap_single
2686 (nic->pdev, 2678 (ring->pdev,
2687 (dma_addr_t)rxdp3->Buffer2_ptr, 2679 (dma_addr_t)(unsigned long)
2688 dev->mtu + 4, 2680 skb->data,
2681 ring->mtu + 4,
2689 PCI_DMA_FROMDEVICE); 2682 PCI_DMA_FROMDEVICE);
2690 goto pci_map_failed; 2683 goto pci_map_failed;
2691 } 2684 }
2692 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2685 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2693 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2686 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2694 (dev->mtu + 4); 2687 (ring->mtu + 4);
2695 } 2688 }
2696 rxdp->Control_2 |= s2BIT(0); 2689 rxdp->Control_2 |= s2BIT(0);
2690 rxdp->Host_Control = (unsigned long) (skb);
2697 } 2691 }
2698 rxdp->Host_Control = (unsigned long) (skb);
2699 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2692 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2700 rxdp->Control_1 |= RXD_OWN_XENA; 2693 rxdp->Control_1 |= RXD_OWN_XENA;
2701 off++; 2694 off++;
2702 if (off == (rxd_count[nic->rxd_mode] + 1)) 2695 if (off == (ring->rxd_count + 1))
2703 off = 0; 2696 off = 0;
2704 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2697 ring->rx_curr_put_info.offset = off;
2705 2698
2706 rxdp->Control_2 |= SET_RXD_MARKER; 2699 rxdp->Control_2 |= SET_RXD_MARKER;
2707 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2700 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
@@ -2711,7 +2704,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2711 } 2704 }
2712 first_rxdp = rxdp; 2705 first_rxdp = rxdp;
2713 } 2706 }
2714 atomic_inc(&nic->rx_bufs_left[ring_no]); 2707 ring->rx_bufs_left += 1;
2715 alloc_tab++; 2708 alloc_tab++;
2716 } 2709 }
2717 2710
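With this refactor, fill_rx_buffers() is driven entirely by the ring_info it is given, so callers pass the ring itself rather than an (adapter, ring number) pair. A hedged sketch of the resulting call pattern (s2io_refill_all_rings() is an illustrative helper, not part of the patch; the real call sites are updated in the s2io_poll, s2io_netpoll and s2io_chk_rx_buffers hunks below):

/* Illustrative only: replenish every Rx ring via the new per-ring API. */
static int s2io_refill_all_rings(struct s2io_nic *sp)
{
	struct mac_info *mac_control = &sp->mac_control;
	struct config_param *config = &sp->config;
	int i, ret = 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		ret = fill_rx_buffers(&mac_control->rings[i]);
		if (ret == -ENOMEM)
			break;
	}
	return ret;
}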
@@ -2783,7 +2776,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2783 } 2776 }
2784 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 2777 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2785 dev_kfree_skb(skb); 2778 dev_kfree_skb(skb);
2786 atomic_dec(&sp->rx_bufs_left[ring_no]); 2779 mac_control->rings[ring_no].rx_bufs_left -= 1;
2787 } 2780 }
2788} 2781}
2789 2782
@@ -2814,7 +2807,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2814 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2807 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2815 mac_control->rings[i].rx_curr_put_info.offset = 0; 2808 mac_control->rings[i].rx_curr_put_info.offset = 0;
2816 mac_control->rings[i].rx_curr_get_info.offset = 0; 2809 mac_control->rings[i].rx_curr_get_info.offset = 0;
2817 atomic_set(&sp->rx_bufs_left[i], 0); 2810 mac_control->rings[i].rx_bufs_left = 0;
2818 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2811 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2819 dev->name, buf_cnt, i); 2812 dev->name, buf_cnt, i);
2820 } 2813 }
@@ -2864,7 +2857,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
2864 netif_rx_complete(dev, napi); 2857 netif_rx_complete(dev, napi);
2865 2858
2866 for (i = 0; i < config->rx_ring_num; i++) { 2859 for (i = 0; i < config->rx_ring_num; i++) {
2867 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2860 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2868 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2861 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2869 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2862 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2870 break; 2863 break;
@@ -2877,7 +2870,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
2877 2870
2878no_rx: 2871no_rx:
2879 for (i = 0; i < config->rx_ring_num; i++) { 2872 for (i = 0; i < config->rx_ring_num; i++) {
2880 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2873 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2881 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2874 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2882 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2875 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2883 break; 2876 break;
@@ -2928,7 +2921,7 @@ static void s2io_netpoll(struct net_device *dev)
2928 rx_intr_handler(&mac_control->rings[i]); 2921 rx_intr_handler(&mac_control->rings[i]);
2929 2922
2930 for (i = 0; i < config->rx_ring_num; i++) { 2923 for (i = 0; i < config->rx_ring_num; i++) {
2931 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2932 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2925 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2933 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); 2926 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2934 break; 2927 break;
@@ -2953,8 +2946,6 @@ static void s2io_netpoll(struct net_device *dev)
2953 */ 2946 */
2954static void rx_intr_handler(struct ring_info *ring_data) 2947static void rx_intr_handler(struct ring_info *ring_data)
2955{ 2948{
2956 struct s2io_nic *nic = ring_data->nic;
2957 struct net_device *dev = (struct net_device *) nic->dev;
2958 int get_block, put_block; 2949 int get_block, put_block;
2959 struct rx_curr_get_info get_info, put_info; 2950 struct rx_curr_get_info get_info, put_info;
2960 struct RxD_t *rxdp; 2951 struct RxD_t *rxdp;
@@ -2977,33 +2968,34 @@ static void rx_intr_handler(struct ring_info *ring_data)
2977 */ 2968 */
2978 if ((get_block == put_block) && 2969 if ((get_block == put_block) &&
2979 (get_info.offset + 1) == put_info.offset) { 2970 (get_info.offset + 1) == put_info.offset) {
2980 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2971 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2972 ring_data->dev->name);
2981 break; 2973 break;
2982 } 2974 }
2983 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2975 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2984 if (skb == NULL) { 2976 if (skb == NULL) {
2985 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2977 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2986 dev->name); 2978 ring_data->dev->name);
2987 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2988 return; 2980 return;
2989 } 2981 }
2990 if (nic->rxd_mode == RXD_MODE_1) { 2982 if (ring_data->rxd_mode == RXD_MODE_1) {
2991 rxdp1 = (struct RxD1*)rxdp; 2983 rxdp1 = (struct RxD1*)rxdp;
2992 pci_unmap_single(nic->pdev, (dma_addr_t) 2984 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2993 rxdp1->Buffer0_ptr, 2985 rxdp1->Buffer0_ptr,
2994 dev->mtu + 2986 ring_data->mtu +
2995 HEADER_ETHERNET_II_802_3_SIZE + 2987 HEADER_ETHERNET_II_802_3_SIZE +
2996 HEADER_802_2_SIZE + 2988 HEADER_802_2_SIZE +
2997 HEADER_SNAP_SIZE, 2989 HEADER_SNAP_SIZE,
2998 PCI_DMA_FROMDEVICE); 2990 PCI_DMA_FROMDEVICE);
2999 } else if (nic->rxd_mode == RXD_MODE_3B) { 2991 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3000 rxdp3 = (struct RxD3*)rxdp; 2992 rxdp3 = (struct RxD3*)rxdp;
3001 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2993 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3002 rxdp3->Buffer0_ptr, 2994 rxdp3->Buffer0_ptr,
3003 BUF0_LEN, PCI_DMA_FROMDEVICE); 2995 BUF0_LEN, PCI_DMA_FROMDEVICE);
3004 pci_unmap_single(nic->pdev, (dma_addr_t) 2996 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3005 rxdp3->Buffer2_ptr, 2997 rxdp3->Buffer2_ptr,
3006 dev->mtu + 4, 2998 ring_data->mtu + 4,
3007 PCI_DMA_FROMDEVICE); 2999 PCI_DMA_FROMDEVICE);
3008 } 3000 }
3009 prefetch(skb->data); 3001 prefetch(skb->data);
@@ -3012,7 +3004,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
3012 ring_data->rx_curr_get_info.offset = get_info.offset; 3004 ring_data->rx_curr_get_info.offset = get_info.offset;
3013 rxdp = ring_data->rx_blocks[get_block]. 3005 rxdp = ring_data->rx_blocks[get_block].
3014 rxds[get_info.offset].virt_addr; 3006 rxds[get_info.offset].virt_addr;
3015 if (get_info.offset == rxd_count[nic->rxd_mode]) { 3007 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3016 get_info.offset = 0; 3008 get_info.offset = 0;
3017 ring_data->rx_curr_get_info.offset = get_info.offset; 3009 ring_data->rx_curr_get_info.offset = get_info.offset;
3018 get_block++; 3010 get_block++;
@@ -3022,19 +3014,21 @@ static void rx_intr_handler(struct ring_info *ring_data)
3022 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3023 } 3015 }
3024 3016
3025 nic->pkts_to_process -= 1; 3017 if(ring_data->nic->config.napi){
3026 if ((napi) && (!nic->pkts_to_process)) 3018 ring_data->nic->pkts_to_process -= 1;
3027 break; 3019 if (!ring_data->nic->pkts_to_process)
3020 break;
3021 }
3028 pkt_cnt++; 3022 pkt_cnt++;
3029 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 3023 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3030 break; 3024 break;
3031 } 3025 }
3032 if (nic->lro) { 3026 if (ring_data->lro) {
3033 /* Clear all LRO sessions before exiting */ 3027 /* Clear all LRO sessions before exiting */
3034 for (i=0; i<MAX_LRO_SESSIONS; i++) { 3028 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3035 struct lro *lro = &nic->lro0_n[i]; 3029 struct lro *lro = &ring_data->lro0_n[i];
3036 if (lro->in_use) { 3030 if (lro->in_use) {
3037 update_L3L4_header(nic, lro); 3031 update_L3L4_header(ring_data->nic, lro);
3038 queue_rx_frame(lro->parent, lro->vlan_tag); 3032 queue_rx_frame(lro->parent, lro->vlan_tag);
3039 clear_lro_session(lro); 3033 clear_lro_session(lro);
3040 } 3034 }
@@ -4333,10 +4327,10 @@ s2io_alarm_handle(unsigned long data)
4333 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4334} 4328}
4335 4329
4336static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) 4330static int s2io_chk_rx_buffers(struct ring_info *ring)
4337{ 4331{
4338 if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4332 if (fill_rx_buffers(ring) == -ENOMEM) {
4339 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name); 4333 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4340 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 4334 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4341 } 4335 }
4342 return 0; 4336 return 0;
@@ -4351,7 +4345,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4351 return IRQ_HANDLED; 4345 return IRQ_HANDLED;
4352 4346
4353 rx_intr_handler(ring); 4347 rx_intr_handler(ring);
4354 s2io_chk_rx_buffers(sp, ring->ring_no); 4348 s2io_chk_rx_buffers(ring);
4355 4349
4356 return IRQ_HANDLED; 4350 return IRQ_HANDLED;
4357} 4351}
@@ -4809,7 +4803,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4809 */ 4803 */
4810 if (!config->napi) { 4804 if (!config->napi) {
4811 for (i = 0; i < config->rx_ring_num; i++) 4805 for (i = 0; i < config->rx_ring_num; i++)
4812 s2io_chk_rx_buffers(sp, i); 4806 s2io_chk_rx_buffers(&mac_control->rings[i]);
4813 } 4807 }
4814 writeq(sp->general_int_mask, &bar0->general_int_mask); 4808 writeq(sp->general_int_mask, &bar0->general_int_mask);
4815 readl(&bar0->general_int_status); 4809 readl(&bar0->general_int_status);
@@ -4866,6 +4860,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4866 struct s2io_nic *sp = dev->priv; 4860 struct s2io_nic *sp = dev->priv;
4867 struct mac_info *mac_control; 4861 struct mac_info *mac_control;
4868 struct config_param *config; 4862 struct config_param *config;
4863 int i;
4869 4864
4870 4865
4871 mac_control = &sp->mac_control; 4866 mac_control = &sp->mac_control;
@@ -4885,6 +4880,13 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4885 sp->stats.rx_length_errors = 4880 sp->stats.rx_length_errors =
4886 le64_to_cpu(mac_control->stats_info->rmac_long_frms); 4881 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4887 4882
4883 /* collect per-ring rx_packets and rx_bytes */
4884 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4885 for (i = 0; i < config->rx_ring_num; i++) {
4886 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4887 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4888 }
4889
4888 return (&sp->stats); 4890 return (&sp->stats);
4889} 4891}
4890 4892
@@ -7157,7 +7159,9 @@ static int s2io_card_up(struct s2io_nic * sp)
7157 config = &sp->config; 7159 config = &sp->config;
7158 7160
7159 for (i = 0; i < config->rx_ring_num; i++) { 7161 for (i = 0; i < config->rx_ring_num; i++) {
7160 if ((ret = fill_rx_buffers(sp, i))) { 7162 mac_control->rings[i].mtu = dev->mtu;
7163 ret = fill_rx_buffers(&mac_control->rings[i]);
7164 if (ret) {
7161 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7165 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7162 dev->name); 7166 dev->name);
7163 s2io_reset(sp); 7167 s2io_reset(sp);
@@ -7165,7 +7169,7 @@ static int s2io_card_up(struct s2io_nic * sp)
7165 return -ENOMEM; 7169 return -ENOMEM;
7166 } 7170 }
7167 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 7171 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7168 atomic_read(&sp->rx_bufs_left[i])); 7172 mac_control->rings[i].rx_bufs_left);
7169 } 7173 }
7170 7174
7171 /* Initialise napi */ 7175 /* Initialise napi */
@@ -7300,7 +7304,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
7300static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7304static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7301{ 7305{
7302 struct s2io_nic *sp = ring_data->nic; 7306 struct s2io_nic *sp = ring_data->nic;
7303 struct net_device *dev = (struct net_device *) sp->dev; 7307 struct net_device *dev = (struct net_device *) ring_data->dev;
7304 struct sk_buff *skb = (struct sk_buff *) 7308 struct sk_buff *skb = (struct sk_buff *)
7305 ((unsigned long) rxdp->Host_Control); 7309 ((unsigned long) rxdp->Host_Control);
7306 int ring_no = ring_data->ring_no; 7310 int ring_no = ring_data->ring_no;
@@ -7377,19 +7381,19 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7377 sp->mac_control.stats_info->sw_stat.mem_freed 7381 sp->mac_control.stats_info->sw_stat.mem_freed
7378 += skb->truesize; 7382 += skb->truesize;
7379 dev_kfree_skb(skb); 7383 dev_kfree_skb(skb);
7380 atomic_dec(&sp->rx_bufs_left[ring_no]); 7384 ring_data->rx_bufs_left -= 1;
7381 rxdp->Host_Control = 0; 7385 rxdp->Host_Control = 0;
7382 return 0; 7386 return 0;
7383 } 7387 }
7384 } 7388 }
7385 7389
7386 /* Updating statistics */ 7390 /* Updating statistics */
7387 sp->stats.rx_packets++; 7391 ring_data->rx_packets++;
7388 rxdp->Host_Control = 0; 7392 rxdp->Host_Control = 0;
7389 if (sp->rxd_mode == RXD_MODE_1) { 7393 if (sp->rxd_mode == RXD_MODE_1) {
7390 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 7394 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7391 7395
7392 sp->stats.rx_bytes += len; 7396 ring_data->rx_bytes += len;
7393 skb_put(skb, len); 7397 skb_put(skb, len);
7394 7398
7395 } else if (sp->rxd_mode == RXD_MODE_3B) { 7399 } else if (sp->rxd_mode == RXD_MODE_3B) {
@@ -7400,13 +7404,13 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7400 unsigned char *buff = skb_push(skb, buf0_len); 7404 unsigned char *buff = skb_push(skb, buf0_len);
7401 7405
7402 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; 7406 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7403 sp->stats.rx_bytes += buf0_len + buf2_len; 7407 ring_data->rx_bytes += buf0_len + buf2_len;
7404 memcpy(buff, ba->ba_0, buf0_len); 7408 memcpy(buff, ba->ba_0, buf0_len);
7405 skb_put(skb, buf2_len); 7409 skb_put(skb, buf2_len);
7406 } 7410 }
7407 7411
7408 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || 7412 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7409 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && 7413 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7410 (sp->rx_csum)) { 7414 (sp->rx_csum)) {
7411 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 7415 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7412 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 7416 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
@@ -7417,14 +7421,14 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7417 * a flag in the RxD. 7421 * a flag in the RxD.
7418 */ 7422 */
7419 skb->ip_summed = CHECKSUM_UNNECESSARY; 7423 skb->ip_summed = CHECKSUM_UNNECESSARY;
7420 if (sp->lro) { 7424 if (ring_data->lro) {
7421 u32 tcp_len; 7425 u32 tcp_len;
7422 u8 *tcp; 7426 u8 *tcp;
7423 int ret = 0; 7427 int ret = 0;
7424 7428
7425 ret = s2io_club_tcp_session(skb->data, &tcp, 7429 ret = s2io_club_tcp_session(ring_data,
7426 &tcp_len, &lro, 7430 skb->data, &tcp, &tcp_len, &lro,
7427 rxdp, sp); 7431 rxdp, sp);
7428 switch (ret) { 7432 switch (ret) {
7429 case 3: /* Begin anew */ 7433 case 3: /* Begin anew */
7430 lro->parent = skb; 7434 lro->parent = skb;
@@ -7486,7 +7490,7 @@ send_up:
7486 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7490 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7487 dev->last_rx = jiffies; 7491 dev->last_rx = jiffies;
7488aggregate: 7492aggregate:
7489 atomic_dec(&sp->rx_bufs_left[ring_no]); 7493 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7490 return SUCCESS; 7494 return SUCCESS;
7491} 7495}
7492 7496
@@ -7603,12 +7607,14 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7603 tx_steering_type = NO_STEERING; 7607 tx_steering_type = NO_STEERING;
7604 } 7608 }
7605 7609
7606 if ( rx_ring_num > 8) { 7610 if (rx_ring_num > MAX_RX_RINGS) {
7607 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " 7611 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7608 "supported\n"); 7612 "supported\n");
7609 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); 7613 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7610 rx_ring_num = 8; 7614 MAX_RX_RINGS);
7615 rx_ring_num = MAX_RX_RINGS;
7611 } 7616 }
7617
7612 if (*dev_intr_type != INTA) 7618 if (*dev_intr_type != INTA)
7613 napi = 0; 7619 napi = 0;
7614 7620
@@ -7836,10 +7842,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7836 7842
7837 /* Rx side parameters. */ 7843 /* Rx side parameters. */
7838 config->rx_ring_num = rx_ring_num; 7844 config->rx_ring_num = rx_ring_num;
7839 for (i = 0; i < MAX_RX_RINGS; i++) { 7845 for (i = 0; i < config->rx_ring_num; i++) {
7840 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 7846 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7841 (rxd_count[sp->rxd_mode] + 1); 7847 (rxd_count[sp->rxd_mode] + 1);
7842 config->rx_cfg[i].ring_priority = i; 7848 config->rx_cfg[i].ring_priority = i;
7849 mac_control->rings[i].rx_bufs_left = 0;
7850 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7851 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7852 mac_control->rings[i].pdev = sp->pdev;
7853 mac_control->rings[i].dev = sp->dev;
7843 } 7854 }
7844 7855
7845 for (i = 0; i < rx_ring_num; i++) { 7856 for (i = 0; i < rx_ring_num; i++) {
@@ -7854,10 +7865,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7854 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; 7865 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7855 7866
7856 7867
7857 /* Initialize Ring buffer parameters. */
7858 for (i = 0; i < config->rx_ring_num; i++)
7859 atomic_set(&sp->rx_bufs_left[i], 0);
7860
7861 /* initialize the shared memory used by the NIC and the host */ 7868 /* initialize the shared memory used by the NIC and the host */
7862 if (init_shared_mem(sp)) { 7869 if (init_shared_mem(sp)) {
7863 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 7870 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
@@ -8077,6 +8084,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8077 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8084 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8078 sp->config.tx_fifo_num); 8085 sp->config.tx_fifo_num);
8079 8086
8087 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8088 sp->config.rx_ring_num);
8089
8080 switch(sp->config.intr_type) { 8090 switch(sp->config.intr_type) {
8081 case INTA: 8091 case INTA:
8082 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 8092 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -8391,8 +8401,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8391} 8401}
8392 8402
8393static int 8403static int
8394s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 8404s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8395 struct RxD_t *rxdp, struct s2io_nic *sp) 8405 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8406 struct s2io_nic *sp)
8396{ 8407{
8397 struct iphdr *ip; 8408 struct iphdr *ip;
8398 struct tcphdr *tcph; 8409 struct tcphdr *tcph;
@@ -8410,7 +8421,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8410 tcph = (struct tcphdr *)*tcp; 8421 tcph = (struct tcphdr *)*tcp;
8411 *tcp_len = get_l4_pyld_length(ip, tcph); 8422 *tcp_len = get_l4_pyld_length(ip, tcph);
8412 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8423 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8413 struct lro *l_lro = &sp->lro0_n[i]; 8424 struct lro *l_lro = &ring_data->lro0_n[i];
8414 if (l_lro->in_use) { 8425 if (l_lro->in_use) {
8415 if (check_for_socket_match(l_lro, ip, tcph)) 8426 if (check_for_socket_match(l_lro, ip, tcph))
8416 continue; 8427 continue;
@@ -8448,7 +8459,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8448 } 8459 }
8449 8460
8450 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8461 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8451 struct lro *l_lro = &sp->lro0_n[i]; 8462 struct lro *l_lro = &ring_data->lro0_n[i];
8452 if (!(l_lro->in_use)) { 8463 if (!(l_lro->in_use)) {
8453 *lro = l_lro; 8464 *lro = l_lro;
8454 ret = 3; /* Begin anew */ 8465 ret = 3; /* Begin anew */
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index ce53a02105f2..0709ebae9139 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -678,11 +678,53 @@ struct rx_block_info {
678 struct rxd_info *rxds; 678 struct rxd_info *rxds;
679}; 679};
680 680
681/* Data structure to represent a LRO session */
682struct lro {
683 struct sk_buff *parent;
684 struct sk_buff *last_frag;
685 u8 *l2h;
686 struct iphdr *iph;
687 struct tcphdr *tcph;
688 u32 tcp_next_seq;
689 __be32 tcp_ack;
690 int total_len;
691 int frags_len;
692 int sg_num;
693 int in_use;
694 __be16 window;
695 u16 vlan_tag;
696 u32 cur_tsval;
697 __be32 cur_tsecr;
698 u8 saw_ts;
699} ____cacheline_aligned;
700
681/* Ring specific structure */ 701/* Ring specific structure */
682struct ring_info { 702struct ring_info {
683 /* The ring number */ 703 /* The ring number */
684 int ring_no; 704 int ring_no;
685 705
706 /* per-ring buffer counter */
707 u32 rx_bufs_left;
708
709 #define MAX_LRO_SESSIONS 32
710 struct lro lro0_n[MAX_LRO_SESSIONS];
711 u8 lro;
712
713 /* copy of sp->rxd_mode flag */
714 int rxd_mode;
715
716 /* Number of rxds per block for the rxd_mode */
717 int rxd_count;
718
719 /* copy of sp pointer */
720 struct s2io_nic *nic;
721
722 /* copy of sp->dev pointer */
723 struct net_device *dev;
724
725 /* copy of sp->pdev pointer */
726 struct pci_dev *pdev;
727
686 /* 728 /*
687 * Place holders for the virtual and physical addresses of 729 * Place holders for the virtual and physical addresses of
688 * all the Rx Blocks 730 * all the Rx Blocks
@@ -703,10 +745,16 @@ struct ring_info {
703 */ 745 */
704 struct rx_curr_get_info rx_curr_get_info; 746 struct rx_curr_get_info rx_curr_get_info;
705 747
748 /* interface MTU value */
749 unsigned mtu;
750
706 /* Buffer Address store. */ 751 /* Buffer Address store. */
707 struct buffAdd **ba; 752 struct buffAdd **ba;
708 struct s2io_nic *nic; 753
709}; 754 /* per-Ring statistics */
755 unsigned long rx_packets;
756 unsigned long rx_bytes;
757} ____cacheline_aligned;
710 758
711/* Fifo specific structure */ 759/* Fifo specific structure */
712struct fifo_info { 760struct fifo_info {
@@ -813,26 +861,6 @@ struct msix_info_st {
813 u64 data; 861 u64 data;
814}; 862};
815 863
816/* Data structure to represent a LRO session */
817struct lro {
818 struct sk_buff *parent;
819 struct sk_buff *last_frag;
820 u8 *l2h;
821 struct iphdr *iph;
822 struct tcphdr *tcph;
823 u32 tcp_next_seq;
824 __be32 tcp_ack;
825 int total_len;
826 int frags_len;
827 int sg_num;
828 int in_use;
829 __be16 window;
830 u16 vlan_tag;
831 u32 cur_tsval;
832 __be32 cur_tsecr;
833 u8 saw_ts;
834} ____cacheline_aligned;
835
836/* These flags represent the devices temporary state */ 864/* These flags represent the devices temporary state */
837enum s2io_device_state_t 865enum s2io_device_state_t
838{ 866{
@@ -872,8 +900,6 @@ struct s2io_nic {
872 /* Space to back up the PCI config space */ 900 /* Space to back up the PCI config space */
873 u32 config_space[256 / sizeof(u32)]; 901 u32 config_space[256 / sizeof(u32)];
874 902
875 atomic_t rx_bufs_left[MAX_RX_RINGS];
876
877#define PROMISC 1 903#define PROMISC 1
878#define ALL_MULTI 2 904#define ALL_MULTI 2
879 905
@@ -950,8 +976,6 @@ struct s2io_nic {
950#define XFRAME_II_DEVICE 2 976#define XFRAME_II_DEVICE 2
951 u8 device_type; 977 u8 device_type;
952 978
953#define MAX_LRO_SESSIONS 32
954 struct lro lro0_n[MAX_LRO_SESSIONS];
955 unsigned long clubbed_frms_cnt; 979 unsigned long clubbed_frms_cnt;
956 unsigned long sending_both; 980 unsigned long sending_both;
957 u8 lro; 981 u8 lro;
@@ -1118,9 +1142,9 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
1118static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); 1142static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
1119static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); 1143static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
1120 1144
1121static int 1145static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
1122s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 1146 u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
1123 struct RxD_t *rxdp, struct s2io_nic *sp); 1147 struct s2io_nic *sp);
1124static void clear_lro_session(struct lro *lro); 1148static void clear_lro_session(struct lro *lro);
1125static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); 1149static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
1126static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); 1150static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
new file mode 100644
index 000000000000..dbad95c295bd
--- /dev/null
+++ b/drivers/net/sfc/Kconfig
@@ -0,0 +1,12 @@
1config SFC
2 tristate "Solarflare Solarstorm SFC4000 support"
3 depends on PCI && INET
4 select MII
5 select INET_LRO
6 select CRC32
7 help
8 This driver supports 10-gigabit Ethernet cards based on
9 the Solarflare Communications Solarstorm SFC4000 controller.
10
11 To compile this driver as a module, choose M here. The module
12 will be called sfc.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 000000000000..0f023447eafd
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
3 tenxpress.o boards.o sfe4001.o
4
5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
new file mode 100644
index 000000000000..2806201644cc
--- /dev/null
+++ b/drivers/net/sfc/bitfield.h
@@ -0,0 +1,508 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_BITFIELD_H
12#define EFX_BITFIELD_H
13
14/*
15 * Efx bitfield access
16 *
17 * Efx NICs make extensive use of bitfields up to 128 bits
18 * wide. Since there is no native 128-bit datatype on most systems,
19 * and since 64-bit datatypes are inefficient on 32-bit systems and
20 * vice versa, we wrap accesses in a way that uses the most efficient
21 * datatype.
22 *
23 * The NICs are PCI devices and therefore little-endian. Since most
24 * of the quantities that we deal with are DMAed to/from host memory,
25 * we define our datatypes (efx_oword_t, efx_qword_t and
26 * efx_dword_t) to be little-endian.
27 */
28
29/* Lowest bit numbers and widths */
30#define EFX_DUMMY_FIELD_LBN 0
31#define EFX_DUMMY_FIELD_WIDTH 0
32#define EFX_DWORD_0_LBN 0
33#define EFX_DWORD_0_WIDTH 32
34#define EFX_DWORD_1_LBN 32
35#define EFX_DWORD_1_WIDTH 32
36#define EFX_DWORD_2_LBN 64
37#define EFX_DWORD_2_WIDTH 32
38#define EFX_DWORD_3_LBN 96
39#define EFX_DWORD_3_WIDTH 32
40
41/* Specified attribute (e.g. LBN) of the specified field */
42#define EFX_VAL(field, attribute) field ## _ ## attribute
43/* Low bit number of the specified field */
44#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
45/* Bit width of the specified field */
46#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
47/* High bit number of the specified field */
48#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
49/* Mask equal in width to the specified field.
50 *
51 * For example, a field with width 5 would have a mask of 0x1f.
52 *
53 * The maximum width mask that can be generated is 64 bits.
54 */
55#define EFX_MASK64(field) \
56 (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \
57 (((((u64) 1) << EFX_WIDTH(field))) - 1))
58
59/* Mask equal in width to the specified field.
60 *
61 * For example, a field with width 5 would have a mask of 0x1f.
62 *
63 * The maximum width mask that can be generated is 32 bits. Use
64 * EFX_MASK64 for higher width fields.
65 */
66#define EFX_MASK32(field) \
67 (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \
68 (((((u32) 1) << EFX_WIDTH(field))) - 1))
69
70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
71typedef union efx_dword {
72 __le32 u32[1];
73} efx_dword_t;
74
75/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
76typedef union efx_qword {
77 __le64 u64[1];
78 __le32 u32[2];
79 efx_dword_t dword[2];
80} efx_qword_t;
81
82/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
83typedef union efx_oword {
84 __le64 u64[2];
85 efx_qword_t qword[2];
86 __le32 u32[4];
87 efx_dword_t dword[4];
88} efx_oword_t;
89
90/* Format string and value expanders for printk */
91#define EFX_DWORD_FMT "%08x"
92#define EFX_QWORD_FMT "%08x:%08x"
93#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
94#define EFX_DWORD_VAL(dword) \
95 ((unsigned int) le32_to_cpu((dword).u32[0]))
96#define EFX_QWORD_VAL(qword) \
97 ((unsigned int) le32_to_cpu((qword).u32[1])), \
98 ((unsigned int) le32_to_cpu((qword).u32[0]))
99#define EFX_OWORD_VAL(oword) \
100 ((unsigned int) le32_to_cpu((oword).u32[3])), \
101 ((unsigned int) le32_to_cpu((oword).u32[2])), \
102 ((unsigned int) le32_to_cpu((oword).u32[1])), \
103 ((unsigned int) le32_to_cpu((oword).u32[0]))
104
105/*
106 * Extract bit field portion [low,high) from the native-endian element
107 * which contains bits [min,max).
108 *
109 * For example, suppose "element" represents the high 32 bits of a
110 * 64-bit value, and we wish to extract the bits belonging to the bit
111 * field occupying bits 28-45 of this 64-bit value.
112 *
113 * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
114 *
115 * ( element ) << 4
116 *
117 * The result will contain the relevant bits filled in in the range
118 * [0,high-low), with garbage in bits [high-low+1,...).
119 */
120#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
121 (((low > max) || (high < min)) ? 0 : \
122 ((low > min) ? \
123 ((native_element) >> (low - min)) : \
124 ((native_element) << (min - low))))
125
126/*
127 * Extract bit field portion [low,high) from the 64-bit little-endian
128 * element which contains bits [min,max)
129 */
130#define EFX_EXTRACT64(element, min, max, low, high) \
131 EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
132
133/*
134 * Extract bit field portion [low,high) from the 32-bit little-endian
135 * element which contains bits [min,max)
136 */
137#define EFX_EXTRACT32(element, min, max, low, high) \
138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
139
140#define EFX_EXTRACT_OWORD64(oword, low, high) \
141 (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
143
144#define EFX_EXTRACT_QWORD64(qword, low, high) \
145 EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
146
147#define EFX_EXTRACT_OWORD32(oword, low, high) \
148 (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
149 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
150 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
151 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
152
153#define EFX_EXTRACT_QWORD32(qword, low, high) \
154 (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
155 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
156
157#define EFX_EXTRACT_DWORD(dword, low, high) \
158 EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
159
160#define EFX_OWORD_FIELD64(oword, field) \
161 (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
162 & EFX_MASK64(field))
163
164#define EFX_QWORD_FIELD64(qword, field) \
165 (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
166 & EFX_MASK64(field))
167
168#define EFX_OWORD_FIELD32(oword, field) \
169 (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
170 & EFX_MASK32(field))
171
172#define EFX_QWORD_FIELD32(qword, field) \
173 (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
174 & EFX_MASK32(field))
175
176#define EFX_DWORD_FIELD(dword, field) \
177 (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
178 & EFX_MASK32(field))
179
180#define EFX_OWORD_IS_ZERO64(oword) \
181 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
182
183#define EFX_QWORD_IS_ZERO64(qword) \
184 (((qword).u64[0]) == (__force __le64) 0)
185
186#define EFX_OWORD_IS_ZERO32(oword) \
187 (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
188 == (__force __le32) 0)
189
190#define EFX_QWORD_IS_ZERO32(qword) \
191 (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
192
193#define EFX_DWORD_IS_ZERO(dword) \
194 (((dword).u32[0]) == (__force __le32) 0)
195
196#define EFX_OWORD_IS_ALL_ONES64(oword) \
197 (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
198
199#define EFX_QWORD_IS_ALL_ONES64(qword) \
200 ((qword).u64[0] == ~((__force __le64) 0))
201
202#define EFX_OWORD_IS_ALL_ONES32(oword) \
203 (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
204 == ~((__force __le32) 0))
205
206#define EFX_QWORD_IS_ALL_ONES32(qword) \
207 (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
208
209#define EFX_DWORD_IS_ALL_ONES(dword) \
210 ((dword).u32[0] == ~((__force __le32) 0))
211
212#if BITS_PER_LONG == 64
213#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
214#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
215#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
216#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
217#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
218#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
219#else
220#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
221#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
222#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
223#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
224#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
225#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
226#endif
227
228/*
229 * Construct bit field portion
230 *
231 * Creates the portion of the bit field [low,high) that lies within
232 * the range [min,max).
233 */
234#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
235 (((low > max) || (high < min)) ? 0 : \
236 ((low > min) ? \
237 (((u64) (value)) << (low - min)) : \
238 (((u64) (value)) >> (min - low))))
239
240#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
241 (((low > max) || (high < min)) ? 0 : \
242 ((low > min) ? \
243 (((u32) (value)) << (low - min)) : \
244 (((u32) (value)) >> (min - low))))
245
246#define EFX_INSERT_NATIVE(min, max, low, high, value) \
247 ((((max - min) >= 32) || ((high - low) >= 32)) ? \
248 EFX_INSERT_NATIVE64(min, max, low, high, value) : \
249 EFX_INSERT_NATIVE32(min, max, low, high, value))
250
251/*
252 * Construct bit field portion
253 *
254 * Creates the portion of the named bit field that lies within the
255 * range [min,max).
256 */
257#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
258 EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
259 EFX_HIGH_BIT(field), value)
260
261/*
262 * Construct bit field
263 *
264 * Creates the portion of the named bit fields that lie within the
265 * range [min,max).
266 */
267#define EFX_INSERT_FIELDS_NATIVE(min, max, \
268 field1, value1, \
269 field2, value2, \
270 field3, value3, \
271 field4, value4, \
272 field5, value5, \
273 field6, value6, \
274 field7, value7, \
275 field8, value8, \
276 field9, value9, \
277 field10, value10) \
278 (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
279 EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
280 EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
281 EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
282 EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
283 EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
284 EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
285 EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
286 EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
287 EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
288
289#define EFX_INSERT_FIELDS64(...) \
290 cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
291
292#define EFX_INSERT_FIELDS32(...) \
293 cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
294
295#define EFX_POPULATE_OWORD64(oword, ...) do { \
296 (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
297 (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
298 } while (0)
299
300#define EFX_POPULATE_QWORD64(qword, ...) do { \
301 (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
302 } while (0)
303
304#define EFX_POPULATE_OWORD32(oword, ...) do { \
305 (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
306 (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
307 (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
308 (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
309 } while (0)
310
311#define EFX_POPULATE_QWORD32(qword, ...) do { \
312 (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
313 (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
314 } while (0)
315
316#define EFX_POPULATE_DWORD(dword, ...) do { \
317 (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
318 } while (0)
319
320#if BITS_PER_LONG == 64
321#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
322#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
323#else
324#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
325#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
326#endif
327
328/* Populate an octword field with various numbers of arguments */
329#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
330#define EFX_POPULATE_OWORD_9(oword, ...) \
331 EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
332#define EFX_POPULATE_OWORD_8(oword, ...) \
333 EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
334#define EFX_POPULATE_OWORD_7(oword, ...) \
335 EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
336#define EFX_POPULATE_OWORD_6(oword, ...) \
337 EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
338#define EFX_POPULATE_OWORD_5(oword, ...) \
339 EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
340#define EFX_POPULATE_OWORD_4(oword, ...) \
341 EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
342#define EFX_POPULATE_OWORD_3(oword, ...) \
343 EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
344#define EFX_POPULATE_OWORD_2(oword, ...) \
345 EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
346#define EFX_POPULATE_OWORD_1(oword, ...) \
347 EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
348#define EFX_ZERO_OWORD(oword) \
349 EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
350#define EFX_SET_OWORD(oword) \
351 EFX_POPULATE_OWORD_4(oword, \
352 EFX_DWORD_0, 0xffffffff, \
353 EFX_DWORD_1, 0xffffffff, \
354 EFX_DWORD_2, 0xffffffff, \
355 EFX_DWORD_3, 0xffffffff)
356
357/* Populate a quadword field with various numbers of arguments */
358#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
359#define EFX_POPULATE_QWORD_9(qword, ...) \
360 EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
361#define EFX_POPULATE_QWORD_8(qword, ...) \
362 EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
363#define EFX_POPULATE_QWORD_7(qword, ...) \
364 EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
365#define EFX_POPULATE_QWORD_6(qword, ...) \
366 EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
367#define EFX_POPULATE_QWORD_5(qword, ...) \
368 EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
369#define EFX_POPULATE_QWORD_4(qword, ...) \
370 EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
371#define EFX_POPULATE_QWORD_3(qword, ...) \
372 EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
373#define EFX_POPULATE_QWORD_2(qword, ...) \
374 EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
375#define EFX_POPULATE_QWORD_1(qword, ...) \
376 EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
377#define EFX_ZERO_QWORD(qword) \
378 EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
379#define EFX_SET_QWORD(qword) \
380 EFX_POPULATE_QWORD_2(qword, \
381 EFX_DWORD_0, 0xffffffff, \
382 EFX_DWORD_1, 0xffffffff)
383
384/* Populate a dword field with various numbers of arguments */
385#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
386#define EFX_POPULATE_DWORD_9(dword, ...) \
387 EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
388#define EFX_POPULATE_DWORD_8(dword, ...) \
389 EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
390#define EFX_POPULATE_DWORD_7(dword, ...) \
391 EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
392#define EFX_POPULATE_DWORD_6(dword, ...) \
393 EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
394#define EFX_POPULATE_DWORD_5(dword, ...) \
395 EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
396#define EFX_POPULATE_DWORD_4(dword, ...) \
397 EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
398#define EFX_POPULATE_DWORD_3(dword, ...) \
399 EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
400#define EFX_POPULATE_DWORD_2(dword, ...) \
401 EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
402#define EFX_POPULATE_DWORD_1(dword, ...) \
403 EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
404#define EFX_ZERO_DWORD(dword) \
405 EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
406#define EFX_SET_DWORD(dword) \
407 EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
408
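/*
 * Illustrative usage sketch (annotation, not part of the original header):
 * the EFX_POPULATE_*_N macros pad the argument list with EFX_DUMMY_FIELD
 * pairs until ten field/value pairs are present, so a caller names only
 * the fields it cares about and every unnamed bit ends up zero.  The
 * example assumes the efx_oword_t type and the generic EFX_DWORD_* fields
 * referenced elsewhere in this header.
 */
#if 0	/* example only */
	efx_oword_t reg;

	/* Set two 32-bit subfields; all other bits become zero */
	EFX_POPULATE_OWORD_2(reg,
			     EFX_DWORD_0, 0x12345678,
			     EFX_DWORD_3, 0x9abcdef0);
#endif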
409/*
410 * Modify a named field within an already-populated structure. Used
411 * for read-modify-write operations.
412 *
413 */
414
415#define EFX_INVERT_OWORD(oword) do { \
416 (oword).u64[0] = ~((oword).u64[0]); \
417 (oword).u64[1] = ~((oword).u64[1]); \
418 } while (0)
419
420#define EFX_INSERT_FIELD64(...) \
421 cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
422
423#define EFX_INSERT_FIELD32(...) \
424 cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
425
426#define EFX_INPLACE_MASK64(min, max, field) \
427 EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
428
429#define EFX_INPLACE_MASK32(min, max, field) \
430 EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
431
432#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \
433 (oword).u64[0] = (((oword).u64[0] \
434 & ~EFX_INPLACE_MASK64(0, 63, field)) \
435 | EFX_INSERT_FIELD64(0, 63, field, value)); \
436 (oword).u64[1] = (((oword).u64[1] \
437 & ~EFX_INPLACE_MASK64(64, 127, field)) \
438 | EFX_INSERT_FIELD64(64, 127, field, value)); \
439 } while (0)
440
441#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \
442 (qword).u64[0] = (((qword).u64[0] \
443 & ~EFX_INPLACE_MASK64(0, 63, field)) \
444 | EFX_INSERT_FIELD64(0, 63, field, value)); \
445 } while (0)
446
447#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \
448 (oword).u32[0] = (((oword).u32[0] \
449 & ~EFX_INPLACE_MASK32(0, 31, field)) \
450 | EFX_INSERT_FIELD32(0, 31, field, value)); \
451 (oword).u32[1] = (((oword).u32[1] \
452 & ~EFX_INPLACE_MASK32(32, 63, field)) \
453 | EFX_INSERT_FIELD32(32, 63, field, value)); \
454 (oword).u32[2] = (((oword).u32[2] \
455 & ~EFX_INPLACE_MASK32(64, 95, field)) \
456 | EFX_INSERT_FIELD32(64, 95, field, value)); \
457 (oword).u32[3] = (((oword).u32[3] \
458 & ~EFX_INPLACE_MASK32(96, 127, field)) \
459 | EFX_INSERT_FIELD32(96, 127, field, value)); \
460 } while (0)
461
462#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \
463 (qword).u32[0] = (((qword).u32[0] \
464 & ~EFX_INPLACE_MASK32(0, 31, field)) \
465 | EFX_INSERT_FIELD32(0, 31, field, value)); \
466 (qword).u32[1] = (((qword).u32[1] \
467 & ~EFX_INPLACE_MASK32(32, 63, field)) \
468 | EFX_INSERT_FIELD32(32, 63, field, value)); \
469 } while (0)
470
471#define EFX_SET_DWORD_FIELD(dword, field, value) do { \
472 (dword).u32[0] = (((dword).u32[0] \
473 & ~EFX_INPLACE_MASK32(0, 31, field)) \
474 | EFX_INSERT_FIELD32(0, 31, field, value)); \
475 } while (0)
476
477#if BITS_PER_LONG == 64
478#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
479#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
480#else
481#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
482#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
483#endif
484
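/*
 * Illustrative sketch (annotation, not part of the original header):
 * EFX_SET_*_FIELD performs a read-modify-write on the in-memory register
 * image - each native word is ANDed with the inverted in-place field mask
 * and ORed with the freshly inserted value - so only the named field
 * changes.  Field names are the generic EFX_DWORD_* ones used above.
 */
#if 0	/* example only */
	efx_oword_t reg;

	EFX_ZERO_OWORD(reg);
	/* Overwrite just EFX_DWORD_1 (bits 32-63 by the naming convention),
	 * leaving the other subfields untouched */
	EFX_SET_OWORD_FIELD(reg, EFX_DWORD_1, 0xdeadbeef);
#endif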
485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
486 if (FALCON_REV(efx) >= FALCON_REV_B0) { \
487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
488 } else { \
489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
490 } \
491} while (0)
492
493#define EFX_QWORD_FIELD_VER(efx, qword, field) \
494 (FALCON_REV(efx) >= FALCON_REV_B0 ? \
495 EFX_QWORD_FIELD((qword), field##_B0) : \
496 EFX_QWORD_FIELD((qword), field##_A1))
497
498/* Used to avoid compiler warnings about shift range exceeding width
499 * of the data types when dma_addr_t is only 32 bits wide.
500 */
501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
502#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
505 ~((u64) 0) : ~((u32) 0))
506#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
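/*
 * Worked example (annotation, not original source): with a 32-bit
 * dma_addr_t, DMA_ADDR_T_WIDTH is 32, so EFX_DMA_TYPE_WIDTH(46) evaluates
 * to 32 and EFX_DMA_MAX_MASK to ~((u32) 0); a 46-bit hardware mask passed
 * through EFX_DMA_MASK() is therefore clipped to 32 bits without any
 * out-of-range shift.
 */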
507
508#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
new file mode 100644
index 000000000000..eecaa6d58584
--- /dev/null
+++ b/drivers/net/sfc/boards.c
@@ -0,0 +1,167 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14
15/* Macros for unpacking the board revision */
16/* The revision info is in host byte order. */
17#define BOARD_TYPE(_rev) (_rev >> 8)
18#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
19#define BOARD_MINOR(_rev) (_rev & 0xf)
20
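/*
 * Worked example (annotation, not original source): revision_info 0x0213
 * unpacks as BOARD_TYPE = 2 (EFX_BOARD_SFE4002), BOARD_MAJOR = 1 and
 * BOARD_MINOR = 3, which efx_set_board_info() below reports as "rev B3".
 */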
21/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */

22#define BLINK_INTERVAL (HZ/2)
23
24static void blink_led_timer(unsigned long context)
25{
26 struct efx_nic *efx = (struct efx_nic *)context;
27 struct efx_blinker *bl = &efx->board_info.blinker;
28 efx->board_info.set_fault_led(efx, bl->state);
29 bl->state = !bl->state;
30 if (bl->resubmit) {
31 bl->timer.expires = jiffies + BLINK_INTERVAL;
32 add_timer(&bl->timer);
33 }
34}
35
36static void board_blink(struct efx_nic *efx, int blink)
37{
38 struct efx_blinker *blinker = &efx->board_info.blinker;
39
40 /* The rtnl mutex serialises all ethtool ioctls, so
41 * nothing special needs doing here. */
42 if (blink) {
43 blinker->resubmit = 1;
44 blinker->state = 0;
45 setup_timer(&blinker->timer, blink_led_timer,
46 (unsigned long)efx);
47 blinker->timer.expires = jiffies + BLINK_INTERVAL;
48 add_timer(&blinker->timer);
49 } else {
50 blinker->resubmit = 0;
51 if (blinker->timer.function)
52 del_timer_sync(&blinker->timer);
53 efx->board_info.set_fault_led(efx, 0);
54 }
55}
56
57/*****************************************************************************
58 * Support for the SFE4002
59 *
60 */
61/****************************************************************************/
62/* LED allocations. Note that on rev A0 boards the schematic and the reality
63 * differ: red and green are swapped. Below is the fixed (A1) layout (there
64 * are only 3 A0 boards in existence, so no real reason to make this
65 * conditional).
66 */
67#define SFE4002_FAULT_LED (2) /* Red */
68#define SFE4002_RX_LED (0) /* Green */
69#define SFE4002_TX_LED (1) /* Amber */
70
71static int sfe4002_init_leds(struct efx_nic *efx)
72{
73 /* Set the TX and RX LEDs to reflect status and activity, and the
74 * fault LED off */
75 xfp_set_led(efx, SFE4002_TX_LED,
76 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
77 xfp_set_led(efx, SFE4002_RX_LED,
78 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
79 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
80 efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
81 return 0;
82}
83
84static void sfe4002_fault_led(struct efx_nic *efx, int state)
85{
86 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
87 QUAKE_LED_OFF);
88}
89
90static int sfe4002_init(struct efx_nic *efx)
91{
92 efx->board_info.init_leds = sfe4002_init_leds;
93 efx->board_info.set_fault_led = sfe4002_fault_led;
94 efx->board_info.blink = board_blink;
95 return 0;
96}
97
98/* This will get expanded as board-specific details get moved out of the
99 * PHY drivers. */
100struct efx_board_data {
101 const char *ref_model;
102 const char *gen_type;
103 int (*init) (struct efx_nic *nic);
104};
105
106static int dummy_init(struct efx_nic *nic)
107{
108 return 0;
109}
110
111static struct efx_board_data board_data[] = {
112 [EFX_BOARD_INVALID] =
113 {NULL, NULL, dummy_init},
114 [EFX_BOARD_SFE4001] =
115 {"SFE4001", "10GBASE-T adapter", sfe4001_poweron},
116 [EFX_BOARD_SFE4002] =
117 {"SFE4002", "XFP adapter", sfe4002_init},
118};
119
120int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
121{
122 int rc = 0;
123 struct efx_board_data *data;
124
125 if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
126 EFX_ERR(efx, "squashing unknown board type %d\n",
127 BOARD_TYPE(revision_info));
128 revision_info = 0;
129 }
130
131 if (BOARD_TYPE(revision_info) == 0) {
132 efx->board_info.major = 0;
133 efx->board_info.minor = 0;
134	/* For early boards that don't have revision info, there is
135 * only 1 board for each PHY type, so we can work it out, with
136 * the exception of the PHY-less boards. */
137 switch (efx->phy_type) {
138 case PHY_TYPE_10XPRESS:
139 efx->board_info.type = EFX_BOARD_SFE4001;
140 break;
141 case PHY_TYPE_XFP:
142 efx->board_info.type = EFX_BOARD_SFE4002;
143 break;
144 default:
145 efx->board_info.type = 0;
146 break;
147 }
148 } else {
149 efx->board_info.type = BOARD_TYPE(revision_info);
150 efx->board_info.major = BOARD_MAJOR(revision_info);
151 efx->board_info.minor = BOARD_MINOR(revision_info);
152 }
153
154 data = &board_data[efx->board_info.type];
155
156 /* Report the board model number or generic type for recognisable
157 * boards. */
158 if (efx->board_info.type != 0)
159 EFX_INFO(efx, "board is %s rev %c%d\n",
160 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
161 ? data->ref_model : data->gen_type,
162 'A' + efx->board_info.major, efx->board_info.minor);
163
164 efx->board_info.init = data->init;
165
166 return rc;
167}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
new file mode 100644
index 000000000000..f56341d428e1
--- /dev/null
+++ b/drivers/net/sfc/boards.h
@@ -0,0 +1,26 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_INVALID = 0,
16 EFX_BOARD_SFE4001 = 1, /* SFE4001 (10GBASE-T) */
17 EFX_BOARD_SFE4002 = 2,
18 /* Insert new types before here */
19 EFX_BOARD_MAX
20};
21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx);
25
26#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
new file mode 100644
index 000000000000..59edcf793c19
--- /dev/null
+++ b/drivers/net/sfc/efx.c
@@ -0,0 +1,2208 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h>
22#include "net_driver.h"
23#include "gmii.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h"
28#include "mdio_10g.h"
29#include "falcon.h"
30#include "workarounds.h"
31#include "mac.h"
32
33#define EFX_MAX_MTU (9 * 1024)
34
35/* RX slow fill workqueue. If memory allocation fails in the fast path,
36 * a work item is pushed onto this work queue to retry the allocation later,
37 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
38 * workqueue, there is nothing to be gained in making it per NIC
39 */
40static struct workqueue_struct *refill_workqueue;
41
42/**************************************************************************
43 *
44 * Configurable values
45 *
46 *************************************************************************/
47
48/*
49 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
50 *
51 * This sets the default for new devices. It can be controlled later
52 * using ethtool.
53 */
54static int lro = 1;
55module_param(lro, int, 0644);
56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
57
58/*
59 * Use separate channels for TX and RX events
60 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to
62 * apply a higher level of interrupt moderation to TX events.
63 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
65 * is not written
66 */
67static unsigned int separate_tx_and_rx_channels = 1;
68
69/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices.
71 */
72static int napi_weight = 64;
73
74/* This is the time (in jiffies) between invocations of the hardware
75 * monitor, which checks for known hardware bugs and resets the
76 * hardware and driver as necessary.
77 */
78unsigned int efx_monitor_interval = 1 * HZ;
79
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = 1;
84
85/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated
88 * MAC address. This allows for loading the sfc_mtd driver to
89 * reprogram the flash, even if the flash contents (including the MAC
90 * address) have previously been erased.
91 */
92static unsigned int allow_bad_hwaddr;
93
94/* Initial interrupt moderation settings. They can be modified after
95 * module load with ethtool.
96 *
97 * The default for RX should strike a balance between increasing the
98 * round-trip latency and reducing overhead.
99 */
100static unsigned int rx_irq_mod_usec = 60;
101
102/* Initial interrupt moderation settings. They can be modified after
103 * module load with ethtool.
104 *
105 * This default is chosen to ensure that a 10G link does not go idle
106 * while a TX queue is stopped after it has become full. A queue is
107 * restarted when it drops below half full. The time this takes (assuming
108 * worst case 3 descriptors per packet and 1024 descriptors) is
109 * 512 / 3 * 1.2 = 205 usec.
110 */
111static unsigned int tx_irq_mod_usec = 150;
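/*
 * Annotation (not original source): the 1.2 factor above is presumably the
 * per-packet wire time of a full-size frame at 10 Gb/s (1500 bytes * 8 bits
 * / 10 Gb/s = 1.2 usec); draining half the 1024-entry ring at a worst case
 * of 3 descriptors per packet then takes roughly 512 / 3 * 1.2 = 205 usec.
 */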
112
113/* This is the first interrupt mode to try out of:
114 * 0 => MSI-X
115 * 1 => MSI
116 * 2 => legacy
117 */
118static unsigned int interrupt_mode;
119
120/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
121 * i.e. the number of CPUs among which we may distribute simultaneous
122 * interrupt handling.
123 *
124 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
125 * The default (0) means to assign an interrupt to each package (level II cache)
126 */
127static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
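/*
 * Example (annotation, not original source): assuming the module is built
 * as sfc.ko, "modprobe sfc rss_cpus=4" caps RSS at four MSI-X
 * interrupts/RX queues regardless of the number of online CPUs; the 0444
 * permission means the value can only be set at module load time.
 */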
130
131/**************************************************************************
132 *
133 * Utility functions and prototypes
134 *
135 *************************************************************************/
136static void efx_remove_channel(struct efx_channel *channel);
137static void efx_remove_port(struct efx_nic *efx);
138static void efx_fini_napi(struct efx_nic *efx);
139static void efx_fini_channels(struct efx_nic *efx);
140
141#define EFX_ASSERT_RESET_SERIALISED(efx) \
142 do { \
143 if ((efx->state == STATE_RUNNING) || \
144 (efx->state == STATE_RESETTING)) \
145 ASSERT_RTNL(); \
146 } while (0)
147
148/**************************************************************************
149 *
150 * Event queue processing
151 *
152 *************************************************************************/
153
154/* Process channel's event queue
155 *
156 * This function is responsible for processing the event queue of a
157 * single channel. The caller must guarantee that this function will
158 * never be concurrently called more than once on the same channel,
159 * though different channels may be being processed concurrently.
160 */
161static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
162{
163 int rxdmaqs;
164 struct efx_rx_queue *rx_queue;
165
166 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
167 !channel->enabled))
168 return rx_quota;
169
170 rxdmaqs = falcon_process_eventq(channel, &rx_quota);
171
172 /* Deliver last RX packet. */
173 if (channel->rx_pkt) {
174 __efx_rx_packet(channel, channel->rx_pkt,
175 channel->rx_pkt_csummed);
176 channel->rx_pkt = NULL;
177 }
178
179 efx_flush_lro(channel);
180 efx_rx_strategy(channel);
181
182 /* Refill descriptor rings as necessary */
183 rx_queue = &channel->efx->rx_queue[0];
184 while (rxdmaqs) {
185 if (rxdmaqs & 0x01)
186 efx_fast_push_rx_descriptors(rx_queue);
187 rx_queue++;
188 rxdmaqs >>= 1;
189 }
190
191 return rx_quota;
192}
193
194/* Mark channel as finished processing
195 *
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
199 */
200static inline void efx_channel_processed(struct efx_channel *channel)
201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race
203 * with finishing processing, a new interrupt will be raised.
204 */
205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */
207 falcon_eventq_read_ack(channel);
208}
209
210/* NAPI poll handler
211 *
212 * NAPI guarantees serialisation of polls of the same device, which
213 * provides the guarantee required by efx_process_channel().
214 */
215static int efx_poll(struct napi_struct *napi, int budget)
216{
217 struct efx_channel *channel =
218 container_of(napi, struct efx_channel, napi_str);
219 struct net_device *napi_dev = channel->napi_dev;
220 int unused;
221 int rx_packets;
222
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
225
226 unused = efx_process_channel(channel, budget);
227 rx_packets = (budget - unused);
228
229 if (rx_packets < budget) {
230 /* There is no race here; although napi_disable() will
231 * only wait for netif_rx_complete(), this isn't a problem
232 * since efx_channel_processed() will have no effect if
233 * interrupts have already been disabled.
234 */
235 netif_rx_complete(napi_dev, napi);
236 efx_channel_processed(channel);
237 }
238
239 return rx_packets;
240}
241
242/* Process the eventq of the specified channel immediately on this CPU
243 *
244 * Disable hardware generated interrupts, wait for any existing
245 * processing to finish, then directly poll (and ack) the eventq.
246 * Finally reenable NAPI and interrupts.
247 *
248 * Since we are touching interrupts, the caller should hold the suspend lock
249 */
250void efx_process_channel_now(struct efx_channel *channel)
251{
252 struct efx_nic *efx = channel->efx;
253
254 BUG_ON(!channel->used_flags);
255 BUG_ON(!channel->enabled);
256
257 /* Disable interrupts and wait for ISRs to complete */
258 falcon_disable_interrupts(efx);
259 if (efx->legacy_irq)
260 synchronize_irq(efx->legacy_irq);
261 if (channel->has_interrupt && channel->irq)
262 synchronize_irq(channel->irq);
263
264 /* Wait for any NAPI processing to complete */
265 napi_disable(&channel->napi_str);
266
267 /* Poll the channel */
268 (void) efx_process_channel(channel, efx->type->evq_size);
269
270 /* Ack the eventq. This may cause an interrupt to be generated
271 * when they are reenabled */
272 efx_channel_processed(channel);
273
274 napi_enable(&channel->napi_str);
275 falcon_enable_interrupts(efx);
276}
277
278/* Create event queue
279 * Event queue memory allocations are done only once. If the channel
280 * is reset, the memory buffer will be reused; this guards against
281 * errors during channel reset and also simplifies interrupt handling.
282 */
283static int efx_probe_eventq(struct efx_channel *channel)
284{
285 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286
287 return falcon_probe_eventq(channel);
288}
289
290/* Prepare channel's event queue */
291static int efx_init_eventq(struct efx_channel *channel)
292{
293 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294
295 channel->eventq_read_ptr = 0;
296
297 return falcon_init_eventq(channel);
298}
299
300static void efx_fini_eventq(struct efx_channel *channel)
301{
302 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303
304 falcon_fini_eventq(channel);
305}
306
307static void efx_remove_eventq(struct efx_channel *channel)
308{
309 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310
311 falcon_remove_eventq(channel);
312}
313
314/**************************************************************************
315 *
316 * Channel handling
317 *
318 *************************************************************************/
319
320/* Setup per-NIC RX buffer parameters.
321 * Calculate the rx buffer allocation parameters required to support
322 * the current MTU, including padding for header alignment and overruns.
323 */
324static void efx_calc_rx_buffer_params(struct efx_nic *efx)
325{
326 unsigned int order, len;
327
328 len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
329 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
330 efx->type->rx_buffer_padding);
331
332 /* Calculate page-order */
333 for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
334 ;
335
336 efx->rx_buffer_len = len;
337 efx->rx_buffer_order = order;
338}
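/*
 * Worked example (annotation, not original source): if the computed buffer
 * length came to 5000 bytes with a PAGE_SIZE of 4096, the loop above would
 * stop at order 1 (two pages, 8192 bytes), so rx_buffer_order would be 1.
 */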
339
340static int efx_probe_channel(struct efx_channel *channel)
341{
342 struct efx_tx_queue *tx_queue;
343 struct efx_rx_queue *rx_queue;
344 int rc;
345
346 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
347
348 rc = efx_probe_eventq(channel);
349 if (rc)
350 goto fail1;
351
352 efx_for_each_channel_tx_queue(tx_queue, channel) {
353 rc = efx_probe_tx_queue(tx_queue);
354 if (rc)
355 goto fail2;
356 }
357
358 efx_for_each_channel_rx_queue(rx_queue, channel) {
359 rc = efx_probe_rx_queue(rx_queue);
360 if (rc)
361 goto fail3;
362 }
363
364 channel->n_rx_frm_trunc = 0;
365
366 return 0;
367
368 fail3:
369 efx_for_each_channel_rx_queue(rx_queue, channel)
370 efx_remove_rx_queue(rx_queue);
371 fail2:
372 efx_for_each_channel_tx_queue(tx_queue, channel)
373 efx_remove_tx_queue(tx_queue);
374 fail1:
375 return rc;
376}
377
378
379/* Channels are shut down and reinitialised whilst the NIC is running
380 * to propagate configuration changes (mtu, checksum offload), or
381 * to clear hardware error conditions
382 */
383static int efx_init_channels(struct efx_nic *efx)
384{
385 struct efx_tx_queue *tx_queue;
386 struct efx_rx_queue *rx_queue;
387 struct efx_channel *channel;
388 int rc = 0;
389
390 efx_calc_rx_buffer_params(efx);
391
392 /* Initialise the channels */
393 efx_for_each_channel(channel, efx) {
394 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
395
396 rc = efx_init_eventq(channel);
397 if (rc)
398 goto err;
399
400 efx_for_each_channel_tx_queue(tx_queue, channel) {
401 rc = efx_init_tx_queue(tx_queue);
402 if (rc)
403 goto err;
404 }
405
406 /* The rx buffer allocation strategy is MTU dependent */
407 efx_rx_strategy(channel);
408
409 efx_for_each_channel_rx_queue(rx_queue, channel) {
410 rc = efx_init_rx_queue(rx_queue);
411 if (rc)
412 goto err;
413 }
414
415 WARN_ON(channel->rx_pkt != NULL);
416 efx_rx_strategy(channel);
417 }
418
419 return 0;
420
421 err:
422 EFX_ERR(efx, "failed to initialise channel %d\n",
423 channel ? channel->channel : -1);
424 efx_fini_channels(efx);
425 return rc;
426}
427
428/* This enables event queue processing and packet transmission.
429 *
430 * Note that this function is not allowed to fail, since that would
431 * introduce too much complexity into the suspend/resume path.
432 */
433static void efx_start_channel(struct efx_channel *channel)
434{
435 struct efx_rx_queue *rx_queue;
436
437 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
438
439 if (!(channel->efx->net_dev->flags & IFF_UP))
440 netif_napi_add(channel->napi_dev, &channel->napi_str,
441 efx_poll, napi_weight);
442
443 channel->work_pending = 0;
444 channel->enabled = 1;
445 smp_wmb(); /* ensure channel updated before first interrupt */
446
447 napi_enable(&channel->napi_str);
448
449 /* Load up RX descriptors */
450 efx_for_each_channel_rx_queue(rx_queue, channel)
451 efx_fast_push_rx_descriptors(rx_queue);
452}
453
454/* This disables event queue processing and packet transmission.
455 * This function does not guarantee that all queue processing
456 * (e.g. RX refill) is complete.
457 */
458static void efx_stop_channel(struct efx_channel *channel)
459{
460 struct efx_rx_queue *rx_queue;
461
462 if (!channel->enabled)
463 return;
464
465 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
466
467 channel->enabled = 0;
468 napi_disable(&channel->napi_str);
469
470 /* Ensure that any worker threads have exited or will be no-ops */
471 efx_for_each_channel_rx_queue(rx_queue, channel) {
472 spin_lock_bh(&rx_queue->add_lock);
473 spin_unlock_bh(&rx_queue->add_lock);
474 }
475}
476
477static void efx_fini_channels(struct efx_nic *efx)
478{
479 struct efx_channel *channel;
480 struct efx_tx_queue *tx_queue;
481 struct efx_rx_queue *rx_queue;
482
483 EFX_ASSERT_RESET_SERIALISED(efx);
484 BUG_ON(efx->port_enabled);
485
486 efx_for_each_channel(channel, efx) {
487 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
488
489 efx_for_each_channel_rx_queue(rx_queue, channel)
490 efx_fini_rx_queue(rx_queue);
491 efx_for_each_channel_tx_queue(tx_queue, channel)
492 efx_fini_tx_queue(tx_queue);
493 }
494
495 /* Do the event queues last so that we can handle flush events
496 * for all DMA queues. */
497 efx_for_each_channel(channel, efx) {
498 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
499
500 efx_fini_eventq(channel);
501 }
502}
503
504static void efx_remove_channel(struct efx_channel *channel)
505{
506 struct efx_tx_queue *tx_queue;
507 struct efx_rx_queue *rx_queue;
508
509 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
510
511 efx_for_each_channel_rx_queue(rx_queue, channel)
512 efx_remove_rx_queue(rx_queue);
513 efx_for_each_channel_tx_queue(tx_queue, channel)
514 efx_remove_tx_queue(tx_queue);
515 efx_remove_eventq(channel);
516
517 channel->used_flags = 0;
518}
519
520void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
521{
522 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
523}
524
525/**************************************************************************
526 *
527 * Port handling
528 *
529 **************************************************************************/
530
531/* This ensures that the kernel is kept informed (via
532 * netif_carrier_on/off) of the link status, and also maintains the
533 * link status's stop on the port's TX queue.
534 */
535static void efx_link_status_changed(struct efx_nic *efx)
536{
537 int carrier_ok;
538
539 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
540 * that no events are triggered between unregister_netdev() and the
541 * driver unloading. A more general condition is that NETDEV_CHANGE
542 * can only be generated between NETDEV_UP and NETDEV_DOWN */
543 if (!netif_running(efx->net_dev))
544 return;
545
546 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
547 if (efx->link_up != carrier_ok) {
548 efx->n_link_state_changes++;
549
550 if (efx->link_up)
551 netif_carrier_on(efx->net_dev);
552 else
553 netif_carrier_off(efx->net_dev);
554 }
555
556 /* Status message for kernel log */
557 if (efx->link_up) {
558 struct mii_if_info *gmii = &efx->mii;
559 unsigned adv, lpa;
560 /* NONE here means direct XAUI from the controller, with no
561 * MDIO-attached device we can query. */
562 if (efx->phy_type != PHY_TYPE_NONE) {
563 adv = gmii_advertised(gmii);
564 lpa = gmii_lpa(gmii);
565 } else {
566 lpa = GM_LPA_10000 | LPA_DUPLEX;
567 adv = lpa;
568 }
569 EFX_INFO(efx, "link up at %dMbps %s-duplex "
570 "(adv %04x lpa %04x) (MTU %d)%s\n",
571 (efx->link_options & GM_LPA_10000 ? 10000 :
572 (efx->link_options & GM_LPA_1000 ? 1000 :
573 (efx->link_options & GM_LPA_100 ? 100 :
574 10))),
575 (efx->link_options & GM_LPA_DUPLEX ?
576 "full" : "half"),
577 adv, lpa,
578 efx->net_dev->mtu,
579 (efx->promiscuous ? " [PROMISC]" : ""));
580 } else {
581 EFX_INFO(efx, "link down\n");
582 }
583
584}
585
586/* This call reinitialises the MAC to pick up new PHY settings. The
587 * caller must hold the mac_lock */
588static void __efx_reconfigure_port(struct efx_nic *efx)
589{
590 WARN_ON(!mutex_is_locked(&efx->mac_lock));
591
592 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
593 raw_smp_processor_id());
594
595 falcon_reconfigure_xmac(efx);
596
597 /* Inform kernel of loss/gain of carrier */
598 efx_link_status_changed(efx);
599}
600
601/* Reinitialise the MAC to pick up new PHY settings, even if the port is
602 * disabled. */
603void efx_reconfigure_port(struct efx_nic *efx)
604{
605 EFX_ASSERT_RESET_SERIALISED(efx);
606
607 mutex_lock(&efx->mac_lock);
608 __efx_reconfigure_port(efx);
609 mutex_unlock(&efx->mac_lock);
610}
611
612/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
613 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
614 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
615static void efx_reconfigure_work(struct work_struct *data)
616{
617 struct efx_nic *efx = container_of(data, struct efx_nic,
618 reconfigure_work);
619
620 mutex_lock(&efx->mac_lock);
621 if (efx->port_enabled)
622 __efx_reconfigure_port(efx);
623 mutex_unlock(&efx->mac_lock);
624}
625
626static int efx_probe_port(struct efx_nic *efx)
627{
628 int rc;
629
630 EFX_LOG(efx, "create port\n");
631
632 /* Connect up MAC/PHY operations table and read MAC address */
633 rc = falcon_probe_port(efx);
634 if (rc)
635 goto err;
636
637 /* Sanity check MAC address */
638 if (is_valid_ether_addr(efx->mac_address)) {
639 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
640 } else {
641 DECLARE_MAC_BUF(mac);
642
643 EFX_ERR(efx, "invalid MAC address %s\n",
644 print_mac(mac, efx->mac_address));
645 if (!allow_bad_hwaddr) {
646 rc = -EINVAL;
647 goto err;
648 }
649 random_ether_addr(efx->net_dev->dev_addr);
650 EFX_INFO(efx, "using locally-generated MAC %s\n",
651 print_mac(mac, efx->net_dev->dev_addr));
652 }
653
654 return 0;
655
656 err:
657 efx_remove_port(efx);
658 return rc;
659}
660
661static int efx_init_port(struct efx_nic *efx)
662{
663 int rc;
664
665 EFX_LOG(efx, "init port\n");
666
667 /* Initialise the MAC and PHY */
668 rc = falcon_init_xmac(efx);
669 if (rc)
670 return rc;
671
672 efx->port_initialized = 1;
673
674 /* Reconfigure port to program MAC registers */
675 falcon_reconfigure_xmac(efx);
676
677 return 0;
678}
679
680/* Allow efx_reconfigure_port() to be scheduled, and close the window
681 * between efx_stop_port and efx_flush_all whereby a previously scheduled
682 * efx_reconfigure_port() may have been cancelled */
683static void efx_start_port(struct efx_nic *efx)
684{
685 EFX_LOG(efx, "start port\n");
686 BUG_ON(efx->port_enabled);
687
688 mutex_lock(&efx->mac_lock);
689 efx->port_enabled = 1;
690 __efx_reconfigure_port(efx);
691 mutex_unlock(&efx->mac_lock);
692}
693
694/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
695 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
696 * efx_reconfigure_work can still be scheduled via NAPI processing
697 * until efx_flush_all() is called */
698static void efx_stop_port(struct efx_nic *efx)
699{
700 EFX_LOG(efx, "stop port\n");
701
702 mutex_lock(&efx->mac_lock);
703 efx->port_enabled = 0;
704 mutex_unlock(&efx->mac_lock);
705
706 /* Serialise against efx_set_multicast_list() */
707 if (NET_DEV_REGISTERED(efx)) {
708 netif_tx_lock_bh(efx->net_dev);
709 netif_tx_unlock_bh(efx->net_dev);
710 }
711}
712
713static void efx_fini_port(struct efx_nic *efx)
714{
715 EFX_LOG(efx, "shut down port\n");
716
717 if (!efx->port_initialized)
718 return;
719
720 falcon_fini_xmac(efx);
721 efx->port_initialized = 0;
722
723 efx->link_up = 0;
724 efx_link_status_changed(efx);
725}
726
727static void efx_remove_port(struct efx_nic *efx)
728{
729 EFX_LOG(efx, "destroying port\n");
730
731 falcon_remove_port(efx);
732}
733
734/**************************************************************************
735 *
736 * NIC handling
737 *
738 **************************************************************************/
739
740/* This configures the PCI device to enable I/O and DMA. */
741static int efx_init_io(struct efx_nic *efx)
742{
743 struct pci_dev *pci_dev = efx->pci_dev;
744 dma_addr_t dma_mask = efx->type->max_dma_mask;
745 int rc;
746
747 EFX_LOG(efx, "initialising I/O\n");
748
749 rc = pci_enable_device(pci_dev);
750 if (rc) {
751 EFX_ERR(efx, "failed to enable PCI device\n");
752 goto fail1;
753 }
754
755 pci_set_master(pci_dev);
756
757 /* Set the PCI DMA mask. Try all possibilities from our
758 * genuine mask down to 32 bits, because some architectures
759 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
760	 * masks even though they reject 46 bit masks.
761 */
762 while (dma_mask > 0x7fffffffUL) {
763 if (pci_dma_supported(pci_dev, dma_mask) &&
764 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
765 break;
766 dma_mask >>= 1;
767 }
768 if (rc) {
769 EFX_ERR(efx, "could not find a suitable DMA mask\n");
770 goto fail2;
771 }
772 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
773 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
774 if (rc) {
775 /* pci_set_consistent_dma_mask() is not *allowed* to
776 * fail with a mask that pci_set_dma_mask() accepted,
777 * but just in case...
778 */
779 EFX_ERR(efx, "failed to set consistent DMA mask\n");
780 goto fail2;
781 }
782
783 efx->membase_phys = pci_resource_start(efx->pci_dev,
784 efx->type->mem_bar);
785 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
786 if (rc) {
787 EFX_ERR(efx, "request for memory BAR failed\n");
788 rc = -EIO;
789 goto fail3;
790 }
791 efx->membase = ioremap_nocache(efx->membase_phys,
792 efx->type->mem_map_size);
793 if (!efx->membase) {
794 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
795 efx->type->mem_bar, efx->membase_phys,
796 efx->type->mem_map_size);
797 rc = -ENOMEM;
798 goto fail4;
799 }
800 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
801 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
802 efx->membase);
803
804 return 0;
805
806 fail4:
807 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
808 fail3:
809 efx->membase_phys = 0UL;
810 fail2:
811 pci_disable_device(efx->pci_dev);
812 fail1:
813 return rc;
814}
815
816static void efx_fini_io(struct efx_nic *efx)
817{
818 EFX_LOG(efx, "shutting down I/O\n");
819
820 if (efx->membase) {
821 iounmap(efx->membase);
822 efx->membase = NULL;
823 }
824
825 if (efx->membase_phys) {
826 pci_release_region(efx->pci_dev, efx->type->mem_bar);
827 efx->membase_phys = 0UL;
828 }
829
830 pci_disable_device(efx->pci_dev);
831}
832
833/* Probe the number and type of interrupts we are able to obtain. */
834static void efx_probe_interrupts(struct efx_nic *efx)
835{
836 int max_channel = efx->type->phys_addr_channels - 1;
837 struct msix_entry xentries[EFX_MAX_CHANNELS];
838 int rc, i;
839
840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
841 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
842
843 efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
844 efx->rss_queues = min(efx->rss_queues, max_channel + 1);
845 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
846
847 /* Request maximum number of MSI interrupts, and fill out
848		 * the channel interrupt information from the allowed allocation */
849 for (i = 0; i < efx->rss_queues; i++)
850 xentries[i].entry = i;
851 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
852 if (rc > 0) {
853 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
854 efx->rss_queues = rc;
855 rc = pci_enable_msix(efx->pci_dev, xentries,
856 efx->rss_queues);
857 }
858
859 if (rc == 0) {
860 for (i = 0; i < efx->rss_queues; i++) {
861 efx->channel[i].has_interrupt = 1;
862 efx->channel[i].irq = xentries[i].vector;
863 }
864 } else {
865 /* Fall back to single channel MSI */
866 efx->interrupt_mode = EFX_INT_MODE_MSI;
867 EFX_ERR(efx, "could not enable MSI-X\n");
868 }
869 }
870
871 /* Try single interrupt MSI */
872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
873 efx->rss_queues = 1;
874 rc = pci_enable_msi(efx->pci_dev);
875 if (rc == 0) {
876 efx->channel[0].irq = efx->pci_dev->irq;
877 efx->channel[0].has_interrupt = 1;
878 } else {
879 EFX_ERR(efx, "could not enable MSI\n");
880 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
881 }
882 }
883
884 /* Assume legacy interrupts */
885 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
886 efx->rss_queues = 1;
887 /* Every channel is interruptible */
888 for (i = 0; i < EFX_MAX_CHANNELS; i++)
889 efx->channel[i].has_interrupt = 1;
890 efx->legacy_irq = efx->pci_dev->irq;
891 }
892}
893
894static void efx_remove_interrupts(struct efx_nic *efx)
895{
896 struct efx_channel *channel;
897
898 /* Remove MSI/MSI-X interrupts */
899 efx_for_each_channel_with_interrupt(channel, efx)
900 channel->irq = 0;
901 pci_disable_msi(efx->pci_dev);
902 pci_disable_msix(efx->pci_dev);
903
904 /* Remove legacy interrupt */
905 efx->legacy_irq = 0;
906}
907
908/* Select number of used resources
909 * Should be called after probe_interrupts()
910 */
911static void efx_select_used(struct efx_nic *efx)
912{
913 struct efx_tx_queue *tx_queue;
914 struct efx_rx_queue *rx_queue;
915 int i;
916
917 /* TX queues. One per port per channel with TX capability
918 * (more than one per port won't work on Linux, due to out
919 * of order issues... but will be fine on Solaris)
920 */
921 tx_queue = &efx->tx_queue[0];
922
923 /* Perform this for each channel with TX capabilities.
924 * At the moment, we only support a single TX queue
925 */
926 tx_queue->used = 1;
927 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
928 tx_queue->channel = &efx->channel[1];
929 else
930 tx_queue->channel = &efx->channel[0];
931 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
932 tx_queue++;
933
934 /* RX queues. Each has a dedicated channel. */
935 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
936 rx_queue = &efx->rx_queue[i];
937
938 if (i < efx->rss_queues) {
939 rx_queue->used = 1;
940 /* If we allow multiple RX queues per channel
941 * we need to decide that here
942 */
943 rx_queue->channel = &efx->channel[rx_queue->queue];
944 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
945 rx_queue++;
946 }
947 }
948}
949
950static int efx_probe_nic(struct efx_nic *efx)
951{
952 int rc;
953
954 EFX_LOG(efx, "creating NIC\n");
955
956 /* Carry out hardware-type specific initialisation */
957 rc = falcon_probe_nic(efx);
958 if (rc)
959 return rc;
960
961 /* Determine the number of channels and RX queues by trying to hook
962 * in MSI-X interrupts. */
963 efx_probe_interrupts(efx);
964
965 /* Determine number of RX queues and TX queues */
966 efx_select_used(efx);
967
968 /* Initialise the interrupt moderation settings */
969 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
970
971 return 0;
972}
973
974static void efx_remove_nic(struct efx_nic *efx)
975{
976 EFX_LOG(efx, "destroying NIC\n");
977
978 efx_remove_interrupts(efx);
979 falcon_remove_nic(efx);
980}
981
982/**************************************************************************
983 *
984 * NIC startup/shutdown
985 *
986 *************************************************************************/
987
988static int efx_probe_all(struct efx_nic *efx)
989{
990 struct efx_channel *channel;
991 int rc;
992
993 /* Create NIC */
994 rc = efx_probe_nic(efx);
995 if (rc) {
996 EFX_ERR(efx, "failed to create NIC\n");
997 goto fail1;
998 }
999
1000 /* Create port */
1001 rc = efx_probe_port(efx);
1002 if (rc) {
1003 EFX_ERR(efx, "failed to create port\n");
1004 goto fail2;
1005 }
1006
1007 /* Create channels */
1008 efx_for_each_channel(channel, efx) {
1009 rc = efx_probe_channel(channel);
1010 if (rc) {
1011 EFX_ERR(efx, "failed to create channel %d\n",
1012 channel->channel);
1013 goto fail3;
1014 }
1015 }
1016
1017 return 0;
1018
1019 fail3:
1020 efx_for_each_channel(channel, efx)
1021 efx_remove_channel(channel);
1022 efx_remove_port(efx);
1023 fail2:
1024 efx_remove_nic(efx);
1025 fail1:
1026 return rc;
1027}
1028
1029/* Called after previous invocation(s) of efx_stop_all, restarts the
1030 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1031 * and ensures that the port is scheduled to be reconfigured.
1032 * This function is safe to call multiple times when the NIC is in any
1033 * state. */
1034static void efx_start_all(struct efx_nic *efx)
1035{
1036 struct efx_channel *channel;
1037
1038 EFX_ASSERT_RESET_SERIALISED(efx);
1039
1040 /* Check that it is appropriate to restart the interface. All
1041 * of these flags are safe to read under just the rtnl lock */
1042 if (efx->port_enabled)
1043 return;
1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1045 return;
1046 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
1047 return;
1048
1049 /* Mark the port as enabled so port reconfigurations can start, then
1050 * restart the transmit interface early so the watchdog timer stops */
1051 efx_start_port(efx);
1052 efx_wake_queue(efx);
1053
1054 efx_for_each_channel(channel, efx)
1055 efx_start_channel(channel);
1056
1057 falcon_enable_interrupts(efx);
1058
1059 /* Start hardware monitor if we're in RUNNING */
1060 if (efx->state == STATE_RUNNING)
1061 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1062 efx_monitor_interval);
1063}
1064
1065/* Flush all delayed work. Should only be called when no more delayed work
1066 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1067 * since we're holding the rtnl_lock at this point. */
1068static void efx_flush_all(struct efx_nic *efx)
1069{
1070 struct efx_rx_queue *rx_queue;
1071
1072 /* Make sure the hardware monitor is stopped */
1073 cancel_delayed_work_sync(&efx->monitor_work);
1074
1075 /* Ensure that all RX slow refills are complete. */
1076 efx_for_each_rx_queue(rx_queue, efx) {
1077 cancel_delayed_work_sync(&rx_queue->work);
1078 }
1079
1080 /* Stop scheduled port reconfigurations */
1081 cancel_work_sync(&efx->reconfigure_work);
1082
1083}
1084
1085/* Quiesce hardware and software without bringing the link down.
1086 * Safe to call multiple times, when the NIC and interface are in any
1087 * state. The caller is guaranteed to subsequently be in a position
1088 * to modify any hardware and software state they see fit without
1089 * taking locks. */
1090static void efx_stop_all(struct efx_nic *efx)
1091{
1092 struct efx_channel *channel;
1093
1094 EFX_ASSERT_RESET_SERIALISED(efx);
1095
1096 /* port_enabled can be read safely under the rtnl lock */
1097 if (!efx->port_enabled)
1098 return;
1099
1100 /* Disable interrupts and wait for ISR to complete */
1101 falcon_disable_interrupts(efx);
1102 if (efx->legacy_irq)
1103 synchronize_irq(efx->legacy_irq);
1104 efx_for_each_channel_with_interrupt(channel, efx)
1105 if (channel->irq)
1106 synchronize_irq(channel->irq);
1107
1108 /* Stop all NAPI processing and synchronous rx refills */
1109 efx_for_each_channel(channel, efx)
1110 efx_stop_channel(channel);
1111
1112 /* Stop all asynchronous port reconfigurations. Since all
1113 * event processing has already been stopped, there is no
1114	 * window to lose phy events */
1115 efx_stop_port(efx);
1116
1117 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1118 efx_flush_all(efx);
1119
1120 /* Isolate the MAC from the TX and RX engines, so that queue
1121 * flushes will complete in a timely fashion. */
1122 falcon_deconfigure_mac_wrapper(efx);
1123 falcon_drain_tx_fifo(efx);
1124
1125 /* Stop the kernel transmit interface late, so the watchdog
1126 * timer isn't ticking over the flush */
1127 efx_stop_queue(efx);
1128 if (NET_DEV_REGISTERED(efx)) {
1129 netif_tx_lock_bh(efx->net_dev);
1130 netif_tx_unlock_bh(efx->net_dev);
1131 }
1132}
1133
1134static void efx_remove_all(struct efx_nic *efx)
1135{
1136 struct efx_channel *channel;
1137
1138 efx_for_each_channel(channel, efx)
1139 efx_remove_channel(channel);
1140 efx_remove_port(efx);
1141 efx_remove_nic(efx);
1142}
1143
1144/* A convenience function to safely flush all the queues */
1145int efx_flush_queues(struct efx_nic *efx)
1146{
1147 int rc;
1148
1149 EFX_ASSERT_RESET_SERIALISED(efx);
1150
1151 efx_stop_all(efx);
1152
1153 efx_fini_channels(efx);
1154 rc = efx_init_channels(efx);
1155 if (rc) {
1156 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1157 return rc;
1158 }
1159
1160 efx_start_all(efx);
1161
1162 return 0;
1163}
1164
1165/**************************************************************************
1166 *
1167 * Interrupt moderation
1168 *
1169 **************************************************************************/
1170
1171/* Set interrupt moderation parameters */
1172void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1173{
1174 struct efx_tx_queue *tx_queue;
1175 struct efx_rx_queue *rx_queue;
1176
1177 EFX_ASSERT_RESET_SERIALISED(efx);
1178
1179 efx_for_each_tx_queue(tx_queue, efx)
1180 tx_queue->channel->irq_moderation = tx_usecs;
1181
1182 efx_for_each_rx_queue(rx_queue, efx)
1183 rx_queue->channel->irq_moderation = rx_usecs;
1184}
1185
1186/**************************************************************************
1187 *
1188 * Hardware monitor
1189 *
1190 **************************************************************************/
1191
1192/* Run periodically off the general workqueue. Serialised against
1193 * efx_reconfigure_port via the mac_lock */
1194static void efx_monitor(struct work_struct *data)
1195{
1196 struct efx_nic *efx = container_of(data, struct efx_nic,
1197 monitor_work.work);
1198 int rc = 0;
1199
1200 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1201 raw_smp_processor_id());
1202
1203
1204 /* If the mac_lock is already held then it is likely a port
1205 * reconfiguration is already in place, which will likely do
1206 * most of the work of check_hw() anyway. */
1207 if (!mutex_trylock(&efx->mac_lock)) {
1208 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1209 efx_monitor_interval);
1210 return;
1211 }
1212
1213 if (efx->port_enabled)
1214 rc = falcon_check_xmac(efx);
1215 mutex_unlock(&efx->mac_lock);
1216
1217 if (rc) {
1218 if (monitor_reset) {
1219 EFX_ERR(efx, "hardware monitor detected a fault: "
1220 "triggering reset\n");
1221 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1222 } else {
1223 EFX_ERR(efx, "hardware monitor detected a fault, "
1224 "skipping reset\n");
1225 }
1226 }
1227
1228 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1229 efx_monitor_interval);
1230}
1231
1232/**************************************************************************
1233 *
1234 * ioctls
1235 *
1236 *************************************************************************/
1237
1238/* Net device ioctl
1239 * Context: process, rtnl_lock() held.
1240 */
1241static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1242{
1243 struct efx_nic *efx = net_dev->priv;
1244
1245 EFX_ASSERT_RESET_SERIALISED(efx);
1246
1247 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1248}
1249
1250/**************************************************************************
1251 *
1252 * NAPI interface
1253 *
1254 **************************************************************************/
1255
1256static int efx_init_napi(struct efx_nic *efx)
1257{
1258 struct efx_channel *channel;
1259 int rc;
1260
1261 efx_for_each_channel(channel, efx) {
1262 channel->napi_dev = efx->net_dev;
1263 rc = efx_lro_init(&channel->lro_mgr, efx);
1264 if (rc)
1265 goto err;
1266 }
1267 return 0;
1268 err:
1269 efx_fini_napi(efx);
1270 return rc;
1271}
1272
1273static void efx_fini_napi(struct efx_nic *efx)
1274{
1275 struct efx_channel *channel;
1276
1277 efx_for_each_channel(channel, efx) {
1278 efx_lro_fini(&channel->lro_mgr);
1279 channel->napi_dev = NULL;
1280 }
1281}
1282
1283/**************************************************************************
1284 *
1285 * Kernel netpoll interface
1286 *
1287 *************************************************************************/
1288
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290
1291/* Although in the common case interrupts will be disabled, this is not
1292 * guaranteed. However, all our work happens inside the NAPI callback,
1293 * so no locking is required.
1294 */
1295static void efx_netpoll(struct net_device *net_dev)
1296{
1297 struct efx_nic *efx = net_dev->priv;
1298 struct efx_channel *channel;
1299
1300 efx_for_each_channel_with_interrupt(channel, efx)
1301 efx_schedule_channel(channel);
1302}
1303
1304#endif
1305
1306/**************************************************************************
1307 *
1308 * Kernel net device interface
1309 *
1310 *************************************************************************/
1311
1312/* Context: process, rtnl_lock() held. */
1313static int efx_net_open(struct net_device *net_dev)
1314{
1315 struct efx_nic *efx = net_dev->priv;
1316 EFX_ASSERT_RESET_SERIALISED(efx);
1317
1318 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1319 raw_smp_processor_id());
1320
1321 efx_start_all(efx);
1322 return 0;
1323}
1324
1325/* Context: process, rtnl_lock() held.
1326 * Note that the kernel will ignore our return code; this method
1327 * should really be a void.
1328 */
1329static int efx_net_stop(struct net_device *net_dev)
1330{
1331 struct efx_nic *efx = net_dev->priv;
1332 int rc;
1333
1334 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1335 raw_smp_processor_id());
1336
1337 /* Stop the device and flush all the channels */
1338 efx_stop_all(efx);
1339 efx_fini_channels(efx);
1340 rc = efx_init_channels(efx);
1341 if (rc)
1342 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1343
1344 return 0;
1345}
1346
1347/* Context: process, dev_base_lock held, non-blocking. */
1348static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1349{
1350 struct efx_nic *efx = net_dev->priv;
1351 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1352 struct net_device_stats *stats = &net_dev->stats;
1353
1354 if (!spin_trylock(&efx->stats_lock))
1355 return stats;
1356 if (efx->state == STATE_RUNNING) {
1357 falcon_update_stats_xmac(efx);
1358 falcon_update_nic_stats(efx);
1359 }
1360 spin_unlock(&efx->stats_lock);
1361
1362 stats->rx_packets = mac_stats->rx_packets;
1363 stats->tx_packets = mac_stats->tx_packets;
1364 stats->rx_bytes = mac_stats->rx_bytes;
1365 stats->tx_bytes = mac_stats->tx_bytes;
1366 stats->multicast = mac_stats->rx_multicast;
1367 stats->collisions = mac_stats->tx_collision;
1368 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1369 mac_stats->rx_length_error);
1370 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1371 stats->rx_crc_errors = mac_stats->rx_bad;
1372 stats->rx_frame_errors = mac_stats->rx_align_error;
1373 stats->rx_fifo_errors = mac_stats->rx_overflow;
1374 stats->rx_missed_errors = mac_stats->rx_missed;
1375 stats->tx_window_errors = mac_stats->tx_late_collision;
1376
1377 stats->rx_errors = (stats->rx_length_errors +
1378 stats->rx_over_errors +
1379 stats->rx_crc_errors +
1380 stats->rx_frame_errors +
1381 stats->rx_fifo_errors +
1382 stats->rx_missed_errors +
1383 mac_stats->rx_symbol_error);
1384 stats->tx_errors = (stats->tx_window_errors +
1385 mac_stats->tx_bad);
1386
1387 return stats;
1388}
1389
1390/* Context: netif_tx_lock held, BHs disabled. */
1391static void efx_watchdog(struct net_device *net_dev)
1392{
1393 struct efx_nic *efx = net_dev->priv;
1394
1395 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1396 atomic_read(&efx->netif_stop_count), efx->port_enabled,
1397 monitor_reset ? "resetting channels" : "skipping reset");
1398
1399 if (monitor_reset)
1400 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1401}
1402
1403
1404/* Context: process, rtnl_lock() held. */
1405static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1406{
1407 struct efx_nic *efx = net_dev->priv;
1408 int rc = 0;
1409
1410 EFX_ASSERT_RESET_SERIALISED(efx);
1411
1412 if (new_mtu > EFX_MAX_MTU)
1413 return -EINVAL;
1414
1415 efx_stop_all(efx);
1416
1417 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1418
1419 efx_fini_channels(efx);
1420 net_dev->mtu = new_mtu;
1421 rc = efx_init_channels(efx);
1422 if (rc)
1423 goto fail;
1424
1425 efx_start_all(efx);
1426 return rc;
1427
1428 fail:
1429 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1430 return rc;
1431}
1432
1433static int efx_set_mac_address(struct net_device *net_dev, void *data)
1434{
1435 struct efx_nic *efx = net_dev->priv;
1436 struct sockaddr *addr = data;
1437 char *new_addr = addr->sa_data;
1438
1439 EFX_ASSERT_RESET_SERIALISED(efx);
1440
1441 if (!is_valid_ether_addr(new_addr)) {
1442 DECLARE_MAC_BUF(mac);
1443 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
1444 print_mac(mac, new_addr));
1445 return -EINVAL;
1446 }
1447
1448 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1449
1450 /* Reconfigure the MAC */
1451 efx_reconfigure_port(efx);
1452
1453 return 0;
1454}
1455
1456/* Context: netif_tx_lock held, BHs disabled. */
1457static void efx_set_multicast_list(struct net_device *net_dev)
1458{
1459 struct efx_nic *efx = net_dev->priv;
1460 struct dev_mc_list *mc_list = net_dev->mc_list;
1461 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1462 int promiscuous;
1463 u32 crc;
1464 int bit;
1465 int i;
1466
1467 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
1468 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1469 if (efx->promiscuous != promiscuous) {
1470 efx->promiscuous = promiscuous;
1471 /* Close the window between efx_stop_port() and efx_flush_all()
1472 * by only queuing work when the port is enabled. */
1473 if (efx->port_enabled)
1474 queue_work(efx->workqueue, &efx->reconfigure_work);
1475 }
1476
1477 /* Build multicast hash table */
1478 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1479 memset(mc_hash, 0xff, sizeof(*mc_hash));
1480 } else {
1481 memset(mc_hash, 0x00, sizeof(*mc_hash));
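		/* Imperfect hash filtering: the low bits of the little-endian
		 * CRC of each address select one bit to set in the filter. */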
1482 for (i = 0; i < net_dev->mc_count; i++) {
1483 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1484 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1485 set_bit_le(bit, mc_hash->byte);
1486 mc_list = mc_list->next;
1487 }
1488 }
1489
1490 /* Create and activate new global multicast hash table */
1491 falcon_set_multicast_hash(efx);
1492}
1493
1494static int efx_netdev_event(struct notifier_block *this,
1495 unsigned long event, void *ptr)
1496{
1497 struct net_device *net_dev = (struct net_device *)ptr;
1498
1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1500 struct efx_nic *efx = net_dev->priv;
1501
1502 strcpy(efx->name, net_dev->name);
1503 }
1504
1505 return NOTIFY_DONE;
1506}
1507
1508static struct notifier_block efx_netdev_notifier = {
1509 .notifier_call = efx_netdev_event,
1510};
1511
1512static int efx_register_netdev(struct efx_nic *efx)
1513{
1514 struct net_device *net_dev = efx->net_dev;
1515 int rc;
1516
1517 net_dev->watchdog_timeo = 5 * HZ;
1518 net_dev->irq = efx->pci_dev->irq;
1519 net_dev->open = efx_net_open;
1520 net_dev->stop = efx_net_stop;
1521 net_dev->get_stats = efx_net_stats;
1522 net_dev->tx_timeout = &efx_watchdog;
1523 net_dev->hard_start_xmit = efx_hard_start_xmit;
1524 net_dev->do_ioctl = efx_ioctl;
1525 net_dev->change_mtu = efx_change_mtu;
1526 net_dev->set_mac_address = efx_set_mac_address;
1527 net_dev->set_multicast_list = efx_set_multicast_list;
1528#ifdef CONFIG_NET_POLL_CONTROLLER
1529 net_dev->poll_controller = efx_netpoll;
1530#endif
1531 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1532 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1533
1534 /* Always start with carrier off; PHY events will detect the link */
1535 netif_carrier_off(efx->net_dev);
1536
1537 /* Clear MAC statistics */
1538 falcon_update_stats_xmac(efx);
1539 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1540
1541 rc = register_netdev(net_dev);
1542 if (rc) {
1543 EFX_ERR(efx, "could not register net dev\n");
1544 return rc;
1545 }
1546 strcpy(efx->name, net_dev->name);
1547
1548 return 0;
1549}
1550
1551static void efx_unregister_netdev(struct efx_nic *efx)
1552{
1553 struct efx_tx_queue *tx_queue;
1554
1555 if (!efx->net_dev)
1556 return;
1557
1558 BUG_ON(efx->net_dev->priv != efx);
1559
1560	/* Free up any skbs still remaining. This has to happen before
1561	 * we try to unregister the netdev, because running their destructors
1562	 * may be needed to bring the device reference count to zero. */
1563 efx_for_each_tx_queue(tx_queue, efx)
1564 efx_release_tx_buffers(tx_queue);
1565
1566 if (NET_DEV_REGISTERED(efx)) {
1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1568 unregister_netdev(efx->net_dev);
1569 }
1570}
1571
1572/**************************************************************************
1573 *
1574 * Device reset and suspend
1575 *
1576 **************************************************************************/
1577
1578/* The final hardware and software shutdown before a reset. */
1579static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1580{
1581 int rc;
1582
1583 EFX_ASSERT_RESET_SERIALISED(efx);
1584
1585 rc = falcon_xmac_get_settings(efx, ecmd);
1586 if (rc) {
1587 EFX_ERR(efx, "could not back up PHY settings\n");
1588 goto fail;
1589 }
1590
1591 efx_fini_channels(efx);
1592 return 0;
1593
1594 fail:
1595 return rc;
1596}
1597
1598/* The first part of software initialisation after a hardware reset.
1599 * This function does not handle serialisation with the kernel; it
1600 * assumes the caller has done this. */
1601static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1602{
1603 int rc;
1604
1605 rc = efx_init_channels(efx);
1606 if (rc)
1607 goto fail1;
1608
1609 /* Restore MAC and PHY settings. */
1610 rc = falcon_xmac_set_settings(efx, ecmd);
1611 if (rc) {
1612 EFX_ERR(efx, "could not restore PHY settings\n");
1613 goto fail2;
1614 }
1615
1616 return 0;
1617
1618 fail2:
1619 efx_fini_channels(efx);
1620 fail1:
1621 return rc;
1622}
1623
1624/* Reset the NIC as transparently as possible. Do not reset the PHY.
1625 * Note that the reset may fail, in which case the card will be left
1626 * in a most-probably-unusable state.
1627 *
1628 * This function will sleep. You cannot reset from within an atomic
1629 * state; use efx_schedule_reset() instead.
1630 *
1631 * Grabs the rtnl_lock.
1632 */
1633static int efx_reset(struct efx_nic *efx)
1634{
1635 struct ethtool_cmd ecmd;
1636 enum reset_type method = efx->reset_pending;
1637 int rc;
1638
1639 /* Serialise with kernel interfaces */
1640 rtnl_lock();
1641
1642 /* If we're not RUNNING then don't reset. Leave the reset_pending
1643 * flag set so that efx_pci_probe_main will be retried */
1644 if (efx->state != STATE_RUNNING) {
1645 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1646 goto unlock_rtnl;
1647 }
1648
1649 efx->state = STATE_RESETTING;
1650 EFX_INFO(efx, "resetting (%d)\n", method);
1651
1652 /* The net_dev->get_stats handler is quite slow, and will fail
1653 * if a fetch is pending over reset. Serialise against it. */
1654 spin_lock(&efx->stats_lock);
1655 spin_unlock(&efx->stats_lock);
1656
1657 efx_stop_all(efx);
1658 mutex_lock(&efx->mac_lock);
1659
1660 rc = efx_reset_down(efx, &ecmd);
1661 if (rc)
1662 goto fail1;
1663
1664 rc = falcon_reset_hw(efx, method);
1665 if (rc) {
1666 EFX_ERR(efx, "failed to reset hardware\n");
1667 goto fail2;
1668 }
1669
1670 /* Allow resets to be rescheduled. */
1671 efx->reset_pending = RESET_TYPE_NONE;
1672
1673 /* Reinitialise bus-mastering, which may have been turned off before
1674 * the reset was scheduled. This is still appropriate, even in the
1675	 * RESET_TYPE_DISABLE case, since this driver generally assumes the hardware
1676 * can respond to requests. */
1677 pci_set_master(efx->pci_dev);
1678
1679 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1680 * case so the driver can talk to external SRAM */
1681 rc = falcon_init_nic(efx);
1682 if (rc) {
1683 EFX_ERR(efx, "failed to initialise NIC\n");
1684 goto fail3;
1685 }
1686
1687 /* Leave device stopped if necessary */
1688 if (method == RESET_TYPE_DISABLE) {
1689 /* Reinitialise the device anyway so the driver unload sequence
1690 * can talk to the external SRAM */
1691 (void) falcon_init_nic(efx);
1692 rc = -EIO;
1693 goto fail4;
1694 }
1695
1696 rc = efx_reset_up(efx, &ecmd);
1697 if (rc)
1698 goto fail5;
1699
1700 mutex_unlock(&efx->mac_lock);
1701 EFX_LOG(efx, "reset complete\n");
1702
1703 efx->state = STATE_RUNNING;
1704 efx_start_all(efx);
1705
1706 unlock_rtnl:
1707 rtnl_unlock();
1708 return 0;
1709
1710 fail5:
1711 fail4:
1712 fail3:
1713 fail2:
1714 fail1:
1715 EFX_ERR(efx, "has been disabled\n");
1716 efx->state = STATE_DISABLED;
1717
1718 mutex_unlock(&efx->mac_lock);
1719 rtnl_unlock();
1720 efx_unregister_netdev(efx);
1721 efx_fini_port(efx);
1722 return rc;
1723}
1724
1725/* The worker thread exists so that code that cannot sleep can
1726 * schedule a reset for later.
1727 */
1728static void efx_reset_work(struct work_struct *data)
1729{
1730 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1731
1732 efx_reset(nic);
1733}
1734
1735void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1736{
1737 enum reset_type method;
1738
1739 if (efx->reset_pending != RESET_TYPE_NONE) {
1740 EFX_INFO(efx, "quenching already scheduled reset\n");
1741 return;
1742 }
1743
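	/* Map a reset reason onto a reset method; explicit methods pass
	 * through unchanged. */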
1744 switch (type) {
1745 case RESET_TYPE_INVISIBLE:
1746 case RESET_TYPE_ALL:
1747 case RESET_TYPE_WORLD:
1748 case RESET_TYPE_DISABLE:
1749 method = type;
1750 break;
1751 case RESET_TYPE_RX_RECOVERY:
1752 case RESET_TYPE_RX_DESC_FETCH:
1753 case RESET_TYPE_TX_DESC_FETCH:
1754 case RESET_TYPE_TX_SKIP:
1755 method = RESET_TYPE_INVISIBLE;
1756 break;
1757 default:
1758 method = RESET_TYPE_ALL;
1759 break;
1760 }
1761
1762 if (method != type)
1763 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1764 else
1765 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1766
1767 efx->reset_pending = method;
1768
1769 queue_work(efx->workqueue, &efx->reset_work);
1770}
1771
1772/**************************************************************************
1773 *
1774 * List of NICs we support
1775 *
1776 **************************************************************************/
1777
1778/* PCI device ID table */
1779static struct pci_device_id efx_pci_table[] __devinitdata = {
1780 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1781 .driver_data = (unsigned long) &falcon_a_nic_type},
1782 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1783 .driver_data = (unsigned long) &falcon_b_nic_type},
1784 {0} /* end of list */
1785};
1786
1787/**************************************************************************
1788 *
1789 * Dummy PHY/MAC/Board operations
1790 *
1791 * Can be used where the MAC does not implement this operation.
1792 * Needed so all function pointers are valid and do not have to be tested
1793 * before use.
1794 *
1795 **************************************************************************/
1796int efx_port_dummy_op_int(struct efx_nic *efx)
1797{
1798 return 0;
1799}
1800void efx_port_dummy_op_void(struct efx_nic *efx) {}
1801void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
1802
1803static struct efx_phy_operations efx_dummy_phy_operations = {
1804 .init = efx_port_dummy_op_int,
1805 .reconfigure = efx_port_dummy_op_void,
1806 .check_hw = efx_port_dummy_op_int,
1807 .fini = efx_port_dummy_op_void,
1808 .clear_interrupt = efx_port_dummy_op_void,
1809 .reset_xaui = efx_port_dummy_op_void,
1810};
1811
1812/* Dummy board operations */
1813static int efx_nic_dummy_op_int(struct efx_nic *nic)
1814{
1815 return 0;
1816}
1817
1818static struct efx_board efx_dummy_board_info = {
1819 .init = efx_nic_dummy_op_int,
1820 .init_leds = efx_port_dummy_op_int,
1821 .set_fault_led = efx_port_dummy_op_blink,
1822};
1823
1824/**************************************************************************
1825 *
1826 * Data housekeeping
1827 *
1828 **************************************************************************/
1829
1830/* This zeroes out and then fills in the invariants in a struct
1831 * efx_nic (including all sub-structures).
1832 */
1833static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1834 struct pci_dev *pci_dev, struct net_device *net_dev)
1835{
1836 struct efx_channel *channel;
1837 struct efx_tx_queue *tx_queue;
1838 struct efx_rx_queue *rx_queue;
1839 int i, rc;
1840
1841 /* Initialise common structures */
1842 memset(efx, 0, sizeof(*efx));
1843 spin_lock_init(&efx->biu_lock);
1844 spin_lock_init(&efx->phy_lock);
1845 INIT_WORK(&efx->reset_work, efx_reset_work);
1846 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1847 efx->pci_dev = pci_dev;
1848 efx->state = STATE_INIT;
1849 efx->reset_pending = RESET_TYPE_NONE;
1850 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1851 efx->board_info = efx_dummy_board_info;
1852
1853 efx->net_dev = net_dev;
1854 efx->rx_checksum_enabled = 1;
1855 spin_lock_init(&efx->netif_stop_lock);
1856 spin_lock_init(&efx->stats_lock);
1857 mutex_init(&efx->mac_lock);
1858 efx->phy_op = &efx_dummy_phy_operations;
1859 efx->mii.dev = net_dev;
1860 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1861 atomic_set(&efx->netif_stop_count, 1);
1862
1863 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1864 channel = &efx->channel[i];
1865 channel->efx = efx;
1866 channel->channel = i;
1867 channel->evqnum = i;
1868 channel->work_pending = 0;
1869 }
1870 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
1871 tx_queue = &efx->tx_queue[i];
1872 tx_queue->efx = efx;
1873 tx_queue->queue = i;
1874 tx_queue->buffer = NULL;
1875 tx_queue->channel = &efx->channel[0]; /* for safety */
1876 }
1877 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1878 rx_queue = &efx->rx_queue[i];
1879 rx_queue->efx = efx;
1880 rx_queue->queue = i;
1881 rx_queue->channel = &efx->channel[0]; /* for safety */
1882 rx_queue->buffer = NULL;
1883 spin_lock_init(&rx_queue->add_lock);
1884 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1885 }
1886
1887 efx->type = type;
1888
1889 /* Sanity-check NIC type */
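	/* A ring mask passes (mask & (mask + 1)) == 0 only if it has the
	 * form 2^n - 1, and the event queue size must be a power of two. */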
1890 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1891 (efx->type->txd_ring_mask + 1));
1892 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1893 (efx->type->rxd_ring_mask + 1));
1894 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1895 (efx->type->evq_size - 1));
1896 /* As close as we can get to guaranteeing that we don't overflow */
1897 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1898 (efx->type->txd_ring_mask + 1 +
1899 efx->type->rxd_ring_mask + 1));
1900 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1901
1902 /* Higher numbered interrupt modes are less capable! */
1903 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1904 interrupt_mode);
1905
1906 efx->workqueue = create_singlethread_workqueue("sfc_work");
1907 if (!efx->workqueue) {
1908 rc = -ENOMEM;
1909 goto fail1;
1910 }
1911
1912 return 0;
1913
1914 fail1:
1915 return rc;
1916}
1917
1918static void efx_fini_struct(struct efx_nic *efx)
1919{
1920 if (efx->workqueue) {
1921 destroy_workqueue(efx->workqueue);
1922 efx->workqueue = NULL;
1923 }
1924}
1925
1926/**************************************************************************
1927 *
1928 * PCI interface
1929 *
1930 **************************************************************************/
1931
1932/* Main body of final NIC shutdown code
1933 * This is called only at module unload (or hotplug removal).
1934 */
1935static void efx_pci_remove_main(struct efx_nic *efx)
1936{
1937 EFX_ASSERT_RESET_SERIALISED(efx);
1938
1939 /* Skip everything if we never obtained a valid membase */
1940 if (!efx->membase)
1941 return;
1942
1943 efx_fini_channels(efx);
1944 efx_fini_port(efx);
1945
1946 /* Shutdown the board, then the NIC and board state */
1947 falcon_fini_interrupt(efx);
1948
1949 efx_fini_napi(efx);
1950 efx_remove_all(efx);
1951}
1952
1953/* Final NIC shutdown
1954 * This is called only at module unload (or hotplug removal).
1955 */
1956static void efx_pci_remove(struct pci_dev *pci_dev)
1957{
1958 struct efx_nic *efx;
1959
1960 efx = pci_get_drvdata(pci_dev);
1961 if (!efx)
1962 return;
1963
1964 /* Mark the NIC as fini, then stop the interface */
1965 rtnl_lock();
1966 efx->state = STATE_FINI;
1967 dev_close(efx->net_dev);
1968
1969 /* Allow any queued efx_resets() to complete */
1970 rtnl_unlock();
1971
1972 if (efx->membase == NULL)
1973 goto out;
1974
1975 efx_unregister_netdev(efx);
1976
1977 /* Wait for any scheduled resets to complete. No more will be
1978 * scheduled from this point because efx_stop_all() has been
1979 * called, we are no longer registered with driverlink, and
1980	 * the net devices have been removed. */
1981 flush_workqueue(efx->workqueue);
1982
1983 efx_pci_remove_main(efx);
1984
1985out:
1986 efx_fini_io(efx);
1987 EFX_LOG(efx, "shutdown successful\n");
1988
1989 pci_set_drvdata(pci_dev, NULL);
1990 efx_fini_struct(efx);
1991 free_netdev(efx->net_dev);
1992};
1993
1994/* Main body of NIC initialisation
1995 * This is called at module load (or hotplug insertion, theoretically).
1996 */
1997static int efx_pci_probe_main(struct efx_nic *efx)
1998{
1999 int rc;
2000
2001 /* Do start-of-day initialisation */
2002 rc = efx_probe_all(efx);
2003 if (rc)
2004 goto fail1;
2005
2006 rc = efx_init_napi(efx);
2007 if (rc)
2008 goto fail2;
2009
2010 /* Initialise the board */
2011 rc = efx->board_info.init(efx);
2012 if (rc) {
2013 EFX_ERR(efx, "failed to initialise board\n");
2014 goto fail3;
2015 }
2016
2017 rc = falcon_init_nic(efx);
2018 if (rc) {
2019 EFX_ERR(efx, "failed to initialise NIC\n");
2020 goto fail4;
2021 }
2022
2023 rc = efx_init_port(efx);
2024 if (rc) {
2025 EFX_ERR(efx, "failed to initialise port\n");
2026 goto fail5;
2027 }
2028
2029 rc = efx_init_channels(efx);
2030 if (rc)
2031 goto fail6;
2032
2033 rc = falcon_init_interrupt(efx);
2034 if (rc)
2035 goto fail7;
2036
2037 return 0;
2038
2039 fail7:
2040 efx_fini_channels(efx);
2041 fail6:
2042 efx_fini_port(efx);
2043 fail5:
2044 fail4:
2045 fail3:
2046 efx_fini_napi(efx);
2047 fail2:
2048 efx_remove_all(efx);
2049 fail1:
2050 return rc;
2051}
2052
2053/* NIC initialisation
2054 *
2055 * This is called at module load (or hotplug insertion,
2056 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2057 * sets up and registers the network devices with the kernel and hooks
2058 * the interrupt service routine. It does not prepare the device for
2059 * transmission; this is left to the first time one of the network
2060 * interfaces is brought up (i.e. efx_net_open).
2061 */
2062static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2063 const struct pci_device_id *entry)
2064{
2065 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2066 struct net_device *net_dev;
2067 struct efx_nic *efx;
2068 int i, rc;
2069
2070 /* Allocate and initialise a struct net_device and struct efx_nic */
2071 net_dev = alloc_etherdev(sizeof(*efx));
2072 if (!net_dev)
2073 return -ENOMEM;
2074 net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
2075 if (lro)
2076 net_dev->features |= NETIF_F_LRO;
2077 efx = net_dev->priv;
2078 pci_set_drvdata(pci_dev, efx);
2079 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2080 if (rc)
2081 goto fail1;
2082
2083 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2084
2085 /* Set up basic I/O (BAR mappings etc) */
2086 rc = efx_init_io(efx);
2087 if (rc)
2088 goto fail2;
2089
2090 /* No serialisation is required with the reset path because
2091 * we're in STATE_INIT. */
2092 for (i = 0; i < 5; i++) {
2093 rc = efx_pci_probe_main(efx);
2094 if (rc == 0)
2095 break;
2096
2097 /* Serialise against efx_reset(). No more resets will be
2098 * scheduled since efx_stop_all() has been called, and we
2099	 * have never been registered with either
2100 * the rtnetlink or driverlink layers. */
2101 cancel_work_sync(&efx->reset_work);
2102
2103		/* Retry if a recoverable reset event has been scheduled */
2104 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2105 (efx->reset_pending != RESET_TYPE_ALL))
2106 goto fail3;
2107
2108 efx->reset_pending = RESET_TYPE_NONE;
2109 }
2110
2111 if (rc) {
2112 EFX_ERR(efx, "Could not reset NIC\n");
2113 goto fail4;
2114 }
2115
2116 /* Switch to the running state before we expose the device to
2117 * the OS. This is to ensure that the initial gathering of
2118 * MAC stats succeeds. */
2119 rtnl_lock();
2120 efx->state = STATE_RUNNING;
2121 rtnl_unlock();
2122
2123 rc = efx_register_netdev(efx);
2124 if (rc)
2125 goto fail5;
2126
2127 EFX_LOG(efx, "initialisation successful\n");
2128
2129 return 0;
2130
2131 fail5:
2132 efx_pci_remove_main(efx);
2133 fail4:
2134 fail3:
2135 efx_fini_io(efx);
2136 fail2:
2137 efx_fini_struct(efx);
2138 fail1:
2139 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2140 free_netdev(net_dev);
2141 return rc;
2142}
2143
2144static struct pci_driver efx_pci_driver = {
2145 .name = EFX_DRIVER_NAME,
2146 .id_table = efx_pci_table,
2147 .probe = efx_pci_probe,
2148 .remove = efx_pci_remove,
2149};
2150
2151/**************************************************************************
2152 *
2153 * Kernel module interface
2154 *
2155 *************************************************************************/
2156
2157module_param(interrupt_mode, uint, 0444);
2158MODULE_PARM_DESC(interrupt_mode,
2159 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2160
2161static int __init efx_init_module(void)
2162{
2163 int rc;
2164
2165 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2166
2167 rc = register_netdevice_notifier(&efx_netdev_notifier);
2168 if (rc)
2169 goto err_notifier;
2170
2171 refill_workqueue = create_workqueue("sfc_refill");
2172 if (!refill_workqueue) {
2173 rc = -ENOMEM;
2174 goto err_refill;
2175 }
2176
2177 rc = pci_register_driver(&efx_pci_driver);
2178 if (rc < 0)
2179 goto err_pci;
2180
2181 return 0;
2182
2183 err_pci:
2184 destroy_workqueue(refill_workqueue);
2185 err_refill:
2186 unregister_netdevice_notifier(&efx_netdev_notifier);
2187 err_notifier:
2188 return rc;
2189}
2190
2191static void __exit efx_exit_module(void)
2192{
2193 printk(KERN_INFO "Solarflare NET driver unloading\n");
2194
2195 pci_unregister_driver(&efx_pci_driver);
2196 destroy_workqueue(refill_workqueue);
2197 unregister_netdevice_notifier(&efx_netdev_notifier);
2198
2199}
2200
2201module_init(efx_init_module);
2202module_exit(efx_exit_module);
2203
2204MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2205 "Solarflare Communications");
2206MODULE_DESCRIPTION("Solarflare Communications network driver");
2207MODULE_LICENSE("GPL");
2208MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
new file mode 100644
index 000000000000..3b2f69f4a9ab
--- /dev/null
+++ b/drivers/net/sfc/efx.h
@@ -0,0 +1,67 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_EFX_H
12#define EFX_EFX_H
13
14#include "net_driver.h"
15
16/* PCI IDs */
17#define EFX_VENDID_SFC 0x1924
18#define FALCON_A_P_DEVID 0x0703
19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710
21
22/* TX */
23extern int efx_xmit(struct efx_nic *efx,
24 struct efx_tx_queue *tx_queue, struct sk_buff *skb);
25extern void efx_stop_queue(struct efx_nic *efx);
26extern void efx_wake_queue(struct efx_nic *efx);
27
28/* RX */
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
31 unsigned int len, int checksummed, int discard);
32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
33
34/* Channels */
35extern void efx_process_channel_now(struct efx_channel *channel);
36extern int efx_flush_queues(struct efx_nic *efx);
37
38/* Ports */
39extern void efx_reconfigure_port(struct efx_nic *efx);
40
41/* Global */
42extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
43extern void efx_suspend(struct efx_nic *efx);
44extern void efx_resume(struct efx_nic *efx);
45extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
46 int rx_usecs);
47extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
48extern void efx_hex_dump(const u8 *, unsigned int, const char *);
49
50/* Dummy PHY ops for PHY drivers */
51extern int efx_port_dummy_op_int(struct efx_nic *efx);
52extern void efx_port_dummy_op_void(struct efx_nic *efx);
53extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
54
55
56extern unsigned int efx_monitor_interval;
57
58static inline void efx_schedule_channel(struct efx_channel *channel)
59{
60 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
61 channel->channel, raw_smp_processor_id());
62 channel->work_pending = 1;
63
64 netif_rx_schedule(channel->napi_dev, &channel->napi_str);
65}
66
67#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
new file mode 100644
index 000000000000..43663a4619da
--- /dev/null
+++ b/drivers/net/sfc/enum.h
@@ -0,0 +1,50 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_ENUM_H
11#define EFX_ENUM_H
12
13/*****************************************************************************/
14
15/**
16 * enum reset_type - reset types
17 *
18 * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
19 * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
20 * other values specify reasons, which efx_schedule_reset() will choose
21 * a method for.
22 *
23 * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
24 * @RESET_TYPE_ALL: reset everything but PCI core blocks
25 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
26 * @RESET_TYPE_DISABLE: disable NIC
27 * @RESET_TYPE_MONITOR: reset due to hardware monitor
28 * @RESET_TYPE_INT_ERROR: reset due to internal error
29 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
30 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
31 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
32 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
33 */
34enum reset_type {
35 RESET_TYPE_NONE = -1,
36 RESET_TYPE_INVISIBLE = 0,
37 RESET_TYPE_ALL = 1,
38 RESET_TYPE_WORLD = 2,
39 RESET_TYPE_DISABLE = 3,
40 RESET_TYPE_MAX_METHOD,
41 RESET_TYPE_MONITOR,
42 RESET_TYPE_INT_ERROR,
43 RESET_TYPE_RX_RECOVERY,
44 RESET_TYPE_RX_DESC_FETCH,
45 RESET_TYPE_TX_DESC_FETCH,
46 RESET_TYPE_TX_SKIP,
47 RESET_TYPE_MAX,
48};
49
50#endif /* EFX_ENUM_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
new file mode 100644
index 000000000000..ad541badbd98
--- /dev/null
+++ b/drivers/net/sfc/ethtool.c
@@ -0,0 +1,460 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h>
14#include "net_driver.h"
15#include "efx.h"
16#include "ethtool.h"
17#include "falcon.h"
18#include "gmii.h"
19#include "mac.h"
20
21static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
22
23struct ethtool_string {
24 char name[ETH_GSTRING_LEN];
25};
26
27struct efx_ethtool_stat {
28 const char *name;
29 enum {
30 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
31 EFX_ETHTOOL_STAT_SOURCE_nic,
32 EFX_ETHTOOL_STAT_SOURCE_channel
33 } source;
34 unsigned offset;
35 u64(*get_stat) (void *field); /* Reader function */
36};
37
38/* Initialiser for a struct #efx_ethtool_stat with type-checking */
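/* (The pointer comparison below never changes the resulting offset; it
 * exists only to force a compile-time warning if @field does not have
 * type @field_type.) */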
39#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
40 get_stat_function) { \
41 .name = #stat_name, \
42 .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
43 .offset = ((((field_type *) 0) == \
44 &((struct efx_##source_name *)0)->field) ? \
45 offsetof(struct efx_##source_name, field) : \
46 offsetof(struct efx_##source_name, field)), \
47 .get_stat = get_stat_function, \
48}
49
50static u64 efx_get_uint_stat(void *field)
51{
52 return *(unsigned int *)field;
53}
54
55static u64 efx_get_ulong_stat(void *field)
56{
57 return *(unsigned long *)field;
58}
59
60static u64 efx_get_u64_stat(void *field)
61{
62 return *(u64 *) field;
63}
64
65static u64 efx_get_atomic_stat(void *field)
66{
67 return atomic_read((atomic_t *) field);
68}
69
70#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
71 EFX_ETHTOOL_STAT(field, mac_stats, field, \
72 unsigned long, efx_get_ulong_stat)
73
74#define EFX_ETHTOOL_U64_MAC_STAT(field) \
75 EFX_ETHTOOL_STAT(field, mac_stats, field, \
76 u64, efx_get_u64_stat)
77
78#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
79 EFX_ETHTOOL_STAT(name, nic, n_##name, \
80 unsigned int, efx_get_uint_stat)
81
82#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
83 EFX_ETHTOOL_STAT(field, nic, field, \
84 atomic_t, efx_get_atomic_stat)
85
86#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
88 unsigned int, efx_get_uint_stat)
89
90static struct efx_ethtool_stat efx_ethtool_stats[] = {
91 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
92 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
93 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
94 EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
95 EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
96 EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
97 EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
98 EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
99 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
100 EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
101 EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
102 EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
103 EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
104 EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
105 EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
106 EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
107 EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
108 EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
109 EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
110 EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
111 EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
112 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
113 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
114 EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
115 EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
119 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
122 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
123 EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
124 EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
125 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
126 EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
127 EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
128 EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
129 EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
130 EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
131 EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
132 EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
133 EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
134 EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
135 EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
136 EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
137 EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
138 EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
139 EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
140 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
141 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
142 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
143 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
144 EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
145 EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
146 EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
147 EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
148 EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
149 EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
150 EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
151 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
152 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157};
158
159/* Number of ethtool statistics */
160#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
161
162/**************************************************************************
163 *
164 * Ethtool operations
165 *
166 **************************************************************************
167 */
168
169/* Identify device by flashing LEDs */
170static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
171{
172 struct efx_nic *efx = net_dev->priv;
173
174 efx->board_info.blink(efx, 1);
175 schedule_timeout_interruptible(seconds * HZ);
176 efx->board_info.blink(efx, 0);
177 return 0;
178}
179
180/* This must be called with rtnl_lock held. */
181int efx_ethtool_get_settings(struct net_device *net_dev,
182 struct ethtool_cmd *ecmd)
183{
184 struct efx_nic *efx = net_dev->priv;
185 int rc;
186
187 mutex_lock(&efx->mac_lock);
188 rc = falcon_xmac_get_settings(efx, ecmd);
189 mutex_unlock(&efx->mac_lock);
190
191 return rc;
192}
193
194/* This must be called with rtnl_lock held. */
195int efx_ethtool_set_settings(struct net_device *net_dev,
196 struct ethtool_cmd *ecmd)
197{
198 struct efx_nic *efx = net_dev->priv;
199 int rc;
200
201 mutex_lock(&efx->mac_lock);
202 rc = falcon_xmac_set_settings(efx, ecmd);
203 mutex_unlock(&efx->mac_lock);
204 if (!rc)
205 efx_reconfigure_port(efx);
206
207 return rc;
208}
209
210static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
211 struct ethtool_drvinfo *info)
212{
213 struct efx_nic *efx = net_dev->priv;
214
215 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
216 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
218}
219
220static int efx_ethtool_get_stats_count(struct net_device *net_dev)
221{
222 return EFX_ETHTOOL_NUM_STATS;
223}
224
225static void efx_ethtool_get_strings(struct net_device *net_dev,
226 u32 string_set, u8 *strings)
227{
228 struct ethtool_string *ethtool_strings =
229 (struct ethtool_string *)strings;
230 int i;
231
232 if (string_set == ETH_SS_STATS)
233 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
234 strncpy(ethtool_strings[i].name,
235 efx_ethtool_stats[i].name,
236 sizeof(ethtool_strings[i].name));
237}
238
239static void efx_ethtool_get_stats(struct net_device *net_dev,
240 struct ethtool_stats *stats,
241 u64 *data)
242{
243 struct efx_nic *efx = net_dev->priv;
244 struct efx_mac_stats *mac_stats = &efx->mac_stats;
245 struct efx_ethtool_stat *stat;
246 struct efx_channel *channel;
247 int i;
248
249 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
250
251 /* Update MAC and NIC statistics */
252 net_dev->get_stats(net_dev);
253
254 /* Fill detailed statistics buffer */
255 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
256 stat = &efx_ethtool_stats[i];
257 switch (stat->source) {
258 case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
259 data[i] = stat->get_stat((void *)mac_stats +
260 stat->offset);
261 break;
262 case EFX_ETHTOOL_STAT_SOURCE_nic:
263 data[i] = stat->get_stat((void *)efx + stat->offset);
264 break;
265 case EFX_ETHTOOL_STAT_SOURCE_channel:
266 data[i] = 0;
267 efx_for_each_channel(channel, efx)
268 data[i] += stat->get_stat((void *)channel +
269 stat->offset);
270 break;
271 }
272 }
273}
274
275static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
276{
277 struct efx_nic *efx = net_dev->priv;
278 int rc;
279
280 rc = ethtool_op_set_tx_csum(net_dev, enable);
281 if (rc)
282 return rc;
283
284 efx_flush_queues(efx);
285
286 return 0;
287}
288
289static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
290{
291 struct efx_nic *efx = net_dev->priv;
292
293 /* No way to stop the hardware doing the checks; we just
294 * ignore the result.
295 */
296 efx->rx_checksum_enabled = (enable ? 1 : 0);
297
298 return 0;
299}
300
301static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
302{
303 struct efx_nic *efx = net_dev->priv;
304
305 return efx->rx_checksum_enabled;
306}
307
308/* Restart autonegotiation */
309static int efx_ethtool_nway_reset(struct net_device *net_dev)
310{
311 struct efx_nic *efx = net_dev->priv;
312
313 return mii_nway_restart(&efx->mii);
314}
315
316static u32 efx_ethtool_get_link(struct net_device *net_dev)
317{
318 struct efx_nic *efx = net_dev->priv;
319
320 return efx->link_up;
321}
322
323static int efx_ethtool_get_coalesce(struct net_device *net_dev,
324 struct ethtool_coalesce *coalesce)
325{
326 struct efx_nic *efx = net_dev->priv;
327 struct efx_tx_queue *tx_queue;
328 struct efx_rx_queue *rx_queue;
329 struct efx_channel *channel;
330
331 memset(coalesce, 0, sizeof(*coalesce));
332
333 /* Find lowest IRQ moderation across all used TX queues */
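	/* A channel shared between RX and TX reports its moderation via the
	 * RX value, so report 0 on the TX side for such channels. */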
334 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
335 efx_for_each_tx_queue(tx_queue, efx) {
336 channel = tx_queue->channel;
337 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
338 if (channel->used_flags != EFX_USED_BY_RX_TX)
339 coalesce->tx_coalesce_usecs_irq =
340 channel->irq_moderation;
341 else
342 coalesce->tx_coalesce_usecs_irq = 0;
343 }
344 }
345
346 /* Find lowest IRQ moderation across all used RX queues */
347 coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
348 efx_for_each_rx_queue(rx_queue, efx) {
349 channel = rx_queue->channel;
350 if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
351 coalesce->rx_coalesce_usecs_irq =
352 channel->irq_moderation;
353 }
354
355 return 0;
356}
357
358/* Set coalescing parameters.
359 * The difficult cases are channels shared between RX and TX.
360 */
361static int efx_ethtool_set_coalesce(struct net_device *net_dev,
362 struct ethtool_coalesce *coalesce)
363{
364 struct efx_nic *efx = net_dev->priv;
365 struct efx_channel *channel;
366 struct efx_tx_queue *tx_queue;
367 unsigned tx_usecs, rx_usecs;
368
369 if (coalesce->use_adaptive_rx_coalesce ||
370 coalesce->use_adaptive_tx_coalesce)
371 return -EOPNOTSUPP;
372
373 if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
374 EFX_ERR(efx, "invalid coalescing setting. "
375 "Only rx/tx_coalesce_usecs_irq are supported\n");
376 return -EOPNOTSUPP;
377 }
378
379 rx_usecs = coalesce->rx_coalesce_usecs_irq;
380 tx_usecs = coalesce->tx_coalesce_usecs_irq;
381
382 /* If the channel is shared only allow RX parameters to be set */
383 efx_for_each_tx_queue(tx_queue, efx) {
384 if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
385 tx_usecs) {
386 EFX_ERR(efx, "Channel is shared. "
387 "Only RX coalescing may be set\n");
388 return -EOPNOTSUPP;
389 }
390 }
391
392 efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
393
394 /* Reset channel to pick up new moderation value. Note that
395 * this may change the value of the irq_moderation field
396 * (e.g. to allow for hardware timer granularity).
397 */
398 efx_for_each_channel(channel, efx)
399 falcon_set_int_moderation(channel);
400
401 return 0;
402}
403
404static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
405 struct ethtool_pauseparam *pause)
406{
407 struct efx_nic *efx = net_dev->priv;
408 enum efx_fc_type flow_control = efx->flow_control;
409 int rc;
410
411 flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
412 flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
413 flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
414 flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
415
416 /* Try to push the pause parameters */
417 mutex_lock(&efx->mac_lock);
418 rc = falcon_xmac_set_pause(efx, flow_control);
419 mutex_unlock(&efx->mac_lock);
420
421 if (!rc)
422 efx_reconfigure_port(efx);
423
424 return rc;
425}
426
427static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
428 struct ethtool_pauseparam *pause)
429{
430 struct efx_nic *efx = net_dev->priv;
431
432 pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
433 pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
434 pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
435}
436
437
438struct ethtool_ops efx_ethtool_ops = {
439 .get_settings = efx_ethtool_get_settings,
440 .set_settings = efx_ethtool_set_settings,
441 .get_drvinfo = efx_ethtool_get_drvinfo,
442 .nway_reset = efx_ethtool_nway_reset,
443 .get_link = efx_ethtool_get_link,
444 .get_coalesce = efx_ethtool_get_coalesce,
445 .set_coalesce = efx_ethtool_set_coalesce,
446 .get_pauseparam = efx_ethtool_get_pauseparam,
447 .set_pauseparam = efx_ethtool_set_pauseparam,
448 .get_rx_csum = efx_ethtool_get_rx_csum,
449 .set_rx_csum = efx_ethtool_set_rx_csum,
450 .get_tx_csum = ethtool_op_get_tx_csum,
451 .set_tx_csum = efx_ethtool_set_tx_csum,
452 .get_sg = ethtool_op_get_sg,
453 .set_sg = ethtool_op_set_sg,
454 .get_flags = ethtool_op_get_flags,
455 .set_flags = ethtool_op_set_flags,
456 .get_strings = efx_ethtool_get_strings,
457 .phys_id = efx_ethtool_phys_id,
458 .get_stats_count = efx_ethtool_get_stats_count,
459 .get_ethtool_stats = efx_ethtool_get_stats,
460};
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
new file mode 100644
index 000000000000..3628e43df14d
--- /dev/null
+++ b/drivers/net/sfc/ethtool.h
@@ -0,0 +1,27 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_ETHTOOL_H
12#define EFX_ETHTOOL_H
13
14#include "net_driver.h"
15
16/*
17 * Ethtool support
18 */
19
20extern int efx_ethtool_get_settings(struct net_device *net_dev,
21 struct ethtool_cmd *ecmd);
22extern int efx_ethtool_set_settings(struct net_device *net_dev,
23 struct ethtool_cmd *ecmd);
24
25extern struct ethtool_ops efx_ethtool_ops;
26
27#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
new file mode 100644
index 000000000000..46db549ce580
--- /dev/null
+++ b/drivers/net/sfc/falcon.c
@@ -0,0 +1,2722 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16#include "net_driver.h"
17#include "bitfield.h"
18#include "efx.h"
19#include "mac.h"
20#include "gmii.h"
21#include "spi.h"
22#include "falcon.h"
23#include "falcon_hwdefs.h"
24#include "falcon_io.h"
25#include "mdio_10g.h"
26#include "phy.h"
27#include "boards.h"
28#include "workarounds.h"
29
30/* Falcon hardware control.
31 * Falcon is the internal codename for the SFC4000 controller that is
32 * present in SFE400X evaluation boards
33 */
34
35/**
36 * struct falcon_nic_data - Falcon NIC state
37 * @next_buffer_table: First available buffer table id
38 * @pci_dev2: The secondary PCI device if present
39 */
40struct falcon_nic_data {
41 unsigned next_buffer_table;
42 struct pci_dev *pci_dev2;
43};
44
45/**************************************************************************
46 *
47 * Configurable values
48 *
49 **************************************************************************
50 */
51
52static int disable_dma_stats;
53
54/* This is set to 16 for a good reason. In summary, if larger than
55 * 16, the descriptor cache holds more than a default socket
56 * buffer's worth of packets (for UDP we can only have at most one
57 * socket buffer's worth outstanding). This combined with the fact
58 * that we only get 1 TX event per descriptor cache means the NIC
59 * goes idle.
60 */
61#define TX_DC_ENTRIES 16
62#define TX_DC_ENTRIES_ORDER 0
63#define TX_DC_BASE 0x130000
64
65#define RX_DC_ENTRIES 64
66#define RX_DC_ENTRIES_ORDER 2
67#define RX_DC_BASE 0x100000
68
69/* RX FIFO XOFF watermark
70 *
71 * When the amount of the RX FIFO used increases past this
72 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
73 * This also has an effect on RX/TX arbitration.
74 */
75static int rx_xoff_thresh_bytes = -1;
76module_param(rx_xoff_thresh_bytes, int, 0644);
77MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
78
79/* RX FIFO XON watermark
80 *
81 * When the amount of the RX FIFO used decreases below this
82 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A).
83 * This also has an effect on RX/TX arbitration.
84 */
85static int rx_xon_thresh_bytes = -1;
86module_param(rx_xon_thresh_bytes, int, 0644);
87MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
88
89/* TX descriptor ring size - min 512 max 4k */
90#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
91#define FALCON_TXD_RING_SIZE 1024
92#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
93
94/* RX descriptor ring size - min 512 max 4k */
95#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
96#define FALCON_RXD_RING_SIZE 1024
97#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
98
99/* Event queue size - max 32k */
100#define FALCON_EVQ_ORDER EVQ_SIZE_4K
101#define FALCON_EVQ_SIZE 4096
102#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
103
104/* Max number of internal errors. After this resets will not be performed */
105#define FALCON_MAX_INT_ERRORS 4
106
107/* Maximum period that we wait for flush events. If the flush event
108 * doesn't arrive in this period of time then we check if the queue
109 * was disabled anyway. */
110#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
111
112/**************************************************************************
113 *
114 * Falcon constants
115 *
116 **************************************************************************
117 */
118
119/* DMA address mask (up to 46-bit, avoiding compiler warnings)
120 *
121 * Note that it is possible to have a platform with 64-bit longs and
122 * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
123 * platform DMA mask.
124 */
125#if BITS_PER_LONG == 64
126#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
127#else
128#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
129#endif
130
131/* TX DMA length mask (13-bit) */
132#define FALCON_TX_DMA_MASK (4096 - 1)
133
134/* Size and alignment of special buffers (4KB) */
135#define FALCON_BUF_SIZE 4096
136
137/* Dummy SRAM size code */
138#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
139
140/* It would be nice if these (or equivalents) were in linux/pci_regs.h, but they're not. */
141#define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
142#define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
143#define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
144#define PCI_EXP_LNKSTA_LNK_WID 0x3f0
145#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
146
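/* Revisions before B0 expose the controller as two PCI functions; the
 * secondary function is what falcon_nic_data::pci_dev2 refers to. */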
147#define FALCON_IS_DUAL_FUNC(efx) \
148 (FALCON_REV(efx) < FALCON_REV_B0)
149
150/**************************************************************************
151 *
152 * Falcon hardware access
153 *
154 **************************************************************************/
155
156/* Read the current event from the event queue */
157static inline efx_qword_t *falcon_event(struct efx_channel *channel,
158 unsigned int index)
159{
160 return (((efx_qword_t *) (channel->eventq.addr)) + index);
161}
162
163/* See if an event is present
164 *
165 * We check both the high and low dword of the event for all ones. We
166 * wrote all ones when we cleared the event, and no valid event can
167 * have all ones in either its high or low dwords. This approach is
168 * robust against reordering.
169 *
170 * Note that using a single 64-bit comparison is incorrect; even
171 * though the CPU read will be atomic, the DMA write may not be.
172 */
173static inline int falcon_event_present(efx_qword_t *event)
174{
175 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
176 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
177}
178
179/**************************************************************************
180 *
181 * I2C bus - this is a bit-bashing interface using GPIO pins
182 * Note that it uses the output enables to tristate the outputs
183 * SDA is the data pin and SCL is the clock
184 *
185 **************************************************************************
186 */
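/* SDA and SCL share a single GPIO control register, so one helper
 * rewrites both output enables from the cached i2c->sda/scl state and
 * is registered as both .setsda and .setscl below. */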
187static void falcon_setsdascl(struct efx_i2c_interface *i2c)
188{
189 efx_oword_t reg;
190
191 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
192 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
193 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
194 falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
195}
196
197static int falcon_getsda(struct efx_i2c_interface *i2c)
198{
199 efx_oword_t reg;
200
201 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
202 return EFX_OWORD_FIELD(reg, GPIO3_IN);
203}
204
205static int falcon_getscl(struct efx_i2c_interface *i2c)
206{
207 efx_oword_t reg;
208
209 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
210 return EFX_DWORD_FIELD(reg, GPIO0_IN);
211}
212
213static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
214 .setsda = falcon_setsdascl,
215 .setscl = falcon_setsdascl,
216 .getsda = falcon_getsda,
217 .getscl = falcon_getscl,
218 .udelay = 100,
219 .mdelay = 10,
220};
221
222/**************************************************************************
223 *
224 * Falcon special buffer handling
225 * Special buffers are used for event queues and the TX and RX
226 * descriptor rings.
227 *
228 *************************************************************************/
229
230/*
231 * Initialise a Falcon special buffer
232 *
233 * This will define a buffer (previously allocated via
234 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
235 * it to be used for event queues, descriptor rings etc.
236 */
237static int
238falcon_init_special_buffer(struct efx_nic *efx,
239 struct efx_special_buffer *buffer)
240{
241 efx_qword_t buf_desc;
242 int index;
243 dma_addr_t dma_addr;
244 int i;
245
246 EFX_BUG_ON_PARANOID(!buffer->addr);
247
248 /* Write buffer descriptors to NIC */
249 for (i = 0; i < buffer->entries; i++) {
250 index = buffer->index + i;
251 dma_addr = buffer->dma_addr + (i * 4096);
252 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
253 index, (unsigned long long)dma_addr);
254 EFX_POPULATE_QWORD_4(buf_desc,
255 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
256 BUF_ADR_REGION, 0,
257 BUF_ADR_FBUF, (dma_addr >> 12),
258 BUF_OWNER_ID_FBUF, 0);
259 falcon_write_sram(efx, &buf_desc, index);
260 }
261
262 return 0;
263}
264
265/* Unmaps a buffer from Falcon and clears the buffer table entries */
266static void
267falcon_fini_special_buffer(struct efx_nic *efx,
268 struct efx_special_buffer *buffer)
269{
270 efx_oword_t buf_tbl_upd;
271 unsigned int start = buffer->index;
272 unsigned int end = (buffer->index + buffer->entries - 1);
273
274 if (!buffer->entries)
275 return;
276
277 EFX_LOG(efx, "unmapping special buffers %d-%d\n",
278 buffer->index, buffer->index + buffer->entries - 1);
279
280 EFX_POPULATE_OWORD_4(buf_tbl_upd,
281 BUF_UPD_CMD, 0,
282 BUF_CLR_CMD, 1,
283 BUF_CLR_END_ID, end,
284 BUF_CLR_START_ID, start);
285 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
286}
287
288/*
289 * Allocate a new Falcon special buffer
290 *
291 * This allocates memory for a new buffer, clears it and allocates a
292 * new buffer ID range. It does not write into Falcon's buffer table.
293 *
294 * This call will allocate 4KB buffers, since Falcon can't use 8KB
295 * buffers for event queues and descriptor rings.
296 */
297static int falcon_alloc_special_buffer(struct efx_nic *efx,
298 struct efx_special_buffer *buffer,
299 unsigned int len)
300{
301 struct falcon_nic_data *nic_data = efx->nic_data;
302
303 len = ALIGN(len, FALCON_BUF_SIZE);
304
305 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
306 &buffer->dma_addr);
307 if (!buffer->addr)
308 return -ENOMEM;
309 buffer->len = len;
310 buffer->entries = len / FALCON_BUF_SIZE;
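	/* pci_alloc_consistent() returns page-aligned memory, so the 4KB
	 * alignment checked below should hold on all supported platforms. */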
311 BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
312
313 /* All zeros is a potentially valid event so memset to 0xff */
314 memset(buffer->addr, 0xff, len);
315
316 /* Select new buffer ID */
317 buffer->index = nic_data->next_buffer_table;
318 nic_data->next_buffer_table += buffer->entries;
319
320 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
321 "(virt %p phys %lx)\n", buffer->index,
322 buffer->index + buffer->entries - 1,
323 (unsigned long long)buffer->dma_addr, len,
324 buffer->addr, virt_to_phys(buffer->addr));
325
326 return 0;
327}
328
329static void falcon_free_special_buffer(struct efx_nic *efx,
330 struct efx_special_buffer *buffer)
331{
332 if (!buffer->addr)
333 return;
334
335 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
336 "(virt %p phys %lx)\n", buffer->index,
337 buffer->index + buffer->entries - 1,
338 (unsigned long long)buffer->dma_addr, buffer->len,
339 buffer->addr, virt_to_phys(buffer->addr));
340
341 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
342 buffer->dma_addr);
343 buffer->addr = NULL;
344 buffer->entries = 0;
345}
346
347/**************************************************************************
348 *
349 * Falcon generic buffer handling
350 * These buffers are used for interrupt status and MAC stats
351 *
352 **************************************************************************/
353
354static int falcon_alloc_buffer(struct efx_nic *efx,
355 struct efx_buffer *buffer, unsigned int len)
356{
357 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
358 &buffer->dma_addr);
359 if (!buffer->addr)
360 return -ENOMEM;
361 buffer->len = len;
362 memset(buffer->addr, 0, len);
363 return 0;
364}
365
366static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
367{
368 if (buffer->addr) {
369 pci_free_consistent(efx->pci_dev, buffer->len,
370 buffer->addr, buffer->dma_addr);
371 buffer->addr = NULL;
372 }
373}
374
375/**************************************************************************
376 *
377 * Falcon TX path
378 *
379 **************************************************************************/
380
381/* Returns a pointer to the specified transmit descriptor in the TX
382 * descriptor queue belonging to the specified channel.
383 */
384static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
385 unsigned int index)
386{
387 return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
388}
389
390/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
391static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
392{
393 unsigned write_ptr;
394 efx_dword_t reg;
395
396 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
397 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
398 falcon_writel_page(tx_queue->efx, &reg,
399 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
400}
401
402
403/* For each entry inserted into the software descriptor ring, create a
404 * descriptor in the hardware TX descriptor ring (in host memory), and
405 * write a doorbell.
406 */
407void falcon_push_buffers(struct efx_tx_queue *tx_queue)
408{
409
410 struct efx_tx_buffer *buffer;
411 efx_qword_t *txd;
412 unsigned write_ptr;
413
414 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
415
416 do {
417 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
418 buffer = &tx_queue->buffer[write_ptr];
419 txd = falcon_tx_desc(tx_queue, write_ptr);
420 ++tx_queue->write_count;
421
422 /* Create TX descriptor ring entry */
423 EFX_POPULATE_QWORD_5(*txd,
424 TX_KER_PORT, 0,
425 TX_KER_CONT, buffer->continuation,
426 TX_KER_BYTE_CNT, buffer->len,
427 TX_KER_BUF_REGION, 0,
428 TX_KER_BUF_ADR, buffer->dma_addr);
429 } while (tx_queue->write_count != tx_queue->insert_count);
430
431 wmb(); /* Ensure descriptors are written before they are fetched */
432 falcon_notify_tx_desc(tx_queue);
433}
434
435/* Allocate hardware resources for a TX queue */
436int falcon_probe_tx(struct efx_tx_queue *tx_queue)
437{
438 struct efx_nic *efx = tx_queue->efx;
439 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
440 FALCON_TXD_RING_SIZE *
441 sizeof(efx_qword_t));
442}
443
444int falcon_init_tx(struct efx_tx_queue *tx_queue)
445{
446 efx_oword_t tx_desc_ptr;
447 struct efx_nic *efx = tx_queue->efx;
448 int rc;
449
450 /* Pin TX descriptor ring */
451 rc = falcon_init_special_buffer(efx, &tx_queue->txd);
452 if (rc)
453 return rc;
454
455 /* Push TX descriptor ring to card */
456 EFX_POPULATE_OWORD_10(tx_desc_ptr,
457 TX_DESCQ_EN, 1,
458 TX_ISCSI_DDIG_EN, 0,
459 TX_ISCSI_HDIG_EN, 0,
460 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
461 TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
462 TX_DESCQ_OWNER_ID, 0,
463 TX_DESCQ_LABEL, tx_queue->queue,
464 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
465 TX_DESCQ_TYPE, 0,
466 TX_NON_IP_DROP_DIS_B0, 1);
467
468 if (FALCON_REV(efx) >= FALCON_REV_B0) {
469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
472 }
473
474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
475 tx_queue->queue);
476
477 if (FALCON_REV(efx) < FALCON_REV_B0) {
478 efx_oword_t reg;
479
480 BUG_ON(tx_queue->queue >= 128); /* HW limit */
481
482 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
483 if (efx->net_dev->features & NETIF_F_IP_CSUM)
484 clear_bit_le(tx_queue->queue, (void *)&reg);
485 else
486 set_bit_le(tx_queue->queue, (void *)&reg);
487 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
488 }
489
490 return 0;
491}
492
493static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
494{
495 struct efx_nic *efx = tx_queue->efx;
496 struct efx_channel *channel = &efx->channel[0];
497 efx_oword_t tx_flush_descq;
498 unsigned int read_ptr, i;
499
500 /* Post a flush command */
501 EFX_POPULATE_OWORD_2(tx_flush_descq,
502 TX_FLUSH_DESCQ_CMD, 1,
503 TX_FLUSH_DESCQ, tx_queue->queue);
504 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
505 msleep(FALCON_FLUSH_TIMEOUT);
506
507 if (EFX_WORKAROUND_7803(efx))
508 return 0;
509
510 /* Look for a flush completed event */
511 read_ptr = channel->eventq_read_ptr;
512 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
513 efx_qword_t *event = falcon_event(channel, read_ptr);
514 int ev_code, ev_sub_code, ev_queue;
515 if (!falcon_event_present(event))
516 break;
517
518 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
519 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
520 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
521 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
522 (ev_queue == tx_queue->queue)) {
523 EFX_LOG(efx, "tx queue %d flush command successful\n",
524 tx_queue->queue);
525 return 0;
526 }
527
528 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
529 }
530
531 if (EFX_WORKAROUND_11557(efx)) {
532 efx_oword_t reg;
533 int enabled;
534
535 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue);
537 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
538 if (!enabled) {
539 EFX_LOG(efx, "tx queue %d disabled without a "
540 "flush event seen\n", tx_queue->queue);
541 return 0;
542 }
543 }
544
545 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
546 return -ETIMEDOUT;
547}
548
549void falcon_fini_tx(struct efx_tx_queue *tx_queue)
550{
551 struct efx_nic *efx = tx_queue->efx;
552 efx_oword_t tx_desc_ptr;
553
554 /* Stop the hardware using the queue */
555 if (falcon_flush_tx_queue(tx_queue))
556 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
557
558 /* Remove TX descriptor ring from card */
559 EFX_ZERO_OWORD(tx_desc_ptr);
560 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
561 tx_queue->queue);
562
563 /* Unpin TX descriptor ring */
564 falcon_fini_special_buffer(efx, &tx_queue->txd);
565}
566
567/* Free buffers backing TX queue */
568void falcon_remove_tx(struct efx_tx_queue *tx_queue)
569{
570 falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
571}
572
573/**************************************************************************
574 *
575 * Falcon RX path
576 *
577 **************************************************************************/
578
579/* Returns a pointer to the specified descriptor in the RX descriptor queue */
580static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
581 unsigned int index)
582{
583 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
584}
585
586/* This creates an entry in the RX descriptor queue */
587static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
588 unsigned index)
589{
590 struct efx_rx_buffer *rx_buf;
591 efx_qword_t *rxd;
592
593 rxd = falcon_rx_desc(rx_queue, index);
594 rx_buf = efx_rx_buffer(rx_queue, index);
595 EFX_POPULATE_QWORD_3(*rxd,
596 RX_KER_BUF_SIZE,
597 rx_buf->len -
598 rx_queue->efx->type->rx_buffer_padding,
599 RX_KER_BUF_REGION, 0,
600 RX_KER_BUF_ADR, rx_buf->dma_addr);
601}
602
603/* This writes to the RX_DESC_WPTR register for the specified receive
604 * descriptor ring.
605 */
606void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
607{
608 efx_dword_t reg;
609 unsigned write_ptr;
610
611 while (rx_queue->notified_count != rx_queue->added_count) {
612 falcon_build_rx_desc(rx_queue,
613 rx_queue->notified_count &
614 FALCON_RXD_RING_MASK);
615 ++rx_queue->notified_count;
616 }
617
618 wmb();
619 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
620 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
621 falcon_writel_page(rx_queue->efx, &reg,
622 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
623}
624
625int falcon_probe_rx(struct efx_rx_queue *rx_queue)
626{
627 struct efx_nic *efx = rx_queue->efx;
628 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
629 FALCON_RXD_RING_SIZE *
630 sizeof(efx_qword_t));
631}
632
633int falcon_init_rx(struct efx_rx_queue *rx_queue)
634{
635 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx;
637 int rc;
638 int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
639 int iscsi_digest_en = is_b0;
640
641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
642 rx_queue->queue, rx_queue->rxd.index,
643 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
644
645 /* Pin RX descriptor ring */
646 rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
647 if (rc)
648 return rc;
649
650 /* Push RX descriptor ring to card */
651 EFX_POPULATE_OWORD_10(rx_desc_ptr,
652 RX_ISCSI_DDIG_EN, iscsi_digest_en,
653 RX_ISCSI_HDIG_EN, iscsi_digest_en,
654 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
655 RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
656 RX_DESCQ_OWNER_ID, 0,
657 RX_DESCQ_LABEL, rx_queue->queue,
658 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
659 RX_DESCQ_TYPE, 0 /* kernel queue */ ,
660 /* On >= B0 this bit enables scatter, so disable it */
661 RX_DESCQ_JUMBO, !is_b0,
662 RX_DESCQ_EN, 1);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue);
665 return 0;
666}
667
668static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
669{
670 struct efx_nic *efx = rx_queue->efx;
671 struct efx_channel *channel = &efx->channel[0];
672 unsigned int read_ptr, i;
673 efx_oword_t rx_flush_descq;
674
675 /* Post a flush command */
676 EFX_POPULATE_OWORD_2(rx_flush_descq,
677 } while (++count < 10000); /* wait up to 100ms */
678 RX_FLUSH_DESCQ, rx_queue->queue);
679 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
680 msleep(FALCON_FLUSH_TIMEOUT);
681
682 if (EFX_WORKAROUND_7803(efx))
683 return 0;
684
685 /* Look for a flush completed event */
686 read_ptr = channel->eventq_read_ptr;
687 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
688 efx_qword_t *event = falcon_event(channel, read_ptr);
689 int ev_code, ev_sub_code, ev_queue, ev_failed;
690 if (!falcon_event_present(event))
691 break;
692
693 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
694 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
695 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
696 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
697
698 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
699 (ev_queue == rx_queue->queue)) {
700 if (ev_failed) {
701 EFX_INFO(efx, "rx queue %d flush command "
702 "failed\n", rx_queue->queue);
703 return -EAGAIN;
704 } else {
705 EFX_LOG(efx, "rx queue %d flush command "
706 "successful\n", rx_queue->queue);
707 return 0;
708 }
709 }
710
711 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
712 }
713
714 if (EFX_WORKAROUND_11557(efx)) {
715 efx_oword_t reg;
716 int enabled;
717
718 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
719 rx_queue->queue);
720 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
721 if (!enabled) {
722 EFX_LOG(efx, "rx queue %d disabled without a "
723 "flush event seen\n", rx_queue->queue);
724 return 0;
725 }
726 }
727
728 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
729 return -ETIMEDOUT;
730}
731
732void falcon_fini_rx(struct efx_rx_queue *rx_queue)
733{
734 efx_oword_t rx_desc_ptr;
735 struct efx_nic *efx = rx_queue->efx;
736 int i, rc;
737
738 /* Try to flush the RX queue; this may need to be repeated */
739 for (i = 0; i < 5; i++) {
740 rc = falcon_flush_rx_queue(rx_queue);
741 if (rc == -EAGAIN)
742 continue;
743 break;
744 }
745 if (rc)
746 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
747
748 /* Remove RX descriptor ring from card */
749 EFX_ZERO_OWORD(rx_desc_ptr);
750 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
751 rx_queue->queue);
752
753 /* Unpin RX descriptor ring */
754 falcon_fini_special_buffer(efx, &rx_queue->rxd);
755}
756
757/* Free buffers backing RX queue */
758void falcon_remove_rx(struct efx_rx_queue *rx_queue)
759{
760 falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
761}
762
763/**************************************************************************
764 *
765 * Falcon event queue processing
766 * Event queues are processed by per-channel tasklets.
767 *
768 **************************************************************************/
769
770/* Update a channel's event queue's read pointer (RPTR) register
771 *
772 * This writes the EVQ_RPTR_REG register for the specified channel's
773 * event queue.
774 *
775 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
776 * whereas channel->eventq_read_ptr contains the index of the "next to
777 * read" event.
778 */
779void falcon_eventq_read_ack(struct efx_channel *channel)
780{
781 efx_dword_t reg;
782 struct efx_nic *efx = channel->efx;
783
784 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
785 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
786 channel->evqnum);
787}
788
789/* Use HW to insert a SW defined event */
790void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
791{
792 efx_oword_t drv_ev_reg;
793
794 EFX_POPULATE_OWORD_2(drv_ev_reg,
795 DRV_EV_QID, channel->evqnum,
796 DRV_EV_DATA,
797 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
798 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
799}
800
801/* Handle a transmit completion event
802 *
803 * Falcon batches TX completion events; the message we receive is of
804 * the form "complete all TX events up to this index".
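 * For example, a single event carrying descriptor pointer N lets
 * efx_xmit_done() release every completed buffer up to ring index N,
 * instead of one event being raised per packet.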
805 */
806static inline void falcon_handle_tx_event(struct efx_channel *channel,
807 efx_qword_t *event)
808{
809 unsigned int tx_ev_desc_ptr;
810 unsigned int tx_ev_q_label;
811 struct efx_tx_queue *tx_queue;
812 struct efx_nic *efx = channel->efx;
813
814 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
815 /* Transmit completion */
816 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
817 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
818 tx_queue = &efx->tx_queue[tx_ev_q_label];
819 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
820 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
821 /* Rewrite the FIFO write pointer */
822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
823 tx_queue = &efx->tx_queue[tx_ev_q_label];
824
825 if (NET_DEV_REGISTERED(efx))
826 netif_tx_lock(efx->net_dev);
827 falcon_notify_tx_desc(tx_queue);
828 if (NET_DEV_REGISTERED(efx))
829 netif_tx_unlock(efx->net_dev);
830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
831 EFX_WORKAROUND_10727(efx)) {
832 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
833 } else {
834 EFX_ERR(efx, "channel %d unexpected TX event "
835 EFX_QWORD_FMT"\n", channel->channel,
836 EFX_QWORD_VAL(*event));
837 }
838}
839
840/* Check received packet's destination MAC address. */
841static int check_dest_mac(struct efx_rx_queue *rx_queue,
842 const efx_qword_t *event)
843{
844 struct efx_rx_buffer *rx_buf;
845 struct efx_nic *efx = rx_queue->efx;
846 int rx_ev_desc_ptr;
847 struct ethhdr *eh;
848
849 if (efx->promiscuous)
850 return 1;
851
852 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
853 rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
854 eh = (struct ethhdr *)rx_buf->data;
855 if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
856 return 0;
857 return 1;
858}
859
860/* Detect errors included in the rx_evt_pkt_ok bit. */
861static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
862 const efx_qword_t *event,
863 unsigned *rx_ev_pkt_ok,
864 int *discard, int byte_count)
865{
866 struct efx_nic *efx = rx_queue->efx;
867 unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
868 unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
869 unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
870 unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
871 unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
872 int snap, non_ip;
873
874 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
875 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
876 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
877 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
878 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
879 RX_EV_BUF_OWNER_ID_ERR);
880 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
881 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
882 RX_EV_IP_HDR_CHKSUM_ERR);
883 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
884 RX_EV_TCP_UDP_CHKSUM_ERR);
885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
887 rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
890
891 /* Every error apart from tobe_disc and pause_frm */
892 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
893 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
894 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
895
896 snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
897 (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
898 non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
899
900 /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
901 * length field of an LLC frame, which sets TOBE_DISC. We could set
902 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
903 * protect the RX block).
904 *
905 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
906 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
907 * LLC can't encapsulate IP, so by definition
908 * these packets are NON_IP.
909 *
910 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
911 * to check this.
912 */
913 if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
914 /* If all the other flags are zero then we can state the
915 * entire packet is ok, which will flag to the kernel not
916 * to recalculate checksums.
917 */
918 if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
919 *rx_ev_pkt_ok = 1;
920
921 rx_ev_tobe_disc = 0;
922
923 /* TOBE_DISC is set for unicast mismatch. But given that
924 * we can't trust TOBE_DISC here, we must validate the dest
925 * MAC address ourselves.
926 */
927 if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
928 rx_ev_tobe_disc = 1;
929 }
930
931 /* Count errors that are not in MAC stats. */
932 if (rx_ev_frm_trunc)
933 ++rx_queue->channel->n_rx_frm_trunc;
934 else if (rx_ev_tobe_disc)
935 ++rx_queue->channel->n_rx_tobe_disc;
936 else if (rx_ev_ip_hdr_chksum_err)
937 ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
938 else if (rx_ev_tcp_udp_chksum_err)
939 ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
940 if (rx_ev_ip_frag_err)
941 ++rx_queue->channel->n_rx_ip_frag_err;
942
943 /* The frame must be discarded if any of these are true. */
944 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
945 rx_ev_tobe_disc | rx_ev_pause_frm);
946
947 /* TOBE_DISC is expected on unicast mismatches; don't print out an
948 * error message. FRM_TRUNC indicates RXDP dropped the packet due
949 * to a FIFO overflow.
950 */
951#ifdef EFX_ENABLE_DEBUG
952 if (rx_ev_other_err) {
953 EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
954 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
955 rx_queue->queue, EFX_QWORD_VAL(*event),
956 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
957 rx_ev_ip_hdr_chksum_err ?
958 " [IP_HDR_CHKSUM_ERR]" : "",
959 rx_ev_tcp_udp_chksum_err ?
960 " [TCP_UDP_CHKSUM_ERR]" : "",
961 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
962 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
963 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
964 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
965 rx_ev_pause_frm ? " [PAUSE]" : "",
966 snap ? " [SNAP/LLC]" : "");
967 }
968#endif
969
970 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
971 efx->phy_type == PHY_TYPE_10XPRESS))
972 tenxpress_crc_err(efx);
973}
974
975/* Handle receive events that are not in-order. */
976static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
977 unsigned index)
978{
979 struct efx_nic *efx = rx_queue->efx;
980 unsigned expected, dropped;
981
982 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
983 dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
984 FALCON_RXD_RING_MASK);
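 /* Worked example: with expected == 10 and index == 14 this yields
 * dropped == 4; adding FALCON_RXD_RING_SIZE before masking keeps the
 * result correct when the index has wrapped past the end of the ring.
 */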
985 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
986 dropped, index, expected);
987
988 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
989 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
990}
991
992/* Handle a packet received event
993 *
994 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
995 * wrong destination address.
996 * Also "is multicast" and "matches multicast filter" flags can be used to
997 * discard non-matching multicast packets.
998 */
999static inline int falcon_handle_rx_event(struct efx_channel *channel,
1000 const efx_qword_t *event)
1001{
1002 unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
1003 unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
1004 unsigned expected_ptr;
1005 int discard = 0, checksummed;
1006 struct efx_rx_queue *rx_queue;
1007 struct efx_nic *efx = channel->efx;
1008
1009 /* Basic packet information */
1010 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
1011 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
1012 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
1013 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
1014 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
1015
1016 rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
1017 rx_queue = &efx->rx_queue[rx_ev_q_label];
1018
1019 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
1020 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
1021 if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
1022 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1023 return rx_ev_q_label;
1024 }
1025
1026 if (likely(rx_ev_pkt_ok)) {
1027 /* If packet is marked as OK and packet type is TCP/IPv4 or
1028 * UDP/IPv4, then we can rely on the hardware checksum.
1029 */
1030 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
1031 } else {
1032 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
1033 &discard, rx_ev_byte_cnt);
1034 checksummed = 0;
1035 }
1036
1037 /* Detect multicast packets that didn't match the filter */
1038 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
1039 if (rx_ev_mcast_pkt) {
1040 unsigned int rx_ev_mcast_hash_match =
1041 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
1042
1043 if (unlikely(!rx_ev_mcast_hash_match))
1044 discard = 1;
1045 }
1046
1047 /* Handle received packet */
1048 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
1049 checksummed, discard);
1050
1051 return rx_ev_q_label;
1052}
1053
1054/* Global events are basically PHY events */
1055static void falcon_handle_global_event(struct efx_channel *channel,
1056 efx_qword_t *event)
1057{
1058 struct efx_nic *efx = channel->efx;
1059 int is_phy_event = 0, handled = 0;
1060
1061 /* Check for interrupt on either port. Some boards have a
1062 * single PHY wired to the interrupt line for port 1. */
1063 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
1064 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1066 is_phy_event = 1;
1067
1068 if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
1069 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
1070 is_phy_event = 1;
1071
1072 if (is_phy_event) {
1073 efx->phy_op->clear_interrupt(efx);
1074 queue_work(efx->workqueue, &efx->reconfigure_work);
1075 handled = 1;
1076 }
1077
1078 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
1079 EFX_ERR(efx, "channel %d seen global RX_RESET "
1080 "event. Resetting.\n", channel->channel);
1081
1082 atomic_inc(&efx->rx_reset);
1083 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1084 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1085 handled = 1;
1086 }
1087
1088 if (!handled)
1089 EFX_ERR(efx, "channel %d unknown global event "
1090 EFX_QWORD_FMT "\n", channel->channel,
1091 EFX_QWORD_VAL(*event));
1092}
1093
1094static void falcon_handle_driver_event(struct efx_channel *channel,
1095 efx_qword_t *event)
1096{
1097 struct efx_nic *efx = channel->efx;
1098 unsigned int ev_sub_code;
1099 unsigned int ev_sub_data;
1100
1101 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1102 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
1103
1104 switch (ev_sub_code) {
1105 case TX_DESCQ_FLS_DONE_EV_DECODE:
1106 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
1107 channel->channel, ev_sub_data);
1108 break;
1109 case RX_DESCQ_FLS_DONE_EV_DECODE:
1110 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
1111 channel->channel, ev_sub_data);
1112 break;
1113 case EVQ_INIT_DONE_EV_DECODE:
1114 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
1115 channel->channel, ev_sub_data);
1116 break;
1117 case SRM_UPD_DONE_EV_DECODE:
1118 EFX_TRACE(efx, "channel %d SRAM update done\n",
1119 channel->channel);
1120 break;
1121 case WAKE_UP_EV_DECODE:
1122 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
1123 channel->channel, ev_sub_data);
1124 break;
1125 case TIMER_EV_DECODE:
1126 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
1127 channel->channel, ev_sub_data);
1128 break;
1129 case RX_RECOVERY_EV_DECODE:
1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
1131 "Resetting.\n", channel->channel);
1132 efx_schedule_reset(efx,
1133 EFX_WORKAROUND_6555(efx) ?
1134 RESET_TYPE_RX_RECOVERY :
1135 RESET_TYPE_DISABLE);
1136 break;
1137 case RX_DSC_ERROR_EV_DECODE:
1138 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
1139 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1140 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1141 break;
1142 case TX_DSC_ERROR_EV_DECODE:
1143 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1144 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1145 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1146 break;
1147 default:
1148 EFX_TRACE(efx, "channel %d unknown driver event code %d "
1149 "data %04x\n", channel->channel, ev_sub_code,
1150 ev_sub_data);
1151 break;
1152 }
1153}
1154
1155int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1156{
1157 unsigned int read_ptr;
1158 efx_qword_t event, *p_event;
1159 int ev_code;
1160 int rxq;
1161 int rxdmaqs = 0;
1162
1163 read_ptr = channel->eventq_read_ptr;
1164
1165 do {
1166 p_event = falcon_event(channel, read_ptr);
1167 event = *p_event;
1168
1169 if (!falcon_event_present(&event))
1170 /* End of events */
1171 break;
1172
1173 EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
1174 channel->channel, EFX_QWORD_VAL(event));
1175
1176 /* Clear this event by marking it all ones */
1177 EFX_SET_QWORD(*p_event);
1178
1179 ev_code = EFX_QWORD_FIELD(event, EV_CODE);
1180
1181 switch (ev_code) {
1182 case RX_IP_EV_DECODE:
1183 rxq = falcon_handle_rx_event(channel, &event);
1184 rxdmaqs |= (1 << rxq);
1185 (*rx_quota)--;
1186 break;
1187 case TX_IP_EV_DECODE:
1188 falcon_handle_tx_event(channel, &event);
1189 break;
1190 case DRV_GEN_EV_DECODE:
1191 channel->eventq_magic
1192 = EFX_QWORD_FIELD(event, EVQ_MAGIC);
1193 EFX_LOG(channel->efx, "channel %d received generated "
1194 "event "EFX_QWORD_FMT"\n", channel->channel,
1195 EFX_QWORD_VAL(event));
1196 break;
1197 case GLOBAL_EV_DECODE:
1198 falcon_handle_global_event(channel, &event);
1199 break;
1200 case DRIVER_EV_DECODE:
1201 falcon_handle_driver_event(channel, &event);
1202 break;
1203 default:
1204 EFX_ERR(channel->efx, "channel %d unknown event type %d"
1205 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1206 ev_code, EFX_QWORD_VAL(event));
1207 }
1208
1209 /* Increment read pointer */
1210 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1211
1212 } while (*rx_quota);
1213
1214 channel->eventq_read_ptr = read_ptr;
1215 return rxdmaqs;
1216}
1217
1218void falcon_set_int_moderation(struct efx_channel *channel)
1219{
1220 efx_dword_t timer_cmd;
1221 struct efx_nic *efx = channel->efx;
1222
1223 /* Set timer register */
1224 if (channel->irq_moderation) {
1225 /* Round to the resolution supported by the hardware. The value
1226 * we program is zero-based, so the actual interrupt moderation
1227 * achieved is ((x + 1) * res).
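 * For example, with res == 5 a requested moderation of 23 is rounded
 * down to 20, TIMER_VAL is programmed as 20 / 5 - 1 = 3, and the
 * hardware applies (3 + 1) * 5 = 20.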
1228 */
1229 unsigned int res = 5;
1230 channel->irq_moderation -= (channel->irq_moderation % res);
1231 if (channel->irq_moderation < res)
1232 channel->irq_moderation = res;
1233 EFX_POPULATE_DWORD_2(timer_cmd,
1234 TIMER_MODE, TIMER_MODE_INT_HLDOFF,
1235 TIMER_VAL,
1236 (channel->irq_moderation / res) - 1);
1237 } else {
1238 EFX_POPULATE_DWORD_2(timer_cmd,
1239 TIMER_MODE, TIMER_MODE_DIS,
1240 TIMER_VAL, 0);
1241 }
1242 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
1243 channel->evqnum);
1244
1245}
1246
1247/* Allocate buffer table entries for event queue */
1248int falcon_probe_eventq(struct efx_channel *channel)
1249{
1250 struct efx_nic *efx = channel->efx;
1251 unsigned int evq_size;
1252
1253 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
1254 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1255}
1256
1257int falcon_init_eventq(struct efx_channel *channel)
1258{
1259 efx_oword_t evq_ptr;
1260 struct efx_nic *efx = channel->efx;
1261 int rc;
1262
1263 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1264 channel->channel, channel->eventq.index,
1265 channel->eventq.index + channel->eventq.entries - 1);
1266
1267 /* Pin event queue buffer */
1268 rc = falcon_init_special_buffer(efx, &channel->eventq);
1269 if (rc)
1270 return rc;
1271
1272 /* Fill event queue with all ones (i.e. empty events) */
1273 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1274
1275 /* Push event queue to card */
1276 EFX_POPULATE_OWORD_3(evq_ptr,
1277 EVQ_EN, 1,
1278 EVQ_SIZE, FALCON_EVQ_ORDER,
1279 EVQ_BUF_BASE_ID, channel->eventq.index);
1280 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1281 channel->evqnum);
1282
1283 falcon_set_int_moderation(channel);
1284
1285 return 0;
1286}
1287
1288void falcon_fini_eventq(struct efx_channel *channel)
1289{
1290 efx_oword_t eventq_ptr;
1291 struct efx_nic *efx = channel->efx;
1292
1293 /* Remove event queue from card */
1294 EFX_ZERO_OWORD(eventq_ptr);
1295 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1296 channel->evqnum);
1297
1298 /* Unpin event queue */
1299 falcon_fini_special_buffer(efx, &channel->eventq);
1300}
1301
1302/* Free buffers backing event queue */
1303void falcon_remove_eventq(struct efx_channel *channel)
1304{
1305 falcon_free_special_buffer(channel->efx, &channel->eventq);
1306}
1307
1308
1309/* Generates a test event on the event queue. A subsequent call to
1310 * process_eventq() should pick up the event and place the value of
1311 * "magic" into channel->eventq_magic.
1312 */
1313void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1314{
1315 efx_qword_t test_event;
1316
1317 EFX_POPULATE_QWORD_2(test_event,
1318 EV_CODE, DRV_GEN_EV_DECODE,
1319 EVQ_MAGIC, magic);
1320 falcon_generate_event(channel, &test_event);
1321}
1322
1323
1324/**************************************************************************
1325 *
1326 * Falcon hardware interrupts
1327 * The hardware interrupt handler does very little work; all the event
1328 * queue processing is carried out by per-channel tasklets.
1329 *
1330 **************************************************************************/
1331
1332/* Enable/disable/generate Falcon interrupts */
1333static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1334 int force)
1335{
1336 efx_oword_t int_en_reg_ker;
1337
1338 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1339 KER_INT_KER, force,
1340 DRV_INT_EN_KER, enabled);
1341 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
1342}
1343
1344void falcon_enable_interrupts(struct efx_nic *efx)
1345{
1346 efx_oword_t int_adr_reg_ker;
1347 struct efx_channel *channel;
1348
1349 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1350 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1351
1352 /* Program address */
1353 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1354 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
1355 INT_ADR_KER, efx->irq_status.dma_addr);
1356 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
1357
1358 /* Enable interrupts */
1359 falcon_interrupts(efx, 1, 0);
1360
1361 /* Force processing of all the channels to get the EVQ RPTRs up to
1362 date */
1363 efx_for_each_channel_with_interrupt(channel, efx)
1364 efx_schedule_channel(channel);
1365}
1366
1367void falcon_disable_interrupts(struct efx_nic *efx)
1368{
1369 /* Disable interrupts */
1370 falcon_interrupts(efx, 0, 0);
1371}
1372
1373/* Generate a Falcon test interrupt
1374 * Interrupt must already have been enabled, otherwise nasty things
1375 * may happen.
1376 */
1377void falcon_generate_interrupt(struct efx_nic *efx)
1378{
1379 falcon_interrupts(efx, 1, 1);
1380}
1381
1382/* Acknowledge a legacy interrupt from Falcon
1383 *
1384 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
1385 *
1386 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
1387 * BIU. Interrupt acknowledge is read-sensitive, so we must write instead
1388 * (and then read to ensure the BIU collector is flushed).
1389 *
1390 * NB most hardware supports MSI interrupts
1391 */
1392static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1393{
1394 efx_dword_t reg;
1395
1396 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
1397 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
1398 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
1399}
1400
1401/* Process a fatal interrupt
1402 * Disable bus mastering ASAP and schedule a reset
1403 */
1404static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1405{
1406 struct falcon_nic_data *nic_data = efx->nic_data;
1407 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1408 efx_oword_t fatal_intr;
1409 int error, mem_perr;
1410 static int n_int_errors;
1411
1412 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
1413 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
1414
1415 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1416 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1417 EFX_OWORD_VAL(fatal_intr),
1418 error ? "disabling bus mastering" : "no recognised error");
1419 if (error == 0)
1420 goto out;
1421
1422 /* If this is a memory parity error, dump which blocks are offending */
1423 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
1424 if (mem_perr) {
1425 efx_oword_t reg;
1426 falcon_read(efx, &reg, MEM_STAT_REG_KER);
1427 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1428 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1429 }
1430
1431 /* Disable DMA bus mastering on both devices */
1432 pci_disable_device(efx->pci_dev);
1433 if (FALCON_IS_DUAL_FUNC(efx))
1434 pci_disable_device(nic_data->pci_dev2);
1435
1436 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1437 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1438 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1439 } else {
1440 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
1441 "NIC will be disabled\n");
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1443 }
1444out:
1445 return IRQ_HANDLED;
1446}
1447
1448/* Handle a legacy interrupt from Falcon
1449 * Acknowledges the interrupt and schedules event queue processing.
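 * The ISR read below returns (and acknowledges) a bitmask of event
 * queues with outstanding events; each set bit schedules the
 * corresponding channel for processing.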
1450 */
1451static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1452{
1453 struct efx_nic *efx = (struct efx_nic *)dev_id;
1454 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1455 struct efx_channel *channel;
1456 efx_dword_t reg;
1457 u32 queues;
1458 int syserr;
1459
1460 /* Read the ISR which also ACKs the interrupts */
1461 falcon_readl(efx, &reg, INT_ISR0_B0);
1462 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1463
1464 /* Check to see if we have a serious error condition */
1465 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1466 if (unlikely(syserr))
1467 return falcon_fatal_interrupt(efx);
1468
1469 if (queues == 0)
1470 return IRQ_NONE;
1471
1472 efx->last_irq_cpu = raw_smp_processor_id();
1473 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1474 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1475
1476 /* Schedule processing of any interrupting queues */
1477 channel = &efx->channel[0];
1478 while (queues) {
1479 if (queues & 0x01)
1480 efx_schedule_channel(channel);
1481 channel++;
1482 queues >>= 1;
1483 }
1484
1485 return IRQ_HANDLED;
1486}
1487
1488
1489static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1490{
1491 struct efx_nic *efx = (struct efx_nic *)dev_id;
1492 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1493 struct efx_channel *channel;
1494 int syserr;
1495 int queues;
1496
1497 /* Check to see if this is our interrupt. If it isn't, we
1498 * exit without having touched the hardware.
1499 */
1500 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
1501 EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
1502 raw_smp_processor_id());
1503 return IRQ_NONE;
1504 }
1505 efx->last_irq_cpu = raw_smp_processor_id();
1506 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1507 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1508
1509 /* Check to see if we have a serious error condition */
1510 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1511 if (unlikely(syserr))
1512 return falcon_fatal_interrupt(efx);
1513
1514 /* Determine interrupting queues, clear interrupt status
1515 * register and acknowledge the device interrupt.
1516 */
1517 BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
1518 queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
1519 EFX_ZERO_OWORD(*int_ker);
1520 wmb(); /* Ensure the vector is cleared before interrupt ack */
1521 falcon_irq_ack_a1(efx);
1522
1523 /* Schedule processing of any interrupting queues */
1524 channel = &efx->channel[0];
1525 while (queues) {
1526 if (queues & 0x01)
1527 efx_schedule_channel(channel);
1528 channel++;
1529 queues >>= 1;
1530 }
1531
1532 return IRQ_HANDLED;
1533}
1534
1535/* Handle an MSI interrupt from Falcon
1536 *
1537 * Handle an MSI hardware interrupt. This routine schedules event
1538 * queue processing. No interrupt acknowledgement cycle is necessary.
1539 * Also, we never need to check that the interrupt is for us, since
1540 * MSI interrupts cannot be shared.
1541 */
1542static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1543{
1544 struct efx_channel *channel = (struct efx_channel *)dev_id;
1545 struct efx_nic *efx = channel->efx;
1546 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1547 int syserr;
1548
1549 efx->last_irq_cpu = raw_smp_processor_id();
1550 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1551 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1552
1553 /* Check to see if we have a serious error condition */
1554 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1555 if (unlikely(syserr))
1556 return falcon_fatal_interrupt(efx);
1557
1558 /* Schedule processing of the channel */
1559 efx_schedule_channel(channel);
1560
1561 return IRQ_HANDLED;
1562}
1563
1564
1565/* Setup RSS indirection table.
1566 * This maps from the hash value of the packet to RXQ
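 * The table holds 128 entries (0x800 bytes at a 0x10-byte stride);
 * filling them with i % rss_queues spreads hash values round-robin
 * across the RX queues, e.g. 0,1,2,3,0,1,... when rss_queues == 4.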
1567 */
1568static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1569{
1570 int i = 0;
1571 unsigned long offset;
1572 efx_dword_t dword;
1573
1574 if (FALCON_REV(efx) < FALCON_REV_B0)
1575 return;
1576
1577 for (offset = RX_RSS_INDIR_TBL_B0;
1578 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1579 offset += 0x10) {
1580 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1581 i % efx->rss_queues);
1582 falcon_writel(efx, &dword, offset);
1583 i++;
1584 }
1585}
1586
1587/* Hook interrupt handler(s)
1588 * Try MSI and then legacy interrupts.
1589 */
1590int falcon_init_interrupt(struct efx_nic *efx)
1591{
1592 struct efx_channel *channel;
1593 int rc;
1594
1595 if (!EFX_INT_MODE_USE_MSI(efx)) {
1596 irq_handler_t handler;
1597 if (FALCON_REV(efx) >= FALCON_REV_B0)
1598 handler = falcon_legacy_interrupt_b0;
1599 else
1600 handler = falcon_legacy_interrupt_a1;
1601
1602 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1603 efx->name, efx);
1604 if (rc) {
1605 EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
1606 efx->pci_dev->irq);
1607 goto fail1;
1608 }
1609 return 0;
1610 }
1611
1612 /* Hook MSI or MSI-X interrupt */
1613 efx_for_each_channel_with_interrupt(channel, efx) {
1614 rc = request_irq(channel->irq, falcon_msi_interrupt,
1615 IRQF_PROBE_SHARED, /* Not shared */
1616 efx->name, channel);
1617 if (rc) {
1618 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1619 goto fail2;
1620 }
1621 }
1622
1623 return 0;
1624
1625 fail2:
1626 efx_for_each_channel_with_interrupt(channel, efx)
1627 free_irq(channel->irq, channel);
1628 fail1:
1629 return rc;
1630}
1631
1632void falcon_fini_interrupt(struct efx_nic *efx)
1633{
1634 struct efx_channel *channel;
1635 efx_oword_t reg;
1636
1637 /* Disable MSI/MSI-X interrupts */
1638 efx_for_each_channel_with_interrupt(channel, efx)
1639 if (channel->irq)
1640 free_irq(channel->irq, channel);
1641
1642 /* ACK legacy interrupt */
1643 if (FALCON_REV(efx) >= FALCON_REV_B0)
1644 falcon_read(efx, &reg, INT_ISR0_B0);
1645 else
1646 falcon_irq_ack_a1(efx);
1647
1648 /* Disable legacy interrupt */
1649 if (efx->legacy_irq)
1650 free_irq(efx->legacy_irq, efx);
1651}
1652
1653/**************************************************************************
1654 *
1655 * EEPROM/flash
1656 *
1657 **************************************************************************
1658 */
1659
1660#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
1661
1662/* Wait for SPI command completion */
1663static int falcon_spi_wait(struct efx_nic *efx)
1664{
1665 efx_oword_t reg;
1666 int cmd_en, timer_active;
1667 int count;
1668
1669 count = 0;
1670 do {
1671 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1672 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1673 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1674 if (!cmd_en && !timer_active)
1675 return 0;
1676 udelay(10);
1677 } while (++count < 10000); /* wait upto 100msec */
1678 EFX_ERR(efx, "timed out waiting for SPI\n");
1679 return -ETIMEDOUT;
1680}
1681
1682static int
1683falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
1684 unsigned int address, unsigned int addr_len,
1685 void *data, unsigned int len)
1686{
1687 efx_oword_t reg;
1688 int rc;
1689
1690 BUG_ON(len > FALCON_SPI_MAX_LEN);
1691
1692 /* Check SPI not currently being accessed */
1693 rc = falcon_spi_wait(efx);
1694 if (rc)
1695 return rc;
1696
1697 /* Program address register */
1698 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
1699 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
1700
1701 /* Issue read command */
1702 EFX_POPULATE_OWORD_7(reg,
1703 EE_SPI_HCMD_CMD_EN, 1,
1704 EE_SPI_HCMD_SF_SEL, device_id,
1705 EE_SPI_HCMD_DABCNT, len,
1706 EE_SPI_HCMD_READ, EE_SPI_READ,
1707 EE_SPI_HCMD_DUBCNT, 0,
1708 EE_SPI_HCMD_ADBCNT, addr_len,
1709 EE_SPI_HCMD_ENC, command);
1710 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
1711
1712 /* Wait for read to complete */
1713 rc = falcon_spi_wait(efx);
1714 if (rc)
1715 return rc;
1716
1717 /* Read data */
1718 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
1719 memcpy(data, &reg, len);
1720 return 0;
1721}
1722
1723/**************************************************************************
1724 *
1725 * MAC wrapper
1726 *
1727 **************************************************************************
1728 */
1729void falcon_drain_tx_fifo(struct efx_nic *efx)
1730{
1731 efx_oword_t temp;
1732 int count;
1733
1734 if (FALCON_REV(efx) < FALCON_REV_B0)
1735 return;
1736
1737 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1738 /* There is no point in draining more than once */
1739 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1740 return;
1741
1742 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1743 * the drain sequence with the statistics fetch */
1744 spin_lock(&efx->stats_lock);
1745
1746 EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
1747 falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
1748
1749 /* Reset the MAC and EM block. */
1750 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1751 EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
1752 EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
1753 EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
1754 falcon_write(efx, &temp, GLB_CTL_REG_KER);
1755
1756 count = 0;
1757 while (1) {
1758 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1759 if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
1760 !EFX_OWORD_FIELD(temp, RST_XGRX) &&
1761 !EFX_OWORD_FIELD(temp, RST_EM)) {
1762 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1763 count);
1764 break;
1765 }
1766 if (count > 20) {
1767 EFX_ERR(efx, "MAC reset failed\n");
1768 break;
1769 }
1770 count++;
1771 udelay(10);
1772 }
1773
1774 spin_unlock(&efx->stats_lock);
1775
1776 /* If we've reset the EM block and the link is up, then
1777 * we'll have to kick the XAUI link so the PHY can recover */
1778 if (efx->link_up && EFX_WORKAROUND_5147(efx))
1779 falcon_reset_xaui(efx);
1780}
1781
1782void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1783{
1784 efx_oword_t temp;
1785
1786 if (FALCON_REV(efx) < FALCON_REV_B0)
1787 return;
1788
1789 /* Isolate the MAC -> RX */
1790 falcon_read(efx, &temp, RX_CFG_REG_KER);
1791 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
1792 falcon_write(efx, &temp, RX_CFG_REG_KER);
1793
1794 if (!efx->link_up)
1795 falcon_drain_tx_fifo(efx);
1796}
1797
1798void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1799{
1800 efx_oword_t reg;
1801 int link_speed;
1802 unsigned int tx_fc;
1803
1804 if (efx->link_options & GM_LPA_10000)
1805 link_speed = 0x3;
1806 else if (efx->link_options & GM_LPA_1000)
1807 link_speed = 0x2;
1808 else if (efx->link_options & GM_LPA_100)
1809 link_speed = 0x1;
1810 else
1811 link_speed = 0x0;
1812 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1813 * as advertised. Disable to ensure packets are not
1814 * indefinitely held and TX queue can be flushed at any point
1815 * while the link is down. */
1816 EFX_POPULATE_OWORD_5(reg,
1817 MAC_XOFF_VAL, 0xffff /* max pause time */,
1818 MAC_BCAD_ACPT, 1,
1819 MAC_UC_PROM, efx->promiscuous,
1820 MAC_LINK_STATUS, 1, /* always set */
1821 MAC_SPEED, link_speed);
1822 /* On B0, MAC backpressure can be disabled and packets get
1823 * discarded. */
1824 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1825 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
1826 !efx->link_up);
1827 }
1828
1829 falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
1830
1831 /* Restore the multicast hash registers. */
1832 falcon_set_multicast_hash(efx);
1833
1834 /* Transmission of pause frames when RX crosses the threshold is
1835 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1836 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
1837 tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
1838 falcon_read(efx, &reg, RX_CFG_REG_KER);
1839 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1840
1841 /* Unisolate the MAC -> RX */
1842 if (FALCON_REV(efx) >= FALCON_REV_B0)
1843 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
1844 falcon_write(efx, &reg, RX_CFG_REG_KER);
1845}
1846
1847int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1848{
1849 efx_oword_t reg;
1850 u32 *dma_done;
1851 int i;
1852
1853 if (disable_dma_stats)
1854 return 0;
1855
1856 /* Statistics fetch will fail if the MAC is in TX drain */
1857 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1858 efx_oword_t temp;
1859 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1860 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1861 return 0;
1862 }
1863
1864 dma_done = (efx->stats_buffer.addr + done_offset);
1865 *dma_done = FALCON_STATS_NOT_DONE;
1866 wmb(); /* ensure done flag is clear */
1867
1868 /* Initiate DMA transfer of stats */
1869 EFX_POPULATE_OWORD_2(reg,
1870 MAC_STAT_DMA_CMD, 1,
1871 MAC_STAT_DMA_ADR,
1872 efx->stats_buffer.dma_addr);
1873 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
1874
1875 /* Wait for transfer to complete */
1876 for (i = 0; i < 400; i++) {
1877 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
1878 return 0;
1879 udelay(10);
1880 }
1881
1882 EFX_ERR(efx, "timed out waiting for statistics\n");
1883 return -ETIMEDOUT;
1884}
1885
1886/**************************************************************************
1887 *
1888 * PHY access via GMII
1889 *
1890 **************************************************************************
1891 */
1892
1893/* Use the top bit of the MII PHY id to indicate the PHY type
1894 * (1G/10G), with the remaining bits as the actual PHY id.
1895 *
1896 * This allows us to avoid leaking information from the mii_if_info
1897 * structure into other data structures.
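 *
 * For example, a 1G PHY at MII address 5 is represented simply as 5,
 * while a 10G (clause 45) PHY carries FALCON_PHY_ID_10G in its top bit;
 * falcon_mdio_read()/falcon_mdio_write() mask that flag off again before
 * programming MD_ID_REG_KER.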
1898 */
1899#define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
1900#define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
1901#define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
1902#define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
1903#define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
1904
1905
1906/* Packing the clause 45 port and device fields into a single value */
1907#define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
1908#define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
1909#define MD_DEV_ADR_COMP_LBN 0
1910#define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
1911
1912
1913/* Wait for GMII access to complete */
1914static int falcon_gmii_wait(struct efx_nic *efx)
1915{
1916 efx_dword_t md_stat;
1917 int count;
1918
1919 for (count = 0; count < 1000; count++) { /* wait up to 10ms */
1920 falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
1921 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
1922 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
1923 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
1924 EFX_ERR(efx, "error from GMII access "
1925 EFX_DWORD_FMT"\n",
1926 EFX_DWORD_VAL(md_stat));
1927 return -EIO;
1928 }
1929 return 0;
1930 }
1931 udelay(10);
1932 }
1933 EFX_ERR(efx, "timed out waiting for GMII\n");
1934 return -ETIMEDOUT;
1935}
1936
1937/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
1938static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1939 int addr, int value)
1940{
1941 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
1942 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1943 efx_oword_t reg;
1944
1945 /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
1946 * chosen so that the only current user, Falcon, can take the
1947 * packed value and use it directly.
1948 * Fail to build if this assumption is broken.
1949 */
1950 BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
1951 BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
1952 BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
1953 BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
1954
1955 if (phy_id2 == PHY_ADDR_INVALID)
1956 return;
1957
1958 /* See falcon_mdio_read for an explanation. */
1959 if (!(phy_id & FALCON_PHY_ID_10G)) {
1960 int mmd = ffs(efx->phy_op->mmds) - 1;
1961 EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
1962 phy_id2 = mdio_clause45_pack(phy_id2, mmd)
1963 & FALCON_PHY_ID_ID_MASK;
1964 }
1965
1966 EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
1967 addr, value);
1968
1969 spin_lock_bh(&efx->phy_lock);
1970
1971 /* Check MII not currently being accessed */
1972 if (falcon_gmii_wait(efx) != 0)
1973 goto out;
1974
1975 /* Write the address/ID register */
1976 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
1977 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
1978
1979 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
1980 falcon_write(efx, &reg, MD_ID_REG_KER);
1981
1982 /* Write data */
1983 EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
1984 falcon_write(efx, &reg, MD_TXD_REG_KER);
1985
1986 EFX_POPULATE_OWORD_2(reg,
1987 MD_WRC, 1,
1988 MD_GC, 0);
1989 falcon_write(efx, &reg, MD_CS_REG_KER);
1990
1991 /* Wait for data to be written */
1992 if (falcon_gmii_wait(efx) != 0) {
1993 /* Abort the write operation */
1994 EFX_POPULATE_OWORD_2(reg,
1995 MD_WRC, 0,
1996 MD_GC, 1);
1997 falcon_write(efx, &reg, MD_CS_REG_KER);
1998 udelay(10);
1999 }
2000
2001 out:
2002 spin_unlock_bh(&efx->phy_lock);
2003}
2004
2005/* Reads a GMII register from a PHY connected to Falcon. If no value
2006 * could be read, -1 will be returned. */
2007static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2008{
2009 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
2010 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2011 efx_oword_t reg;
2012 int value = -1;
2013
2014 if (phy_addr == PHY_ADDR_INVALID)
2015 return -1;
2016
2017 /* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
2018 * but the generic Linux code does not make any distinction or have
2019 * any state for this.
2020 * We spot the case where someone tried to talk 22 to a 45 PHY and
2021 * redirect the request to the lowest numbered MMD as a clause45
2022 * request. This is enough to allow simple queries like id and link
2023 * state to succeed. TODO: We may need to do more in future.
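 * For example, a clause-22 read of register 1 (the BMSR) from a 10G PHY
 * is reissued as a clause-45 read of register 1 in the lowest-numbered
 * MMD advertised in phy_op->mmds, which is enough for basic link-state
 * queries.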
2024 */
2025 if (!(phy_id & FALCON_PHY_ID_10G)) {
2026 int mmd = ffs(efx->phy_op->mmds) - 1;
2027 EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
2028 phy_addr = mdio_clause45_pack(phy_addr, mmd)
2029 & FALCON_PHY_ID_ID_MASK;
2030 }
2031
2032 spin_lock_bh(&efx->phy_lock);
2033
2034 /* Check MII not currently being accessed */
2035 if (falcon_gmii_wait(efx) != 0)
2036 goto out;
2037
2038 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
2039 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
2040
2041 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
2042 falcon_write(efx, &reg, MD_ID_REG_KER);
2043
2044 /* Request data to be read */
2045 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
2046 falcon_write(efx, &reg, MD_CS_REG_KER);
2047
2048 /* Wait for data to become available */
2049 value = falcon_gmii_wait(efx);
2050 if (value == 0) {
2051 falcon_read(efx, &reg, MD_RXD_REG_KER);
2052 value = EFX_OWORD_FIELD(reg, MD_RXD);
2053 EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
2054 phy_id, addr, value);
2055 } else {
2056 /* Abort the read operation */
2057 EFX_POPULATE_OWORD_2(reg,
2058 MD_RIC, 0,
2059 MD_GC, 1);
2060 falcon_write(efx, &reg, MD_CS_REG_KER);
2061
2062 EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
2063 "error %d\n", phy_id, addr, value);
2064 }
2065
2066 out:
2067 spin_unlock_bh(&efx->phy_lock);
2068
2069 return value;
2070}
2071
2072static void falcon_init_mdio(struct mii_if_info *gmii)
2073{
2074 gmii->mdio_read = falcon_mdio_read;
2075 gmii->mdio_write = falcon_mdio_write;
2076 gmii->phy_id_mask = FALCON_PHY_ID_MASK;
2077 gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
2078}
2079
2080static int falcon_probe_phy(struct efx_nic *efx)
2081{
2082 switch (efx->phy_type) {
2083 case PHY_TYPE_10XPRESS:
2084 efx->phy_op = &falcon_tenxpress_phy_ops;
2085 break;
2086 case PHY_TYPE_XFP:
2087 efx->phy_op = &falcon_xfp_phy_ops;
2088 break;
2089 default:
2090 EFX_ERR(efx, "Unknown PHY type %d\n",
2091 efx->phy_type);
2092 return -1;
2093 }
2094 return 0;
2095}
2096
2097/* This call is responsible for hooking in the MAC and PHY operations */
2098int falcon_probe_port(struct efx_nic *efx)
2099{
2100 int rc;
2101
2102 /* Hook in PHY operations table */
2103 rc = falcon_probe_phy(efx);
2104 if (rc)
2105 return rc;
2106
2107 /* Set up GMII structure for PHY */
2108 efx->mii.supports_gmii = 1;
2109 falcon_init_mdio(&efx->mii);
2110
2111 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2112 if (FALCON_REV(efx) >= FALCON_REV_B0)
2113 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
2114 else
2115 efx->flow_control = EFX_FC_RX;
2116
2117 /* Allocate buffer for stats */
2118 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2119 FALCON_MAC_STATS_SIZE);
2120 if (rc)
2121 return rc;
2122 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
2123 (unsigned long long)efx->stats_buffer.dma_addr,
2124 efx->stats_buffer.addr,
2125 virt_to_phys(efx->stats_buffer.addr));
2126
2127 return 0;
2128}
2129
2130void falcon_remove_port(struct efx_nic *efx)
2131{
2132 falcon_free_buffer(efx, &efx->stats_buffer);
2133}
2134
2135/**************************************************************************
2136 *
2137 * Multicast filtering
2138 *
2139 **************************************************************************
2140 */
2141
2142void falcon_set_multicast_hash(struct efx_nic *efx)
2143{
2144 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2145
2146 /* Broadcast packets go through the multicast hash filter.
2147 * ether_crc_le() of the broadcast address is 0xbe2612ff
2148 * so we always add bit 0xff to the mask.
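 * (0xbe2612ff & 0xff == 0xff, i.e. the hash index is the low byte of
 * the CRC, so broadcast frames always pass the hash filter.)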
2149 */
2150 set_bit_le(0xff, mc_hash->byte);
2151
2152 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
2153 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2154}
2155
2156/**************************************************************************
2157 *
2158 * Device reset
2159 *
2160 **************************************************************************
2161 */
2162
2163/* Resets NIC to known state. This routine must be called in process
2164 * context and is allowed to sleep. */
2165int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2166{
2167 struct falcon_nic_data *nic_data = efx->nic_data;
2168 efx_oword_t glb_ctl_reg_ker;
2169 int rc;
2170
2171 EFX_LOG(efx, "performing hardware reset (%d)\n", method);
2172
2173 /* Initiate device reset */
2174 if (method == RESET_TYPE_WORLD) {
2175 rc = pci_save_state(efx->pci_dev);
2176 if (rc) {
2177 EFX_ERR(efx, "failed to backup PCI state of primary "
2178 "function prior to hardware reset\n");
2179 goto fail1;
2180 }
2181 if (FALCON_IS_DUAL_FUNC(efx)) {
2182 rc = pci_save_state(nic_data->pci_dev2);
2183 if (rc) {
2184 EFX_ERR(efx, "failed to backup PCI state of "
2185 "secondary function prior to "
2186 "hardware reset\n");
2187 goto fail2;
2188 }
2189 }
2190
2191 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2192 EXT_PHY_RST_DUR, 0x7,
2193 SWRST, 1);
2194 } else {
2195 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2196 EXCLUDE_FROM_RESET : 0);
2197
2198 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2199 EXT_PHY_RST_CTL, reset_phy,
2200 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
2201 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
2202 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
2203 EE_RST_CTL, EXCLUDE_FROM_RESET,
2204 EXT_PHY_RST_DUR, 0x7 /* 10ms */,
2205 SWRST, 1);
2206 }
2207 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2208
2209 EFX_LOG(efx, "waiting for hardware reset\n");
2210 schedule_timeout_uninterruptible(HZ / 20);
2211
2212 /* Restore PCI configuration if needed */
2213 if (method == RESET_TYPE_WORLD) {
2214 if (FALCON_IS_DUAL_FUNC(efx)) {
2215 rc = pci_restore_state(nic_data->pci_dev2);
2216 if (rc) {
2217 EFX_ERR(efx, "failed to restore PCI config for "
2218 "the secondary function\n");
2219 goto fail3;
2220 }
2221 }
2222 rc = pci_restore_state(efx->pci_dev);
2223 if (rc) {
2224 EFX_ERR(efx, "failed to restore PCI config for the "
2225 "primary function\n");
2226 goto fail4;
2227 }
2228 EFX_LOG(efx, "successfully restored PCI config\n");
2229 }
2230
2231 /* Assert that reset complete */
2232 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2233 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
2234 rc = -ETIMEDOUT;
2235 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2236 goto fail5;
2237 }
2238 EFX_LOG(efx, "hardware reset complete\n");
2239
2240 return 0;
2241
2242 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2243fail2:
2244fail3:
2245 pci_restore_state(efx->pci_dev);
2246fail1:
2247fail4:
2248fail5:
2249 return rc;
2250}
2251
2252/* Zeroes out the SRAM contents. This routine must be called in
2253 * process context and is allowed to sleep.
2254 */
2255static int falcon_reset_sram(struct efx_nic *efx)
2256{
2257 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2258 int count;
2259
2260 /* Set the SRAM wake/sleep GPIO appropriately. */
2261 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2262 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
2263 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
2264 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2265
2266 /* Initiate SRAM reset */
2267 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2268 SRAM_OOB_BT_INIT_EN, 1,
2269 SRM_NUM_BANKS_AND_BANK_SIZE, 0);
2270 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2271
2272 /* Wait for SRAM reset to complete */
2273 count = 0;
2274 do {
2275 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2276
2277 /* SRAM reset is slow; expect around 16ms */
2278 schedule_timeout_uninterruptible(HZ / 50);
2279
2280 /* Check for reset complete */
2281 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2282 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
2283 EFX_LOG(efx, "SRAM reset complete\n");
2284
2285 return 0;
2286 }
2287	} while (++count < 20);	/* wait up to 0.4 sec */
2288
2289 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2290 return -ETIMEDOUT;
2291}
2292
2293/* Extract non-volatile configuration */
2294static int falcon_probe_nvconfig(struct efx_nic *efx)
2295{
2296 struct falcon_nvconfig *nvconfig;
2297 efx_oword_t nic_stat;
2298 int device_id;
2299 unsigned addr_len;
2300 size_t offset, len;
2301 int magic_num, struct_ver, board_rev;
2302 int rc;
2303
2304 /* Find the boot device. */
2305 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2306 if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
2307 device_id = EE_SPI_FLASH;
2308 addr_len = 3;
2309 } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
2310 device_id = EE_SPI_EEPROM;
2311 addr_len = 2;
2312 } else {
2313 return -ENODEV;
2314 }
2315
2316	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;
2317
2318 /* Read the whole configuration structure into memory. */
2319 for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
2320 len = min(sizeof(*nvconfig) - offset,
2321 (size_t) FALCON_SPI_MAX_LEN);
2322 rc = falcon_spi_read(efx, device_id, SPI_READ,
2323 NVCONFIG_BASE + offset, addr_len,
2324 (char *)nvconfig + offset, len);
2325 if (rc)
2326 goto out;
2327 }
2328
2329 /* Read the MAC addresses */
2330 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2331
2332 /* Read the board configuration. */
2333 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2334 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2335
2336 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
2337 EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
2338 "therefore using defaults\n", magic_num, struct_ver);
2339 efx->phy_type = PHY_TYPE_NONE;
2340 efx->mii.phy_id = PHY_ADDR_INVALID;
2341 board_rev = 0;
2342 } else {
2343 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2344
2345 efx->phy_type = v2->port0_phy_type;
2346 efx->mii.phy_id = v2->port0_phy_addr;
2347 board_rev = le16_to_cpu(v2->board_revision);
2348 }
2349
2350 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
2351
2352 efx_set_board_info(efx, board_rev);
2353
2354 out:
2355 kfree(nvconfig);
2356 return rc;
2357}
2358
2359/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2360 * count, port speed). Set workaround and feature flags accordingly.
2361 */
2362static int falcon_probe_nic_variant(struct efx_nic *efx)
2363{
2364 efx_oword_t altera_build;
2365
2366 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2367 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
2368 EFX_ERR(efx, "Falcon FPGA not supported\n");
2369 return -ENODEV;
2370 }
2371
2372 switch (FALCON_REV(efx)) {
2373 case FALCON_REV_A0:
2374 case 0xff:
2375 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2376 return -ENODEV;
2377
2378 case FALCON_REV_A1:{
2379 efx_oword_t nic_stat;
2380
2381 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2382
2383 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2384 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2385 return -ENODEV;
2386 }
2387 if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
2388 EFX_ERR(efx, "1G mode not supported\n");
2389 return -ENODEV;
2390 }
2391 break;
2392 }
2393
2394 case FALCON_REV_B0:
2395 break;
2396
2397 default:
2398 EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
2399 return -ENODEV;
2400 }
2401
2402 return 0;
2403}
2404
2405int falcon_probe_nic(struct efx_nic *efx)
2406{
2407 struct falcon_nic_data *nic_data;
2408 int rc;
2409
2410 /* Initialise I2C interface state */
2411 efx->i2c.efx = efx;
2412 efx->i2c.op = &falcon_i2c_bit_operations;
2413 efx->i2c.sda = 1;
2414 efx->i2c.scl = 1;
2415
2416 /* Allocate storage for hardware specific data */
2417	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
2418	efx->nic_data = (void *) nic_data;
2419
2420 /* Determine number of ports etc. */
2421 rc = falcon_probe_nic_variant(efx);
2422 if (rc)
2423 goto fail1;
2424
2425 /* Probe secondary function if expected */
2426 if (FALCON_IS_DUAL_FUNC(efx)) {
2427 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2428
2429 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2430 dev))) {
2431 if (dev->bus == efx->pci_dev->bus &&
2432 dev->devfn == efx->pci_dev->devfn + 1) {
2433 nic_data->pci_dev2 = dev;
2434 break;
2435 }
2436 }
2437 if (!nic_data->pci_dev2) {
2438 EFX_ERR(efx, "failed to find secondary function\n");
2439 rc = -ENODEV;
2440 goto fail2;
2441 }
2442 }
2443
2444 /* Now we can reset the NIC */
2445 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2446 if (rc) {
2447 EFX_ERR(efx, "failed to reset NIC\n");
2448 goto fail3;
2449 }
2450
2451 /* Allocate memory for INT_KER */
2452 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2453 if (rc)
2454 goto fail4;
2455 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2456
2457 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
2458 (unsigned long long)efx->irq_status.dma_addr,
2459 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
2460
2461 /* Read in the non-volatile configuration */
2462 rc = falcon_probe_nvconfig(efx);
2463 if (rc)
2464 goto fail5;
2465
2466 return 0;
2467
2468 fail5:
2469 falcon_free_buffer(efx, &efx->irq_status);
2470 fail4:
2471 /* fall-thru */
2472 fail3:
2473 if (nic_data->pci_dev2) {
2474 pci_dev_put(nic_data->pci_dev2);
2475 nic_data->pci_dev2 = NULL;
2476 }
2477 fail2:
2478 /* fall-thru */
2479 fail1:
2480 kfree(efx->nic_data);
2481 return rc;
2482}
2483
2484/* This call performs hardware-specific global initialisation, such as
2485 * defining the descriptor cache sizes and number of RSS channels.
2486 * It does not set up any buffers, descriptor rings or event queues.
2487 */
2488int falcon_init_nic(struct efx_nic *efx)
2489{
2490 struct falcon_nic_data *data;
2491 efx_oword_t temp;
2492 unsigned thresh;
2493 int rc;
2494
2495 data = (struct falcon_nic_data *)efx->nic_data;
2496
2497 /* Set up the address region register. This is only needed
2498 * for the B0 FPGA, but since we are just pushing in the
2499 * reset defaults this may as well be unconditional. */
2500 EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
2501 ADR_REGION1, (1 << 16),
2502 ADR_REGION2, (2 << 16),
2503 ADR_REGION3, (3 << 16));
2504 falcon_write(efx, &temp, ADR_REGION_REG_KER);
2505
2506 /* Use on-chip SRAM */
2507 falcon_read(efx, &temp, NIC_STAT_REG);
2508 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
2509 falcon_write(efx, &temp, NIC_STAT_REG);
2510
2511 /* Set buffer table mode */
2512 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2513 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2514
2515 rc = falcon_reset_sram(efx);
2516 if (rc)
2517 return rc;
2518
2519 /* Set positions of descriptor caches in SRAM. */
2520 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2521 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
2522 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2523 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
2524
2525 /* Set TX descriptor cache size. */
2526 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2527 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2528 falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
2529
2530 /* Set RX descriptor cache size. Set low watermark to size-8, as
2531 * this allows most efficient prefetching.
2532 */
2533 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2534 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2535 falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
2536 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2537 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
2538
2539 /* Clear the parity enables on the TX data fifos as
2540 * they produce false parity errors because of timing issues
2541 */
2542 if (EFX_WORKAROUND_5129(efx)) {
2543 falcon_read(efx, &temp, SPARE_REG_KER);
2544 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
2545 falcon_write(efx, &temp, SPARE_REG_KER);
2546 }
2547
2548 /* Enable all the genuinely fatal interrupts. (They are still
2549 * masked by the overall interrupt mask, controlled by
2550 * falcon_interrupts()).
2551 *
2552 * Note: All other fatal interrupts are enabled
2553 */
2554 EFX_POPULATE_OWORD_3(temp,
2555 ILL_ADR_INT_KER_EN, 1,
2556 RBUF_OWN_INT_KER_EN, 1,
2557 TBUF_OWN_INT_KER_EN, 1);
2558 EFX_INVERT_OWORD(temp);
2559 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2560
2561 /* Set number of RSS queues for receive path. */
2562 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2563 if (FALCON_REV(efx) >= FALCON_REV_B0)
2564 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2565 else
2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
2567 if (EFX_WORKAROUND_7244(efx)) {
2568 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2569 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2570 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2571 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2572 }
2573 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2574
2575 falcon_setup_rss_indir_table(efx);
2576
2577	/* Set up RX. The "wait for descriptor" feature is broken and must
2578	 * be disabled. RXDP recovery shouldn't be needed, but is.
2579	 */
2580 falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
2581 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
2582 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
2583 if (EFX_WORKAROUND_5583(efx))
2584 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
2585 falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
2586
2587 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
2588 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
2589 */
2590 falcon_read(efx, &temp, TX_CFG2_REG_KER);
2591 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
2592 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
2593 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
2594 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
2595 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
2596 /* Enable SW_EV to inherit in char driver - assume harmless here */
2597 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
2598 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
2599 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
2600 /* Squash TX of packets of 16 bytes or less */
2601 if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
2602 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
2603 falcon_write(efx, &temp, TX_CFG2_REG_KER);
2604
2605 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2606 * descriptors (which is bad).
2607 */
2608 falcon_read(efx, &temp, TX_CFG_REG_KER);
2609 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
2610 falcon_write(efx, &temp, TX_CFG_REG_KER);
2611
2612 /* RX config */
2613 falcon_read(efx, &temp, RX_CFG_REG_KER);
2614 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
2615 if (EFX_WORKAROUND_7575(efx))
2616 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
2617 (3 * 4096) / 32);
2618 if (FALCON_REV(efx) >= FALCON_REV_B0)
2619 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
2620
2621 /* RX FIFO flow control thresholds */
2622 thresh = ((rx_xon_thresh_bytes >= 0) ?
2623 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
2624 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
2625 thresh = ((rx_xoff_thresh_bytes >= 0) ?
2626 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
2627 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
2628 /* RX control FIFO thresholds [32 entries] */
2629 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
2630 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
2631 falcon_write(efx, &temp, RX_CFG_REG_KER);
2632
2633 /* Set destination of both TX and RX Flush events */
2634 if (FALCON_REV(efx) >= FALCON_REV_B0) {
2635 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
2636 falcon_write(efx, &temp, DP_CTRL_REG);
2637 }
2638
2639 return 0;
2640}
2641
2642void falcon_remove_nic(struct efx_nic *efx)
2643{
2644 struct falcon_nic_data *nic_data = efx->nic_data;
2645
2646 falcon_free_buffer(efx, &efx->irq_status);
2647
2648 (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
2649
2650 /* Release the second function after the reset */
2651 if (nic_data->pci_dev2) {
2652 pci_dev_put(nic_data->pci_dev2);
2653 nic_data->pci_dev2 = NULL;
2654 }
2655
2656 /* Tear down the private nic state */
2657 kfree(efx->nic_data);
2658 efx->nic_data = NULL;
2659}
2660
2661void falcon_update_nic_stats(struct efx_nic *efx)
2662{
2663 efx_oword_t cnt;
2664
2665 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
2666 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
2667}
2668
2669/**************************************************************************
2670 *
2671 * Revision-dependent attributes used by efx.c
2672 *
2673 **************************************************************************
2674 */
2675
2676struct efx_nic_type falcon_a_nic_type = {
2677 .mem_bar = 2,
2678 .mem_map_size = 0x20000,
2679 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
2680 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
2681 .buf_tbl_base = BUF_TBL_KER_A1,
2682 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
2683 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
2684 .txd_ring_mask = FALCON_TXD_RING_MASK,
2685 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2686 .evq_size = FALCON_EVQ_SIZE,
2687 .max_dma_mask = FALCON_DMA_MASK,
2688 .tx_dma_mask = FALCON_TX_DMA_MASK,
2689 .bug5391_mask = 0xf,
2690 .rx_xoff_thresh = 2048,
2691 .rx_xon_thresh = 512,
2692 .rx_buffer_padding = 0x24,
2693 .max_interrupt_mode = EFX_INT_MODE_MSI,
2694 .phys_addr_channels = 4,
2695};
2696
2697struct efx_nic_type falcon_b_nic_type = {
2698 .mem_bar = 2,
2699	/* Map everything up to and including the RSS indirection
2700	 * table. Don't map the MSI-X table or MSI-X PBA, since Linux
2701	 * requires that they not be mapped. */
2702 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
2703 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
2704 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
2705 .buf_tbl_base = BUF_TBL_KER_B0,
2706 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
2707 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
2708 .txd_ring_mask = FALCON_TXD_RING_MASK,
2709 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2710 .evq_size = FALCON_EVQ_SIZE,
2711 .max_dma_mask = FALCON_DMA_MASK,
2712 .tx_dma_mask = FALCON_TX_DMA_MASK,
2713 .bug5391_mask = 0,
2714 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
2715 .rx_xon_thresh = 27648, /* ~3*max MTU */
2716 .rx_buffer_padding = 0,
2717 .max_interrupt_mode = EFX_INT_MODE_MSIX,
2718 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
2719 * interrupt handler only supports 32
2720 * channels */
2721};
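One plausible way these two attribute tables get bound to hardware is through a PCI id table whose driver_data points at the matching efx_nic_type. The actual association lives in efx.c, outside this listing, so the table below is a hedged sketch; EFX_VENDID_SFC, FALCON_A_P_DEVID and FALCON_B_P_DEVID are assumed to be defined elsewhere in the driver, and the table name is invented.

/* Hedged sketch (not part of falcon.c): associating PCI IDs with the
 * revision-dependent attribute tables above.
 */
static struct pci_device_id example_pci_table[] = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}	/* end of list */
};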
2722
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
new file mode 100644
index 000000000000..6117403b0c03
--- /dev/null
+++ b/drivers/net/sfc/falcon.h
@@ -0,0 +1,130 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_H
12#define EFX_FALCON_H
13
14#include "net_driver.h"
15
16/*
17 * Falcon hardware control
18 */
19
20enum falcon_revision {
21 FALCON_REV_A0 = 0,
22 FALCON_REV_A1 = 1,
23 FALCON_REV_B0 = 2,
24};
25
26#define FALCON_REV(efx) ((efx)->pci_dev->revision)
27
28extern struct efx_nic_type falcon_a_nic_type;
29extern struct efx_nic_type falcon_b_nic_type;
30
31/**************************************************************************
32 *
33 * Externs
34 *
35 **************************************************************************
36 */
37
38/* TX data path */
39extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
40extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
41extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
42extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
43extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
44
45/* RX data path */
46extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
47extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
48extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
49extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
50extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
51
52/* Event data path */
53extern int falcon_probe_eventq(struct efx_channel *channel);
54extern int falcon_init_eventq(struct efx_channel *channel);
55extern void falcon_fini_eventq(struct efx_channel *channel);
56extern void falcon_remove_eventq(struct efx_channel *channel);
57extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
58extern void falcon_eventq_read_ack(struct efx_channel *channel);
59
60/* Ports */
61extern int falcon_probe_port(struct efx_nic *efx);
62extern void falcon_remove_port(struct efx_nic *efx);
63
64/* MAC/PHY */
65extern int falcon_xaui_link_ok(struct efx_nic *efx);
66extern int falcon_dma_stats(struct efx_nic *efx,
67 unsigned int done_offset);
68extern void falcon_drain_tx_fifo(struct efx_nic *efx);
69extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
70extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
71
72/* Interrupts and test events */
73extern int falcon_init_interrupt(struct efx_nic *efx);
74extern void falcon_enable_interrupts(struct efx_nic *efx);
75extern void falcon_generate_test_event(struct efx_channel *channel,
76 unsigned int magic);
77extern void falcon_generate_interrupt(struct efx_nic *efx);
78extern void falcon_set_int_moderation(struct efx_channel *channel);
79extern void falcon_disable_interrupts(struct efx_nic *efx);
80extern void falcon_fini_interrupt(struct efx_nic *efx);
81
82/* Global Resources */
83extern int falcon_probe_nic(struct efx_nic *efx);
84extern int falcon_probe_resources(struct efx_nic *efx);
85extern int falcon_init_nic(struct efx_nic *efx);
86extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
87extern void falcon_remove_resources(struct efx_nic *efx);
88extern void falcon_remove_nic(struct efx_nic *efx);
89extern void falcon_update_nic_stats(struct efx_nic *efx);
90extern void falcon_set_multicast_hash(struct efx_nic *efx);
91extern int falcon_reset_xaui(struct efx_nic *efx);
92
93/**************************************************************************
94 *
95 * Falcon MAC stats
96 *
97 **************************************************************************
98 */
99
100#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
101#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
102
103/* Retrieve statistic from statistics block */
104#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
105 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
106 (efx)->mac_stats.efx_stat += le16_to_cpu( \
107 *((__force __le16 *) \
108 (efx->stats_buffer.addr + \
109 FALCON_STAT_OFFSET(falcon_stat)))); \
110 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
111 (efx)->mac_stats.efx_stat += le32_to_cpu( \
112 *((__force __le32 *) \
113 (efx->stats_buffer.addr + \
114 FALCON_STAT_OFFSET(falcon_stat)))); \
115 else \
116 (efx)->mac_stats.efx_stat += le64_to_cpu( \
117 *((__force __le64 *) \
118 (efx->stats_buffer.addr + \
119 FALCON_STAT_OFFSET(falcon_stat)))); \
120 } while (0)
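As a usage illustration (hedged: the real consumers live in the MAC code, the rx_bytes counter in efx->mac_stats is assumed from net_driver.h, and the helper name is invented), FALCON_STAT pairs one of the *_offset/*_WIDTH definitions from falcon_hwdefs.h with a software counter:

/* Illustrative only: accumulate the 48-bit XgRxOctets hardware counter into
 * a software rx_bytes counter after a stats DMA has completed.
 */
static inline void example_accumulate_rx_octets(struct efx_nic *efx)
{
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
}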
121
122#define FALCON_MAC_STATS_SIZE 0x100
123
124#define MAC_DATA_LBN 0
125#define MAC_DATA_WIDTH 32
126
127extern void falcon_generate_event(struct efx_channel *channel,
128 efx_qword_t *event);
129
130#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
new file mode 100644
index 000000000000..0485a63eaff6
--- /dev/null
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -0,0 +1,1135 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only) */
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* PCIE CORE ACCESS REG */
96#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
97#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
98#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
99#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
100
101/* NIC status register */
102#define NIC_STAT_REG 0x0200
103#define ONCHIP_SRAM_LBN 16
104#define ONCHIP_SRAM_WIDTH 1
105#define SF_PRST_LBN 9
106#define SF_PRST_WIDTH 1
107#define EE_PRST_LBN 8
108#define EE_PRST_WIDTH 1
109/* See pic_mode_t for decoding of this field */
110/* These bit definitions are extrapolated from the list of numerical
111 * values for STRAP_PINS.
112 */
113#define STRAP_10G_LBN 2
114#define STRAP_10G_WIDTH 1
115#define STRAP_PCIE_LBN 0
116#define STRAP_PCIE_WIDTH 1
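Each field in this file is described by a <NAME>_LBN / <NAME>_WIDTH pair that the bitfield macros paste onto the field name. As a hedged sketch mirroring what falcon_init_nic() does with this register (the helper name is invented; falcon_read/falcon_write come from falcon_io.h):

/* Illustrative only: EFX_SET_OWORD_FIELD(reg, ONCHIP_SRAM, 1) resolves to
 * ONCHIP_SRAM_LBN and ONCHIP_SRAM_WIDTH to place the value in the oword.
 */
static inline void example_select_onchip_sram(struct efx_nic *efx)
{
	efx_oword_t reg;

	falcon_read(efx, &reg, NIC_STAT_REG);
	EFX_SET_OWORD_FIELD(reg, ONCHIP_SRAM, 1);
	falcon_write(efx, &reg, NIC_STAT_REG);
}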
117
118/* GPIO control register */
119#define GPIO_CTL_REG_KER 0x0210
120#define GPIO_OUTPUTS_LBN (16)
121#define GPIO_OUTPUTS_WIDTH (4)
122#define GPIO_INPUTS_LBN (8)
123#define GPIO_DIRECTION_LBN (24)
124#define GPIO_DIRECTION_WIDTH (4)
125#define GPIO_DIRECTION_OUT (1)
126#define GPIO_SRAM_SLEEP (1 << 1)
127
128#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
129#define GPIO3_OEN_WIDTH 1
130#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
131#define GPIO2_OEN_WIDTH 1
132#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
133#define GPIO1_OEN_WIDTH 1
134#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
135#define GPIO0_OEN_WIDTH 1
136
137#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
138#define GPIO3_OUT_WIDTH 1
139#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
140#define GPIO2_OUT_WIDTH 1
141#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
142#define GPIO1_OUT_WIDTH 1
143#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
144#define GPIO0_OUT_WIDTH 1
145
146#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
147#define GPIO3_IN_WIDTH 1
148#define GPIO2_IN_WIDTH 1
149#define GPIO1_IN_WIDTH 1
150#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
151#define GPIO0_IN_WIDTH 1
152
153/* Global control register */
154#define GLB_CTL_REG_KER 0x0220
155#define EXT_PHY_RST_CTL_LBN 63
156#define EXT_PHY_RST_CTL_WIDTH 1
157#define PCIE_SD_RST_CTL_LBN 61
158#define PCIE_SD_RST_CTL_WIDTH 1
159
160#define PCIE_NSTCK_RST_CTL_LBN 58
161#define PCIE_NSTCK_RST_CTL_WIDTH 1
162#define PCIE_CORE_RST_CTL_LBN 57
163#define PCIE_CORE_RST_CTL_WIDTH 1
164#define EE_RST_CTL_LBN 49
165#define EE_RST_CTL_WIDTH 1
166#define RST_XGRX_LBN 24
167#define RST_XGRX_WIDTH 1
168#define RST_XGTX_LBN 23
169#define RST_XGTX_WIDTH 1
170#define RST_EM_LBN 22
171#define RST_EM_WIDTH 1
172#define EXT_PHY_RST_DUR_LBN 1
173#define EXT_PHY_RST_DUR_WIDTH 3
174#define SWRST_LBN 0
175#define SWRST_WIDTH 1
176#define INCLUDE_IN_RESET 0
177#define EXCLUDE_FROM_RESET 1
178
179/* Fatal interrupt register */
180#define FATAL_INTR_REG_KER 0x0230
181#define RBUF_OWN_INT_KER_EN_LBN 39
182#define RBUF_OWN_INT_KER_EN_WIDTH 1
183#define TBUF_OWN_INT_KER_EN_LBN 38
184#define TBUF_OWN_INT_KER_EN_WIDTH 1
185#define ILL_ADR_INT_KER_EN_LBN 33
186#define ILL_ADR_INT_KER_EN_WIDTH 1
187#define MEM_PERR_INT_KER_LBN 8
188#define MEM_PERR_INT_KER_WIDTH 1
189#define INT_KER_ERROR_LBN 0
190#define INT_KER_ERROR_WIDTH 12
191
192#define DP_CTRL_REG 0x250
193#define FLS_EVQ_ID_LBN 0
194#define FLS_EVQ_ID_WIDTH 11
195
196#define MEM_STAT_REG_KER 0x260
197
198/* Debug probe register */
199#define DEBUG_BLK_SEL_MISC 7
200#define DEBUG_BLK_SEL_SERDES 6
201#define DEBUG_BLK_SEL_EM 5
202#define DEBUG_BLK_SEL_SR 4
203#define DEBUG_BLK_SEL_EV 3
204#define DEBUG_BLK_SEL_RX 2
205#define DEBUG_BLK_SEL_TX 1
206#define DEBUG_BLK_SEL_BIU 0
207
208/* FPGA build version */
209#define ALTERA_BUILD_REG_KER 0x0300
210#define VER_ALL_LBN 0
211#define VER_ALL_WIDTH 32
212
213/* Spare EEPROM bits register (flash 0x390) */
214#define SPARE_REG_KER 0x310
215#define MEM_PERR_EN_TX_DATA_LBN 72
216#define MEM_PERR_EN_TX_DATA_WIDTH 2
217
218/* Timer table for kernel access */
219#define TIMER_CMD_REG_KER 0x420
220#define TIMER_MODE_LBN 12
221#define TIMER_MODE_WIDTH 2
222#define TIMER_MODE_DIS 0
223#define TIMER_MODE_INT_HLDOFF 2
224#define TIMER_VAL_LBN 0
225#define TIMER_VAL_WIDTH 12
226
227/* Driver generated event register */
228#define DRV_EV_REG_KER 0x440
229#define DRV_EV_QID_LBN 64
230#define DRV_EV_QID_WIDTH 12
231#define DRV_EV_DATA_LBN 0
232#define DRV_EV_DATA_WIDTH 64
233
234/* Buffer table configuration register */
235#define BUF_TBL_CFG_REG_KER 0x600
236#define BUF_TBL_MODE_LBN 3
237#define BUF_TBL_MODE_WIDTH 1
238#define BUF_TBL_MODE_HALF 0
239#define BUF_TBL_MODE_FULL 1
240
241/* SRAM receive descriptor cache configuration register */
242#define SRM_RX_DC_CFG_REG_KER 0x610
243#define SRM_RX_DC_BASE_ADR_LBN 0
244#define SRM_RX_DC_BASE_ADR_WIDTH 21
245
246/* SRAM transmit descriptor cache configuration register */
247#define SRM_TX_DC_CFG_REG_KER 0x620
248#define SRM_TX_DC_BASE_ADR_LBN 0
249#define SRM_TX_DC_BASE_ADR_WIDTH 21
250
251/* SRAM configuration register */
252#define SRM_CFG_REG_KER 0x630
253#define SRAM_OOB_BT_INIT_EN_LBN 3
254#define SRAM_OOB_BT_INIT_EN_WIDTH 1
255#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
256#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
257#define SRM_NB_BSZ_1BANKS_2M 0
258#define SRM_NB_BSZ_1BANKS_4M 1
259#define SRM_NB_BSZ_1BANKS_8M 2
260#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
261#define SRM_NB_BSZ_2BANKS_4M 4
262#define SRM_NB_BSZ_2BANKS_8M 5
263#define SRM_NB_BSZ_2BANKS_16M 6
264#define SRM_NB_BSZ_RESERVED 7
265
266/* Special buffer table update register */
267#define BUF_TBL_UPD_REG_KER 0x0650
268#define BUF_UPD_CMD_LBN 63
269#define BUF_UPD_CMD_WIDTH 1
270#define BUF_CLR_CMD_LBN 62
271#define BUF_CLR_CMD_WIDTH 1
272#define BUF_CLR_END_ID_LBN 32
273#define BUF_CLR_END_ID_WIDTH 20
274#define BUF_CLR_START_ID_LBN 0
275#define BUF_CLR_START_ID_WIDTH 20
276
277/* Receive configuration register */
278#define RX_CFG_REG_KER 0x800
279
280/* B0 */
281#define RX_INGR_EN_B0_LBN 47
282#define RX_INGR_EN_B0_WIDTH 1
283#define RX_DESC_PUSH_EN_B0_LBN 43
284#define RX_DESC_PUSH_EN_B0_WIDTH 1
285#define RX_XON_TX_TH_B0_LBN 33
286#define RX_XON_TX_TH_B0_WIDTH 5
287#define RX_XOFF_TX_TH_B0_LBN 28
288#define RX_XOFF_TX_TH_B0_WIDTH 5
289#define RX_USR_BUF_SIZE_B0_LBN 19
290#define RX_USR_BUF_SIZE_B0_WIDTH 9
291#define RX_XON_MAC_TH_B0_LBN 10
292#define RX_XON_MAC_TH_B0_WIDTH 9
293#define RX_XOFF_MAC_TH_B0_LBN 1
294#define RX_XOFF_MAC_TH_B0_WIDTH 9
295#define RX_XOFF_MAC_EN_B0_LBN 0
296#define RX_XOFF_MAC_EN_B0_WIDTH 1
297
298/* A1 */
299#define RX_DESC_PUSH_EN_A1_LBN 35
300#define RX_DESC_PUSH_EN_A1_WIDTH 1
301#define RX_XON_TX_TH_A1_LBN 25
302#define RX_XON_TX_TH_A1_WIDTH 5
303#define RX_XOFF_TX_TH_A1_LBN 20
304#define RX_XOFF_TX_TH_A1_WIDTH 5
305#define RX_USR_BUF_SIZE_A1_LBN 11
306#define RX_USR_BUF_SIZE_A1_WIDTH 9
307#define RX_XON_MAC_TH_A1_LBN 6
308#define RX_XON_MAC_TH_A1_WIDTH 5
309#define RX_XOFF_MAC_TH_A1_LBN 1
310#define RX_XOFF_MAC_TH_A1_WIDTH 5
311#define RX_XOFF_MAC_EN_A1_LBN 0
312#define RX_XOFF_MAC_EN_A1_WIDTH 1
313
314/* Receive filter control register */
315#define RX_FILTER_CTL_REG 0x810
316#define UDP_FULL_SRCH_LIMIT_LBN 32
317#define UDP_FULL_SRCH_LIMIT_WIDTH 8
318#define NUM_KER_LBN 24
319#define NUM_KER_WIDTH 2
320#define UDP_WILD_SRCH_LIMIT_LBN 16
321#define UDP_WILD_SRCH_LIMIT_WIDTH 8
322#define TCP_WILD_SRCH_LIMIT_LBN 8
323#define TCP_WILD_SRCH_LIMIT_WIDTH 8
324#define TCP_FULL_SRCH_LIMIT_LBN 0
325#define TCP_FULL_SRCH_LIMIT_WIDTH 8
326
327/* RX queue flush register */
328#define RX_FLUSH_DESCQ_REG_KER 0x0820
329#define RX_FLUSH_DESCQ_CMD_LBN 24
330#define RX_FLUSH_DESCQ_CMD_WIDTH 1
331#define RX_FLUSH_DESCQ_LBN 0
332#define RX_FLUSH_DESCQ_WIDTH 12
333
334/* Receive descriptor update register */
335#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
336#define RX_DESC_WPTR_DWORD_LBN 0
337#define RX_DESC_WPTR_DWORD_WIDTH 12
338
339/* Receive descriptor cache configuration register */
340#define RX_DC_CFG_REG_KER 0x840
341#define RX_DC_SIZE_LBN 0
342#define RX_DC_SIZE_WIDTH 2
343
344#define RX_DC_PF_WM_REG_KER 0x850
345#define RX_DC_PF_LWM_LBN 0
346#define RX_DC_PF_LWM_WIDTH 6
347
348/* RX no descriptor drop counter */
349#define RX_NODESC_DROP_REG_KER 0x880
350#define RX_NODESC_DROP_CNT_LBN 0
351#define RX_NODESC_DROP_CNT_WIDTH 16
352
353/* RX black magic register */
354#define RX_SELF_RST_REG_KER 0x890
355#define RX_ISCSI_DIS_LBN 17
356#define RX_ISCSI_DIS_WIDTH 1
357#define RX_NODESC_WAIT_DIS_LBN 9
358#define RX_NODESC_WAIT_DIS_WIDTH 1
359#define RX_RECOVERY_EN_LBN 8
360#define RX_RECOVERY_EN_WIDTH 1
361
362/* TX queue flush register */
363#define TX_FLUSH_DESCQ_REG_KER 0x0a00
364#define TX_FLUSH_DESCQ_CMD_LBN 12
365#define TX_FLUSH_DESCQ_CMD_WIDTH 1
366#define TX_FLUSH_DESCQ_LBN 0
367#define TX_FLUSH_DESCQ_WIDTH 12
368
369/* Transmit descriptor update register */
370#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
371#define TX_DESC_WPTR_DWORD_LBN 0
372#define TX_DESC_WPTR_DWORD_WIDTH 12
373
374/* Transmit descriptor cache configuration register */
375#define TX_DC_CFG_REG_KER 0xa20
376#define TX_DC_SIZE_LBN 0
377#define TX_DC_SIZE_WIDTH 2
378
379/* Transmit checksum configuration register (A0/A1 only) */
380#define TX_CHKSM_CFG_REG_KER_A1 0xa30
381
382/* Transmit configuration register */
383#define TX_CFG_REG_KER 0xa50
384#define TX_NO_EOP_DISC_EN_LBN 5
385#define TX_NO_EOP_DISC_EN_WIDTH 1
386
387/* Transmit configuration register 2 */
388#define TX_CFG2_REG_KER 0xa80
389#define TX_CSR_PUSH_EN_LBN 89
390#define TX_CSR_PUSH_EN_WIDTH 1
391#define TX_RX_SPACER_LBN 64
392#define TX_RX_SPACER_WIDTH 8
393#define TX_SW_EV_EN_LBN 59
394#define TX_SW_EV_EN_WIDTH 1
395#define TX_RX_SPACER_EN_LBN 57
396#define TX_RX_SPACER_EN_WIDTH 1
397#define TX_PREF_THRESHOLD_LBN 19
398#define TX_PREF_THRESHOLD_WIDTH 2
399#define TX_ONE_PKT_PER_Q_LBN 18
400#define TX_ONE_PKT_PER_Q_WIDTH 1
401#define TX_DIS_NON_IP_EV_LBN 17
402#define TX_DIS_NON_IP_EV_WIDTH 1
403#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
404#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
405
406/* PHY management transmit data register */
407#define MD_TXD_REG_KER 0xc00
408#define MD_TXD_LBN 0
409#define MD_TXD_WIDTH 16
410
411/* PHY management receive data register */
412#define MD_RXD_REG_KER 0xc10
413#define MD_RXD_LBN 0
414#define MD_RXD_WIDTH 16
415
416/* PHY management configuration & status register */
417#define MD_CS_REG_KER 0xc20
418#define MD_GC_LBN 4
419#define MD_GC_WIDTH 1
420#define MD_RIC_LBN 2
421#define MD_RIC_WIDTH 1
422#define MD_RDC_LBN 1
423#define MD_RDC_WIDTH 1
424#define MD_WRC_LBN 0
425#define MD_WRC_WIDTH 1
426
427/* PHY management PHY address register */
428#define MD_PHY_ADR_REG_KER 0xc30
429#define MD_PHY_ADR_LBN 0
430#define MD_PHY_ADR_WIDTH 16
431
432/* PHY management ID register */
433#define MD_ID_REG_KER 0xc40
434#define MD_PRT_ADR_LBN 11
435#define MD_PRT_ADR_WIDTH 5
436#define MD_DEV_ADR_LBN 6
437#define MD_DEV_ADR_WIDTH 5
438/* Used for writing both at once */
439#define MD_PRT_DEV_ADR_LBN 6
440#define MD_PRT_DEV_ADR_WIDTH 10
441
442/* PHY management status & mask register (DWORD read only) */
443#define MD_STAT_REG_KER 0xc50
444#define MD_BSERR_LBN 2
445#define MD_BSERR_WIDTH 1
446#define MD_LNFL_LBN 1
447#define MD_LNFL_WIDTH 1
448#define MD_BSY_LBN 0
449#define MD_BSY_WIDTH 1
450
451/* Port 0 and 1 MAC stats registers */
452#define MAC0_STAT_DMA_REG_KER 0xc60
453#define MAC_STAT_DMA_CMD_LBN 48
454#define MAC_STAT_DMA_CMD_WIDTH 1
455#define MAC_STAT_DMA_ADR_LBN 0
456#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
457
458/* Port 0 and 1 MAC control registers */
459#define MAC0_CTRL_REG_KER 0xc80
460#define MAC_XOFF_VAL_LBN 16
461#define MAC_XOFF_VAL_WIDTH 16
462#define TXFIFO_DRAIN_EN_B0_LBN 7
463#define TXFIFO_DRAIN_EN_B0_WIDTH 1
464#define MAC_BCAD_ACPT_LBN 4
465#define MAC_BCAD_ACPT_WIDTH 1
466#define MAC_UC_PROM_LBN 3
467#define MAC_UC_PROM_WIDTH 1
468#define MAC_LINK_STATUS_LBN 2
469#define MAC_LINK_STATUS_WIDTH 1
470#define MAC_SPEED_LBN 0
471#define MAC_SPEED_WIDTH 2
472
473/* 10G XAUI XGXS default values */
474#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
475#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
476#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
477
478/* Multicast address hash table */
479#define MAC_MCAST_HASH_REG0_KER 0xca0
480#define MAC_MCAST_HASH_REG1_KER 0xcb0
481
482/* GMAC registers */
483#define FALCON_GMAC_REGBANK 0xe00
484#define FALCON_GMAC_REGBANK_SIZE 0x200
485#define FALCON_GMAC_REG_SIZE 0x10
486
487/* XMAC registers */
488#define FALCON_XMAC_REGBANK 0x1200
489#define FALCON_XMAC_REGBANK_SIZE 0x200
490#define FALCON_XMAC_REG_SIZE 0x10
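These three constants describe a bank of fixed-stride registers. A hedged sketch of the address arithmetic the MAC code is expected to use (the macro name is invented):

/* Illustrative only: XMAC registers are spaced FALCON_XMAC_REG_SIZE bytes
 * apart within the bank starting at FALCON_XMAC_REGBANK.
 */
#define EXAMPLE_XMAC_REG_OFFSET(mac_reg) \
	(FALCON_XMAC_REGBANK + (mac_reg) * FALCON_XMAC_REG_SIZE)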
491
492/* XGMAC address register low */
493#define XM_ADR_LO_REG_MAC 0x00
494#define XM_ADR_3_LBN 24
495#define XM_ADR_3_WIDTH 8
496#define XM_ADR_2_LBN 16
497#define XM_ADR_2_WIDTH 8
498#define XM_ADR_1_LBN 8
499#define XM_ADR_1_WIDTH 8
500#define XM_ADR_0_LBN 0
501#define XM_ADR_0_WIDTH 8
502
503/* XGMAC address register high */
504#define XM_ADR_HI_REG_MAC 0x01
505#define XM_ADR_5_LBN 8
506#define XM_ADR_5_WIDTH 8
507#define XM_ADR_4_LBN 0
508#define XM_ADR_4_WIDTH 8
509
510/* XGMAC global configuration */
511#define XM_GLB_CFG_REG_MAC 0x02
512#define XM_RX_STAT_EN_LBN 11
513#define XM_RX_STAT_EN_WIDTH 1
514#define XM_TX_STAT_EN_LBN 10
515#define XM_TX_STAT_EN_WIDTH 1
516#define XM_RX_JUMBO_MODE_LBN 6
517#define XM_RX_JUMBO_MODE_WIDTH 1
518#define XM_INTCLR_MODE_LBN 3
519#define XM_INTCLR_MODE_WIDTH 1
520#define XM_CORE_RST_LBN 0
521#define XM_CORE_RST_WIDTH 1
522
523/* XGMAC transmit configuration */
524#define XM_TX_CFG_REG_MAC 0x03
525#define XM_IPG_LBN 16
526#define XM_IPG_WIDTH 4
527#define XM_FCNTL_LBN 10
528#define XM_FCNTL_WIDTH 1
529#define XM_TXCRC_LBN 8
530#define XM_TXCRC_WIDTH 1
531#define XM_AUTO_PAD_LBN 5
532#define XM_AUTO_PAD_WIDTH 1
533#define XM_TX_PRMBL_LBN 2
534#define XM_TX_PRMBL_WIDTH 1
535#define XM_TXEN_LBN 1
536#define XM_TXEN_WIDTH 1
537
538/* XGMAC receive configuration */
539#define XM_RX_CFG_REG_MAC 0x04
540#define XM_PASS_CRC_ERR_LBN 25
541#define XM_PASS_CRC_ERR_WIDTH 1
542#define XM_ACPT_ALL_MCAST_LBN 11
543#define XM_ACPT_ALL_MCAST_WIDTH 1
544#define XM_ACPT_ALL_UCAST_LBN 9
545#define XM_ACPT_ALL_UCAST_WIDTH 1
546#define XM_AUTO_DEPAD_LBN 8
547#define XM_AUTO_DEPAD_WIDTH 1
548#define XM_RXEN_LBN 1
549#define XM_RXEN_WIDTH 1
550
551/* XGMAC management interrupt mask register */
552#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
553#define XM_MSK_PRMBLE_ERR_LBN 2
554#define XM_MSK_PRMBLE_ERR_WIDTH 1
555#define XM_MSK_RMTFLT_LBN 1
556#define XM_MSK_RMTFLT_WIDTH 1
557#define XM_MSK_LCLFLT_LBN 0
558#define XM_MSK_LCLFLT_WIDTH 1
559
560/* XGMAC flow control register */
561#define XM_FC_REG_MAC 0x7
562#define XM_PAUSE_TIME_LBN 16
563#define XM_PAUSE_TIME_WIDTH 16
564#define XM_DIS_FCNTL_LBN 0
565#define XM_DIS_FCNTL_WIDTH 1
566
567/* XGMAC pause time count register */
568#define XM_PAUSE_TIME_REG_MAC 0x9
569
570/* XGMAC transmit parameter register */
571#define XM_TX_PARAM_REG_MAC 0x0d
572#define XM_TX_JUMBO_MODE_LBN 31
573#define XM_TX_JUMBO_MODE_WIDTH 1
574#define XM_MAX_TX_FRM_SIZE_LBN 16
575#define XM_MAX_TX_FRM_SIZE_WIDTH 14
576
577/* XGMAC receive parameter register */
578#define XM_RX_PARAM_REG_MAC 0x0e
579#define XM_MAX_RX_FRM_SIZE_LBN 0
580#define XM_MAX_RX_FRM_SIZE_WIDTH 14
581
582/* XGMAC management interrupt status register */
583#define XM_MGT_INT_REG_MAC_B0 0x0f
584#define XM_PRMBLE_ERR 2
585#define XM_PRMBLE_WIDTH 1
586#define XM_RMTFLT_LBN 1
587#define XM_RMTFLT_WIDTH 1
588#define XM_LCLFLT_LBN 0
589#define XM_LCLFLT_WIDTH 1
590
591/* XGXS/XAUI powerdown/reset register */
592#define XX_PWR_RST_REG_MAC 0x10
593
594#define XX_PWRDND_EN_LBN 15
595#define XX_PWRDND_EN_WIDTH 1
596#define XX_PWRDNC_EN_LBN 14
597#define XX_PWRDNC_EN_WIDTH 1
598#define XX_PWRDNB_EN_LBN 13
599#define XX_PWRDNB_EN_WIDTH 1
600#define XX_PWRDNA_EN_LBN 12
601#define XX_PWRDNA_EN_WIDTH 1
602#define XX_RSTPLLCD_EN_LBN 9
603#define XX_RSTPLLCD_EN_WIDTH 1
604#define XX_RSTPLLAB_EN_LBN 8
605#define XX_RSTPLLAB_EN_WIDTH 1
606#define XX_RESETD_EN_LBN 7
607#define XX_RESETD_EN_WIDTH 1
608#define XX_RESETC_EN_LBN 6
609#define XX_RESETC_EN_WIDTH 1
610#define XX_RESETB_EN_LBN 5
611#define XX_RESETB_EN_WIDTH 1
612#define XX_RESETA_EN_LBN 4
613#define XX_RESETA_EN_WIDTH 1
614#define XX_RSTXGXSRX_EN_LBN 2
615#define XX_RSTXGXSRX_EN_WIDTH 1
616#define XX_RSTXGXSTX_EN_LBN 1
617#define XX_RSTXGXSTX_EN_WIDTH 1
618#define XX_RST_XX_EN_LBN 0
619#define XX_RST_XX_EN_WIDTH 1
620
621/* XGXS/XAUI powerdown/reset control register */
622#define XX_SD_CTL_REG_MAC 0x11
623#define XX_HIDRVD_LBN 15
624#define XX_HIDRVD_WIDTH 1
625#define XX_LODRVD_LBN 14
626#define XX_LODRVD_WIDTH 1
627#define XX_HIDRVC_LBN 13
628#define XX_HIDRVC_WIDTH 1
629#define XX_LODRVC_LBN 12
630#define XX_LODRVC_WIDTH 1
631#define XX_HIDRVB_LBN 11
632#define XX_HIDRVB_WIDTH 1
633#define XX_LODRVB_LBN 10
634#define XX_LODRVB_WIDTH 1
635#define XX_HIDRVA_LBN 9
636#define XX_HIDRVA_WIDTH 1
637#define XX_LODRVA_LBN 8
638#define XX_LODRVA_WIDTH 1
639
640#define XX_TXDRV_CTL_REG_MAC 0x12
641#define XX_DEQD_LBN 28
642#define XX_DEQD_WIDTH 4
643#define XX_DEQC_LBN 24
644#define XX_DEQC_WIDTH 4
645#define XX_DEQB_LBN 20
646#define XX_DEQB_WIDTH 4
647#define XX_DEQA_LBN 16
648#define XX_DEQA_WIDTH 4
649#define XX_DTXD_LBN 12
650#define XX_DTXD_WIDTH 4
651#define XX_DTXC_LBN 8
652#define XX_DTXC_WIDTH 4
653#define XX_DTXB_LBN 4
654#define XX_DTXB_WIDTH 4
655#define XX_DTXA_LBN 0
656#define XX_DTXA_WIDTH 4
657
658/* XAUI XGXS core status register */
659#define XX_FORCE_SIG_DECODE_FORCED 0xff
660#define XX_CORE_STAT_REG_MAC 0x16
661#define XX_ALIGN_DONE_LBN 20
662#define XX_ALIGN_DONE_WIDTH 1
663#define XX_SYNC_STAT_LBN 16
664#define XX_SYNC_STAT_WIDTH 4
665#define XX_SYNC_STAT_DECODE_SYNCED 0xf
666#define XX_COMMA_DET_LBN 12
667#define XX_COMMA_DET_WIDTH 4
668#define XX_COMMA_DET_DECODE_DETECTED 0xf
669#define XX_COMMA_DET_RESET 0xf
670#define XX_CHARERR_LBN 4
671#define XX_CHARERR_WIDTH 4
672#define XX_CHARERR_RESET 0xf
673#define XX_DISPERR_LBN 0
674#define XX_DISPERR_WIDTH 4
675#define XX_DISPERR_RESET 0xf
676
677/* Receive filter table */
678#define RX_FILTER_TBL0 0xF00000
679
680/* Receive descriptor pointer table */
681#define RX_DESC_PTR_TBL_KER_A1 0x11800
682#define RX_DESC_PTR_TBL_KER_B0 0xF40000
683#define RX_DESC_PTR_TBL_KER_P0 0x900
684#define RX_ISCSI_DDIG_EN_LBN 88
685#define RX_ISCSI_DDIG_EN_WIDTH 1
686#define RX_ISCSI_HDIG_EN_LBN 87
687#define RX_ISCSI_HDIG_EN_WIDTH 1
688#define RX_DESCQ_BUF_BASE_ID_LBN 36
689#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
690#define RX_DESCQ_EVQ_ID_LBN 24
691#define RX_DESCQ_EVQ_ID_WIDTH 12
692#define RX_DESCQ_OWNER_ID_LBN 10
693#define RX_DESCQ_OWNER_ID_WIDTH 14
694#define RX_DESCQ_LABEL_LBN 5
695#define RX_DESCQ_LABEL_WIDTH 5
696#define RX_DESCQ_SIZE_LBN 3
697#define RX_DESCQ_SIZE_WIDTH 2
698#define RX_DESCQ_SIZE_4K 3
699#define RX_DESCQ_SIZE_2K 2
700#define RX_DESCQ_SIZE_1K 1
701#define RX_DESCQ_SIZE_512 0
702#define RX_DESCQ_TYPE_LBN 2
703#define RX_DESCQ_TYPE_WIDTH 1
704#define RX_DESCQ_JUMBO_LBN 1
705#define RX_DESCQ_JUMBO_WIDTH 1
706#define RX_DESCQ_EN_LBN 0
707#define RX_DESCQ_EN_WIDTH 1
708
709/* Transmit descriptor pointer table */
710#define TX_DESC_PTR_TBL_KER_A1 0x11900
711#define TX_DESC_PTR_TBL_KER_B0 0xF50000
712#define TX_DESC_PTR_TBL_KER_P0 0xa40
713#define TX_NON_IP_DROP_DIS_B0_LBN 91
714#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
715#define TX_IP_CHKSM_DIS_B0_LBN 90
716#define TX_IP_CHKSM_DIS_B0_WIDTH 1
717#define TX_TCP_CHKSM_DIS_B0_LBN 89
718#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
719#define TX_DESCQ_EN_LBN 88
720#define TX_DESCQ_EN_WIDTH 1
721#define TX_ISCSI_DDIG_EN_LBN 87
722#define TX_ISCSI_DDIG_EN_WIDTH 1
723#define TX_ISCSI_HDIG_EN_LBN 86
724#define TX_ISCSI_HDIG_EN_WIDTH 1
725#define TX_DESCQ_BUF_BASE_ID_LBN 36
726#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
727#define TX_DESCQ_EVQ_ID_LBN 24
728#define TX_DESCQ_EVQ_ID_WIDTH 12
729#define TX_DESCQ_OWNER_ID_LBN 10
730#define TX_DESCQ_OWNER_ID_WIDTH 14
731#define TX_DESCQ_LABEL_LBN 5
732#define TX_DESCQ_LABEL_WIDTH 5
733#define TX_DESCQ_SIZE_LBN 3
734#define TX_DESCQ_SIZE_WIDTH 2
735#define TX_DESCQ_SIZE_4K 3
736#define TX_DESCQ_SIZE_2K 2
737#define TX_DESCQ_SIZE_1K 1
738#define TX_DESCQ_SIZE_512 0
739#define TX_DESCQ_TYPE_LBN 1
740#define TX_DESCQ_TYPE_WIDTH 2
741
742/* Event queue pointer */
743#define EVQ_PTR_TBL_KER_A1 0x11a00
744#define EVQ_PTR_TBL_KER_B0 0xf60000
745#define EVQ_PTR_TBL_KER_P0 0x500
746#define EVQ_EN_LBN 23
747#define EVQ_EN_WIDTH 1
748#define EVQ_SIZE_LBN 20
749#define EVQ_SIZE_WIDTH 3
750#define EVQ_SIZE_32K 6
751#define EVQ_SIZE_16K 5
752#define EVQ_SIZE_8K 4
753#define EVQ_SIZE_4K 3
754#define EVQ_SIZE_2K 2
755#define EVQ_SIZE_1K 1
756#define EVQ_SIZE_512 0
757#define EVQ_BUF_BASE_ID_LBN 0
758#define EVQ_BUF_BASE_ID_WIDTH 20
759
760/* Event queue read pointer */
761#define EVQ_RPTR_REG_KER_A1 0x11b00
762#define EVQ_RPTR_REG_KER_B0 0xfa0000
763#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
764#define EVQ_RPTR_DWORD_LBN 0
765#define EVQ_RPTR_DWORD_WIDTH 14
766
767/* RSS indirection table */
768#define RX_RSS_INDIR_TBL_B0 0xFB0000
769#define RX_RSS_INDIR_ENT_B0_LBN 0
770#define RX_RSS_INDIR_ENT_B0_WIDTH 6
771
772/* Special buffer descriptors (full-mode) */
773#define BUF_FULL_TBL_KER_A1 0x8000
774#define BUF_FULL_TBL_KER_B0 0x800000
775#define IP_DAT_BUF_SIZE_LBN 50
776#define IP_DAT_BUF_SIZE_WIDTH 1
777#define IP_DAT_BUF_SIZE_8K 1
778#define IP_DAT_BUF_SIZE_4K 0
779#define BUF_ADR_REGION_LBN 48
780#define BUF_ADR_REGION_WIDTH 2
781#define BUF_ADR_FBUF_LBN 14
782#define BUF_ADR_FBUF_WIDTH 34
783#define BUF_OWNER_ID_FBUF_LBN 0
784#define BUF_OWNER_ID_FBUF_WIDTH 14
785
786/* Transmit descriptor */
787#define TX_KER_PORT_LBN 63
788#define TX_KER_PORT_WIDTH 1
789#define TX_KER_CONT_LBN 62
790#define TX_KER_CONT_WIDTH 1
791#define TX_KER_BYTE_CNT_LBN 48
792#define TX_KER_BYTE_CNT_WIDTH 14
793#define TX_KER_BUF_REGION_LBN 46
794#define TX_KER_BUF_REGION_WIDTH 2
795#define TX_KER_BUF_REGION0_DECODE 0
796#define TX_KER_BUF_REGION1_DECODE 1
797#define TX_KER_BUF_REGION2_DECODE 2
798#define TX_KER_BUF_REGION3_DECODE 3
799#define TX_KER_BUF_ADR_LBN 0
800#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
801
802/* Receive descriptor */
803#define RX_KER_BUF_SIZE_LBN 48
804#define RX_KER_BUF_SIZE_WIDTH 14
805#define RX_KER_BUF_REGION_LBN 46
806#define RX_KER_BUF_REGION_WIDTH 2
807#define RX_KER_BUF_REGION0_DECODE 0
808#define RX_KER_BUF_REGION1_DECODE 1
809#define RX_KER_BUF_REGION2_DECODE 2
810#define RX_KER_BUF_REGION3_DECODE 3
811#define RX_KER_BUF_ADR_LBN 0
812#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
813
814/**************************************************************************
815 *
816 * Falcon events
817 *
818 **************************************************************************
819 */
820
821/* Event queue entries */
822#define EV_CODE_LBN 60
823#define EV_CODE_WIDTH 4
824#define RX_IP_EV_DECODE 0
825#define TX_IP_EV_DECODE 2
826#define DRIVER_EV_DECODE 5
827#define GLOBAL_EV_DECODE 6
828#define DRV_GEN_EV_DECODE 7
829#define WHOLE_EVENT_LBN 0
830#define WHOLE_EVENT_WIDTH 64
831
832/* Receive events */
833#define RX_EV_PKT_OK_LBN 56
834#define RX_EV_PKT_OK_WIDTH 1
835#define RX_EV_PAUSE_FRM_ERR_LBN 55
836#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
837#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
838#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
839#define RX_EV_IF_FRAG_ERR_LBN 53
840#define RX_EV_IF_FRAG_ERR_WIDTH 1
841#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
842#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
843#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
844#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
845#define RX_EV_ETH_CRC_ERR_LBN 50
846#define RX_EV_ETH_CRC_ERR_WIDTH 1
847#define RX_EV_FRM_TRUNC_LBN 49
848#define RX_EV_FRM_TRUNC_WIDTH 1
849#define RX_EV_DRIB_NIB_LBN 48
850#define RX_EV_DRIB_NIB_WIDTH 1
851#define RX_EV_TOBE_DISC_LBN 47
852#define RX_EV_TOBE_DISC_WIDTH 1
853#define RX_EV_PKT_TYPE_LBN 44
854#define RX_EV_PKT_TYPE_WIDTH 3
855#define RX_EV_PKT_TYPE_ETH_DECODE 0
856#define RX_EV_PKT_TYPE_LLC_DECODE 1
857#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
858#define RX_EV_PKT_TYPE_VLAN_DECODE 3
859#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
860#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
861#define RX_EV_HDR_TYPE_LBN 42
862#define RX_EV_HDR_TYPE_WIDTH 2
863#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
864#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
865#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
866#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
867#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
868 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
869#define RX_EV_MCAST_HASH_MATCH_LBN 40
870#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
871#define RX_EV_MCAST_PKT_LBN 39
872#define RX_EV_MCAST_PKT_WIDTH 1
873#define RX_EV_Q_LABEL_LBN 32
874#define RX_EV_Q_LABEL_WIDTH 5
875#define RX_EV_JUMBO_CONT_LBN 31
876#define RX_EV_JUMBO_CONT_WIDTH 1
877#define RX_EV_BYTE_CNT_LBN 16
878#define RX_EV_BYTE_CNT_WIDTH 14
879#define RX_EV_SOP_LBN 15
880#define RX_EV_SOP_WIDTH 1
881#define RX_EV_DESC_PTR_LBN 0
882#define RX_EV_DESC_PTR_WIDTH 12
883
884/* Transmit events */
885#define TX_EV_PKT_ERR_LBN 38
886#define TX_EV_PKT_ERR_WIDTH 1
887#define TX_EV_Q_LABEL_LBN 32
888#define TX_EV_Q_LABEL_WIDTH 5
889#define TX_EV_WQ_FF_FULL_LBN 15
890#define TX_EV_WQ_FF_FULL_WIDTH 1
891#define TX_EV_COMP_LBN 12
892#define TX_EV_COMP_WIDTH 1
893#define TX_EV_DESC_PTR_LBN 0
894#define TX_EV_DESC_PTR_WIDTH 12
895
896/* Driver events */
897#define DRIVER_EV_SUB_CODE_LBN 56
898#define DRIVER_EV_SUB_CODE_WIDTH 4
899#define DRIVER_EV_SUB_DATA_LBN 0
900#define DRIVER_EV_SUB_DATA_WIDTH 14
901#define TX_DESCQ_FLS_DONE_EV_DECODE 0
902#define RX_DESCQ_FLS_DONE_EV_DECODE 1
903#define EVQ_INIT_DONE_EV_DECODE 2
904#define EVQ_NOT_EN_EV_DECODE 3
905#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
906#define SRM_UPD_DONE_EV_DECODE 5
907#define WAKE_UP_EV_DECODE 6
908#define TX_PKT_NON_TCP_UDP_DECODE 9
909#define TIMER_EV_DECODE 10
910#define RX_RECOVERY_EV_DECODE 11
911#define RX_DSC_ERROR_EV_DECODE 14
912#define TX_DSC_ERROR_EV_DECODE 15
913#define DRIVER_EV_TX_DESCQ_ID_LBN 0
914#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
915#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
916#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
917#define DRIVER_EV_RX_DESCQ_ID_LBN 0
918#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
919#define SRM_CLR_EV_DECODE 0
920#define SRM_UPD_EV_DECODE 1
921#define SRM_ILLCLR_EV_DECODE 2
922
923/* Global events */
924#define RX_RECOVERY_B0_LBN 12
925#define RX_RECOVERY_B0_WIDTH 1
926#define XG_MNT_INTR_B0_LBN 11
927#define XG_MNT_INTR_B0_WIDTH 1
928#define RX_RECOVERY_A1_LBN 11
929#define RX_RECOVERY_A1_WIDTH 1
930#define XG_PHY_INTR_LBN 9
931#define XG_PHY_INTR_WIDTH 1
932#define G_PHY1_INTR_LBN 8
933#define G_PHY1_INTR_WIDTH 1
934#define G_PHY0_INTR_LBN 7
935#define G_PHY0_INTR_WIDTH 1
936
937/* Driver-generated test events */
938#define EVQ_MAGIC_LBN 0
939#define EVQ_MAGIC_WIDTH 32
940
941/**************************************************************************
942 *
943 * Falcon MAC stats
944 *
945 **************************************************************************
946 *
947 */
948#define GRxGoodOct_offset 0x0
949#define GRxBadOct_offset 0x8
950#define GRxMissPkt_offset 0x10
951#define GRxFalseCRS_offset 0x14
952#define GRxPausePkt_offset 0x18
953#define GRxBadPkt_offset 0x1C
954#define GRxUcastPkt_offset 0x20
955#define GRxMcastPkt_offset 0x24
956#define GRxBcastPkt_offset 0x28
957#define GRxGoodLt64Pkt_offset 0x2C
958#define GRxBadLt64Pkt_offset 0x30
959#define GRx64Pkt_offset 0x34
960#define GRx65to127Pkt_offset 0x38
961#define GRx128to255Pkt_offset 0x3C
962#define GRx256to511Pkt_offset 0x40
963#define GRx512to1023Pkt_offset 0x44
964#define GRx1024to15xxPkt_offset 0x48
965#define GRx15xxtoJumboPkt_offset 0x4C
966#define GRxGtJumboPkt_offset 0x50
967#define GRxFcsErr64to15xxPkt_offset 0x54
968#define GRxFcsErr15xxtoJumboPkt_offset 0x58
969#define GRxFcsErrGtJumboPkt_offset 0x5C
970#define GTxGoodBadOct_offset 0x80
971#define GTxGoodOct_offset 0x88
972#define GTxSglColPkt_offset 0x90
973#define GTxMultColPkt_offset 0x94
974#define GTxExColPkt_offset 0x98
975#define GTxDefPkt_offset 0x9C
976#define GTxLateCol_offset 0xA0
977#define GTxExDefPkt_offset 0xA4
978#define GTxPausePkt_offset 0xA8
979#define GTxBadPkt_offset 0xAC
980#define GTxUcastPkt_offset 0xB0
981#define GTxMcastPkt_offset 0xB4
982#define GTxBcastPkt_offset 0xB8
983#define GTxLt64Pkt_offset 0xBC
984#define GTx64Pkt_offset 0xC0
985#define GTx65to127Pkt_offset 0xC4
986#define GTx128to255Pkt_offset 0xC8
987#define GTx256to511Pkt_offset 0xCC
988#define GTx512to1023Pkt_offset 0xD0
989#define GTx1024to15xxPkt_offset 0xD4
990#define GTx15xxtoJumboPkt_offset 0xD8
991#define GTxGtJumboPkt_offset 0xDC
992#define GTxNonTcpUdpPkt_offset 0xE0
993#define GTxMacSrcErrPkt_offset 0xE4
994#define GTxIpSrcErrPkt_offset 0xE8
995#define GDmaDone_offset 0xEC
996
997#define XgRxOctets_offset 0x0
998#define XgRxOctets_WIDTH 48
999#define XgRxOctetsOK_offset 0x8
1000#define XgRxOctetsOK_WIDTH 48
1001#define XgRxPkts_offset 0x10
1002#define XgRxPkts_WIDTH 32
1003#define XgRxPktsOK_offset 0x14
1004#define XgRxPktsOK_WIDTH 32
1005#define XgRxBroadcastPkts_offset 0x18
1006#define XgRxBroadcastPkts_WIDTH 32
1007#define XgRxMulticastPkts_offset 0x1C
1008#define XgRxMulticastPkts_WIDTH 32
1009#define XgRxUnicastPkts_offset 0x20
1010#define XgRxUnicastPkts_WIDTH 32
1011#define XgRxUndersizePkts_offset 0x24
1012#define XgRxUndersizePkts_WIDTH 32
1013#define XgRxOversizePkts_offset 0x28
1014#define XgRxOversizePkts_WIDTH 32
1015#define XgRxJabberPkts_offset 0x2C
1016#define XgRxJabberPkts_WIDTH 32
1017#define XgRxUndersizeFCSerrorPkts_offset 0x30
1018#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1019#define XgRxDropEvents_offset 0x34
1020#define XgRxDropEvents_WIDTH 32
1021#define XgRxFCSerrorPkts_offset 0x38
1022#define XgRxFCSerrorPkts_WIDTH 32
1023#define XgRxAlignError_offset 0x3C
1024#define XgRxAlignError_WIDTH 32
1025#define XgRxSymbolError_offset 0x40
1026#define XgRxSymbolError_WIDTH 32
1027#define XgRxInternalMACError_offset 0x44
1028#define XgRxInternalMACError_WIDTH 32
1029#define XgRxControlPkts_offset 0x48
1030#define XgRxControlPkts_WIDTH 32
1031#define XgRxPausePkts_offset 0x4C
1032#define XgRxPausePkts_WIDTH 32
1033#define XgRxPkts64Octets_offset 0x50
1034#define XgRxPkts64Octets_WIDTH 32
1035#define XgRxPkts65to127Octets_offset 0x54
1036#define XgRxPkts65to127Octets_WIDTH 32
1037#define XgRxPkts128to255Octets_offset 0x58
1038#define XgRxPkts128to255Octets_WIDTH 32
1039#define XgRxPkts256to511Octets_offset 0x5C
1040#define XgRxPkts256to511Octets_WIDTH 32
1041#define XgRxPkts512to1023Octets_offset 0x60
1042#define XgRxPkts512to1023Octets_WIDTH 32
1043#define XgRxPkts1024to15xxOctets_offset 0x64
1044#define XgRxPkts1024to15xxOctets_WIDTH 32
1045#define XgRxPkts15xxtoMaxOctets_offset 0x68
1046#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1047#define XgRxLengthError_offset 0x6C
1048#define XgRxLengthError_WIDTH 32
1049#define XgTxPkts_offset 0x80
1050#define XgTxPkts_WIDTH 32
1051#define XgTxOctets_offset 0x88
1052#define XgTxOctets_WIDTH 48
1053#define XgTxMulticastPkts_offset 0x90
1054#define XgTxMulticastPkts_WIDTH 32
1055#define XgTxBroadcastPkts_offset 0x94
1056#define XgTxBroadcastPkts_WIDTH 32
1057#define XgTxUnicastPkts_offset 0x98
1058#define XgTxUnicastPkts_WIDTH 32
1059#define XgTxControlPkts_offset 0x9C
1060#define XgTxControlPkts_WIDTH 32
1061#define XgTxPausePkts_offset 0xA0
1062#define XgTxPausePkts_WIDTH 32
1063#define XgTxPkts64Octets_offset 0xA4
1064#define XgTxPkts64Octets_WIDTH 32
1065#define XgTxPkts65to127Octets_offset 0xA8
1066#define XgTxPkts65to127Octets_WIDTH 32
1067#define XgTxPkts128to255Octets_offset 0xAC
1068#define XgTxPkts128to255Octets_WIDTH 32
1069#define XgTxPkts256to511Octets_offset 0xB0
1070#define XgTxPkts256to511Octets_WIDTH 32
1071#define XgTxPkts512to1023Octets_offset 0xB4
1072#define XgTxPkts512to1023Octets_WIDTH 32
1073#define XgTxPkts1024to15xxOctets_offset 0xB8
1074#define XgTxPkts1024to15xxOctets_WIDTH 32
1075#define XgTxPkts1519toMaxOctets_offset 0xBC
1076#define XgTxPkts1519toMaxOctets_WIDTH 32
1077#define XgTxUndersizePkts_offset 0xC0
1078#define XgTxUndersizePkts_WIDTH 32
1079#define XgTxOversizePkts_offset 0xC4
1080#define XgTxOversizePkts_WIDTH 32
1081#define XgTxNonTcpUdpPkt_offset 0xC8
1082#define XgTxNonTcpUdpPkt_WIDTH 16
1083#define XgTxMacSrcErrPkt_offset 0xCC
1084#define XgTxMacSrcErrPkt_WIDTH 16
1085#define XgTxIpSrcErrPkt_offset 0xD0
1086#define XgTxIpSrcErrPkt_WIDTH 16
1087#define XgDmaDone_offset 0xD4
1088
1089#define FALCON_STATS_NOT_DONE 0x00000000
1090#define FALCON_STATS_DONE 0xffffffff
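A hedged sketch of how these completion markers are meant to be used (the real polling lives in the MAC stats path; the helper name is invented, and efx->stats_buffer.addr is assumed to point at the DMAed statistics block set up in falcon.c above):

/* Illustrative only: the driver writes NOT_DONE into the completion word,
 * starts the MAC stats DMA, then waits for hardware to overwrite it with
 * DONE before decoding the block with FALCON_STAT().
 */
static inline int example_stats_dma_done(struct efx_nic *efx,
					 unsigned int done_offset)
{
	__le32 *done = (__le32 *)((char *)efx->stats_buffer.addr + done_offset);

	return *done == cpu_to_le32(FALCON_STATS_DONE);
}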
1091
1092/* Interrupt status register bits */
1093#define FATAL_INT_LBN 64
1094#define FATAL_INT_WIDTH 1
1095#define INT_EVQS_LBN 40
1096#define INT_EVQS_WIDTH 4
1097
1098/**************************************************************************
1099 *
1100 * Falcon non-volatile configuration
1101 *
1102 **************************************************************************
1103 */
1104
1105/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
1106struct falcon_nvconfig_board_v2 {
1107 __le16 nports;
1108 u8 port0_phy_addr;
1109 u8 port0_phy_type;
1110 u8 port1_phy_addr;
1111 u8 port1_phy_type;
1112 __le16 asic_sub_revision;
1113 __le16 board_revision;
1114} __attribute__ ((packed));
1115
1116#define NVCONFIG_BASE 0x300
1117#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1118struct falcon_nvconfig {
1119 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1120 u8 mac_address[2][8]; /* 0x310 */
1121 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1122 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1123 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1124 efx_oword_t hw_init_reg; /* 0x350 */
1125 efx_oword_t nic_stat_reg; /* 0x360 */
1126 efx_oword_t glb_ctl_reg; /* 0x370 */
1127 efx_oword_t srm_cfg_reg; /* 0x380 */
1128 efx_oword_t spare_reg; /* 0x390 */
1129 __le16 board_magic_num; /* 0x3A0 */
1130 __le16 board_struct_ver;
1131 __le16 board_checksum;
1132 struct falcon_nvconfig_board_v2 board_v2;
1133} __attribute__ ((packed));
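A sketch of how a driver might sanity-check this structure after reading it from the board EEPROM; the read itself and the version floor of 2 (v1 being obsolete, per the comment above) are assumptions for illustration:

	static int example_nvconfig_valid(const struct falcon_nvconfig *cfg)
	{
		/* Reject images without the expected magic or an obsolete layout */
		return (le16_to_cpu(cfg->board_magic_num) == NVCONFIG_BOARD_MAGIC_NUM &&
			le16_to_cpu(cfg->board_struct_ver) >= 2);
	}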
1134
1135#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
new file mode 100644
index 000000000000..ea08184ddfa9
--- /dev/null
+++ b/drivers/net/sfc/falcon_io.h
@@ -0,0 +1,243 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16#include "net_driver.h"
17
18/**************************************************************************
19 *
20 * Falcon hardware access
21 *
22 **************************************************************************
23 *
24 * Notes on locking strategy:
25 *
26 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
27 * registers) atomic writes which necessitates locking.
28 * Under normal operation few writes to the Falcon BAR are made and these
29 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
30 * cased to allow 4-byte (hence lockless) accesses.
31 *
32 * It *is* safe to write to these 4-byte registers in the middle of an
33 * access to an 8-byte or 16-byte register. We therefore use a
34 * spinlock to protect accesses to the larger registers, but no locks
35 * for the 4-byte registers.
36 *
37 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
38 * due to the way the 16byte registers are "collected" in the Falcon BIU
39 *
40 * We also lock when carrying out reads, to ensure consistency of the
41 * data (made possible since the BIU reads all 128 bits into a cache).
42 * Reads are very rare, so this isn't a significant performance
43 * impact. (Most data transferred from NIC to host is DMAed directly
44 * into host memory).
45 *
46 * I/O BAR access uses locks for both reads and writes (but is only provided
47 * for testing purposes).
48 */
49
50/* Special buffer descriptors (Falcon SRAM) */
51#define BUF_TBL_KER_A1 0x18000
52#define BUF_TBL_KER_B0 0x800000
53
54
55#if BITS_PER_LONG == 64
56#define FALCON_USE_QWORD_IO 1
57#endif
58
59#define _falcon_writeq(efx, value, reg) \
60 __raw_writeq((__force u64) (value), (efx)->membase + (reg))
61#define _falcon_writel(efx, value, reg) \
62 __raw_writel((__force u32) (value), (efx)->membase + (reg))
63#define _falcon_readq(efx, reg) \
64 ((__force __le64) __raw_readq((efx)->membase + (reg)))
65#define _falcon_readl(efx, reg) \
66 ((__force __le32) __raw_readl((efx)->membase + (reg)))
67
68/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
69static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
70 unsigned int reg)
71{
72 unsigned long flags;
73
74 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
75 EFX_OWORD_VAL(*value));
76
77 spin_lock_irqsave(&efx->biu_lock, flags);
78#ifdef FALCON_USE_QWORD_IO
79 _falcon_writeq(efx, value->u64[0], reg + 0);
80 wmb();
81 _falcon_writeq(efx, value->u64[1], reg + 8);
82#else
83 _falcon_writel(efx, value->u32[0], reg + 0);
84 _falcon_writel(efx, value->u32[1], reg + 4);
85 _falcon_writel(efx, value->u32[2], reg + 8);
86 wmb();
87 _falcon_writel(efx, value->u32[3], reg + 12);
88#endif
89 mmiowb();
90 spin_unlock_irqrestore(&efx->biu_lock, flags);
91}
92
93/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
94static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
95 unsigned int index)
96{
97 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
98 unsigned long flags;
99
100 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
101 reg, EFX_QWORD_VAL(*value));
102
103 spin_lock_irqsave(&efx->biu_lock, flags);
104#ifdef FALCON_USE_QWORD_IO
105 _falcon_writeq(efx, value->u64[0], reg + 0);
106#else
107 _falcon_writel(efx, value->u32[0], reg + 0);
108 wmb();
109 _falcon_writel(efx, value->u32[1], reg + 4);
110#endif
111 mmiowb();
112 spin_unlock_irqrestore(&efx->biu_lock, flags);
113}
114
115/* Write dword to Falcon register that allows partial writes
116 *
117 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
118 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
119 * for lockless writes.
120 */
121static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
122 unsigned int reg)
123{
124 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
125 reg, EFX_DWORD_VAL(*value));
126
127 /* No lock required */
128 _falcon_writel(efx, value->u32[0], reg);
129}
130
131/* Read from a Falcon register
132 *
133 * This reads an entire 16-byte Falcon register in one go, locking as
134 * appropriate. It is essential to read the first dword first, as this
135 * prompts Falcon to load the current value into the shadow register.
136 */
137static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
138 unsigned int reg)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&efx->biu_lock, flags);
143 value->u32[0] = _falcon_readl(efx, reg + 0);
144 rmb();
145 value->u32[1] = _falcon_readl(efx, reg + 4);
146 value->u32[2] = _falcon_readl(efx, reg + 8);
147 value->u32[3] = _falcon_readl(efx, reg + 12);
148 spin_unlock_irqrestore(&efx->biu_lock, flags);
149
150 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
151 EFX_OWORD_VAL(*value));
152}
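falcon_read() and falcon_write() are typically paired for read-modify-write of a full 16-byte register; both take biu_lock internally, so the caller needs no extra locking. A minimal sketch, where the register and field names (RX_CFG_REG_KER, RX_XOFF_MAC_EN) and the EFX_SET_OWORD_FIELD() helper from bitfield.h are used purely for illustration:

	static void example_set_xoff(struct efx_nic *efx, int enable)
	{
		efx_oword_t reg;

		falcon_read(efx, &reg, RX_CFG_REG_KER);		/* locked 16-byte read */
		EFX_SET_OWORD_FIELD(reg, RX_XOFF_MAC_EN, enable ? 1 : 0);
		falcon_write(efx, &reg, RX_CFG_REG_KER);	/* locked write, DW3 last */
	}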
153
154/* This reads an 8-byte Falcon SRAM entry in one go. */
155static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
156 unsigned int index)
157{
158 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
159 unsigned long flags;
160
161 spin_lock_irqsave(&efx->biu_lock, flags);
162#ifdef FALCON_USE_QWORD_IO
163 value->u64[0] = _falcon_readq(efx, reg + 0);
164#else
165 value->u32[0] = _falcon_readl(efx, reg + 0);
166 rmb();
167 value->u32[1] = _falcon_readl(efx, reg + 4);
168#endif
169 spin_unlock_irqrestore(&efx->biu_lock, flags);
170
171 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
172 reg, EFX_QWORD_VAL(*value));
173}
174
175/* Read dword from Falcon register that allows partial writes (sic) */
176static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
177 unsigned int reg)
178{
179 value->u32[0] = _falcon_readl(efx, reg);
180 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
181 reg, EFX_DWORD_VAL(*value));
182}
183
184/* Write to a register forming part of a table */
185static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
186 unsigned int reg, unsigned int index)
187{
188 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
189}
190
 191/* Read from a register forming part of a table */
192static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
193 unsigned int reg, unsigned int index)
194{
195 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
196}
197
198/* Write to a dword register forming part of a table */
199static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
200 unsigned int reg, unsigned int index)
201{
202 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
203}
204
205/* Page-mapped register block size */
206#define FALCON_PAGE_BLOCK_SIZE 0x2000
207
208/* Calculate offset to page-mapped register block */
209#define FALCON_PAGED_REG(page, reg) \
210 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
211
212/* As for falcon_write(), but for a page-mapped register. */
213static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
214 unsigned int reg, unsigned int page)
215{
216 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
217}
218
219/* As for falcon_writel(), but for a page-mapped register. */
220static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
221 unsigned int reg, unsigned int page)
222{
223 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
224}
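Page-mapped registers repeat every FALCON_PAGE_BLOCK_SIZE bytes, with the page number usually being a queue index. A hedged sketch of pushing a descriptor write pointer for one queue; the register and field names (RX_DESC_UPD_REG_KER_DWORD, RX_DESC_WPTR_DWORD) are illustrative assumptions:

	static void example_push_rx_wptr(struct efx_nic *efx,
					 unsigned int queue, unsigned int wptr)
	{
		efx_dword_t reg;

		EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, wptr);
		/* Resolves to queue * FALCON_PAGE_BLOCK_SIZE + register offset */
		falcon_writel_page(efx, &reg, RX_DESC_UPD_REG_KER_DWORD, queue);
	}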
225
226/* Write dword to Falcon page-mapped register with an extra lock.
227 *
228 * As for falcon_writel_page(), but for a register that suffers from
229 * SFC bug 3181. Take out a lock so the BIU collector cannot be
230 * confused. */
231static inline void falcon_writel_page_locked(struct efx_nic *efx,
232 efx_dword_t *value,
233 unsigned int reg,
234 unsigned int page)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&efx->biu_lock, flags);
239 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
240 spin_unlock_irqrestore(&efx->biu_lock, flags);
241}
242
243#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
new file mode 100644
index 000000000000..aa7521b24a5d
--- /dev/null
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -0,0 +1,585 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "falcon.h"
15#include "falcon_hwdefs.h"
16#include "falcon_io.h"
17#include "mac.h"
18#include "gmii.h"
19#include "mdio_10g.h"
20#include "phy.h"
21#include "boards.h"
22#include "workarounds.h"
23
24/**************************************************************************
25 *
26 * MAC register access
27 *
28 **************************************************************************/
29
30/* Offset of an XMAC register within Falcon */
31#define FALCON_XMAC_REG(mac_reg) \
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
33
34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg)
36{
37 efx_oword_t temp;
38
39 EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
40 falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
41}
42
43void falcon_xmac_readl(struct efx_nic *efx,
44 efx_dword_t *value, unsigned int mac_reg)
45{
46 efx_oword_t temp;
47
48 falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
49 EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
50}
51
52/**************************************************************************
53 *
54 * MAC operations
55 *
56 *************************************************************************/
57static int falcon_reset_xmac(struct efx_nic *efx)
58{
59 efx_dword_t reg;
60 int count;
61
62 EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
63 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
64
  65	for (count = 0; count < 10000; count++) {	/* wait up to 100ms */
66 falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
67 if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
68 return 0;
69 udelay(10);
70 }
71
72 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
73 return -ETIMEDOUT;
74}
75
76/* Configure the XAUI driver that is an output from Falcon */
77static void falcon_setup_xaui(struct efx_nic *efx)
78{
79 efx_dword_t sdctl, txdrv;
80
81 /* Move the XAUI into low power, unless there is no PHY, in
82 * which case the XAUI will have to drive a cable. */
83 if (efx->phy_type == PHY_TYPE_NONE)
84 return;
85
86 falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
87 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
88 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
89 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
90 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
91 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
92 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
93 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
94 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
95 falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
96
97 EFX_POPULATE_DWORD_8(txdrv,
98 XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
99 XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
100 XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
101 XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
102 XX_DTXD, XX_TXDRV_DTX_DEFAULT,
103 XX_DTXC, XX_TXDRV_DTX_DEFAULT,
104 XX_DTXB, XX_TXDRV_DTX_DEFAULT,
105 XX_DTXA, XX_TXDRV_DTX_DEFAULT);
106 falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
107}
108
109static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
110{
111 efx_dword_t reg;
112
113 EFX_ZERO_DWORD(reg);
114 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
115 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
116 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
117 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
118 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
119 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
120 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
121 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
122 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
123 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
124 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
125 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
126 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
127 udelay(10);
128}
129
130static int _falcon_reset_xaui_a(struct efx_nic *efx)
131{
132 efx_dword_t reg;
133
134 falcon_hold_xaui_in_rst(efx);
135 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
136
137 /* Follow the RAMBUS XAUI data reset sequencing
138 * Channels A and B first: power down, reset PLL, reset, clear
139 */
140 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
141 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
142 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
143 udelay(10);
144
145 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
146 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
147 udelay(10);
148
149 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
150 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
151 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
152 udelay(10);
153
154 /* Channels C and D: power down, reset PLL, reset, clear */
155 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
156 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
161 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
162 udelay(10);
163
164 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
165 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
166 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
167 udelay(10);
168
169 /* Setup XAUI */
170 falcon_setup_xaui(efx);
171 udelay(10);
172
173 /* Take XGXS out of reset */
174 EFX_ZERO_DWORD(reg);
175 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
176 udelay(10);
177
178 return 0;
179}
180
181static int _falcon_reset_xaui_b(struct efx_nic *efx)
182{
183 efx_dword_t reg;
184 int count;
185
186 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
187 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
188
189 /* Give some time for the link to establish */
 190	for (count = 0; count < 1000; count++) { /* wait up to 10ms */
191 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
192 if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
193 falcon_setup_xaui(efx);
194 return 0;
195 }
196 udelay(10);
197 }
198 EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
199 return -ETIMEDOUT;
200}
201
202int falcon_reset_xaui(struct efx_nic *efx)
203{
204 int rc;
205
206 if (EFX_WORKAROUND_9388(efx)) {
207 falcon_hold_xaui_in_rst(efx);
208 efx->phy_op->reset_xaui(efx);
209 rc = _falcon_reset_xaui_a(efx);
210 } else {
211 rc = _falcon_reset_xaui_b(efx);
212 }
213 return rc;
214}
215
216static int falcon_xgmii_status(struct efx_nic *efx)
217{
218 efx_dword_t reg;
219
220 if (FALCON_REV(efx) < FALCON_REV_B0)
221 return 1;
222
223 /* The ISR latches, so clear it and re-read */
224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
226
227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
230 return 0;
231 }
232
233 return 1;
234}
235
236static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
237{
238 efx_dword_t reg;
239
240 if (FALCON_REV(efx) < FALCON_REV_B0)
241 return;
242
243 /* Flush the ISR */
244 if (enable)
245 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
246
247 EFX_POPULATE_DWORD_2(reg,
248 XM_MSK_RMTFLT, !enable,
249 XM_MSK_LCLFLT, !enable);
250 falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
251}
252
253int falcon_init_xmac(struct efx_nic *efx)
254{
255 int rc;
256
257 /* Initialize the PHY first so the clock is around */
258 rc = efx->phy_op->init(efx);
259 if (rc)
260 goto fail1;
261
262 rc = falcon_reset_xaui(efx);
263 if (rc)
264 goto fail2;
265
266 /* Wait again. Give the PHY and MAC time to come back */
267 schedule_timeout_uninterruptible(HZ / 10);
268
269 rc = falcon_reset_xmac(efx);
270 if (rc)
271 goto fail2;
272
273 falcon_mask_status_intr(efx, 1);
274 return 0;
275
276 fail2:
277 efx->phy_op->fini(efx);
278 fail1:
279 return rc;
280}
281
282int falcon_xaui_link_ok(struct efx_nic *efx)
283{
284 efx_dword_t reg;
285 int align_done, sync_status, link_ok = 0;
286
287 /* Read link status */
288 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
289
290 align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
291 sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
292 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
293 link_ok = 1;
294
295 /* Clear link status ready for next read */
296 EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
297 EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
298 EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
299 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
300
301 /* If the link is up, then check the phy side of the xaui link
 302	 * (error conditions from the wire side propagate back through
303 * the phy to the xaui side). */
304 if (efx->link_up && link_ok) {
305 int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
306 if (has_phyxs)
307 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
308 }
309
310 /* If the PHY and XAUI links are up, then check the mac's xgmii
311 * fault state */
312 if (efx->link_up && link_ok)
313 link_ok = falcon_xgmii_status(efx);
314
315 return link_ok;
316}
317
318static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
319{
320 unsigned int max_frame_len;
321 efx_dword_t reg;
322 int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
323
324 /* Configure MAC - cut-thru mode is hard wired on */
325 EFX_POPULATE_DWORD_3(reg,
326 XM_RX_JUMBO_MODE, 1,
327 XM_TX_STAT_EN, 1,
328 XM_RX_STAT_EN, 1);
329 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
330
331 /* Configure TX */
332 EFX_POPULATE_DWORD_6(reg,
333 XM_TXEN, 1,
334 XM_TX_PRMBL, 1,
335 XM_AUTO_PAD, 1,
336 XM_TXCRC, 1,
337 XM_FCNTL, 1,
338 XM_IPG, 0x3);
339 falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
340
341 /* Configure RX */
342 EFX_POPULATE_DWORD_5(reg,
343 XM_RXEN, 1,
344 XM_AUTO_DEPAD, 0,
345 XM_ACPT_ALL_MCAST, 1,
346 XM_ACPT_ALL_UCAST, efx->promiscuous,
347 XM_PASS_CRC_ERR, 1);
348 falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
349
350 /* Set frame length */
351 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
352 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
353 falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
354 EFX_POPULATE_DWORD_2(reg,
355 XM_MAX_TX_FRM_SIZE, max_frame_len,
356 XM_TX_JUMBO_MODE, 1);
357 falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
358
359 EFX_POPULATE_DWORD_2(reg,
360 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
361 XM_DIS_FCNTL, rx_fc ? 0 : 1);
362 falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
363
364 /* Set MAC address */
365 EFX_POPULATE_DWORD_4(reg,
366 XM_ADR_0, efx->net_dev->dev_addr[0],
367 XM_ADR_1, efx->net_dev->dev_addr[1],
368 XM_ADR_2, efx->net_dev->dev_addr[2],
369 XM_ADR_3, efx->net_dev->dev_addr[3]);
370 falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
371 EFX_POPULATE_DWORD_2(reg,
372 XM_ADR_4, efx->net_dev->dev_addr[4],
373 XM_ADR_5, efx->net_dev->dev_addr[5]);
374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
375}
376
 377/* If the Falcon side of the Falcon-PHY XAUI link fails to come back
 378 * up, bash it until it does. */
379static int falcon_check_xaui_link_up(struct efx_nic *efx)
380{
381 int max_tries, tries;
382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
383 max_tries = tries;
384
385 if (efx->phy_type == PHY_TYPE_NONE)
386 return 0;
387
388 while (tries) {
389 if (falcon_xaui_link_ok(efx))
390 return 1;
391
392 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
393 __func__, tries);
394 (void) falcon_reset_xaui(efx);
395 udelay(200);
396 tries--;
397 }
398
399 EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
400 max_tries);
401 return 0;
402}
403
404void falcon_reconfigure_xmac(struct efx_nic *efx)
405{
406 int xaui_link_ok;
407
408 falcon_mask_status_intr(efx, 0);
409
410 falcon_deconfigure_mac_wrapper(efx);
411 efx->phy_op->reconfigure(efx);
412 falcon_reconfigure_xmac_core(efx);
413 falcon_reconfigure_mac_wrapper(efx);
414
415 /* Ensure XAUI link is up */
416 xaui_link_ok = falcon_check_xaui_link_up(efx);
417
418 if (xaui_link_ok && efx->link_up)
419 falcon_mask_status_intr(efx, 1);
420}
421
422void falcon_fini_xmac(struct efx_nic *efx)
423{
 424	/* Isolate the MAC from the PHY */
425 falcon_deconfigure_mac_wrapper(efx);
426
427 /* Potentially power down the PHY */
428 efx->phy_op->fini(efx);
429}
430
431void falcon_update_stats_xmac(struct efx_nic *efx)
432{
433 struct efx_mac_stats *mac_stats = &efx->mac_stats;
434 int rc;
435
436 rc = falcon_dma_stats(efx, XgDmaDone_offset);
437 if (rc)
438 return;
439
440 /* Update MAC stats from DMAed values */
441 FALCON_STAT(efx, XgRxOctets, rx_bytes);
442 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
443 FALCON_STAT(efx, XgRxPkts, rx_packets);
444 FALCON_STAT(efx, XgRxPktsOK, rx_good);
445 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
446 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
447 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
448 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
449 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
450 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
451 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
452 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
453 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
454 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
455 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
456 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
457 FALCON_STAT(efx, XgRxControlPkts, rx_control);
458 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
459 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
460 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
461 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
462 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
463 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
464 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
465 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
466 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
467 FALCON_STAT(efx, XgTxPkts, tx_packets);
468 FALCON_STAT(efx, XgTxOctets, tx_bytes);
469 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
470 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
471 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
472 FALCON_STAT(efx, XgTxControlPkts, tx_control);
473 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
474 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
475 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
476 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
477 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
478 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
479 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
480 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
481 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
482 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
483 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
484 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
485 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
486
487 /* Update derived statistics */
488 mac_stats->tx_good_bytes =
489 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
490 mac_stats->rx_bad_bytes =
491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
492}
493
494#define EFX_XAUI_RETRAIN_MAX 8
495
496int falcon_check_xmac(struct efx_nic *efx)
497{
498 unsigned xaui_link_ok;
499 int rc;
500
501 falcon_mask_status_intr(efx, 0);
502 xaui_link_ok = falcon_xaui_link_ok(efx);
503
504 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
505 (void) falcon_reset_xaui(efx);
506
507 /* Call the PHY check_hw routine */
508 rc = efx->phy_op->check_hw(efx);
509
510 /* Unmask interrupt if everything was (and still is) ok */
511 if (xaui_link_ok && efx->link_up)
512 falcon_mask_status_intr(efx, 1);
513
514 return rc;
515}
516
517/* Simulate a PHY event */
518void falcon_xmac_sim_phy_event(struct efx_nic *efx)
519{
520 efx_qword_t phy_event;
521
522 EFX_POPULATE_QWORD_2(phy_event,
523 EV_CODE, GLOBAL_EV_DECODE,
524 XG_PHY_INTR, 1);
525 falcon_generate_event(&efx->channel[0], &phy_event);
526}
527
528int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
529{
530 mdio_clause45_get_settings(efx, ecmd);
531 ecmd->transceiver = XCVR_INTERNAL;
532 ecmd->phy_address = efx->mii.phy_id;
533 ecmd->autoneg = AUTONEG_DISABLE;
534 ecmd->duplex = DUPLEX_FULL;
535 return 0;
536}
537
538int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
539{
540 if (ecmd->transceiver != XCVR_INTERNAL)
541 return -EINVAL;
542 if (ecmd->autoneg != AUTONEG_DISABLE)
543 return -EINVAL;
544 if (ecmd->duplex != DUPLEX_FULL)
545 return -EINVAL;
546
547 return mdio_clause45_set_settings(efx, ecmd);
548}
549
550
551int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
552{
553 int reset;
554
555 if (flow_control & EFX_FC_AUTO) {
556 EFX_LOG(efx, "10G does not support flow control "
557 "autonegotiation\n");
558 return -EINVAL;
559 }
560
561 if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
562 return -EINVAL;
563
564 /* TX flow control may automatically turn itself off if the
565 * link partner (intermittently) stops responding to pause
566 * frames. There isn't any indication that this has happened,
 567	 * so the best we can do is leave it up to the user to spot this
 568	 * and fix it by cycling transmit flow control on this end. */
569 reset = ((flow_control & EFX_FC_TX) &&
570 !(efx->flow_control & EFX_FC_TX));
571 if (EFX_WORKAROUND_11482(efx) && reset) {
572 if (FALCON_REV(efx) >= FALCON_REV_B0) {
573 /* Recover by resetting the EM block */
574 if (efx->link_up)
575 falcon_drain_tx_fifo(efx);
576 } else {
577 /* Schedule a reset to recover */
578 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
579 }
580 }
581
582 efx->flow_control = flow_control;
583
584 return 0;
585}
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
new file mode 100644
index 000000000000..d25bbd1297f4
--- /dev/null
+++ b/drivers/net/sfc/gmii.h
@@ -0,0 +1,195 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60/* Logically extended advertisement register */
61#define GM_ADVERTISE_SLCT ADVERTISE_SLCT
62#define GM_ADVERTISE_CSMA ADVERTISE_CSMA
63#define GM_ADVERTISE_10HALF ADVERTISE_10HALF
64#define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL
65#define GM_ADVERTISE_10FULL ADVERTISE_10FULL
66#define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF
67#define GM_ADVERTISE_100HALF ADVERTISE_100HALF
68#define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE
69#define GM_ADVERTISE_100FULL ADVERTISE_100FULL
70#define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM
71#define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4
72#define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP
73#define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
74#define GM_ADVERTISE_RESV ADVERTISE_RESV
75#define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT
76#define GM_ADVERTISE_LPACK ADVERTISE_LPACK
77#define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE
78#define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8)
79#define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8)
80#define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \
81 GM_ADVERTISE_1000HALF)
82#define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \
83 ADVERTISE_FULL)
84#define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \
85 GM_ADVERTISE_1000HALF | \
86 ADVERTISE_ALL)
87
88/* Logically extended link partner ability register */
89#define GM_LPA_SLCT LPA_SLCT
90#define GM_LPA_10HALF LPA_10HALF
91#define GM_LPA_1000XFULL LPA_1000XFULL
92#define GM_LPA_10FULL LPA_10FULL
93#define GM_LPA_1000XHALF LPA_1000XHALF
94#define GM_LPA_100HALF LPA_100HALF
95#define GM_LPA_1000XPAUSE LPA_1000XPAUSE
96#define GM_LPA_100FULL LPA_100FULL
97#define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM
98#define GM_LPA_100BASE4 LPA_100BASE4
99#define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP
100#define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM
101#define GM_LPA_RESV LPA_RESV
102#define GM_LPA_RFAULT LPA_RFAULT
103#define GM_LPA_LPACK LPA_LPACK
104#define GM_LPA_NPAGE LPA_NPAGE
105#define GM_LPA_1000FULL (LPA_1000FULL << 6)
106#define GM_LPA_1000HALF (LPA_1000HALF << 6)
107#define GM_LPA_10000FULL 0x00040000
108#define GM_LPA_10000HALF 0x00080000
109#define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \
110 | LPA_DUPLEX)
111#define GM_LPA_10 (LPA_10FULL | LPA_10HALF)
112#define GM_LPA_100 LPA_100
113#define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF)
114#define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF)
115
116/* Retrieve GMII autonegotiation advertised abilities
117 *
 118 * The MII advertisement register (MII_ADVERTISE) is logically extended
119 * to include advertisement bits ADVERTISE_1000FULL and
120 * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested
121 * against the GM_ADVERTISE_xxx constants.
122 */
123static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
124{
125 unsigned int advertise;
126 unsigned int ctrl1000;
127
128 advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
129 ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
130 return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
131}
132
133/* Retrieve GMII autonegotiation link partner abilities
134 *
135 * The MII link partner ability register (MII_LPA) is logically
136 * extended by adding bits LPA_1000HALF and LPA_1000FULL from
137 * MII_STAT1000. The result can be tested against the GM_LPA_xxx
138 * constants.
139 */
140static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
141{
142 unsigned int lpa;
143 unsigned int stat1000;
144
145 lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
146 stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
147 return (((stat1000 << 6) & GM_LPA_1000) | lpa);
148}
149
150/* Calculate GMII autonegotiated link technology
151 *
152 * "negotiated" should be the result of gmii_advertised() logically
153 * ANDed with the result of gmii_lpa().
154 *
155 * "tech" will be negotiated with the unused bits masked out. For
156 * example, if both ends of the link are capable of both
157 * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
158 * out.
159 */
160static inline unsigned int gmii_nway_result(unsigned int negotiated)
161{
162 unsigned int other_bits;
163
164 /* Mask out the speed and duplexity bits */
165 other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
166
167 if (negotiated & GM_LPA_1000FULL)
168 return (other_bits | GM_LPA_1000FULL);
169 else if (negotiated & GM_LPA_1000HALF)
170 return (other_bits | GM_LPA_1000HALF);
171 else
172 return (other_bits | mii_nway_result(negotiated));
173}
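Per the comments above, the negotiated technology is the bitwise AND of the locally advertised abilities and the link partner abilities, resolved by gmii_nway_result(); a short usage sketch:

	/* Determine the negotiated GMII link technology for an interface. */
	static inline unsigned int example_gmii_negotiated(struct mii_if_info *gmii)
	{
		unsigned int negotiated = gmii_advertised(gmii) & gmii_lpa(gmii);

		return gmii_nway_result(negotiated);
	}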
174
175/* Calculate GMII non-autonegotiated link technology
176 *
177 * This provides an equivalent to gmii_nway_result for the case when
178 * autonegotiation is disabled.
179 */
180static inline unsigned int gmii_forced_result(unsigned int bmcr)
181{
182 unsigned int result;
183 int full_duplex;
184
185 full_duplex = bmcr & BMCR_FULLDPLX;
186 if (bmcr & BMCR_SPEED1000)
187 result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
188 else if (bmcr & BMCR_SPEED100)
189 result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
190 else
191 result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
192 return result;
193}
194
195#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
new file mode 100644
index 000000000000..b6c62d0ed9c2
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.c
@@ -0,0 +1,381 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "i2c-direct.h"
14
15/*
16 * I2C data (SDA) and clock (SCL) line read/writes with appropriate
17 * delays.
18 */
19
20static inline void setsda(struct efx_i2c_interface *i2c, int state)
21{
22 udelay(i2c->op->udelay);
23 i2c->sda = state;
24 i2c->op->setsda(i2c);
25 udelay(i2c->op->udelay);
26}
27
28static inline void setscl(struct efx_i2c_interface *i2c, int state)
29{
30 udelay(i2c->op->udelay);
31 i2c->scl = state;
32 i2c->op->setscl(i2c);
33 udelay(i2c->op->udelay);
34}
35
36static inline int getsda(struct efx_i2c_interface *i2c)
37{
38 int sda;
39
40 udelay(i2c->op->udelay);
41 sda = i2c->op->getsda(i2c);
42 udelay(i2c->op->udelay);
43 return sda;
44}
45
46static inline int getscl(struct efx_i2c_interface *i2c)
47{
48 int scl;
49
50 udelay(i2c->op->udelay);
51 scl = i2c->op->getscl(i2c);
52 udelay(i2c->op->udelay);
53 return scl;
54}
55
56/*
57 * I2C low-level protocol operations
58 *
59 */
60
61static inline void i2c_release(struct efx_i2c_interface *i2c)
62{
63 EFX_WARN_ON_PARANOID(!i2c->scl);
64 EFX_WARN_ON_PARANOID(!i2c->sda);
65 /* Devices may time out if operations do not end */
66 setscl(i2c, 1);
67 setsda(i2c, 1);
68 EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
69 EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
70}
71
72static inline void i2c_start(struct efx_i2c_interface *i2c)
73{
74 /* We may be restarting immediately after a {send,recv}_bit,
75 * so SCL will not necessarily already be high.
76 */
77 EFX_WARN_ON_PARANOID(!i2c->sda);
78 setscl(i2c, 1);
79 setsda(i2c, 0);
80 setscl(i2c, 0);
81 setsda(i2c, 1);
82}
83
84static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
85{
86 EFX_WARN_ON_PARANOID(i2c->scl != 0);
87 setsda(i2c, bit);
88 setscl(i2c, 1);
89 setscl(i2c, 0);
90 setsda(i2c, 1);
91}
92
93static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
94{
95 int bit;
96
97 EFX_WARN_ON_PARANOID(i2c->scl != 0);
98 EFX_WARN_ON_PARANOID(!i2c->sda);
99 setscl(i2c, 1);
100 bit = getsda(i2c);
101 setscl(i2c, 0);
102 return bit;
103}
104
105static inline void i2c_stop(struct efx_i2c_interface *i2c)
106{
107 EFX_WARN_ON_PARANOID(i2c->scl != 0);
108 setsda(i2c, 0);
109 setscl(i2c, 1);
110 setsda(i2c, 1);
111}
112
113/*
114 * I2C mid-level protocol operations
115 *
116 */
117
118/* Sends a byte via the I2C bus and checks for an acknowledgement from
119 * the slave device.
120 */
121static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
122{
123 int i;
124
125 /* Send byte */
126 for (i = 0; i < 8; i++) {
127 i2c_send_bit(i2c, !!(byte & 0x80));
128 byte <<= 1;
129 }
130
131 /* Check for acknowledgement from slave */
132 return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
133}
134
135/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
136static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
137{
138 u8 value = 0;
139 int i;
140
141 /* Receive byte */
142 for (i = 0; i < 8; i++)
143 value = (value << 1) | i2c_recv_bit(i2c);
144
145 /* Send ACK/NACK */
146 i2c_send_bit(i2c, (ack ? 0 : 1));
147
148 return value;
149}
150
151/* Calculate command byte for a read operation */
152static inline u8 i2c_read_cmd(u8 device_id)
153{
154 return ((device_id << 1) | 1);
155}
156
157/* Calculate command byte for a write operation */
158static inline u8 i2c_write_cmd(u8 device_id)
159{
160 return ((device_id << 1) | 0);
161}
162
163int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
164{
165 int rc;
166
167 /* If someone is driving the bus low we just give up. */
168 if (getsda(i2c) == 0 || getscl(i2c) == 0) {
169 EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
170 " Giving up.\n", __func__);
171 return -EFAULT;
172 }
173
174 /* Pretend to initiate a device write */
175 i2c_start(i2c);
176 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
177 if (rc)
178 goto out;
179
180 out:
181 i2c_stop(i2c);
182 i2c_release(i2c);
183
184 return rc;
185}
186
187/* This performs a fast read of one or more consecutive bytes from an
188 * I2C device. Not all devices support consecutive reads of more than
189 * one byte; for these devices use efx_i2c_read() instead.
190 */
191int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
192 u8 device_id, u8 offset, u8 *data, unsigned int len)
193{
194 int i;
195 int rc;
196
197 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
198 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
199 EFX_WARN_ON_PARANOID(data == NULL);
200 EFX_WARN_ON_PARANOID(len < 1);
201
202 /* Select device and starting offset */
203 i2c_start(i2c);
204 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
205 if (rc)
206 goto out;
207 rc = i2c_send_byte(i2c, offset);
208 if (rc)
209 goto out;
210
211 /* Read data from device */
212 i2c_start(i2c);
213 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
214 if (rc)
215 goto out;
216 for (i = 0; i < (len - 1); i++)
217 /* Read and acknowledge all but the last byte */
218 data[i] = i2c_recv_byte(i2c, 1);
219 /* Read last byte with no acknowledgement */
220 data[i] = i2c_recv_byte(i2c, 0);
221
222 out:
223 i2c_stop(i2c);
224 i2c_release(i2c);
225
226 return rc;
227}
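A hedged usage sketch; the 7-bit device address 0x50 (typical for a small serial EEPROM) and the 16-byte length are illustrative assumptions:

	/* Read 16 bytes starting at offset 0 from a device at address 0x50. */
	static int example_read_eeprom(struct efx_i2c_interface *i2c, u8 *buf)
	{
		return efx_i2c_fast_read(i2c, 0x50, 0, buf, 16);
	}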
228
229/* This performs a fast write of one or more consecutive bytes to an
230 * I2C device. Not all devices support consecutive writes of more
231 * than one byte; for these devices use efx_i2c_write() instead.
232 */
233int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
234 u8 device_id, u8 offset,
235 const u8 *data, unsigned int len)
236{
237 int i;
238 int rc;
239
240 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
241 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
242 EFX_WARN_ON_PARANOID(len < 1);
243
244 /* Select device and starting offset */
245 i2c_start(i2c);
246 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
247 if (rc)
248 goto out;
249 rc = i2c_send_byte(i2c, offset);
250 if (rc)
251 goto out;
252
253 /* Write data to device */
254 for (i = 0; i < len; i++) {
255 rc = i2c_send_byte(i2c, data[i]);
256 if (rc)
257 goto out;
258 }
259
260 out:
261 i2c_stop(i2c);
262 i2c_release(i2c);
263
264 return rc;
265}
266
267/* I2C byte-by-byte read */
268int efx_i2c_read(struct efx_i2c_interface *i2c,
269 u8 device_id, u8 offset, u8 *data, unsigned int len)
270{
271 int rc;
272
273 /* i2c_fast_read with length 1 is a single byte read */
274 for (; len > 0; offset++, data++, len--) {
275 rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
276 if (rc)
277 return rc;
278 }
279
280 return 0;
281}
282
283/* I2C byte-by-byte write */
284int efx_i2c_write(struct efx_i2c_interface *i2c,
285 u8 device_id, u8 offset, const u8 *data, unsigned int len)
286{
287 int rc;
288
289 /* i2c_fast_write with length 1 is a single byte write */
290 for (; len > 0; offset++, data++, len--) {
291 rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
292 if (rc)
293 return rc;
294 mdelay(i2c->op->mdelay);
295 }
296
297 return 0;
298}
299
300
301/* This is just a slightly neater wrapper round efx_i2c_fast_write
302 * in the case where the target doesn't take an offset
303 */
304int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
305 u8 device_id, const u8 *data, unsigned int len)
306{
307 return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
308}
309
310/* I2C receiving of bytes - does not send an offset byte */
311int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
312 u8 *bytes, unsigned int len)
313{
314 int i;
315 int rc;
316
317 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
318 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
319 EFX_WARN_ON_PARANOID(len < 1);
320
321 /* Select device */
322 i2c_start(i2c);
323
324 /* Read data from device */
325 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
326 if (rc)
327 goto out;
328
329 for (i = 0; i < (len - 1); i++)
330 /* Read and acknowledge all but the last byte */
331 bytes[i] = i2c_recv_byte(i2c, 1);
332 /* Read last byte with no acknowledgement */
333 bytes[i] = i2c_recv_byte(i2c, 0);
334
335 out:
336 i2c_stop(i2c);
337 i2c_release(i2c);
338
339 return rc;
340}
341
342/* SMBus and some I2C devices will time out if the I2C clock is
343 * held low for too long. This is most likely to happen in virtualised
344 * systems (when the entire domain is descheduled) but could in
345 * principle happen due to preemption on any busy system (and given the
 346 * potential length of an I2C operation, turning preemption off is not
347 * a sensible option). The following functions deal with the failure by
348 * retrying up to a fixed number of times.
349 */
350
351#define I2C_MAX_RETRIES (10)
352
353/* The timeout problem will result in -EIO. If the wrapped function
354 * returns any other error, pass this up and do not retry. */
355#define RETRY_WRAPPER(_f) \
356 int retries = I2C_MAX_RETRIES; \
357 int rc; \
358 while (retries) { \
359 rc = _f; \
360 if (rc != -EIO) \
361 return rc; \
362 retries--; \
363 } \
364 return rc; \
365
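For reference, a hand-expanded sketch of what RETRY_WRAPPER() produces inside efx_i2c_read_retry() below; only -EIO, the timeout case, is retried:

	int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
			       u8 device_id, u8 offset, u8 *data, unsigned int len)
	{
		int retries = I2C_MAX_RETRIES;
		int rc;

		while (retries) {
			rc = efx_i2c_read(i2c, device_id, offset, data, len);
			if (rc != -EIO)		/* pass up any non-timeout result */
				return rc;
			retries--;
		}
		return rc;
	}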
366int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
367{
368 RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
369}
370
371int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
372 u8 device_id, u8 offset, u8 *data, unsigned int len)
373{
374 RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
375}
376
377int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
378 u8 device_id, u8 offset, const u8 *data, unsigned int len)
379{
380 RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
381}
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
new file mode 100644
index 000000000000..291e561071f5
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.h
@@ -0,0 +1,91 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_I2C_DIRECT_H
12#define EFX_I2C_DIRECT_H
13
14#include "net_driver.h"
15
16/*
17 * Direct control of an I2C bus
18 */
19
20struct efx_i2c_interface;
21
22/**
23 * struct efx_i2c_bit_operations - I2C bus direct control methods
24 *
25 * I2C bus direct control methods.
26 *
27 * @setsda: Set state of SDA line
28 * @setscl: Set state of SCL line
29 * @getsda: Get state of SDA line
30 * @getscl: Get state of SCL line
31 * @udelay: Delay between each bit operation
32 * @mdelay: Delay between each byte write
33 */
34struct efx_i2c_bit_operations {
35 void (*setsda) (struct efx_i2c_interface *i2c);
36 void (*setscl) (struct efx_i2c_interface *i2c);
37 int (*getsda) (struct efx_i2c_interface *i2c);
38 int (*getscl) (struct efx_i2c_interface *i2c);
39 unsigned int udelay;
40 unsigned int mdelay;
41};
42
43/**
44 * struct efx_i2c_interface - an I2C interface
45 *
46 * An I2C interface.
47 *
48 * @efx: Attached Efx NIC
49 * @op: I2C bus control methods
50 * @sda: Current output state of SDA line
51 * @scl: Current output state of SCL line
52 */
53struct efx_i2c_interface {
54 struct efx_nic *efx;
55 struct efx_i2c_bit_operations *op;
56 unsigned int sda:1;
57 unsigned int scl:1;
58};
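The callbacks and delays are supplied by board-specific code; a sketch of what an instance might look like, where the falcon_setsda()/falcon_getscl() style function names and the delay values are illustrative assumptions:

	static struct efx_i2c_bit_operations example_i2c_bit_ops = {
		.setsda	= falcon_setsda,
		.setscl	= falcon_setscl,
		.getsda	= falcon_getsda,
		.getscl	= falcon_getscl,
		.udelay	= 20,	/* microseconds between bit transitions */
		.mdelay	= 10,	/* milliseconds between byte writes */
	};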
59
60extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
61extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
62 u8 device_id, u8 offset,
63 u8 *data, unsigned int len);
64extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
65 u8 device_id, u8 offset,
66 const u8 *data, unsigned int len);
67extern int efx_i2c_read(struct efx_i2c_interface *i2c,
68 u8 device_id, u8 offset, u8 *data, unsigned int len);
69extern int efx_i2c_write(struct efx_i2c_interface *i2c,
70 u8 device_id, u8 offset,
71 const u8 *data, unsigned int len);
72
73extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
74 const u8 *bytes, unsigned int len);
75
76extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
77 u8 *bytes, unsigned int len);
78
79
80/* Versions of the API that retry on failure. */
81extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
82 u8 device_id);
83
84extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
85 u8 device_id, u8 offset, u8 *data, unsigned int len);
86
87extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
88 u8 device_id, u8 offset,
89 const u8 *data, unsigned int len);
90
91#endif /* EFX_I2C_DIRECT_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
new file mode 100644
index 000000000000..edd07d4dee18
--- /dev/null
+++ b/drivers/net/sfc/mac.h
@@ -0,0 +1,33 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2007 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_MAC_H
12#define EFX_MAC_H
13
14#include "net_driver.h"
15
16extern void falcon_xmac_writel(struct efx_nic *efx,
17 efx_dword_t *value, unsigned int mac_reg);
18extern void falcon_xmac_readl(struct efx_nic *efx,
19 efx_dword_t *value, unsigned int mac_reg);
20extern int falcon_init_xmac(struct efx_nic *efx);
21extern void falcon_reconfigure_xmac(struct efx_nic *efx);
22extern void falcon_update_stats_xmac(struct efx_nic *efx);
23extern void falcon_fini_xmac(struct efx_nic *efx);
24extern int falcon_check_xmac(struct efx_nic *efx);
25extern void falcon_xmac_sim_phy_event(struct efx_nic *efx);
26extern int falcon_xmac_get_settings(struct efx_nic *efx,
27 struct ethtool_cmd *ecmd);
28extern int falcon_xmac_set_settings(struct efx_nic *efx,
29 struct ethtool_cmd *ecmd);
30extern int falcon_xmac_set_pause(struct efx_nic *efx,
31 enum efx_fc_type pause_params);
32
33#endif
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
new file mode 100644
index 000000000000..dc06bb0aa575
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.c
@@ -0,0 +1,282 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Useful functions for working with MDIO clause 45 PHYs
11 */
12#include <linux/types.h>
13#include <linux/ethtool.h>
14#include <linux/delay.h>
15#include "net_driver.h"
16#include "mdio_10g.h"
17#include "boards.h"
18
19int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
20 int spins, int spintime)
21{
22 u32 ctrl;
23 int phy_id = port->mii.phy_id;
24
25 /* Catch callers passing values in the wrong units (or just silly) */
26 EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
27
28 mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
29 (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
30 /* Wait for the reset bit to clear. */
31 do {
32 msleep(spintime);
33 ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
34 spins--;
35
36 } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
37
38 return spins ? spins : -ETIMEDOUT;
39}
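A hedged usage sketch; 100 polls of 10 ms gives an overall wait of about one second, comfortably under the 5000 ms sanity limit asserted above:

	static int example_reset_phyxs(struct efx_nic *efx)
	{
		/* A positive return means spins were left over, i.e. success */
		int rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS, 100, 10);

		return (rc < 0) ? rc : 0;
	}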
40
41static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
42 int fault_fatal)
43{
44 int status;
45 int phy_id = efx->mii.phy_id;
46
47 /* Read MMD STATUS2 to check it is responding. */
48 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
49 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
50 ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
51 MDIO_MMDREG_STAT2_PRESENT_VAL) {
52 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
53 return -EIO;
54 }
55
56 /* Read MMD STATUS 1 to check for fault. */
57 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
58 if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
59 if (fault_fatal) {
60 EFX_ERR(efx, "PHY MMD %d reporting fatal"
61 " fault: status %x\n", mmd, status);
62 return -EIO;
63 } else {
64 EFX_LOG(efx, "PHY MMD %d reporting status"
65 " %x (expected)\n", mmd, status);
66 }
67 }
68 return 0;
69}
70
71/* This ought to be ridiculous overkill. We expect it to fail rarely */
72#define MDIO45_RESET_TIME 1000 /* ms */
73#define MDIO45_RESET_ITERS 100
74
75int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
76 unsigned int mmd_mask)
77{
78 const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
79 int tries = MDIO45_RESET_ITERS;
80 int rc = 0;
81 int in_reset;
82
83 while (tries) {
84 int mask = mmd_mask;
85 int mmd = 0;
86 int stat;
87 in_reset = 0;
88 while (mask) {
89 if (mask & 1) {
90 stat = mdio_clause45_read(efx,
91 efx->mii.phy_id,
92 mmd,
93 MDIO_MMDREG_CTRL1);
94 if (stat < 0) {
95 EFX_ERR(efx, "failed to read status of"
96 " MMD %d\n", mmd);
97 return -EIO;
98 }
99 if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
100 in_reset |= (1 << mmd);
101 }
102 mask = mask >> 1;
103 mmd++;
104 }
105 if (!in_reset)
106 break;
107 tries--;
108 msleep(spintime);
109 }
110 if (in_reset != 0) {
111 EFX_ERR(efx, "not all MMDs came out of reset in time."
112 " MMDs still in reset: %x\n", in_reset);
113 rc = -ETIMEDOUT;
114 }
115 return rc;
116}
117
118int mdio_clause45_check_mmds(struct efx_nic *efx,
119 unsigned int mmd_mask, unsigned int fatal_mask)
120{
121 int devices, mmd = 0;
122 int probe_mmd;
123
124 /* Historically we have probed the PHYXS to find out what devices are
 125	 * present, but that doesn't work so well if the PHYXS isn't expected
 126	 * to exist; in that case just probe the first MMD in the supplied mask. */
127 probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS :
128 __ffs(mmd_mask);
129 devices = mdio_clause45_read(efx, efx->mii.phy_id,
130 probe_mmd, MDIO_MMDREG_DEVS0);
131
132 /* Check all the expected MMDs are present */
133 if (devices < 0) {
134 EFX_ERR(efx, "failed to read devices present\n");
135 return -EIO;
136 }
137 if ((devices & mmd_mask) != mmd_mask) {
138 EFX_ERR(efx, "required MMDs not present: got %x, "
139 "wanted %x\n", devices, mmd_mask);
140 return -ENODEV;
141 }
142 EFX_TRACE(efx, "Devices present: %x\n", devices);
143
144 /* Check all required MMDs are responding and happy. */
145 while (mmd_mask) {
146 if (mmd_mask & 1) {
147 int fault_fatal = fatal_mask & 1;
148 if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
149 return -EIO;
150 }
151 mmd_mask = mmd_mask >> 1;
152 fatal_mask = fatal_mask >> 1;
153 mmd++;
154 }
155
156 return 0;
157}
158
159int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
160{
161 int phy_id = efx->mii.phy_id;
162 int status;
163 int ok = 1;
164 int mmd = 0;
165 int good;
166
167 while (mmd_mask) {
168 if (mmd_mask & 1) {
169 /* Double reads because link state is latched, and a
170 * read moves the current state into the register */
171 status = mdio_clause45_read(efx, phy_id,
172 mmd, MDIO_MMDREG_STAT1);
173 status = mdio_clause45_read(efx, phy_id,
174 mmd, MDIO_MMDREG_STAT1);
175
176 good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
177 ok = ok && good;
178 }
179 mmd_mask = (mmd_mask >> 1);
180 mmd++;
181 }
182 return ok;
183}
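A hedged usage sketch; the DEV_PRESENT_BIT() helper (used further down in this file) and the MDIO_MMD_PCS/MDIO_MMD_PMAPMD constants from mdio_10g.h are assumed:

	static int example_phy_link_ok(struct efx_nic *efx)
	{
		/* Check link state on the PCS and PMA/PMD MMDs only */
		unsigned int mask = DEV_PRESENT_BIT(MDIO_MMD_PCS) |
				    DEV_PRESENT_BIT(MDIO_MMD_PMAPMD);

		return mdio_clause45_links_ok(efx, mask);
	}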
184
185/**
186 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
187 * @efx: Efx NIC
188 * @ecmd: Buffer for settings
189 *
190 * On return the 'port', 'speed', 'supported' and 'advertising' fields of
191 * ecmd have been filled out based on the PMA type.
192 */
193void mdio_clause45_get_settings(struct efx_nic *efx,
194 struct ethtool_cmd *ecmd)
195{
196 int pma_type;
197
198 /* If no PMA is present we are presumably talking something XAUI-ish
199 * like CX4, which we report as FIBRE (see below). */
200 if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) {
201 ecmd->speed = SPEED_10000;
202 ecmd->port = PORT_FIBRE;
203 ecmd->supported = SUPPORTED_FIBRE;
204 ecmd->advertising = ADVERTISED_FIBRE;
205 return;
206 }
207
208 pma_type = mdio_clause45_read(efx, efx->mii.phy_id,
209 MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2);
210 pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
211
212 switch (pma_type) {
213 /* We represent CX4 as fibre in the absence of anything
214 better. */
215 case MDIO_PMAPMD_CTRL2_10G_CX4:
216 ecmd->speed = SPEED_10000;
217 ecmd->port = PORT_FIBRE;
218 ecmd->supported = SUPPORTED_FIBRE;
219 ecmd->advertising = ADVERTISED_FIBRE;
220 break;
221 /* 10G Base-T */
222 case MDIO_PMAPMD_CTRL2_10G_BT:
223 ecmd->speed = SPEED_10000;
224 ecmd->port = PORT_TP;
225 ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
226 ecmd->advertising = (ADVERTISED_FIBRE
227 | ADVERTISED_10000baseT_Full);
228 break;
229 case MDIO_PMAPMD_CTRL2_1G_BT:
230 ecmd->speed = SPEED_1000;
231 ecmd->port = PORT_TP;
232 ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
233 ecmd->advertising = (ADVERTISED_FIBRE
234 | ADVERTISED_1000baseT_Full);
235 break;
236 case MDIO_PMAPMD_CTRL2_100_BT:
237 ecmd->speed = SPEED_100;
238 ecmd->port = PORT_TP;
239 ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
240 ecmd->advertising = (ADVERTISED_FIBRE
241 | ADVERTISED_100baseT_Full);
242 break;
243 case MDIO_PMAPMD_CTRL2_10_BT:
244 ecmd->speed = SPEED_10;
245 ecmd->port = PORT_TP;
246 ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full;
247 ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full;
248 break;
249 /* All the other defined modes are flavours of
250 * 10G optical */
251 default:
252 ecmd->speed = SPEED_10000;
253 ecmd->port = PORT_FIBRE;
254 ecmd->supported = SUPPORTED_FIBRE;
255 ecmd->advertising = ADVERTISED_FIBRE;
256 break;
257 }
258}
259
260/**
261 * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
262 * @efx: Efx NIC
263 * @ecmd: New settings
264 *
265 * Currently this just enforces that we are _not_ changing the
266 * 'port', 'speed', 'supported' or 'advertising' settings as these
267 * cannot be changed on any currently supported PHY.
268 */
269int mdio_clause45_set_settings(struct efx_nic *efx,
270 struct ethtool_cmd *ecmd)
271{
272 struct ethtool_cmd tmpcmd;
273 mdio_clause45_get_settings(efx, &tmpcmd);
274 /* None of the current PHYs support more than one mode
275 * of operation (and only 10GBT ever will), so keep things
276 * simple for now */
277 if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) &&
278 (ecmd->supported == tmpcmd.supported) &&
279 (ecmd->advertising == tmpcmd.advertising))
280 return 0;
281 return -EOPNOTSUPP;
282}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
new file mode 100644
index 000000000000..2214b6d820a7
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.h
@@ -0,0 +1,232 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_MDIO_10G_H
11#define EFX_MDIO_10G_H
12
13/*
14 * Definitions needed for doing 10G MDIO as specified in clause 45,
15 * which do not appear in Linux yet. Also some helper functions.
16 */
17
18#include "efx.h"
19#include "boards.h"
20
21/* Numbering of the MDIO Manageable Devices (MMDs) */
22/* Physical Medium Attachment/ Physical Medium Dependent sublayer */
23#define MDIO_MMD_PMAPMD (1)
24/* WAN Interface Sublayer */
25#define MDIO_MMD_WIS (2)
26/* Physical Coding Sublayer */
27#define MDIO_MMD_PCS (3)
28/* PHY Extender Sublayer */
29#define MDIO_MMD_PHYXS (4)
30/* Extender Sublayer */
31#define MDIO_MMD_DTEXS (5)
32/* Transmission convergence */
33#define MDIO_MMD_TC (6)
34/* Auto negotiation */
35#define MDIO_MMD_AN (7)
36
37/* Generic register locations */
38#define MDIO_MMDREG_CTRL1 (0)
39#define MDIO_MMDREG_STAT1 (1)
40#define MDIO_MMDREG_IDHI (2)
41#define MDIO_MMDREG_IDLOW (3)
42#define MDIO_MMDREG_SPEED (4)
43#define MDIO_MMDREG_DEVS0 (5)
44#define MDIO_MMDREG_DEVS1 (6)
45#define MDIO_MMDREG_CTRL2 (7)
46#define MDIO_MMDREG_STAT2 (8)
47
48/* Bits in MMDREG_CTRL1 */
49/* Reset */
50#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
51#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
52
53/* Bits in MMDREG_STAT1 */
54#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
55#define MDIO_MMDREG_STAT1_FAULT_WIDTH (1)
56/* Link state */
57#define MDIO_MMDREG_STAT1_LINK_LBN (2)
58#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
59
60/* Bits in ID reg */
61#define MDIO_ID_REV(_id32) (_id32 & 0xf)
62#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
63#define MDIO_ID_OUI(_id32) (_id32 >> 10)
64
65/* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out
66 * so the 'bit present' bit number of an MMD is the number of
67 * that MMD */
68#define DEV_PRESENT_BIT(_b) (1 << _b)
69
70#define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
71#define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
72#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
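/* Worked example: a PHY exposing PMA/PMD, PCS and PHYXS (and nothing else)
 * would be expected to report DEVS0 as (1 << 1) | (1 << 3) | (1 << 4) = 0x1a. */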
73
74/* Bits in MMDREG_STAT2 */
75#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
76#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
77#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
78
79/* PMA type (4 bits) */
80#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
81#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
82#define MDIO_PMAPMD_CTRL2_10G_LW (0x2)
83#define MDIO_PMAPMD_CTRL2_10G_SW (0x3)
84#define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4)
85#define MDIO_PMAPMD_CTRL2_10G_ER (0x5)
86#define MDIO_PMAPMD_CTRL2_10G_LR (0x6)
87#define MDIO_PMAPMD_CTRL2_10G_SR (0x7)
88/* Reserved */
89#define MDIO_PMAPMD_CTRL2_10G_BT (0x9)
90/* Reserved */
91/* Reserved */
92#define MDIO_PMAPMD_CTRL2_1G_BT (0xc)
93/* Reserved */
94#define MDIO_PMAPMD_CTRL2_100_BT (0xe)
95#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
96#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
97
98/* PHY XGXS lane state */
99#define MDIO_PHYXS_LANE_STATE (0x18)
100#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
101
102/* AN registers */
103#define MDIO_AN_STATUS (1)
104#define MDIO_AN_STATUS_XNP_LBN (7)
105#define MDIO_AN_STATUS_PAGE_LBN (6)
106#define MDIO_AN_STATUS_AN_DONE_LBN (5)
107#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
108
109#define MDIO_AN_10GBT_STATUS (33)
110#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
111#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
112#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
113#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
114#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
115#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
116#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */
117
118
119/* Packing of the prt and dev arguments of clause 45 style MDIO into a
120 * single int so they can be passed into the mdio_read/write functions
121 * that currently exist. Note that as Falcon is the only current user,
122 * the packed form is chosen to match what Falcon needs to write into
123 * a register. This is checked at compile-time so do not change it. If
124 * your target chip needs things laid out differently, you will need
125 * to unpack the arguments in your chip-specific mdio functions.
126 */
127 /* These are defined by the standard. */
128#define MDIO45_PRT_ID_WIDTH (5)
129#define MDIO45_DEV_ID_WIDTH (5)
130
131/* The prt ID is just packed in immediately to the left of the dev ID */
132#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
133
134#define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
135/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
136#define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1)
137#define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
138#define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1))
139
140
141#define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH
142#define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH
143#define MDIO45_DEV_ID_COMP_LBN 0
144#define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH
145
146/* Compose port and device into a phy_id */
147static inline int mdio_clause45_pack(u8 prt, u8 dev)
148{
149 efx_dword_t phy_id;
150 EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
151 MDIO45_DEV_ID_COMP, dev);
152 return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
153}
154
155static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
156{
157 efx_dword_t phy_id;
158 EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
159 *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
160 *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
161}
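/* Illustrative example of the packing above: for port 1 and MMD 4 (PHYXS),
 * the device ID occupies bits 0-4, the port ID bits 5-9 and the 'is 10G'
 * flag bit 10, so mdio_clause45_pack(1, MDIO_MMD_PHYXS) yields
 * 0x400 | (1 << 5) | 4 = 0x424, and mdio_clause45_unpack(0x424, &prt, &dev)
 * recovers prt == 1 and dev == 4. */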
162
163static inline int mdio_clause45_read(struct efx_nic *efx,
164 u8 prt, u8 dev, u16 addr)
165{
166 return efx->mii.mdio_read(efx->net_dev,
167 mdio_clause45_pack(prt, dev), addr);
168}
169
170static inline void mdio_clause45_write(struct efx_nic *efx,
171 u8 prt, u8 dev, u16 addr, int value)
172{
173 efx->mii.mdio_write(efx->net_dev,
174 mdio_clause45_pack(prt, dev), addr, value);
175}
176
177
178static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
179{
180 int phy_id = efx->mii.phy_id;
181 u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
182 u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
183 return (id_hi << 16) | (id_low);
184}
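/* A minimal sketch of decoding the identifier with the MDIO_ID_* macros
 * defined above (hypothetical helper, not used elsewhere in the driver): */
static inline void mdio_clause45_example_log_id(struct efx_nic *efx, int mmd)
{
	u32 id = mdio_clause45_read_id(efx, mmd);

	EFX_LOG(efx, "MMD %d: OUI %x, model %x, revision %x\n",
		mmd, MDIO_ID_OUI(id), MDIO_ID_MODEL(id), MDIO_ID_REV(id));
}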
185
186static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
187{
188 int i, sync, lane_status;
189
190 for (i = 0; i < 2; ++i)
191 lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
192 MDIO_MMD_PHYXS,
193 MDIO_PHYXS_LANE_STATE);
194
195 sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
196 if (!sync)
197 EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
198 return sync;
199}
200
201extern const char *mdio_clause45_mmd_name(int mmd);
202
203/*
204 * Reset a specific MMD and wait for reset to clear.
205 * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
206 *
207 * This function will sleep
208 */
209extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
210 int spins, int spintime);
211
212/* As mdio_clause45_check_mmd but for multiple MMDs */
213int mdio_clause45_check_mmds(struct efx_nic *efx,
214 unsigned int mmd_mask, unsigned int fatal_mask);
215
216/* Check the link status of specified mmds in bit mask */
217extern int mdio_clause45_links_ok(struct efx_nic *efx,
218 unsigned int mmd_mask);
219
220/* Read (some of) the PHY settings over MDIO */
221extern void mdio_clause45_get_settings(struct efx_nic *efx,
222 struct ethtool_cmd *ecmd);
223
224/* Set (some of) the PHY settings over MDIO */
225extern int mdio_clause45_set_settings(struct efx_nic *efx,
226 struct ethtool_cmd *ecmd);
227
228/* Wait for specified MMDs to exit reset within a timeout */
229extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
230 unsigned int mmd_mask);
231
232#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
new file mode 100644
index 000000000000..c505482c2520
--- /dev/null
+++ b/drivers/net/sfc/net_driver.h
@@ -0,0 +1,883 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11/* Common definitions for all Efx net driver code */
12
13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H
15
16#include <linux/version.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if_vlan.h>
21#include <linux/timer.h>
22#include <linux/mii.h>
23#include <linux/list.h>
24#include <linux/pci.h>
25#include <linux/device.h>
26#include <linux/highmem.h>
27#include <linux/workqueue.h>
28#include <linux/inet_lro.h>
29
30#include "enum.h"
31#include "bitfield.h"
32#include "i2c-direct.h"
33
34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
36
37/**************************************************************************
38 *
39 * Build definitions
40 *
41 **************************************************************************/
42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc"
44#endif
45#define EFX_DRIVER_VERSION "2.2.0136"
46
47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
49#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
50#else
51#define EFX_BUG_ON_PARANOID(x) do {} while (0)
52#define EFX_WARN_ON_PARANOID(x) do {} while (0)
53#endif
54
55#define NET_DEV_REGISTERED(efx) \
56 ((efx)->net_dev->reg_state == NETREG_REGISTERED)
57
58/* Include net device name in log messages if it has been registered.
59 * Use efx->name not efx->net_dev->name so that races with (un)registration
60 * are harmless.
61 */
62#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
63
64/* Un-rate-limited logging */
65#define EFX_ERR(efx, fmt, args...) \
66dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
67
68#define EFX_INFO(efx, fmt, args...) \
69dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
70
71#ifdef EFX_ENABLE_DEBUG
72#define EFX_LOG(efx, fmt, args...) \
73dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
74#else
75#define EFX_LOG(efx, fmt, args...) \
76dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
77#endif
78
79#define EFX_TRACE(efx, fmt, args...) do {} while (0)
80
81#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
82
83/* Rate-limited logging */
84#define EFX_ERR_RL(efx, fmt, args...) \
85do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
86
87#define EFX_INFO_RL(efx, fmt, args...) \
88do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
89
90#define EFX_LOG_RL(efx, fmt, args...) \
91do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
92
93/* Kernel headers may redefine inline anyway */
94#ifndef inline
95#define inline inline __attribute__ ((always_inline))
96#endif
97
98/**************************************************************************
99 *
100 * Efx data structures
101 *
102 **************************************************************************/
103
104#define EFX_MAX_CHANNELS 32
105#define EFX_MAX_TX_QUEUES 1
106#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
107
108/**
109 * struct efx_special_buffer - An Efx special buffer
110 * @addr: CPU base address of the buffer
111 * @dma_addr: DMA base address of the buffer
112 * @len: Buffer length, in bytes
113 * @index: Buffer index within controller's buffer table
114 * @entries: Number of buffer table entries
115 *
116 * Special buffers are used for the event queues and the TX and RX
117 * descriptor queues for each channel. They are *not* used for the
118 * actual transmit and receive buffers.
119 *
120 * Note that for Falcon, TX and RX descriptor queues live in host memory.
121 * Allocation and freeing procedures must take this into account.
122 */
123struct efx_special_buffer {
124 void *addr;
125 dma_addr_t dma_addr;
126 unsigned int len;
127 int index;
128 int entries;
129};
130
131/**
132 * struct efx_tx_buffer - An Efx TX buffer
133 * @skb: The associated socket buffer.
134 * Set only on the final fragment of a packet; %NULL for all other
135 * fragments. When this fragment completes, then we can free this
136 * skb.
137 * @dma_addr: DMA address of the fragment.
138 * @len: Length of this fragment.
139 * This field is zero when the queue slot is empty.
140 * @continuation: True if this fragment is not the end of a packet.
141 * @unmap_single: True if pci_unmap_single should be used.
142 * @unmap_addr: DMA address to unmap
143 * @unmap_len: Length of this fragment to unmap
144 */
145struct efx_tx_buffer {
146 const struct sk_buff *skb;
147 dma_addr_t dma_addr;
148 unsigned short len;
149 unsigned char continuation;
150 unsigned char unmap_single;
151 dma_addr_t unmap_addr;
152 unsigned short unmap_len;
153};
154
155/**
156 * struct efx_tx_queue - An Efx TX queue
157 *
158 * This is a ring buffer of TX fragments.
159 * Since the TX completion path always executes on the same
160 * CPU and the xmit path can operate on different CPUs,
161 * performance is increased by ensuring that the completion
162 * path and the xmit path operate on different cache lines.
163 * This is particularly important if the xmit path is always
164 * executing on one CPU which is different from the completion
165 * path. There is also a cache line for members which are
166 * read but not written on the fast path.
167 *
168 * @efx: The associated Efx NIC
169 * @queue: DMA queue number
170 * @used: Queue is used by net driver
171 * @channel: The associated channel
172 * @buffer: The software buffer ring
173 * @txd: The hardware descriptor ring
174 * @read_count: Current read pointer.
175 * This is the number of buffers that have been removed from both rings.
176 * @stopped: Stopped flag.
177 * Set if this TX queue is currently stopping its port.
178 * @insert_count: Current insert pointer
179 * This is the number of buffers that have been added to the
180 * software ring.
181 * @write_count: Current write pointer
182 * This is the number of buffers that have been added to the
183 * hardware ring.
184 * @old_read_count: The value of read_count when last checked.
185 * This is here for performance reasons. The xmit path will
186 * only get the up-to-date value of read_count if this
187 * variable indicates that the queue is full. This is to
188 * avoid cache-line ping-pong between the xmit path and the
189 * completion path.
190 */
191struct efx_tx_queue {
192 /* Members which don't change on the fast path */
193 struct efx_nic *efx ____cacheline_aligned_in_smp;
194 int queue;
195 int used;
196 struct efx_channel *channel;
197 struct efx_nic *nic;
198 struct efx_tx_buffer *buffer;
199 struct efx_special_buffer txd;
200
201 /* Members used mainly on the completion path */
202 unsigned int read_count ____cacheline_aligned_in_smp;
203 int stopped;
204
205 /* Members used only on the xmit path */
206 unsigned int insert_count ____cacheline_aligned_in_smp;
207 unsigned int write_count;
208 unsigned int old_read_count;
209};
210
211/**
212 * struct efx_rx_buffer - An Efx RX data buffer
213 * @dma_addr: DMA base address of the buffer
214 * @skb: The associated socket buffer, if any.
215 * If both this and page are %NULL, the buffer slot is currently free.
216 * @page: The associated page buffer, if any.
217 * If both this and skb are %NULL, the buffer slot is currently free.
218 * @data: Pointer to ethernet header
219 * @len: Buffer length, in bytes.
220 * @unmap_addr: DMA address to unmap
221 */
222struct efx_rx_buffer {
223 dma_addr_t dma_addr;
224 struct sk_buff *skb;
225 struct page *page;
226 char *data;
227 unsigned int len;
228 dma_addr_t unmap_addr;
229};
230
231/**
232 * struct efx_rx_queue - An Efx RX queue
233 * @efx: The associated Efx NIC
234 * @queue: DMA queue number
235 * @used: Queue is used by net driver
236 * @channel: The associated channel
237 * @buffer: The software buffer ring
238 * @rxd: The hardware descriptor ring
239 * @added_count: Number of buffers added to the receive queue.
240 * @notified_count: Number of buffers given to NIC (<= @added_count).
241 * @removed_count: Number of buffers removed from the receive queue.
242 * @add_lock: Receive queue descriptor add spin lock.
243 * This lock must be held in order to add buffers to the RX
244 * descriptor ring (rxd and buffer) and to update added_count (but
245 * not removed_count).
246 * @max_fill: RX descriptor maximum fill level (<= ring size)
247 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
248 * (<= @max_fill)
249 * @fast_fill_limit: The level to which a fast fill will fill
250 * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
251 * @min_fill: RX descriptor minimum non-zero fill level.
252 * This records the minimum fill level observed when a ring
253 * refill was triggered.
254 * @min_overfill: RX descriptor minimum overflow fill level.
255 * This records the minimum fill level at which RX queue
256 * overflow was observed. It should never be set.
257 * @alloc_page_count: RX allocation strategy counter.
258 * @alloc_skb_count: RX allocation strategy counter.
259 * @work: Descriptor push work thread
260 * @buf_page: Page for next RX buffer.
261 * We can use a single page for multiple RX buffers. This tracks
262 * the remaining space in the allocation.
263 * @buf_dma_addr: Page's DMA address.
264 * @buf_data: Page's host address.
265 */
266struct efx_rx_queue {
267 struct efx_nic *efx;
268 int queue;
269 int used;
270 struct efx_channel *channel;
271 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd;
273
274 int added_count;
275 int notified_count;
276 int removed_count;
277 spinlock_t add_lock;
278 unsigned int max_fill;
279 unsigned int fast_fill_trigger;
280 unsigned int fast_fill_limit;
281 unsigned int min_fill;
282 unsigned int min_overfill;
283 unsigned int alloc_page_count;
284 unsigned int alloc_skb_count;
285 struct delayed_work work;
286 unsigned int slow_fill_count;
287
288 struct page *buf_page;
289 dma_addr_t buf_dma_addr;
290 char *buf_data;
291};
292
293/**
294 * struct efx_buffer - An Efx general-purpose buffer
295 * @addr: host base address of the buffer
296 * @dma_addr: DMA base address of the buffer
297 * @len: Buffer length, in bytes
298 *
299 * Falcon uses these buffers for its interrupt status registers and
300 * MAC stats dumps.
301 */
302struct efx_buffer {
303 void *addr;
304 dma_addr_t dma_addr;
305 unsigned int len;
306};
307
308
309/* Flags for channel->used_flags */
310#define EFX_USED_BY_RX 1
311#define EFX_USED_BY_TX 2
312#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
313
314enum efx_rx_alloc_method {
315 RX_ALLOC_METHOD_AUTO = 0,
316 RX_ALLOC_METHOD_SKB = 1,
317 RX_ALLOC_METHOD_PAGE = 2,
318};
319
320/**
321 * struct efx_channel - An Efx channel
322 *
323 * A channel comprises an event queue, at least one TX queue, at least
324 * one RX queue, and an associated tasklet for processing the event
325 * queue.
326 *
327 * @efx: Associated Efx NIC
328 * @evqnum: Event queue number
329 * @channel: Channel instance number
330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only)
333 * @has_interrupt: Channel has an interrupt
334 * @irq_moderation: IRQ moderation value (in us)
335 * @napi_dev: Net device used with NAPI
336 * @napi_str: NAPI control structure
337 * @reset_work: Scheduled reset work thread
338 * @work_pending: Is work pending via NAPI?
339 * @eventq: Event queue buffer
340 * @eventq_read_ptr: Event queue read pointer
341 * @last_eventq_read_ptr: Last event queue read pointer value.
342 * @eventq_magic: Event queue magic value for driver-generated test events
343 * @lro_mgr: LRO state
344 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
345 * and diagnostic counters
346 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
347 * descriptors
348 * @rx_alloc_pop_pages: RX allocation method currently in use for popping
349 * descriptors
350 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
351 * @n_rx_ip_frag_err: Count of RX IP fragment errors
352 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
353 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
354 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
355 * @n_rx_overlength: Count of RX_OVERLENGTH errors
356 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
357 */
358struct efx_channel {
359 struct efx_nic *efx;
360 int evqnum;
361 int channel;
362 int used_flags;
363 int enabled;
364 int irq;
365 unsigned int has_interrupt;
366 unsigned int irq_moderation;
367 struct net_device *napi_dev;
368 struct napi_struct napi_str;
369 struct work_struct reset_work;
370 int work_pending;
371 struct efx_special_buffer eventq;
372 unsigned int eventq_read_ptr;
373 unsigned int last_eventq_read_ptr;
374 unsigned int eventq_magic;
375
376 struct net_lro_mgr lro_mgr;
377 int rx_alloc_level;
378 int rx_alloc_push_pages;
379 int rx_alloc_pop_pages;
380
381 unsigned n_rx_tobe_disc;
382 unsigned n_rx_ip_frag_err;
383 unsigned n_rx_ip_hdr_chksum_err;
384 unsigned n_rx_tcp_udp_chksum_err;
385 unsigned n_rx_frm_trunc;
386 unsigned n_rx_overlength;
387 unsigned n_skbuff_leaks;
388
389 /* Used to pipeline received packets in order to optimise memory
390 * access with prefetches.
391 */
392 struct efx_rx_buffer *rx_pkt;
393 int rx_pkt_csummed;
394
395};
396
397/**
398 * struct efx_blinker - S/W LED blinking context
399 * @led_num: LED ID (board-specific meaning)
400 * @state: Current state - on or off
401 * @resubmit: Timer resubmission flag
402 * @timer: Control timer for blinking
403 */
404struct efx_blinker {
405 int led_num;
406 int state;
407 int resubmit;
408 struct timer_list timer;
409};
410
411
412/**
413 * struct efx_board - board information
414 * @type: Board model type
415 * @major: Major rev. ('A', 'B' ...)
416 * @minor: Minor rev. (0, 1, ...)
417 * @init: Initialisation function
418 * @init_leds: Sets up board LEDs
419 * @set_fault_led: Turns the fault LED on or off
420 * @blink: Starts/stops blinking
421 * @blinker: used to blink LEDs in software
422 */
423struct efx_board {
424 int type;
425 int major;
426 int minor;
427 int (*init) (struct efx_nic *nic);
428 /* As the LEDs are typically attached to the PHY, they
429 * have a separate init callback that happens later than
430 * board init. */
431 int (*init_leds)(struct efx_nic *efx);
432 void (*set_fault_led) (struct efx_nic *efx, int state);
433 void (*blink) (struct efx_nic *efx, int start);
434 struct efx_blinker blinker;
435};
436
437enum efx_int_mode {
438 /* Be careful if altering to correct macro below */
439 EFX_INT_MODE_MSIX = 0,
440 EFX_INT_MODE_MSI = 1,
441 EFX_INT_MODE_LEGACY = 2,
442 EFX_INT_MODE_MAX /* Insert any new items before this */
443};
444#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
445
446enum phy_type {
447 PHY_TYPE_NONE = 0,
448 PHY_TYPE_CX4_RTMR = 1,
449 PHY_TYPE_1G_ALASKA = 2,
450 PHY_TYPE_10XPRESS = 3,
451 PHY_TYPE_XFP = 4,
452 PHY_TYPE_PM8358 = 6,
453 PHY_TYPE_MAX /* Insert any new items before this */
454};
455
456#define PHY_ADDR_INVALID 0xff
457
458enum nic_state {
459 STATE_INIT = 0,
460 STATE_RUNNING = 1,
461 STATE_FINI = 2,
462 STATE_RESETTING = 3, /* rtnl_lock always held */
463 STATE_DISABLED = 4,
464 STATE_MAX,
465};
466
467/*
468 * Alignment of page-allocated RX buffers
469 *
470 * Controls the number of bytes inserted at the start of an RX buffer.
471 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
472 * of the skb->head for hardware DMA].
473 */
474#if defined(__i386__) || defined(__x86_64__)
475#define EFX_PAGE_IP_ALIGN 0
476#else
477#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
478#endif
479
480/*
481 * Alignment of the skb->head which wraps a page-allocated RX buffer
482 *
483 * The skb allocated to wrap an rx_buffer can have this alignment. Since
484 * the data is memcpy'd from the rx_buf, it does not need to be equal to
485 * EFX_PAGE_IP_ALIGN.
486 */
487#define EFX_PAGE_SKB_ALIGN 2
488
489/* Forward declaration */
490struct efx_nic;
491
492/* Pseudo bit-mask flow control field */
493enum efx_fc_type {
494 EFX_FC_RX = 1,
495 EFX_FC_TX = 2,
496 EFX_FC_AUTO = 4,
497};
498
499/**
500 * struct efx_phy_operations - Efx PHY operations table
501 * @init: Initialise PHY
502 * @fini: Shut down PHY
503 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
504 * @clear_interrupt: Clear down interrupt
505 * @blink: Blink LEDs
506 * @check_hw: Check hardware
507 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
508 * @mmds: MMD presence mask
509 */
510struct efx_phy_operations {
511 int (*init) (struct efx_nic *efx);
512 void (*fini) (struct efx_nic *efx);
513 void (*reconfigure) (struct efx_nic *efx);
514 void (*clear_interrupt) (struct efx_nic *efx);
515 int (*check_hw) (struct efx_nic *efx);
516 void (*reset_xaui) (struct efx_nic *efx);
517 int mmds;
518};
519
520/*
521 * Efx extended statistics
522 *
523 * Not all statistics are provided by all supported MACs. The purpose
524 * is this structure is to contain the raw statistics provided by each
525 * MAC.
526 */
527struct efx_mac_stats {
528 u64 tx_bytes;
529 u64 tx_good_bytes;
530 u64 tx_bad_bytes;
531 unsigned long tx_packets;
532 unsigned long tx_bad;
533 unsigned long tx_pause;
534 unsigned long tx_control;
535 unsigned long tx_unicast;
536 unsigned long tx_multicast;
537 unsigned long tx_broadcast;
538 unsigned long tx_lt64;
539 unsigned long tx_64;
540 unsigned long tx_65_to_127;
541 unsigned long tx_128_to_255;
542 unsigned long tx_256_to_511;
543 unsigned long tx_512_to_1023;
544 unsigned long tx_1024_to_15xx;
545 unsigned long tx_15xx_to_jumbo;
546 unsigned long tx_gtjumbo;
547 unsigned long tx_collision;
548 unsigned long tx_single_collision;
549 unsigned long tx_multiple_collision;
550 unsigned long tx_excessive_collision;
551 unsigned long tx_deferred;
552 unsigned long tx_late_collision;
553 unsigned long tx_excessive_deferred;
554 unsigned long tx_non_tcpudp;
555 unsigned long tx_mac_src_error;
556 unsigned long tx_ip_src_error;
557 u64 rx_bytes;
558 u64 rx_good_bytes;
559 u64 rx_bad_bytes;
560 unsigned long rx_packets;
561 unsigned long rx_good;
562 unsigned long rx_bad;
563 unsigned long rx_pause;
564 unsigned long rx_control;
565 unsigned long rx_unicast;
566 unsigned long rx_multicast;
567 unsigned long rx_broadcast;
568 unsigned long rx_lt64;
569 unsigned long rx_64;
570 unsigned long rx_65_to_127;
571 unsigned long rx_128_to_255;
572 unsigned long rx_256_to_511;
573 unsigned long rx_512_to_1023;
574 unsigned long rx_1024_to_15xx;
575 unsigned long rx_15xx_to_jumbo;
576 unsigned long rx_gtjumbo;
577 unsigned long rx_bad_lt64;
578 unsigned long rx_bad_64_to_15xx;
579 unsigned long rx_bad_15xx_to_jumbo;
580 unsigned long rx_bad_gtjumbo;
581 unsigned long rx_overflow;
582 unsigned long rx_missed;
583 unsigned long rx_false_carrier;
584 unsigned long rx_symbol_error;
585 unsigned long rx_align_error;
586 unsigned long rx_length_error;
587 unsigned long rx_internal_error;
588 unsigned long rx_good_lt64;
589};
590
591/* Number of bits used in a multicast filter hash address */
592#define EFX_MCAST_HASH_BITS 8
593
594/* Number of (single-bit) entries in a multicast filter hash */
595#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
596
597/* An Efx multicast filter hash */
598union efx_multicast_hash {
599 u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
600 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
601};
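/* Illustrative sketch of marking one hash bucket as wanted (the bucket
 * index itself comes from a hardware-specific hash of the MAC address,
 * which is not shown here): */
static inline void efx_example_mcast_hash_set(union efx_multicast_hash *hash,
					      unsigned int bucket)
{
	hash->byte[bucket / 8] |= 1 << (bucket % 8);
}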
602
603/**
604 * struct efx_nic - an Efx NIC
605 * @name: Device name (net device name or bus id before net device registered)
606 * @pci_dev: The PCI device
607 * @type: Controller type attributes
608 * @legacy_irq: IRQ number
609 * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
610 * @reset_work: Scheduled reset workitem
611 * @monitor_work: Hardware monitor workitem
612 * @membase_phys: Memory BAR value as physical address
613 * @membase: Memory BAR value
614 * @biu_lock: BIU (bus interface unit) lock
615 * @interrupt_mode: Interrupt mode
616 * @i2c: I2C interface
617 * @board_info: Board-level information
618 * @state: Device state flag. Serialised by the rtnl_lock.
619 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
620 * @tx_queue: TX DMA queues
621 * @rx_queue: RX DMA queues
622 * @channel: Channels
623 * @rss_queues: Number of RSS queues
624 * @rx_buffer_len: RX buffer length
625 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
626 * @irq_status: Interrupt status buffer
627 * @last_irq_cpu: Last CPU to handle interrupt.
628 * This register is written with the SMP processor ID whenever an
629 * interrupt is handled. It is used by falcon_test_interrupt()
630 * to verify that an interrupt has occurred.
631 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
632 * @nic_data: Hardware dependent state
633 * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
634 * efx_reconfigure_port()
635 * @port_enabled: Port enabled indicator.
636 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
637 * efx_reconfigure_work with kernel interfaces. Safe to read under any
638 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
639 * be held to modify it.
640 * @port_initialized: Port initialized?
641 * @net_dev: Operating system network device. Consider holding the rtnl lock
642 * @rx_checksum_enabled: RX checksumming enabled
643 * @netif_stop_count: Port stop count
644 * @netif_stop_lock: Port stop lock
645 * @mac_stats: MAC statistics. These include all statistics the MACs
646 * can provide. Generic code converts these into a standard
647 * &struct net_device_stats.
648 * @stats_buffer: DMA buffer for statistics
649 * @stats_lock: Statistics update lock
650 * @mac_address: Permanent MAC address
651 * @phy_type: PHY type
652 * @phy_lock: PHY access lock
653 * @phy_op: PHY interface
654 * @phy_data: PHY private data (including PHY-specific stats)
655 * @mii: PHY interface
656 * @phy_powered: PHY power state
657 * @tx_disabled: PHY transmitter turned off
658 * @link_up: Link status
659 * @link_options: Link options (MII/GMII format)
660 * @n_link_state_changes: Number of times the link has changed state
661 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
662 * @multicast_hash: Multicast hash table
663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options
664 * @reconfigure_work: work item for dealing with PHY events
665 *
666 * The @priv field of the corresponding &struct net_device points to
667 * this.
668 */
669struct efx_nic {
670 char name[IFNAMSIZ];
671 struct pci_dev *pci_dev;
672 const struct efx_nic_type *type;
673 int legacy_irq;
674 struct workqueue_struct *workqueue;
675 struct work_struct reset_work;
676 struct delayed_work monitor_work;
677 unsigned long membase_phys;
678 void __iomem *membase;
679 spinlock_t biu_lock;
680 enum efx_int_mode interrupt_mode;
681
682 struct efx_i2c_interface i2c;
683 struct efx_board board_info;
684
685 enum nic_state state;
686 enum reset_type reset_pending;
687
688 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
689 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
690 struct efx_channel channel[EFX_MAX_CHANNELS];
691
692 int rss_queues;
693 unsigned int rx_buffer_len;
694 unsigned int rx_buffer_order;
695
696 struct efx_buffer irq_status;
697 volatile signed int last_irq_cpu;
698
699 unsigned n_rx_nodesc_drop_cnt;
700
701 void *nic_data;
702
703 struct mutex mac_lock;
704 int port_enabled;
705
706 int port_initialized;
707 struct net_device *net_dev;
708 int rx_checksum_enabled;
709
710 atomic_t netif_stop_count;
711 spinlock_t netif_stop_lock;
712
713 struct efx_mac_stats mac_stats;
714 struct efx_buffer stats_buffer;
715 spinlock_t stats_lock;
716
717 unsigned char mac_address[ETH_ALEN];
718
719 enum phy_type phy_type;
720 spinlock_t phy_lock;
721 struct efx_phy_operations *phy_op;
722 void *phy_data;
723 struct mii_if_info mii;
724
725 int link_up;
726 unsigned int link_options;
727 unsigned int n_link_state_changes;
728
729 int promiscuous;
730 union efx_multicast_hash multicast_hash;
731 enum efx_fc_type flow_control;
732 struct work_struct reconfigure_work;
733
734 atomic_t rx_reset;
735};
736
737/**
738 * struct efx_nic_type - Efx device type definition
739 * @mem_bar: Memory BAR number
740 * @mem_map_size: Memory BAR mapped size
741 * @txd_ptr_tbl_base: TX descriptor ring base address
742 * @rxd_ptr_tbl_base: RX descriptor ring base address
743 * @buf_tbl_base: Buffer table base address
744 * @evq_ptr_tbl_base: Event queue pointer table base address
745 * @evq_rptr_tbl_base: Event queue read-pointer table base address
746 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
747 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
748 * @evq_size: Event queue size (must be a power of two)
749 * @max_dma_mask: Maximum possible DMA mask
750 * @tx_dma_mask: TX DMA mask
751 * @bug5391_mask: Address mask for bug 5391 workaround
752 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
753 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
754 * @rx_buffer_padding: Padding added to each RX buffer
755 * @max_interrupt_mode: Highest capability interrupt mode supported
756 * from &enum efx_init_mode.
757 * @phys_addr_channels: Number of channels with physically addressed
758 * descriptors
759 */
760struct efx_nic_type {
761 unsigned int mem_bar;
762 unsigned int mem_map_size;
763 unsigned int txd_ptr_tbl_base;
764 unsigned int rxd_ptr_tbl_base;
765 unsigned int buf_tbl_base;
766 unsigned int evq_ptr_tbl_base;
767 unsigned int evq_rptr_tbl_base;
768
769 unsigned int txd_ring_mask;
770 unsigned int rxd_ring_mask;
771 unsigned int evq_size;
772 dma_addr_t max_dma_mask;
773 unsigned int tx_dma_mask;
774 unsigned bug5391_mask;
775
776 int rx_xoff_thresh;
777 int rx_xon_thresh;
778 unsigned int rx_buffer_padding;
779 unsigned int max_interrupt_mode;
780 unsigned int phys_addr_channels;
781};
782
783/**************************************************************************
784 *
785 * Prototypes and inline functions
786 *
787 *************************************************************************/
788
789/* Iterate over all used channels */
790#define efx_for_each_channel(_channel, _efx) \
791 for (_channel = &_efx->channel[0]; \
792 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
793 _channel++) \
794 if (!_channel->used_flags) \
795 continue; \
796 else
797
798/* Iterate over all used channels with interrupts */
799#define efx_for_each_channel_with_interrupt(_channel, _efx) \
800 for (_channel = &_efx->channel[0]; \
801 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
802 _channel++) \
803 if (!(_channel->used_flags && _channel->has_interrupt)) \
804 continue; \
805 else
806
807/* Iterate over all used TX queues */
808#define efx_for_each_tx_queue(_tx_queue, _efx) \
809 for (_tx_queue = &_efx->tx_queue[0]; \
810 _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \
811 _tx_queue++) \
812 if (!_tx_queue->used) \
813 continue; \
814 else
815
816/* Iterate over all TX queues belonging to a channel */
817#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
818 for (_tx_queue = &_channel->efx->tx_queue[0]; \
819 _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \
820 _tx_queue++) \
821 if ((!_tx_queue->used) || \
822 (_tx_queue->channel != _channel)) \
823 continue; \
824 else
825
826/* Iterate over all used RX queues */
827#define efx_for_each_rx_queue(_rx_queue, _efx) \
828 for (_rx_queue = &_efx->rx_queue[0]; \
829 _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
830 _rx_queue++) \
831 if (!_rx_queue->used) \
832 continue; \
833 else
834
835/* Iterate over all RX queues belonging to a channel */
836#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
837 for (_rx_queue = &_channel->efx->rx_queue[0]; \
838 _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
839 _rx_queue++) \
840 if ((!_rx_queue->used) || \
841 (_rx_queue->channel != _channel)) \
842 continue; \
843 else
844
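/* Usage sketch for the iterators above (illustrative only; do_something()
 * is a placeholder):
 *
 *	struct efx_rx_queue *rx_queue;
 *
 *	efx_for_each_rx_queue(rx_queue, efx)
 *		do_something(rx_queue);
 *
 * Queues with used == 0 are skipped by the if/continue/else in the
 * macro body. */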
845/* Returns a pointer to the specified receive buffer in the RX
846 * descriptor queue.
847 */
848static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
849 unsigned int index)
850{
851 return (&rx_queue->buffer[index]);
852}
853
854/* Set bit in a little-endian bitfield */
855static inline void set_bit_le(int nr, unsigned char *addr)
856{
857 addr[nr / 8] |= (1 << (nr % 8));
858}
859
860/* Clear bit in a little-endian bitfield */
861static inline void clear_bit_le(int nr, unsigned char *addr)
862{
863 addr[nr / 8] &= ~(1 << (nr % 8));
864}
865
866
867/**
868 * EFX_MAX_FRAME_LEN - calculate maximum frame length
869 *
870 * This calculates the maximum frame length that will be used for a
871 * given MTU. The frame length will be equal to the MTU plus a
872 * constant amount of header space and padding. This is the quantity
873 * that the net driver will program into the MAC as the maximum frame
874 * length.
875 *
876 * The 10G MAC used in Falcon requires 8-byte alignment on the frame
877 * length, so we round up to the nearest 8.
878 */
879#define EFX_MAX_FRAME_LEN(mtu) \
880 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)
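/* For example, with the standard ETH_HLEN of 14 and VLAN_HLEN of 4, an MTU
 * of 1500 gives 1500 + 14 + 4 + 4 (FCS) = 1522, rounded up to 1528. */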
881
882
883#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
new file mode 100644
index 000000000000..9d02c84e6b2d
--- /dev/null
+++ b/drivers/net/sfc/phy.h
@@ -0,0 +1,48 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_PHY_H
11#define EFX_PHY_H
12
13/****************************************************************************
14 * 10Xpress (SFX7101) PHY
15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops;
17
18enum tenxpress_state {
19 TENXPRESS_STATUS_OFF = 0,
20 TENXPRESS_STATUS_OTEMP = 1,
21 TENXPRESS_STATUS_NORMAL = 2,
22};
23
24extern void tenxpress_set_state(struct efx_nic *efx,
25 enum tenxpress_state state);
26extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
27extern void tenxpress_crc_err(struct efx_nic *efx);
28
29/****************************************************************************
30 * Exported functions from the driver for XFP optical PHYs
31 */
32extern struct efx_phy_operations falcon_xfp_phy_ops;
33
34/* The QUAKE XFP PHY provides various H/W control states for LEDs */
35#define QUAKE_LED_LINK_INVAL (0)
36#define QUAKE_LED_LINK_STAT (1)
37#define QUAKE_LED_LINK_ACT (2)
38#define QUAKE_LED_LINK_ACTSTAT (3)
39#define QUAKE_LED_OFF (4)
40#define QUAKE_LED_ON (5)
41#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
42/* What link the LED tracks */
43#define QUAKE_LED_TXLINK (0)
44#define QUAKE_LED_RXLINK (8)
45
46extern void xfp_set_led(struct efx_nic *p, int led, int state);
47
48#endif
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
new file mode 100644
index 000000000000..551299b462ae
--- /dev/null
+++ b/drivers/net/sfc/rx.c
@@ -0,0 +1,875 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/socket.h>
12#include <linux/in.h>
13#include <linux/ip.h>
14#include <linux/tcp.h>
15#include <linux/udp.h>
16#include <net/ip.h>
17#include <net/checksum.h>
18#include "net_driver.h"
19#include "rx.h"
20#include "efx.h"
21#include "falcon.h"
22#include "workarounds.h"
23
24/* Number of RX descriptors pushed at once. */
25#define EFX_RX_BATCH 8
26
27/* Size of buffer allocated for skb header area. */
28#define EFX_SKB_HEADERS 64u
29
30/*
31 * rx_alloc_method - RX buffer allocation method
32 *
33 * This driver supports two methods for allocating and using RX buffers:
34 * each RX buffer may be backed by an skb or by an order-n page.
35 *
36 * When LRO is in use, the second method has a lower overhead,
37 * since we don't have to allocate then free skbs on reassembled frames.
38 *
39 * Values:
40 * - RX_ALLOC_METHOD_AUTO = 0
41 * - RX_ALLOC_METHOD_SKB = 1
42 * - RX_ALLOC_METHOD_PAGE = 2
43 *
44 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
45 * controlled by the parameters below.
46 *
47 * - Since pushing and popping descriptors are separated by the rx_queue
48 * size, the watermarks should be ~rxd_size.
49 * - The performance win by using page-based allocation for LRO is less
50 * than the performance hit of using page-based allocation of non-LRO,
51 * so the watermarks should reflect this.
52 *
53 * Per channel we maintain a single variable, updated by each channel:
54 *
55 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
56 * RX_ALLOC_FACTOR_SKB)
57 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
58 * limits the hysteresis), and update the allocation strategy:
59 *
60 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
61 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
62 */
63static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
64
65#define RX_ALLOC_LEVEL_LRO 0x2000
66#define RX_ALLOC_LEVEL_MAX 0x3000
67#define RX_ALLOC_FACTOR_LRO 1
68#define RX_ALLOC_FACTOR_SKB (-2)
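/* A minimal sketch of the per-poll update described above (illustrative
 * only; the fields are those of struct efx_channel in net_driver.h):
 *
 *	if (channel->rx_alloc_level < 0)
 *		channel->rx_alloc_level = 0;
 *	else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
 *		channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
 *	channel->rx_alloc_push_pages =
 *		(channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO);
 */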
69
70/* This is the percentage fill level below which new RX descriptors
71 * will be added to the RX descriptor ring.
72 */
73static unsigned int rx_refill_threshold = 90;
74
75/* This is the percentage fill level to which an RX queue will be refilled
76 * when the "RX refill threshold" is reached.
77 */
78static unsigned int rx_refill_limit = 95;
79
80/*
81 * RX maximum head room required.
82 *
83 * This must be at least 1 to prevent overflow and at least 2 to allow
84 * pipelined receives.
85 */
86#define EFX_RXD_HEAD_ROOM 2
87
88/* Macros for zero-order pages (potentially) containing multiple RX buffers */
89#define RX_DATA_OFFSET(_data) \
90 (((unsigned long) (_data)) & (PAGE_SIZE-1))
91#define RX_BUF_OFFSET(_rx_buf) \
92 RX_DATA_OFFSET((_rx_buf)->data)
93
94#define RX_PAGE_SIZE(_efx) \
95 (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
96
97
98/**************************************************************************
99 *
100 * Linux generic LRO handling
101 *
102 **************************************************************************
103 */
104
105static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
106 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
107{
108 struct efx_channel *channel = (struct efx_channel *)priv;
109 struct iphdr *iph;
110 struct tcphdr *th;
111
112 iph = (struct iphdr *)skb->data;
113 if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
114 goto fail;
115
116 th = (struct tcphdr *)(skb->data + iph->ihl * 4);
117
118 *tcpudp_hdr = th;
119 *ip_hdr = iph;
120 *hdr_flags = LRO_IPV4 | LRO_TCP;
121
122 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
123 return 0;
124fail:
125 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
126 return -1;
127}
128
129static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
130 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
131 void *priv)
132{
133 struct efx_channel *channel = (struct efx_channel *)priv;
134 struct ethhdr *eh;
135 struct iphdr *iph;
136
137 /* We support EtherII and VLAN encapsulated IPv4 */
138 eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
139 *mac_hdr = eh;
140
141 if (eh->h_proto == htons(ETH_P_IP)) {
142 iph = (struct iphdr *)(eh + 1);
143 } else {
144 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
145 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
146 goto fail;
147
148 iph = (struct iphdr *)(veh + 1);
149 }
150 *ip_hdr = iph;
151
152 /* We can only do LRO over TCP */
153 if (iph->protocol != IPPROTO_TCP)
154 goto fail;
155
156 *hdr_flags = LRO_IPV4 | LRO_TCP;
157 *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
158
159 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
160 return 0;
161 fail:
162 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
163 return -1;
164}
165
166int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
167{
168 size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
169 struct net_lro_desc *lro_arr;
170
171 /* Allocate the LRO descriptors structure */
172 lro_arr = kzalloc(s, GFP_KERNEL);
173 if (lro_arr == NULL)
174 return -ENOMEM;
175
176 lro_mgr->lro_arr = lro_arr;
177 lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
178 lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
179 lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
180
181 lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
182 lro_mgr->get_frag_header = efx_get_frag_hdr;
183 lro_mgr->dev = efx->net_dev;
184
185 lro_mgr->features = LRO_F_NAPI;
186
187 /* We can pass packets up with the checksum intact */
188 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
189
190 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
191
192 return 0;
193}
194
195void efx_lro_fini(struct net_lro_mgr *lro_mgr)
196{
197 kfree(lro_mgr->lro_arr);
198 lro_mgr->lro_arr = NULL;
199}
200
201/**
202 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
203 *
204 * @rx_queue: Efx RX queue
205 * @rx_buf: RX buffer structure to populate
206 *
207 * This allocates memory for a new receive buffer, maps it for DMA,
208 * and populates a struct efx_rx_buffer with the relevant
209 * information. Return a negative error code or 0 on success.
210 */
211static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
212 struct efx_rx_buffer *rx_buf)
213{
214 struct efx_nic *efx = rx_queue->efx;
215 struct net_device *net_dev = efx->net_dev;
216 int skb_len = efx->rx_buffer_len;
217
218 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
219 if (unlikely(!rx_buf->skb))
220 return -ENOMEM;
221
222 /* Adjust the SKB for padding and checksum */
223 skb_reserve(rx_buf->skb, NET_IP_ALIGN);
224 rx_buf->len = skb_len - NET_IP_ALIGN;
225 rx_buf->data = (char *)rx_buf->skb->data;
226 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
227
228 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
229 rx_buf->data, rx_buf->len,
230 PCI_DMA_FROMDEVICE);
231
232 if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
233 dev_kfree_skb_any(rx_buf->skb);
234 rx_buf->skb = NULL;
235 return -EIO;
236 }
237
238 return 0;
239}
240
241/**
242 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
243 *
244 * @rx_queue: Efx RX queue
245 * @rx_buf: RX buffer structure to populate
246 *
247 * This allocates memory for a new receive buffer, maps it for DMA,
248 * and populates a struct efx_rx_buffer with the relevant
249 * information. Return a negative error code or 0 on success.
250 */
251static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
252 struct efx_rx_buffer *rx_buf)
253{
254 struct efx_nic *efx = rx_queue->efx;
255 int bytes, space, offset;
256
257 bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
258
259 /* If there is space left in the previously allocated page,
260 * then use it. Otherwise allocate a new one */
261 rx_buf->page = rx_queue->buf_page;
262 if (rx_buf->page == NULL) {
263 dma_addr_t dma_addr;
264
265 rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
266 efx->rx_buffer_order);
267 if (unlikely(rx_buf->page == NULL))
268 return -ENOMEM;
269
270 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
271 0, RX_PAGE_SIZE(efx),
272 PCI_DMA_FROMDEVICE);
273
274 if (unlikely(pci_dma_mapping_error(dma_addr))) {
275 __free_pages(rx_buf->page, efx->rx_buffer_order);
276 rx_buf->page = NULL;
277 return -EIO;
278 }
279
280 rx_queue->buf_page = rx_buf->page;
281 rx_queue->buf_dma_addr = dma_addr;
282 rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
283 EFX_PAGE_IP_ALIGN);
284 }
285
286 offset = RX_DATA_OFFSET(rx_queue->buf_data);
287 rx_buf->len = bytes;
288 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
289 rx_buf->data = rx_queue->buf_data;
290
291 /* Try to pack multiple buffers per page */
292 if (efx->rx_buffer_order == 0) {
293 /* The next buffer starts on the next 512 byte boundary */
294 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
295 offset += ((bytes + 0x1ff) & ~0x1ff);
296
297 space = RX_PAGE_SIZE(efx) - offset;
298 if (space >= bytes) {
299 /* Refs dropped on kernel releasing each skb */
300 get_page(rx_queue->buf_page);
301 goto out;
302 }
303 }
304
305 /* This is the final RX buffer for this page, so mark it for
306 * unmapping */
307 rx_queue->buf_page = NULL;
308 rx_buf->unmap_addr = rx_queue->buf_dma_addr;
309
310 out:
311 return 0;
312}
313
314/* This allocates memory for a new receive buffer, maps it for DMA,
315 * and populates a struct efx_rx_buffer with the relevant
316 * information.
317 */
318static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
319 struct efx_rx_buffer *new_rx_buf)
320{
321 int rc = 0;
322
323 if (rx_queue->channel->rx_alloc_push_pages) {
324 new_rx_buf->skb = NULL;
325 rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
326 rx_queue->alloc_page_count++;
327 } else {
328 new_rx_buf->page = NULL;
329 rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
330 rx_queue->alloc_skb_count++;
331 }
332
333 if (unlikely(rc < 0))
334 EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
335 rx_queue->queue, rc);
336 return rc;
337}
338
339static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
340 struct efx_rx_buffer *rx_buf)
341{
342 if (rx_buf->page) {
343 EFX_BUG_ON_PARANOID(rx_buf->skb);
344 if (rx_buf->unmap_addr) {
345 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
346 RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
347 rx_buf->unmap_addr = 0;
348 }
349 } else if (likely(rx_buf->skb)) {
350 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
351 rx_buf->len, PCI_DMA_FROMDEVICE);
352 }
353}
354
355static inline void efx_free_rx_buffer(struct efx_nic *efx,
356 struct efx_rx_buffer *rx_buf)
357{
358 if (rx_buf->page) {
359 __free_pages(rx_buf->page, efx->rx_buffer_order);
360 rx_buf->page = NULL;
361 } else if (likely(rx_buf->skb)) {
362 dev_kfree_skb_any(rx_buf->skb);
363 rx_buf->skb = NULL;
364 }
365}
366
367static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
368 struct efx_rx_buffer *rx_buf)
369{
370 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
371 efx_free_rx_buffer(rx_queue->efx, rx_buf);
372}
373
374/**
375 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
376 * @rx_queue: RX descriptor queue
377 * @retry: Recheck the fill level
378 * This will aim to fill the RX descriptor queue up to
379 * @rx_queue->fast_fill_limit. If there is insufficient atomic
380 * memory to do so, the caller should retry.
381 */
382static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
383 int retry)
384{
385 struct efx_rx_buffer *rx_buf;
386 unsigned fill_level, index;
387 int i, space, rc = 0;
388
389 /* Calculate current fill level. Do this outside the lock,
390 * because most of the time we'll end up not wanting to do the
391 * fill anyway.
392 */
393 fill_level = (rx_queue->added_count - rx_queue->removed_count);
394 EFX_BUG_ON_PARANOID(fill_level >
395 rx_queue->efx->type->rxd_ring_mask + 1);
396
397 /* Don't fill if we don't need to */
398 if (fill_level >= rx_queue->fast_fill_trigger)
399 return 0;
400
401 /* Record minimum fill level */
402 if (unlikely(fill_level < rx_queue->min_fill))
403 if (fill_level)
404 rx_queue->min_fill = fill_level;
405
406 /* Acquire RX add lock. If this lock is contended, then a fast
407 * fill must already be in progress (e.g. in the refill
408 * tasklet), so we don't need to do anything
409 */
410 if (!spin_trylock_bh(&rx_queue->add_lock))
411 return -1;
412
413 retry:
414 /* Recalculate current fill level now that we have the lock */
415 fill_level = (rx_queue->added_count - rx_queue->removed_count);
416 EFX_BUG_ON_PARANOID(fill_level >
417 rx_queue->efx->type->rxd_ring_mask + 1);
418 space = rx_queue->fast_fill_limit - fill_level;
419 if (space < EFX_RX_BATCH)
420 goto out_unlock;
421
422 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
423 " level %d to level %d using %s allocation\n",
424 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
425 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
426
427 do {
428 for (i = 0; i < EFX_RX_BATCH; ++i) {
429 index = (rx_queue->added_count &
430 rx_queue->efx->type->rxd_ring_mask);
431 rx_buf = efx_rx_buffer(rx_queue, index);
432 rc = efx_init_rx_buffer(rx_queue, rx_buf);
433 if (unlikely(rc))
434 goto out;
435 ++rx_queue->added_count;
436 }
437 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
438
439 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
440 "to level %d\n", rx_queue->queue,
441 rx_queue->added_count - rx_queue->removed_count);
442
443 out:
444 /* Send write pointer to card. */
445 falcon_notify_rx_desc(rx_queue);
446
447 /* If the fast fill is running from inside the refill tasklet, then
448 * for SMP systems it may be running on a different CPU to
449 * RX event processing, which means that the fill level may now be
450 * out of date. */
451 if (unlikely(retry && (rc == 0)))
452 goto retry;
453
454 out_unlock:
455 spin_unlock_bh(&rx_queue->add_lock);
456
457 return rc;
458}
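
Editor's note: added_count and removed_count are free-running counters, so the fill level is simply their unsigned difference and the descriptor index is the counter masked by the ring size. A minimal standalone sketch of that arithmetic; the ring size and counter values below are assumptions chosen for illustration, not taken from the driver:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned ring_mask = 4096 - 1;	/* assumed ring size */
		unsigned added_count = 0xfffffff0u;	/* free-running, wraps at 2^32 */
		unsigned removed_count = 0xffffffe8u;

		/* Unsigned subtraction stays correct across wraparound as long
		 * as the difference never exceeds the ring size. */
		unsigned fill_level = added_count - removed_count;
		assert(fill_level <= ring_mask + 1);

		/* The index actually used to post the next descriptor. */
		unsigned index = added_count & ring_mask;

		printf("fill_level=%u, next index=%u\n", fill_level, index);
		return 0;
	}
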
459
460/**
461 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
462 * @rx_queue: RX descriptor queue
463 *
464 * This will aim to fill the RX descriptor queue up to
465 * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
466 * it will schedule a work item to continue the fast fill immediately.
467 */
468void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
469{
470 int rc;
471
472 rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
473 if (unlikely(rc)) {
474 /* Schedule the work item to run immediately. The hope is
475 * that work is immediately pending to free some memory
476 * (e.g. an RX event or TX completion)
477 */
478 efx_schedule_slow_fill(rx_queue, 0);
479 }
480}
481
482void efx_rx_work(struct work_struct *data)
483{
484 struct efx_rx_queue *rx_queue;
485 int rc;
486
487 rx_queue = container_of(data, struct efx_rx_queue, work.work);
488
489 if (unlikely(!rx_queue->channel->enabled))
490 return;
491
492 EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
493 "%d\n", rx_queue->queue, raw_smp_processor_id());
494
495 ++rx_queue->slow_fill_count;
496 /* Push new RX descriptors, allowing at least 1 jiffy for
497 * the kernel to free some more memory. */
498 rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
499 if (rc)
500 efx_schedule_slow_fill(rx_queue, 1);
501}
502
503static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
504 struct efx_rx_buffer *rx_buf,
505 int len, int *discard,
506 int *leak_packet)
507{
508 struct efx_nic *efx = rx_queue->efx;
509 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
510
511 if (likely(len <= max_len))
512 return;
513
514 /* The packet must be discarded, but this is only a fatal error
515 * if the caller indicated it was
516 */
517 *discard = 1;
518
519 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
520 EFX_ERR_RL(efx, " RX queue %d seriously overlength "
521 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
522 rx_queue->queue, len, max_len,
523 efx->type->rx_buffer_padding);
524 /* If this buffer was skb-allocated, then the meta
525 * data at the end of the skb will be trashed. So
526 * we have no choice but to leak the fragment.
527 */
528 *leak_packet = (rx_buf->skb != NULL);
529 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
530 } else {
531 EFX_ERR_RL(efx, " RX queue %d overlength RX event "
532 "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
533 }
534
535 rx_queue->channel->n_rx_overlength++;
536}
537
538/* Pass a received packet up through the generic LRO stack
539 *
540 * Handles driverlink veto, and passes the fragment up via
541 * the appropriate LRO method
542 */
543static inline void efx_rx_packet_lro(struct efx_channel *channel,
544 struct efx_rx_buffer *rx_buf)
545{
546 struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
547 void *priv = channel;
548
549 /* Pass the skb/page into the LRO engine */
550 if (rx_buf->page) {
551 struct skb_frag_struct frags;
552
553 frags.page = rx_buf->page;
554 frags.page_offset = RX_BUF_OFFSET(rx_buf);
555 frags.size = rx_buf->len;
556
557 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
558 rx_buf->len, priv, 0);
559
560 EFX_BUG_ON_PARANOID(rx_buf->skb);
561 rx_buf->page = NULL;
562 } else {
563 EFX_BUG_ON_PARANOID(!rx_buf->skb);
564
565 lro_receive_skb(lro_mgr, rx_buf->skb, priv);
566 rx_buf->skb = NULL;
567 }
568}
569
570/* Allocate and construct an SKB around a struct page.*/
571static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
572 struct efx_nic *efx,
573 int hdr_len)
574{
575 struct sk_buff *skb;
576
577 /* Allocate an SKB to store the headers */
578 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
579 if (unlikely(skb == NULL)) {
580 EFX_ERR_RL(efx, "RX out of memory for skb\n");
581 return NULL;
582 }
583
584 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
585 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
586
587 skb->ip_summed = CHECKSUM_UNNECESSARY;
588 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
589
590 skb->len = rx_buf->len;
591 skb->truesize = rx_buf->len + sizeof(struct sk_buff);
592 memcpy(skb->data, rx_buf->data, hdr_len);
593 skb->tail += hdr_len;
594
595 /* Append the remaining page onto the frag list */
596 if (unlikely(rx_buf->len > hdr_len)) {
597 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
598 frag->page = rx_buf->page;
599 frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
600 frag->size = skb->len - hdr_len;
601 skb_shinfo(skb)->nr_frags = 1;
602 skb->data_len = frag->size;
603 } else {
604 __free_pages(rx_buf->page, efx->rx_buffer_order);
605 skb->data_len = 0;
606 }
607
608 /* Ownership has transferred from the rx_buf to skb */
609 rx_buf->page = NULL;
610
611 /* Move past the ethernet header */
612 skb->protocol = eth_type_trans(skb, efx->net_dev);
613
614 return skb;
615}
616
617void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
618 unsigned int len, int checksummed, int discard)
619{
620 struct efx_nic *efx = rx_queue->efx;
621 struct efx_rx_buffer *rx_buf;
622 int leak_packet = 0;
623
624 rx_buf = efx_rx_buffer(rx_queue, index);
625 EFX_BUG_ON_PARANOID(!rx_buf->data);
626 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
627 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
628
629 /* This allows the refill path to post another buffer.
630 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
631 * isn't overwritten yet.
632 */
633 rx_queue->removed_count++;
634
635 /* Validate the length encoded in the event vs the descriptor pushed */
636 efx_rx_packet__check_len(rx_queue, rx_buf, len,
637 &discard, &leak_packet);
638
639 EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
640 rx_queue->queue, index,
641 (unsigned long long)rx_buf->dma_addr, len,
642 (checksummed ? " [SUMMED]" : ""),
643 (discard ? " [DISCARD]" : ""));
644
645 /* Discard packet, if instructed to do so */
646 if (unlikely(discard)) {
647 if (unlikely(leak_packet))
648 rx_queue->channel->n_skbuff_leaks++;
649 else
650 /* We haven't called efx_unmap_rx_buffer yet,
651 * so fini the entire rx_buffer here */
652 efx_fini_rx_buffer(rx_queue, rx_buf);
653 return;
654 }
655
656 /* Release card resources - assumes all RX buffers consumed in-order
657 * per RX queue
658 */
659 efx_unmap_rx_buffer(efx, rx_buf);
660
661 /* Prefetch nice and early so data will (hopefully) be in cache by
662 * the time we look at it.
663 */
664 prefetch(rx_buf->data);
665
666 /* Pipeline receives so that we give time for packet headers to be
667 * prefetched into cache.
668 */
669 rx_buf->len = len;
670 if (rx_queue->channel->rx_pkt)
671 __efx_rx_packet(rx_queue->channel,
672 rx_queue->channel->rx_pkt,
673 rx_queue->channel->rx_pkt_csummed);
674 rx_queue->channel->rx_pkt = rx_buf;
675 rx_queue->channel->rx_pkt_csummed = checksummed;
676}
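
Editor's note: efx_rx_packet() is a one-deep pipeline: it delivers the buffer stashed on the previous event and stashes the current one, so the prefetch() above has a whole event's worth of time to pull the new headers into cache before __efx_rx_packet() touches them. A reduced sketch of that shape; the types and names here are illustrative, not driver structures, and the pending rx_pkt is presumably flushed when the channel finishes event processing:

	#include <stddef.h>
	#include <stdio.h>

	struct buf { const char *name; };
	struct chan { struct buf *rx_pkt; };

	static void deliver(struct buf *b) { printf("deliver %s\n", b->name); }

	/* One RX event: hand the previously stashed buffer up (its headers
	 * have had time to be prefetched), then stash the new one. */
	static void rx_event(struct chan *c, struct buf *b)
	{
		if (c->rx_pkt)
			deliver(c->rx_pkt);
		c->rx_pkt = b;
	}

	int main(void)
	{
		struct chan c = { NULL };
		struct buf b0 = { "pkt0" }, b1 = { "pkt1" };

		rx_event(&c, &b0);	/* nothing delivered yet */
		rx_event(&c, &b1);	/* pkt0 delivered */
		if (c.rx_pkt)		/* end of event processing: flush */
			deliver(c.rx_pkt);
		return 0;
	}
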
677
678/* Handle a received packet. Second half: Touches packet payload. */
679void __efx_rx_packet(struct efx_channel *channel,
680 struct efx_rx_buffer *rx_buf, int checksummed)
681{
682 struct efx_nic *efx = channel->efx;
683 struct sk_buff *skb;
684 int lro = efx->net_dev->features & NETIF_F_LRO;
685
686 if (rx_buf->skb) {
687 prefetch(skb_shinfo(rx_buf->skb));
688
689 skb_put(rx_buf->skb, rx_buf->len);
690
691 /* Move past the ethernet header. rx_buf->data still points
692 * at the ethernet header */
693 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
694 efx->net_dev);
695 }
696
697 /* Both our generic-LRO and SFC-SSR support skb and page based
698 * allocation, but neither supports switching from one to the
699 * other on the fly. If we spot that the allocation mode has
700 * changed, then flush the LRO state.
701 */
702 if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
703 efx_flush_lro(channel);
704 channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
705 }
706 if (likely(checksummed && lro)) {
707 efx_rx_packet_lro(channel, rx_buf);
708 goto done;
709 }
710
711 /* Form an skb if required */
712 if (rx_buf->page) {
713 int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
714 skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
715 if (unlikely(skb == NULL)) {
716 efx_free_rx_buffer(efx, rx_buf);
717 goto done;
718 }
719 } else {
720 /* We now own the SKB */
721 skb = rx_buf->skb;
722 rx_buf->skb = NULL;
723 }
724
725 EFX_BUG_ON_PARANOID(rx_buf->page);
726 EFX_BUG_ON_PARANOID(rx_buf->skb);
727 EFX_BUG_ON_PARANOID(!skb);
728
729 /* Set the SKB flags */
730 if (unlikely(!checksummed || !efx->rx_checksum_enabled))
731 skb->ip_summed = CHECKSUM_NONE;
732
733 /* Pass the packet up */
734 netif_receive_skb(skb);
735
736 /* Update allocation strategy method */
737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
738
739 /* fall-thru */
740done:
741 efx->net_dev->last_rx = jiffies;
742}
743
744void efx_rx_strategy(struct efx_channel *channel)
745{
746 enum efx_rx_alloc_method method = rx_alloc_method;
747
748 /* Only makes sense to use page based allocation if LRO is enabled */
749 if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
750 method = RX_ALLOC_METHOD_SKB;
751 } else if (method == RX_ALLOC_METHOD_AUTO) {
752 /* Constrain the rx_alloc_level */
753 if (channel->rx_alloc_level < 0)
754 channel->rx_alloc_level = 0;
755 else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
756 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
757
758 /* Decide on the allocation method */
759 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
760 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
761 }
762
763 /* Push the option */
764 channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
765}
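
Editor's note: the AUTO branch above is driven by a running score, rx_alloc_level. The skb delivery path visibly adjusts it by RX_ALLOC_FACTOR_SKB in __efx_rx_packet(); LRO-delivered traffic presumably adjusts it in the other direction. A minimal sketch of that feedback loop; the factor and threshold values are stand-ins, since the real RX_ALLOC_* constants are defined earlier in rx.c and are not part of this hunk:

	#include <stdio.h>

	/* Placeholder values, not the driver's RX_ALLOC_* constants. */
	#define LEVEL_MAX	0x3000
	#define LEVEL_LRO	0x2000
	#define FACTOR_LRO	1
	#define FACTOR_SKB	(-2)

	static int level;		/* stands in for channel->rx_alloc_level */

	static int prefer_pages(void)	/* the AUTO branch of efx_rx_strategy() */
	{
		if (level < 0)
			level = 0;
		else if (level > LEVEL_MAX)
			level = LEVEL_MAX;
		return level > LEVEL_LRO;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 10000; i++)	/* LRO traffic pushes the score up */
			level += FACTOR_LRO;
		printf("after LRO burst: use pages = %d\n", prefer_pages());

		for (i = 0; i < 10000; i++)	/* skb traffic pushes it back down */
			level += FACTOR_SKB;
		printf("after skb burst: use pages = %d\n", prefer_pages());
		return 0;
	}
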
766
767int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
768{
769 struct efx_nic *efx = rx_queue->efx;
770 unsigned int rxq_size;
771 int rc;
772
773 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
774
775 /* Allocate RX buffers */
776 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
777 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
778 if (!rx_queue->buffer) {
779 rc = -ENOMEM;
780 goto fail1;
781 }
782
783 rc = falcon_probe_rx(rx_queue);
784 if (rc)
785 goto fail2;
786
787 return 0;
788
789 fail2:
790 kfree(rx_queue->buffer);
791 rx_queue->buffer = NULL;
792 fail1:
793 rx_queue->used = 0;
794
795 return rc;
796}
797
798int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
799{
800 struct efx_nic *efx = rx_queue->efx;
801 unsigned int max_fill, trigger, limit;
802
803 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
804
805 /* Initialise ptr fields */
806 rx_queue->added_count = 0;
807 rx_queue->notified_count = 0;
808 rx_queue->removed_count = 0;
809 rx_queue->min_fill = -1U;
810 rx_queue->min_overfill = -1U;
811
812 /* Initialise limit fields */
813 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
814 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
815 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
816
817 rx_queue->max_fill = max_fill;
818 rx_queue->fast_fill_trigger = trigger;
819 rx_queue->fast_fill_limit = limit;
820
821 /* Set up RX descriptor ring */
822 return falcon_init_rx(rx_queue);
823}
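
Editor's note: a worked example of the three limit fields computed above. The ring size, head room and percentage defaults used here are assumptions chosen for illustration; the real values come from the chip type and the module parameters:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed values: 4096-entry ring, 8 slots of head room,
		 * refill threshold/limit of 90%/95%. */
		unsigned ring_entries = 4096, head_room = 8;
		unsigned rx_refill_threshold = 90, rx_refill_limit = 95;

		unsigned max_fill = ring_entries - head_room;			/* 4088 */
		unsigned trigger = max_fill * rx_refill_threshold / 100;	/* 3679 */
		unsigned limit = max_fill * rx_refill_limit / 100;		/* 3883 */

		/* A fast fill runs only while the fill level is below 'trigger'
		 * and tops the ring back up towards 'limit', leaving the head
		 * room so the slot currently being consumed is not overwritten. */
		printf("max_fill=%u fast_fill_trigger=%u fast_fill_limit=%u\n",
		       max_fill, trigger, limit);
		return 0;
	}
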
824
825void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
826{
827 int i;
828 struct efx_rx_buffer *rx_buf;
829
830 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
831
832 falcon_fini_rx(rx_queue);
833
834 /* Release RX buffers. NB: start at index 0, not the current HW ptr */
835 if (rx_queue->buffer) {
836 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
837 rx_buf = efx_rx_buffer(rx_queue, i);
838 efx_fini_rx_buffer(rx_queue, rx_buf);
839 }
840 }
841
842 /* For a page that is part-way through splitting into RX buffers */
843 if (rx_queue->buf_page != NULL) {
844 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
845 RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
846 __free_pages(rx_queue->buf_page,
847 rx_queue->efx->rx_buffer_order);
848 rx_queue->buf_page = NULL;
849 }
850}
851
852void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
853{
854 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
855
856 falcon_remove_rx(rx_queue);
857
858 kfree(rx_queue->buffer);
859 rx_queue->buffer = NULL;
860 rx_queue->used = 0;
861}
862
863void efx_flush_lro(struct efx_channel *channel)
864{
865 lro_flush_all(&channel->lro_mgr);
866}
867
868
869module_param(rx_alloc_method, int, 0644);
870MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
871
872module_param(rx_refill_threshold, uint, 0444);
873MODULE_PARM_DESC(rx_refill_threshold,
874 "RX descriptor ring fast/slow fill threshold (%)");
875
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
new file mode 100644
index 000000000000..f35e377bfc5f
--- /dev/null
+++ b/drivers/net/sfc/rx.h
@@ -0,0 +1,29 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_RX_H
11#define EFX_RX_H
12
13#include "net_driver.h"
14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
21void efx_lro_fini(struct net_lro_mgr *lro_mgr);
22void efx_flush_lro(struct efx_channel *channel);
23void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data);
26void __efx_rx_packet(struct efx_channel *channel,
27 struct efx_rx_buffer *rx_buf, int checksummed);
28
29#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
new file mode 100644
index 000000000000..11fa9fb8f48b
--- /dev/null
+++ b/drivers/net/sfc/sfe4001.c
@@ -0,0 +1,252 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*****************************************************************************
11 * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that
12 * controls the PHY power rails, and for the MAX6647 temp. sensor used to check
13 * the PHY
14 */
15#include <linux/delay.h>
16#include "efx.h"
17#include "phy.h"
18#include "boards.h"
19#include "falcon.h"
20#include "falcon_hwdefs.h"
21#include "mac.h"
22
23/**************************************************************************
24 *
25 * I2C IO Expander device
26 *
27 **************************************************************************/
28#define PCA9539 0x74
29
30#define P0_IN 0x00
31#define P0_OUT 0x02
32#define P0_INVERT 0x04
33#define P0_CONFIG 0x06
34
35#define P0_EN_1V0X_LBN 0
36#define P0_EN_1V0X_WIDTH 1
37#define P0_EN_1V2_LBN 1
38#define P0_EN_1V2_WIDTH 1
39#define P0_EN_2V5_LBN 2
40#define P0_EN_2V5_WIDTH 1
41#define P0_EN_3V3X_LBN 3
42#define P0_EN_3V3X_WIDTH 1
43#define P0_EN_5V_LBN 4
44#define P0_EN_5V_WIDTH 1
45#define P0_SHORTEN_JTAG_LBN 5
46#define P0_SHORTEN_JTAG_WIDTH 1
47#define P0_X_TRST_LBN 6
48#define P0_X_TRST_WIDTH 1
49#define P0_DSP_RESET_LBN 7
50#define P0_DSP_RESET_WIDTH 1
51
52#define P1_IN 0x01
53#define P1_OUT 0x03
54#define P1_INVERT 0x05
55#define P1_CONFIG 0x07
56
57#define P1_AFE_PWD_LBN 0
58#define P1_AFE_PWD_WIDTH 1
59#define P1_DSP_PWD25_LBN 1
60#define P1_DSP_PWD25_WIDTH 1
61#define P1_RESERVED_LBN 2
62#define P1_RESERVED_WIDTH 2
63#define P1_SPARE_LBN 4
64#define P1_SPARE_WIDTH 4
65
66
67/**************************************************************************
68 *
69 * Temperature Sensor
70 *
71 **************************************************************************/
72#define MAX6647 0x4e
73
74#define RLTS 0x00
75#define RLTE 0x01
76#define RSL 0x02
77#define RCL 0x03
78#define RCRA 0x04
79#define RLHN 0x05
80#define RLLI 0x06
81#define RRHI 0x07
82#define RRLS 0x08
83#define WCRW 0x0a
84#define WLHO 0x0b
85#define WRHA 0x0c
86#define WRLN 0x0e
87#define OSHT 0x0f
88#define REET 0x10
89#define RIET 0x11
90#define RWOE 0x19
91#define RWOI 0x20
92#define HYS 0x21
93#define QUEUE 0x22
94#define MFID 0xfe
95#define REVID 0xff
96
97/* Status bits */
98#define MAX6647_BUSY (1 << 7) /* ADC is converting */
99#define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */
100#define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */
101#define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */
102#define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */
103#define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */
104#define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */
105#define MAX6647_IOT (1 << 0) /* Local junction overtemp. */
106
107static const u8 xgphy_max_temperature = 90;
108
109void sfe4001_poweroff(struct efx_nic *efx)
110{
111 struct efx_i2c_interface *i2c = &efx->i2c;
112
113 u8 cfg, out, in;
114
115 EFX_INFO(efx, "%s\n", __func__);
116
117 /* Turn off all power rails */
118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120
121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124
125 /* Disable port 0 outputs on IO expander */
126 cfg = 0xff;
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
128
129 /* Clear any over-temperature alert */
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131}
132
133/* This board uses an I2C expander to provide power to the PHY, which needs to
134 * be turned on before the PHY can be used.
135 * Context: Process context, rtnl lock held
136 */
137int sfe4001_poweron(struct efx_nic *efx)
138{
139 struct efx_i2c_interface *i2c = &efx->i2c;
140 unsigned int count;
141 int rc;
142 u8 out, in, cfg;
143 efx_dword_t reg;
144
145 /* 10Xpress has fixed-function LED pins, so there is no board-specific
146 * blink code. */
147 efx->board_info.blink = tenxpress_phy_blink;
148
149 /* Ensure that XGXS and XAUI SerDes are held in reset */
150 EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
151 XX_PWRDNB_EN, 1,
152 XX_RSTPLLAB_EN, 1,
153 XX_RESETA_EN, 1,
154 XX_RESETB_EN, 1,
155 XX_RSTXGXSRX_EN, 1,
156 XX_RSTXGXSTX_EN, 1);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 /* Set DSP over-temperature alert threshold */
161 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
162 rc = efx_i2c_write(i2c, MAX6647, WLHO,
163 &xgphy_max_temperature, 1);
164 if (rc)
165 goto fail1;
166
167 /* Read it back and verify */
168 rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
169 if (rc)
170 goto fail1;
171 if (in != xgphy_max_temperature) {
172 rc = -EFAULT;
173 goto fail1;
174 }
175
176 /* Clear any previous over-temperature alert */
177 rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
178 if (rc)
179 goto fail1;
180
181 /* Enable port 0 and port 1 outputs on IO expander */
182 cfg = 0x00;
183 rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
184 if (rc)
185 goto fail1;
186 cfg = 0xff & ~(1 << P1_SPARE_LBN);
187 rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
188 if (rc)
189 goto fail2;
190
191 /* Turn all power off then wait 1 sec. This ensures PHY is reset */
192 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
193 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
194 (0 << P0_EN_1V0X_LBN));
195 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
196 if (rc)
197 goto fail3;
198
199 schedule_timeout_uninterruptible(HZ);
200 count = 0;
201 do {
202 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
205 (1 << P0_X_TRST_LBN));
206
207 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
208 if (rc)
209 goto fail3;
210 msleep(10);
211
212 /* Turn on 1V power rail */
213 out &= ~(1 << P0_EN_1V0X_LBN);
214 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
215 if (rc)
216 goto fail3;
217
218 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
219
220 schedule_timeout_uninterruptible(HZ);
221
222 /* Check DSP is powered */
223 rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
224 if (rc)
225 goto fail3;
226 if (in & (1 << P1_AFE_PWD_LBN))
227 goto done;
228
229 } while (++count < 20);
230
231 EFX_INFO(efx, "timed out waiting for power\n");
232 rc = -ETIMEDOUT;
233 goto fail3;
234
235done:
236 EFX_INFO(efx, "PHY is powered on\n");
237 return 0;
238
239fail3:
240 /* Turn off all power rails */
241 out = 0xff;
242 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
243 /* Disable port 1 outputs on IO expander */
244 out = 0xff;
245 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
246fail2:
247 /* Disable port 0 outputs on IO expander */
248 out = 0xff;
249 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
250fail1:
251 return rc;
252}
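
Editor's note: the PCA9539 output writes in sfe4001_poweron() treat a cleared bit as "rail enabled", which is why every value starts from 0xff and knocks individual bits out via the *_LBN positions; the code's own "turn on/off" comments confirm that reading. A small standalone sketch of that composition, reusing the bit positions defined above (the helper name is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define P0_EN_1V0X_LBN	0
	#define P0_EN_1V2_LBN	1
	#define P0_EN_2V5_LBN	2
	#define P0_EN_3V3X_LBN	3
	#define P0_EN_5V_LBN	4
	#define P0_X_TRST_LBN	6

	/* Outputs are active-low: clear a bit to enable that rail. */
	static uint8_t p0_out_for_rails(int want_1v0x)
	{
		uint8_t out = 0xff;

		out &= (uint8_t)~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
				  (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
				  (1 << P0_X_TRST_LBN));
		if (want_1v0x)
			out &= (uint8_t)~(1 << P0_EN_1V0X_LBN);
		return out;
	}

	int main(void)
	{
		printf("bulk rails only: 0x%02x\n", p0_out_for_rails(0));	/* 0xa1 */
		printf("plus 1.0V rail:  0x%02x\n", p0_out_for_rails(1));	/* 0xa0 */
		return 0;
	}
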
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
new file mode 100644
index 000000000000..34412f3d41c9
--- /dev/null
+++ b/drivers/net/sfc/spi.h
@@ -0,0 +1,71 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SPI_H
12#define EFX_SPI_H
13
14#include "net_driver.h"
15
16/**************************************************************************
17 *
18 * Basic SPI command set and bit definitions
19 *
20 *************************************************************************/
21
22/*
23 * Commands common to all known devices.
24 *
25 */
26
27/* Write status register */
28#define SPI_WRSR 0x01
29
30/* Write data to memory array */
31#define SPI_WRITE 0x02
32
33/* Read data from memory array */
34#define SPI_READ 0x03
35
36/* Reset write enable latch */
37#define SPI_WRDI 0x04
38
39/* Read status register */
40#define SPI_RDSR 0x05
41
42/* Set write enable latch */
43#define SPI_WREN 0x06
44
45/* SST: Enable write to status register */
46#define SPI_SST_EWSR 0x50
47
48/*
49 * Status register bits. Not all bits are supported on all devices.
50 *
51 */
52
53/* Write-protect pin enabled */
54#define SPI_STATUS_WPEN 0x80
55
56/* Block protection bit 2 */
57#define SPI_STATUS_BP2 0x10
58
59/* Block protection bit 1 */
60#define SPI_STATUS_BP1 0x08
61
62/* Block protection bit 0 */
63#define SPI_STATUS_BP0 0x04
64
65/* State of the write enable latch */
66#define SPI_STATUS_WEN 0x02
67
68/* Device busy flag */
69#define SPI_STATUS_NRDY 0x01
70
71#endif /* EFX_SPI_H */
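
Editor's note: the opcodes in this header follow the common serial EEPROM/flash command set, in which a write is bracketed by a write-enable command and a busy poll of the status register. A hedged sketch of that sequence; spi_cmd() and eeprom_write_byte() are illustrative names and a stub transport, not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define SPI_WRITE	0x02
	#define SPI_RDSR	0x05
	#define SPI_WREN	0x06
	#define SPI_STATUS_NRDY	0x01

	/* Hypothetical transport helper standing in for whatever byte-level
	 * access the EEPROM code provides; this stub just logs the command
	 * and reports the device as idle. */
	static int spi_cmd(uint8_t cmd, const uint8_t *addr, unsigned addr_len,
			   const uint8_t *tx, unsigned tx_len,
			   uint8_t *rx, unsigned rx_len)
	{
		(void)addr; (void)addr_len; (void)tx; (void)tx_len;
		printf("cmd 0x%02x\n", cmd);
		if (rx && rx_len)
			rx[0] = 0;		/* not busy */
		return 0;
	}

	/* Classic serial-EEPROM write: set the write-enable latch, issue the
	 * write, then poll the status register until the busy flag clears. */
	static int eeprom_write_byte(uint8_t a_hi, uint8_t a_lo, uint8_t value)
	{
		uint8_t addr[2] = { a_hi, a_lo };
		uint8_t status;
		int rc;

		rc = spi_cmd(SPI_WREN, NULL, 0, NULL, 0, NULL, 0);
		if (rc)
			return rc;
		rc = spi_cmd(SPI_WRITE, addr, 2, &value, 1, NULL, 0);
		if (rc)
			return rc;
		do {
			rc = spi_cmd(SPI_RDSR, NULL, 0, NULL, 0, &status, 1);
			if (rc)
				return rc;
		} while (status & SPI_STATUS_NRDY);
		return 0;
	}

	int main(void)
	{
		return eeprom_write_byte(0x00, 0x10, 0xab);
	}
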
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
new file mode 100644
index 000000000000..a2e9f79e47b1
--- /dev/null
+++ b/drivers/net/sfc/tenxpress.c
@@ -0,0 +1,434 @@
1/****************************************************************************
2 * Driver for Solarflare 802.3an compliant PHY
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include <linux/seq_file.h>
12#include "efx.h"
13#include "gmii.h"
14#include "mdio_10g.h"
15#include "falcon.h"
16#include "phy.h"
17#include "falcon_hwdefs.h"
18#include "boards.h"
19#include "mac.h"
20
21/* We expect these MMDs to be in the package */
22/* AN not here as mdio_check_mmds() requires STAT2 support */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \
24 MDIO_MMDREG_DEVS0_PCS | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/* We complain if we fail to see the link partner as 10G capable this many
28 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
29 */
30#define MAX_BAD_LP_TRIES (5)
31
32/* Extended control register */
33#define PMA_PMD_XCONTROL_REG 0xc000
34#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
35#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
36
37/* extended status register */
38#define PMA_PMD_XSTATUS_REG 0xc001
39#define PMA_PMD_XSTAT_FLP_LBN (12)
40
41/* LED control register */
42#define PMA_PMD_LED_CTRL_REG (0xc007)
43#define PMA_PMA_LED_ACTIVITY_LBN (3)
44
45/* LED function override register */
46#define PMA_PMD_LED_OVERR_REG (0xc009)
47/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
48#define PMA_PMD_LED_LINK_LBN (0)
49#define PMA_PMD_LED_SPEED_LBN (2)
50#define PMA_PMD_LED_TX_LBN (4)
51#define PMA_PMD_LED_RX_LBN (6)
52/* Override settings */
53#define PMA_PMD_LED_AUTO (0) /* H/W control */
54#define PMA_PMD_LED_ON (1)
55#define PMA_PMD_LED_OFF (2)
56#define PMA_PMD_LED_FLASH (3)
57/* All LEDs under hardware control */
58#define PMA_PMD_LED_FULL_AUTO (0)
59/* Green and Amber under hardware control, Red off */
60#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
61
62
63/* Self test (BIST) control register */
64#define PMA_PMD_BIST_CTRL_REG (0xc014)
65#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
66#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
67#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
68/* Self test status register */
69#define PMA_PMD_BIST_STAT_REG (0xc015)
70#define PMA_PMD_BIST_ENX_LBN (3)
71#define PMA_PMD_BIST_PMA_LBN (2)
72#define PMA_PMD_BIST_RXD_LBN (1)
73#define PMA_PMD_BIST_AFE_LBN (0)
74
75#define BIST_MAX_DELAY (1000)
76#define BIST_POLL_DELAY (10)
77
78/* Misc register defines */
79#define PCS_CLOCK_CTRL_REG 0xd801
80#define PLL312_RST_N_LBN 2
81
82#define PCS_SOFT_RST2_REG 0xd806
83#define SERDES_RST_N_LBN 13
84#define XGXS_RST_N_LBN 12
85
86#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
87#define CLK312_EN_LBN 3
88
89/* Boot status register */
90#define PCS_BOOT_STATUS_REG (0xd000)
91#define PCS_BOOT_FATAL_ERR_LBN (0)
92#define PCS_BOOT_PROGRESS_LBN (1)
93#define PCS_BOOT_PROGRESS_WIDTH (2)
94#define PCS_BOOT_COMPLETE_LBN (3)
95#define PCS_BOOT_MAX_DELAY (100)
96#define PCS_BOOT_POLL_DELAY (10)
97
98/* Time to wait between powering down the LNPGA and turning off the power
99 * rails */
100#define LNPGA_PDOWN_WAIT (HZ / 5)
101
102static int crc_error_reset_threshold = 100;
103module_param(crc_error_reset_threshold, int, 0644);
104MODULE_PARM_DESC(crc_error_reset_threshold,
105 "Max number of CRC errors before XAUI reset");
106
107struct tenxpress_phy_data {
108 enum tenxpress_state state;
109 atomic_t bad_crc_count;
110 int bad_lp_tries;
111};
112
113static int tenxpress_state_is(struct efx_nic *efx, int state)
114{
115 struct tenxpress_phy_data *phy_data = efx->phy_data;
116 return (phy_data != NULL) && (state == phy_data->state);
117}
118
119void tenxpress_set_state(struct efx_nic *efx,
120 enum tenxpress_state state)
121{
122 struct tenxpress_phy_data *phy_data = efx->phy_data;
123 if (phy_data != NULL)
124 phy_data->state = state;
125}
126
127void tenxpress_crc_err(struct efx_nic *efx)
128{
129 struct tenxpress_phy_data *phy_data = efx->phy_data;
130 if (phy_data != NULL)
131 atomic_inc(&phy_data->bad_crc_count);
132}
133
134/* Check that the C166 has booted successfully */
135static int tenxpress_phy_check(struct efx_nic *efx)
136{
137 int phy_id = efx->mii.phy_id;
138 int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
139 int boot_stat;
140
141 /* Wait for the boot to complete (or not) */
142 while (count) {
143 boot_stat = mdio_clause45_read(efx, phy_id,
144 MDIO_MMD_PCS,
145 PCS_BOOT_STATUS_REG);
146 if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
147 break;
148 count--;
149 udelay(PCS_BOOT_POLL_DELAY);
150 }
151
152 if (!count) {
153 EFX_ERR(efx, "%s: PHY boot timed out. Last status "
154 "%x\n", __func__,
155 (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
156 ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
157 return -ETIMEDOUT;
158 }
159
160 return 0;
161}
162
163static void tenxpress_reset_xaui(struct efx_nic *efx);
164
165static int tenxpress_init(struct efx_nic *efx)
166{
167 int rc, reg;
168
169 /* Turn on the clock */
170 reg = (1 << CLK312_EN_LBN);
171 mdio_clause45_write(efx, efx->mii.phy_id,
172 MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg);
173
174 rc = tenxpress_phy_check(efx);
175 if (rc < 0)
176 return rc;
177
178 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
179 reg = mdio_clause45_read(efx, efx->mii.phy_id,
180 MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG);
181 reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN);
182 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
183 PMA_PMD_LED_CTRL_REG, reg);
184
185 reg = PMA_PMD_LED_DEFAULT;
186 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
187 PMA_PMD_LED_OVERR_REG, reg);
188
189 return rc;
190}
191
192static int tenxpress_phy_init(struct efx_nic *efx)
193{
194 struct tenxpress_phy_data *phy_data;
195 int rc = 0;
196
197 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
198 efx->phy_data = phy_data;
199
200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
201
202 rc = mdio_clause45_wait_reset_mmds(efx,
203 TENXPRESS_REQUIRED_DEVS);
204 if (rc < 0)
205 goto fail;
206
207 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
208 if (rc < 0)
209 goto fail;
210
211 rc = tenxpress_init(efx);
212 if (rc < 0)
213 goto fail;
214
215 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
216
217 /* Let the XGXS and SerDes out of reset, and reset the 10Xpress */
218 falcon_reset_xaui(efx);
219
220 return 0;
221
222 fail:
223 kfree(efx->phy_data);
224 efx->phy_data = NULL;
225 return rc;
226}
227
228static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
229{
230 struct tenxpress_phy_data *pd = efx->phy_data;
231 int reg;
232
233 /* Nothing to do if all is well and was previously so. */
234 if (!(bad_lp || pd->bad_lp_tries))
235 return;
236
237 reg = mdio_clause45_read(efx, efx->mii.phy_id,
238 MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG);
239
240 if (bad_lp)
241 pd->bad_lp_tries++;
242 else
243 pd->bad_lp_tries = 0;
244
245 if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
246 pd->bad_lp_tries = 0; /* Restart count */
247 reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
248 reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
249 EFX_ERR(efx, "This NIC appears to be plugged into"
250 " a port that is not 10GBASE-T capable.\n"
251 " This PHY is 10GBASE-T ONLY, so no link can"
252 " be established.\n");
253 } else {
254 reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN);
255 }
256 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
257 PMA_PMD_LED_OVERR_REG, reg);
258}
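
Editor's note: the bad-link-partner handling above is a simple debounce: only MAX_BAD_LP_TRIES consecutive bad samples trigger the warning and the flashing RX LED, because a single read of the autoneg registers can race with renegotiation. A stripped-down, standalone sketch of the counter behaviour (names are illustrative):

	#include <stdio.h>

	#define MAX_BAD_LP_TRIES 5

	static int bad_lp_tries;

	/* Returns 1 only when the condition has persisted long enough to report. */
	static int debounce_bad_lp(int bad_lp)
	{
		if (!bad_lp) {
			bad_lp_tries = 0;
			return 0;
		}
		if (++bad_lp_tries < MAX_BAD_LP_TRIES)
			return 0;
		bad_lp_tries = 0;	/* restart the count after reporting */
		return 1;
	}

	int main(void)
	{
		int samples[] = { 1, 1, 0, 1, 1, 1, 1, 1 };
		unsigned i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("sample %u -> report=%d\n", i, debounce_bad_lp(samples[i]));
		return 0;
	}
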
259
260/* Check link status and return a boolean OK value. If the link is NOT
261 * OK we have a quick rummage round to see if we appear to be plugged
262 * into a non-10GBT port and if so warn the user that they won't get
263 * link any time soon as we are 10GBT only, unless caller specified
264 * not to do this check (it isn't useful in loopback) */
265static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
266{
267 int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
268
269 if (ok) {
270 tenxpress_set_bad_lp(efx, 0);
271 } else if (check_lp) {
272 /* Are we plugged into the wrong sort of link? */
273 int bad_lp = 0;
274 int phy_id = efx->mii.phy_id;
275 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
276 MDIO_AN_STATUS);
277 int xphy_stat = mdio_clause45_read(efx, phy_id,
278 MDIO_MMD_PMAPMD,
279 PMA_PMD_XSTATUS_REG);
280 /* Are we plugged into anything that sends FLPs? If
281 * not we can't distinguish between not being plugged
282 * in and being plugged into a non-AN antique. The FLP
283 * bit has the advantage of not clearing when autoneg
284 * restarts. */
285 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
286 tenxpress_set_bad_lp(efx, 0);
287 return ok;
288 }
289
290 /* If it can do 10GBT it must be XNP capable */
291 bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN));
292 if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) {
293 bad_lp = !(mdio_clause45_read(efx, phy_id,
294 MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) &
295 (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN));
296 }
297 tenxpress_set_bad_lp(efx, bad_lp);
298 }
299 return ok;
300}
301
302static void tenxpress_phy_reconfigure(struct efx_nic *efx)
303{
304 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
305 return;
306
307 efx->link_up = tenxpress_link_ok(efx, 0);
308 efx->link_options = GM_LPA_10000FULL;
309}
310
311static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
312{
313 /* Nothing done here - LASI interrupts aren't reliable so poll */
314}
315
316
317/* Poll PHY for interrupt */
318static int tenxpress_phy_check_hw(struct efx_nic *efx)
319{
320 struct tenxpress_phy_data *phy_data = efx->phy_data;
321 int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
322 int link_ok;
323
324 link_ok = phy_up && tenxpress_link_ok(efx, 1);
325
326 if (link_ok != efx->link_up)
327 falcon_xmac_sim_phy_event(efx);
328
329 /* Nothing to check if we've already shut down the PHY */
330 if (!phy_up)
331 return 0;
332
333 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
334 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
335 falcon_reset_xaui(efx);
336 atomic_set(&phy_data->bad_crc_count, 0);
337 }
338
339 return 0;
340}
341
342static void tenxpress_phy_fini(struct efx_nic *efx)
343{
344 int reg;
345
346 /* Power down the LNPGA */
347 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
348 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
349 PMA_PMD_XCONTROL_REG, reg);
350
351 /* Waiting here ensures that the board fini, which can turn off the
352 * power to the PHY, won't get run until the LNPGA powerdown has been
353 * given long enough to complete. */
354 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
355
356 kfree(efx->phy_data);
357 efx->phy_data = NULL;
358}
359
360
361/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
362 * (which probably aren't wired anyway) are left in AUTO mode */
363void tenxpress_phy_blink(struct efx_nic *efx, int blink)
364{
365 int reg;
366
367 if (blink)
368 reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
369 (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
370 (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
371 else
372 reg = PMA_PMD_LED_DEFAULT;
373
374 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
375 PMA_PMD_LED_OVERR_REG, reg);
376}
377
378static void tenxpress_reset_xaui(struct efx_nic *efx)
379{
380 int phy = efx->mii.phy_id;
381 int clk_ctrl, test_select, soft_rst2;
382
383 /* Real work is done on clock_ctrl; other resets are thought to be
384 * optional but make the reset more reliable
385 */
386
387 /* Read */
388 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
389 PCS_CLOCK_CTRL_REG);
390 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
391 PCS_TEST_SELECT_REG);
392 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
393 PCS_SOFT_RST2_REG);
394
395 /* Put in reset */
396 test_select &= ~(1 << CLK312_EN_LBN);
397 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
398 PCS_TEST_SELECT_REG, test_select);
399
400 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
401 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
402 PCS_SOFT_RST2_REG, soft_rst2);
403
404 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
405 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
406 PCS_CLOCK_CTRL_REG, clk_ctrl);
407 udelay(10);
408
409 /* Remove reset */
410 clk_ctrl |= (1 << PLL312_RST_N_LBN);
411 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
412 PCS_CLOCK_CTRL_REG, clk_ctrl);
413 udelay(10);
414
415 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
416 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
417 PCS_SOFT_RST2_REG, soft_rst2);
418 udelay(10);
419
420 test_select |= (1 << CLK312_EN_LBN);
421 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
422 PCS_TEST_SELECT_REG, test_select);
423 udelay(10);
424}
425
426struct efx_phy_operations falcon_tenxpress_phy_ops = {
427 .init = tenxpress_phy_init,
428 .reconfigure = tenxpress_phy_reconfigure,
429 .check_hw = tenxpress_phy_check_hw,
430 .fini = tenxpress_phy_fini,
431 .clear_interrupt = tenxpress_phy_clear_interrupt,
432 .reset_xaui = tenxpress_reset_xaui,
433 .mmds = TENXPRESS_REQUIRED_DEVS,
434};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
new file mode 100644
index 000000000000..fbb866b2185e
--- /dev/null
+++ b/drivers/net/sfc/tx.c
@@ -0,0 +1,452 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/pci.h>
12#include <linux/tcp.h>
13#include <linux/ip.h>
14#include <linux/in.h>
15#include <linux/if_ether.h>
16#include <linux/highmem.h>
17#include "net_driver.h"
18#include "tx.h"
19#include "efx.h"
20#include "falcon.h"
21#include "workarounds.h"
22
23/*
24 * TX descriptor ring full threshold
25 *
26 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue
28 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31
32/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue.
34 */
35void efx_stop_queue(struct efx_nic *efx)
36{
37 spin_lock_bh(&efx->netif_stop_lock);
38 EFX_TRACE(efx, "stop TX queue\n");
39
40 atomic_inc(&efx->netif_stop_count);
41 netif_stop_queue(efx->net_dev);
42
43 spin_unlock_bh(&efx->netif_stop_lock);
44}
45
46/* Wake netif's TX queue
47 * We want to be able to nest calls to netif_stop_queue(), since each
48 * channel can have an individual stop on the queue.
49 */
50inline void efx_wake_queue(struct efx_nic *efx)
51{
52 local_bh_disable();
53 if (atomic_dec_and_lock(&efx->netif_stop_count,
54 &efx->netif_stop_lock)) {
55 EFX_TRACE(efx, "waking TX queue\n");
56 netif_wake_queue(efx->net_dev);
57 spin_unlock(&efx->netif_stop_lock);
58 }
59 local_bh_enable();
60}
61
62static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63 struct efx_tx_buffer *buffer)
64{
65 if (buffer->unmap_len) {
66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67 if (buffer->unmap_single)
68 pci_unmap_single(pci_dev, buffer->unmap_addr,
69 buffer->unmap_len, PCI_DMA_TODEVICE);
70 else
71 pci_unmap_page(pci_dev, buffer->unmap_addr,
72 buffer->unmap_len, PCI_DMA_TODEVICE);
73 buffer->unmap_len = 0;
74 buffer->unmap_single = 0;
75 }
76
77 if (buffer->skb) {
78 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
79 buffer->skb = NULL;
80 EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
81 "complete\n", tx_queue->queue, read_ptr);
82 }
83}
84
85
86/*
87 * Add a socket buffer to a TX queue
88 *
89 * This maps all fragments of a socket buffer for DMA and adds them to
90 * the TX queue. The queue's insert pointer will be incremented by
91 * the number of fragments in the socket buffer.
92 *
93 * If any DMA mapping fails, any mapped fragments will be unmapped,
94 * and the queue's insert pointer will be restored to its original value.
95 *
96 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
97 * You must hold netif_tx_lock() to call this function.
98 */
99static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
100 const struct sk_buff *skb)
101{
102 struct efx_nic *efx = tx_queue->efx;
103 struct pci_dev *pci_dev = efx->pci_dev;
104 struct efx_tx_buffer *buffer;
105 skb_frag_t *fragment;
106 struct page *page;
107 int page_offset;
108 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
109 dma_addr_t dma_addr, unmap_addr = 0;
110 unsigned int dma_len;
111 unsigned unmap_single;
112 int q_space, i = 0;
113 int rc = NETDEV_TX_OK;
114
115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
116
117 /* Get size of the initial fragment */
118 len = skb_headlen(skb);
119
120 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
121 q_space = efx->type->txd_ring_mask - 1 - fill_level;
122
123 /* Map for DMA. Use pci_map_single rather than pci_map_page
124 * since this is more efficient on machines with sparse
125 * memory.
126 */
127 unmap_single = 1;
128 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
129
130 /* Process all fragments */
131 while (1) {
132 if (unlikely(pci_dma_mapping_error(dma_addr)))
133 goto pci_err;
134
135 /* Store fields for marking in the per-fragment final
136 * descriptor */
137 unmap_len = len;
138 unmap_addr = dma_addr;
139
140 /* Add to TX queue, splitting across DMA boundaries */
141 do {
142 if (unlikely(q_space-- <= 0)) {
143 /* It might be that completions have
144 * happened since the xmit path last
145 * checked. Update the xmit path's
146 * copy of read_count.
147 */
148 ++tx_queue->stopped;
149 /* This memory barrier protects the
150 * change of stopped from the access
151 * of read_count. */
152 smp_mb();
153 tx_queue->old_read_count =
154 *(volatile unsigned *)
155 &tx_queue->read_count;
156 fill_level = (tx_queue->insert_count
157 - tx_queue->old_read_count);
158 q_space = (efx->type->txd_ring_mask - 1 -
159 fill_level);
160 if (unlikely(q_space-- <= 0))
161 goto stop;
162 smp_mb();
163 --tx_queue->stopped;
164 }
165
166 insert_ptr = (tx_queue->insert_count &
167 efx->type->txd_ring_mask);
168 buffer = &tx_queue->buffer[insert_ptr];
169 EFX_BUG_ON_PARANOID(buffer->skb);
170 EFX_BUG_ON_PARANOID(buffer->len);
171 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
172 EFX_BUG_ON_PARANOID(buffer->unmap_len);
173
174 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
175 if (likely(dma_len > len))
176 dma_len = len;
177
178 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
179 if (misalign && dma_len + misalign > 512)
180 dma_len = 512 - misalign;
181
182 /* Fill out per descriptor fields */
183 buffer->len = dma_len;
184 buffer->dma_addr = dma_addr;
185 len -= dma_len;
186 dma_addr += dma_len;
187 ++tx_queue->insert_count;
188 } while (len);
189
190 /* Transfer ownership of the unmapping to the final buffer */
191 buffer->unmap_addr = unmap_addr;
192 buffer->unmap_single = unmap_single;
193 buffer->unmap_len = unmap_len;
194 unmap_len = 0;
195
196 /* Get address and size of next fragment */
197 if (i >= skb_shinfo(skb)->nr_frags)
198 break;
199 fragment = &skb_shinfo(skb)->frags[i];
200 len = fragment->size;
201 page = fragment->page;
202 page_offset = fragment->page_offset;
203 i++;
204 /* Map for DMA */
205 unmap_single = 0;
206 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
207 PCI_DMA_TODEVICE);
208 }
209
210 /* Transfer ownership of the skb to the final buffer */
211 buffer->skb = skb;
212 buffer->continuation = 0;
213
214 /* Pass off to hardware */
215 falcon_push_buffers(tx_queue);
216
217 return NETDEV_TX_OK;
218
219 pci_err:
220 EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
221 "fragments for DMA\n", tx_queue->queue, skb->len,
222 skb_shinfo(skb)->nr_frags + 1);
223
224 /* Mark the packet as transmitted, and free the SKB ourselves */
225 dev_kfree_skb_any((struct sk_buff *)skb);
226 goto unwind;
227
228 stop:
229 rc = NETDEV_TX_BUSY;
230
231 if (tx_queue->stopped == 1)
232 efx_stop_queue(efx);
233
234 unwind:
235 /* Work backwards until we hit the original insert pointer value */
236 while (tx_queue->insert_count != tx_queue->write_count) {
237 --tx_queue->insert_count;
238 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
239 buffer = &tx_queue->buffer[insert_ptr];
240 efx_dequeue_buffer(tx_queue, buffer);
241 buffer->len = 0;
242 }
243
244 /* Free the fragment we were mid-way through pushing */
245 if (unmap_len)
246 pci_unmap_page(pci_dev, unmap_addr, unmap_len,
247 PCI_DMA_TODEVICE);
248
249 return rc;
250}
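
Editor's note: the dma_len computation above, (((~dma_addr) & tx_dma_mask) + 1), is the distance from dma_addr to the next boundary defined by tx_dma_mask; capping each descriptor at that distance is what "splitting across DMA boundaries" means. A standalone example with an assumed 4 KB boundary (the real mask comes from the chip type):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t tx_dma_mask = 0xfff;		/* assumed 4 KB boundary */
		uint64_t dma_addr = 0x12345f80;		/* 128 bytes short of a boundary */
		uint64_t len = 1000;			/* bytes left in this fragment */

		/* Distance from dma_addr to the next boundary: the first
		 * descriptor may cover at most this much, so no descriptor
		 * ever crosses a boundary. */
		uint64_t dma_len = ((~dma_addr) & tx_dma_mask) + 1;
		if (dma_len > len)
			dma_len = len;

		printf("first descriptor: %llu bytes, next starts at 0x%llx\n",
		       (unsigned long long)dma_len,
		       (unsigned long long)(dma_addr + dma_len));
		return 0;
	}
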
251
252/* Remove packets from the TX queue
253 *
254 * This removes packets from the TX queue, up to and including the
255 * specified index.
256 */
257static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
258 unsigned int index)
259{
260 struct efx_nic *efx = tx_queue->efx;
261 unsigned int stop_index, read_ptr;
262 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
263
264 stop_index = (index + 1) & mask;
265 read_ptr = tx_queue->read_count & mask;
266
267 while (read_ptr != stop_index) {
268 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
269 if (unlikely(buffer->len == 0)) {
270 EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
271 "completion id %x\n", tx_queue->queue,
272 read_ptr);
273 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
274 return;
275 }
276
277 efx_dequeue_buffer(tx_queue, buffer);
278 buffer->continuation = 1;
279 buffer->len = 0;
280
281 ++tx_queue->read_count;
282 read_ptr = tx_queue->read_count & mask;
283 }
284}
285
286/* Initiate a packet transmission on the specified TX queue.
287 * Note that returning anything other than NETDEV_TX_OK will cause the
288 * OS to free the skb.
289 *
290 * This function is split out from efx_hard_start_xmit to allow the
291 * loopback test to direct packets via specific TX queues. It is
292 * therefore a non-static inline, so as not to penalise performance
293 * for non-loopback transmissions.
294 *
295 * Context: netif_tx_lock held
296 */
297inline int efx_xmit(struct efx_nic *efx,
298 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
299{
300 int rc;
301
302 /* Map fragments for DMA and add to TX queue */
303 rc = efx_enqueue_skb(tx_queue, skb);
304 if (unlikely(rc != NETDEV_TX_OK))
305 goto out;
306
307 /* Update last TX timer */
308 efx->net_dev->trans_start = jiffies;
309
310 out:
311 return rc;
312}
313
314/* Initiate a packet transmission. We use one channel per CPU
315 * (sharing when we have more CPUs than channels). On Falcon, the TX
316 * completion events will be directed back to the CPU that transmitted
317 * the packet, which should be cache-efficient.
318 *
319 * Context: non-blocking.
320 * Note that returning anything other than NETDEV_TX_OK will cause the
321 * OS to free the skb.
322 */
323int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
324{
325 struct efx_nic *efx = net_dev->priv;
326 return efx_xmit(efx, &efx->tx_queue[0], skb);
327}
328
329void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
330{
331 unsigned fill_level;
332 struct efx_nic *efx = tx_queue->efx;
333
334 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
335
336 efx_dequeue_buffers(tx_queue, index);
337
338 /* See if we need to restart the netif queue. This barrier
339 * separates the update of read_count from the test of
340 * stopped. */
341 smp_mb();
342 if (unlikely(tx_queue->stopped)) {
343 fill_level = tx_queue->insert_count - tx_queue->read_count;
344 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
345 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
346
347 /* Do this under netif_tx_lock(), to avoid racing
348 * with efx_xmit(). */
349 netif_tx_lock(efx->net_dev);
350 if (tx_queue->stopped) {
351 tx_queue->stopped = 0;
352 efx_wake_queue(efx);
353 }
354 netif_tx_unlock(efx->net_dev);
355 }
356 }
357}
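
Editor's note: the smp_mb() above pairs with the two in efx_enqueue_skb(): the xmit path publishes 'stopped' before re-reading read_count, and the completion path publishes read_count before testing 'stopped', so at least one side always sees the other's update and a wake-up cannot be lost. A reduced userspace rendering of that ordering using C11 fences; the kernel code of course uses smp_mb(), netif locking and the real stop/wake helpers, and the names here are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint read_count;		/* advanced by the completion path */
	static atomic_uint insert_count;	/* advanced by the xmit path */
	static atomic_int stopped;

	/* Xmit side, on finding the ring apparently full: publish the stop
	 * first, then re-read read_count; if space appeared, undo the stop. */
	static int xmit_ring_full(unsigned ring_size)
	{
		unsigned fill;

		atomic_fetch_add(&stopped, 1);
		atomic_thread_fence(memory_order_seq_cst);	/* "smp_mb()" no.1 */
		fill = atomic_load(&insert_count) - atomic_load(&read_count);
		if (fill >= ring_size)
			return 1;				/* stay stopped */
		atomic_fetch_sub(&stopped, 1);
		return 0;
	}

	/* Completion side: publish the new read_count first, then test stopped. */
	static void completion(unsigned retired)
	{
		atomic_fetch_add(&read_count, retired);
		atomic_thread_fence(memory_order_seq_cst);	/* "smp_mb()" no.2 */
		if (atomic_load(&stopped)) {
			atomic_store(&stopped, 0);
			printf("wake queue\n");
		}
	}

	int main(void)
	{
		atomic_store(&insert_count, 8);
		if (xmit_ring_full(8))		/* producer blocks... */
			completion(4);		/* ...until completions retire entries */
		return 0;
	}
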
358
359int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
360{
361 struct efx_nic *efx = tx_queue->efx;
362 unsigned int txq_size;
363 int i, rc;
364
365 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
366
367 /* Allocate software ring */
368 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
369 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
370 if (!tx_queue->buffer) {
371 rc = -ENOMEM;
372 goto fail1;
373 }
374 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
375 tx_queue->buffer[i].continuation = 1;
376
377 /* Allocate hardware ring */
378 rc = falcon_probe_tx(tx_queue);
379 if (rc)
380 goto fail2;
381
382 return 0;
383
384 fail2:
385 kfree(tx_queue->buffer);
386 tx_queue->buffer = NULL;
387 fail1:
388 tx_queue->used = 0;
389
390 return rc;
391}
392
393int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
394{
395 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
396
397 tx_queue->insert_count = 0;
398 tx_queue->write_count = 0;
399 tx_queue->read_count = 0;
400 tx_queue->old_read_count = 0;
401 BUG_ON(tx_queue->stopped);
402
403 /* Set up TX descriptor ring */
404 return falcon_init_tx(tx_queue);
405}
406
407void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
408{
409 struct efx_tx_buffer *buffer;
410
411 if (!tx_queue->buffer)
412 return;
413
414 /* Free any buffers left in the ring */
415 while (tx_queue->read_count != tx_queue->write_count) {
416 buffer = &tx_queue->buffer[tx_queue->read_count &
417 tx_queue->efx->type->txd_ring_mask];
418 efx_dequeue_buffer(tx_queue, buffer);
419 buffer->continuation = 1;
420 buffer->len = 0;
421
422 ++tx_queue->read_count;
423 }
424}
425
426void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
427{
428 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
429
430 /* Flush TX queue, remove descriptor ring */
431 falcon_fini_tx(tx_queue);
432
433 efx_release_tx_buffers(tx_queue);
434
435 /* Release queue's stop on port, if any */
436 if (tx_queue->stopped) {
437 tx_queue->stopped = 0;
438 efx_wake_queue(tx_queue->efx);
439 }
440}
441
442void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
443{
444 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
445 falcon_remove_tx(tx_queue);
446
447 kfree(tx_queue->buffer);
448 tx_queue->buffer = NULL;
449 tx_queue->used = 0;
450}
451
452
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
new file mode 100644
index 000000000000..1526a73b4b51
--- /dev/null
+++ b/drivers/net/sfc/tx.h
@@ -0,0 +1,24 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_TX_H
12#define EFX_TX_H
13
14#include "net_driver.h"
15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20
21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
22void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
23
24#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
new file mode 100644
index 000000000000..dca62f190198
--- /dev/null
+++ b/drivers/net/sfc/workarounds.h
@@ -0,0 +1,56 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_WORKAROUNDS_H
11#define EFX_WORKAROUNDS_H
12
13/*
14 * Hardware workarounds.
15 * Bug numbers are from Solarflare's Bugzilla.
16 */
17
18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
20
21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* SNAP frames have TOBE_DISC set */
24#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
25/* RX PCIe double split performance issue */
26#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
27/* TX pkt parser problem with <= 16 byte TXes */
28#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
29/* XGXS and XAUI reset sequencing in SW */
30#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
31/* Low rate CRC errors require XAUI reset */
32#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
33/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
34 * or a PCIe error (bug 11028) */
35#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
36/* Transmit flow control may get disabled */
37#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
38/* Flush events can take a very long time to appear */
39#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
40
41/* Spurious parity errors in TSORT buffers */
42#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
43/* iSCSI parsing errors */
44#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
45/* RX events go missing */
46#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
47/* RX_RESET on A1 */
48#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
49/* Increase filter depth to avoid RX_RESET */
50#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
51/* Flushes may never complete */
52#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
53/* Leak overlength packets rather than free */
54#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
55
56#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h
new file mode 100644
index 000000000000..b0d1f225b70a
--- /dev/null
+++ b/drivers/net/sfc/xenpack.h
@@ -0,0 +1,62 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_XENPACK_H
11#define EFX_XENPACK_H
12
13/* Exported functions from Xenpack standard PHY control */
14
15#include "mdio_10g.h"
16
17/****************************************************************************/
18/* XENPACK MDIO register extensions */
19#define MDIO_XP_LASI_RX_CTRL (0x9000)
20#define MDIO_XP_LASI_TX_CTRL (0x9001)
21#define MDIO_XP_LASI_CTRL (0x9002)
22#define MDIO_XP_LASI_RX_STAT (0x9003)
23#define MDIO_XP_LASI_TX_STAT (0x9004)
24#define MDIO_XP_LASI_STAT (0x9005)
25
26/* Control/Status bits */
27#define XP_LASI_LS_ALARM (1 << 0)
28#define XP_LASI_TX_ALARM (1 << 1)
29#define XP_LASI_RX_ALARM (1 << 2)
30/* These two are Quake vendor extensions to the standard XENPACK defines */
31#define XP_LASI_LS_INTB (1 << 3)
32#define XP_LASI_TEST (1 << 7)
33
34/* Enable LASI interrupts for PHY */
35static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
36{
37 int reg;
38 int phy_id = efx->mii.phy_id;
39 /* Read to clear LASI status register */
40 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
41 MDIO_XP_LASI_STAT);
42
43 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
44 MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
45}
46
47/* Read the LASI interrupt status to clear the interrupt. */
48static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
49{
50 /* Read to clear link status alarm */
51 return mdio_clause45_read(efx, efx->mii.phy_id,
52 MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
53}
54
55/* Turn off LASI interrupts */
56static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
57{
58 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
59 MDIO_XP_LASI_CTRL, 0);
60}
61
62#endif /* EFX_XENPACK_H */
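To make the bit layout above concrete, here is a small stand-alone user-space sketch (not driver code) that decodes a LASI status word with the same masks; the value passed in main() is invented for illustration, whereas the real driver obtains it by reading MDIO_XP_LASI_STAT over clause 45 MDIO as in xenpack_clear_lasi_irqs().

/* Stand-alone sketch: decode a XENPACK LASI status word using the
 * bit layout defined in xenpack.h above.  The value in main() is a
 * made-up example, not something read from hardware. */
#include <stdio.h>

#define XP_LASI_LS_ALARM  (1 << 0)
#define XP_LASI_TX_ALARM  (1 << 1)
#define XP_LASI_RX_ALARM  (1 << 2)

static void decode_lasi_status(unsigned int stat)
{
	printf("LASI status 0x%04x:%s%s%s\n", stat,
	       (stat & XP_LASI_LS_ALARM) ? " link-status-alarm" : "",
	       (stat & XP_LASI_TX_ALARM) ? " tx-alarm" : "",
	       (stat & XP_LASI_RX_ALARM) ? " rx-alarm" : "");
}

int main(void)
{
	/* Hypothetical value, as if read from MDIO_XP_LASI_STAT (0x9005). */
	decode_lasi_status(0x0005);
	return 0;
}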
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
new file mode 100644
index 000000000000..66dd5bf1eaa9
--- /dev/null
+++ b/drivers/net/sfc/xfp_phy.c
@@ -0,0 +1,132 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Driver for XFP optical PHYs (plus some support specific to the Quake 2032)
11 * See www.amcc.com for details (search for qt2032)
12 */
13
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include "efx.h"
17#include "gmii.h"
18#include "mdio_10g.h"
19#include "xenpack.h"
20#include "phy.h"
21#include "mac.h"
22
23#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \
24 MDIO_MMDREG_DEVS0_PMAPMD | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/****************************************************************************/
28/* Quake-specific MDIO registers */
29#define MDIO_QUAKE_LED0_REG (0xD006)
30
31void xfp_set_led(struct efx_nic *p, int led, int mode)
32{
33 int addr = MDIO_QUAKE_LED0_REG + led;
34 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
35 mode);
36}
37
38#define XFP_MAX_RESET_TIME 500
39#define XFP_RESET_WAIT 10
40
41/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
42 * a complete soft reset.
43 */
44static int xfp_reset_phy(struct efx_nic *efx)
45{
46 int rc;
47
48 rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
49 XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
50 XFP_RESET_WAIT);
51 if (rc < 0)
52 goto fail;
53
54 /* Wait 250ms for the PHY to complete bootup */
55 msleep(250);
56
57 /* Check that all the MMDs we expect are present and responding. We
58 * expect faults on some if the link is down, but not on the PHY XS */
59 rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
60 MDIO_MMDREG_DEVS0_PHYXS);
61 if (rc < 0)
62 goto fail;
63
64 efx->board_info.init_leds(efx);
65
66 return rc;
67
68 fail:
69 EFX_ERR(efx, "XFP: reset timed out!\n");
70 return rc;
71}
72
73static int xfp_phy_init(struct efx_nic *efx)
74{
75 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
76 int rc;
77
78 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
79 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
80 MDIO_ID_REV(devid));
81
82 rc = xfp_reset_phy(efx);
83
84 EFX_INFO(efx, "XFP: PHY init %s.\n",
85 rc ? "failed" : "successful");
86
87 return rc;
88}
89
90static void xfp_phy_clear_interrupt(struct efx_nic *efx)
91{
92 xenpack_clear_lasi_irqs(efx);
93}
94
95static int xfp_link_ok(struct efx_nic *efx)
96{
97 return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
98}
99
100static int xfp_phy_check_hw(struct efx_nic *efx)
101{
102 int rc = 0;
103 int link_up = xfp_link_ok(efx);
104 /* Simulate a PHY event if link state has changed */
105 if (link_up != efx->link_up)
106 falcon_xmac_sim_phy_event(efx);
107
108 return rc;
109}
110
111static void xfp_phy_reconfigure(struct efx_nic *efx)
112{
113 efx->link_up = xfp_link_ok(efx);
114 efx->link_options = GM_LPA_10000FULL;
115}
116
117
118static void xfp_phy_fini(struct efx_nic *efx)
119{
120 /* Clobber the LED if it was blinking */
121 efx->board_info.blink(efx, 0);
122}
123
124struct efx_phy_operations falcon_xfp_phy_ops = {
125 .init = xfp_phy_init,
126 .reconfigure = xfp_phy_reconfigure,
127 .check_hw = xfp_phy_check_hw,
128 .fini = xfp_phy_fini,
129 .clear_interrupt = xfp_phy_clear_interrupt,
130 .reset_xaui = efx_port_dummy_op_void,
131 .mmds = XFP_REQUIRED_DEVS,
132};
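xfp_reset_phy() above bounds the soft reset by XFP_MAX_RESET_TIME / XFP_RESET_WAIT poll attempts spaced XFP_RESET_WAIT milliseconds apart. Purely to illustrate that poll-with-timeout shape (this is not driver code; check_done() is a made-up stand-in for the MDIO readback of the reset bit), a user-space sketch:

/* Stand-alone sketch of the poll-until-clear pattern used by
 * xfp_reset_phy(): retry a readback up to max_time/interval times,
 * sleeping `interval` ms between attempts. */
#include <stdio.h>
#include <unistd.h>

static int check_done(int attempt)
{
	/* Pretend the reset bit self-clears on the fourth readback. */
	return attempt >= 3;
}

static int poll_reset_complete(int max_time_ms, int interval_ms)
{
	int tries = max_time_ms / interval_ms;
	int i;

	for (i = 0; i < tries; i++) {
		if (check_done(i))
			return 0;		/* reset completed */
		usleep(interval_ms * 1000);
	}
	return -1;				/* timed out */
}

int main(void)
{
	/* Mirrors XFP_MAX_RESET_TIME (500 ms) / XFP_RESET_WAIT (10 ms). */
	printf("reset %s\n",
	       poll_reset_complete(500, 10) ? "timed out" : "done");
	return 0;
}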
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 20745fd4e973..abc63b0663be 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -212,6 +212,12 @@ enum _DescStatusBit {
212 THOL2 = 0x20000000, 212 THOL2 = 0x20000000,
213 THOL1 = 0x10000000, 213 THOL1 = 0x10000000,
214 THOL0 = 0x00000000, 214 THOL0 = 0x00000000,
215
216 WND = 0x00080000,
217 TABRT = 0x00040000,
218 FIFO = 0x00020000,
219 LINK = 0x00010000,
220 ColCountMask = 0x0000ffff,
215 /* RxDesc.status */ 221 /* RxDesc.status */
216 IPON = 0x20000000, 222 IPON = 0x20000000,
217 TCPON = 0x10000000, 223 TCPON = 0x10000000,
@@ -480,30 +486,23 @@ static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
480 desc->status = 0x0; 486 desc->status = 0x0;
481} 487}
482 488
483static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, 489static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
484 struct RxDesc *desc, u32 rx_buf_sz) 490 struct RxDesc *desc)
485{ 491{
492 u32 rx_buf_sz = tp->rx_buf_sz;
486 struct sk_buff *skb; 493 struct sk_buff *skb;
487 dma_addr_t mapping;
488 int ret = 0;
489
490 skb = dev_alloc_skb(rx_buf_sz);
491 if (!skb)
492 goto err_out;
493
494 *sk_buff = skb;
495 494
496 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 495 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
497 PCI_DMA_FROMDEVICE); 496 if (likely(skb)) {
497 dma_addr_t mapping;
498 498
499 sis190_map_to_asic(desc, mapping, rx_buf_sz); 499 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
500out: 500 PCI_DMA_FROMDEVICE);
501 return ret; 501 sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 } else
503 sis190_make_unusable_by_asic(desc);
502 504
503err_out: 505 return skb;
504 ret = -ENOMEM;
505 sis190_make_unusable_by_asic(desc);
506 goto out;
507} 506}
508 507
509static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, 508static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -512,37 +511,41 @@ static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
512 u32 cur; 511 u32 cur;
513 512
514 for (cur = start; cur < end; cur++) { 513 for (cur = start; cur < end; cur++) {
515 int ret, i = cur % NUM_RX_DESC; 514 unsigned int i = cur % NUM_RX_DESC;
516 515
517 if (tp->Rx_skbuff[i]) 516 if (tp->Rx_skbuff[i])
518 continue; 517 continue;
519 518
520 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, 519 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
521 tp->RxDescRing + i, tp->rx_buf_sz); 520
522 if (ret < 0) 521 if (!tp->Rx_skbuff[i])
523 break; 522 break;
524 } 523 }
525 return cur - start; 524 return cur - start;
526} 525}
527 526
528static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, 527static bool sis190_try_rx_copy(struct sis190_private *tp,
529 struct RxDesc *desc, int rx_buf_sz) 528 struct sk_buff **sk_buff, int pkt_size,
529 dma_addr_t addr)
530{ 530{
531 int ret = -1; 531 struct sk_buff *skb;
532 bool done = false;
532 533
533 if (pkt_size < rx_copybreak) { 534 if (pkt_size >= rx_copybreak)
534 struct sk_buff *skb; 535 goto out;
535 536
536 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 537 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
537 if (skb) { 538 if (!skb)
538 skb_reserve(skb, NET_IP_ALIGN); 539 goto out;
539 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 540
540 *sk_buff = skb; 541 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
541 sis190_give_to_asic(desc, rx_buf_sz); 542 PCI_DMA_FROMDEVICE);
542 ret = 0; 543 skb_reserve(skb, 2);
543 } 544 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
544 } 545 *sk_buff = skb;
545 return ret; 546 done = true;
547out:
548 return done;
546} 549}
547 550
548static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) 551static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
@@ -592,9 +595,9 @@ static int sis190_rx_interrupt(struct net_device *dev,
592 sis190_give_to_asic(desc, tp->rx_buf_sz); 595 sis190_give_to_asic(desc, tp->rx_buf_sz);
593 else { 596 else {
594 struct sk_buff *skb = tp->Rx_skbuff[entry]; 597 struct sk_buff *skb = tp->Rx_skbuff[entry];
598 dma_addr_t addr = le32_to_cpu(desc->addr);
595 int pkt_size = (status & RxSizeMask) - 4; 599 int pkt_size = (status & RxSizeMask) - 4;
596 void (*pci_action)(struct pci_dev *, dma_addr_t, 600 struct pci_dev *pdev = tp->pci_dev;
597 size_t, int) = pci_dma_sync_single_for_device;
598 601
599 if (unlikely(pkt_size > tp->rx_buf_sz)) { 602 if (unlikely(pkt_size > tp->rx_buf_sz)) {
600 net_intr(tp, KERN_INFO 603 net_intr(tp, KERN_INFO
@@ -606,20 +609,18 @@ static int sis190_rx_interrupt(struct net_device *dev,
606 continue; 609 continue;
607 } 610 }
608 611
609 pci_dma_sync_single_for_cpu(tp->pci_dev,
610 le32_to_cpu(desc->addr), tp->rx_buf_sz,
611 PCI_DMA_FROMDEVICE);
612 612
613 if (sis190_try_rx_copy(&skb, pkt_size, desc, 613 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
614 tp->rx_buf_sz)) { 614 pci_dma_sync_single_for_device(pdev, addr,
615 pci_action = pci_unmap_single; 615 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
616 sis190_give_to_asic(desc, tp->rx_buf_sz);
617 } else {
618 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
619 PCI_DMA_FROMDEVICE);
616 tp->Rx_skbuff[entry] = NULL; 620 tp->Rx_skbuff[entry] = NULL;
617 sis190_make_unusable_by_asic(desc); 621 sis190_make_unusable_by_asic(desc);
618 } 622 }
619 623
620 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
621 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
622
623 skb_put(skb, pkt_size); 624 skb_put(skb, pkt_size);
624 skb->protocol = eth_type_trans(skb, dev); 625 skb->protocol = eth_type_trans(skb, dev);
625 626
@@ -658,9 +659,31 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
658 memset(desc, 0x00, sizeof(*desc)); 659 memset(desc, 0x00, sizeof(*desc));
659} 660}
660 661
662static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
663{
664#define TxErrMask (WND | TABRT | FIFO | LINK)
665
666 if (!unlikely(status & TxErrMask))
667 return 0;
668
669 if (status & WND)
670 stats->tx_window_errors++;
671 if (status & TABRT)
672 stats->tx_aborted_errors++;
673 if (status & FIFO)
674 stats->tx_fifo_errors++;
675 if (status & LINK)
676 stats->tx_carrier_errors++;
677
678 stats->tx_errors++;
679
680 return -1;
681}
682
661static void sis190_tx_interrupt(struct net_device *dev, 683static void sis190_tx_interrupt(struct net_device *dev,
662 struct sis190_private *tp, void __iomem *ioaddr) 684 struct sis190_private *tp, void __iomem *ioaddr)
663{ 685{
686 struct net_device_stats *stats = &dev->stats;
664 u32 pending, dirty_tx = tp->dirty_tx; 687 u32 pending, dirty_tx = tp->dirty_tx;
665 /* 688 /*
666 * It would not be needed if queueing was allowed to be enabled 689 * It would not be needed if queueing was allowed to be enabled
@@ -675,15 +698,19 @@ static void sis190_tx_interrupt(struct net_device *dev,
675 for (; pending; pending--, dirty_tx++) { 698 for (; pending; pending--, dirty_tx++) {
676 unsigned int entry = dirty_tx % NUM_TX_DESC; 699 unsigned int entry = dirty_tx % NUM_TX_DESC;
677 struct TxDesc *txd = tp->TxDescRing + entry; 700 struct TxDesc *txd = tp->TxDescRing + entry;
701 u32 status = le32_to_cpu(txd->status);
678 struct sk_buff *skb; 702 struct sk_buff *skb;
679 703
680 if (le32_to_cpu(txd->status) & OWNbit) 704 if (status & OWNbit)
681 break; 705 break;
682 706
683 skb = tp->Tx_skbuff[entry]; 707 skb = tp->Tx_skbuff[entry];
684 708
685 dev->stats.tx_packets++; 709 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
686 dev->stats.tx_bytes += skb->len; 710 stats->tx_packets++;
711 stats->tx_bytes += skb->len;
712 stats->collisions += ((status & ColCountMask) - 1);
713 }
687 714
688 sis190_unmap_tx_skb(tp->pci_dev, skb, txd); 715 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
689 tp->Tx_skbuff[entry] = NULL; 716 tp->Tx_skbuff[entry] = NULL;
@@ -904,10 +931,9 @@ static void sis190_phy_task(struct work_struct *work)
904 mod_timer(&tp->timer, jiffies + HZ/10); 931 mod_timer(&tp->timer, jiffies + HZ/10);
905 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & 932 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
906 BMSR_ANEGCOMPLETE)) { 933 BMSR_ANEGCOMPLETE)) {
907 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
908 dev->name);
909 netif_carrier_off(dev); 934 netif_carrier_off(dev);
910 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET); 935 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
936 dev->name);
911 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); 937 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
912 } else { 938 } else {
913 /* Rejoice ! */ 939 /* Rejoice ! */
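The sis190 TX completion rework above keys its error accounting off the newly exposed WND/TABRT/FIFO/LINK/ColCountMask descriptor bits. As a stand-alone illustration (the masks are copied from the hunk above; the status words fed in main() are invented), a sketch of that accounting:

/* Stand-alone sketch: accumulate counters from a sis190 TX descriptor
 * status word, mirroring sis190_tx_pkt_err() and the collision count
 * handling in sis190_tx_interrupt(). */
#include <stdio.h>

#define WND          0x00080000
#define TABRT        0x00040000
#define FIFO         0x00020000
#define LINK         0x00010000
#define ColCountMask 0x0000ffff

struct tx_stats {
	unsigned long tx_packets, tx_errors, collisions;
	unsigned long tx_window_errors, tx_aborted_errors;
	unsigned long tx_fifo_errors, tx_carrier_errors;
};

static void account_tx_status(struct tx_stats *s, unsigned int status)
{
	if (status & (WND | TABRT | FIFO | LINK)) {
		if (status & WND)
			s->tx_window_errors++;
		if (status & TABRT)
			s->tx_aborted_errors++;
		if (status & FIFO)
			s->tx_fifo_errors++;
		if (status & LINK)
			s->tx_carrier_errors++;
		s->tx_errors++;
		return;
	}
	s->tx_packets++;
	/* Collision counter reports attempts, so one try means none. */
	s->collisions += (status & ColCountMask) - 1;
}

int main(void)
{
	struct tx_stats s = { 0 };

	account_tx_status(&s, 0x00000001);	/* clean send, one attempt */
	account_tx_status(&s, FIFO | 0x0002);	/* FIFO underrun */
	printf("packets=%lu errors=%lu fifo=%lu collisions=%lu\n",
	       s.tx_packets, s.tx_errors, s.tx_fifo_errors, s.collisions);
	return 0;
}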
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 9a25f550fd16..d5b7a76fcaad 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,10 @@ config IWLCORE
6 tristate "Intel Wireless Wifi Core" 6 tristate "Intel Wireless Wifi Core"
7 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 7 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
8 select IWLWIFI 8 select IWLWIFI
9 select MAC80211_LEDS if IWLWIFI_LEDS
10 select LEDS_CLASS if IWLWIFI_LEDS
11 select RFKILL if IWLWIFI_RFKILL
12 select RFKILL_INPUT if IWLWIFI_RFKILL
9 13
10config IWLWIFI_LEDS 14config IWLWIFI_LEDS
11 bool 15 bool
@@ -14,8 +18,6 @@ config IWLWIFI_LEDS
14config IWLWIFI_RFKILL 18config IWLWIFI_RFKILL
15 boolean "IWLWIFI RF kill support" 19 boolean "IWLWIFI RF kill support"
16 depends on IWLCORE 20 depends on IWLCORE
17 select RFKILL
18 select RFKILL_INPUT
19 21
20config IWL4965 22config IWL4965
21 tristate "Intel Wireless WiFi 4965AGN" 23 tristate "Intel Wireless WiFi 4965AGN"
@@ -55,8 +57,6 @@ config IWL4965_HT
55config IWL4965_LEDS 57config IWL4965_LEDS
56 bool "Enable LEDS features in iwl4965 driver" 58 bool "Enable LEDS features in iwl4965 driver"
57 depends on IWL4965 59 depends on IWL4965
58 select MAC80211_LEDS
59 select LEDS_CLASS
60 select IWLWIFI_LEDS 60 select IWLWIFI_LEDS
61 ---help--- 61 ---help---
62 This option enables LEDS for the iwlwifi drivers 62 This option enables LEDS for the iwlwifi drivers
@@ -112,6 +112,8 @@ config IWL3945
112 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 112 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
113 select FW_LOADER 113 select FW_LOADER
114 select IWLWIFI 114 select IWLWIFI
115 select MAC80211_LEDS if IWL3945_LEDS
116 select LEDS_CLASS if IWL3945_LEDS
115 ---help--- 117 ---help---
116 Select to build the driver supporting the: 118 Select to build the driver supporting the:
117 119
@@ -143,8 +145,6 @@ config IWL3945_SPECTRUM_MEASUREMENT
143config IWL3945_LEDS 145config IWL3945_LEDS
144 bool "Enable LEDS features in iwl3945 driver" 146 bool "Enable LEDS features in iwl3945 driver"
145 depends on IWL3945 147 depends on IWL3945
146 select MAC80211_LEDS
147 select LEDS_CLASS
148 ---help--- 148 ---help---
149 This option enables LEDS for the iwl3945 driver. 149 This option enables LEDS for the iwl3945 driver.
150 150
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 85b2e51a42ae..26a930e832bd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,44 +152,89 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
152 return 0; 152 return 0;
153} 153}
154 154
155static int __get_next_bus_id(const char **buf, char *bus_id)
156{
157 int rc, len;
158 char *start, *end;
159
160 start = (char *)*buf;
161 end = strchr(start, ',');
162 if (!end) {
163 /* Last entry. Strip trailing newline, if applicable. */
164 end = strchr(start, '\n');
165 if (end)
166 *end = '\0';
167 len = strlen(start) + 1;
168 } else {
169 len = end - start + 1;
170 end++;
171 }
172 if (len < BUS_ID_SIZE) {
173 strlcpy(bus_id, start, len);
174 rc = 0;
175 } else
176 rc = -EINVAL;
177 *buf = end;
178 return rc;
179}
180
181static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE])
182{
183 int cssid, ssid, devno;
184
185 /* Must be of form %x.%x.%04x */
186 if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
187 return 0;
188 return 1;
189}
190
155/** 191/**
156 * ccwgroup_create() - create and register a ccw group device 192 * ccwgroup_create_from_string() - create and register a ccw group device
157 * @root: parent device for the new device 193 * @root: parent device for the new device
158 * @creator_id: identifier of creating driver 194 * @creator_id: identifier of creating driver
159 * @cdrv: ccw driver of slave devices 195 * @cdrv: ccw driver of slave devices
160 * @argc: number of slave devices 196 * @num_devices: number of slave devices
161 * @argv: bus ids of slave devices 197 * @buf: buffer containing comma separated bus ids of slave devices
162 * 198 *
163 * Create and register a new ccw group device as a child of @root. Slave 199 * Create and register a new ccw group device as a child of @root. Slave
164 * devices are obtained from the list of bus ids given in @argv[] and must all 200 * devices are obtained from the list of bus ids given in @buf and must all
165 * belong to @cdrv. 201 * belong to @cdrv.
166 * Returns: 202 * Returns:
167 * %0 on success and an error code on failure. 203 * %0 on success and an error code on failure.
168 * Context: 204 * Context:
169 * non-atomic 205 * non-atomic
170 */ 206 */
171int ccwgroup_create(struct device *root, unsigned int creator_id, 207int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
172 struct ccw_driver *cdrv, int argc, char *argv[]) 208 struct ccw_driver *cdrv, int num_devices,
209 const char *buf)
173{ 210{
174 struct ccwgroup_device *gdev; 211 struct ccwgroup_device *gdev;
175 int i; 212 int rc, i;
176 int rc; 213 char tmp_bus_id[BUS_ID_SIZE];
214 const char *curr_buf;
177 215
178 if (argc > 256) /* disallow dumb users */ 216 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
179 return -EINVAL; 217 GFP_KERNEL);
180
181 gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
182 if (!gdev) 218 if (!gdev)
183 return -ENOMEM; 219 return -ENOMEM;
184 220
185 atomic_set(&gdev->onoff, 0); 221 atomic_set(&gdev->onoff, 0);
186 mutex_init(&gdev->reg_mutex); 222 mutex_init(&gdev->reg_mutex);
187 mutex_lock(&gdev->reg_mutex); 223 mutex_lock(&gdev->reg_mutex);
188 for (i = 0; i < argc; i++) { 224 curr_buf = buf;
189 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 225 for (i = 0; i < num_devices && curr_buf; i++) {
190 226 rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
191 /* all devices have to be of the same type in 227 if (rc != 0)
192 * order to be grouped */ 228 goto error;
229 if (!__is_valid_bus_id(tmp_bus_id)) {
230 rc = -EINVAL;
231 goto error;
232 }
233 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
234 /*
235 * All devices have to be of the same type in
236 * order to be grouped.
237 */
193 if (!gdev->cdev[i] 238 if (!gdev->cdev[i]
194 || gdev->cdev[i]->id.driver_info != 239 || gdev->cdev[i]->id.driver_info !=
195 gdev->cdev[0]->id.driver_info) { 240 gdev->cdev[0]->id.driver_info) {
@@ -203,9 +248,18 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
203 } 248 }
204 dev_set_drvdata(&gdev->cdev[i]->dev, gdev); 249 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
205 } 250 }
206 251 /* Check for sufficient number of bus ids. */
252 if (i < num_devices && !curr_buf) {
253 rc = -EINVAL;
254 goto error;
255 }
256 /* Check for trailing stuff. */
257 if (i == num_devices && strlen(curr_buf) > 0) {
258 rc = -EINVAL;
259 goto error;
260 }
207 gdev->creator_id = creator_id; 261 gdev->creator_id = creator_id;
208 gdev->count = argc; 262 gdev->count = num_devices;
209 gdev->dev.bus = &ccwgroup_bus_type; 263 gdev->dev.bus = &ccwgroup_bus_type;
210 gdev->dev.parent = root; 264 gdev->dev.parent = root;
211 gdev->dev.release = ccwgroup_release; 265 gdev->dev.release = ccwgroup_release;
@@ -233,7 +287,7 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
233 device_remove_file(&gdev->dev, &dev_attr_ungroup); 287 device_remove_file(&gdev->dev, &dev_attr_ungroup);
234 device_unregister(&gdev->dev); 288 device_unregister(&gdev->dev);
235error: 289error:
236 for (i = 0; i < argc; i++) 290 for (i = 0; i < num_devices; i++)
237 if (gdev->cdev[i]) { 291 if (gdev->cdev[i]) {
238 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 292 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
239 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 293 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
@@ -243,6 +297,7 @@ error:
243 put_device(&gdev->dev); 297 put_device(&gdev->dev);
244 return rc; 298 return rc;
245} 299}
300EXPORT_SYMBOL(ccwgroup_create_from_string);
246 301
247static int __init 302static int __init
248init_ccwgroup (void) 303init_ccwgroup (void)
@@ -521,6 +576,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
521MODULE_LICENSE("GPL"); 576MODULE_LICENSE("GPL");
522EXPORT_SYMBOL(ccwgroup_driver_register); 577EXPORT_SYMBOL(ccwgroup_driver_register);
523EXPORT_SYMBOL(ccwgroup_driver_unregister); 578EXPORT_SYMBOL(ccwgroup_driver_unregister);
524EXPORT_SYMBOL(ccwgroup_create);
525EXPORT_SYMBOL(ccwgroup_probe_ccwdev); 579EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
526EXPORT_SYMBOL(ccwgroup_remove_ccwdev); 580EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
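ccwgroup_create_from_string() replaces the per-driver argv parsing with __get_next_bus_id() and __is_valid_bus_id(): split the sysfs buffer on commas, strip a trailing newline on the last token, and check each token against the %x.%x.%04x form. A minimal user-space sketch of the same splitting and validation follows (not the kernel implementation: strlcpy, the error codes and the exact BUS_ID_SIZE handling are simplified):

/* Stand-alone sketch of the comma-separated bus-id parsing performed
 * by ccwgroup_create_from_string() on the sysfs buffer. */
#include <stdio.h>
#include <string.h>

#define BUS_ID_SIZE 20

static int get_next_bus_id(const char **buf, char *bus_id)
{
	const char *start = *buf;
	const char *end = strchr(start, ',');
	size_t len;

	if (!end) {
		/* Last entry: stop at a trailing newline, if any. */
		end = strchr(start, '\n');
		len = end ? (size_t)(end - start) : strlen(start);
		*buf = NULL;
	} else {
		len = end - start;
		*buf = end + 1;
	}
	if (len >= BUS_ID_SIZE)
		return -1;
	memcpy(bus_id, start, len);
	bus_id[len] = '\0';
	return 0;
}

static int is_valid_bus_id(const char *bus_id)
{
	int cssid, ssid, devno;

	/* Must be of the form %x.%x.%04x, e.g. "0.0.0600". */
	return sscanf(bus_id, "%x.%1x.%4x", &cssid, &ssid, &devno) == 3;
}

int main(void)
{
	const char *buf = "0.0.0600,0.0.0601,0.0.0602\n";
	char id[BUS_ID_SIZE];

	while (buf && get_next_bus_id(&buf, id) == 0)
		printf("%s -> %s\n", id, is_valid_bus_id(id) ? "ok" : "bad");
	return 0;
}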
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 76728ae4b843..8e7697305a4c 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -62,30 +62,14 @@ static struct device *cu3088_root_dev;
62static ssize_t 62static ssize_t
63group_write(struct device_driver *drv, const char *buf, size_t count) 63group_write(struct device_driver *drv, const char *buf, size_t count)
64{ 64{
65 const char *start, *end;
66 char bus_ids[2][BUS_ID_SIZE], *argv[2];
67 int i;
68 int ret; 65 int ret;
69 struct ccwgroup_driver *cdrv; 66 struct ccwgroup_driver *cdrv;
70 67
71 cdrv = to_ccwgroupdrv(drv); 68 cdrv = to_ccwgroupdrv(drv);
72 if (!cdrv) 69 if (!cdrv)
73 return -EINVAL; 70 return -EINVAL;
74 start = buf; 71 ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
75 for (i=0; i<2; i++) { 72 &cu3088_driver, 2, buf);
76 static const char delim[] = {',', '\n'};
77 int len;
78
79 if (!(end = strchr(start, delim[i])))
80 return -EINVAL;
81 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
82 strlcpy (bus_ids[i], start, len);
83 argv[i] = bus_ids[i];
84 start = end + 1;
85 }
86
87 ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
88 &cu3088_driver, 2, argv);
89 73
90 return (ret == 0) ? count : ret; 74 return (ret == 0) ? count : ret;
91} 75}
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index f51ed9972587..dd22f4b37037 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1793,7 +1793,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1793 skb->protocol = card->lan_type_trans(skb, card->dev); 1793 skb->protocol = card->lan_type_trans(skb, card->dev);
1794 card->stats.rx_bytes += skb_len; 1794 card->stats.rx_bytes += skb_len;
1795 card->stats.rx_packets++; 1795 card->stats.rx_packets++;
1796 *((__u32 *)skb->cb) = ++card->pkt_seq; 1796 if (skb->protocol == htons(ETH_P_802_2))
1797 *((__u32 *)skb->cb) = ++card->pkt_seq;
1797 netif_rx(skb); 1798 netif_rx(skb);
1798} 1799}
1799 1800
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8f876f6ab367..e4ba6a0372ac 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1313,8 +1313,6 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1313 * and throw away packet. 1313 * and throw away packet.
1314 */ 1314 */
1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { 1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1316 if (!in_atomic())
1317 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1318 dev_kfree_skb(skb); 1316 dev_kfree_skb(skb);
1319 privptr->stats.tx_dropped++; 1317 privptr->stats.tx_dropped++;
1320 privptr->stats.tx_errors++; 1318 privptr->stats.tx_errors++;
@@ -2147,6 +2145,7 @@ static int __init netiucv_init(void)
2147 if (rc) 2145 if (rc)
2148 goto out_dbf; 2146 goto out_dbf;
2149 IUCV_DBF_TEXT(trace, 3, __func__); 2147 IUCV_DBF_TEXT(trace, 3, __func__);
2148 netiucv_driver.groups = netiucv_drv_attr_groups;
2150 rc = driver_register(&netiucv_driver); 2149 rc = driver_register(&netiucv_driver);
2151 if (rc) { 2150 if (rc) {
2152 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 66f4f12503c9..699ac11debd8 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -72,22 +72,7 @@ struct qeth_dbf_info {
72 debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) 72 debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
73 73
74#define QETH_DBF_TEXT_(name, level, text...) \ 74#define QETH_DBF_TEXT_(name, level, text...) \
75 do { \ 75 qeth_dbf_longtext(QETH_DBF_##name, level, text)
76 if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \
77 char *dbf_txt_buf = \
78 get_cpu_var(QETH_DBF_TXT_BUF); \
79 sprintf(dbf_txt_buf, text); \
80 debug_text_event(qeth_dbf[QETH_DBF_##name].id, \
81 level, dbf_txt_buf); \
82 put_cpu_var(QETH_DBF_TXT_BUF); \
83 } \
84 } while (0)
85
86/* Allow to sort out low debug levels early to avoid wasted sprints */
87static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
88{
89 return (level <= dbf_grp->level);
90}
91 76
92/** 77/**
93 * some more debug stuff 78 * some more debug stuff
@@ -773,27 +758,6 @@ static inline int qeth_get_micros(void)
773 return (int) (get_clock() >> 12); 758 return (int) (get_clock() >> 12);
774} 759}
775 760
776static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
777 int size)
778{
779 void *hdr;
780
781 hdr = (void *) skb_push(skb, size);
782 /*
783 * sanity check, the Linux memory allocation scheme should
784 * never present us cases like this one (the qdio header size plus
785 * the first 40 bytes of the paket cross a 4k boundary)
786 */
787 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
788 (((unsigned long) hdr + size +
789 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
790 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
791 QETH_CARD_IFNAME(card));
792 return NULL;
793 }
794 return hdr;
795}
796
797static inline int qeth_get_ip_version(struct sk_buff *skb) 761static inline int qeth_get_ip_version(struct sk_buff *skb)
798{ 762{
799 switch (skb->protocol) { 763 switch (skb->protocol) {
@@ -806,6 +770,12 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
806 } 770 }
807} 771}
808 772
773static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
774 struct qeth_buffer_pool_entry *entry)
775{
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777}
778
809struct qeth_eddp_context; 779struct qeth_eddp_context;
810extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
811extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
@@ -843,8 +813,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
843int qeth_query_setadapterparms(struct qeth_card *); 813int qeth_query_setadapterparms(struct qeth_card *);
844int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, 814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
845 unsigned int, const char *); 815 unsigned int, const char *);
846void qeth_put_buffer_pool_entry(struct qeth_card *,
847 struct qeth_buffer_pool_entry *);
848void qeth_queue_input_buffer(struct qeth_card *, int); 816void qeth_queue_input_buffer(struct qeth_card *, int);
849struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 817struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
850 struct qdio_buffer *, struct qdio_buffer_element **, int *, 818 struct qdio_buffer *, struct qdio_buffer_element **, int *,
@@ -880,8 +848,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
880 void *reply_param); 848 void *reply_param);
881int qeth_get_cast_type(struct qeth_card *, struct sk_buff *); 849int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
882int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 850int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
883struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
884 struct qeth_hdr **);
885int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 851int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
886int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 852int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
887 struct sk_buff *, struct qeth_hdr *, int, 853 struct sk_buff *, struct qeth_hdr *, int,
@@ -894,6 +860,8 @@ void qeth_core_get_ethtool_stats(struct net_device *,
894 struct ethtool_stats *, u64 *); 860 struct ethtool_stats *, u64 *);
895void qeth_core_get_strings(struct net_device *, u32, u8 *); 861void qeth_core_get_strings(struct net_device *, u32, u8 *);
896void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 862void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
863void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
864int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
897 865
898/* exports for OSN */ 866/* exports for OSN */
899int qeth_osn_assist(struct net_device *, void *, int); 867int qeth_osn_assist(struct net_device *, void *, int);
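QETH_DBF_TEXT_() now forwards to qeth_dbf_longtext(), whose body is added in qeth_core_main.c further down: check the debug area's level first, then format into a small buffer and record the text. A stand-alone sketch of that level-gated variadic helper (record_text() and area_level below are made-up stand-ins for debug_text_event() and the dbf area's level, not the s390 debug API):

/* Stand-alone sketch of a level-gated, variadic debug-text helper in
 * the style of qeth_dbf_longtext(). */
#include <stdarg.h>
#include <stdio.h>

static int area_level = 3;	/* stand-in for the dbf area's level */

static void record_text(const char *txt)
{
	/* Stand-in for handing the text to the trace backend. */
	printf("dbf: %s\n", txt);
}

static void dbf_longtext(int level, const char *fmt, ...)
{
	char buf[32];
	va_list args;

	if (level > area_level)
		return;			/* sorted out before formatting */

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	record_text(buf);
}

int main(void)
{
	dbf_longtext(2, "2err%d", -5);	/* recorded */
	dbf_longtext(6, "qdfillbf");	/* filtered out by level */
	return 0;
}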
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 055f5c3e7b56..436bf1f6d4a6 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -26,9 +26,6 @@
26#include "qeth_core.h" 26#include "qeth_core.h"
27#include "qeth_core_offl.h" 27#include "qeth_core_offl.h"
28 28
29static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
30#define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf
31
32struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 29struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
33 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 30 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
34 /* N P A M L V H */ 31 /* N P A M L V H */
@@ -2255,14 +2252,6 @@ void qeth_print_status_message(struct qeth_card *card)
2255} 2252}
2256EXPORT_SYMBOL_GPL(qeth_print_status_message); 2253EXPORT_SYMBOL_GPL(qeth_print_status_message);
2257 2254
2258void qeth_put_buffer_pool_entry(struct qeth_card *card,
2259 struct qeth_buffer_pool_entry *entry)
2260{
2261 QETH_DBF_TEXT(TRACE, 6, "ptbfplen");
2262 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2263}
2264EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
2265
2266static void qeth_initialize_working_pool_list(struct qeth_card *card) 2255static void qeth_initialize_working_pool_list(struct qeth_card *card)
2267{ 2256{
2268 struct qeth_buffer_pool_entry *entry; 2257 struct qeth_buffer_pool_entry *entry;
@@ -2603,7 +2592,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2603 int rc; 2592 int rc;
2604 int newcount = 0; 2593 int newcount = 0;
2605 2594
2606 QETH_DBF_TEXT(TRACE, 6, "queinbuf");
2607 count = (index < queue->next_buf_to_init)? 2595 count = (index < queue->next_buf_to_init)?
2608 card->qdio.in_buf_pool.buf_count - 2596 card->qdio.in_buf_pool.buf_count -
2609 (queue->next_buf_to_init - index) : 2597 (queue->next_buf_to_init - index) :
@@ -2792,8 +2780,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2792 int i; 2780 int i;
2793 unsigned int qdio_flags; 2781 unsigned int qdio_flags;
2794 2782
2795 QETH_DBF_TEXT(TRACE, 6, "flushbuf");
2796
2797 for (i = index; i < index + count; ++i) { 2783 for (i = index; i < index + count; ++i) {
2798 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2784 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2799 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2785 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
@@ -3037,49 +3023,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3037} 3023}
3038EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3024EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3039 3025
3040static void __qeth_free_new_skb(struct sk_buff *orig_skb,
3041 struct sk_buff *new_skb)
3042{
3043 if (orig_skb != new_skb)
3044 dev_kfree_skb_any(new_skb);
3045}
3046
3047static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
3048 struct sk_buff *skb, int size)
3049{
3050 struct sk_buff *new_skb = skb;
3051
3052 if (skb_headroom(skb) >= size)
3053 return skb;
3054 new_skb = skb_realloc_headroom(skb, size);
3055 if (!new_skb)
3056 PRINT_ERR("Could not realloc headroom for qeth_hdr "
3057 "on interface %s", QETH_CARD_IFNAME(card));
3058 return new_skb;
3059}
3060
3061struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3062 struct qeth_hdr **hdr)
3063{
3064 struct sk_buff *new_skb;
3065
3066 QETH_DBF_TEXT(TRACE, 6, "prepskb");
3067
3068 new_skb = qeth_realloc_headroom(card, skb,
3069 sizeof(struct qeth_hdr));
3070 if (!new_skb)
3071 return NULL;
3072
3073 *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
3074 sizeof(struct qeth_hdr)));
3075 if (*hdr == NULL) {
3076 __qeth_free_new_skb(skb, new_skb);
3077 return NULL;
3078 }
3079 return new_skb;
3080}
3081EXPORT_SYMBOL_GPL(qeth_prepare_skb);
3082
3083int qeth_get_elements_no(struct qeth_card *card, void *hdr, 3026int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3084 struct sk_buff *skb, int elems) 3027 struct sk_buff *skb, int elems)
3085{ 3028{
@@ -3100,8 +3043,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3100} 3043}
3101EXPORT_SYMBOL_GPL(qeth_get_elements_no); 3044EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3102 3045
3103static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 3046static inline void __qeth_fill_buffer(struct sk_buff *skb,
3104 int is_tso, int *next_element_to_fill) 3047 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill)
3105{ 3048{
3106 int length = skb->len; 3049 int length = skb->len;
3107 int length_here; 3050 int length_here;
@@ -3143,15 +3086,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3143 *next_element_to_fill = element; 3086 *next_element_to_fill = element;
3144} 3087}
3145 3088
3146static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, 3089static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3147 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) 3090 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
3148{ 3091{
3149 struct qdio_buffer *buffer; 3092 struct qdio_buffer *buffer;
3150 struct qeth_hdr_tso *hdr; 3093 struct qeth_hdr_tso *hdr;
3151 int flush_cnt = 0, hdr_len, large_send = 0; 3094 int flush_cnt = 0, hdr_len, large_send = 0;
3152 3095
3153 QETH_DBF_TEXT(TRACE, 6, "qdfillbf");
3154
3155 buffer = buf->buffer; 3096 buffer = buf->buffer;
3156 atomic_inc(&skb->users); 3097 atomic_inc(&skb->users);
3157 skb_queue_tail(&buf->skb_list, skb); 3098 skb_queue_tail(&buf->skb_list, skb);
@@ -3210,8 +3151,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3210 int flush_cnt = 0; 3151 int flush_cnt = 0;
3211 int index; 3152 int index;
3212 3153
3213 QETH_DBF_TEXT(TRACE, 6, "dosndpfa");
3214
3215 /* spin until we get the queue ... */ 3154 /* spin until we get the queue ... */
3216 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3155 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3217 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3156 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3263,8 +3202,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3263 int tmp; 3202 int tmp;
3264 int rc = 0; 3203 int rc = 0;
3265 3204
3266 QETH_DBF_TEXT(TRACE, 6, "dosndpkt");
3267
3268 /* spin until we get the queue ... */ 3205 /* spin until we get the queue ... */
3269 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3206 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3270 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3207 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3827,27 +3764,8 @@ static struct ccw_driver qeth_ccw_driver = {
3827static int qeth_core_driver_group(const char *buf, struct device *root_dev, 3764static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3828 unsigned long driver_id) 3765 unsigned long driver_id)
3829{ 3766{
3830 const char *start, *end; 3767 return ccwgroup_create_from_string(root_dev, driver_id,
3831 char bus_ids[3][BUS_ID_SIZE], *argv[3]; 3768 &qeth_ccw_driver, 3, buf);
3832 int i;
3833
3834 start = buf;
3835 for (i = 0; i < 3; i++) {
3836 static const char delim[] = { ',', ',', '\n' };
3837 int len;
3838
3839 end = strchr(start, delim[i]);
3840 if (!end)
3841 return -EINVAL;
3842 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
3843 strncpy(bus_ids[i], start, len);
3844 bus_ids[i][len] = '\0';
3845 start = end + 1;
3846 argv[i] = bus_ids[i];
3847 }
3848
3849 return (ccwgroup_create(root_dev, driver_id,
3850 &qeth_ccw_driver, 3, argv));
3851} 3769}
3852 3770
3853int qeth_core_hardsetup_card(struct qeth_card *card) 3771int qeth_core_hardsetup_card(struct qeth_card *card)
@@ -3885,8 +3803,9 @@ retry:
3885 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3803 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3886 return rc; 3804 return rc;
3887 } 3805 }
3888 3806 mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
3889 mpno = QETH_MAX_PORTNO; 3807 if (mpno)
3808 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3890 if (card->info.portno > mpno) { 3809 if (card->info.portno > mpno) {
3891 PRINT_ERR("Device %s does not offer port number %d \n.", 3810 PRINT_ERR("Device %s does not offer port number %d \n.",
3892 CARD_BUS_ID(card), card->info.portno); 3811 CARD_BUS_ID(card), card->info.portno);
@@ -3980,7 +3899,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3980 int use_rx_sg = 0; 3899 int use_rx_sg = 0;
3981 int frag = 0; 3900 int frag = 0;
3982 3901
3983 QETH_DBF_TEXT(TRACE, 6, "nextskb");
3984 /* qeth_hdr must not cross element boundaries */ 3902 /* qeth_hdr must not cross element boundaries */
3985 if (element->length < offset + sizeof(struct qeth_hdr)) { 3903 if (element->length < offset + sizeof(struct qeth_hdr)) {
3986 if (qeth_is_last_sbale(element)) 3904 if (qeth_is_last_sbale(element))
@@ -4086,6 +4004,18 @@ static void qeth_unregister_dbf_views(void)
4086 } 4004 }
4087} 4005}
4088 4006
4007void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...)
4008{
4009 char dbf_txt_buf[32];
4010
4011 if (level > (qeth_dbf[dbf_nix].id)->level)
4012 return;
4013 snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text);
4014 debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
4015
4016}
4017EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
4018
4089static int qeth_register_dbf_views(void) 4019static int qeth_register_dbf_views(void)
4090{ 4020{
4091 int ret; 4021 int ret;
@@ -4433,6 +4363,96 @@ void qeth_core_get_drvinfo(struct net_device *dev,
4433} 4363}
4434EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); 4364EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4435 4365
4366int qeth_core_ethtool_get_settings(struct net_device *netdev,
4367 struct ethtool_cmd *ecmd)
4368{
4369 struct qeth_card *card = netdev_priv(netdev);
4370 enum qeth_link_types link_type;
4371
4372 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
4373 link_type = QETH_LINK_TYPE_10GBIT_ETH;
4374 else
4375 link_type = card->info.link_type;
4376
4377 ecmd->transceiver = XCVR_INTERNAL;
4378 ecmd->supported = SUPPORTED_Autoneg;
4379 ecmd->advertising = ADVERTISED_Autoneg;
4380 ecmd->duplex = DUPLEX_FULL;
4381 ecmd->autoneg = AUTONEG_ENABLE;
4382
4383 switch (link_type) {
4384 case QETH_LINK_TYPE_FAST_ETH:
4385 case QETH_LINK_TYPE_LANE_ETH100:
4386 ecmd->supported |= SUPPORTED_10baseT_Half |
4387 SUPPORTED_10baseT_Full |
4388 SUPPORTED_100baseT_Half |
4389 SUPPORTED_100baseT_Full |
4390 SUPPORTED_TP;
4391 ecmd->advertising |= ADVERTISED_10baseT_Half |
4392 ADVERTISED_10baseT_Full |
4393 ADVERTISED_100baseT_Half |
4394 ADVERTISED_100baseT_Full |
4395 ADVERTISED_TP;
4396 ecmd->speed = SPEED_100;
4397 ecmd->port = PORT_TP;
4398 break;
4399
4400 case QETH_LINK_TYPE_GBIT_ETH:
4401 case QETH_LINK_TYPE_LANE_ETH1000:
4402 ecmd->supported |= SUPPORTED_10baseT_Half |
4403 SUPPORTED_10baseT_Full |
4404 SUPPORTED_100baseT_Half |
4405 SUPPORTED_100baseT_Full |
4406 SUPPORTED_1000baseT_Half |
4407 SUPPORTED_1000baseT_Full |
4408 SUPPORTED_FIBRE;
4409 ecmd->advertising |= ADVERTISED_10baseT_Half |
4410 ADVERTISED_10baseT_Full |
4411 ADVERTISED_100baseT_Half |
4412 ADVERTISED_100baseT_Full |
4413 ADVERTISED_1000baseT_Half |
4414 ADVERTISED_1000baseT_Full |
4415 ADVERTISED_FIBRE;
4416 ecmd->speed = SPEED_1000;
4417 ecmd->port = PORT_FIBRE;
4418 break;
4419
4420 case QETH_LINK_TYPE_10GBIT_ETH:
4421 ecmd->supported |= SUPPORTED_10baseT_Half |
4422 SUPPORTED_10baseT_Full |
4423 SUPPORTED_100baseT_Half |
4424 SUPPORTED_100baseT_Full |
4425 SUPPORTED_1000baseT_Half |
4426 SUPPORTED_1000baseT_Full |
4427 SUPPORTED_10000baseT_Full |
4428 SUPPORTED_FIBRE;
4429 ecmd->advertising |= ADVERTISED_10baseT_Half |
4430 ADVERTISED_10baseT_Full |
4431 ADVERTISED_100baseT_Half |
4432 ADVERTISED_100baseT_Full |
4433 ADVERTISED_1000baseT_Half |
4434 ADVERTISED_1000baseT_Full |
4435 ADVERTISED_10000baseT_Full |
4436 ADVERTISED_FIBRE;
4437 ecmd->speed = SPEED_10000;
4438 ecmd->port = PORT_FIBRE;
4439 break;
4440
4441 default:
4442 ecmd->supported |= SUPPORTED_10baseT_Half |
4443 SUPPORTED_10baseT_Full |
4444 SUPPORTED_TP;
4445 ecmd->advertising |= ADVERTISED_10baseT_Half |
4446 ADVERTISED_10baseT_Full |
4447 ADVERTISED_TP;
4448 ecmd->speed = SPEED_10;
4449 ecmd->port = PORT_TP;
4450 }
4451
4452 return 0;
4453}
4454EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
4455
4436static int __init qeth_core_init(void) 4456static int __init qeth_core_init(void)
4437{ 4457{
4438 int rc; 4458 int rc;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3921d1631a78..86ec50ddae13 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -22,9 +22,6 @@
22#include "qeth_core.h" 22#include "qeth_core.h"
23#include "qeth_core_offl.h" 23#include "qeth_core_offl.h"
24 24
25#define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf
26static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
27
28static int qeth_l2_set_offline(struct ccwgroup_device *); 25static int qeth_l2_set_offline(struct ccwgroup_device *);
29static int qeth_l2_stop(struct net_device *); 26static int qeth_l2_stop(struct net_device *);
30static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); 27static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
@@ -635,8 +632,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
635 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 632 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
636 struct qeth_eddp_context *ctx = NULL; 633 struct qeth_eddp_context *ctx = NULL;
637 634
638 QETH_DBF_TEXT(TRACE, 6, "l2xmit");
639
640 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 635 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
641 card->stats.tx_carrier_errors++; 636 card->stats.tx_carrier_errors++;
642 goto tx_drop; 637 goto tx_drop;
@@ -658,9 +653,12 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
658 if (card->info.type == QETH_CARD_TYPE_OSN) 653 if (card->info.type == QETH_CARD_TYPE_OSN)
659 hdr = (struct qeth_hdr *)skb->data; 654 hdr = (struct qeth_hdr *)skb->data;
660 else { 655 else {
661 new_skb = qeth_prepare_skb(card, skb, &hdr); 656 /* create a clone with writeable headroom */
657 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr));
662 if (!new_skb) 658 if (!new_skb)
663 goto tx_drop; 659 goto tx_drop;
660 hdr = (struct qeth_hdr *)skb_push(new_skb,
661 sizeof(struct qeth_hdr));
664 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); 662 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
665 } 663 }
666 664
@@ -747,7 +745,6 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
747 int index; 745 int index;
748 int i; 746 int i;
749 747
750 QETH_DBF_TEXT(TRACE, 6, "qdinput");
751 card = (struct qeth_card *) card_ptr; 748 card = (struct qeth_card *) card_ptr;
752 net_dev = card->dev; 749 net_dev = card->dev;
753 if (card->options.performance_stats) { 750 if (card->options.performance_stats) {
@@ -852,6 +849,22 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
852 return; 849 return;
853} 850}
854 851
852static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
853{
854 struct qeth_card *card = netdev_priv(dev);
855
856 if (data) {
857 if (card->options.large_send == QETH_LARGE_SEND_NO) {
858 card->options.large_send = QETH_LARGE_SEND_EDDP;
859 dev->features |= NETIF_F_TSO;
860 }
861 } else {
862 dev->features &= ~NETIF_F_TSO;
863 card->options.large_send = QETH_LARGE_SEND_NO;
864 }
865 return 0;
866}
867
855static struct ethtool_ops qeth_l2_ethtool_ops = { 868static struct ethtool_ops qeth_l2_ethtool_ops = {
856 .get_link = ethtool_op_get_link, 869 .get_link = ethtool_op_get_link,
857 .get_tx_csum = ethtool_op_get_tx_csum, 870 .get_tx_csum = ethtool_op_get_tx_csum,
@@ -859,11 +872,12 @@ static struct ethtool_ops qeth_l2_ethtool_ops = {
859 .get_sg = ethtool_op_get_sg, 872 .get_sg = ethtool_op_get_sg,
860 .set_sg = ethtool_op_set_sg, 873 .set_sg = ethtool_op_set_sg,
861 .get_tso = ethtool_op_get_tso, 874 .get_tso = ethtool_op_get_tso,
862 .set_tso = ethtool_op_set_tso, 875 .set_tso = qeth_l2_ethtool_set_tso,
863 .get_strings = qeth_core_get_strings, 876 .get_strings = qeth_core_get_strings,
864 .get_ethtool_stats = qeth_core_get_ethtool_stats, 877 .get_ethtool_stats = qeth_core_get_ethtool_stats,
865 .get_stats_count = qeth_core_get_stats_count, 878 .get_stats_count = qeth_core_get_stats_count,
866 .get_drvinfo = qeth_core_get_drvinfo, 879 .get_drvinfo = qeth_core_get_drvinfo,
880 .get_settings = qeth_core_ethtool_get_settings,
867}; 881};
868 882
869static struct ethtool_ops qeth_l2_osn_ops = { 883static struct ethtool_ops qeth_l2_osn_ops = {
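qeth_l2_ethtool_set_tso() above couples the NETIF_F_TSO feature flag to the card's large-send mode: enabling TSO switches large_send to EDDP, disabling clears both. A toy user-space sketch of that toggle (the structure and constants below only mimic the driver's and are not its real definitions):

/* Stand-alone sketch of the TSO on/off toggle wired into ethtool by
 * the hunk above. */
#include <stdio.h>

#define NETIF_F_TSO 0x1

enum large_send { LARGE_SEND_NO, LARGE_SEND_EDDP };

struct card {
	unsigned int features;
	enum large_send large_send;
};

static int set_tso(struct card *c, int on)
{
	if (on) {
		if (c->large_send == LARGE_SEND_NO) {
			c->large_send = LARGE_SEND_EDDP;
			c->features |= NETIF_F_TSO;
		}
	} else {
		c->features &= ~NETIF_F_TSO;
		c->large_send = LARGE_SEND_NO;
	}
	return 0;
}

int main(void)
{
	struct card c = { 0, LARGE_SEND_NO };

	set_tso(&c, 1);
	printf("tso=%d large_send=%d\n",
	       !!(c.features & NETIF_F_TSO), (int)c.large_send);
	set_tso(&c, 0);
	printf("tso=%d large_send=%d\n",
	       !!(c.features & NETIF_F_TSO), (int)c.large_send);
	return 0;
}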
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 1be353593a59..9f143c83bba3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,9 +13,6 @@
13 13
14#include "qeth_core.h" 14#include "qeth_core.h"
15 15
16#define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf
17DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
18
19struct qeth_ipaddr { 16struct qeth_ipaddr {
20 struct list_head entry; 17 struct list_head entry;
21 enum qeth_ip_types type; 18 enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e1bfe56087d6..94a8ead64ed4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -28,8 +28,6 @@
28#include "qeth_l3.h" 28#include "qeth_l3.h"
29#include "qeth_core_offl.h" 29#include "qeth_core_offl.h"
30 30
31DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
32
33static int qeth_l3_set_offline(struct ccwgroup_device *); 31static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *); 32static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *); 33static int qeth_l3_stop(struct net_device *);
@@ -2093,6 +2091,11 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2093 (card->state == CARD_STATE_UP)) { 2091 (card->state == CARD_STATE_UP)) {
2094 if (recovery_mode) 2092 if (recovery_mode)
2095 qeth_l3_stop(card->dev); 2093 qeth_l3_stop(card->dev);
2094 else {
2095 rtnl_lock();
2096 dev_close(card->dev);
2097 rtnl_unlock();
2098 }
2096 if (!card->use_hard_stop) { 2099 if (!card->use_hard_stop) {
2097 rc = qeth_send_stoplan(card); 2100 rc = qeth_send_stoplan(card);
2098 if (rc) 2101 if (rc)
@@ -2559,8 +2562,6 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2559static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 2562static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2560 struct sk_buff *skb, int ipv, int cast_type) 2563 struct sk_buff *skb, int ipv, int cast_type)
2561{ 2564{
2562 QETH_DBF_TEXT(TRACE, 6, "fillhdr");
2563
2564 memset(hdr, 0, sizeof(struct qeth_hdr)); 2565 memset(hdr, 0, sizeof(struct qeth_hdr));
2565 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2566 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2566 hdr->hdr.l3.ext_flags = 0; 2567 hdr->hdr.l3.ext_flags = 0;
@@ -2570,9 +2571,10 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2570 * v6 uses passthrough, v4 sets the tag in the QDIO header. 2571 * v6 uses passthrough, v4 sets the tag in the QDIO header.
2571 */ 2572 */
2572 if (card->vlangrp && vlan_tx_tag_present(skb)) { 2573 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2573 hdr->hdr.l3.ext_flags = (ipv == 4) ? 2574 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2574 QETH_HDR_EXT_VLAN_FRAME : 2575 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2575 QETH_HDR_EXT_INCLUDE_VLAN_TAG; 2576 else
2577 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2576 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); 2578 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2577 } 2579 }
2578 2580
@@ -2638,8 +2640,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2638 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2640 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2639 struct qeth_eddp_context *ctx = NULL; 2641 struct qeth_eddp_context *ctx = NULL;
2640 2642
2641 QETH_DBF_TEXT(TRACE, 6, "l3xmit");
2642
2643 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2643 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2644 (skb->protocol != htons(ETH_P_IPV6)) && 2644 (skb->protocol != htons(ETH_P_IPV6)) &&
2645 (skb->protocol != htons(ETH_P_IP))) 2645 (skb->protocol != htons(ETH_P_IP)))
@@ -2890,6 +2890,7 @@ static struct ethtool_ops qeth_l3_ethtool_ops = {
2890 .get_ethtool_stats = qeth_core_get_ethtool_stats, 2890 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2891 .get_stats_count = qeth_core_get_stats_count, 2891 .get_stats_count = qeth_core_get_stats_count,
2892 .get_drvinfo = qeth_core_get_drvinfo, 2892 .get_drvinfo = qeth_core_get_drvinfo,
2893 .get_settings = qeth_core_ethtool_get_settings,
2893}; 2894};
2894 2895
2895/* 2896/*
@@ -2982,7 +2983,6 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2982 int index; 2983 int index;
2983 int i; 2984 int i;
2984 2985
2985 QETH_DBF_TEXT(TRACE, 6, "qdinput");
2986 card = (struct qeth_card *) card_ptr; 2986 card = (struct qeth_card *) card_ptr;
2987 net_dev = card->dev; 2987 net_dev = card->dev;
2988 if (card->options.performance_stats) { 2988 if (card->options.performance_stats) {
@@ -3140,9 +3140,15 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3140 netif_carrier_on(card->dev); 3140 netif_carrier_on(card->dev);
3141 3141
3142 qeth_set_allowed_threads(card, 0xffffffff, 0); 3142 qeth_set_allowed_threads(card, 0xffffffff, 0);
3143 if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) { 3143 if (recover_flag == CARD_STATE_RECOVER) {
3144 if (recovery_mode)
3144 qeth_l3_open(card->dev); 3145 qeth_l3_open(card->dev);
3145 qeth_l3_set_multicast_list(card->dev); 3146 else {
3147 rtnl_lock();
3148 dev_open(card->dev);
3149 rtnl_unlock();
3150 }
3151 qeth_l3_set_multicast_list(card->dev);
3146 } 3152 }
3147 /* let user_space know that device is online */ 3153 /* let user_space know that device is online */
3148 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3154 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);