author    Linus Torvalds <torvalds@g5.osdl.org>  2005-08-30 14:16:30 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-08-30 14:16:30 -0400
commit    6b39374a27eb4be7e9d82145ae270ba02ea90dc8 (patch)
tree      09933113cf28f253db1dd539463bdab741d67139 /drivers
parent    62c592edead3c3a045662595f7ade3c12f133373 (diff)
parent    ed735ccbefaf7e5e3ef61418f7e209b8c59308a7 (diff)

Merge refs/heads/upstream from master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig                       |   30
-rw-r--r--  drivers/net/Makefile                      |    2
-rw-r--r--  drivers/net/chelsio/Makefile              |   11
-rw-r--r--  drivers/net/chelsio/common.h              |  314
-rw-r--r--  drivers/net/chelsio/cphy.h                |  148
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h            |  145
-rw-r--r--  drivers/net/chelsio/cxgb2.c               | 1256
-rw-r--r--  drivers/net/chelsio/elmer0.h              |  151
-rw-r--r--  drivers/net/chelsio/espi.c                |  346
-rw-r--r--  drivers/net/chelsio/espi.h                |   68
-rw-r--r--  drivers/net/chelsio/gmac.h                |  134
-rw-r--r--  drivers/net/chelsio/mv88x201x.c           |  252
-rw-r--r--  drivers/net/chelsio/pm3393.c              |  826
-rw-r--r--  drivers/net/chelsio/regs.h                |  468
-rw-r--r--  drivers/net/chelsio/sge.c                 | 1684
-rw-r--r--  drivers/net/chelsio/sge.h                 |  105
-rw-r--r--  drivers/net/chelsio/subr.c                |  812
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h   |  213
-rw-r--r--  drivers/net/e100.c                        |  241
-rw-r--r--  drivers/net/sis190.c                      | 1843
-rw-r--r--  drivers/net/tulip/Kconfig                 |   12
-rw-r--r--  drivers/net/tulip/Makefile                |    1
-rw-r--r--  drivers/net/tulip/media.c                 |   36
-rw-r--r--  drivers/net/tulip/timer.c                 |    1
-rw-r--r--  drivers/net/tulip/tulip.h                 |    8
-rw-r--r--  drivers/net/tulip/tulip_core.c            |   34
-rw-r--r--  drivers/net/tulip/uli526x.c               | 1749
27 files changed, 10800 insertions, 90 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 79e8aa6f2b9e..e0239a10d325 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1923,6 +1923,17 @@ config R8169_VLAN
 
 	  If in doubt, say Y.
 
+config SIS190
+	tristate "SiS190 gigabit ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sis190.  This is recommended.
+
 config SKGE
 	tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
 	depends on PCI && EXPERIMENTAL
@@ -2093,6 +2104,25 @@ endmenu
 menu "Ethernet (10000 Mbit)"
 	depends on !UML
 
+config CHELSIO_T1
+	tristate "Chelsio 10Gb Ethernet support"
+	depends on PCI
+	help
+	  This driver supports the Chelsio N110 and N210 models of 10Gb
+	  Ethernet cards.  More information about adapter features and
+	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a369ae284a9a..5baafcd55610 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ endif
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS190) += sis190.o
 obj-$(CONFIG_SIS900) += sis900.o
 obj-$(CONFIG_YELLOWFIN) += yellowfin.o
 obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 000000000000..91e927827c43
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
+#
+# Chelsio 10Gb NIC driver for Linux.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
+
+
+cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
+
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 000000000000..f09348802b46
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
+/*****************************************************************************
+ *
+ * File: common.h
+ * $Revision: 1.21 $
+ * $Date: 2005/06/22 00:43:25 $
+ * Description:
+ *  part of the Chelsio 10Gb Ethernet Driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * http://www.chelsio.com
+ *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Maintainers: maintainers@chelsio.com
+ *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com>
+ *          Tina Yang <tainay@chelsio.com>
+ *          Felix Marti <felix@chelsio.com>
+ *          Scott Bardone <sbardone@chelsio.com>
+ *          Kurt Ottaway <kottaway@chelsio.com>
+ *          Frank DiMambro <frank@chelsio.com>
+ *
+ * History:
+ *
+ ****************************************************************************/
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.1.1"
+#define PFX      DRV_NAME ": "
+
+#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
+#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
+#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+void t1_elmer0_ext_intr(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
+		     int speed, int duplex, int fc);
+
+struct t1_rx_mode {
+	struct net_device *dev;
+	u32 idx;
+	struct dev_mc_list *list;
+};
+
+#define t1_rx_mode_promisc(rm)  (rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)   (rm->dev->mc_count)
+
+static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
+{
+	u8 *addr = NULL;
+
+	if (rm->idx++ < rm->dev->mc_count) {
+		addr = rm->list->dmi_addr;
+		rm->list = rm->list->next;
+	}
+	return addr;
+}
+
+#define MAX_NPORTS 4
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+	CHBT_BOARD_N110,
+	CHBT_BOARD_N210
+};
+
+enum {
+	CHBT_TERM_T1,
+	CHBT_TERM_T2
+};
+
+enum {
+	CHBT_MAC_PM3393,
+};
+
+enum {
+	CHBT_PHY_88X2010,
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+	TERM_T1A = 0,
+	TERM_T1B = 1,
+	TERM_T2  = 3
+};
+
+struct sge_params {
+	unsigned int cmdQ_size[2];
+	unsigned int freelQ_size[2];
+	unsigned int large_buf_capacity;
+	unsigned int rx_coalesce_usecs;
+	unsigned int last_rx_coalesce_raw;
+	unsigned int default_rx_coalesce_usecs;
+	unsigned int sample_interval_usecs;
+	unsigned int coalesce_enable;
+	unsigned int polling;
+};
+
+struct chelsio_pci_params {
+	unsigned short speed;
+	unsigned char  width;
+	unsigned char  is_pcix;
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct chelsio_pci_params pci;
+
+	const struct board_info *brd_info;
+
+	unsigned int   nports;          /* # of ethernet ports */
+	unsigned int   stats_update_period;
+	unsigned short chip_revision;
+	unsigned char  chip_version;
+};
+
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_duplex; /* duplex user has requested */
+	unsigned char  duplex;           /* actual link duplex */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+	struct net_device *dev;
+	struct cmac *mac;
+	struct cphy *phy;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+	u8 *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	u32 mmio_len;
+
+	struct work_struct ext_intr_handler_task;
+	struct adapter_params params;
+
+	struct vlan_group *vlan_grp;
+
+	/* Terminator modules. */
+	struct sge    *sge;
+	struct peespi *espi;
+
+	struct port_info port[MAX_NPORTS];
+	struct work_struct stats_update_task;
+	struct timer_list stats_update_timer;
+
+	struct semaphore mib_mutex;
+	spinlock_t tpi_lock;
+	spinlock_t work_lock;
+	/* guards async operations */
+	spinlock_t async_lock ____cacheline_aligned;
+	u32 slow_intr_mask;
+};
+
+enum {                       /* adapter flags */
+	FULL_INIT_DONE     = 1 << 0,
+	TSO_CAPABLE        = 1 << 2,
+	TCP_CSUM_CAPABLE   = 1 << 3,
+	UDP_CSUM_CAPABLE   = 1 << 4,
+	VLAN_ACCEL_CAPABLE = 1 << 5,
+	RX_CSUM_ENABLED    = 1 << 6,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+	unsigned char    board;
+	unsigned char    port_number;
+	unsigned long    caps;
+	unsigned char    chip_term;
+	unsigned char    chip_mac;
+	unsigned char    chip_phy;
+	unsigned int     clock_core;
+	unsigned int     clock_mc3;
+	unsigned int     clock_mc4;
+	unsigned int     espi_nports;
+	unsigned int     clock_cspi;
+	unsigned int     clock_elmer0;
+	unsigned char    mdio_mdien;
+	unsigned char    mdio_mdiinv;
+	unsigned char    mdio_mdc;
+	unsigned char    mdio_phybaseaddr;
+	struct gmac      *gmac;
+	struct gphy      *gphy;
+	struct mdio_ops  *mdio_ops;
+	const char       *desc;
+};
+
+extern struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+				       int version, int revision)
+{
+	return adapter->params.chip_version == version &&
+	       adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+	return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+	return board_info(adap)->clock_core / 1000000;
+}
+
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
+
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+							   unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+			    struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+
+extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
+
+#endif /* _CXGB_COMMON_H_ */
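
Illustrative sketch (not part of the patch): struct t1_rx_mode above wraps the
net_device receive-mode state so a MAC driver can walk the multicast list via
t1_get_next_mcaddr() without touching net_device internals. The consumption
pattern looks roughly like the following; example_set_rx_mode() is a
hypothetical name (the PM3393 MAC in pm3393.c implements the real hook):

/* Sketch: how a MAC's rx-mode hook consumes struct t1_rx_mode. */
static void example_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
{
	u8 *addr;

	if (t1_rx_mode_promisc(rm)) {
		/* accept all frames */
	} else if (t1_rx_mode_allmulti(rm)) {
		/* accept all multicast frames */
	} else {
		/* walk the list and hash each address into the filter */
		while ((addr = t1_get_next_mcaddr(rm)) != NULL) {
			/* program 'addr' (6 bytes) into the mcast filter */
		}
	}
}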
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 000000000000..3412342f7345
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
+/*****************************************************************************
+ *
+ * File: cphy.h
+ * $Revision: 1.7 $
+ * $Date: 2005/06/21 18:29:47 $
+ * Description:
+ *  part of the Chelsio 10Gb Ethernet Driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * http://www.chelsio.com
+ *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Maintainers: maintainers@chelsio.com
+ *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com>
+ *          Tina Yang <tainay@chelsio.com>
+ *          Felix Marti <felix@chelsio.com>
+ *          Scott Bardone <sbardone@chelsio.com>
+ *          Kurt Ottaway <kottaway@chelsio.com>
+ *          Frank DiMambro <frank@chelsio.com>
+ *
+ * History:
+ *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+	void (*init)(adapter_t *adapter, const struct board_info *bi);
+	int  (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		     int reg_addr, unsigned int *val);
+	int  (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		      int reg_addr, unsigned int val);
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 0x1,
+	cphy_cause_error       = 0x2
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+	void (*destroy)(struct cphy *);
+	int (*reset)(struct cphy *, int wait);
+
+	int (*interrupt_enable)(struct cphy *);
+	int (*interrupt_disable)(struct cphy *);
+	int (*interrupt_clear)(struct cphy *);
+	int (*interrupt_handler)(struct cphy *);
+
+	int (*autoneg_enable)(struct cphy *);
+	int (*autoneg_disable)(struct cphy *);
+	int (*autoneg_restart)(struct cphy *);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *, int on);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+};
+
+/* A PHY instance */
+struct cphy {
+	int addr;                        /* PHY address */
+	adapter_t *adapter;              /* associated adapter */
+	struct cphy_ops *ops;            /* PHY operations */
+	int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int *val);
+	int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			  int reg_addr, unsigned int val);
+	struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
+			    unsigned int *valp)
+{
+	return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
+			     unsigned int val)
+{
+	return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+				   unsigned int *valp)
+{
+	return mdio_read(cphy, 0, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+				    unsigned int val)
+{
+	return mdio_write(cphy, 0, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     struct mdio_ops *mdio_ops)
+{
+	phy->adapter = adapter;
+	phy->addr    = phy_addr;
+	phy->ops     = phy_ops;
+	if (mdio_ops) {
+		phy->mdio_read  = mdio_ops->read;
+		phy->mdio_write = mdio_ops->write;
+	}
+}
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+	/* Construct a PHY instance with the given PHY address */
+	struct cphy *(*create)(adapter_t *adapter, int phy_addr,
+			       struct mdio_ops *mdio_ops);
+
+	/*
+	 * Reset the PHY chip.  This resets the whole PHY chip, not individual
+	 * ports.
+	 */
+	int (*reset)(adapter_t *adapter);
+};
+
+extern struct gphy t1_mv88x201x_ops;
+extern struct gphy t1_dummy_phy_ops;
+
+#endif /* _CXGB_CPHY_H_ */
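
Illustrative sketch (not part of the patch): cphy.h splits per-port PHY state
(struct cphy) from the chip-wide factory (struct gphy). A PHY driver fills in
a cphy_ops table, allocates a cphy in its create hook, and lets cphy_init()
wire in the board's MDIO accessors; mv88x201x.c is the in-tree example. The
names my_phy_ops and my_phy_create below are hypothetical:

#include <linux/slab.h>
#include <linux/string.h>
#include "cphy.h"

static struct cphy_ops my_phy_ops = {
	/* .reset, .interrupt_enable, .get_link_status, ... go here */
};

static struct cphy *my_phy_create(adapter_t *adapter, int phy_addr,
				  struct mdio_ops *mdio_ops)
{
	struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);

	if (cphy) {
		memset(cphy, 0, sizeof(*cphy));
		/* bind adapter, PHY address, ops and MDIO accessors */
		cphy_init(cphy, adapter, phy_addr, &my_phy_ops, mdio_ops);
	}
	return cphy;
}

After creation, register access goes through the wrappers, e.g.
mdio_read(cphy, mmd, reg, &val) dispatches to the board's mdio_ops->read with
the PHY's stored address.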
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 000000000000..27925e487bcf
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
+/*****************************************************************************
+ *
+ * File: cpl5_cmd.h
+ * $Revision: 1.6 $
+ * $Date: 2005/06/21 18:29:47 $
+ * Description:
+ *  part of the Chelsio 10Gb Ethernet Driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * http://www.chelsio.com
+ *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Maintainers: maintainers@chelsio.com
+ *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com>
+ *          Tina Yang <tainay@chelsio.com>
+ *          Felix Marti <felix@chelsio.com>
+ *          Scott Bardone <sbardone@chelsio.com>
+ *          Kurt Ottaway <kottaway@chelsio.com>
+ *          Frank DiMambro <frank@chelsio.com>
+ *
+ * History:
+ *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+	CPL_RX_PKT     = 0xAD,
+	CPL_TX_PKT     = 0xB2,
+	CPL_TX_PKT_LSO = 0xB6,
+};
+
+enum {            /* TX_PKT_LSO ethernet types */
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+struct cpl_rx_data {
+	u32 rsvd0;
+	u32 len;
+	u32 seq;
+	u16 urg;
+	u8 rsvd1;
+	u8 status;
+};
+
+/*
+ * We want this header's alignment to be no more stringent than 2-byte aligned.
+ * All fields are u8 or u16 except for the length.  However that field is not
+ * used so we break it into 2 16-bit parts to easily meet our alignment needs.
+ */
+struct cpl_tx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u16 len_hi;
+	u16 len_lo;
+};
+
+struct cpl_tx_pkt_lso {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u32 len;
+
+	u32 rsvd2;
+	u8 rsvd3;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
+#else
+	u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
+#endif
+	u16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_valid:1;
+	u8 bad_pkt:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 bad_pkt:1;
+	u8 csum_valid:1;
+	u8 iff:4;
+#endif
+	u16 csum;
+	u16 vlan;
+	u16 len;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
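
Illustrative sketch (not part of the patch): the comment above explains why
cpl_tx_pkt carries its length as two u16 halves — the header then needs only
2-byte alignment, and the driver never reads the field back. A hypothetical
example_fill_tx_pkt() showing how the flag bits are used:

#include "cpl5_cmd.h"

static void example_fill_tx_pkt(struct cpl_tx_pkt *cpl, unsigned int ifidx,
				unsigned int len)
{
	cpl->opcode = CPL_TX_PKT;
	cpl->iff = ifidx;		/* interface the packet belongs to */
	cpl->ip_csum_dis = 1;		/* don't offload the IP checksum */
	cpl->l4_csum_dis = 1;		/* don't offload the TCP/UDP checksum */
	cpl->vlan_valid = 0;		/* no VLAN tag to insert */
	/*
	 * The split exists only to relax alignment; byte order here is
	 * illustrative, since per the comment above the field is unused.
	 */
	cpl->len_hi = (u16)(len >> 16);
	cpl->len_lo = (u16)len;
}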
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 000000000000..28ae478b386d
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
+/*****************************************************************************
+ *
+ * File: cxgb2.c
+ * $Revision: 1.25 $
+ * $Date: 2005/06/22 00:43:25 $
+ * Description:
+ *  Chelsio 10Gb Ethernet Driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * http://www.chelsio.com
+ *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Maintainers: maintainers@chelsio.com
+ *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com>
+ *          Tina Yang <tainay@chelsio.com>
+ *          Felix Marti <felix@chelsio.com>
+ *          Scott Bardone <sbardone@chelsio.com>
+ *          Kurt Ottaway <kottaway@chelsio.com>
+ *          Frank DiMambro <frank@chelsio.com>
+ *
+ * History:
+ *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+#ifdef work_struct
+#include <linux/tqueue.h>
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	del_timer_sync(&ap->stats_update_timer);
+	flush_scheduled_tasks();
+}
+
+/*
+ * Stats update timer for 2.4.  It schedules a task to do the actual update as
+ * we need to access MAC statistics in process context.
+ */
+static void mac_stats_timer(unsigned long data)
+{
+	struct adapter *ap = (struct adapter *)data;
+
+	schedule_task(&ap->stats_update_task);
+}
+#else
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+#endif
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH 16384U
+#define MAX_TX_BUFFERS_LOW 1536U
+#define MIN_FL_ENTRIES 32
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+MODULE_PARM(dflt_msg_enable, "i");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+
+
+static const char pci_speed[][4] = {
+	"33", "66", "100", "133"
+};
+
+/*
+ * Setup MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct t1_rx_mode rm;
+
+	rm.dev = dev;
+	rm.idx = 0;
+	rm.list = dev->mc_list;
+	mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+	if (!netif_carrier_ok(p->dev))
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
+	else {
+		const char *s = "10Mbps";
+
+		switch (p->link_config.speed) {
+		case SPEED_10000: s = "10Gbps"; break;
+		case SPEED_1000:  s = "1000Mbps"; break;
+		case SPEED_100:   s = "100Mbps"; break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		       p->dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+	}
+}
+
+void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+		     int speed, int duplex, int pause)
+{
+	struct port_info *p = &adapter->port[port_id];
+
+	if (link_stat != netif_carrier_ok(p->dev)) {
+		if (link_stat)
+			netif_carrier_on(p->dev);
+		else
+			netif_carrier_off(p->dev);
+		link_report(p);
+	}
+}
+
+static void link_start(struct port_info *p)
+{
+	struct cmac *mac = p->mac;
+
+	mac->ops->reset(mac);
+	if (mac->ops->macaddress_set)
+		mac->ops->macaddress_set(mac, p->dev->dev_addr);
+	t1_set_rxmode(p->dev);
+	t1_link_start(p->phy, mac, &p->link_config);
+	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+	if (adapter->flags & TSO_CAPABLE)
+		t1_tp_set_ip_checksum_offload(adapter, 1);  /* for TSO only */
+	t1_tp_set_tcp_checksum_offload(adapter, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = t1_init_hw_modules(adapter);
+		if (err)
+			goto out_err;
+
+		enable_hw_csum(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	t1_interrupts_clear(adapter);
+	if ((err = request_irq(adapter->pdev->irq,
+			       t1_select_intr_handler(adapter), SA_SHIRQ,
+			       adapter->name, adapter))) {
+		goto out_err;
+	}
+	t1_sge_start(adapter->sge);
+	t1_interrupts_enable(adapter);
+ out_err:
+	return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+	t1_sge_stop(adapter->sge);
+	t1_interrupts_disable(adapter);
+	free_irq(adapter->pdev->irq, adapter);
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct adapter *adapter = dev->priv;
+	int other_ports = adapter->open_device_map & PORT_MASK;
+
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	__set_bit(dev->if_port, &adapter->open_device_map);
+	link_start(&adapter->port[dev->if_port]);
+	netif_start_queue(dev);
+	if (!other_ports && adapter->params.stats_update_period)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct cmac *mac = p->mac;
+
+	netif_stop_queue(dev);
+	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+	netif_carrier_off(dev);
+
+	clear_bit(dev->if_port, &adapter->open_device_map);
+	if (adapter->params.stats_update_period &&
+	    !(adapter->open_device_map & PORT_MASK)) {
+		/* Stop statistics accumulation. */
+		smp_mb__after_clear_bit();
+		spin_lock(&adapter->work_lock);   /* sync with update task */
+		spin_unlock(&adapter->work_lock);
+		cancel_mac_stats_update(adapter);
+	}
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+	return 0;
+}
+
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct net_device_stats *ns = &p->netstats;
+	const struct cmac_statistics *pstats;
+
+	/* Do a full update of the MAC stats */
+	pstats = p->mac->ops->statistics_update(p->mac,
+						MAC_STATS_UPDATE_FULL);
+
+	ns->tx_packets = pstats->TxUnicastFramesOK +
+		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+	ns->rx_packets = pstats->RxUnicastFramesOK +
+		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+	ns->tx_bytes = pstats->TxOctetsOK;
+	ns->rx_bytes = pstats->RxOctetsOK;
+
+	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+		pstats->RxFCSErrors + pstats->RxAlignErrors +
+		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+		pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+	ns->multicast  = pstats->RxMulticastFramesOK;
+	ns->collisions = pstats->TxTotalCollisions;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+		pstats->RxJabberErrors;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = pstats->RxFCSErrors;
+	ns->rx_frame_errors  = pstats->RxAlignErrors;
+	ns->rx_fifo_errors   = 0;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = pstats->TxUnderrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = pstats->TxLateCollisions;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct adapter *adapter = dev->priv;
+
+	adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK",
+	"TxOctetsBad",
+	"TxUnicastFramesOK",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames",
+	"TxFramesWithDeferredXmissions",
+	"TxLateCollisions",
+	"TxTotalCollisions",
+	"TxFramesAbortedDueToXSCollisions",
+	"TxUnderrun",
+	"TxLengthErrors",
+	"TxInternalMACXmitError",
+	"TxFramesWithExcessiveDeferral",
+	"TxFCSErrors",
+
+	"RxOctetsOK",
+	"RxOctetsBad",
+	"RxUnicastFramesOK",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames",
+	"RxFCSErrors",
+	"RxAlignErrors",
+	"RxSymbolErrors",
+	"RxDataErrors",
+	"RxSequenceErrors",
+	"RxRuntErrors",
+	"RxJabberErrors",
+	"RxInternalMACRcvError",
+	"RxInRangeLengthErrors",
+	"RxOutOfRangeLengthField",
+	"RxFrameTooLongErrors",
+
+	"TSO",
+	"VLANextractions",
+	"VLANinsertions",
+	"RxCsumGood",
+	"TxCsumOffload",
+	"RxDrops",
+
+	"respQ_empty",
+	"respQ_overflow",
+	"freelistQ_empty",
+	"pkt_too_big",
+	"pkt_mismatch",
+	"cmdQ_full0",
+	"cmdQ_full1",
+	"tx_ipfrags",
+	"tx_reg_pkts",
+	"tx_lso_pkts",
+	"tx_do_cksum",
+
+	"espi_DIP2ParityErr",
+	"espi_DIP4Err",
+	"espi_RxDrops",
+	"espi_TxDrops",
+	"espi_RxOvfl",
+	"espi_ParityErr"
+};
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = dev->priv;
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, pci_name(adapter->pdev));
+}
+
+static int get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(stats_strings);
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	const struct cmac_statistics *s;
+	const struct sge_port_stats *ss;
+	const struct sge_intr_counts *t;
+
+	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+	t = t1_sge_get_intr_counts(adapter->sge);
+
+	*data++ = s->TxOctetsOK;
+	*data++ = s->TxOctetsBad;
+	*data++ = s->TxUnicastFramesOK;
+	*data++ = s->TxMulticastFramesOK;
+	*data++ = s->TxBroadcastFramesOK;
+	*data++ = s->TxPauseFrames;
+	*data++ = s->TxFramesWithDeferredXmissions;
+	*data++ = s->TxLateCollisions;
+	*data++ = s->TxTotalCollisions;
+	*data++ = s->TxFramesAbortedDueToXSCollisions;
+	*data++ = s->TxUnderrun;
+	*data++ = s->TxLengthErrors;
+	*data++ = s->TxInternalMACXmitError;
+	*data++ = s->TxFramesWithExcessiveDeferral;
+	*data++ = s->TxFCSErrors;
+
+	*data++ = s->RxOctetsOK;
+	*data++ = s->RxOctetsBad;
+	*data++ = s->RxUnicastFramesOK;
+	*data++ = s->RxMulticastFramesOK;
+	*data++ = s->RxBroadcastFramesOK;
+	*data++ = s->RxPauseFrames;
+	*data++ = s->RxFCSErrors;
+	*data++ = s->RxAlignErrors;
+	*data++ = s->RxSymbolErrors;
+	*data++ = s->RxDataErrors;
+	*data++ = s->RxSequenceErrors;
+	*data++ = s->RxRuntErrors;
+	*data++ = s->RxJabberErrors;
+	*data++ = s->RxInternalMACRcvError;
+	*data++ = s->RxInRangeLengthErrors;
+	*data++ = s->RxOutOfRangeLengthField;
+	*data++ = s->RxFrameTooLongErrors;
+
+	*data++ = ss->tso;
+	*data++ = ss->vlan_xtract;
+	*data++ = ss->vlan_insert;
+	*data++ = ss->rx_cso_good;
+	*data++ = ss->tx_cso;
+	*data++ = ss->rx_drops;
+
+	*data++ = (u64)t->respQ_empty;
+	*data++ = (u64)t->respQ_overflow;
+	*data++ = (u64)t->freelistQ_empty;
+	*data++ = (u64)t->pkt_too_big;
+	*data++ = (u64)t->pkt_mismatch;
+	*data++ = (u64)t->cmdQ_full[0];
+	*data++ = (u64)t->cmdQ_full[1];
+	*data++ = (u64)t->tx_ipfrags;
+	*data++ = (u64)t->tx_reg_pkts;
+	*data++ = (u64)t->tx_lso_pkts;
+	*data++ = (u64)t->tx_do_cksum;
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->priv;
+
+	/*
+	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+	 */
+	regs->version = 2;
+
+	memset(buf, 0, T2_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = p->link_config.speed;
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg))
+		return -EOPNOTSUPP;             /* can't change speed/duplex */
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+
+		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+			return -EINVAL;
+		lc->requested_speed = cmd->speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		if (cmd->advertising & (cmd->advertising - 1))
+			cmd->advertising = lc->supported;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t1_link_start(p->phy, p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t1_link_start(p->phy, p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+							 lc->fc);
+	}
+	return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return (adapter->flags & RX_CSUM_ENABLED) != 0;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (data)
+		adapter->flags |= RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~RX_CSUM_ENABLED;
+	return 0;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (!(adapter->flags & TSO_CAPABLE))
+		return value ? -EOPNOTSUPP : 0;
+	return ethtool_op_set_tso(dev, value);
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = 0;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+	e->rx_mini_pending = 0;
+	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+	e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_CMDQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+		MAX_CMDQ1_ENTRIES : e->tx_pending;
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	/*
+	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
+	 * This choice can be made only when all ports and the TOE are off.
+	 */
+	if (adapter->open_device_map == 0)
+		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
+
+	if (adapter->params.sge.polling) {
+		adapter->params.sge.rx_coalesce_usecs = 0;
+	} else {
+		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+	}
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+	return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROM_SIZE;
+}
+
+#define EEPROM_MAGIC(ap) \
+	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i;
+	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+	struct adapter *adapter = dev->priv;
+
+	e->magic = EEPROM_MAGIC(adapter);
+	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
+	memcpy(data, buf + e->offset, e->len);
+	return 0;
+}
+
+static struct ethtool_ops t1_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_rx_csum       = get_rx_csum,
+	.set_rx_csum       = set_rx_csum,
+	.get_tx_csum       = ethtool_op_get_tx_csum,
+	.set_tx_csum       = ethtool_op_set_tx_csum,
+	.get_sg            = ethtool_op_get_sg,
+	.set_sg            = ethtool_op_set_sg,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.get_stats_count   = get_stats_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+	.get_tso           = ethtool_op_get_tso,
+	.set_tso           = set_tso,
+};
+
+static void cxgb_proc_cleanup(struct adapter *adapter,
+			      struct proc_dir_entry *dir)
+{
+	const char *name;
+	name = adapter->name;
+	remove_proc_entry(name, dir);
+}
+//#define chtoe_setup_toedev(adapter) NULL
+#define update_mtu_tab(adapter)
+#define write_smt_entry(adapter, idx)
+
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->port[dev->if_port].phy->addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+		u32 val;
+
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
+		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			       &val);
+		data->val_out = val;
+		break;
+	}
+	case SIOCSMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
+		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+				data->val_in);
+		break;
+	}
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+
+	if (!mac->ops->set_mtu)
+		return -EOPNOTSUPP;
+	if (new_mtu < 68)
+		return -EINVAL;
+	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct sockaddr *addr = p;
+
+	if (!mac->ops->macaddress_set)
+		return -EOPNOTSUPP;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	mac->ops->macaddress_set(mac, dev->dev_addr);
+	return 0;
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void vlan_rx_register(struct net_device *dev,
+			     struct vlan_group *grp)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->vlan_grp = grp;
+	t1_set_vlan_accel(adapter, grp != NULL);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	if (adapter->vlan_grp)
+		adapter->vlan_grp->vlan_devices[vid] = NULL;
+	spin_unlock_irq(&adapter->async_lock);
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+	unsigned long flags;
+	struct adapter *adapter = dev->priv;
+
+	local_irq_save(flags);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
+	local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.  This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(void *data)
+{
+	int i;
+	struct adapter *adapter = data;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = &adapter->port[i];
+
+		if (netif_running(p->dev))
+			p->mac->ops->statistics_update(p->mac,
+						       MAC_STATS_UPDATE_FAST);
+	}
+
+	/* Schedule the next statistics update if any port is active. */
+	spin_lock(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(void *data)
+{
+	struct adapter *adapter = data;
+
+	elmer0_ext_intr_handler(adapter);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as we require
+	 * a process context.  We disable EXT interrupts in the interim
+	 * and let the task reenable them when it's done.
+	 */
+	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+	if (adapter->flags & FULL_INIT_DONE) {
+		t1_sge_stop(adapter->sge);
+		t1_interrupts_disable(adapter);
+	}
+	CH_ALERT("%s: encountered fatal error, operation suspended\n",
+		 adapter->name);
+}
995
996static int __devinit init_one(struct pci_dev *pdev,
997 const struct pci_device_id *ent)
998{
999 static int version_printed;
1000
1001 int i, err, pci_using_dac = 0;
1002 unsigned long mmio_start, mmio_len;
1003 const struct board_info *bi;
1004 struct adapter *adapter = NULL;
1005 struct port_info *pi;
1006
1007 if (!version_printed) {
1008 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1009 DRV_VERSION);
1010 ++version_printed;
1011 }
1012
1013 err = pci_enable_device(pdev);
1014 if (err)
1015 return err;
1016
1017 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1018 CH_ERR("%s: cannot find PCI device memory base address\n",
1019 pci_name(pdev));
1020 err = -ENODEV;
1021 goto out_disable_pdev;
1022 }
1023
1024 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1025 pci_using_dac = 1;
1026
1027 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1028 CH_ERR("%s: unable to obtain 64-bit DMA for"
1029 "consistent allocations\n", pci_name(pdev));
1030 err = -ENODEV;
1031 goto out_disable_pdev;
1032 }
1033
1034 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1035 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1036 goto out_disable_pdev;
1037 }
1038
1039 err = pci_request_regions(pdev, DRV_NAME);
1040 if (err) {
1041 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1042 goto out_disable_pdev;
1043 }
1044
1045 pci_set_master(pdev);
1046
1047 mmio_start = pci_resource_start(pdev, 0);
1048 mmio_len = pci_resource_len(pdev, 0);
1049 bi = t1_get_board_info(ent->driver_data);
1050
1051 for (i = 0; i < bi->port_number; ++i) {
1052 struct net_device *netdev;
1053
1054 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1055 if (!netdev) {
1056 err = -ENOMEM;
1057 goto out_free_dev;
1058 }
1059
1060 SET_MODULE_OWNER(netdev);
1061 SET_NETDEV_DEV(netdev, &pdev->dev);
1062
1063 if (!adapter) {
1064 adapter = netdev->priv;
1065 adapter->pdev = pdev;
1066 adapter->port[0].dev = netdev; /* so we don't leak it */
1067
1068 adapter->regs = ioremap(mmio_start, mmio_len);
1069 if (!adapter->regs) {
1070 CH_ERR("%s: cannot map device registers\n",
1071 pci_name(pdev));
1072 err = -ENOMEM;
1073 goto out_free_dev;
1074 }
1075
1076 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1077 err = -ENODEV; /* Can't handle this chip rev */
1078 goto out_free_dev;
1079 }
1080
1081 adapter->name = pci_name(pdev);
1082 adapter->msg_enable = dflt_msg_enable;
1083 adapter->mmio_len = mmio_len;
1084
1085 init_MUTEX(&adapter->mib_mutex);
1086 spin_lock_init(&adapter->tpi_lock);
1087 spin_lock_init(&adapter->work_lock);
1088 spin_lock_init(&adapter->async_lock);
1089
1090 INIT_WORK(&adapter->ext_intr_handler_task,
1091 ext_intr_task, adapter);
1092 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1093 adapter);
1094#ifdef work_struct
1095 init_timer(&adapter->stats_update_timer);
1096 adapter->stats_update_timer.function = mac_stats_timer;
1097 adapter->stats_update_timer.data =
1098 (unsigned long)adapter;
1099#endif
1100
1101 pci_set_drvdata(pdev, netdev);
1102 }
1103
1104 pi = &adapter->port[i];
1105 pi->dev = netdev;
1106 netif_carrier_off(netdev);
1107 netdev->irq = pdev->irq;
1108 netdev->if_port = i;
1109 netdev->mem_start = mmio_start;
1110 netdev->mem_end = mmio_start + mmio_len - 1;
1111 netdev->priv = adapter;
1112 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1113 netdev->features |= NETIF_F_LLTX;
1114
1115 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1116 if (pci_using_dac)
1117 netdev->features |= NETIF_F_HIGHDMA;
1118 if (vlan_tso_capable(adapter)) {
1119#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1120 adapter->flags |= VLAN_ACCEL_CAPABLE;
1121 netdev->features |=
1122 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1123 netdev->vlan_rx_register = vlan_rx_register;
1124 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1125#endif
1126 adapter->flags |= TSO_CAPABLE;
1127 netdev->features |= NETIF_F_TSO;
1128 }
1129
1130 netdev->open = cxgb_open;
1131 netdev->stop = cxgb_close;
1132 netdev->hard_start_xmit = t1_start_xmit;
1133 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1134 sizeof(struct cpl_tx_pkt_lso) :
1135 sizeof(struct cpl_tx_pkt);
1136 netdev->get_stats = t1_get_stats;
1137 netdev->set_multicast_list = t1_set_rxmode;
1138 netdev->do_ioctl = t1_ioctl;
1139 netdev->change_mtu = t1_change_mtu;
1140 netdev->set_mac_address = t1_set_mac_addr;
1141#ifdef CONFIG_NET_POLL_CONTROLLER
1142 netdev->poll_controller = t1_netpoll;
1143#endif
1144 netdev->weight = 64;
1145
1146 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1147 }
1148
1149 if (t1_init_sw_modules(adapter, bi) < 0) {
1150 err = -ENODEV;
1151 goto out_free_dev;
1152 }
1153
1154 /*
1155 * The card is now ready to go. If any errors occur during device
1156 * registration we do not fail the whole card but rather proceed only
1157 * with the ports we manage to register successfully. However we must
1158 * register at least one net device.
1159 */
1160 for (i = 0; i < bi->port_number; ++i) {
1161 err = register_netdev(adapter->port[i].dev);
1162 if (err)
1163 CH_WARN("%s: cannot register net device %s, skipping\n",
1164 pci_name(pdev), adapter->port[i].dev->name);
1165 else {
1166 /*
1167 * Change the name we use for messages to the name of
1168 * the first successfully registered interface.
1169 */
1170 if (!adapter->registered_device_map)
1171 adapter->name = adapter->port[i].dev->name;
1172
1173 __set_bit(i, &adapter->registered_device_map);
1174 }
1175 }
1176 if (!adapter->registered_device_map) {
1177 CH_ERR("%s: could not register any net devices\n",
1178 pci_name(pdev));
1179 goto out_release_adapter_res;
1180 }
1181
1182 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1183 bi->desc, adapter->params.chip_revision,
1184 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1185 adapter->params.pci.speed, adapter->params.pci.width);
1186 return 0;
1187
1188 out_release_adapter_res:
1189 t1_free_sw_modules(adapter);
1190 out_free_dev:
1191 if (adapter) {
1192		if (adapter->regs)
			iounmap(adapter->regs);
1193 for (i = bi->port_number - 1; i >= 0; --i)
1194 if (adapter->port[i].dev) {
1195 cxgb_proc_cleanup(adapter, proc_root_driver);
1196				free_netdev(adapter->port[i].dev);
1197 }
1198 }
1199 pci_release_regions(pdev);
1200 out_disable_pdev:
1201 pci_disable_device(pdev);
1202 pci_set_drvdata(pdev, NULL);
1203 return err;
1204}
1205
1206static inline void t1_sw_reset(struct pci_dev *pdev)
1207{
1208 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1209 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1210}
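/*
 * t1_sw_reset() works by cycling the function's PCI power state: assuming
 * A_PCICFG_PM_CSR addresses the standard power-management control/status
 * register, whose low two bits encode the power state, writing 3 enters
 * D3hot and writing 0 returns to D0; the D3hot -> D0 transition resets
 * the chip.
 */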
1211
1212static void __devexit remove_one(struct pci_dev *pdev)
1213{
1214 struct net_device *dev = pci_get_drvdata(pdev);
1215
1216 if (dev) {
1217 int i;
1218 struct adapter *adapter = dev->priv;
1219
1220 for_each_port(adapter, i)
1221 if (test_bit(i, &adapter->registered_device_map))
1222 unregister_netdev(adapter->port[i].dev);
1223
1224 t1_free_sw_modules(adapter);
1225 iounmap(adapter->regs);
1226 while (--i >= 0)
1227 if (adapter->port[i].dev) {
1228 cxgb_proc_cleanup(adapter, proc_root_driver);
1229			free_netdev(adapter->port[i].dev);
1230 }
1231 pci_release_regions(pdev);
1232 pci_disable_device(pdev);
1233 pci_set_drvdata(pdev, NULL);
1234 t1_sw_reset(pdev);
1235 }
1236}
1237
1238static struct pci_driver driver = {
1239 .name = DRV_NAME,
1240 .id_table = t1_pci_tbl,
1241 .probe = init_one,
1242 .remove = __devexit_p(remove_one),
1243};
1244
1245static int __init t1_init_module(void)
1246{
1247 return pci_module_init(&driver);
1248}
1249
1250static void __exit t1_cleanup_module(void)
1251{
1252 pci_unregister_driver(&driver);
1253}
1254
1255module_init(t1_init_module);
1256module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 000000000000..5590cb2dac19
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
1/*****************************************************************************
2 * *
3 * File: elmer0.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 22:49:43 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ELMER0_H_
40#define _CXGB_ELMER0_H_
41
42/* ELMER0 registers */
43#define A_ELMER0_VERSION 0x100000
44#define A_ELMER0_PHY_CFG 0x100004
45#define A_ELMER0_INT_ENABLE 0x100008
46#define A_ELMER0_INT_CAUSE 0x10000c
47#define A_ELMER0_GPI_CFG 0x100010
48#define A_ELMER0_GPI_STAT 0x100014
49#define A_ELMER0_GPO 0x100018
50#define A_ELMER0_PORT0_MI1_CFG 0x400000
51
52#define S_MI1_MDI_ENABLE 0
53#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
54#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
55
56#define S_MI1_MDI_INVERT 1
57#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
58#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
59
60#define S_MI1_PREAMBLE_ENABLE 2
61#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
62#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
63
64#define S_MI1_SOF 3
65#define M_MI1_SOF 0x3
66#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
67#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
68
69#define S_MI1_CLK_DIV 5
70#define M_MI1_CLK_DIV 0xff
71#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
72#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
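/*
 * Illustration of the accessor convention used throughout these headers:
 * V_ shifts a field value into position, F_ is the one-bit form, M_ is the
 * field mask, and G_ extracts a field again. For example, to build a MI1
 * config word and read the clock divider back out of it:
 *
 *	u32 cfg = F_MI1_PREAMBLE_ENABLE | V_MI1_CLK_DIV(63);
 *	unsigned int div = G_MI1_CLK_DIV(cfg);	(yields 63)
 */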
73
74#define A_ELMER0_PORT0_MI1_ADDR 0x400004
75
76#define S_MI1_REG_ADDR 0
77#define M_MI1_REG_ADDR 0x1f
78#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
79#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
80
81#define S_MI1_PHY_ADDR 5
82#define M_MI1_PHY_ADDR 0x1f
83#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
84#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
85
86#define A_ELMER0_PORT0_MI1_DATA 0x400008
87
88#define S_MI1_DATA 0
89#define M_MI1_DATA 0xffff
90#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
91#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
92
93#define A_ELMER0_PORT0_MI1_OP 0x40000c
94
95#define S_MI1_OP 0
96#define M_MI1_OP 0x3
97#define V_MI1_OP(x) ((x) << S_MI1_OP)
98#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
99
100#define S_MI1_ADDR_AUTOINC 2
101#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
102#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
103
104#define S_MI1_OP_BUSY 31
105#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
106#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
107
108#define A_ELMER0_PORT1_MI1_CFG 0x500000
109#define A_ELMER0_PORT1_MI1_ADDR 0x500004
110#define A_ELMER0_PORT1_MI1_DATA 0x500008
111#define A_ELMER0_PORT1_MI1_OP 0x50000c
112#define A_ELMER0_PORT2_MI1_CFG 0x600000
113#define A_ELMER0_PORT2_MI1_ADDR 0x600004
114#define A_ELMER0_PORT2_MI1_DATA 0x600008
115#define A_ELMER0_PORT2_MI1_OP 0x60000c
116#define A_ELMER0_PORT3_MI1_CFG 0x700000
117#define A_ELMER0_PORT3_MI1_ADDR 0x700004
118#define A_ELMER0_PORT3_MI1_DATA 0x700008
119#define A_ELMER0_PORT3_MI1_OP 0x70000c
120
121/* Simple bit definitions for the GPI and GPO registers. */
122#define ELMER0_GP_BIT0 0x0001
123#define ELMER0_GP_BIT1 0x0002
124#define ELMER0_GP_BIT2 0x0004
125#define ELMER0_GP_BIT3 0x0008
126#define ELMER0_GP_BIT4 0x0010
127#define ELMER0_GP_BIT5 0x0020
128#define ELMER0_GP_BIT6 0x0040
129#define ELMER0_GP_BIT7 0x0080
130#define ELMER0_GP_BIT8 0x0100
131#define ELMER0_GP_BIT9 0x0200
132#define ELMER0_GP_BIT10 0x0400
133#define ELMER0_GP_BIT11 0x0800
134#define ELMER0_GP_BIT12 0x1000
135#define ELMER0_GP_BIT13 0x2000
136#define ELMER0_GP_BIT14 0x4000
137#define ELMER0_GP_BIT15 0x8000
138#define ELMER0_GP_BIT16 0x10000
139#define ELMER0_GP_BIT17 0x20000
140#define ELMER0_GP_BIT18 0x40000
141#define ELMER0_GP_BIT19 0x80000
142
143#define MI1_OP_DIRECT_WRITE 1
144#define MI1_OP_DIRECT_READ 2
145
146#define MI1_OP_INDIRECT_ADDRESS 0
147#define MI1_OP_INDIRECT_WRITE 1
148#define MI1_OP_INDIRECT_READ_INC 2
149#define MI1_OP_INDIRECT_READ 3
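/*
 * Illustrative sequence for a direct MDIO read on port 0 built from the
 * definitions above; tpi_write()/tpi_read() are hypothetical stand-ins
 * for the driver's TPI register accessors:
 *
 *	tpi_write(A_ELMER0_PORT0_MI1_ADDR,
 *		  V_MI1_PHY_ADDR(phy) | V_MI1_REG_ADDR(reg));
 *	tpi_write(A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
 *	do {
 *		tpi_read(A_ELMER0_PORT0_MI1_OP, &v);
 *	} while (v & F_MI1_OP_BUSY);
 *	tpi_read(A_ELMER0_PORT0_MI1_DATA, &v);
 *	val = G_MI1_DATA(v);
 */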
150
151#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 000000000000..230642571c92
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
1/*****************************************************************************
2 * *
3 * File: espi.c *
4 * $Revision: 1.14 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "espi.h"
43
44struct peespi {
45 adapter_t *adapter;
46 struct espi_intr_counts intr_cnt;
47 u32 misc_ctrl;
48 spinlock_t lock;
49};
50
51#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
52 F_RAMPARITYERR | F_DIP2PARITYERR)
53#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
54 | F_MONITORED_INTERFACE)
55
56#define TRICN_CNFG 14
57#define TRICN_CMD_READ 0x11
58#define TRICN_CMD_WRITE 0x21
59#define TRICN_CMD_ATTEMPTS 10
60
61static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
62 int ch_addr, int reg_offset, u32 wr_data)
63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65
66 writel(V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE),
71 adapter->regs + A_ESPI_CMD_ADDR);
72 writel(0, adapter->regs + A_ESPI_GOSTAT);
73
74 do {
75 busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
76 } while (busy && --attempts);
77
78 if (busy)
79 CH_ERR("%s: TRICN write timed out\n", adapter->name);
80
81 return busy;
82}
83
84/* 1. Deassert rx_reset_core. */
85/* 2. Program TRICN_CNFG registers. */
86/* 3. Deassert rx_reset_link */
87static int tricn_init(adapter_t *adapter)
88{
89 int i = 0;
90 int sme = 1;
91 int stat = 0;
92 int timeout = 0;
93 int is_ready = 0;
94 int dynamic_deskew = 0;
95
96 if (dynamic_deskew)
97 sme = 0;
98
99
100 /* 1 */
 101	timeout = 1000;
 102	do {
 103		stat = readl(adapter->regs + A_ESPI_RX_RESET);
 104		is_ready = (stat & 0x4);
 105		timeout--;
 106		udelay(5);
 107	} while (!is_ready && timeout > 0);
 108	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
 109	if (!is_ready) {
 110		CH_ERR("ESPI: timeout waiting for rx_reset_core in tricn_init()\n");
 111		t1_fatal_err(adapter);
 112	}
114
115 /* 2 */
116 if (sme) {
117 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
119 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
120 }
 121	for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
 122	for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
 123	for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
 124	tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
 125	tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
 126	tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
 127	tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
 128	tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
129
130 /* 3 */
131 writel(0x3, adapter->regs + A_ESPI_RX_RESET);
132
133 return 0;
134}
135
136void t1_espi_intr_enable(struct peespi *espi)
137{
138 u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
139
140 /*
141 * Cannot enable ESPI interrupts on T1B because HW asserts the
142 * interrupt incorrectly, namely the driver gets ESPI interrupts
143 * but no data is actually dropped (can verify this reading the ESPI
144 * drop registers). Also, once the ESPI interrupt is asserted it
145 * cannot be cleared (HW bug).
146 */
147 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
148 writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
149 writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
150}
151
152void t1_espi_intr_clear(struct peespi *espi)
153{
154 writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
155 writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
156}
157
158void t1_espi_intr_disable(struct peespi *espi)
159{
160 u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
161
162 writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
163 writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
164}
165
166int t1_espi_intr_handler(struct peespi *espi)
167{
168 u32 cnt;
169 u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
170
171 if (status & F_DIP4ERR)
172 espi->intr_cnt.DIP4_err++;
173 if (status & F_RXDROP)
174 espi->intr_cnt.rx_drops++;
175 if (status & F_TXDROP)
176 espi->intr_cnt.tx_drops++;
177 if (status & F_RXOVERFLOW)
178 espi->intr_cnt.rx_ovflw++;
179 if (status & F_RAMPARITYERR)
180 espi->intr_cnt.parity_err++;
181 if (status & F_DIP2PARITYERR) {
182 espi->intr_cnt.DIP2_parity_err++;
183
184 /*
185 * Must read the error count to clear the interrupt
186 * that it causes.
187 */
188 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
189 }
190
191 /*
192 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
193 * write the status as is.
194 */
195 if (status && t1_is_T1B(espi->adapter))
196 status = 1;
197 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
198 return 0;
199}
200
201const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
202{
203 return &espi->intr_cnt;
204}
205
206static void espi_setup_for_pm3393(adapter_t *adapter)
207{
208 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
209
210 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
211 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
212 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
213 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
214 writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
215 writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
216 writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
217 writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
218 writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
219}
220
221/* T2 Init part -- */
222/* 1. Set T_ESPI_MISCCTRL_ADDR */
223/* 2. Init ESPI registers. */
224/* 3. Init TriCN Hard Macro */
225int t1_espi_init(struct peespi *espi, int mac_type, int nports)
226{
 227	u32 cnt;
 228	u32 status_enable_extra = 0;
 229	adapter_t *adapter = espi->adapter;
 230	u32 status, burstval = 0x800100;
232
233 /* Disable ESPI training. MACs that can handle it enable it below. */
234 writel(0, adapter->regs + A_ESPI_TRAIN);
235
236 if (is_T2(adapter)) {
237 writel(V_OUT_OF_SYNC_COUNT(4) |
238 V_DIP2_PARITY_ERR_THRES(3) |
239 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
240 if (nports == 4) {
241 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
242 burstval = 0x200040;
243 }
244 }
245 writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
246
247 switch (mac_type) {
248 case CHBT_MAC_PM3393:
249 espi_setup_for_pm3393(adapter);
250 break;
251 default:
252 return -1;
253 }
254
255 /*
256 * Make sure any pending interrupts from the SPI are
 257	 * cleared before enabling the interrupt.
258 */
259 writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
260 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
261 if (status & F_DIP2PARITYERR) {
262 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
263 }
264
265 /*
266 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
267 * write the status as is.
268 */
269 if (status && t1_is_T1B(espi->adapter))
270 status = 1;
271 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
272
273 writel(status_enable_extra | F_RXSTATUSENABLE,
274 adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
275
276 if (is_T2(adapter)) {
277 tricn_init(adapter);
278 /*
279 * Always position the control at the 1st port egress IN
280 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
281 */
282 espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
283 & ~MON_MASK) | (F_MONITORED_DIRECTION
284 | F_MONITORED_INTERFACE);
285 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
286 spin_lock_init(&espi->lock);
287 }
288
289 return 0;
290}
291
292void t1_espi_destroy(struct peespi *espi)
293{
294 kfree(espi);
295}
296
297struct peespi *t1_espi_create(adapter_t *adapter)
298{
 299	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
 300
 301	if (espi) {
 302		memset(espi, 0, sizeof(*espi));
 303		espi->adapter = adapter;
 304	}
 305	return espi;
306}
307
308void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
309{
310 struct peespi *espi = adapter->espi;
311
312 if (!is_T2(adapter))
313 return;
314 spin_lock(&espi->lock);
315 espi->misc_ctrl = (val & ~MON_MASK) |
316 (espi->misc_ctrl & MON_MASK);
317 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
318 spin_unlock(&espi->lock);
319}
320
321u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
322{
323 u32 sel;
324
325 struct peespi *espi = adapter->espi;
326
327 if (!is_T2(adapter))
328 return 0;
329 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
 330	if (!wait) {
 331		if (!spin_trylock(&espi->lock))
 332			return 0;
 333	} else
 334		spin_lock(&espi->lock);
 335	if (sel != (espi->misc_ctrl & MON_MASK)) {
 336		writel((espi->misc_ctrl & ~MON_MASK) | sel,
 337		       adapter->regs + A_ESPI_MISC_CONTROL);
 338		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 339		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
 340	} else
 341		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
344 spin_unlock(&espi->lock);
345 return sel;
346}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 000000000000..c90e37f8457c
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
1/*****************************************************************************
2 * *
3 * File: espi.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ESPI_H_
40#define _CXGB_ESPI_H_
41
42#include "common.h"
43
44struct espi_intr_counts {
45 unsigned int DIP4_err;
46 unsigned int rx_drops;
47 unsigned int tx_drops;
48 unsigned int rx_ovflw;
49 unsigned int parity_err;
50 unsigned int DIP2_parity_err;
51};
52
53struct peespi;
54
55struct peespi *t1_espi_create(adapter_t *adapter);
56void t1_espi_destroy(struct peespi *espi);
57int t1_espi_init(struct peespi *espi, int mac_type, int nports);
58
59void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *);
63const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
64
65void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
66u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
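/*
 * Typical lifecycle of this interface, sketched with error handling
 * elided (CHBT_MAC_PM3393 is the MAC type handled by t1_espi_init()):
 *
 *	struct peespi *espi = t1_espi_create(adapter);
 *
 *	if (espi && t1_espi_init(espi, CHBT_MAC_PM3393, nports) == 0)
 *		t1_espi_intr_enable(espi);
 *	...
 *	t1_espi_intr_disable(espi);
 *	t1_espi_destroy(espi);
 */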
67
68#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 000000000000..746b0eeea964
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
1/*****************************************************************************
2 * *
3 * File: gmac.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef _CXGB_GMAC_H_
41#define _CXGB_GMAC_H_
42
43#include "common.h"
44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
47
48struct cmac_statistics {
49 /* Transmit */
50 u64 TxOctetsOK;
51 u64 TxOctetsBad;
52 u64 TxUnicastFramesOK;
53 u64 TxMulticastFramesOK;
54 u64 TxBroadcastFramesOK;
55 u64 TxPauseFrames;
56 u64 TxFramesWithDeferredXmissions;
57 u64 TxLateCollisions;
58 u64 TxTotalCollisions;
59 u64 TxFramesAbortedDueToXSCollisions;
60 u64 TxUnderrun;
61 u64 TxLengthErrors;
62 u64 TxInternalMACXmitError;
63 u64 TxFramesWithExcessiveDeferral;
64 u64 TxFCSErrors;
65
66 /* Receive */
67 u64 RxOctetsOK;
68 u64 RxOctetsBad;
69 u64 RxUnicastFramesOK;
70 u64 RxMulticastFramesOK;
71 u64 RxBroadcastFramesOK;
72 u64 RxPauseFrames;
73 u64 RxFCSErrors;
74 u64 RxAlignErrors;
75 u64 RxSymbolErrors;
76 u64 RxDataErrors;
77 u64 RxSequenceErrors;
78 u64 RxRuntErrors;
79 u64 RxJabberErrors;
80 u64 RxInternalMACRcvError;
81 u64 RxInRangeLengthErrors;
82 u64 RxOutOfRangeLengthField;
83 u64 RxFrameTooLongErrors;
84};
85
86struct cmac_ops {
87 void (*destroy)(struct cmac *);
88 int (*reset)(struct cmac *);
89 int (*interrupt_enable)(struct cmac *);
90 int (*interrupt_disable)(struct cmac *);
91 int (*interrupt_clear)(struct cmac *);
92 int (*interrupt_handler)(struct cmac *);
93
94 int (*enable)(struct cmac *, int);
95 int (*disable)(struct cmac *, int);
96
97 int (*loopback_enable)(struct cmac *);
98 int (*loopback_disable)(struct cmac *);
99
100 int (*set_mtu)(struct cmac *, int mtu);
101 int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
102
103 int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
104 int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
105 int *fc);
106
107 const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
108
109 int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
110 int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
111};
112
113typedef struct _cmac_instance cmac_instance;
114
115struct cmac {
116 struct cmac_statistics stats;
117 adapter_t *adapter;
118 struct cmac_ops *ops;
119 cmac_instance *instance;
120};
121
122struct gmac {
123 unsigned int stats_update_period;
124 struct cmac *(*create)(adapter_t *adapter, int index);
125 int (*reset)(adapter_t *);
126};
127
128extern struct gmac t1_pm3393_ops;
129extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops;
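/*
 * Callers dispatch through these ops tables rather than touching MAC
 * registers directly. A sketch of the pattern, using the PM3393 table
 * that backs this driver's 10Gb boards:
 *
 *	struct cmac *mac = t1_pm3393_ops.create(adapter, 0);
 *
 *	mac->ops->set_mtu(mac, 1500);
 *	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 *	...
 *	mac->ops->destroy(mac);
 */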
133
134#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 000000000000..db5034282782
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
1/*****************************************************************************
2 * *
3 * File: mv88x201x.c *
4 * $Revision: 1.12 $ *
5 * $Date: 2005/04/15 19:27:14 $ *
6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "cphy.h"
41#include "elmer0.h"
42
43/*
44 * The 88x2010 Rev C. requires some link status registers to be read
45 * twice in order to get the right values. Future revisions will fix
46 * this problem and then this macro can disappear.
47 */
48#define MV88x2010_LINK_STATUS_BUGS 1
49
50static int led_init(struct cphy *cphy)
51{
 52	/* Set up the LED registers so we can turn the LEDs on/off.
 53	 * Writing these bits maps LED control to another
 54	 * register: mmd(0x1) addr(0x7).
 55	 */
56 mdio_write(cphy, 0x3, 0x8304, 0xdddd);
57 return 0;
58}
59
60static int led_link(struct cphy *cphy, u32 do_enable)
61{
62 u32 led = 0;
63#define LINK_ENABLE_BIT 0x1
64
65 mdio_read(cphy, 0x1, 0x7, &led);
66
67 if (do_enable & LINK_ENABLE_BIT) {
68 led |= LINK_ENABLE_BIT;
69 mdio_write(cphy, 0x1, 0x7, led);
70 } else {
71 led &= ~LINK_ENABLE_BIT;
72 mdio_write(cphy, 0x1, 0x7, led);
73 }
74 return 0;
75}
76
77/* Port Reset */
78static int mv88x201x_reset(struct cphy *cphy, int wait)
79{
80 /* This can be done through registers. It is not required since
81 * a full chip reset is used.
82 */
83 return 0;
84}
85
86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{
88 u32 elmer;
89
90 /* Enable PHY LASI interrupts. */
91 mdio_write(cphy, 0x1, 0x9002, 0x1);
92
93 /* Enable Marvell interrupts through Elmer0. */
94 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
95 elmer |= ELMER0_GP_BIT6;
96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
97 return 0;
98}
99
100static int mv88x201x_interrupt_disable(struct cphy *cphy)
101{
102 u32 elmer;
103
104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106
107 /* Disable Marvell interrupts through Elmer0. */
108 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
109 elmer &= ~ELMER0_GP_BIT6;
110 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
111 return 0;
112}
113
114static int mv88x201x_interrupt_clear(struct cphy *cphy)
115{
116 u32 elmer;
117 u32 val;
118
119#ifdef MV88x2010_LINK_STATUS_BUGS
 120	/* Required to read twice before the clear takes effect. */
121 mdio_read(cphy, 0x1, 0x9003, &val);
122 mdio_read(cphy, 0x1, 0x9004, &val);
123 mdio_read(cphy, 0x1, 0x9005, &val);
124
 125	/* Read this register after the others above it, else
126 * the register doesn't clear correctly.
127 */
128 mdio_read(cphy, 0x1, 0x1, &val);
129#endif
130
131 /* Clear link status. */
132 mdio_read(cphy, 0x1, 0x1, &val);
133 /* Clear PHY LASI interrupts. */
134 mdio_read(cphy, 0x1, 0x9005, &val);
135
136#ifdef MV88x2010_LINK_STATUS_BUGS
137 /* Do it again. */
138 mdio_read(cphy, 0x1, 0x9003, &val);
139 mdio_read(cphy, 0x1, 0x9004, &val);
140#endif
141
142 /* Clear Marvell interrupts through Elmer0. */
143 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
144 elmer |= ELMER0_GP_BIT6;
145 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
146 return 0;
147}
148
149static int mv88x201x_interrupt_handler(struct cphy *cphy)
150{
151 /* Clear interrupts */
152 mv88x201x_interrupt_clear(cphy);
153
154 /* We have only enabled link change interrupts and so
155 * cphy_cause must be a link change interrupt.
156 */
157 return cphy_cause_link_change;
158}
159
160static int mv88x201x_set_loopback(struct cphy *cphy, int on)
161{
162 return 0;
163}
164
165static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
166 int *speed, int *duplex, int *fc)
167{
168 u32 val = 0;
169#define LINK_STATUS_BIT 0x4
170
171 if (link_ok) {
172 /* Read link status. */
173 mdio_read(cphy, 0x1, 0x1, &val);
174 val &= LINK_STATUS_BIT;
175 *link_ok = (val == LINK_STATUS_BIT);
176 /* Turn on/off Link LED */
177 led_link(cphy, *link_ok);
178 }
179 if (speed)
180 *speed = SPEED_10000;
181 if (duplex)
182 *duplex = DUPLEX_FULL;
183 if (fc)
184 *fc = PAUSE_RX | PAUSE_TX;
185 return 0;
186}
187
188static void mv88x201x_destroy(struct cphy *cphy)
189{
190 kfree(cphy);
191}
192
193static struct cphy_ops mv88x201x_ops = {
194 .destroy = mv88x201x_destroy,
195 .reset = mv88x201x_reset,
196 .interrupt_enable = mv88x201x_interrupt_enable,
197 .interrupt_disable = mv88x201x_interrupt_disable,
198 .interrupt_clear = mv88x201x_interrupt_clear,
199 .interrupt_handler = mv88x201x_interrupt_handler,
200 .get_link_status = mv88x201x_get_link_status,
201 .set_loopback = mv88x201x_set_loopback,
202};
203
204static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
205 struct mdio_ops *mdio_ops)
206{
207 u32 val;
208 struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
209
210 if (!cphy)
211 return NULL;
212 memset(cphy, 0, sizeof(*cphy));
213 cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
214
 215	/* Command the PHY to enable the XFP's clock. */
216 mdio_read(cphy, 0x3, 0x8300, &val);
217 mdio_write(cphy, 0x3, 0x8300, val | 1);
218
219 /* Clear link status. Required because of a bug in the PHY. */
220 mdio_read(cphy, 0x1, 0x8, &val);
221 mdio_read(cphy, 0x3, 0x8, &val);
222
223 /* Allows for Link,Ack LED turn on/off */
224 led_init(cphy);
225 return cphy;
226}
227
228/* Chip Reset */
229static int mv88x201x_phy_reset(adapter_t *adapter)
230{
231 u32 val;
232
233 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
234 val &= ~4;
235 t1_tpi_write(adapter, A_ELMER0_GPO, val);
236 msleep(100);
237
238 t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
239 msleep(1000);
240
 241	/* Now let's enable the laser. Delay 100us. */
242 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
243 val |= 0x8000;
244 t1_tpi_write(adapter, A_ELMER0_GPO, val);
245 udelay(100);
246 return 0;
247}
248
249struct gphy t1_mv88x201x_ops = {
250 mv88x201x_phy_create,
251 mv88x201x_phy_reset
252};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 000000000000..04a1404fc65e
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
1/*****************************************************************************
2 * *
3 * File: pm3393.c *
4 * $Revision: 1.16 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "gmac.h"
43#include "elmer0.h"
44#include "suni1x10gexp_regs.h"
45
46/* 802.3ae 10Gb/s MDIO Manageable Devices (MMDs). */
48enum {
49 MMD_RESERVED,
50 MMD_PMAPMD,
51 MMD_WIS,
52 MMD_PCS,
53 MMD_PHY_XGXS, /* XGMII Extender Sublayer */
54 MMD_DTE_XGXS,
55};
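/*
 * These values are the MMD addresses passed as the second argument of
 * mdio_read()/mdio_write(); e.g. the LASI status read in mv88x201x.c,
 * mdio_read(cphy, 0x1, 0x9005, &val), addresses MMD_PMAPMD.
 */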
56
57enum {
58 PHY_XGXS_CTRL_1,
59 PHY_XGXS_STATUS_1
60};
61
62#define OFFSET(REG_ADDR) (REG_ADDR << 2)
63
64/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
65#define MAX_FRAME_SIZE 9600
66
67#define IPG 12
68#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
69 SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
70 SUNI1x10GEXP_BITMSK_TXXG_PADEN)
71#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
72 SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
73
74/* Update statistics every 15 minutes */
75#define STATS_TICK_SECS (15 * 60)
76
77enum { /* RMON registers */
78 RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
79 RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
80 RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
81 RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
82 RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
83 RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
84 RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
85 RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
86 RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
87 RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
88 RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
89 RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
90 RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
91
92 TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
93 TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
94 TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
95 TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
96 TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
97 TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
98 TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
99};
100
101struct _cmac_instance {
102 u8 enabled;
103 u8 fc;
104 u8 mac_addr[6];
105};
106
107static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
108{
109 t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
110 return 0;
111}
112
113static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
114{
115 t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
116 return 0;
117}
118
119/* Port reset. */
120static int pm3393_reset(struct cmac *cmac)
121{
122 return 0;
123}
124
125/*
126 * Enable interrupts for the PM3393:
127 *
128 * 1. Enable PM3393 BLOCK interrupts.
129 * 2. Enable the PM3393 master interrupt bit (INTE).
130 * 3. Enable ELMER's PM3393 bit.
131 * 4. Enable the Terminator external interrupt.
132 */
133static int pm3393_interrupt_enable(struct cmac *cmac)
134{
135 u32 pl_intr;
136
137 /* PM3393 - Enabling all hardware block interrupts.
138 */
139 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
140 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
141 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
142 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
143
144 /* Don't interrupt on statistics overflow, we are polling */
145 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
146 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
147 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
148 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
149
150 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
151 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
152 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
153 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
154 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
155 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
156 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
157 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
158 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
159
160 /* PM3393 - Global interrupt enable
161 */
162 /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
163 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
164 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
165
166 /* TERMINATOR - PL_INTERUPTS_EXT */
167 pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
168 pl_intr |= F_PL_INTR_EXT;
169 writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
170 return 0;
171}
172
173static int pm3393_interrupt_disable(struct cmac *cmac)
174{
175 u32 elmer;
176
 177	/* PM3393 - Disable all hardware block interrupts. */
178 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
179 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
180 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
181 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
182 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
183 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
184 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
185 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
186 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
187 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
188 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
189 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
190 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
191 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
192 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
193 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
194 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
195
196 /* PM3393 - Global interrupt enable */
197 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
198
199 /* ELMER - External chip interrupts. */
200 t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
201 elmer &= ~ELMER0_GP_BIT1;
202 t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
203
204 /* TERMINATOR - PL_INTERUPTS_EXT */
205 /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
206 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
207 */
208
209 return 0;
210}
211
212static int pm3393_interrupt_clear(struct cmac *cmac)
213{
214 u32 elmer;
215 u32 pl_intr;
216 u32 val32;
217
218 /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
219 * bit WCIMODE=0 for a clear-on-read.
220 */
221 pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
222 pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
223 pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
224 pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
225 pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
226 pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
227 pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
228 pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
229 pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
230 pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
231 pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
232 pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
233 &val32);
234 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
235 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
236
237 /* PM3393 - Global interrupt status
238 */
239 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
240
241 /* ELMER - External chip interrupts.
242 */
243 t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
244 elmer |= ELMER0_GP_BIT1;
245 t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
246
247 /* TERMINATOR - PL_INTERUPTS_EXT
248 */
249 pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
250 pl_intr |= F_PL_INTR_EXT;
251 writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
252
253 return 0;
254}
255
256/* Interrupt handler */
257static int pm3393_interrupt_handler(struct cmac *cmac)
258{
259 u32 master_intr_status;
260/*
261 1. Read master interrupt register.
262 2. Read BLOCK's interrupt status registers.
263 3. Handle BLOCK interrupts.
264*/
265 /* Read the master interrupt status register. */
266 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
267 &master_intr_status);
268
269 /* TBD XXX Lets just clear everything for now */
270 pm3393_interrupt_clear(cmac);
271
272 return 0;
273}
274
275static int pm3393_enable(struct cmac *cmac, int which)
276{
277 if (which & MAC_DIRECTION_RX)
278 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
279 (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
280
281 if (which & MAC_DIRECTION_TX) {
282 u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
283
284 if (cmac->instance->fc & PAUSE_RX)
285 val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
286 if (cmac->instance->fc & PAUSE_TX)
287 val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
288 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
289 }
290
291 cmac->instance->enabled |= which;
292 return 0;
293}
294
295static int pm3393_enable_port(struct cmac *cmac, int which)
296{
297 /* Clear port statistics */
298 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
299 SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
300 udelay(2);
301 memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
302
303 pm3393_enable(cmac, which);
304
305 /*
 306	 * XXX This should be done by the PHY and preferably not at all.
307 * The PHY doesn't give us link status indication on its own so have
308 * the link management code query it instead.
309 */
310 {
311 extern void link_changed(adapter_t *adapter, int port_id);
312
313 link_changed(cmac->adapter, 0);
314 }
315 return 0;
316}
317
318static int pm3393_disable(struct cmac *cmac, int which)
319{
320 if (which & MAC_DIRECTION_RX)
321 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
322 if (which & MAC_DIRECTION_TX)
323 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
324
325 /*
326 * The disable is graceful. Give the PM3393 time. Can't wait very
327 * long here, we may be holding locks.
328 */
329 udelay(20);
330
331 cmac->instance->enabled &= ~which;
332 return 0;
333}
334
335static int pm3393_loopback_enable(struct cmac *cmac)
336{
337 return 0;
338}
339
340static int pm3393_loopback_disable(struct cmac *cmac)
341{
342 return 0;
343}
344
345static int pm3393_set_mtu(struct cmac *cmac, int mtu)
346{
347 int enabled = cmac->instance->enabled;
348
 349	/* MAX_FRAME_SIZE includes the 14-byte Ethernet header and 4-byte FCS; mtu doesn't. */
350 mtu += 14 + 4;
351 if (mtu > MAX_FRAME_SIZE)
352 return -EINVAL;
353
354 /* Disable Rx/Tx MAC before configuring it. */
355 if (enabled)
356 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
357
358 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
359 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
360
361 if (enabled)
362 pm3393_enable(cmac, enabled);
363 return 0;
364}
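/*
 * Worked example: with MAX_FRAME_SIZE at 9600, the largest MTU accepted
 * here is 9600 - (14 + 4) = 9582 bytes, i.e. a jumbo frame less the
 * Ethernet header and FCS.
 */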
365
366static u32 calc_crc(u8 *b, int len)
367{
368 int i;
369 u32 crc = (u32)~0;
370
371 /* calculate crc one bit at a time */
372 while (len--) {
373 crc ^= *b++;
374 for (i = 0; i < 8; i++) {
375 if (crc & 0x1)
376 crc = (crc >> 1) ^ 0xedb88320;
377 else
378 crc = (crc >> 1);
379 }
380 }
381
382 /* reverse bits */
383 crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
384 crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
385 crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
386 /* swap bytes */
387 crc = (crc >> 16) | (crc << 16);
388 crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
389
390 return crc;
391}
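/*
 * calc_crc() computes the reflected Ethernet CRC-32 (polynomial
 * 0xedb88320) a bit at a time, then reverses the result so callers can
 * index from the high-order end. Usage sketch matching
 * pm3393_set_rx_mode() below:
 *
 *	int bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;
 *	mc_filter[bit >> 4] |= 1 << (bit & 0xf);
 */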
392
393static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
394{
395 int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
396 u32 rx_mode;
397
398 /* Disable MAC RX before reconfiguring it */
399 if (enabled)
400 pm3393_disable(cmac, MAC_DIRECTION_RX);
401
402 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
403 rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
404 SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
405 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
406 (u16)rx_mode);
407
408 if (t1_rx_mode_promisc(rm)) {
409 /* Promiscuous mode. */
410 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
411 }
412 if (t1_rx_mode_allmulti(rm)) {
413 /* Accept all multicast. */
414 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
415 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
416 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
417 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
418 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
419 } else if (t1_rx_mode_mc_cnt(rm)) {
420 /* Accept one or more multicast(s). */
421 u8 *addr;
422 int bit;
423 u16 mc_filter[4] = { 0, };
424
425 while ((addr = t1_get_next_mcaddr(rm))) {
426 bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
427 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
428 }
429 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
430 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
431 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
432 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
433 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
434 }
435
436 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
437
438 if (enabled)
439 pm3393_enable(cmac, MAC_DIRECTION_RX);
440
441 return 0;
442}
443
444static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
445 int *duplex, int *fc)
446{
447 if (speed)
448 *speed = SPEED_10000;
449 if (duplex)
450 *duplex = DUPLEX_FULL;
451 if (fc)
452 *fc = cmac->instance->fc;
453 return 0;
454}
455
456static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
457 int fc)
458{
459 if (speed >= 0 && speed != SPEED_10000)
460 return -1;
461 if (duplex >= 0 && duplex != DUPLEX_FULL)
462 return -1;
463 if (fc & ~(PAUSE_TX | PAUSE_RX))
464 return -1;
465
466 if (fc != cmac->instance->fc) {
467 cmac->instance->fc = (u8) fc;
468 if (cmac->instance->enabled & MAC_DIRECTION_TX)
469 pm3393_enable(cmac, MAC_DIRECTION_TX);
470 }
471 return 0;
472}
473
474#define RMON_UPDATE(mac, name, stat_name) \
475 { \
476 t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
477 t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
478 t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
479 (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
480 (((u64)val1 & 0xffff) << 16) | \
481 (((u64)val2 & 0xff) << 32) | \
482 ((mac)->stats.stat_name & \
483 (~(u64)0 << 40)); \
 484		if (ro & ((u64)1 <<					  \
 485		    ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
486 (mac)->stats.stat_name += ((u64)1 << 40); \
487 }
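/*
 * RMON_UPDATE widens a 40-bit hardware counter (two 16-bit words plus an
 * 8-bit high byte) into the 64-bit software counter: the low 40 bits come
 * from the chip, bits 40..63 are carried over from the previous software
 * value, and a set rollover bit adds 2^40. In pseudo-C for one counter:
 *
 *	hw = val0 | (val1 << 16) | ((u64)(val2 & 0xff) << 32);
 *	sw = (sw & (~(u64)0 << 40)) | hw;
 *	if (rollover bit for this counter is set)
 *		sw += (u64)1 << 40;
 */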
488
489static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
490 int flag)
491{
492 u64 ro;
493 u32 val0, val1, val2, val3;
494
495 /* Snap the counters */
496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
497 SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
498
499 /* Counter rollover, clear on read */
500 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
501 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
502 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
503 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
506
507 /* Rx stats */
508 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
509 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
510 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
511 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
512 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
513 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
514 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
515 RxInternalMACRcvError);
516 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
517 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
518 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
519 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
520 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
521 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
522
523 /* Tx stats */
524 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
525 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
526 TxInternalMACXmitError);
527 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
528 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
529 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
530 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
531 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
532
533 return &mac->stats;
534}
535
536static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
537{
538 memcpy(mac_addr, cmac->instance->mac_addr, 6);
539 return 0;
540}
541
542static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
543{
544 u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
545
546 /*
547 * MAC addr: 00:07:43:00:13:09
548 *
549 * ma[5] = 0x09
550 * ma[4] = 0x13
551 * ma[3] = 0x00
552 * ma[2] = 0x43
553 * ma[1] = 0x07
554 * ma[0] = 0x00
555 *
556 * The PM3393 requires byte swapping and reverse order entry
557 * when programming MAC addresses:
558 *
559 * low_bits[15:0] = ma[1]:ma[0]
560 * mid_bits[31:16] = ma[3]:ma[2]
561 * high_bits[47:32] = ma[5]:ma[4]
562 */
563
564 /* Store local copy */
565 memcpy(cmac->instance->mac_addr, ma, 6);
566
567 lo = ((u32) ma[1] << 8) | (u32) ma[0];
568 mid = ((u32) ma[3] << 8) | (u32) ma[2];
569 hi = ((u32) ma[5] << 8) | (u32) ma[4];
570
571 /* Disable Rx/Tx MAC before configuring it. */
572 if (enabled)
573 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
574
575 /* Set RXXG Station Address */
576 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
577 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
578 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
579
580 /* Set TXXG Station Address */
581 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
582 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
583 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
584
585 /* Setup Exact Match Filter 1 with our MAC address
586 *
587 * Must disable exact match filter before configuring it.
588 */
589 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
590 val &= 0xff0f;
591 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
592
593 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
594 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
595 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
596
597 val |= 0x0090;
598 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
599
600 if (enabled)
601 pm3393_enable(cmac, enabled);
602 return 0;
603}
604
605static void pm3393_destroy(struct cmac *cmac)
606{
607 kfree(cmac);
608}
609
610static struct cmac_ops pm3393_ops = {
611 .destroy = pm3393_destroy,
612 .reset = pm3393_reset,
613 .interrupt_enable = pm3393_interrupt_enable,
614 .interrupt_disable = pm3393_interrupt_disable,
615 .interrupt_clear = pm3393_interrupt_clear,
616 .interrupt_handler = pm3393_interrupt_handler,
617 .enable = pm3393_enable_port,
618 .disable = pm3393_disable,
619 .loopback_enable = pm3393_loopback_enable,
620 .loopback_disable = pm3393_loopback_disable,
621 .set_mtu = pm3393_set_mtu,
622 .set_rx_mode = pm3393_set_rx_mode,
623 .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
624 .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
625 .statistics_update = pm3393_update_statistics,
626 .macaddress_get = pm3393_macaddress_get,
627 .macaddress_set = pm3393_macaddress_set
628};
629
630static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
631{
632 struct cmac *cmac;
633
634 cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
635 if (!cmac)
636 return NULL;
637 memset(cmac, 0, sizeof(*cmac));
638
639 cmac->ops = &pm3393_ops;
640 cmac->instance = (cmac_instance *) (cmac + 1);
641 cmac->adapter = adapter;
642 cmac->instance->fc = PAUSE_TX | PAUSE_RX;
643
644 t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
645 t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
646 t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
647 t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
648 t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
649 t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
650 t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
651 t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
652 t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
653 t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
654 t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
655 t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
656 t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
657 t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
658 t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
659 t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
660 t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
661 t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
662 t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
663 t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
664 t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
665 t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
666
667 t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
668 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
669 t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
670 t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
671 t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
672 t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
673 t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
674 t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
675 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
676 t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
677 t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
678	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
679 t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
680
681 t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
682 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
683 t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
684 t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
685 t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
686 t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
687 t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
688 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
689 t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
690 t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
691	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
692
693 t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
694 t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
695 t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
696 t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
697 t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
698 t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
699
700 t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
701 t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
702
703 t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
704 t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
705
706 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
707 /* For T1 use timer based Mac flow control. */
708 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
709 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
710 t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
711 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
712
713	/* Set up Exact Match Filter 0 to allow broadcast packets.
714	 */
715 t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
716 t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
717 t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
718 t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
719 t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
720
721 t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
722 t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
723 t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
724
725 return cmac;
726}
727
728static int pm3393_mac_reset(adapter_t * adapter)
729{
730 u32 val;
731 u32 x;
732 u32 is_pl4_reset_finished;
733 u32 is_pl4_outof_lock;
734 u32 is_xaui_mabc_pll_locked;
735 u32 successful_reset;
736 int i;
737
738 /* The following steps are required to properly reset
739 * the PM3393. This information is provided in the
740 * PM3393 datasheet (Issue 2: November 2002)
741 * section 13.1 -- Device Reset.
742 *
743 * The PM3393 has three types of components that are
744 * individually reset:
745 *
746 * DRESETB - Digital circuitry
747 * PL4_ARESETB - PL4 analog circuitry
748 * XAUI_ARESETB - XAUI bus analog circuitry
749 *
750 * Steps to reset PM3393 using RSTB pin:
751 *
752	 * 1. Assert the RSTB pin low (write 0).
753	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
754	 * 3. Wait until all external clocks and REFSEL are stable.
755	 * 4. Wait a minimum of 1ms after the external clocks and REFSEL are stable.
756	 * 5. De-assert RSTB (write 1).
757	 * 6. Wait until the internal timers expire, after ~14ms.
758	 *    - This allows the analog clock synthesizer (PL4CSU) to stabilize to
759	 *      the selected reference frequency before the digital portion of
760	 *      the device is allowed to operate.
761	 * 7. Wait at least 200us for the XAUI interface to stabilize.
762	 * 8. Verify that the PM3393 came out of reset successfully.
763	 *    Set the successful-reset flag if everything worked, else try again
764	 *    a few more times.
765 */
766
767 successful_reset = 0;
768 for (i = 0; i < 3 && !successful_reset; i++) {
769 /* 1 */
770 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
771 val &= ~1;
772 t1_tpi_write(adapter, A_ELMER0_GPO, val);
773
774 /* 2 */
775 msleep(1);
776
777 /* 3 */
778 msleep(1);
779
780 /* 4 */
781 msleep(2 /*1 extra ms for safety */ );
782
783 /* 5 */
784 val |= 1;
785 t1_tpi_write(adapter, A_ELMER0_GPO, val);
786
787 /* 6 */
788 msleep(15 /*1 extra ms for safety */ );
789
790 /* 7 */
791 msleep(1);
792
793 /* 8 */
794
795 /* Has PL4 analog block come out of reset correctly? */
796 t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
797 is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
798
799		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in
800		 * the init sequence; figure out why. */
801
802 /* Have all PL4 block clocks locked? */
803 x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
804 /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
805 SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
806 SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
807 SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
808 is_pl4_outof_lock = (val & x);
809
810		/* ??? If this fails, we might be able to software-reset the XAUI
811		 * part and try to recover, saving us from doing another HW reset. */
812		/* Has the XAUI MABC PLL circuitry stabilized? */
813 is_xaui_mabc_pll_locked =
814 (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
815
816 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
817 && is_xaui_mabc_pll_locked);
818 }
819 return successful_reset ? 0 : 1;
820}
821
822struct gmac t1_pm3393_ops = {
823 STATS_TICK_SECS,
824 pm3393_mac_create,
825 pm3393_mac_reset
826};
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 000000000000..b90e11f40d1f
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
1/*****************************************************************************
2 * *
3 * File: regs.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_REGS_H_
40#define _CXGB_REGS_H_
41
42/* SGE registers */
43#define A_SG_CONTROL 0x0
44
45#define S_CMDQ0_ENABLE 0
46#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
47#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
48
49#define S_CMDQ1_ENABLE 1
50#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
51#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
52
53#define S_FL0_ENABLE 2
54#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
55#define F_FL0_ENABLE V_FL0_ENABLE(1U)
56
57#define S_FL1_ENABLE 3
58#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
59#define F_FL1_ENABLE V_FL1_ENABLE(1U)
60
61#define S_CPL_ENABLE 4
62#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
63#define F_CPL_ENABLE V_CPL_ENABLE(1U)
64
65#define S_RESPONSE_QUEUE_ENABLE 5
66#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
67#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
68
69#define S_CMDQ_PRIORITY 6
70#define M_CMDQ_PRIORITY 0x3
71#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
72#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
73
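/*
 * The register macros in this header follow one convention: S_x is a
 * field's bit offset, M_x its unshifted mask, V_x(v) shifts a value into
 * place, F_x is the single-bit form V_x(1U), and G_x(reg) extracts a
 * field. A minimal, purely illustrative sketch (variable names are not
 * from the driver):
 *
 *	u32 ctrl = F_CMDQ0_ENABLE | F_FL0_ENABLE | V_CMDQ_PRIORITY(2);
 *	unsigned int prio = G_CMDQ_PRIORITY(ctrl);	yields prio == 2
 */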
74#define S_DISABLE_CMDQ1_GTS 9
75#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
76#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
77
78#define S_DISABLE_FL0_GTS 10
79#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
80#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
81
82#define S_DISABLE_FL1_GTS 11
83#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
84#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
85
86#define S_ENABLE_BIG_ENDIAN 12
87#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
88#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
89
90#define S_ISCSI_COALESCE 14
91#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
92#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
93
94#define S_RX_PKT_OFFSET 15
95#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
96
97#define S_VLAN_XTRACT 18
98#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
99#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
100
101#define A_SG_DOORBELL 0x4
102#define A_SG_CMD0BASELWR 0x8
103#define A_SG_CMD0BASEUPR 0xc
104#define A_SG_CMD1BASELWR 0x10
105#define A_SG_CMD1BASEUPR 0x14
106#define A_SG_FL0BASELWR 0x18
107#define A_SG_FL0BASEUPR 0x1c
108#define A_SG_FL1BASELWR 0x20
109#define A_SG_FL1BASEUPR 0x24
110#define A_SG_CMD0SIZE 0x28
111#define A_SG_FL0SIZE 0x2c
112#define A_SG_RSPSIZE 0x30
113#define A_SG_RSPBASELWR 0x34
114#define A_SG_RSPBASEUPR 0x38
115#define A_SG_FLTHRESHOLD 0x3c
116#define A_SG_RSPQUEUECREDIT 0x40
117#define A_SG_SLEEPING 0x48
118#define A_SG_INTRTIMER 0x4c
119#define A_SG_CMD1SIZE 0xb0
120#define A_SG_FL1SIZE 0xb4
121#define A_SG_INT_ENABLE 0xb8
122
123#define S_RESPQ_EXHAUSTED 0
124#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
125#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
126
127#define S_RESPQ_OVERFLOW 1
128#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
129#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
130
131#define S_FL_EXHAUSTED 2
132#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
133#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
134
135#define S_PACKET_TOO_BIG 3
136#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
137#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
138
139#define S_PACKET_MISMATCH 4
140#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
141#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
142
143#define A_SG_INT_CAUSE 0xbc
144#define A_SG_RESPACCUTIMER 0xc0
145
146/* MC3 registers */
147
148#define S_READY 1
149#define V_READY(x) ((x) << S_READY)
150#define F_READY V_READY(1U)
151
152/* MC4 registers */
153
154#define A_MC4_CFG 0x180
155#define S_MC4_SLOW 25
156#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
157#define F_MC4_SLOW V_MC4_SLOW(1U)
158
159/* TPI registers */
160
161#define A_TPI_ADDR 0x280
162#define A_TPI_WR_DATA 0x284
163#define A_TPI_RD_DATA 0x288
164#define A_TPI_CSR 0x28c
165
166#define S_TPIWR 0
167#define V_TPIWR(x) ((x) << S_TPIWR)
168#define F_TPIWR V_TPIWR(1U)
169
170#define S_TPIRDY 1
171#define V_TPIRDY(x) ((x) << S_TPIRDY)
172#define F_TPIRDY V_TPIRDY(1U)
173
174#define A_TPI_PAR 0x29c
175
176#define S_TPIPAR 0
177#define M_TPIPAR 0x7f
178#define V_TPIPAR(x) ((x) << S_TPIPAR)
179#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
180
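/*
 * These registers form an indirect window onto TPI-attached devices such
 * as the PM3393; the t1_tpi_write()/t1_tpi_read() helpers used throughout
 * this patch live in subr.c. Assuming F_TPIRDY is set once an operation
 * completes, a write plausibly sequences as follows (illustrative only;
 * the retry bound is hypothetical):
 *
 *	writel(addr, adapter->regs + A_TPI_ADDR);
 *	writel(value, adapter->regs + A_TPI_WR_DATA);
 *	writel(F_TPIWR, adapter->regs + A_TPI_CSR);
 *	while (!(readl(adapter->regs + A_TPI_CSR) & F_TPIRDY) && --attempts)
 *		udelay(10);
 */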
181/* TP registers */
182
183#define A_TP_IN_CONFIG 0x300
184
185#define S_TP_IN_CSPI_CPL 3
186#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
187#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
188
189#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
190#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
191#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
192
193#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
194#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
195#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
196
197#define S_TP_IN_ESPI_ETHERNET 8
198#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
199#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
200
201#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
202#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
203#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
204
205#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
206#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
207#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
208
209#define S_OFFLOAD_DISABLE 14
210#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
211#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
212
213#define A_TP_OUT_CONFIG 0x304
214
215#define S_TP_OUT_CSPI_CPL 2
216#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
217#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
218
219#define S_TP_OUT_ESPI_ETHERNET 6
220#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
221#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
222
223#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
224#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
225#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
226
227#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
228#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
229#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
230
231#define A_TP_GLOBAL_CONFIG 0x308
232
233#define S_IP_TTL 0
234#define M_IP_TTL 0xff
235#define V_IP_TTL(x) ((x) << S_IP_TTL)
236
237#define S_TCP_CSUM 11
238#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
239#define F_TCP_CSUM V_TCP_CSUM(1U)
240
241#define S_UDP_CSUM 12
242#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
243#define F_UDP_CSUM V_UDP_CSUM(1U)
244
245#define S_IP_CSUM 13
246#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
247#define F_IP_CSUM V_IP_CSUM(1U)
248
249#define S_PATH_MTU 15
250#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
251#define F_PATH_MTU V_PATH_MTU(1U)
252
253#define S_5TUPLE_LOOKUP 17
254#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
255
256#define S_SYN_COOKIE_PARAMETER 26
257#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
258
259#define A_TP_PC_CONFIG 0x348
260#define S_DIS_TX_FILL_WIN_PUSH 12
261#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
262#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
263
264#define S_TP_PC_REV 30
265#define M_TP_PC_REV 0x3
266#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
267#define A_TP_RESET 0x44c
268#define S_TP_RESET 0
269#define V_TP_RESET(x) ((x) << S_TP_RESET)
270#define F_TP_RESET V_TP_RESET(1U)
271
272#define A_TP_INT_ENABLE 0x470
273#define A_TP_INT_CAUSE 0x474
274#define A_TP_TX_DROP_CONFIG 0x4b8
275
276#define S_ENABLE_TX_DROP 31
277#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
278#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
279
280#define S_ENABLE_TX_ERROR 30
281#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
282#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
283
284#define S_DROP_TICKS_CNT 4
285#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
286
287#define S_NUM_PKTS_DROPPED 0
288#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
289
290/* CSPI registers */
291
292#define S_DIP4ERR 0
293#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
294#define F_DIP4ERR V_DIP4ERR(1U)
295
296#define S_RXDROP 1
297#define V_RXDROP(x) ((x) << S_RXDROP)
298#define F_RXDROP V_RXDROP(1U)
299
300#define S_TXDROP 2
301#define V_TXDROP(x) ((x) << S_TXDROP)
302#define F_TXDROP V_TXDROP(1U)
303
304#define S_RXOVERFLOW 3
305#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
306#define F_RXOVERFLOW V_RXOVERFLOW(1U)
307
308#define S_RAMPARITYERR 4
309#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
310#define F_RAMPARITYERR V_RAMPARITYERR(1U)
311
312/* ESPI registers */
313
314#define A_ESPI_SCH_TOKEN0 0x880
315#define A_ESPI_SCH_TOKEN1 0x884
316#define A_ESPI_SCH_TOKEN2 0x888
317#define A_ESPI_SCH_TOKEN3 0x88c
318#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
319#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
320#define A_ESPI_CALENDAR_LENGTH 0x898
321#define A_PORT_CONFIG 0x89c
322
323#define S_RX_NPORTS 0
324#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
325
326#define S_TX_NPORTS 8
327#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
328
329#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
330
331#define S_RXSTATUSENABLE 0
332#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
333#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
334
335#define S_INTEL1010MODE 4
336#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
337#define F_INTEL1010MODE V_INTEL1010MODE(1U)
338
339#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
340#define A_ESPI_TRAIN 0x8ac
341#define A_ESPI_INTR_STATUS 0x8c8
342
343#define S_DIP2PARITYERR 5
344#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
345#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
346
347#define A_ESPI_INTR_ENABLE 0x8cc
348#define A_RX_DROP_THRESHOLD 0x8d0
349#define A_ESPI_RX_RESET 0x8ec
350#define A_ESPI_MISC_CONTROL 0x8f0
351
352#define S_OUT_OF_SYNC_COUNT 0
353#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
354
355#define S_DIP2_PARITY_ERR_THRES 5
356#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
357
358#define S_DIP4_THRES 9
359#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
360
361#define S_MONITORED_PORT_NUM 25
362#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
363
364#define S_MONITORED_DIRECTION 27
365#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
366#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
367
368#define S_MONITORED_INTERFACE 28
369#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
370#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
371
372#define A_ESPI_DIP2_ERR_COUNT 0x8f4
373#define A_ESPI_CMD_ADDR 0x8f8
374
375#define S_WRITE_DATA 0
376#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
377
378#define S_REGISTER_OFFSET 8
379#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
380
381#define S_CHANNEL_ADDR 12
382#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
383
384#define S_MODULE_ADDR 16
385#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
386
387#define S_BUNDLE_ADDR 20
388#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
389
390#define S_SPI4_COMMAND 24
391#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
392
393#define A_ESPI_GOSTAT 0x8fc
394#define S_ESPI_CMD_BUSY 8
395#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
396#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
397
398/* PL registers */
399
400#define A_PL_ENABLE 0xa00
401
402#define S_PL_INTR_SGE_ERR 0
403#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
404#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
405
406#define S_PL_INTR_SGE_DATA 1
407#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
408#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
409
410#define S_PL_INTR_TP 6
411#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
412#define F_PL_INTR_TP V_PL_INTR_TP(1U)
413
414#define S_PL_INTR_ESPI 8
415#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
416#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
417
418#define S_PL_INTR_PCIX 10
419#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
420#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
421
422#define S_PL_INTR_EXT 11
423#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
424#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
425
426#define A_PL_CAUSE 0xa04
427
428/* MC5 registers */
429
430#define A_MC5_CONFIG 0xc04
431
432#define S_TCAM_RESET 1
433#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
434#define F_TCAM_RESET V_TCAM_RESET(1U)
435
436#define S_M_BUS_ENABLE 5
437#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
438#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
439
440/* PCICFG registers */
441
442#define A_PCICFG_PM_CSR 0x44
443#define A_PCICFG_VPD_ADDR 0x4a
444
445#define S_VPD_OP_FLAG 15
446#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
447#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
448
449#define A_PCICFG_VPD_DATA 0x4c
450
451#define A_PCICFG_INTR_ENABLE 0xf4
452#define A_PCICFG_INTR_CAUSE 0xf8
453
454#define A_PCICFG_MODE 0xfc
455
456#define S_PCI_MODE_64BIT 0
457#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
458#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
459
460#define S_PCI_MODE_PCIX 5
461#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
462#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
463
464#define S_PCI_MODE_CLK 6
465#define M_PCI_MODE_CLK 0x3
466#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
467
468#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..53b41d99b00b
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
1/*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
42#include <linux/config.h>
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <linux/pci.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
52#include <linux/ip.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55
56#include "cpl5_cmd.h"
57#include "sge.h"
58#include "regs.h"
59#include "espi.h"
60
61
62#ifdef NETIF_F_TSO
63#include <linux/tcp.h>
64#endif
65
66#define SGE_CMDQ_N 2
67#define SGE_FREELQ_N 2
68#define SGE_CMDQ0_E_N 1024
69#define SGE_CMDQ1_E_N 128
70#define SGE_FREEL_SIZE 4096
71#define SGE_JUMBO_FREEL_SIZE 512
72#define SGE_FREEL_REFILL_THRESH 16
73#define SGE_RESPQ_E_N 1024
74#define SGE_INTRTIMER_NRES 1000
75#define SGE_RX_COPY_THRES 256
76#define SGE_RX_SM_BUF_SIZE 1536
77
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
87
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
101/*
102 * Command queue, receive buffer list, and response queue descriptors.
103 */
104#if defined(__BIG_ENDIAN_BITFIELD)
105struct cmdQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 flags;
109 u32 addr_hi;
110};
111
112struct freelQ_e {
113 u32 addr_lo;
114 u32 len_gen;
115 u32 gen2;
116 u32 addr_hi;
117};
118
119struct respQ_e {
120 u32 Qsleeping : 4;
121 u32 Cmdq1CreditReturn : 5;
122 u32 Cmdq1DmaComplete : 5;
123 u32 Cmdq0CreditReturn : 5;
124 u32 Cmdq0DmaComplete : 5;
125 u32 FreelistQid : 2;
126 u32 CreditValid : 1;
127 u32 DataValid : 1;
128 u32 Offload : 1;
129 u32 Eop : 1;
130 u32 Sop : 1;
131 u32 GenerationBit : 1;
132 u32 BufferLength;
133};
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135struct cmdQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 flags;
140};
141
142struct freelQ_e {
143 u32 len_gen;
144 u32 addr_lo;
145 u32 addr_hi;
146 u32 gen2;
147};
148
149struct respQ_e {
150 u32 BufferLength;
151 u32 GenerationBit : 1;
152 u32 Sop : 1;
153 u32 Eop : 1;
154 u32 Offload : 1;
155 u32 DataValid : 1;
156 u32 CreditValid : 1;
157 u32 FreelistQid : 2;
158 u32 Cmdq0DmaComplete : 5;
159 u32 Cmdq0CreditReturn : 5;
160 u32 Cmdq1DmaComplete : 5;
161 u32 Cmdq1CreditReturn : 5;
162 u32 Qsleeping : 4;
163} ;
164#endif
165
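/*
 * The len_gen/gen2 words and the respQ_e GenerationBit implement a
 * lock-free ownership handshake: each descriptor is written with the
 * generation in force at that time, software owns a response entry only
 * while its generation matches the queue's expected genbit, and the
 * expectation flips on every ring wrap. A minimal, illustrative consumer
 * loop (process_responses() below is the real thing):
 *
 *	while (e->GenerationBit == q->genbit) {
 *		... consume *e ...
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->genbit ^= 1;
 *			e = q->entries;
 *		} else
 *			e++;
 *	}
 */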
166/*
167 * SW Context Command and Freelist Queue Descriptors
168 */
169struct cmdQ_ce {
170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len);
173};
174
175struct freelQ_ce {
176 struct sk_buff *skb;
177 DECLARE_PCI_UNMAP_ADDR(dma_addr);
178 DECLARE_PCI_UNMAP_LEN(dma_len);
179};
180
181/*
182 * SW command, freelist and response rings
183 */
184struct cmdQ {
185 unsigned long status; /* HW DMA fetch status */
186 unsigned int in_use; /* # of in-use command descriptors */
187 unsigned int size; /* # of descriptors */
188 unsigned int processed; /* total # of descs HW has processed */
189 unsigned int cleaned; /* total # of descs SW has reclaimed */
190 unsigned int stop_thres; /* SW TX queue suspend threshold */
191 u16 pidx; /* producer index (SW) */
192 u16 cidx; /* consumer index (HW) */
193 u8 genbit; /* current generation (=valid) bit */
194 u8 sop; /* is next entry start of packet? */
195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
198 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
199};
200
201struct freelQ {
202 unsigned int credits; /* # of available RX buffers */
203 unsigned int size; /* free list capacity */
204 u16 pidx; /* producer index (SW) */
205 u16 cidx; /* consumer index (HW) */
206 u16 rx_buffer_size; /* Buffer size on this free list */
207 u16 dma_offset; /* DMA offset to align IP headers */
208 u16 recycleq_idx; /* skb recycle q to use */
209 u8 genbit; /* current generation (=valid) bit */
210 struct freelQ_e *entries; /* HW freelist descriptor Q */
211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
213};
214
215struct respQ {
216 unsigned int credits; /* credits to be returned to SGE */
217 unsigned int size; /* # of response Q descriptors */
218 u16 cidx; /* consumer index (SW) */
219 u8 genbit; /* current generation(=valid) bit */
220 struct respQ_e *entries; /* HW response descriptor Q */
221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
228};
229
230/*
231 * Main SGE data structure
232 *
233	 * Interrupts are handled by a single CPU and it is likely that on an MP
234	 * system the application is migrated to another CPU. In that scenario, we
235	 * try to separate the RX (in irq context) and TX state in order to decrease
236	 * memory contention.
237 */
238struct sge {
239 struct adapter *adapter; /* adapter backpointer */
240 struct net_device *netdev; /* netdevice backpointer */
241 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
242 struct respQ respQ; /* response Q */
243 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246 unsigned int intrtimer_nres; /* no-resource interrupt timer */
247 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
248 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
249 struct timer_list espibug_timer;
250 unsigned int espibug_timeout;
251 struct sk_buff *espibug_skb;
252 u32 sge_control; /* shadow value of sge control reg */
253 struct sge_intr_counts stats;
254 struct sge_port_stats port_stats[MAX_NPORTS];
255 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
256};
257
258/*
259 * PIO to indicate that memory mapped Q contains valid descriptor(s).
260 */
261static inline void doorbell_pio(struct adapter *adapter, u32 val)
262{
263 wmb();
264 writel(val, adapter->regs + A_SG_DOORBELL);
265}
266
267/*
268 * Frees all RX buffers on the freelist Q. The caller must make sure that
269 * the SGE is turned off before calling this function.
270 */
271static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
272{
273 unsigned int cidx = q->cidx;
274
275 while (q->credits--) {
276 struct freelQ_ce *ce = &q->centries[cidx];
277
278 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
279 pci_unmap_len(ce, dma_len),
280 PCI_DMA_FROMDEVICE);
281 dev_kfree_skb(ce->skb);
282 ce->skb = NULL;
283 if (++cidx == q->size)
284 cidx = 0;
285 }
286}
287
288/*
289 * Free RX free list and response queue resources.
290 */
291static void free_rx_resources(struct sge *sge)
292{
293 struct pci_dev *pdev = sge->adapter->pdev;
294 unsigned int size, i;
295
296 if (sge->respQ.entries) {
297 size = sizeof(struct respQ_e) * sge->respQ.size;
298 pci_free_consistent(pdev, size, sge->respQ.entries,
299 sge->respQ.dma_addr);
300 }
301
302 for (i = 0; i < SGE_FREELQ_N; i++) {
303 struct freelQ *q = &sge->freelQ[i];
304
305 if (q->centries) {
306 free_freelQ_buffers(pdev, q);
307 kfree(q->centries);
308 }
309 if (q->entries) {
310 size = sizeof(struct freelQ_e) * q->size;
311 pci_free_consistent(pdev, size, q->entries,
312 q->dma_addr);
313 }
314 }
315}
316
317/*
318 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
319 * response queue.
320 */
321static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
322{
323 struct pci_dev *pdev = sge->adapter->pdev;
324 unsigned int size, i;
325
326 for (i = 0; i < SGE_FREELQ_N; i++) {
327 struct freelQ *q = &sge->freelQ[i];
328
329 q->genbit = 1;
330 q->size = p->freelQ_size[i];
331 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
332 size = sizeof(struct freelQ_e) * q->size;
333 q->entries = (struct freelQ_e *)
334 pci_alloc_consistent(pdev, size, &q->dma_addr);
335 if (!q->entries)
336 goto err_no_mem;
337 memset(q->entries, 0, size);
338 size = sizeof(struct freelQ_ce) * q->size;
339 q->centries = kmalloc(size, GFP_KERNEL);
340 if (!q->centries)
341 goto err_no_mem;
342 memset(q->centries, 0, size);
343 }
344
345 /*
346 * Calculate the buffer sizes for the two free lists. FL0 accommodates
347 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
348 * including all the sk_buff overhead.
349 *
350 * Note: For T2 FL0 and FL1 are reversed.
351 */
352 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
353 sizeof(struct cpl_rx_data) +
354 sge->freelQ[!sge->jumbo_fl].dma_offset;
355 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
356 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
357
358 /*
359 * Setup which skb recycle Q should be used when recycling buffers from
360 * each free list.
361 */
362 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
363 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
364
365 sge->respQ.genbit = 1;
366 sge->respQ.size = SGE_RESPQ_E_N;
367 sge->respQ.credits = 0;
368 size = sizeof(struct respQ_e) * sge->respQ.size;
369 sge->respQ.entries = (struct respQ_e *)
370 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
371 if (!sge->respQ.entries)
372 goto err_no_mem;
373 memset(sge->respQ.entries, 0, size);
374 return 0;
375
376err_no_mem:
377 free_rx_resources(sge);
378 return -ENOMEM;
379}
380
381/*
382 * Reclaims n TX descriptors and frees the buffers associated with them.
383 */
384static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
385{
386 struct cmdQ_ce *ce;
387 struct pci_dev *pdev = sge->adapter->pdev;
388 unsigned int cidx = q->cidx;
389
390 q->in_use -= n;
391 ce = &q->centries[cidx];
392 while (n--) {
393 if (q->sop)
394 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
395 pci_unmap_len(ce, dma_len),
396 PCI_DMA_TODEVICE);
397 else
398 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
399 pci_unmap_len(ce, dma_len),
400 PCI_DMA_TODEVICE);
401 q->sop = 0;
402 if (ce->skb) {
403 dev_kfree_skb(ce->skb);
404 q->sop = 1;
405 }
406 ce++;
407 if (++cidx == q->size) {
408 cidx = 0;
409 ce = q->centries;
410 }
411 }
412 q->cidx = cidx;
413}
414
415/*
416 * Free TX resources.
417 *
418 * Assumes that SGE is stopped and all interrupts are disabled.
419 */
420static void free_tx_resources(struct sge *sge)
421{
422 struct pci_dev *pdev = sge->adapter->pdev;
423 unsigned int size, i;
424
425 for (i = 0; i < SGE_CMDQ_N; i++) {
426 struct cmdQ *q = &sge->cmdQ[i];
427
428 if (q->centries) {
429 if (q->in_use)
430 free_cmdQ_buffers(sge, q, q->in_use);
431 kfree(q->centries);
432 }
433 if (q->entries) {
434 size = sizeof(struct cmdQ_e) * q->size;
435 pci_free_consistent(pdev, size, q->entries,
436 q->dma_addr);
437 }
438 }
439}
440
441/*
442 * Allocates basic TX resources, consisting of memory mapped command Qs.
443 */
444static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
445{
446 struct pci_dev *pdev = sge->adapter->pdev;
447 unsigned int size, i;
448
449 for (i = 0; i < SGE_CMDQ_N; i++) {
450 struct cmdQ *q = &sge->cmdQ[i];
451
452 q->genbit = 1;
453 q->sop = 1;
454 q->size = p->cmdQ_size[i];
455 q->in_use = 0;
456 q->status = 0;
457 q->processed = q->cleaned = 0;
458 q->stop_thres = 0;
459 spin_lock_init(&q->lock);
460 size = sizeof(struct cmdQ_e) * q->size;
461 q->entries = (struct cmdQ_e *)
462 pci_alloc_consistent(pdev, size, &q->dma_addr);
463 if (!q->entries)
464 goto err_no_mem;
465 memset(q->entries, 0, size);
466 size = sizeof(struct cmdQ_ce) * q->size;
467 q->centries = kmalloc(size, GFP_KERNEL);
468 if (!q->centries)
469 goto err_no_mem;
470 memset(q->centries, 0, size);
471 }
472
473 /*
474 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
475 * only. For queue 0 set the stop threshold so we can handle one more
476 * packet from each port, plus reserve an additional 24 entries for
477 * Ethernet packets only. Queue 1 never suspends nor do we reserve
478 * space for Ethernet packets.
479 */
480 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
481 (MAX_SKB_FRAGS + 1);
482 return 0;
483
484err_no_mem:
485 free_tx_resources(sge);
486 return -ENOMEM;
487}
488
489static inline void setup_ring_params(struct adapter *adapter, u64 addr,
490 u32 size, int base_reg_lo,
491 int base_reg_hi, int size_reg)
492{
493 writel((u32)addr, adapter->regs + base_reg_lo);
494 writel(addr >> 32, adapter->regs + base_reg_hi);
495 writel(size, adapter->regs + size_reg);
496}
497
498/*
499 * Enable/disable VLAN acceleration.
500 */
501void t1_set_vlan_accel(struct adapter *adapter, int on_off)
502{
503 struct sge *sge = adapter->sge;
504
505 sge->sge_control &= ~F_VLAN_XTRACT;
506 if (on_off)
507 sge->sge_control |= F_VLAN_XTRACT;
508 if (adapter->open_device_map) {
509 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
510 readl(adapter->regs + A_SG_CONTROL); /* flush */
511 }
512}
513
514/*
515	 * Programs the various SGE registers. The engine is not yet enabled, but
516	 * sge->sge_control is set up and ready to go.
517 */
518static void configure_sge(struct sge *sge, struct sge_params *p)
519{
520 struct adapter *ap = sge->adapter;
521
522 writel(0, ap->regs + A_SG_CONTROL);
523 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
524 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
525 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
526 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
527 setup_ring_params(ap, sge->freelQ[0].dma_addr,
528 sge->freelQ[0].size, A_SG_FL0BASELWR,
529 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
530 setup_ring_params(ap, sge->freelQ[1].dma_addr,
531 sge->freelQ[1].size, A_SG_FL1BASELWR,
532 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
533
534 /* The threshold comparison uses <. */
535 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
536
537 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
538 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
539 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
540
541 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
542 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
543 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
544 F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
545 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
546
547#if defined(__BIG_ENDIAN_BITFIELD)
548 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
549#endif
550
551 /* Initialize no-resource timer */
552 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
553
554 t1_sge_set_coalesce_params(sge, p);
555}
556
557/*
558 * Return the payload capacity of the jumbo free-list buffers.
559 */
560static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
561{
562 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
563 sge->freelQ[sge->jumbo_fl].dma_offset -
564 sizeof(struct cpl_rx_data);
565}
566
567/*
568 * Frees all SGE related resources and the sge structure itself
569 */
570void t1_sge_destroy(struct sge *sge)
571{
572 if (sge->espibug_skb)
573 kfree_skb(sge->espibug_skb);
574
575 free_tx_resources(sge);
576 free_rx_resources(sge);
577 kfree(sge);
578}
579
580/*
581 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
582 * context Q) until the Q is full or alloc_skb fails.
583 *
584 * It is possible that the generation bits already match, indicating that the
585 * buffer is already valid and nothing needs to be done. This happens when we
586 * copied a received buffer into a new sk_buff during the interrupt processing.
587 *
588 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
589	 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
590 * aligned.
591 */
592static void refill_free_list(struct sge *sge, struct freelQ *q)
593{
594 struct pci_dev *pdev = sge->adapter->pdev;
595 struct freelQ_ce *ce = &q->centries[q->pidx];
596 struct freelQ_e *e = &q->entries[q->pidx];
597 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
598
599
600 while (q->credits < q->size) {
601 struct sk_buff *skb;
602 dma_addr_t mapping;
603
604 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
605 if (!skb)
606 break;
607
608 skb_reserve(skb, q->dma_offset);
609 mapping = pci_map_single(pdev, skb->data, dma_len,
610 PCI_DMA_FROMDEVICE);
611 ce->skb = skb;
612 pci_unmap_addr_set(ce, dma_addr, mapping);
613 pci_unmap_len_set(ce, dma_len, dma_len);
614 e->addr_lo = (u32)mapping;
615 e->addr_hi = (u64)mapping >> 32;
616 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
617 wmb();
618 e->gen2 = V_CMD_GEN2(q->genbit);
619
620 e++;
621 ce++;
622 if (++q->pidx == q->size) {
623 q->pidx = 0;
624 q->genbit ^= 1;
625 ce = q->centries;
626 e = q->entries;
627 }
628 q->credits++;
629 }
630
631}
632
633/*
634 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
635 * of both rings, we go into 'few interrupt mode' in order to give the system
636 * time to free up resources.
637 */
638static void freelQs_empty(struct sge *sge)
639{
640 struct adapter *adapter = sge->adapter;
641 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
642 u32 irqholdoff_reg;
643
644 refill_free_list(sge, &sge->freelQ[0]);
645 refill_free_list(sge, &sge->freelQ[1]);
646
647 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
648 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
649 irq_reg |= F_FL_EXHAUSTED;
650 irqholdoff_reg = sge->fixed_intrtimer;
651 } else {
652 /* Clear the F_FL_EXHAUSTED interrupts for now */
653 irq_reg &= ~F_FL_EXHAUSTED;
654 irqholdoff_reg = sge->intrtimer_nres;
655 }
656 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
657 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
658
659 /* We reenable the Qs to force a freelist GTS interrupt later */
660 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
661}
662
663#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
664#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
665#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
666 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
667
668/*
669 * Disable SGE Interrupts
670 */
671void t1_sge_intr_disable(struct sge *sge)
672{
673 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
674
675 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
676 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
677}
678
679/*
680 * Enable SGE interrupts.
681 */
682void t1_sge_intr_enable(struct sge *sge)
683{
684 u32 en = SGE_INT_ENABLE;
685 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
686
687 if (sge->adapter->flags & TSO_CAPABLE)
688 en &= ~F_PACKET_TOO_BIG;
689 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
690 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
691}
692
693/*
694 * Clear SGE interrupts.
695 */
696void t1_sge_intr_clear(struct sge *sge)
697{
698 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
699 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
700}
701
702/*
703 * SGE 'Error' interrupt handler
704 */
705int t1_sge_intr_error_handler(struct sge *sge)
706{
707 struct adapter *adapter = sge->adapter;
708 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
709
710 if (adapter->flags & TSO_CAPABLE)
711 cause &= ~F_PACKET_TOO_BIG;
712 if (cause & F_RESPQ_EXHAUSTED)
713 sge->stats.respQ_empty++;
714 if (cause & F_RESPQ_OVERFLOW) {
715 sge->stats.respQ_overflow++;
716 CH_ALERT("%s: SGE response queue overflow\n",
717 adapter->name);
718 }
719 if (cause & F_FL_EXHAUSTED) {
720 sge->stats.freelistQ_empty++;
721 freelQs_empty(sge);
722 }
723 if (cause & F_PACKET_TOO_BIG) {
724 sge->stats.pkt_too_big++;
725 CH_ALERT("%s: SGE max packet size exceeded\n",
726 adapter->name);
727 }
728 if (cause & F_PACKET_MISMATCH) {
729 sge->stats.pkt_mismatch++;
730 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
731 }
732 if (cause & SGE_INT_FATAL)
733 t1_fatal_err(adapter);
734
735 writel(cause, adapter->regs + A_SG_INT_CAUSE);
736 return 0;
737}
738
739const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
740{
741 return &sge->stats;
742}
743
744const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
745{
746 return &sge->port_stats[port];
747}
748
749/**
750 * recycle_fl_buf - recycle a free list buffer
751 * @fl: the free list
752 * @idx: index of buffer to recycle
753 *
754 * Recycles the specified buffer on the given free list by adding it at
755 * the next available slot on the list.
756 */
757static void recycle_fl_buf(struct freelQ *fl, int idx)
758{
759 struct freelQ_e *from = &fl->entries[idx];
760 struct freelQ_e *to = &fl->entries[fl->pidx];
761
762 fl->centries[fl->pidx] = fl->centries[idx];
763 to->addr_lo = from->addr_lo;
764 to->addr_hi = from->addr_hi;
765 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
766 wmb();
767 to->gen2 = V_CMD_GEN2(fl->genbit);
768 fl->credits++;
769
770 if (++fl->pidx == fl->size) {
771 fl->pidx = 0;
772 fl->genbit ^= 1;
773 }
774}
775
776/**
777 * get_packet - return the next ingress packet buffer
778 * @pdev: the PCI device that received the packet
779 * @fl: the SGE free list holding the packet
780 * @len: the actual packet length, excluding any SGE padding
781 * @dma_pad: padding at beginning of buffer left by SGE DMA
782 * @skb_pad: padding to be used if the packet is copied
783 * @copy_thres: length threshold under which a packet should be copied
784 * @drop_thres: # of remaining buffers before we start dropping packets
785 *
786 * Get the next packet from a free list and complete setup of the
787 * sk_buff. If the packet is small we make a copy and recycle the
788 * original buffer, otherwise we use the original buffer itself. If a
789 * positive drop threshold is supplied packets are dropped and their
790 * buffers recycled if (a) the number of remaining buffers is under the
791 * threshold and the packet is too big to copy, or (b) the packet should
792 * be copied but there is no memory for the copy.
793 */
794static inline struct sk_buff *get_packet(struct pci_dev *pdev,
795 struct freelQ *fl, unsigned int len,
796 int dma_pad, int skb_pad,
797 unsigned int copy_thres,
798 unsigned int drop_thres)
799{
800 struct sk_buff *skb;
801 struct freelQ_ce *ce = &fl->centries[fl->cidx];
802
803 if (len < copy_thres) {
804 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
805 if (likely(skb != NULL)) {
806 skb_reserve(skb, skb_pad);
807 skb_put(skb, len);
808 pci_dma_sync_single_for_cpu(pdev,
809 pci_unmap_addr(ce, dma_addr),
810 pci_unmap_len(ce, dma_len),
811 PCI_DMA_FROMDEVICE);
812 memcpy(skb->data, ce->skb->data + dma_pad, len);
813 pci_dma_sync_single_for_device(pdev,
814 pci_unmap_addr(ce, dma_addr),
815 pci_unmap_len(ce, dma_len),
816 PCI_DMA_FROMDEVICE);
817 } else if (!drop_thres)
818 goto use_orig_buf;
819
820 recycle_fl_buf(fl, fl->cidx);
821 return skb;
822 }
823
824 if (fl->credits < drop_thres) {
825 recycle_fl_buf(fl, fl->cidx);
826 return NULL;
827 }
828
829use_orig_buf:
830 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
831 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
832 skb = ce->skb;
833 skb_reserve(skb, dma_pad);
834 skb_put(skb, len);
835 return skb;
836}
837
838/**
839 * unexpected_offload - handle an unexpected offload packet
840 * @adapter: the adapter
841 * @fl: the free list that received the packet
842 *
843 * Called when we receive an unexpected offload packet (e.g., the TOE
844 * function is disabled or the card is a NIC). Prints a message and
845 * recycles the buffer.
846 */
847static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
848{
849 struct freelQ_ce *ce = &fl->centries[fl->cidx];
850 struct sk_buff *skb = ce->skb;
851
852 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
853 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
854 CH_ERR("%s: unexpected offload packet, cmd %u\n",
855 adapter->name, *skb->data);
856 recycle_fl_buf(fl, fl->cidx);
857}
858
859/*
860 * Write the command descriptors to transmit the given skb starting at
861 * descriptor pidx with the given generation.
862 */
863static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
864 unsigned int pidx, unsigned int gen,
865 struct cmdQ *q)
866{
867 dma_addr_t mapping;
868 struct cmdQ_e *e, *e1;
869 struct cmdQ_ce *ce;
870 unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
871
872 mapping = pci_map_single(adapter->pdev, skb->data,
873 skb->len - skb->data_len, PCI_DMA_TODEVICE);
874 ce = &q->centries[pidx];
875 ce->skb = NULL;
876 pci_unmap_addr_set(ce, dma_addr, mapping);
877 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
878
879 flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
880 V_CMD_GEN2(gen);
881 e = &q->entries[pidx];
882 e->addr_lo = (u32)mapping;
883 e->addr_hi = (u64)mapping >> 32;
884 e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
885 for (e1 = e, i = 0; nfrags--; i++) {
886 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
887
888 ce++;
889 e1++;
890 if (++pidx == q->size) {
891 pidx = 0;
892 gen ^= 1;
893 ce = q->centries;
894 e1 = q->entries;
895 }
896
897 mapping = pci_map_page(adapter->pdev, frag->page,
898 frag->page_offset, frag->size,
899 PCI_DMA_TODEVICE);
900 ce->skb = NULL;
901 pci_unmap_addr_set(ce, dma_addr, mapping);
902 pci_unmap_len_set(ce, dma_len, frag->size);
903
904 e1->addr_lo = (u32)mapping;
905 e1->addr_hi = (u64)mapping >> 32;
906 e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
907 e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
908 V_CMD_GEN2(gen);
909 }
910
911 ce->skb = skb;
912 wmb();
913 e->flags = flags;
914}
915
916/*
917 * Clean up completed Tx buffers.
918 */
919static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
920{
921 unsigned int reclaim = q->processed - q->cleaned;
922
923 if (reclaim) {
924 free_cmdQ_buffers(sge, q, reclaim);
925 q->cleaned += reclaim;
926 }
927}
928
929#ifndef SET_ETHTOOL_OPS
930# define __netif_rx_complete(dev) netif_rx_complete(dev)
931#endif
932
933/*
934 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
935	 * ports plus the TOE all multiplexing onto a single response queue; accepting
936	 * new responses therefore cannot depend on the state of any particular port.
937 * So define our own equivalent that omits the netif_running() test.
938 */
939static inline int napi_schedule_prep(struct net_device *dev)
940{
941 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
942}
943
944
945/**
946 * sge_rx - process an ingress ethernet packet
947 * @sge: the sge structure
948 * @fl: the free list that contains the packet buffer
949 * @len: the packet length
950 *
951	 * Process an ingress Ethernet packet and deliver it to the stack.
952 */
953static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
954{
955 struct sk_buff *skb;
956 struct cpl_rx_pkt *p;
957 struct adapter *adapter = sge->adapter;
958
959 sge->stats.ethernet_pkts++;
960 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
961 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
962 SGE_RX_DROP_THRES);
963 if (!skb) {
964 sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
965 return 0;
966 }
967
968 p = (struct cpl_rx_pkt *)skb->data;
969 skb_pull(skb, sizeof(*p));
970 skb->dev = adapter->port[p->iff].dev;
971 skb->dev->last_rx = jiffies;
972 skb->protocol = eth_type_trans(skb, skb->dev);
973 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
974 skb->protocol == htons(ETH_P_IP) &&
975 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
976 sge->port_stats[p->iff].rx_cso_good++;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
978 } else
979 skb->ip_summed = CHECKSUM_NONE;
980
981 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
982 sge->port_stats[p->iff].vlan_xtract++;
983 if (adapter->params.sge.polling)
984 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
985 ntohs(p->vlan));
986 else
987 vlan_hwaccel_rx(skb, adapter->vlan_grp,
988 ntohs(p->vlan));
989 } else if (adapter->params.sge.polling)
990 netif_receive_skb(skb);
991 else
992 netif_rx(skb);
993 return 0;
994}
995
996/*
997 * Returns true if a command queue has enough available descriptors that
998 * we can resume Tx operation after temporarily disabling its packet queue.
999 */
1000static inline int enough_free_Tx_descs(const struct cmdQ *q)
1001{
1002 unsigned int r = q->processed - q->cleaned;
1003
1004 return q->in_use - r < (q->size >> 1);
1005}
1006
1007/*
1008 * Called when sufficient space has become available in the SGE command queues
1009 * after the Tx packet schedulers have been suspended to restart the Tx path.
1010 */
1011static void restart_tx_queues(struct sge *sge)
1012{
1013 struct adapter *adap = sge->adapter;
1014
1015 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1016 int i;
1017
1018 for_each_port(adap, i) {
1019 struct net_device *nd = adap->port[i].dev;
1020
1021 if (test_and_clear_bit(nd->if_port,
1022 &sge->stopped_tx_queues) &&
1023 netif_running(nd)) {
1024 sge->stats.cmdQ_restarted[3]++;
1025 netif_wake_queue(nd);
1026 }
1027 }
1028 }
1029}
1030
1031/*
1032 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1033 * information.
1034 */
1035static unsigned int update_tx_info(struct adapter *adapter,
1036 unsigned int flags,
1037 unsigned int pr0)
1038{
1039 struct sge *sge = adapter->sge;
1040 struct cmdQ *cmdq = &sge->cmdQ[0];
1041
1042 cmdq->processed += pr0;
1043
1044 if (flags & F_CMDQ0_ENABLE) {
1045 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1046
1047 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1048 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1049 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1050 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1051 }
1052 flags &= ~F_CMDQ0_ENABLE;
1053 }
1054
1055 if (unlikely(sge->stopped_tx_queues != 0))
1056 restart_tx_queues(sge);
1057
1058 return flags;
1059}
1060
1061/*
1062 * Process SGE responses, up to the supplied budget. Returns the number of
1063 * responses processed. A negative budget is effectively unlimited.
1064 */
1065static int process_responses(struct adapter *adapter, int budget)
1066{
1067 struct sge *sge = adapter->sge;
1068 struct respQ *q = &sge->respQ;
1069 struct respQ_e *e = &q->entries[q->cidx];
1070 int budget_left = budget;
1071 unsigned int flags = 0;
1072 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1073
1074
1075 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1076 flags |= e->Qsleeping;
1077
1078 cmdq_processed[0] += e->Cmdq0CreditReturn;
1079 cmdq_processed[1] += e->Cmdq1CreditReturn;
1080
1081 /* We batch updates to the TX side to avoid cacheline
1082 * ping-pong of TX state information on MP where the sender
1083 * might run on a different CPU than this function...
1084 */
1085 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1086 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1087 cmdq_processed[0] = 0;
1088 }
1089 if (unlikely(cmdq_processed[1] > 16)) {
1090 sge->cmdQ[1].processed += cmdq_processed[1];
1091 cmdq_processed[1] = 0;
1092 }
1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095
1096 if (unlikely(!e->Sop || !e->Eop))
1097 BUG();
1098 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl);
1100 else
1101 sge_rx(sge, fl, e->BufferLength);
1102
1103 /*
1104 * Note: this depends on each packet consuming a
1105 * single free-list buffer; cf. the BUG above.
1106 */
1107 if (++fl->cidx == fl->size)
1108 fl->cidx = 0;
1109 if (unlikely(--fl->credits <
1110 fl->size - SGE_FREEL_REFILL_THRESH))
1111 refill_free_list(sge, fl);
1112 } else
1113 sge->stats.pure_rsps++;
1114
1115 e++;
1116 if (unlikely(++q->cidx == q->size)) {
1117 q->cidx = 0;
1118 q->genbit ^= 1;
1119 e = q->entries;
1120 }
1121 prefetch(e);
1122
1123 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1124 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1125 q->credits = 0;
1126 }
1127 --budget_left;
1128 }
1129
1130 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1131 sge->cmdQ[1].processed += cmdq_processed[1];
1132
1133 budget -= budget_left;
1134 return budget;
1135}
1136
1137/*
1138 * A simpler version of process_responses() that handles only pure (i.e.,
1139	 * non data-carrying) responses. Such responses are too lightweight to justify
1140 * calling a softirq when using NAPI, so we handle them specially in hard
1141 * interrupt context. The function is called with a pointer to a response,
1142 * which the caller must ensure is a valid pure response. Returns 1 if it
1143 * encounters a valid data-carrying response, 0 otherwise.
1144 */
1145static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1146{
1147 struct sge *sge = adapter->sge;
1148 struct respQ *q = &sge->respQ;
1149 unsigned int flags = 0;
1150 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1151
1152 do {
1153 flags |= e->Qsleeping;
1154
1155 cmdq_processed[0] += e->Cmdq0CreditReturn;
1156 cmdq_processed[1] += e->Cmdq1CreditReturn;
1157
1158 e++;
1159 if (unlikely(++q->cidx == q->size)) {
1160 q->cidx = 0;
1161 q->genbit ^= 1;
1162 e = q->entries;
1163 }
1164 prefetch(e);
1165
1166 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1167 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1168 q->credits = 0;
1169 }
1170 sge->stats.pure_rsps++;
1171 } while (e->GenerationBit == q->genbit && !e->DataValid);
1172
1173 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1174 sge->cmdQ[1].processed += cmdq_processed[1];
1175
1176 return e->GenerationBit == q->genbit;
1177}
1178
1179/*
1180 * Handler for new data events when using NAPI. This does not need any locking
1181 * or protection from interrupts as data interrupts are off at this point and
1182 * other adapter interrupts do not interfere.
1183 */
1184static int t1_poll(struct net_device *dev, int *budget)
1185{
1186 struct adapter *adapter = dev->priv;
1187 int effective_budget = min(*budget, dev->quota);
1188
1189 int work_done = process_responses(adapter, effective_budget);
1190 *budget -= work_done;
1191 dev->quota -= work_done;
1192
1193 if (work_done >= effective_budget)
1194 return 1;
1195
1196 __netif_rx_complete(dev);
1197
1198 /*
1199 * Because we don't atomically flush the following write it is
1200 * possible that in very rare cases it can reach the device in a way
1201 * that races with a new response being written plus an error interrupt
1202 * causing the NAPI interrupt handler below to return unhandled status
1203 * to the OS. To protect against this would require flushing the write
1204 * and doing both the write and the flush with interrupts off. Way too
1205 * expensive and unjustifiable given the rarity of the race.
1206 */
1207 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1208 return 0;
1209}
1210
1211/*
1212 * Returns true if the device is already scheduled for polling.
1213 */
1214static inline int napi_is_scheduled(struct net_device *dev)
1215{
1216 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1217}
1218
1219/*
1220 * NAPI version of the main interrupt handler.
1221 */
1222static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
1223{
1224 int handled;
1225 struct adapter *adapter = data;
1226 struct sge *sge = adapter->sge;
1227 struct respQ *q = &adapter->sge->respQ;
1228
1229 /*
1230 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
1231 * handler has control of the response queue and the interrupt handler
1232 * can look at the queue reliably only once it knows NAPI is off.
1233 * We can't wait that long to clear the SGE_DATA interrupt because we
1234 * could race with t1_poll rearming the SGE interrupt, so we need to
1235 * clear the interrupt speculatively and really early on.
1236 */
1237 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1238
1239 spin_lock(&adapter->async_lock);
1240 if (!napi_is_scheduled(sge->netdev)) {
1241 struct respQ_e *e = &q->entries[q->cidx];
1242
1243 if (e->GenerationBit == q->genbit) {
1244 if (e->DataValid ||
1245 process_pure_responses(adapter, e)) {
1246 if (likely(napi_schedule_prep(sge->netdev)))
1247 __netif_rx_schedule(sge->netdev);
1248 else
1249 printk(KERN_CRIT
1250 "NAPI schedule failure!\n");
1251 } else
1252 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1253 handled = 1;
1254 goto unlock;
1255 } else
1256 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1257 } else
1258 if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
1259 printk(KERN_ERR "data interrupt while NAPI running\n");
1260
1261 handled = t1_slow_intr_handler(adapter);
1262 if (!handled)
1263 sge->stats.unhandled_irqs++;
1264 unlock:
1265 spin_unlock(&adapter->async_lock);
1266 return IRQ_RETVAL(handled != 0);
1267}
1268
1269/*
1270 * Main interrupt handler, optimized assuming that we took a 'DATA'
1271 * interrupt.
1272 *
1273 * 1. Clear the interrupt
1274 * 2. Loop while we find valid descriptors and process them; accumulate
1275 * information that can be processed after the loop
1276 * 3. Tell the SGE at which index we stopped processing descriptors
1277 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1278 * outstanding TX buffers waiting, replenish RX buffers, potentially
1279 * reenable upper layers if they were turned off due to lack of TX
1280 * resources which are available again.
1281 * 5. If we took an interrupt but no valid respQ descriptors were found, we
1282 * let the slow_intr_handler run and do error handling.
1283 */
1284static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1285{
1286 int work_done;
1287 struct respQ_e *e;
1288 struct adapter *adapter = cookie;
1289 struct respQ *Q = &adapter->sge->respQ;
1290
1291 spin_lock(&adapter->async_lock);
1292 e = &Q->entries[Q->cidx];
1293 prefetch(e);
1294
1295 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1296
1297 if (likely(e->GenerationBit == Q->genbit))
1298 work_done = process_responses(adapter, -1);
1299 else
1300 work_done = t1_slow_intr_handler(adapter);
1301
1302 /*
1303 * The unconditional clearing of the PL_CAUSE above may have raced
1304 * with DMA completion and the corresponding generation of a response
1305 * to cause us to miss the resulting data interrupt. The next write
1306 * is also unconditional to recover the missed interrupt and render
1307 * this race harmless.
1308 */
1309 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1310
1311 if (!work_done)
1312 adapter->sge->stats.unhandled_irqs++;
1313 spin_unlock(&adapter->async_lock);
1314 return IRQ_RETVAL(work_done != 0);
1315}
1316
1317intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1318{
1319 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1320}
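/*
 * Usage sketch (illustrative; the real call site is the OS-specific code
 * in cxgb2.c): the selected handler is passed straight to request_irq(),
 * e.g.
 *
 *	err = request_irq(adapter->pdev->irq, t1_select_intr_handler(adapter),
 *			  SA_SHIRQ, adapter->name, adapter);
 */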
1321
1322/*
1323 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1324 *
1325 * The code figures out how many entries the sk_buff will require in the
1326 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1327 * has completed. Then, it doesn't access the global structure anymore, but
1328 * uses the corresponding fields on the stack. In conjunction with a spinlock
1329 * around that code, we can make the function reentrant without holding the
1330 * lock when we actually enqueue (which might be expensive, especially on
1331 * architectures with IO MMUs).
1332 *
1333 * This runs with softirqs disabled.
1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev)
1337{
1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid];
1340 unsigned int credits, pidx, genbit, count;
1341
1342 spin_lock(&q->lock);
1343 reclaim_completed_tx(sge, q);
1344
1345 pidx = q->pidx;
1346 credits = q->size - q->in_use;
1347 count = 1 + skb_shinfo(skb)->nr_frags;
1348
1349 { /* Ethernet packet */
1350 if (unlikely(credits < count)) {
1351 netif_stop_queue(dev);
1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n",
1356 adapter->name);
1357 return 1;
1358 }
1359 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++;
1361 netif_stop_queue(dev);
1362 set_bit(dev->if_port, &sge->stopped_tx_queues);
1363 }
1364 }
1365 q->in_use += count;
1366 genbit = q->genbit;
1367 q->pidx += count;
1368 if (q->pidx >= q->size) {
1369 q->pidx -= q->size;
1370 q->genbit ^= 1;
1371 }
1372 spin_unlock(&q->lock);
1373
1374 write_tx_descs(adapter, skb, pidx, genbit, q);
1375
1376 /*
1377 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1378 * the doorbell if the Q is asleep. There is a natural race, where
1379 * the hardware is going to sleep just after we checked, however,
1380 * then the interrupt handler will detect the outstanding TX packet
1381 * and ring the doorbell for us.
1382 */
1383 if (qid)
1384 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1385 else {
1386 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1387 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1388 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 }
1391 }
1392 return 0;
1393}
1394
1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
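/*
 * Worked example: MK_ETH_TYPE_MSS(type, 1460) keeps the MSS in the low
 * 14 bits (1460 & 0x3FFF == 1460) and packs the Ethernet framing type
 * into bits 15:14, matching the eth_type_mss field written below.
 */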
1396
1397/*
1398 * eth_hdr_len - return the length of an Ethernet header
1399 * @data: pointer to the start of the Ethernet header
1400 *
1401 * Returns the length of an Ethernet header, including optional VLAN tag.
1402 */
1403static inline int eth_hdr_len(const void *data)
1404{
1405 const struct ethhdr *e = data;
1406
1407 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1408}
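/*
 * Example: an 802.1Q-tagged frame yields VLAN_ETH_HLEN (18 bytes), a
 * plain Ethernet frame ETH_HLEN (14 bytes).
 */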
1409
1410/*
1411 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1412 */
1413int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414{
1415 struct adapter *adapter = dev->priv;
1416 struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
1417 struct sge *sge = adapter->sge;
1418 struct cpl_tx_pkt *cpl;
1419
1420#ifdef NETIF_F_TSO
1421 if (skb_shinfo(skb)->tso_size) {
1422 int eth_type;
1423 struct cpl_tx_pkt_lso *hdr;
1424
1425 st->tso++;
1426
1427 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1428 CPL_ETH_II : CPL_ETH_II_VLAN;
1429
1430 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1431 hdr->opcode = CPL_TX_PKT_LSO;
1432 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1433 hdr->ip_hdr_words = skb->nh.iph->ihl;
1434 hdr->tcp_hdr_words = skb->h.th->doff;
1435 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1436 skb_shinfo(skb)->tso_size));
1437 hdr->len = htonl(skb->len - sizeof(*hdr));
1438 cpl = (struct cpl_tx_pkt *)hdr;
1439 sge->stats.tx_lso_pkts++;
1440 } else
1441#endif
1442 {
1443 /*
1444 * Packets shorter than ETH_HLEN can break the MAC, drop them
1445 * early. Also, we may get oversized packets because some
1446 * parts of the kernel don't handle our unusual hard_header_len
1447 * correctly; drop those too.
1448 */
1449 if (unlikely(skb->len < ETH_HLEN ||
1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1451 dev_kfree_skb_any(skb);
1452 return NET_XMIT_SUCCESS;
1453 }
1454
1455 /*
1456 * We are using a non-standard hard_header_len and some kernel
1457 * components, such as pktgen, do not handle it right.
1458 * Complain when this happens but try to fix things up.
1459 */
1460 if (unlikely(skb_headroom(skb) <
1461 dev->hard_header_len - ETH_HLEN)) {
1462 struct sk_buff *orig_skb = skb;
1463
1464 if (net_ratelimit())
1465 printk(KERN_ERR "%s: inadequate headroom in "
1466 "Tx packet\n", dev->name);
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb);
1469 if (!skb)
1470 return -ENOMEM;
1471 }
1472
1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1474 skb->ip_summed == CHECKSUM_HW &&
1475 skb->nh.iph->protocol == IPPROTO_UDP)
1476 if (unlikely(skb_checksum_help(skb, 0))) {
1477 dev_kfree_skb_any(skb);
1478 return -ENOMEM;
1479 }
1480
1481 /* Assume this catches the gratuitous ARP; we'll reuse it
1482 * to flush out stuck ESPI packets...
1483 */
1484 if (unlikely(!adapter->sge->espibug_skb)) {
1485 if (skb->protocol == htons(ETH_P_ARP) &&
1486 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
1487 adapter->sge->espibug_skb = skb;
1488 /* We want to re-use this skb later. We
1489 * simply bump the reference count and it
1490 * will not be freed...
1491 */
1492 skb = skb_get(skb);
1493 }
1494 }
1495
1496 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1497 cpl->opcode = CPL_TX_PKT;
1498 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1499 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1500 /* the length field isn't used so don't bother setting it */
1501
1502 st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
1503 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
1504 sge->stats.tx_reg_pkts++;
1505 }
1506 cpl->iff = dev->if_port;
1507
1508#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1509 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1510 cpl->vlan_valid = 1;
1511 cpl->vlan = htons(vlan_tx_tag_get(skb));
1512 st->vlan_insert++;
1513 } else
1514#endif
1515 cpl->vlan_valid = 0;
1516
1517 dev->trans_start = jiffies;
1518 return t1_sge_tx(skb, adapter, 0, dev);
1519}
1520
1521/*
1522 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1523 */
1524static void sge_tx_reclaim_cb(unsigned long data)
1525{
1526 int i;
1527 struct sge *sge = (struct sge *)data;
1528
1529 for (i = 0; i < SGE_CMDQ_N; ++i) {
1530 struct cmdQ *q = &sge->cmdQ[i];
1531
1532 if (!spin_trylock(&q->lock))
1533 continue;
1534
1535 reclaim_completed_tx(sge, q);
1536 if (i == 0 && q->in_use) /* flush pending credits */
1537 writel(F_CMDQ0_ENABLE,
1538 sge->adapter->regs + A_SG_DOORBELL);
1539
1540 spin_unlock(&q->lock);
1541 }
1542 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1543}
1544
1545/*
1546 * Propagate changes of the SGE coalescing parameters to the HW.
1547 */
1548int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1549{
1550 sge->netdev->poll = t1_poll;
1551 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1552 core_ticks_per_usec(sge->adapter);
1553 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1554 return 0;
1555}
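/*
 * Example: with the default rx_coalesce_usecs of 50 and the 125MHz core
 * clock of the N110/N210 boards (core_ticks_per_usec() == 125), the timer
 * is programmed to 50 * 125 == 6250 core ticks, i.e. data interrupts are
 * paced to roughly one per 50us.
 */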
1556
1557/*
1558 * Allocates both RX and TX resources and configures the SGE. However,
1559 * the hardware is not enabled yet.
1560 */
1561int t1_sge_configure(struct sge *sge, struct sge_params *p)
1562{
1563 if (alloc_rx_resources(sge, p))
1564 return -ENOMEM;
1565 if (alloc_tx_resources(sge, p)) {
1566 free_rx_resources(sge);
1567 return -ENOMEM;
1568 }
1569 configure_sge(sge, p);
1570
1571 /*
1572 * Now that we have sized the free lists calculate the payload
1573 * capacity of the large buffers. Other parts of the driver use
1574 * this to set the max offload coalescing size so that RX packets
1575 * do not overflow our large buffers.
1576 */
1577 p->large_buf_capacity = jumbo_payload_capacity(sge);
1578 return 0;
1579}
1580
1581/*
1582 * Disables the DMA engine.
1583 */
1584void t1_sge_stop(struct sge *sge)
1585{
1586 writel(0, sge->adapter->regs + A_SG_CONTROL);
1587 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1588 if (is_T2(sge->adapter))
1589 del_timer_sync(&sge->espibug_timer);
1590 del_timer_sync(&sge->tx_reclaim_timer);
1591}
1592
1593/*
1594 * Enables the DMA engine.
1595 */
1596void t1_sge_start(struct sge *sge)
1597{
1598 refill_free_list(sge, &sge->freelQ[0]);
1599 refill_free_list(sge, &sge->freelQ[1]);
1600
1601 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1602 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1603 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1604
1605 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1606
1607 if (is_T2(sge->adapter))
1608 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1609}
1610
1611/*
1612 * Callback for the T2 ESPI 'stuck packet feature' workaround.
1613 */
1614static void espibug_workaround(void *data)
1615{
1616 struct adapter *adapter = (struct adapter *)data;
1617 struct sge *sge = adapter->sge;
1618
1619 if (netif_running(adapter->port[0].dev)) {
1620 struct sk_buff *skb = sge->espibug_skb;
1621
1622 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
1623
1624 if ((seop & 0xfff0fff) == 0xfff && skb) {
1625 if (!skb->cb[0]) {
1626 u8 ch_mac_addr[ETH_ALEN] =
1627 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
1628 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
1629 ch_mac_addr, ETH_ALEN);
1630 memcpy(skb->data + skb->len - 10, ch_mac_addr,
1631 ETH_ALEN);
1632 skb->cb[0] = 0xff;
1633 }
1634
1635 /* bump the reference count to avoid freeing of the
1636 * skb once the DMA has completed.
1637 */
1638 skb = skb_get(skb);
1639 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
1640 }
1641 }
1642 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1643}
1644
1645/*
1646 * Creates a t1_sge structure and returns suggested resource parameters.
1647 */
1648struct sge * __devinit t1_sge_create(struct adapter *adapter,
1649 struct sge_params *p)
1650{
1651 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
1652
1653 if (!sge)
1654 return NULL;
1655 memset(sge, 0, sizeof(*sge));
1656
1657 sge->adapter = adapter;
1658 sge->netdev = adapter->port[0].dev;
1659 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
1660 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
1661
1662 init_timer(&sge->tx_reclaim_timer);
1663 sge->tx_reclaim_timer.data = (unsigned long)sge;
1664 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
1665
1666 if (is_T2(sge->adapter)) {
1667 init_timer(&sge->espibug_timer);
1668 sge->espibug_timer.function = (void *)&espibug_workaround;
1669 sge->espibug_timer.data = (unsigned long)sge->adapter;
1670 sge->espibug_timeout = 1;
1671 }
1672
1673
1674 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
1675 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
1676 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
1677 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
1678 p->rx_coalesce_usecs = 50;
1679 p->coalesce_enable = 0;
1680 p->sample_interval_usecs = 0;
1681 p->polling = 0;
1682
1683 return sge;
1684}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 000000000000..434b25586851
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
1/*****************************************************************************
2 * *
3 * File: sge.h *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/06/21 22:10:55 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_SGE_H_
40#define _CXGB_SGE_H_
41
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <asm/byteorder.h>
45
46#ifndef IRQ_RETVAL
47#define IRQ_RETVAL(x)
48typedef void irqreturn_t;
49#endif
50
51typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
52
53struct sge_intr_counts {
54 unsigned int respQ_empty; /* # times respQ empty */
55 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
56 unsigned int freelistQ_empty; /* # times freelist empty */
57 unsigned int pkt_too_big; /* packet too large (fatal) */
58 unsigned int pkt_mismatch;
59 unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
60 unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
61 unsigned int ethernet_pkts; /* # of Ethernet packets received */
62 unsigned int offload_pkts; /* # of offload packets received */
63 unsigned int offload_bundles; /* # of offload pkt bundles delivered */
64 unsigned int pure_rsps; /* # of non-payload responses */
65 unsigned int unhandled_irqs; /* # of unhandled interrupts */
66 unsigned int tx_ipfrags;
67 unsigned int tx_reg_pkts;
68 unsigned int tx_lso_pkts;
69 unsigned int tx_do_cksum;
70};
71
72struct sge_port_stats {
73 unsigned long rx_cso_good; /* # of successful RX csum offloads */
74 unsigned long tx_cso; /* # of TX checksum offloads */
75 unsigned long vlan_xtract; /* # of VLAN tag extractions */
76	unsigned long vlan_insert;  /* # of VLAN tag insertions */
77 unsigned long tso; /* # of TSO requests */
78 unsigned long rx_drops; /* # of packets dropped due to no mem */
79};
80
81struct sk_buff;
82struct net_device;
83struct adapter;
84struct sge_params;
85struct sge;
86
87struct sge *t1_sge_create(struct adapter *, struct sge_params *);
88int t1_sge_configure(struct sge *, struct sge_params *);
89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
90void t1_sge_destroy(struct sge *);
91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
95void t1_set_vlan_accel(struct adapter *adapter, int on_off);
96void t1_sge_start(struct sge *);
97void t1_sge_stop(struct sge *);
98int t1_sge_intr_error_handler(struct sge *);
99void t1_sge_intr_enable(struct sge *);
100void t1_sge_intr_disable(struct sge *);
101void t1_sge_intr_clear(struct sge *);
102const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
103const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
104
105#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 000000000000..1ebb5d149aef
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
1/*****************************************************************************
2 * *
3 * File: subr.c *
4 * $Revision: 1.27 $ *
5 * $Date: 2005/06/22 01:08:36 $ *
6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "elmer0.h"
42#include "regs.h"
43#include "gmac.h"
44#include "cphy.h"
45#include "sge.h"
46#include "espi.h"
47
48/**
49 * t1_wait_op_done - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. Returns %0 if the operation completes and %1
59 * otherwise.
60 */
61static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
62 int attempts, int delay)
63{
64 while (1) {
65 u32 val = readl(adapter->regs + reg) & mask;
66
67 if (!!val == polarity)
68 return 0;
69 if (--attempts == 0)
70 return 1;
71 if (delay)
72 udelay(delay);
73 }
74}
75
76#define TPI_ATTEMPTS 50
77
78/*
79 * Write a register over the TPI interface (unlocked and locked versions).
80 */
81static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
82{
83 int tpi_busy;
84
85 writel(addr, adapter->regs + A_TPI_ADDR);
86 writel(value, adapter->regs + A_TPI_WR_DATA);
87 writel(F_TPIWR, adapter->regs + A_TPI_CSR);
88
89 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
90 TPI_ATTEMPTS, 3);
91 if (tpi_busy)
92 CH_ALERT("%s: TPI write to 0x%x failed\n",
93 adapter->name, addr);
94 return tpi_busy;
95}
96
97int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
98{
99 int ret;
100
101 spin_lock(&(adapter)->tpi_lock);
102 ret = __t1_tpi_write(adapter, addr, value);
103 spin_unlock(&(adapter)->tpi_lock);
104 return ret;
105}
106
107/*
108 * Read a register over the TPI interface (unlocked and locked versions).
109 */
110static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
111{
112 int tpi_busy;
113
114 writel(addr, adapter->regs + A_TPI_ADDR);
115 writel(0, adapter->regs + A_TPI_CSR);
116
117 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
118 TPI_ATTEMPTS, 3);
119 if (tpi_busy)
120 CH_ALERT("%s: TPI read from 0x%x failed\n",
121 adapter->name, addr);
122 else
123 *valp = readl(adapter->regs + A_TPI_RD_DATA);
124 return tpi_busy;
125}
126
127int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
128{
129 int ret;
130
131 spin_lock(&(adapter)->tpi_lock);
132 ret = __t1_tpi_read(adapter, addr, valp);
133 spin_unlock(&(adapter)->tpi_lock);
134 return ret;
135}
136
137/*
138 * Called when a port's link settings change to propagate the new values to the
139 * associated PHY and MAC. After performing the common tasks it invokes an
140 * OS-specific handler.
141 */
142/* static */ void link_changed(adapter_t *adapter, int port_id)
143{
144 int link_ok, speed, duplex, fc;
145 struct cphy *phy = adapter->port[port_id].phy;
146 struct link_config *lc = &adapter->port[port_id].link_config;
147
148 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
149
150 lc->speed = speed < 0 ? SPEED_INVALID : speed;
151 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
152 if (!(lc->requested_fc & PAUSE_AUTONEG))
153 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
154
155 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
156 /* Set MAC speed, duplex, and flow control to match PHY. */
157 struct cmac *mac = adapter->port[port_id].mac;
158
159 mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
160 lc->fc = (unsigned char)fc;
161 }
162 t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
163}
164
165static int t1_pci_intr_handler(adapter_t *adapter)
166{
167 u32 pcix_cause;
168
169 pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
170
171 if (pcix_cause) {
172 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
173 pcix_cause);
174 t1_fatal_err(adapter); /* PCI errors are fatal */
175 }
176 return 0;
177}
178
179
180/*
181 * Wait until Elmer's MI1 interface is ready for new operations.
182 */
183static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
184{
185 int attempts = 100, busy;
186
187 do {
188 u32 val;
189
190 __t1_tpi_read(adapter, mi1_reg, &val);
191 busy = val & F_MI1_OP_BUSY;
192 if (busy)
193 udelay(10);
194 } while (busy && --attempts);
195 if (busy)
196 CH_ALERT("%s: MDIO operation timed out\n",
197 adapter->name);
198 return busy;
199}
200
201/*
202 * MI1 MDIO initialization.
203 */
204static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
205{
206 u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
207 u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
208 V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
209
210 if (!(bi->caps & SUPPORTED_10000baseT_Full))
211 val |= V_MI1_SOF(1);
212 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
213}
214
215static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
216 int reg_addr, unsigned int *valp)
217{
218 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
219
220 spin_lock(&(adapter)->tpi_lock);
221
222 /* Write the address we want. */
223 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
224 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
225 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
226 MI1_OP_INDIRECT_ADDRESS);
227 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
228
229 /* Write the operation we want. */
230 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
231 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
232
233 /* Read the data. */
234 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
235 spin_unlock(&(adapter)->tpi_lock);
236 return 0;
237}
238
239static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
240 int reg_addr, unsigned int val)
241{
242 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
243
244 spin_lock(&(adapter)->tpi_lock);
245
246 /* Write the address we want. */
247 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
248 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
249 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
250 MI1_OP_INDIRECT_ADDRESS);
251 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
252
253 /* Write the data. */
254 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
255 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
256 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
257 spin_unlock(&(adapter)->tpi_lock);
258 return 0;
259}
260
261static struct mdio_ops mi1_mdio_ext_ops = {
262 mi1_mdio_init,
263 mi1_mdio_ext_read,
264 mi1_mdio_ext_write
265};
266
267enum {
268 CH_BRD_N110_1F,
269 CH_BRD_N210_1F,
270};
271
272static struct board_info t1_board[] = {
273
274{ CHBT_BOARD_N110, 1/*ports#*/,
275 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
276 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
277 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
278 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
279 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
280 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
281 "Chelsio N110 1x10GBaseX NIC" },
282
283{ CHBT_BOARD_N210, 1/*ports#*/,
284 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
285 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
286 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
287 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
288 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
289 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
290 "Chelsio N210 1x10GBaseX NIC" },
291
292};
293
294struct pci_device_id t1_pci_tbl[] = {
295 CH_DEVICE(7, 0, CH_BRD_N110_1F),
296 CH_DEVICE(10, 1, CH_BRD_N210_1F),
297 { 0, }
298};
299
300MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
301
302/*
303 * Return the board_info structure with a given index. Out-of-range indices
304 * return NULL.
305 */
306const struct board_info *t1_get_board_info(unsigned int board_id)
307{
308 return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
309}
310
311struct chelsio_vpd_t {
312 u32 format_version;
313 u8 serial_number[16];
314 u8 mac_base_address[6];
315 u8 pad[2]; /* make multiple-of-4 size requirement explicit */
316};
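/*
 * Size check: 4 + 16 + 6 + 2 == 28 bytes, a multiple of 4, so the
 * word-at-a-time read loop in t1_eeprom_vpd_get() covers the structure
 * exactly.
 */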
317
318#define EEPROMSIZE (8 * 1024)
319#define EEPROM_MAX_POLL 4
320
321/*
322 * Read the SEEPROM. A zero is written to the flag register when the address
323 * is written to the control register. The hardware sets the flag to one once
324 * 4 bytes have been transferred to the data register.
325 */
326int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
327{
328 int i = EEPROM_MAX_POLL;
329 u16 val;
330
331 if (addr >= EEPROMSIZE || (addr & 3))
332 return -EINVAL;
333
334 pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
335 do {
336 udelay(50);
337 pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
338 } while (!(val & F_VPD_OP_FLAG) && --i);
339
340 if (!(val & F_VPD_OP_FLAG)) {
341 CH_ERR("%s: reading EEPROM address 0x%x failed\n",
342 adapter->name, addr);
343 return -EIO;
344 }
345 pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
346 *data = le32_to_cpu(*data);
347 return 0;
348}
349
350static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
351{
352 int addr, ret = 0;
353
354 for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
355 ret = t1_seeprom_read(adapter, addr,
356 (u32 *)((u8 *)vpd + addr));
357
358 return ret;
359}
360
361/*
362 * Read a port's MAC address from the VPD ROM.
363 */
364static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
365{
366 struct chelsio_vpd_t vpd;
367
368 if (t1_eeprom_vpd_get(adapter, &vpd))
369 return 1;
370 memcpy(mac_addr, vpd.mac_base_address, 5);
371 mac_addr[5] = vpd.mac_base_address[5] + index;
372 return 0;
373}
374
375/*
376 * Set up the MAC/PHY according to the requested link settings.
377 *
378 * If the PHY can auto-negotiate first decide what to advertise, then
379 * enable/disable auto-negotiation as desired and reset.
380 *
381 * If the PHY does not auto-negotiate we just reset it.
382 *
383 * If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
384 * otherwise do it later based on the outcome of auto-negotiation.
385 */
386int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
387{
388 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
389
390 if (lc->supported & SUPPORTED_Autoneg) {
391 lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
392 if (fc) {
393 lc->advertising |= ADVERTISED_ASYM_PAUSE;
394 if (fc == (PAUSE_RX | PAUSE_TX))
395 lc->advertising |= ADVERTISED_PAUSE;
396 }
397 phy->ops->advertise(phy, lc->advertising);
398
399 if (lc->autoneg == AUTONEG_DISABLE) {
400 lc->speed = lc->requested_speed;
401 lc->duplex = lc->requested_duplex;
402 lc->fc = (unsigned char)fc;
403 mac->ops->set_speed_duplex_fc(mac, lc->speed,
404 lc->duplex, fc);
405 /* Also disables autoneg */
406 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
407 phy->ops->reset(phy, 0);
408 } else
409 phy->ops->autoneg_enable(phy); /* also resets PHY */
410 } else {
411 mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
412 lc->fc = (unsigned char)fc;
413 phy->ops->reset(phy, 0);
414 }
415 return 0;
416}
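/*
 * The advertisement logic above, spelled out: a requested_fc of
 * PAUSE_RX | PAUSE_TX advertises ASYM_PAUSE | PAUSE, a single direction
 * advertises ASYM_PAUSE alone, and no flow control advertises neither bit.
 */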
417
418/*
419 * External interrupt handler for boards using elmer0.
420 */
421int elmer0_ext_intr_handler(adapter_t *adapter)
422{
423 struct cphy *phy;
424 int phy_cause;
425 u32 cause;
426
427 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
428
429 switch (board_info(adapter)->board) {
430 case CHBT_BOARD_N210:
431 case CHBT_BOARD_N110:
432 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
433 phy = adapter->port[0].phy;
434 phy_cause = phy->ops->interrupt_handler(phy);
435 if (phy_cause & cphy_cause_link_change)
436 link_changed(adapter, 0);
437 }
438 break;
439 }
440 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
441 return 0;
442}
443
444/* Enables all interrupts. */
445void t1_interrupts_enable(adapter_t *adapter)
446{
447 unsigned int i;
448 u32 pl_intr;
449
450 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
451
452 t1_sge_intr_enable(adapter->sge);
453 if (adapter->espi) {
454 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
455 t1_espi_intr_enable(adapter->espi);
456 }
457
458 /* Enable MAC/PHY interrupts for each port. */
459 for_each_port(adapter, i) {
460 adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
461 adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
462 }
463
464 /* Enable PCIX & external chip interrupts on ASIC boards. */
465 pl_intr = readl(adapter->regs + A_PL_ENABLE);
466
467 /* PCI-X interrupts */
468 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
469 0xffffffff);
470
471 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
472 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
473 writel(pl_intr, adapter->regs + A_PL_ENABLE);
474}
475
476/* Disables all interrupts. */
477void t1_interrupts_disable(adapter_t* adapter)
478{
479 unsigned int i;
480
481 t1_sge_intr_disable(adapter->sge);
482 if (adapter->espi)
483 t1_espi_intr_disable(adapter->espi);
484
485 /* Disable MAC/PHY interrupts for each port. */
486 for_each_port(adapter, i) {
487 adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
488 adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
489 }
490
491 /* Disable PCIX & external chip interrupts. */
492 writel(0, adapter->regs + A_PL_ENABLE);
493
494 /* PCI-X interrupts */
495 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
496
497 adapter->slow_intr_mask = 0;
498}
499
500/* Clears all interrupts */
501void t1_interrupts_clear(adapter_t* adapter)
502{
503 unsigned int i;
504 u32 pl_intr;
505
506
507 t1_sge_intr_clear(adapter->sge);
508 if (adapter->espi)
509 t1_espi_intr_clear(adapter->espi);
510
511 /* Clear MAC/PHY interrupts for each port. */
512 for_each_port(adapter, i) {
513 adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
514 adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
515 }
516
517 /* Enable interrupts for external devices. */
518 pl_intr = readl(adapter->regs + A_PL_CAUSE);
519
520 writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
521 adapter->regs + A_PL_CAUSE);
522
523 /* PCI-X interrupts */
524 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
525}
526
527/*
528 * Slow path interrupt handler for ASICs.
529 */
530int t1_slow_intr_handler(adapter_t *adapter)
531{
532 u32 cause = readl(adapter->regs + A_PL_CAUSE);
533
534 cause &= adapter->slow_intr_mask;
535 if (!cause)
536 return 0;
537 if (cause & F_PL_INTR_SGE_ERR)
538 t1_sge_intr_error_handler(adapter->sge);
539 if (cause & F_PL_INTR_ESPI)
540 t1_espi_intr_handler(adapter->espi);
541 if (cause & F_PL_INTR_PCIX)
542 t1_pci_intr_handler(adapter);
543 if (cause & F_PL_INTR_EXT)
544 t1_elmer0_ext_intr(adapter);
545
546 /* Clear the interrupts just processed. */
547 writel(cause, adapter->regs + A_PL_CAUSE);
548 (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
549 return 1;
550}
551
552/* Pause deadlock avoidance parameters */
553#define DROP_MSEC 16
554#define DROP_PKTS_CNT 1
555
556static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
557{
558 u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
559
560 if (enable)
561 val |= csum_bit;
562 else
563 val &= ~csum_bit;
564 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
565}
566
567void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
568{
569 set_csum_offload(adapter, F_IP_CSUM, enable);
570}
571
572void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
573{
574 set_csum_offload(adapter, F_UDP_CSUM, enable);
575}
576
577void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
578{
579 set_csum_offload(adapter, F_TCP_CSUM, enable);
580}
581
582static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
583{
584 u32 val;
585
586 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
587 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
588 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
589 F_TP_IN_ESPI_CHECK_TCP_CSUM;
590 writel(val, adapter->regs + A_TP_IN_CONFIG);
591 writel(F_TP_OUT_CSPI_CPL |
592 F_TP_OUT_ESPI_ETHERNET |
593 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
594 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
595 adapter->regs + A_TP_OUT_CONFIG);
596
597 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
598 val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
599 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
600
601 /*
602 * Enable pause frame deadlock prevention.
603 */
604 if (is_T2(adapter)) {
605 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
606
607 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
608 V_DROP_TICKS_CNT(drop_ticks) |
609 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
610 adapter->regs + A_TP_TX_DROP_CONFIG);
611 }
612
613 writel(F_TP_RESET, adapter->regs + A_TP_RESET);
614}
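/*
 * Example: t1_init_hw_modules() passes bi->clock_core as tp_clk, so on a
 * 125MHz board drop_ticks == 16 * 125000 == 2000000 core ticks, i.e. the
 * 16ms DROP_MSEC window expressed in clock ticks.
 */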
615
616int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
617 struct adapter_params *p)
618{
619 p->chip_version = bi->chip_term;
620 if (p->chip_version == CHBT_TERM_T1 ||
621 p->chip_version == CHBT_TERM_T2) {
622 u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
623
624 val = G_TP_PC_REV(val);
625 if (val == 2)
626 p->chip_revision = TERM_T1B;
627 else if (val == 3)
628 p->chip_revision = TERM_T2;
629 else
630 return -1;
631 } else
632 return -1;
633 return 0;
634}
635
636/*
637 * Enable board components other than the Chelsio chip, such as external MAC
638 * and PHY.
639 */
640static int board_init(adapter_t *adapter, const struct board_info *bi)
641{
642 switch (bi->board) {
643 case CHBT_BOARD_N110:
644 case CHBT_BOARD_N210:
645 writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
647 break;
648 }
649 return 0;
650}
651
652/*
653 * Initialize and configure the Terminator HW modules. Note that external
654 * MAC and PHYs are initialized separately.
655 */
656int t1_init_hw_modules(adapter_t *adapter)
657{
658 int err = -EIO;
659 const struct board_info *bi = board_info(adapter);
660
661 if (!bi->clock_mc4) {
662 u32 val = readl(adapter->regs + A_MC4_CFG);
663
664 writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
665 writel(F_M_BUS_ENABLE | F_TCAM_RESET,
666 adapter->regs + A_MC5_CONFIG);
667 }
668
669 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
670 bi->espi_nports))
671 goto out_err;
672
673 t1_tp_reset(adapter, bi->clock_core);
674
675 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
676 if (err)
677 goto out_err;
678
679 err = 0;
680 out_err:
681 return err;
682}
683
684/*
685 * Determine a card's PCI mode.
686 */
687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
688{
689 static unsigned short speed_map[] = { 33, 66, 100, 133 };
690 u32 pci_mode;
691
692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
693 p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
694 p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
695 p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
696}
697
698/*
699 * Release the structures holding the SW per-Terminator-HW-module state.
700 */
701void t1_free_sw_modules(adapter_t *adapter)
702{
703 unsigned int i;
704
705 for_each_port(adapter, i) {
706 struct cmac *mac = adapter->port[i].mac;
707 struct cphy *phy = adapter->port[i].phy;
708
709 if (mac)
710 mac->ops->destroy(mac);
711 if (phy)
712 phy->ops->destroy(phy);
713 }
714
715 if (adapter->sge)
716 t1_sge_destroy(adapter->sge);
717 if (adapter->espi)
718 t1_espi_destroy(adapter->espi);
719}
720
721static void __devinit init_link_config(struct link_config *lc,
722 const struct board_info *bi)
723{
724 lc->supported = bi->caps;
725 lc->requested_speed = lc->speed = SPEED_INVALID;
726 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
727 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
728 if (lc->supported & SUPPORTED_Autoneg) {
729 lc->advertising = lc->supported;
730 lc->autoneg = AUTONEG_ENABLE;
731 lc->requested_fc |= PAUSE_AUTONEG;
732 } else {
733 lc->advertising = 0;
734 lc->autoneg = AUTONEG_DISABLE;
735 }
736}
737
738
739/*
740 * Allocate and initialize the data structures that hold the SW state of
741 * the Terminator HW modules.
742 */
743int __devinit t1_init_sw_modules(adapter_t *adapter,
744 const struct board_info *bi)
745{
746 unsigned int i;
747
748 adapter->params.brd_info = bi;
749 adapter->params.nports = bi->port_number;
750 adapter->params.stats_update_period = bi->gmac->stats_update_period;
751
752 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
753 if (!adapter->sge) {
754 CH_ERR("%s: SGE initialization failed\n",
755 adapter->name);
756 goto error;
757 }
758
759 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
760 CH_ERR("%s: ESPI initialization failed\n",
761 adapter->name);
762 goto error;
763 }
764
765 board_init(adapter, bi);
766 bi->mdio_ops->init(adapter, bi);
767 if (bi->gphy->reset)
768 bi->gphy->reset(adapter);
769 if (bi->gmac->reset)
770 bi->gmac->reset(adapter);
771
772 for_each_port(adapter, i) {
773 u8 hw_addr[6];
774 struct cmac *mac;
775 int phy_addr = bi->mdio_phybaseaddr + i;
776
777 adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
778 bi->mdio_ops);
779 if (!adapter->port[i].phy) {
780 CH_ERR("%s: PHY %d initialization failed\n",
781 adapter->name, i);
782 goto error;
783 }
784
785 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
786 if (!mac) {
787 CH_ERR("%s: MAC %d initialization failed\n",
788 adapter->name, i);
789 goto error;
790 }
791
792 /*
793 * Get the port's MAC addresses either from the EEPROM if one
794 * exists or the one hardcoded in the MAC.
795 */
796 if (vpd_macaddress_get(adapter, i, hw_addr)) {
797 CH_ERR("%s: could not read MAC address from VPD ROM\n",
798 adapter->port[i].dev->name);
799 goto error;
800 }
801 memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
802 init_link_config(&adapter->port[i].link_config, bi);
803 }
804
805 get_pci_mode(adapter, &adapter->params.pci);
806 t1_interrupts_clear(adapter);
807 return 0;
808
809 error:
810 t1_free_sw_modules(adapter);
811 return -1;
812}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..81816c2b708a
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
1/*****************************************************************************
2 * *
3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/06/22 00:17:04 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Maintainers: maintainers@chelsio.com *
25 * *
26 * Authors: PMC/SIERRA *
27 * *
28 * History: *
29 * *
30 ****************************************************************************/
31
32#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
33#define _CXGB_SUNI1x10GEXP_REGS_H_
34
35/******************************************************************************/
36/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
37/******************************************************************************/
38/* Refer to the Register Bit Masks below for the naming of each register and */
39/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit             */
40/******************************************************************************/
41
42#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
43#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
44#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
45#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
46#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
47#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
48#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
49#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
50#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
51#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
52#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
53#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
54#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
55#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
56#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
57#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
58#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
59#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
60#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
61#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
62#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
63#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
64#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
65#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
66#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
67#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
68#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
69#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
70#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
71#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
72#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
73#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
74#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
75#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
76#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
77#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
78#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
79#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
80#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
81#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
82#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
83#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
84#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
85#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
86#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
87#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
88#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
89#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
90#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
91#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
92#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
93#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
94#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
95#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
96#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
97#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
98#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
99#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
100#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
101#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
102#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
103#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
104#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
105#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
106#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
107#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
108#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
109#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
110#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
111#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
112#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
113#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
114#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
115#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
116#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
117#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
118#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
119#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
120#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
121
122/******************************************************************************/
123/* -- End register offset definitions -- */
124/******************************************************************************/
125
126/******************************************************************************/
127/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
128/******************************************************************************/
129
130/*----------------------------------------------------------------------------
131 * Register 0x0004: S/UNI-1x10GE-XP Device Status
132 * Bit 9 TOP_SXRA_EXPIRED
133 * Bit 8 TOP_MDIO_BUSY
134 * Bit 7 TOP_DTRB
135 * Bit 6 TOP_EXPIRED
136 * Bit 5 TOP_PAUSED
137 * Bit 4 TOP_PL4_ID_DOOL
138 * Bit 3 TOP_PL4_IS_DOOL
139 * Bit 2 TOP_PL4_ID_ROOL
140 * Bit 1 TOP_PL4_IS_ROOL
141 * Bit 0 TOP_PL4_OUT_ROOL
142 *----------------------------------------------------------------------------*/
143#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
144#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
145#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
146#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
147#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
148#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
149#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
150
151/*----------------------------------------------------------------------------
152 * Register 0x000E:PM3393 Global interrupt enable
153 * Bit 15 TOP_INTE
154 *----------------------------------------------------------------------------*/
155#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
156
157/*----------------------------------------------------------------------------
158 * Register 0x2040: RXXG Configuration 1
159 * Bit 15 RXXG_RXEN
160 * Bit 14 RXXG_ROCF
161 * Bit 13 RXXG_PAD_STRIP
162 * Bit 10 RXXG_PUREP
163 * Bit 9 RXXG_LONGP
164 * Bit 8 RXXG_PARF
165 * Bit 7 RXXG_FLCHK
166 * Bit 5 RXXG_PASS_CTRL
167 * Bit 3 RXXG_CRC_STRIP
168 * Bit 2-0 RXXG_MIFG
169 *----------------------------------------------------------------------------*/
170#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
171#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
172#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
173#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
174
175/*----------------------------------------------------------------------------
176 * Register 0x2070: RXXG Address Filter Control 2
177 * Bit 1 RXXG_PMODE
178 * Bit 0 RXXG_MHASH_EN
179 *----------------------------------------------------------------------------*/
180#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
181#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
182
183/*----------------------------------------------------------------------------
184 * Register 0x2100: MSTAT Control
185 * Bit 2 MSTAT_WRITE
186 * Bit 1 MSTAT_CLEAR
187 * Bit 0 MSTAT_SNAP
188 *----------------------------------------------------------------------------*/
189#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
190#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
191
192/*----------------------------------------------------------------------------
193 * Register 0x3040: TXXG Configuration Register 1
194 * Bit 15 TXXG_TXEN0
195 * Bit 13 TXXG_HOSTPAUSE
196 * Bit 12-7 TXXG_IPGT
197 * Bit 5 TXXG_32BIT_ALIGN
198 * Bit 4 TXXG_CRCEN
199 * Bit 3 TXXG_FCTX
200 * Bit 2 TXXG_FCRX
201 * Bit 1 TXXG_PADEN
202 * Bit 0 TXXG_SPRE
203 *----------------------------------------------------------------------------*/
204#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
205#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
206#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
207#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
208#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
209#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
210#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
211
212#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
213
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d0fa2448761d..25cc20e415da 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -156,7 +156,7 @@
156 156
157#define DRV_NAME "e100" 157#define DRV_NAME "e100"
158#define DRV_EXT "-NAPI" 158#define DRV_EXT "-NAPI"
159#define DRV_VERSION "3.4.8-k2"DRV_EXT 159#define DRV_VERSION "3.4.14-k2"DRV_EXT
160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
162#define PFX DRV_NAME ": " 162#define PFX DRV_NAME ": "
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
785} 785}
786 786
787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
788#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
788static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 789static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
789{ 790{
790 unsigned long flags; 791 unsigned long flags;
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
798 if(likely(!readb(&nic->csr->scb.cmd_lo))) 799 if(likely(!readb(&nic->csr->scb.cmd_lo)))
799 break; 800 break;
800 cpu_relax(); 801 cpu_relax();
801 if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1))) 802 if(unlikely(i > E100_WAIT_SCB_FAST))
802 udelay(5); 803 udelay(5);
803 } 804 }
804 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { 805 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
902 903
903static void e100_get_defaults(struct nic *nic) 904static void e100_get_defaults(struct nic *nic)
904{ 905{
905 struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; 906 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
906 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 907 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
907 908
908 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); 909 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
909 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ 910 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1006 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1007 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1007} 1008}
1008 1009
1010/********************************************************/
1011/* Micro code for 8086:1229 Rev 8 */
1012/********************************************************/
1013
1014/* Parameter values for the D101M B-step */
1015#define D101M_CPUSAVER_TIMER_DWORD 78
1016#define D101M_CPUSAVER_BUNDLE_DWORD 65
1017#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1018
1019#define D101M_B_RCVBUNDLE_UCODE \
1020{\
10210x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10220x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10230x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10240x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10250x00380438, 0x00000000, 0x00140000, 0x00380555, \
10260x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10270x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10280x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10290x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10300x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10310x00000000, 0x00000000, 0x00000000, 0x00000000, \
10320x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10330x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10340x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10350x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10360x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10370x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10380x00000000, 0x00000000, 0x00000000, 0x00000000, \
10390x00000000, 0x00000000, 0x00000000, 0x00000000, \
10400x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10410x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10420x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10430x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10440x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10450x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10460x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10470x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10480x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10490x00380559, 0x00000000, 0x00000000, 0x00000000, \
10500x00000000, 0x00000000, 0x00000000, 0x00000000, \
10510x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10520x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10530x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1054}
1055
1056/********************************************************/
1057/* Micro code for 8086:1229 Rev 9 */
1058/********************************************************/
1059
1060/* Parameter values for the D101S */
1061#define D101S_CPUSAVER_TIMER_DWORD 78
1062#define D101S_CPUSAVER_BUNDLE_DWORD 67
1063#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1064
1065#define D101S_RCVBUNDLE_UCODE \
1066{\
10670x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
10680x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
10690x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
10700x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
10710x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
10720x00308000, 0x00100610, 0x00100561, 0x000E0408, \
10730x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10740x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10750x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
10760x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
10770x00000000, 0x00000000, 0x00000000, 0x00000000, \
10780x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
10790x003A047E, 0x00044010, 0x00380819, 0x00000000, \
10800x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
10810x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
10820x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
10830x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
10840x00101313, 0x00380700, 0x00000000, 0x00000000, \
10850x00000000, 0x00000000, 0x00000000, 0x00000000, \
10860x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10870x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
10880x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
10890x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
10900x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
10910x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
10920x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
10930x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
10940x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
10950x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
10960x00000000, 0x00000000, 0x00000000, 0x00000000, \
10970x00000000, 0x00000000, 0x00000000, 0x00130831, \
10980x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
10990x00041000, 0x00010004, 0x00380700 \
1100}
1101
1102/********************************************************/
1103/* Micro code for the 8086:1229 Rev F/10 */
1104/********************************************************/
1105
1106/* Parameter values for the D102 E-step */
1107#define D102_E_CPUSAVER_TIMER_DWORD 42
1108#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1109#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1110
1111#define D102_E_RCVBUNDLE_UCODE \
1112{\
11130x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11140x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11150x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11160x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11170x00000000, 0x00000000, 0x00000000, 0x00000000, \
11180x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11190x00000000, 0x00000000, 0x00000000, 0x00000000, \
11200x00000000, 0x00000000, 0x00000000, 0x00000000, \
11210x00000000, 0x00000000, 0x00000000, 0x00000000, \
11220x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11230x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11240x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11250x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11260x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11270x00000000, 0x00000000, 0x00000000, 0x00000000, \
11280x00000000, 0x00000000, 0x00000000, 0x00000000, \
11290x00000000, 0x00000000, 0x00000000, 0x00000000, \
11300x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11310x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11320x00000000, 0x00000000, 0x00000000, 0x00000000, \
11330x00000000, 0x00000000, 0x00000000, 0x00000000, \
11340x00000000, 0x00000000, 0x00000000, 0x00000000, \
11350x00000000, 0x00000000, 0x00000000, 0x00000000, \
11360x00000000, 0x00000000, 0x00000000, 0x00000000, \
11370x00000000, 0x00000000, 0x00000000, 0x00000000, \
11380x00000000, 0x00000000, 0x00000000, 0x00000000, \
11390x00000000, 0x00000000, 0x00000000, 0x00000000, \
11400x00000000, 0x00000000, 0x00000000, 0x00000000, \
11410x00000000, 0x00000000, 0x00000000, 0x00000000, \
11420x00000000, 0x00000000, 0x00000000, 0x00000000, \
11430x00000000, 0x00000000, 0x00000000, 0x00000000, \
11440x00000000, 0x00000000, 0x00000000, 0x00000000, \
11450x00000000, 0x00000000, 0x00000000, 0x00000000, \
1146}
1147
1009static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1148static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1010{ 1149{
1011 int i; 1150/* *INDENT-OFF* */
1012 static const u32 ucode[UCODE_SIZE] = { 1151 static struct {
1013 /* NFS packets are misinterpreted as TCO packets and 1152 u32 ucode[UCODE_SIZE + 1];
1014 * incorrectly routed to the BMC over SMBus. This 1153 u8 mac;
1015 * microcode patch checks the fragmented IP bit in the 1154 u8 timer_dword;
1016 * NFS/UDP header to distinguish between NFS and TCO. */ 1155 u8 bundle_dword;
1017 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 1156 u8 min_size_dword;
1018 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, 1157 } ucode_opts[] = {
1019 0x00906EFD, 0x00900EFD, 0x00E00EF8, 1158 { D101M_B_RCVBUNDLE_UCODE,
1020 }; 1159 mac_82559_D101M,
1160 D101M_CPUSAVER_TIMER_DWORD,
1161 D101M_CPUSAVER_BUNDLE_DWORD,
1162 D101M_CPUSAVER_MIN_SIZE_DWORD },
1163 { D101S_RCVBUNDLE_UCODE,
1164 mac_82559_D101S,
1165 D101S_CPUSAVER_TIMER_DWORD,
1166 D101S_CPUSAVER_BUNDLE_DWORD,
1167 D101S_CPUSAVER_MIN_SIZE_DWORD },
1168 { D102_E_RCVBUNDLE_UCODE,
1169 mac_82551_F,
1170 D102_E_CPUSAVER_TIMER_DWORD,
1171 D102_E_CPUSAVER_BUNDLE_DWORD,
1172 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1173 { D102_E_RCVBUNDLE_UCODE,
1174 mac_82551_10,
1175 D102_E_CPUSAVER_TIMER_DWORD,
1176 D102_E_CPUSAVER_BUNDLE_DWORD,
1177 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1178 { {0}, 0, 0, 0, 0}
1179 }, *opts;
1180/* *INDENT-ON* */
1181
1182#define BUNDLESMALL 1
1183#define BUNDLEMAX 50
1184#define INTDELAY 15000
1185
1186 opts = ucode_opts;
1187
1188 /* do not load u-code for ICH devices */
1189 if (nic->flags & ich)
1190 return;
1191
1192 /* Search for ucode match against h/w rev_id */
1193 while (opts->mac) {
1194 if (nic->mac == opts->mac) {
1195 int i;
1196 u32 *ucode = opts->ucode;
1197
1198 /* Insert user-tunable settings */
1199 ucode[opts->timer_dword] &= 0xFFFF0000;
1200 ucode[opts->timer_dword] |=
1201 (u16) INTDELAY;
1202 ucode[opts->bundle_dword] &= 0xFFFF0000;
1203 ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
1204 ucode[opts->min_size_dword] &= 0xFFFF0000;
1205 ucode[opts->min_size_dword] |=
1206 (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1207
1208 for(i = 0; i < UCODE_SIZE; i++)
1209 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1210 cb->command = cpu_to_le16(cb_ucode);
1211 return;
1212 }
1213 opts++;
1214 }
1021 1215
1022 if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { 1216 cb->command = cpu_to_le16(cb_nop);
1023 for(i = 0; i < UCODE_SIZE; i++)
1024 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1025 cb->command = cpu_to_le16(cb_ucode);
1026 } else
1027 cb->command = cpu_to_le16(cb_nop);
1028} 1217}
1029 1218
1030static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1219static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
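Each microcode image above reserves three dwords whose low 16 bits carry the CPU-saver tunables; e100_load_ucode() patches them in place while preserving the opcode in the high half. The patch step, isolated into a hypothetical helper for clarity:

static void e100_patch_ucode_tunable(u32 *ucode, unsigned int dword, u16 value)
{
	/* preserve the opcode (high 16 bits), replace the operand (low 16) */
	ucode[dword] = (ucode[dword] & 0xFFFF0000) | value;
}

This is the sequence applied to timer_dword (INTDELAY), bundle_dword (BUNDLEMAX) and min_size_dword (0xFFFF or 0xFF80, depending on BUNDLESMALL).
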
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1307{ 1496{
1308 cb->command = nic->tx_command; 1497 cb->command = nic->tx_command;
1309 /* interrupt every 16 packets regardless of delay */ 1498 /* interrupt every 16 packets regardless of delay */
1310 if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i; 1499 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1500 cb->command |= cpu_to_le16(cb_i);
1311 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1501 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1312 cb->u.tcb.tcb_byte_count = 0; 1502 cb->u.tcb.tcb_byte_count = 0;
1313 cb->u.tcb.threshold = nic->tx_threshold; 1503 cb->u.tcb.threshold = nic->tx_threshold;
1314 cb->u.tcb.tbd_count = 1; 1504 cb->u.tcb.tbd_count = 1;
1315 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1505 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1316 skb->data, skb->len, PCI_DMA_TODEVICE)); 1506 skb->data, skb->len, PCI_DMA_TODEVICE));
1317 // check for mapping failure? 1507 /* check for mapping failure? */
1318 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1508 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1319} 1509}
1320 1510
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1539 /* Don't indicate if hardware indicates errors */ 1729 /* Don't indicate if hardware indicates errors */
1540 nic->net_stats.rx_dropped++; 1730 nic->net_stats.rx_dropped++;
1541 dev_kfree_skb_any(skb); 1731 dev_kfree_skb_any(skb);
1542 } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) { 1732 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1543 /* Don't indicate oversized frames */ 1733 /* Don't indicate oversized frames */
1544 nic->rx_over_length_errors++; 1734 nic->rx_over_length_errors++;
1545 nic->net_stats.rx_dropped++; 1735 nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget)
1706static void e100_netpoll(struct net_device *netdev) 1896static void e100_netpoll(struct net_device *netdev)
1707{ 1897{
1708 struct nic *nic = netdev_priv(netdev); 1898 struct nic *nic = netdev_priv(netdev);
1899
1709 e100_disable_irq(nic); 1900 e100_disable_irq(nic);
1710 e100_intr(nic->pdev->irq, netdev, NULL); 1901 e100_intr(nic->pdev->irq, netdev, NULL);
1711 e100_tx_clean(nic); 1902 e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev,
2108 } 2299 }
2109 for(i = 0; i < E100_TEST_LEN; i++) 2300 for(i = 0; i < E100_TEST_LEN; i++)
2110 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; 2301 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2302
2303 msleep_interruptible(4 * 1000);
2111} 2304}
2112 2305
2113static int e100_phys_id(struct net_device *netdev, u32 data) 2306static int e100_phys_id(struct net_device *netdev, u32 data)
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
index 000000000000..bf3440aa6c24
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1843 @@
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
35#define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37#define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39#define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41#define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46#define PHY_MAX_ADDR 32
47#define PHY_ID_ANY 0x1f
48#define MII_REG_ANY 0x1f
49
50#ifdef CONFIG_SIS190_NAPI
51#define NAPI_SUFFIX "-NAPI"
52#else
53#define NAPI_SUFFIX ""
54#endif
55
56#define DRV_VERSION "1.2" NAPI_SUFFIX
57#define DRV_NAME "sis190"
58#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59#define PFX DRV_NAME ": "
60
61#ifdef CONFIG_SIS190_NAPI
62#define sis190_rx_skb netif_receive_skb
63#define sis190_rx_quota(count, quota) min(count, quota)
64#else
65#define sis190_rx_skb netif_rx
66#define sis190_rx_quota(count, quota) count
67#endif
68
69#define MAC_ADDR_LEN 6
70
71#define NUM_TX_DESC 64 /* [8..1024] */
72#define NUM_RX_DESC 64 /* [8..8192] */
73#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75#define RX_BUF_SIZE 1536
76#define RX_BUF_MASK 0xfff8
77
78#define SIS190_REGS_SIZE 0x80
79#define SIS190_TX_TIMEOUT (6*HZ)
80#define SIS190_PHY_TIMEOUT (10*HZ)
81#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
84
85/* Enhanced PHY access register bit definitions */
86#define EhnMIIread 0x0000
87#define EhnMIIwrite 0x0020
88#define EhnMIIdataShift 16
89#define EhnMIIpmdShift 6 /* 7016 only */
90#define EhnMIIregShift 11
91#define EhnMIIreq 0x0010
92#define EhnMIInotDone 0x0010
93
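A complete MDIO transaction is encoded into a single 32-bit write to GMIIControl built from the bits above, with the register number and PHY address placed at their respective shifts. An illustrative helper (not part of the driver) building the command word for a read, cf. mdio_read() below:

static u32 sis190_mdio_read_cmd(u32 phy_id, u32 reg)
{
	return EhnMIIreq | EhnMIIread |
	       (reg << EhnMIIregShift) | (phy_id << EhnMIIpmdShift);
}

Once EhnMIInotDone clears, the 16-bit result sits in bits 31..16 (EhnMIIdataShift).
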
94/* Write/read MMIO register */
95#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98#define SIS_R8(reg) readb (ioaddr + (reg))
99#define SIS_R16(reg) readw (ioaddr + (reg))
100#define SIS_R32(reg) readl (ioaddr + (reg))
101
102#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
103
104enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
137};
138
139enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
155
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
161
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
165
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
173
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
177
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181
182 /* StationControl */
183 _1000bpsF = 0x1c00,
184 _1000bpsH = 0x0c00,
185 _100bpsF = 0x1800,
186 _100bpsH = 0x0800,
187 _10bpsF = 0x1400,
188 _10bpsH = 0x0400,
189
190 LinkStatus = 0x02, // unused
191 FullDup = 0x01, // unused
192
193 /* TBICSRBit */
194 TBILinkOK = 0x02000000, // unused
195};
196
197struct TxDesc {
198 __le32 PSize;
199 __le32 status;
200 __le32 addr;
201 __le32 size;
202};
203
204struct RxDesc {
205 __le32 PSize;
206 __le32 status;
207 __le32 addr;
208 __le32 size;
209};
210
211enum _DescStatusBit {
212 /* _Desc.status */
213 OWNbit = 0x80000000, // RXOWN/TXOWN
214 INTbit = 0x40000000, // RXINT/TXINT
215 CRCbit = 0x00020000, // CRCOFF/CRCEN
216 PADbit = 0x00010000, // PREADD/PADEN
217 /* _Desc.size */
218 RingEnd = 0x80000000,
219 /* TxDesc.status */
220 LSEN = 0x08000000, // TSO ? -- FR
221 IPCS = 0x04000000,
222 TCPCS = 0x02000000,
223 UDPCS = 0x01000000,
224 BSTEN = 0x00800000,
225 EXTEN = 0x00400000,
226 DEFEN = 0x00200000,
227 BKFEN = 0x00100000,
228 CRSEN = 0x00080000,
229 COLEN = 0x00040000,
230 THOL3 = 0x30000000,
231 THOL2 = 0x20000000,
232 THOL1 = 0x10000000,
233 THOL0 = 0x00000000,
234 /* RxDesc.status */
235 IPON = 0x20000000,
236 TCPON = 0x10000000,
237 UDPON = 0x08000000,
238 Wakup = 0x00400000,
239 Magic = 0x00200000,
240 Pause = 0x00100000,
241 DEFbit = 0x00200000,
242 BCAST = 0x000c0000,
243 MCAST = 0x00080000,
244 UCAST = 0x00040000,
245 /* RxDesc.PSize */
246 TAGON = 0x80000000,
247 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
248 ABORT = 0x00800000,
249 SHORT = 0x00400000,
250 LIMIT = 0x00200000,
251 MIIER = 0x00100000,
252 OVRUN = 0x00080000,
253 NIBON = 0x00040000,
254 COLON = 0x00020000,
255 CRCOK = 0x00010000,
256 RxSizeMask = 0x0000ffff
257 /*
258 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
259 * provide two (unused with Linux) Tx queues. No publicly
260 * available documentation, alas.
261 */
262};
263
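The Rx status word (RxDesc.PSize) alone decides a frame's fate: CRCOK must be set and no error bit may be. A compact restatement of the test that sis190_rx_pkt_err() performs further down (illustrative helper, not driver code):

static inline int sis190_rx_frame_ok(u32 psize)
{
	const u32 err = ABORT | SHORT | LIMIT | MIIER | OVRUN | NIBON | COLON;

	return (psize & CRCOK) && !(psize & err);
}
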
264enum sis190_eeprom_access_register_bits {
265 EECS = 0x00000001, // unused
266 EECLK = 0x00000002, // unused
267 EEDO = 0x00000008, // unused
268 EEDI = 0x00000004, // unused
269 EEREQ = 0x00000080,
270 EEROP = 0x00000200,
271 EEWOP = 0x00000100 // unused
272};
273
274/* EEPROM Addresses */
275enum sis190_eeprom_address {
276 EEPROMSignature = 0x00,
277 EEPROMCLK = 0x01, // unused
278 EEPROMInfo = 0x02,
279 EEPROMMACAddr = 0x03
280};
281
282struct sis190_private {
283 void __iomem *mmio_addr;
284 struct pci_dev *pci_dev;
285 struct net_device_stats stats;
286 spinlock_t lock;
287 u32 rx_buf_sz;
288 u32 cur_rx;
289 u32 cur_tx;
290 u32 dirty_rx;
291 u32 dirty_tx;
292 dma_addr_t rx_dma;
293 dma_addr_t tx_dma;
294 struct RxDesc *RxDescRing;
295 struct TxDesc *TxDescRing;
296 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
297 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
298 struct work_struct phy_task;
299 struct timer_list timer;
300 u32 msg_enable;
301 struct mii_if_info mii_if;
302 struct list_head first_phy;
303};
304
305struct sis190_phy {
306 struct list_head list;
307 int phy_id;
308 u16 id[2];
309 u16 status;
310 u8 type;
311};
312
313enum sis190_phy_type {
314 UNKNOWN = 0x00,
315 HOME = 0x01,
316 LAN = 0x02,
317 MIX = 0x03
318};
319
320static struct mii_chip_info {
321 const char *name;
322 u16 id[2];
323 unsigned int type;
324} mii_chip_table[] = {
325 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN },
326 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN },
327 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN },
328 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN },
329 { NULL, }
330};
331
332static const struct {
333 const char *name;
334 u8 version; /* depend on docs */
335 u32 RxConfigMask; /* clear the bits supported by this chip */
336} sis_chip_info[] = {
337 { DRV_NAME, 0x00, 0xff7e1880, },
338};
339
340static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
342 { 0, },
343};
344
345MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
346
347static int rx_copybreak = 200;
348
349static struct {
350 u32 msg_enable;
351} debug = { -1 };
352
353MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
354module_param(rx_copybreak, int, 0);
355MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
356module_param_named(debug, debug.msg_enable, int, 0);
357MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
358MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
359MODULE_VERSION(DRV_VERSION);
360MODULE_LICENSE("GPL");
361
362static const u32 sis190_intr_mask =
363 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
364
365/*
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64-element hash table based on the Ethernet CRC.
368 */
369static int multicast_filter_limit = 32;
370
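Each multicast address maps into that 64-bit hash via the top six bits of its Ethernet CRC, as sis190_set_rx_mode() does below. The mapping in isolation, with the helper name invented for illustration:

static int sis190_mc_hash_bit(const u8 *addr)
{
	/* top 6 CRC bits select one of the 64 hash-table slots */
	return ether_crc(ETH_ALEN, addr) >> 26;
}

The resulting bit number indexes mc_filter[bit >> 5] at position (bit & 31).
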
371static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
372{
373 unsigned int i;
374
375 SIS_W32(GMIIControl, ctl);
376
377 msleep(1);
378
379 for (i = 0; i < 100; i++) {
380 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
381 break;
382 msleep(1);
383 }
384
385 if (i > 99)
386 printk(KERN_ERR PFX "PHY command failed!\n");
387}
388
389static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
390{
391 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
392 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
393 (((u32) val) << EhnMIIdataShift));
394}
395
396static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
397{
398 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
399 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
400
401 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
402}
403
404static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
405{
406 struct sis190_private *tp = netdev_priv(dev);
407
408 mdio_write(tp->mmio_addr, phy_id, reg, val);
409}
410
411static int __mdio_read(struct net_device *dev, int phy_id, int reg)
412{
413 struct sis190_private *tp = netdev_priv(dev);
414
415 return mdio_read(tp->mmio_addr, phy_id, reg);
416}
417
418static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
419{
420 mdio_read(ioaddr, phy_id, reg);
421 return mdio_read(ioaddr, phy_id, reg);
422}
423
424static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
425{
426 u16 data = 0xffff;
427 unsigned int i;
428
429 if (!(SIS_R32(ROMControl) & 0x0002))
430 return 0;
431
432 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
433
434 for (i = 0; i < 200; i++) {
435 if (!(SIS_R32(ROMInterface) & EEREQ)) {
436 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
437 break;
438 }
439 msleep(1);
440 }
441
442 return data;
443}
444
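sis190_read_eeprom() encodes a read as EEREQ | EEROP with the word address shifted into place (reg << 10), polls until the chip deasserts EEREQ, and returns the data from the upper half of ROMInterface. Typical use, mirroring the signature check performed later during probe (sketch only):

static int sis190_eeprom_sane(void __iomem *ioaddr)
{
	u16 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	return (sig != 0x0000) && (sig != 0xffff);
}
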
445static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
446{
447 SIS_W32(IntrMask, 0x00);
448 SIS_W32(IntrStatus, 0xffffffff);
449 SIS_PCI_COMMIT();
450}
451
452static void sis190_asic_down(void __iomem *ioaddr)
453{
454 /* Stop the chip's Tx and Rx DMA processes. */
455
456 SIS_W32(TxControl, 0x1a00);
457 SIS_W32(RxControl, 0x1a00);
458
459 sis190_irq_mask_and_ack(ioaddr);
460}
461
462static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
463{
464 desc->size |= cpu_to_le32(RingEnd);
465}
466
467static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
468{
469 u32 eor = le32_to_cpu(desc->size) & RingEnd;
470
471 desc->PSize = 0x0;
472 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 wmb(); /* order PSize/size writes before handing OWNbit to the chip */
474 desc->status = cpu_to_le32(OWNbit | INTbit);
475}
476
477static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
478 u32 rx_buf_sz)
479{
480 desc->addr = cpu_to_le32(mapping);
481 sis190_give_to_asic(desc, rx_buf_sz);
482}
483
484static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
485{
486 desc->PSize = 0x0;
487 desc->addr = 0xdeadbeef;
488 desc->size &= cpu_to_le32(RingEnd);
489 wmb();
490 desc->status = 0x0;
491}
492
493static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
494 struct RxDesc *desc, u32 rx_buf_sz)
495{
496 struct sk_buff *skb;
497 dma_addr_t mapping;
498 int ret = 0;
499
500 skb = dev_alloc_skb(rx_buf_sz);
501 if (!skb)
502 goto err_out;
503
504 *sk_buff = skb;
505
506 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
507 PCI_DMA_FROMDEVICE);
508
509 sis190_map_to_asic(desc, mapping, rx_buf_sz);
510out:
511 return ret;
512
513err_out:
514 ret = -ENOMEM;
515 sis190_make_unusable_by_asic(desc);
516 goto out;
517}
518
519static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
520 u32 start, u32 end)
521{
522 u32 cur;
523
524 for (cur = start; cur < end; cur++) {
525 int ret, i = cur % NUM_RX_DESC;
526
527 if (tp->Rx_skbuff[i])
528 continue;
529
530 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
531 tp->RxDescRing + i, tp->rx_buf_sz);
532 if (ret < 0)
533 break;
534 }
535 return cur - start;
536}
537
538static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
539 struct RxDesc *desc, int rx_buf_sz)
540{
541 int ret = -1;
542
543 if (pkt_size < rx_copybreak) {
544 struct sk_buff *skb;
545
546 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
547 if (skb) {
548 skb_reserve(skb, NET_IP_ALIGN);
549 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
550 *sk_buff = skb;
551 sis190_give_to_asic(desc, rx_buf_sz);
552 ret = 0;
553 }
554 }
555 return ret;
556}
557
558static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
559{
560#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
561
562 if ((status & CRCOK) && !(status & ErrMask))
563 return 0;
564
565 if (!(status & CRCOK))
566 stats->rx_crc_errors++;
567 else if (status & OVRUN)
568 stats->rx_over_errors++;
569 else if (status & (SHORT | LIMIT))
570 stats->rx_length_errors++;
571 else if (status & (MIIER | NIBON | COLON))
572 stats->rx_frame_errors++;
573
574 stats->rx_errors++;
575 return -1;
576}
577
578static int sis190_rx_interrupt(struct net_device *dev,
579 struct sis190_private *tp, void __iomem *ioaddr)
580{
581 struct net_device_stats *stats = &tp->stats;
582 u32 rx_left, cur_rx = tp->cur_rx;
583 u32 delta, count;
584
585 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
586 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
587
588 for (; rx_left > 0; rx_left--, cur_rx++) {
589 unsigned int entry = cur_rx % NUM_RX_DESC;
590 struct RxDesc *desc = tp->RxDescRing + entry;
591 u32 status;
592
593 if (desc->status & OWNbit)
594 break;
595
596 status = le32_to_cpu(desc->PSize);
597
598 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
599 // status);
600
601 if (sis190_rx_pkt_err(status, stats) < 0)
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
603 else {
604 struct sk_buff *skb = tp->Rx_skbuff[entry];
605 int pkt_size = (status & RxSizeMask) - 4;
606 void (*pci_action)(struct pci_dev *, dma_addr_t,
607 size_t, int) = pci_dma_sync_single_for_device;
608
609 if (unlikely(pkt_size > tp->rx_buf_sz)) {
610 net_intr(tp, KERN_INFO
611 "%s: (frag) status = %08x.\n",
612 dev->name, status);
613 stats->rx_dropped++;
614 stats->rx_length_errors++;
615 sis190_give_to_asic(desc, tp->rx_buf_sz);
616 continue;
617 }
618
619 pci_dma_sync_single_for_cpu(tp->pci_dev,
620 le32_to_cpu(desc->addr), tp->rx_buf_sz,
621 PCI_DMA_FROMDEVICE);
622
623 if (sis190_try_rx_copy(&skb, pkt_size, desc,
624 tp->rx_buf_sz)) {
625 pci_action = pci_unmap_single;
626 tp->Rx_skbuff[entry] = NULL;
627 sis190_make_unusable_by_asic(desc);
628 }
629
630 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
631 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
632
633 skb->dev = dev;
634 skb_put(skb, pkt_size);
635 skb->protocol = eth_type_trans(skb, dev);
636
637 sis190_rx_skb(skb);
638
639 dev->last_rx = jiffies;
640 stats->rx_packets++;
641 stats->rx_bytes += pkt_size;
642 if ((status & BCAST) == MCAST)
643 stats->multicast++;
644 }
645 }
646 count = cur_rx - tp->cur_rx;
647 tp->cur_rx = cur_rx;
648
649 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
650 if (!delta && count && netif_msg_intr(tp))
651 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
652 tp->dirty_rx += delta;
653
654 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
655 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
656
657 return count;
658}
659
660static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
661 struct TxDesc *desc)
662{
663 unsigned int len;
664
665 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
666
667 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
668
669 memset(desc, 0x00, sizeof(*desc));
670}
671
672static void sis190_tx_interrupt(struct net_device *dev,
673 struct sis190_private *tp, void __iomem *ioaddr)
674{
675 u32 pending, dirty_tx = tp->dirty_tx;
676 /*
677 * This snapshot would be unnecessary if waking the queue too early
678 * were harmless (hint: think preempt and unclocked SMP systems).
679 */
680 unsigned int queue_stopped;
681
682 smp_rmb();
683 pending = tp->cur_tx - dirty_tx;
684 queue_stopped = (pending == NUM_TX_DESC);
685
686 for (; pending; pending--, dirty_tx++) {
687 unsigned int entry = dirty_tx % NUM_TX_DESC;
688 struct TxDesc *txd = tp->TxDescRing + entry;
689 struct sk_buff *skb;
690
691 if (le32_to_cpu(txd->status) & OWNbit)
692 break;
693
694 skb = tp->Tx_skbuff[entry];
695
696 tp->stats.tx_packets++;
697 tp->stats.tx_bytes += skb->len;
698
699 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
700 tp->Tx_skbuff[entry] = NULL;
701 dev_kfree_skb_irq(skb);
702 }
703
704 if (tp->dirty_tx != dirty_tx) {
705 tp->dirty_tx = dirty_tx;
706 smp_wmb(); /* pairs with the smp_rmb() in sis190_start_xmit() */
707 if (queue_stopped)
708 netif_wake_queue(dev);
709 }
710}
711
712/*
713 * The interrupt handler does all of the Rx thread work and cleans up after
714 * the Tx thread.
715 */
716static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
717{
718 struct net_device *dev = __dev;
719 struct sis190_private *tp = netdev_priv(dev);
720 void __iomem *ioaddr = tp->mmio_addr;
721 unsigned int handled = 0;
722 u32 status;
723
724 status = SIS_R32(IntrStatus);
725
726 if ((status == 0xffffffff) || !status)
727 goto out;
728
729 handled = 1;
730
731 if (unlikely(!netif_running(dev))) {
732 sis190_asic_down(ioaddr);
733 goto out;
734 }
735
736 SIS_W32(IntrStatus, status);
737
738 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
739
740 if (status & LinkChange) {
741 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
742 schedule_work(&tp->phy_task);
743 }
744
745 if (status & RxQInt)
746 sis190_rx_interrupt(dev, tp, ioaddr);
747
748 if (status & TxQ0Int)
749 sis190_tx_interrupt(dev, tp, ioaddr);
750out:
751 return IRQ_RETVAL(handled);
752}
753
754#ifdef CONFIG_NET_POLL_CONTROLLER
755static void sis190_netpoll(struct net_device *dev)
756{
757 struct sis190_private *tp = netdev_priv(dev);
758 struct pci_dev *pdev = tp->pci_dev;
759
760 disable_irq(pdev->irq);
761 sis190_interrupt(pdev->irq, dev, NULL);
762 enable_irq(pdev->irq);
763}
764#endif
765
766static void sis190_free_rx_skb(struct sis190_private *tp,
767 struct sk_buff **sk_buff, struct RxDesc *desc)
768{
769 struct pci_dev *pdev = tp->pci_dev;
770
771 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
772 PCI_DMA_FROMDEVICE);
773 dev_kfree_skb(*sk_buff);
774 *sk_buff = NULL;
775 sis190_make_unusable_by_asic(desc);
776}
777
778static void sis190_rx_clear(struct sis190_private *tp)
779{
780 unsigned int i;
781
782 for (i = 0; i < NUM_RX_DESC; i++) {
783 if (!tp->Rx_skbuff[i])
784 continue;
785 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
786 }
787}
788
789static void sis190_init_ring_indexes(struct sis190_private *tp)
790{
791 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
792}
793
794static int sis190_init_ring(struct net_device *dev)
795{
796 struct sis190_private *tp = netdev_priv(dev);
797
798 sis190_init_ring_indexes(tp);
799
800 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
801 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
802
803 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
804 goto err_rx_clear;
805
806 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
807
808 return 0;
809
810err_rx_clear:
811 sis190_rx_clear(tp);
812 return -ENOMEM;
813}
814
815static void sis190_set_rx_mode(struct net_device *dev)
816{
817 struct sis190_private *tp = netdev_priv(dev);
818 void __iomem *ioaddr = tp->mmio_addr;
819 unsigned long flags;
820 u32 mc_filter[2]; /* Multicast hash filter */
821 u16 rx_mode;
822
823 if (dev->flags & IFF_PROMISC) {
824 /* Unconditionally log net taps. */
825 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
826 dev->name);
827 rx_mode =
828 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
829 AcceptAllPhys;
830 mc_filter[1] = mc_filter[0] = 0xffffffff;
831 } else if ((dev->mc_count > multicast_filter_limit) ||
832 (dev->flags & IFF_ALLMULTI)) {
833 /* Too many to filter perfectly -- accept all multicasts. */
834 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
835 mc_filter[1] = mc_filter[0] = 0xffffffff;
836 } else {
837 struct dev_mc_list *mclist;
838 unsigned int i;
839
840 rx_mode = AcceptBroadcast | AcceptMyPhys;
841 mc_filter[1] = mc_filter[0] = 0;
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) {
844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast;
848 }
849 }
850
851 spin_lock_irqsave(&tp->lock, flags);
852
853 SIS_W16(RxMacControl, rx_mode | 0x2);
854 SIS_W32(RxHashTable, mc_filter[0]);
855 SIS_W32(RxHashTable + 4, mc_filter[1]);
856
857 spin_unlock_irqrestore(&tp->lock, flags);
858}
859
860static void sis190_soft_reset(void __iomem *ioaddr)
861{
862 SIS_W32(IntrControl, 0x8000);
863 SIS_PCI_COMMIT();
864 msleep(1);
865 SIS_W32(IntrControl, 0x0);
866 sis190_asic_down(ioaddr);
867 msleep(1);
868}
869
870static void sis190_hw_start(struct net_device *dev)
871{
872 struct sis190_private *tp = netdev_priv(dev);
873 void __iomem *ioaddr = tp->mmio_addr;
874
875 sis190_soft_reset(ioaddr);
876
877 SIS_W32(TxDescStartAddr, tp->tx_dma);
878 SIS_W32(RxDescStartAddr, tp->rx_dma);
879
880 SIS_W32(IntrStatus, 0xffffffff);
881 SIS_W32(IntrMask, 0x0);
882 /*
883 * Default is 100Mbps.
884 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
885 */
886 SIS_W16(StationControl, 0x1901);
887 SIS_W32(GMIIControl, 0x0);
888 SIS_W32(TxMacControl, 0x60);
889 SIS_W16(RxMacControl, 0x02);
890 SIS_W32(RxHashTable, 0x0);
891 SIS_W32(0x6c, 0x0);
892 SIS_W32(RxWolCtrl, 0x0);
893 SIS_W32(RxWolData, 0x0);
894
895 SIS_PCI_COMMIT();
896
897 sis190_set_rx_mode(dev);
898
899 /* Enable all known interrupts by setting the interrupt mask. */
900 SIS_W32(IntrMask, sis190_intr_mask);
901
902 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
903 SIS_W32(RxControl, 0x1a1d);
904
905 netif_start_queue(dev);
906}
907
908static void sis190_phy_task(void * data)
909{
910 struct net_device *dev = data;
911 struct sis190_private *tp = netdev_priv(dev);
912 void __iomem *ioaddr = tp->mmio_addr;
913 int phy_id = tp->mii_if.phy_id;
914 u16 val;
915
916 rtnl_lock();
917
918 val = mdio_read(ioaddr, phy_id, MII_BMCR);
919 if (val & BMCR_RESET) {
920 // FIXME: needlessly high ? -- FR 02/07/2005
921 mod_timer(&tp->timer, jiffies + HZ/10);
922 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
923 BMSR_ANEGCOMPLETE)) {
924 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
925 dev->name);
926 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
927 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
928 } else {
929 /* Rejoice ! */
930 struct {
931 int val;
932 const char *msg;
933 u16 ctl;
934 } reg31[] = {
935 { LPA_1000XFULL | LPA_SLCT,
936 "1000 Mbps Full Duplex",
937 0x01 | _1000bpsF },
938 { LPA_1000XHALF | LPA_SLCT,
939 "1000 Mbps Half Duplex",
940 0x01 | _1000bpsH },
941 { LPA_100FULL,
942 "100 Mbps Full Duplex",
943 0x01 | _100bpsF },
944 { LPA_100HALF,
945 "100 Mbps Half Duplex",
946 0x01 | _100bpsH },
947 { LPA_10FULL,
948 "10 Mbps Full Duplex",
949 0x01 | _10bpsF },
950 { LPA_10HALF,
951 "10 Mbps Half Duplex",
952 0x01 | _10bpsH },
953 { 0, "unknown", 0x0000 }
954 }, *p;
955 u16 adv;
956
957 val = mdio_read(ioaddr, phy_id, 0x1f);
958 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
959
960 val = mdio_read(ioaddr, phy_id, MII_LPA);
961 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
962 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
963 dev->name, val, adv);
964
965 val &= adv;
966
967 for (p = reg31; p->ctl; p++) {
968 if ((val & p->val) == p->val)
969 break;
970 }
971 if (p->ctl)
972 SIS_W16(StationControl, p->ctl);
973 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
974 p->msg);
975 netif_carrier_on(dev);
976 }
977
978 rtnl_unlock();
979}
980
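To make the reg31[] resolution in sis190_phy_task() concrete, a worked example with assumed MII register values: lpa = 0x45e1 and adv = 0x05e1 give val = 0x05e1; the first table entry wholly contained in val is LPA_100FULL, so StationControl is written 0x01 | _100bpsF = 0x1801, the value the comment in sis190_hw_start() alludes to.

static u16 sis190_example_link_ctl(void)
{
	u16 val = 0x45e1 & 0x05e1;	/* lpa & adv, assumed values */

	return (val & LPA_100FULL) ? (0x01 | _100bpsF)	/* 0x1801 */
				   : 0x0000;		/* "unknown" */
}
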
981static void sis190_phy_timer(unsigned long __opaque)
982{
983 struct net_device *dev = (struct net_device *)__opaque;
984 struct sis190_private *tp = netdev_priv(dev);
985
986 if (likely(netif_running(dev)))
987 schedule_work(&tp->phy_task);
988}
989
990static inline void sis190_delete_timer(struct net_device *dev)
991{
992 struct sis190_private *tp = netdev_priv(dev);
993
994 del_timer_sync(&tp->timer);
995}
996
997static inline void sis190_request_timer(struct net_device *dev)
998{
999 struct sis190_private *tp = netdev_priv(dev);
1000 struct timer_list *timer = &tp->timer;
1001
1002 init_timer(timer);
1003 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1004 timer->data = (unsigned long)dev;
1005 timer->function = sis190_phy_timer;
1006 add_timer(timer);
1007}
1008
1009static void sis190_set_rxbufsize(struct sis190_private *tp,
1010 struct net_device *dev)
1011{
1012 unsigned int mtu = dev->mtu;
1013
1014 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1015 /* RxDesc->size has a licence to kill the lower bits */
1016 if (tp->rx_buf_sz & 0x07) {
1017 tp->rx_buf_sz += 8;
1018 tp->rx_buf_sz &= RX_BUF_MASK;
1019 }
1020}
1021
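The rounding in sis190_set_rxbufsize() exists because the chip masks the low three bits of RxDesc->size (see RX_BUF_MASK). A worked example with an assumed MTU of 1600: 1600 + ETH_HLEN (14) + 8 = 1622, which is not a multiple of 8, so (1622 + 8) & RX_BUF_MASK = 1624 becomes the final rx_buf_sz.
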
1022static int sis190_open(struct net_device *dev)
1023{
1024 struct sis190_private *tp = netdev_priv(dev);
1025 struct pci_dev *pdev = tp->pci_dev;
1026 int rc = -ENOMEM;
1027
1028 sis190_set_rxbufsize(tp, dev);
1029
1030 /*
1031 * Rx and Tx descriptors need 256 bytes alignment.
1032 * pci_alloc_consistent() guarantees a stronger alignment.
1033 */
1034 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1035 if (!tp->TxDescRing)
1036 goto out;
1037
1038 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1039 if (!tp->RxDescRing)
1040 goto err_free_tx_0;
1041
1042 rc = sis190_init_ring(dev);
1043 if (rc < 0)
1044 goto err_free_rx_1;
1045
1046 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1047
1048 sis190_request_timer(dev);
1049
1050 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1051 if (rc < 0)
1052 goto err_release_timer_2;
1053
1054 sis190_hw_start(dev);
1055out:
1056 return rc;
1057
1058err_release_timer_2:
1059 sis190_delete_timer(dev);
1060 sis190_rx_clear(tp);
1061err_free_rx_1:
1062 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1063 tp->rx_dma);
1064err_free_tx_0:
1065 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1066 tp->tx_dma);
1067 goto out;
1068}
1069
1070static void sis190_tx_clear(struct sis190_private *tp)
1071{
1072 unsigned int i;
1073
1074 for (i = 0; i < NUM_TX_DESC; i++) {
1075 struct sk_buff *skb = tp->Tx_skbuff[i];
1076
1077 if (!skb)
1078 continue;
1079
1080 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1081 tp->Tx_skbuff[i] = NULL;
1082 dev_kfree_skb(skb);
1083
1084 tp->stats.tx_dropped++;
1085 }
1086 tp->cur_tx = tp->dirty_tx = 0;
1087}
1088
1089static void sis190_down(struct net_device *dev)
1090{
1091 struct sis190_private *tp = netdev_priv(dev);
1092 void __iomem *ioaddr = tp->mmio_addr;
1093 unsigned int poll_locked = 0;
1094
1095 sis190_delete_timer(dev);
1096
1097 netif_stop_queue(dev);
1098
1099 flush_scheduled_work();
1100
1101 do {
1102 spin_lock_irq(&tp->lock);
1103
1104 sis190_asic_down(ioaddr);
1105
1106 spin_unlock_irq(&tp->lock);
1107
1108 synchronize_irq(dev->irq);
1109
1110 if (!poll_locked) {
1111 netif_poll_disable(dev);
1112 poll_locked++;
1113 }
1114
1115 synchronize_sched();
1116
1117 } while (SIS_R32(IntrMask));
1118
1119 sis190_tx_clear(tp);
1120 sis190_rx_clear(tp);
1121}
1122
1123static int sis190_close(struct net_device *dev)
1124{
1125 struct sis190_private *tp = netdev_priv(dev);
1126 struct pci_dev *pdev = tp->pci_dev;
1127
1128 sis190_down(dev);
1129
1130 free_irq(dev->irq, dev);
1131
1132 netif_poll_enable(dev);
1133
1134 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1135 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1136
1137 tp->TxDescRing = NULL;
1138 tp->RxDescRing = NULL;
1139
1140 return 0;
1141}
1142
1143static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1144{
1145 struct sis190_private *tp = netdev_priv(dev);
1146 void __iomem *ioaddr = tp->mmio_addr;
1147 u32 len, entry, dirty_tx;
1148 struct TxDesc *desc;
1149 dma_addr_t mapping;
1150
1151 if (unlikely(skb->len < ETH_ZLEN)) {
1152 skb = skb_padto(skb, ETH_ZLEN);
1153 if (!skb) {
1154 tp->stats.tx_dropped++;
1155 goto out;
1156 }
1157 len = ETH_ZLEN;
1158 } else {
1159 len = skb->len;
1160 }
1161
1162 entry = tp->cur_tx % NUM_TX_DESC;
1163 desc = tp->TxDescRing + entry;
1164
1165 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1166 netif_stop_queue(dev);
1167 net_tx_err(tp, KERN_ERR PFX
1168 "%s: BUG! Tx Ring full when queue awake!\n",
1169 dev->name);
1170 return NETDEV_TX_BUSY;
1171 }
1172
1173 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1174
1175 tp->Tx_skbuff[entry] = skb;
1176
1177 desc->PSize = cpu_to_le32(len);
1178 desc->addr = cpu_to_le32(mapping);
1179
1180 desc->size = cpu_to_le32(len);
1181 if (entry == (NUM_TX_DESC - 1))
1182 desc->size |= cpu_to_le32(RingEnd);
1183
1184 wmb();
1185
1186 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1187
1188 tp->cur_tx++;
1189
1190 smp_wmb();
1191
1192 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1193
1194 dev->trans_start = jiffies;
1195
1196 dirty_tx = tp->dirty_tx;
1197 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1198 netif_stop_queue(dev);
1199 smp_rmb(); /* pairs with the smp_wmb() in sis190_tx_interrupt() */
1200 if (dirty_tx != tp->dirty_tx)
1201 netif_wake_queue(dev);
1202 }
1203out:
1204 return NETDEV_TX_OK;
1205}
1206
1207static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1208{
1209 struct sis190_private *tp = netdev_priv(dev);
1210
1211 return &tp->stats;
1212}
1213
1214static void sis190_free_phy(struct list_head *first_phy)
1215{
1216 struct sis190_phy *cur, *next;
1217
1218 list_for_each_entry_safe(cur, next, first_phy, list) {
1219 kfree(cur);
1220 }
1221}
1222
1223/**
1224 * sis190_default_phy - Select default PHY for sis190 mac.
1225 * @dev: the net device to probe for
1226 *
1227 * Select first detected PHY with link as default.
1228 * If none has link, select the PHY whose type is HOME as default.
1229 * If HOME doesn't exist, select LAN.
1230 */
1231static u16 sis190_default_phy(struct net_device *dev)
1232{
1233 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1234 struct sis190_private *tp = netdev_priv(dev);
1235 struct mii_if_info *mii_if = &tp->mii_if;
1236 void __iomem *ioaddr = tp->mmio_addr;
1237 u16 status;
1238
1239 phy_home = phy_default = phy_lan = NULL;
1240
1241 list_for_each_entry(phy, &tp->first_phy, list) {
1242 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1243
1244 // Link up, default PHY not yet selected, and not a ghost PHY.
1245 if ((status & BMSR_LSTATUS) &&
1246 !phy_default &&
1247 (phy->type != UNKNOWN)) {
1248 phy_default = phy;
1249 } else {
1250 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1251 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1252 status | BMCR_ANENABLE | BMCR_ISOLATE);
1253 if (phy->type == HOME)
1254 phy_home = phy;
1255 else if (phy->type == LAN)
1256 phy_lan = phy;
1257 }
1258 }
1259
1260 if (!phy_default) {
1261 if (phy_home)
1262 phy_default = phy_home;
1263 else if (phy_lan)
1264 phy_default = phy_lan;
1265 else
1266 phy_default = list_entry(&tp->first_phy,
1267 struct sis190_phy, list);
1268 }
1269
1270 if (mii_if->phy_id != phy_default->phy_id) {
1271 mii_if->phy_id = phy_default->phy_id;
1272 net_probe(tp, KERN_INFO
1273 "%s: Using transceiver at address %d as default.\n",
1274 pci_name(tp->pci_dev), mii_if->phy_id);
1275 }
1276
1277 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1278 status &= (~BMCR_ISOLATE);
1279
1280 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1281 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1282
1283 return status;
1284}
1285
1286static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1287 struct sis190_phy *phy, unsigned int phy_id,
1288 u16 mii_status)
1289{
1290 void __iomem *ioaddr = tp->mmio_addr;
1291 struct mii_chip_info *p;
1292
1293 INIT_LIST_HEAD(&phy->list);
1294 phy->status = mii_status;
1295 phy->phy_id = phy_id;
1296
1297 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1298 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1299
1300 for (p = mii_chip_table; p->type; p++) {
1301 if ((p->id[0] == phy->id[0]) &&
1302 (p->id[1] == (phy->id[1] & 0xfff0))) {
1303 break;
1304 }
1305 }
1306
1307 if (p->id[1]) {
1308 phy->type = (p->type == MIX) ?
1309 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1310 LAN : HOME) : p->type;
1311 } else
1312 phy->type = UNKNOWN;
1313
1314 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1315 pci_name(tp->pci_dev),
1316 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1317}
1318
1319/**
1320 * sis190_mii_probe - Probe MII PHY for sis190
1321 * @dev: the net device to probe for
1322 *
1323 * Search all 32 possible MII PHY addresses.
1324 * Identify and select the current PHY if one is found,
1325 * return an error if none is found.
1326 */
1327static int __devinit sis190_mii_probe(struct net_device *dev)
1328{
1329 struct sis190_private *tp = netdev_priv(dev);
1330 struct mii_if_info *mii_if = &tp->mii_if;
1331 void __iomem *ioaddr = tp->mmio_addr;
1332 int phy_id;
1333 int rc = 0;
1334
1335 INIT_LIST_HEAD(&tp->first_phy);
1336
1337 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1338 struct sis190_phy *phy;
1339 u16 status;
1340
1341 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1342
1343 // Try next mii if the current one is not accessible.
1344 if (status == 0xffff || status == 0x0000)
1345 continue;
1346
1347 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1348 if (!phy) {
1349 sis190_free_phy(&tp->first_phy);
1350 rc = -ENOMEM;
1351 goto out;
1352 }
1353
1354 sis190_init_phy(dev, tp, phy, phy_id, status);
1355
1356 list_add(&phy->list, &tp->first_phy);
1357 }
1358
1359 if (list_empty(&tp->first_phy)) {
1360 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1361 pci_name(tp->pci_dev));
1362 rc = -EIO;
1363 goto out;
1364 }
1365
1366 /* Select default PHY for mac */
1367 sis190_default_phy(dev);
1368
1369 mii_if->dev = dev;
1370 mii_if->mdio_read = __mdio_read;
1371 mii_if->mdio_write = __mdio_write;
1372 mii_if->phy_id_mask = PHY_ID_ANY;
1373 mii_if->reg_num_mask = MII_REG_ANY;
1374out:
1375 return rc;
1376}
1377
1378static void __devexit sis190_mii_remove(struct net_device *dev)
1379{
1380 struct sis190_private *tp = netdev_priv(dev);
1381
1382 sis190_free_phy(&tp->first_phy);
1383}
1384
1385static void sis190_release_board(struct pci_dev *pdev)
1386{
1387 struct net_device *dev = pci_get_drvdata(pdev);
1388 struct sis190_private *tp = netdev_priv(dev);
1389
1390 iounmap(tp->mmio_addr);
1391 pci_release_regions(pdev);
1392 pci_disable_device(pdev);
1393 free_netdev(dev);
1394}
1395
1396static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1397{
1398 struct sis190_private *tp;
1399 struct net_device *dev;
1400 void __iomem *ioaddr;
1401 int rc;
1402
1403 dev = alloc_etherdev(sizeof(*tp));
1404 if (!dev) {
1405 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1406 rc = -ENOMEM;
1407 goto err_out_0;
1408 }
1409
1410 SET_MODULE_OWNER(dev);
1411 SET_NETDEV_DEV(dev, &pdev->dev);
1412
1413 tp = netdev_priv(dev);
1414 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1415
1416 rc = pci_enable_device(pdev);
1417 if (rc < 0) {
1418 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1419 goto err_free_dev_1;
1420 }
1421
1422 rc = -ENODEV;
1423
1424 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1425 net_probe(tp, KERN_ERR "%s: region #0 is not an MMIO resource.\n",
1426 pci_name(pdev));
1427 goto err_pci_disable_2;
1428 }
1429 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1430 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1431 pci_name(pdev));
1432 goto err_pci_disable_2;
1433 }
1434
1435 rc = pci_request_regions(pdev, DRV_NAME);
1436 if (rc < 0) {
1437 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1438 pci_name(pdev));
1439 goto err_pci_disable_2;
1440 }
1441
1442 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1443 if (rc < 0) {
1444 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1445 pci_name(pdev));
1446 goto err_free_res_3;
1447 }
1448
1449 pci_set_master(pdev);
1450
1451 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1452 if (!ioaddr) {
1453 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1454 pci_name(pdev));
1455 rc = -EIO;
1456 goto err_free_res_3;
1457 }
1458
1459 tp->pci_dev = pdev;
1460 tp->mmio_addr = ioaddr;
1461
1462 sis190_irq_mask_and_ack(ioaddr);
1463
1464 sis190_soft_reset(ioaddr);
1465out:
1466 return dev;
1467
1468err_free_res_3:
1469 pci_release_regions(pdev);
1470err_pci_disable_2:
1471 pci_disable_device(pdev);
1472err_free_dev_1:
1473 free_netdev(dev);
1474err_out_0:
1475 dev = ERR_PTR(rc);
1476 goto out;
1477}
1478
1479static void sis190_tx_timeout(struct net_device *dev)
1480{
1481 struct sis190_private *tp = netdev_priv(dev);
1482 void __iomem *ioaddr = tp->mmio_addr;
1483 u8 tmp8;
1484
1485 /* Disable Tx, if not already */
1486 tmp8 = SIS_R8(TxControl);
1487 if (tmp8 & CmdTxEnb)
1488 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1489
1490
1491 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1492 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1493
1494 /* Disable interrupts by clearing the interrupt mask. */
1495 SIS_W32(IntrMask, 0x0000);
1496
1497 /* Stop a shared interrupt from scavenging while we are. */
1498 spin_lock_irq(&tp->lock);
1499 sis190_tx_clear(tp);
1500 spin_unlock_irq(&tp->lock);
1501
1502 /* ...and finally, reset everything. */
1503 sis190_hw_start(dev);
1504
1505 netif_wake_queue(dev);
1506}
1507
1508static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1509 struct net_device *dev)
1510{
1511 struct sis190_private *tp = netdev_priv(dev);
1512 void __iomem *ioaddr = tp->mmio_addr;
1513 u16 sig;
1514 int i;
1515
1516 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1517 pci_name(pdev));
1518
1519 /* Check to see if there is a sane EEPROM */
1520 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1521
1522 if ((sig == 0xffff) || (sig == 0x0000)) {
1523 net_probe(tp, KERN_INFO "%s: invalid EEPROM signature %x.\n",
1524 pci_name(pdev), sig);
1525 return -EIO;
1526 }
1527
1528 /* Get MAC address from EEPROM */
1529 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1530 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1531
1532 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1533 }
1534
1535 return 0;
1536}
1537
1538/**
1539 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1540 * @pdev: PCI device
1541 * @dev: network device to get address for
1542 *
1543 * The SiS965 model stores the MAC address in APC CMOS RAM,
1544 * which is accessed through the ISA bridge.
1545 * The MAC address is read into @dev->dev_addr.
1546 */
1547static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1548 struct net_device *dev)
1549{
1550 struct sis190_private *tp = netdev_priv(dev);
1551 struct pci_dev *isa_bridge;
1552 u8 reg, tmp8;
1553 int i;
1554
1555 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1556 pci_name(pdev));
1557
1558 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1559 if (!isa_bridge) {
1560 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1561 pci_name(pdev));
1562 return -EIO;
1563 }
1564
1565 /* Enable port 78h & 79h to access APC Registers. */
1566 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1567 reg = (tmp8 & ~0x02);
1568 pci_write_config_byte(isa_bridge, 0x48, reg);
1569 udelay(50);
1570 pci_read_config_byte(isa_bridge, 0x48, &reg);
1571
1572 for (i = 0; i < MAC_ADDR_LEN; i++) {
1573 outb(0x9 + i, 0x78);
1574 dev->dev_addr[i] = inb(0x79);
1575 }
1576
1577 outb(0x12, 0x78);
1578 reg = inb(0x79);
1579
1580 /* Restore the value to ISA Bridge */
1581 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1582 pci_dev_put(isa_bridge);
1583
1584 return 0;
1585}
1586
1587/**
1588 * sis190_init_rxfilter - Initialize the Rx filter
1589 * @dev: network device to initialize
1590 *
1591 * Set receive filter address to our MAC address
1592 * and enable packet filtering.
1593 */
1594static inline void sis190_init_rxfilter(struct net_device *dev)
1595{
1596 struct sis190_private *tp = netdev_priv(dev);
1597 void __iomem *ioaddr = tp->mmio_addr;
1598 u16 ctl;
1599 int i;
1600
1601 ctl = SIS_R16(RxMacControl);
1602 /*
1603 * Disable packet filtering before setting filter.
1604 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1605 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1606 */
1607 SIS_W16(RxMacControl, ctl & ~0x0f00);
1608
1609 for (i = 0; i < MAC_ADDR_LEN; i++)
1610 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1611
1612 SIS_W16(RxMacControl, ctl);
1613 SIS_PCI_COMMIT();
1614}
1615
1616static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1617{
1618 u8 from;
1619
1620 pci_read_config_byte(pdev, 0x73, &from);
1621
1622 return (from & 0x00000001) ?
1623 sis190_get_mac_addr_from_apc(pdev, dev) :
1624 sis190_get_mac_addr_from_eeprom(pdev, dev);
1625}
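/*
 * Note: PCI config register 0x73, bit 0 apparently selects where the
 * chip keeps its MAC address: APC CMOS RAM (SiS965 boards) when the bit
 * is set, the external EEPROM otherwise.
 */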
1626
1627static void sis190_set_speed_auto(struct net_device *dev)
1628{
1629 struct sis190_private *tp = netdev_priv(dev);
1630 void __iomem *ioaddr = tp->mmio_addr;
1631 int phy_id = tp->mii_if.phy_id;
1632 int val;
1633
1634 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1635
1636 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1637
1638 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1639 // unchanged.
1640 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1641 ADVERTISE_100FULL | ADVERTISE_10FULL |
1642 ADVERTISE_100HALF | ADVERTISE_10HALF);
1643
1644 // Enable 1000 Full Mode.
1645 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1646
1647 // Enable auto-negotiation and restart auto-negotiation.
1648 mdio_write(ioaddr, phy_id, MII_BMCR,
1649 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1650}
1651
1652static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1653{
1654 struct sis190_private *tp = netdev_priv(dev);
1655
1656 return mii_ethtool_gset(&tp->mii_if, cmd);
1657}
1658
1659static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1660{
1661 struct sis190_private *tp = netdev_priv(dev);
1662
1663 return mii_ethtool_sset(&tp->mii_if, cmd);
1664}
1665
1666static void sis190_get_drvinfo(struct net_device *dev,
1667 struct ethtool_drvinfo *info)
1668{
1669 struct sis190_private *tp = netdev_priv(dev);
1670
1671 strcpy(info->driver, DRV_NAME);
1672 strcpy(info->version, DRV_VERSION);
1673 strcpy(info->bus_info, pci_name(tp->pci_dev));
1674}
1675
1676static int sis190_get_regs_len(struct net_device *dev)
1677{
1678 return SIS190_REGS_SIZE;
1679}
1680
1681static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1682 void *p)
1683{
1684 struct sis190_private *tp = netdev_priv(dev);
1685 unsigned long flags;
1686
1687 if (regs->len > SIS190_REGS_SIZE)
1688 regs->len = SIS190_REGS_SIZE;
1689
1690 spin_lock_irqsave(&tp->lock, flags);
1691 memcpy_fromio(p, tp->mmio_addr, regs->len);
1692 spin_unlock_irqrestore(&tp->lock, flags);
1693}
1694
1695static int sis190_nway_reset(struct net_device *dev)
1696{
1697 struct sis190_private *tp = netdev_priv(dev);
1698
1699 return mii_nway_restart(&tp->mii_if);
1700}
1701
1702static u32 sis190_get_msglevel(struct net_device *dev)
1703{
1704 struct sis190_private *tp = netdev_priv(dev);
1705
1706 return tp->msg_enable;
1707}
1708
1709static void sis190_set_msglevel(struct net_device *dev, u32 value)
1710{
1711 struct sis190_private *tp = netdev_priv(dev);
1712
1713 tp->msg_enable = value;
1714}
1715
1716static struct ethtool_ops sis190_ethtool_ops = {
1717 .get_settings = sis190_get_settings,
1718 .set_settings = sis190_set_settings,
1719 .get_drvinfo = sis190_get_drvinfo,
1720 .get_regs_len = sis190_get_regs_len,
1721 .get_regs = sis190_get_regs,
1722 .get_link = ethtool_op_get_link,
1723 .get_msglevel = sis190_get_msglevel,
1724 .set_msglevel = sis190_set_msglevel,
1725 .nway_reset = sis190_nway_reset,
1726};
1727
1728static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1729{
1730 struct sis190_private *tp = netdev_priv(dev);
1731
1732 return !netif_running(dev) ? -EINVAL :
1733 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1734}
1735
1736static int __devinit sis190_init_one(struct pci_dev *pdev,
1737 const struct pci_device_id *ent)
1738{
1739 static int printed_version = 0;
1740 struct sis190_private *tp;
1741 struct net_device *dev;
1742 void __iomem *ioaddr;
1743 int rc;
1744
1745 if (!printed_version) {
1746 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1747 printed_version = 1;
1748 }
1749
1750 dev = sis190_init_board(pdev);
1751 if (IS_ERR(dev)) {
1752 rc = PTR_ERR(dev);
1753 goto out;
1754 }
1755
1756 tp = netdev_priv(dev);
1757 ioaddr = tp->mmio_addr;
1758
1759 rc = sis190_get_mac_addr(pdev, dev);
1760 if (rc < 0)
1761 goto err_release_board;
1762
1763 sis190_init_rxfilter(dev);
1764
1765 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1766
1767 dev->open = sis190_open;
1768 dev->stop = sis190_close;
1769 dev->do_ioctl = sis190_ioctl;
1770 dev->get_stats = sis190_get_stats;
1771 dev->tx_timeout = sis190_tx_timeout;
1772 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1773 dev->hard_start_xmit = sis190_start_xmit;
1774#ifdef CONFIG_NET_POLL_CONTROLLER
1775 dev->poll_controller = sis190_netpoll;
1776#endif
1777 dev->set_multicast_list = sis190_set_rx_mode;
1778 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1779 dev->irq = pdev->irq;
1780 dev->base_addr = (unsigned long) 0xdead;
1781
1782 spin_lock_init(&tp->lock);
1783
1784 rc = sis190_mii_probe(dev);
1785 if (rc < 0)
1786 goto err_release_board;
1787
1788 rc = register_netdev(dev);
1789 if (rc < 0)
1790 goto err_remove_mii;
1791
1792 pci_set_drvdata(pdev, dev);
1793
1794 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1795 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1796 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1797 ioaddr, dev->irq,
1798 dev->dev_addr[0], dev->dev_addr[1],
1799 dev->dev_addr[2], dev->dev_addr[3],
1800 dev->dev_addr[4], dev->dev_addr[5]);
1801
1802 netif_carrier_off(dev);
1803
1804 sis190_set_speed_auto(dev);
1805out:
1806 return rc;
1807
1808err_remove_mii:
1809 sis190_mii_remove(dev);
1810err_release_board:
1811 sis190_release_board(pdev);
1812 goto out;
1813}
1814
1815static void __devexit sis190_remove_one(struct pci_dev *pdev)
1816{
1817 struct net_device *dev = pci_get_drvdata(pdev);
1818
1819 sis190_mii_remove(dev);
1820 unregister_netdev(dev);
1821 sis190_release_board(pdev);
1822 pci_set_drvdata(pdev, NULL);
1823}
1824
1825static struct pci_driver sis190_pci_driver = {
1826 .name = DRV_NAME,
1827 .id_table = sis190_pci_tbl,
1828 .probe = sis190_init_one,
1829 .remove = __devexit_p(sis190_remove_one),
1830};
1831
1832static int __init sis190_init_module(void)
1833{
1834 return pci_module_init(&sis190_pci_driver);
1835}
1836
1837static void __exit sis190_cleanup_module(void)
1838{
1839 pci_unregister_driver(&sis190_pci_driver);
1840}
1841
1842module_init(sis190_init_module);
1843module_exit(sis190_cleanup_module);
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index e2cdaf876201..8c9634a98c11 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -135,6 +135,18 @@ config DM9102
135 <file:Documentation/networking/net-modules.txt>. The module will 135 <file:Documentation/networking/net-modules.txt>. The module will
136 be called dmfe. 136 be called dmfe.
137 137
138config ULI526X
139 tristate "ULi M526x controller support"
140 depends on NET_TULIP && PCI
141 select CRC32
142 ---help---
143 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
144 (<http://www.uli.com.tw/>).
145
146 To compile this driver as a module, choose M here and read
147 <file:Documentation/networking/net-modules.txt>. The module will
148 be called uli526x.
149
138config PCMCIA_XIRCOM 150config PCMCIA_XIRCOM
139 tristate "Xircom CardBus support (new driver)" 151 tristate "Xircom CardBus support (new driver)"
140 depends on NET_TULIP && CARDBUS 152 depends on NET_TULIP && CARDBUS
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 8bb9b4683979..451090d6fcca 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840) += winbond-840.o
9obj-$(CONFIG_DE2104X) += de2104x.o 9obj-$(CONFIG_DE2104X) += de2104x.o
10obj-$(CONFIG_TULIP) += tulip.o 10obj-$(CONFIG_TULIP) += tulip.o
11obj-$(CONFIG_DE4X5) += de4x5.o 11obj-$(CONFIG_DE4X5) += de4x5.o
12obj-$(CONFIG_ULI526X) += uli526x.o
12 13
13# Declare multi-part drivers. 14# Declare multi-part drivers.
14 15
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e26c31f944bf..f53396fe79c9 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
81 return retval & 0xffff; 81 return retval & 0xffff;
82 } 82 }
83 83
84 if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
85 int value;
86 int i = 1000;
87
88 value = ioread32(ioaddr + CSR9);
89 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
90
91 value = (phy_id << 21) | (location << 16) | 0x08000000;
92 iowrite32(value, ioaddr + CSR10);
93
94 while(--i > 0) {
95 mdio_delay();
96 if(ioread32(ioaddr + CSR10) & 0x10000000)
97 break;
98 }
99 retval = ioread32(ioaddr + CSR10);
100 spin_unlock_irqrestore(&tp->mii_lock, flags);
101 return retval & 0xFFFF;
102 }
103 /* Establish sync by sending at least 32 logic ones. */ 84 /* Establish sync by sending at least 32 logic ones. */
104 for (i = 32; i >= 0; i--) { 85 for (i = 32; i >= 0; i--) {
105 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); 86 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
159 spin_unlock_irqrestore(&tp->mii_lock, flags); 140 spin_unlock_irqrestore(&tp->mii_lock, flags);
160 return; 141 return;
161 } 142 }
162 if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
163 int value;
164 int i = 1000;
165
166 value = ioread32(ioaddr + CSR9);
167 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
168
169 value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
170 iowrite32(value, ioaddr + CSR10);
171
172 while(--i > 0) {
173 if (ioread32(ioaddr + CSR10) & 0x10000000)
174 break;
175 }
176 spin_unlock_irqrestore(&tp->mii_lock, flags);
177 return;
178 }
179 143
180 /* Establish sync by sending 32 logic ones. */ 144 /* Establish sync by sending 32 logic ones. */
181 for (i = 32; i >= 0; i--) { 145 for (i = 32; i >= 0; i--) {
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 691568283553..e058a9fbfe88 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
39 case MX98713: 39 case MX98713:
40 case COMPEX9881: 40 case COMPEX9881:
41 case DM910X: 41 case DM910X:
42 case ULI526X:
43 default: { 42 default: {
44 struct medialeaf *mleaf; 43 struct medialeaf *mleaf;
45 unsigned char *p; 44 unsigned char *p;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 20346d847d9e..05d2d96f7be2 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -88,7 +88,6 @@ enum chips {
88 I21145, 88 I21145,
89 DM910X, 89 DM910X,
90 CONEXANT, 90 CONEXANT,
91 ULI526X
92}; 91};
93 92
94 93
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
482 481
483static inline void tulip_restart_rxtx(struct tulip_private *tp) 482static inline void tulip_restart_rxtx(struct tulip_private *tp)
484{ 483{
485 if(!(tp->chip_id == ULI526X && 484 tulip_stop_rxtx(tp);
486 (tp->revision == 0x40 || tp->revision == 0x50))) { 485 udelay(5);
487 tulip_stop_rxtx(tp);
488 udelay(5);
489 }
490 tulip_start_rxtx(tp); 486 tulip_start_rxtx(tp);
491} 487}
492 488
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d45d8f56e5b4..05da5bea564c 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
199 { "Conexant LANfinity", 256, 0x0001ebef, 199 { "Conexant LANfinity", 256, 0x0001ebef,
200 HAS_MII | HAS_ACPI, tulip_timer }, 200 HAS_MII | HAS_ACPI, tulip_timer },
201 201
202 /* ULi526X */
203 { "ULi M5261/M5263", 128, 0x0001ebef,
204 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
205}; 202};
206 203
207 204
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tbl[] = {
239 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 236 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 237 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 238 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
243 { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
244 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 239 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
245 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 240 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
246 { } /* terminate list */ 241 { } /* terminate list */
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_device *dev)
522 dev->name); 517 dev->name);
523 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 518 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
524 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 519 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
525 || tp->chip_id == DM910X || tp->chip_id == ULI526X) { 520 || tp->chip_id == DM910X) {
526 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " 521 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
527 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 522 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
528 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 523 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_device *dev)
1103 entry = tp->cur_tx++ % TX_RING_SIZE; 1098 entry = tp->cur_tx++ % TX_RING_SIZE;
1104 1099
1105 if (entry != 0) { 1100 if (entry != 0) {
1106 /* Avoid a chip errata by prefixing a dummy entry. Don't do 1101 /* Avoid a chip errata by prefixing a dummy entry. */
1107 this on the ULI526X as it triggers a different problem */ 1102 tp->tx_buffers[entry].skb = NULL;
1108 if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) { 1103 tp->tx_buffers[entry].mapping = 0;
1109 tp->tx_buffers[entry].skb = NULL; 1104 tp->tx_ring[entry].length =
1110 tp->tx_buffers[entry].mapping = 0; 1105 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1111 tp->tx_ring[entry].length = 1106 tp->tx_ring[entry].buffer1 = 0;
1112 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; 1107 /* Must set DescOwned later to avoid race with chip */
1113 tp->tx_ring[entry].buffer1 = 0; 1108 dummy = entry;
1114 /* Must set DescOwned later to avoid race with chip */ 1109 entry = tp->cur_tx++ % TX_RING_SIZE;
1115 dummy = entry; 1110
1116 entry = tp->cur_tx++ % TX_RING_SIZE;
1117 }
1118 } 1111 }
1119 1112
1120 tp->tx_buffers[entry].skb = NULL; 1113 tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1235{ 1228{
1236 if (pdev->vendor == 0x1282 && pdev->device == 0x9102) 1229 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1237 return 1; 1230 return 1;
1238 if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
1239 return 1;
1240 if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
1241 return 1;
1242 return 0; 1231 return 0;
1243} 1232}
1244 1233
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1680 switch (chip_idx) { 1669 switch (chip_idx) {
1681 case DC21140: 1670 case DC21140:
1682 case DM910X: 1671 case DM910X:
1683 case ULI526X:
1684 default: 1672 default:
1685 if (tp->mtable) 1673 if (tp->mtable)
1686 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); 1674 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644
index 000000000000..5ae22b7bc5ca
--- /dev/null
+++ b/drivers/net/tulip/uli526x.c
@@ -0,0 +1,1749 @@
1/*
2 This program is free software; you can redistribute it and/or
3 modify it under the terms of the GNU General Public License
4 as published by the Free Software Foundation; either version 2
5 of the License, or (at your option) any later version.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12
13*/
14
15#define DRV_NAME "uli526x"
16#define DRV_VERSION "0.9.3"
17#define DRV_RELDATE "2005-7-29"
18
19#include <linux/module.h>
20
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/timer.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/delay.h>
36#include <linux/spinlock.h>
37
38#include <asm/processor.h>
39#include <asm/bitops.h>
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/uaccess.h>
43
44
45/* Board/System/Debug information/definition ---------------- */
46#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
47#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/
48
49#define ULI526X_IO_SIZE 0x100
50#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */
51#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */
52#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
53#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
54#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
55#define TX_BUF_ALLOC 0x600
56#define RX_ALLOC_SIZE 0x620
57#define ULI526X_RESET 1
58#define CR0_DEFAULT 0
59#define CR6_DEFAULT 0x22200000
60#define CR7_DEFAULT 0x180c1
61#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
62#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
63#define MAX_PACKET_SIZE 1514
64#define ULI5261_MAX_MULTICAST 14
65#define RX_COPY_SIZE 100
66#define MAX_CHECK_PACKET 0x8000
67
68#define ULI526X_10MHF 0
69#define ULI526X_100MHF 1
70#define ULI526X_10MFD 4
71#define ULI526X_100MFD 5
72#define ULI526X_AUTO 8
73
74#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */
75#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */
76#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */
77#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */
78#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */
79#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */
80
81#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
 82#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out: 8 s */
 83#define ULI526X_TX_KICK (4*HZ/2) /* tx packet kick-out: 2 s */
84
 85#define ULI526X_DBUG(dbug_now, msg, value) do { if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)); } while (0)
86
 87#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change speed to %s Mbps %s duplex\n", (mode) & 1 ? "100" : "10", (mode) & 4 ? "full" : "half")
88
89
90/* CR9 definition: SROM/MII */
91#define CR9_SROM_READ 0x4800
92#define CR9_SRCS 0x1
93#define CR9_SRCLK 0x2
94#define CR9_CRDOUT 0x8
95#define SROM_DATA_0 0x0
96#define SROM_DATA_1 0x4
97#define PHY_DATA_1 0x20000
98#define PHY_DATA_0 0x00000
99#define MDCLKH 0x10000
100
101#define PHY_POWER_DOWN 0x800
102
103#define SROM_V41_CODE 0x14
104
 105#define SROM_CLK_WRITE(data, ioaddr) do { \
 106 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
 107 udelay(5); \
 108 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
 109 udelay(5); \
 110 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
 111 udelay(5); } while (0)
112
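/*
 * SROM_CLK_WRITE drives one full clock cycle on what looks like a
 * three-wire (93C46-style) serial EEPROM interface hanging off CR9:
 * the data bit is presented with chip select asserted, SRCLK is pulsed
 * high, then dropped again, with a 5 us settle time at each step.
 */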
113/* Structure/enum declaration ------------------------------- */
114struct tx_desc {
115 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
116 char *tx_buf_ptr; /* Data for us */
117 struct tx_desc *next_tx_desc;
118} __attribute__(( aligned(32) ));
119
120struct rx_desc {
121 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
122 struct sk_buff *rx_skb_ptr; /* Data for us */
123 struct rx_desc *next_rx_desc;
124} __attribute__(( aligned(32) ));
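/*
 * In both descriptor layouts above, only the first four u32 words are
 * seen by the hardware; the trailing pointers are driver bookkeeping
 * ("Data for us"). The aligned(32) attribute keeps each hardware
 * descriptor from straddling a 32-byte boundary.
 */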
125
126struct uli526x_board_info {
127 u32 chip_id; /* Chip vendor/Device ID */
128 struct net_device *next_dev; /* next device */
129 struct pci_dev *pdev; /* PCI device */
130 spinlock_t lock;
131
132 long ioaddr; /* I/O base address */
133 u32 cr0_data;
134 u32 cr5_data;
135 u32 cr6_data;
136 u32 cr7_data;
137 u32 cr15_data;
138
139 /* pointer for memory physical address */
140 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
141 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
142 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
143 dma_addr_t first_tx_desc_dma;
144 dma_addr_t first_rx_desc_dma;
145
146 /* descriptor pointer */
147 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
148 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
149 unsigned char *desc_pool_ptr; /* descriptor pool memory */
150 struct tx_desc *first_tx_desc;
151 struct tx_desc *tx_insert_ptr;
152 struct tx_desc *tx_remove_ptr;
153 struct rx_desc *first_rx_desc;
154 struct rx_desc *rx_insert_ptr;
 155 struct rx_desc *rx_ready_ptr; /* next arrived packet pointer */
156 unsigned long tx_packet_cnt; /* transmitted packet count */
157 unsigned long rx_avail_cnt; /* available rx descriptor count */
 158 unsigned long interval_rx_cnt; /* rx packets received per timer interval */
159
160 u16 dbug_cnt;
161 u16 NIC_capability; /* NIC media capability */
162 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
163
164 u8 media_mode; /* user specify media mode */
 165 u8 op_mode; /* actual working media mode */
166 u8 phy_addr;
 167 u8 link_failed; /* set when the link has failed */
168 u8 wait_reset; /* Hardware failed, need to reset */
169 struct timer_list timer;
170
171 /* System defined statistic counter */
172 struct net_device_stats stats;
173
174 /* Driver defined statistic counter */
175 unsigned long tx_fifo_underrun;
176 unsigned long tx_loss_carrier;
177 unsigned long tx_no_carrier;
178 unsigned long tx_late_collision;
179 unsigned long tx_excessive_collision;
180 unsigned long tx_jabber_timeout;
181 unsigned long reset_count;
182 unsigned long reset_cr8;
183 unsigned long reset_fatal;
184 unsigned long reset_TXtimeout;
185
186 /* NIC SROM data */
187 unsigned char srom[128];
188 u8 init;
189};
190
191enum uli526x_offsets {
192 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
193 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
194 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
195 DCR15 = 0x78
196};
197
198enum uli526x_CR6_bits {
199 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
200 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
201 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
202};
203
204/* Global variable declaration ----------------------------- */
205static int __devinitdata printed_version;
206static char version[] __devinitdata =
207 KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
208 DRV_VERSION " (" DRV_RELDATE ")\n";
209
210static int uli526x_debug;
211static unsigned char uli526x_media_mode = ULI526X_AUTO;
212static u32 uli526x_cr6_user_set;
213
214/* For module input parameter */
215static int debug;
216static u32 cr6set;
217static unsigned char mode = 8;
218
219/* function declaration ------------------------------------- */
220static int uli526x_open(struct net_device *);
221static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
222static int uli526x_stop(struct net_device *);
223static struct net_device_stats * uli526x_get_stats(struct net_device *);
224static void uli526x_set_filter_mode(struct net_device *);
225static struct ethtool_ops netdev_ethtool_ops;
226static u16 read_srom_word(long, int);
227static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *);
228static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
229static void allocate_rx_buffer(struct uli526x_board_info *);
230static void update_cr6(u32, unsigned long);
231static void send_filter_frame(struct net_device *, int);
232static u16 phy_read(unsigned long, u8, u8, u32);
233static u16 phy_readby_cr10(unsigned long, u8, u8);
234static void phy_write(unsigned long, u8, u8, u16, u32);
235static void phy_writeby_cr10(unsigned long, u8, u8, u16);
236static void phy_write_1bit(unsigned long, u32, u32);
237static u16 phy_read_1bit(unsigned long, u32);
238static u8 uli526x_sense_speed(struct uli526x_board_info *);
239static void uli526x_process_mode(struct uli526x_board_info *);
240static void uli526x_timer(unsigned long);
241static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
242static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
243static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
244static void uli526x_dynamic_reset(struct net_device *);
245static void uli526x_free_rxbuffer(struct uli526x_board_info *);
246static void uli526x_init(struct net_device *);
247static void uli526x_set_phyxcer(struct uli526x_board_info *);
248
249/* ULI526X network board routine ---------------------------- */
250
251/*
252 * Search ULI526X board, allocate space and register it
253 */
254
255static int __devinit uli526x_init_one (struct pci_dev *pdev,
256 const struct pci_device_id *ent)
257{
258 struct uli526x_board_info *db; /* board information structure */
259 struct net_device *dev;
260 int i, err;
261
262 ULI526X_DBUG(0, "uli526x_init_one()", 0);
263
264 if (!printed_version++)
265 printk(version);
266
267 /* Init network device */
268 dev = alloc_etherdev(sizeof(*db));
269 if (dev == NULL)
270 return -ENOMEM;
271 SET_MODULE_OWNER(dev);
272 SET_NETDEV_DEV(dev, &pdev->dev);
273
274 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
275 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
276 err = -ENODEV;
277 goto err_out_free;
278 }
279
280 /* Enable Master/IO access, Disable memory access */
281 err = pci_enable_device(pdev);
282 if (err)
283 goto err_out_free;
284
285 if (!pci_resource_start(pdev, 0)) {
286 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
287 err = -ENODEV;
288 goto err_out_disable;
289 }
290
291 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
292 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
293 err = -ENODEV;
294 goto err_out_disable;
295 }
296
297 if (pci_request_regions(pdev, DRV_NAME)) {
298 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
299 err = -ENODEV;
300 goto err_out_disable;
301 }
302
303 /* Init system & device */
304 db = netdev_priv(dev);
305
306 /* Allocate Tx/Rx descriptor memory */
307 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
308 if(db->desc_pool_ptr == NULL)
309 {
310 err = -ENOMEM;
311 goto err_out_nomem;
312 }
313 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
314 if(db->buf_pool_ptr == NULL)
315 {
316 err = -ENOMEM;
317 goto err_out_nomem;
318 }
319
320 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
321 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
322 db->buf_pool_start = db->buf_pool_ptr;
323 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
324
325 db->chip_id = ent->driver_data;
326 db->ioaddr = pci_resource_start(pdev, 0);
327
328 db->pdev = pdev;
329 db->init = 1;
330
331 dev->base_addr = db->ioaddr;
332 dev->irq = pdev->irq;
333 pci_set_drvdata(pdev, dev);
334
335 /* Register some necessary functions */
336 dev->open = &uli526x_open;
337 dev->hard_start_xmit = &uli526x_start_xmit;
338 dev->stop = &uli526x_stop;
339 dev->get_stats = &uli526x_get_stats;
340 dev->set_multicast_list = &uli526x_set_filter_mode;
341 dev->ethtool_ops = &netdev_ethtool_ops;
342 spin_lock_init(&db->lock);
343
344
345 /* read 64 word srom data */
346 for (i = 0; i < 64; i++)
347 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
348
349 /* Set Node address */
350 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
351 {
352 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode
 353 outl(0x1c0, db->ioaddr + DCR13); //Reset diagnostic pointer port
354 outl(0, db->ioaddr + DCR14); //Clear reset port
355 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer
356 outl(0, db->ioaddr + DCR14); //Clear reset port
357 outl(0, db->ioaddr + DCR13); //Clear CR13
358 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port
359 //Read MAC address from CR14
360 for (i = 0; i < 6; i++)
361 dev->dev_addr[i] = inl(db->ioaddr + DCR14);
362 //Read end
363 outl(0, db->ioaddr + DCR13); //Clear CR13
364 outl(0, db->ioaddr + DCR0); //Clear CR0
365 udelay(10);
366 }
 367 else /* SROM present */
368 {
369 for (i = 0; i < 6; i++)
370 dev->dev_addr[i] = db->srom[20 + i];
371 }
372 err = register_netdev (dev);
373 if (err)
374 goto err_out_res;
375
376 printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
377
378 for (i = 0; i < 6; i++)
379 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
380 printk(", irq %d.\n", dev->irq);
381
382 pci_set_master(pdev);
383
384 return 0;
385
386err_out_res:
387 pci_release_regions(pdev);
388err_out_nomem:
389 if(db->desc_pool_ptr)
390 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
391 db->desc_pool_ptr, db->desc_pool_dma_ptr);
392
393 if(db->buf_pool_ptr != NULL)
394 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
395 db->buf_pool_ptr, db->buf_pool_dma_ptr);
396err_out_disable:
397 pci_disable_device(pdev);
398err_out_free:
399 pci_set_drvdata(pdev, NULL);
400 free_netdev(dev);
401
402 return err;
403}
404
405
406static void __devexit uli526x_remove_one (struct pci_dev *pdev)
407{
408 struct net_device *dev = pci_get_drvdata(pdev);
409 struct uli526x_board_info *db = netdev_priv(dev);
410
411 ULI526X_DBUG(0, "uli526x_remove_one()", 0);
412
413 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
414 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
415 db->desc_pool_dma_ptr);
416 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
417 db->buf_pool_ptr, db->buf_pool_dma_ptr);
418 unregister_netdev(dev);
419 pci_release_regions(pdev);
420 free_netdev(dev); /* free board information */
421 pci_set_drvdata(pdev, NULL);
422 pci_disable_device(pdev);
423 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
424}
425
426
427/*
428 * Open the interface.
429 * The interface is opened whenever "ifconfig" activates it.
430 */
431
432static int uli526x_open(struct net_device *dev)
433{
434 int ret;
435 struct uli526x_board_info *db = netdev_priv(dev);
436
437 ULI526X_DBUG(0, "uli526x_open", 0);
438
439 ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
440 if (ret)
441 return ret;
442
443 /* system variable init */
444 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
445 db->tx_packet_cnt = 0;
446 db->rx_avail_cnt = 0;
447 db->link_failed = 1;
448 netif_carrier_off(dev);
449 db->wait_reset = 0;
450
 451 db->NIC_capability = 0xf; /* All capabilities */
452 db->PHY_reg4 = 0x1e0;
453
454 /* CR6 operation mode decision */
455 db->cr6_data |= ULI526X_TXTH_256;
456 db->cr0_data = CR0_DEFAULT;
457
458 /* Initialize ULI526X board */
459 uli526x_init(dev);
460
 461 /* Activate system interface */
462 netif_wake_queue(dev);
463
 464 /* set up and activate the timer */
465 init_timer(&db->timer);
466 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
467 db->timer.data = (unsigned long)dev;
468 db->timer.function = &uli526x_timer;
469 add_timer(&db->timer);
470
471 return 0;
472}
473
474
475/* Initialize ULI526X board
476 * Reset ULI526X board
477 * Initialize TX/Rx descriptor chain structure
478 * Send the set-up frame
479 * Enable Tx/Rx machine
480 */
481
482static void uli526x_init(struct net_device *dev)
483{
484 struct uli526x_board_info *db = netdev_priv(dev);
485 unsigned long ioaddr = db->ioaddr;
486 u8 phy_tmp;
487 u16 phy_value;
488 u16 phy_reg_reset;
489
490 ULI526X_DBUG(0, "uli526x_init()", 0);
491
492 /* Reset M526x MAC controller */
493 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */
494 udelay(100);
495 outl(db->cr0_data, ioaddr + DCR0);
496 udelay(5);
497
 498 /* PHY address: on some boards the M5261/M5263 PHY address != 1 */
499 db->phy_addr = 1;
500 for(phy_tmp=0;phy_tmp<32;phy_tmp++)
501 {
 502 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id); //read PHY ID2 register
503 if(phy_value != 0xffff&&phy_value!=0)
504 {
505 db->phy_addr = phy_tmp;
506 break;
507 }
508 }
509 if(phy_tmp == 32)
 510 printk(KERN_WARNING DRV_NAME ": Cannot find the PHY address!\n");
 511 /* Parse SROM and media mode */
512 db->media_mode = uli526x_media_mode;
513
514 /* Phyxcer capability setting */
515 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
516 phy_reg_reset = (phy_reg_reset | 0x8000);
517 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
518 udelay(500);
519
520 /* Process Phyxcer Media Mode */
521 uli526x_set_phyxcer(db);
522
523 /* Media Mode Process */
524 if ( !(db->media_mode & ULI526X_AUTO) )
525 db->op_mode = db->media_mode; /* Force Mode */
526
 527 /* Initialize Transmit/Receive descriptors and CR3/4 */
528 uli526x_descriptor_init(db, ioaddr);
529
530 /* Init CR6 to program M526X operation */
531 update_cr6(db->cr6_data, ioaddr);
532
533 /* Send setup frame */
534 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
535
536 /* Init CR7, interrupt active bit */
537 db->cr7_data = CR7_DEFAULT;
538 outl(db->cr7_data, ioaddr + DCR7);
539
540 /* Init CR15, Tx jabber and Rx watchdog timer */
541 outl(db->cr15_data, ioaddr + DCR15);
542
543 /* Enable ULI526X Tx/Rx function */
544 db->cr6_data |= CR6_RXSC | CR6_TXSC;
545 update_cr6(db->cr6_data, ioaddr);
546}
547
548
549/*
550 * Hardware start transmission.
551 * Send a packet to media from the upper layer.
552 */
553
554static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
555{
556 struct uli526x_board_info *db = netdev_priv(dev);
557 struct tx_desc *txptr;
558 unsigned long flags;
559
560 ULI526X_DBUG(0, "uli526x_start_xmit", 0);
561
562 /* Resource flag check */
563 netif_stop_queue(dev);
564
565 /* Too large packet check */
566 if (skb->len > MAX_PACKET_SIZE) {
567 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
568 dev_kfree_skb(skb);
569 return 0;
570 }
571
572 spin_lock_irqsave(&db->lock, flags);
573
 574 /* No Tx resource check; this should never happen normally */
575 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
576 spin_unlock_irqrestore(&db->lock, flags);
577 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
578 return 1;
579 }
580
581 /* Disable NIC interrupt */
582 outl(0, dev->base_addr + DCR7);
583
584 /* transmit this packet */
585 txptr = db->tx_insert_ptr;
586 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
587 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
588
589 /* Point to next transmit free descriptor */
590 db->tx_insert_ptr = txptr->next_tx_desc;
591
592 /* Transmit Packet Process */
593 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
594 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
595 db->tx_packet_cnt++; /* Ready to send */
596 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
597 dev->trans_start = jiffies; /* saved time stamp */
598 }
599
600 /* Tx resource check */
601 if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
602 netif_wake_queue(dev);
603
604 /* Restore CR7 to enable interrupt */
605 spin_unlock_irqrestore(&db->lock, flags);
606 outl(db->cr7_data, dev->base_addr + DCR7);
607
608 /* free this SKB */
609 dev_kfree_skb(skb);
610
611 return 0;
612}
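/*
 * Note on the transmit path above: the driver copies every skb into a
 * preallocated TX_BUF_ALLOC-sized bounce buffer (tx_buf_ptr), part of a
 * pool that was DMA-mapped once at probe time, which is why the skb can
 * be freed immediately rather than held until the descriptor completes.
 * The 0xe1000000 written to tdes1 appears to set the usual tulip-style
 * interrupt-on-completion, last-segment and first-segment bits plus the
 * chained-descriptor bit for a single-buffer frame.
 */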
613
614
615/*
616 * Stop the interface.
 617 * The interface is stopped when it is brought down.
618 */
619
620static int uli526x_stop(struct net_device *dev)
621{
622 struct uli526x_board_info *db = netdev_priv(dev);
623 unsigned long ioaddr = dev->base_addr;
624
625 ULI526X_DBUG(0, "uli526x_stop", 0);
626
627 /* disable system */
628 netif_stop_queue(dev);
629
 630 /* delete the timer */
631 del_timer_sync(&db->timer);
632
633 /* Reset & stop ULI526X board */
634 outl(ULI526X_RESET, ioaddr + DCR0);
635 udelay(5);
636 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
637
638 /* free interrupt */
639 free_irq(dev->irq, dev);
640
641 /* free allocated rx buffer */
642 uli526x_free_rxbuffer(db);
643
644#if 0
645 /* show statistic counter */
646 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
647 db->tx_fifo_underrun, db->tx_excessive_collision,
648 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
649 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
650 db->reset_fatal, db->reset_TXtimeout);
651#endif
652
653 return 0;
654}
655
656
657/*
 658 * M5261/M5263 interrupt handler
 659 * Pass received packets to the upper layer, free transmitted packets
660 */
661
662static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
663{
664 struct net_device *dev = dev_id;
665 struct uli526x_board_info *db = netdev_priv(dev);
666 unsigned long ioaddr = dev->base_addr;
667 unsigned long flags;
668
669 if (!dev) {
670 ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
671 return IRQ_NONE;
672 }
673
674 spin_lock_irqsave(&db->lock, flags);
675 outl(0, ioaddr + DCR7);
676
677 /* Got ULI526X status */
678 db->cr5_data = inl(ioaddr + DCR5);
679 outl(db->cr5_data, ioaddr + DCR5);
680 if ( !(db->cr5_data & 0x180c1) ) {
681 spin_unlock_irqrestore(&db->lock, flags);
682 outl(db->cr7_data, ioaddr + DCR7);
683 return IRQ_HANDLED;
684 }
685
686 /* Check system status */
687 if (db->cr5_data & 0x2000) {
688 /* system bus error happen */
689 ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
690 db->reset_fatal++;
691 db->wait_reset = 1; /* Need to RESET */
692 spin_unlock_irqrestore(&db->lock, flags);
693 return IRQ_HANDLED;
694 }
695
 696 /* Receive incoming packets */
697 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
698 uli526x_rx_packet(dev, db);
699
700 /* reallocate rx descriptor buffer */
701 if (db->rx_avail_cnt<RX_DESC_CNT)
702 allocate_rx_buffer(db);
703
704 /* Free the transmitted descriptor */
705 if ( db->cr5_data & 0x01)
706 uli526x_free_tx_pkt(dev, db);
707
708 /* Restore CR7 to enable interrupt mask */
709 outl(db->cr7_data, ioaddr + DCR7);
710
711 spin_unlock_irqrestore(&db->lock, flags);
712 return IRQ_HANDLED;
713}
714
715
716/*
717 * Free TX resource after TX complete
718 */
719
720static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
721{
722 struct tx_desc *txptr;
723 u32 tdes0;
724
725 txptr = db->tx_remove_ptr;
726 while(db->tx_packet_cnt) {
727 tdes0 = le32_to_cpu(txptr->tdes0);
728 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
729 if (tdes0 & 0x80000000)
730 break;
731
732 /* A packet sent completed */
733 db->tx_packet_cnt--;
734 db->stats.tx_packets++;
735
736 /* Transmit statistic counter */
737 if ( tdes0 != 0x7fffffff ) {
738 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
739 db->stats.collisions += (tdes0 >> 3) & 0xf;
740 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
741 if (tdes0 & TDES0_ERR_MASK) {
742 db->stats.tx_errors++;
743 if (tdes0 & 0x0002) { /* UnderRun */
744 db->tx_fifo_underrun++;
745 if ( !(db->cr6_data & CR6_SFT) ) {
746 db->cr6_data = db->cr6_data | CR6_SFT;
747 update_cr6(db->cr6_data, db->ioaddr);
748 }
749 }
750 if (tdes0 & 0x0100)
751 db->tx_excessive_collision++;
752 if (tdes0 & 0x0200)
753 db->tx_late_collision++;
754 if (tdes0 & 0x0400)
755 db->tx_no_carrier++;
756 if (tdes0 & 0x0800)
757 db->tx_loss_carrier++;
758 if (tdes0 & 0x4000)
759 db->tx_jabber_timeout++;
760 }
761 }
762
763 txptr = txptr->next_tx_desc;
764 }/* End of while */
765
766 /* Update TX remove pointer to next */
767 db->tx_remove_ptr = txptr;
768
769 /* Resource available check */
770 if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
771 netif_wake_queue(dev); /* Active upper layer, send again */
772}
773
774
775/*
 776 * Receive incoming packets and pass them to the upper layer
777 */
778
779static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
780{
781 struct rx_desc *rxptr;
782 struct sk_buff *skb;
783 int rxlen;
784 u32 rdes0;
785
786 rxptr = db->rx_ready_ptr;
787
788 while(db->rx_avail_cnt) {
789 rdes0 = le32_to_cpu(rxptr->rdes0);
790 if (rdes0 & 0x80000000) /* packet owner check */
791 {
792 break;
793 }
794
795 db->rx_avail_cnt--;
796 db->interval_rx_cnt++;
797
798 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
799 if ( (rdes0 & 0x300) != 0x300) {
 800 /* A packet without both First/Last flags set */
801 /* reuse this SKB */
802 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
803 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
804 } else {
805 /* A packet with First/Last flag */
806 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
807
808 /* error summary bit check */
809 if (rdes0 & 0x8000) {
 810 /* This is an error packet */
811 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
812 db->stats.rx_errors++;
813 if (rdes0 & 1)
814 db->stats.rx_fifo_errors++;
815 if (rdes0 & 2)
816 db->stats.rx_crc_errors++;
817 if (rdes0 & 0x80)
818 db->stats.rx_length_errors++;
819 }
820
821 if ( !(rdes0 & 0x8000) ||
822 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
823 skb = rxptr->rx_skb_ptr;
824
825 /* Good packet, send to upper layer */
 826 /* Short packets are copied into a fresh SKB */
827 if ( (rxlen < RX_COPY_SIZE) &&
828 ( (skb = dev_alloc_skb(rxlen + 2) )
829 != NULL) ) {
830 /* size less than COPY_SIZE, allocate a rxlen SKB */
831 skb->dev = dev;
832 skb_reserve(skb, 2); /* 16byte align */
833 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
834 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
835 } else {
836 skb->dev = dev;
837 skb_put(skb, rxlen);
838 }
839 skb->protocol = eth_type_trans(skb, dev);
840 netif_rx(skb);
841 dev->last_rx = jiffies;
842 db->stats.rx_packets++;
843 db->stats.rx_bytes += rxlen;
844
845 } else {
846 /* Reuse SKB buffer when the packet is error */
847 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
848 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
849 }
850 }
851
852 rxptr = rxptr->next_rx_desc;
853 }
854
855 db->rx_ready_ptr = rxptr;
856}
857
858
859/*
860 * Get statistics from driver.
861 */
862
863static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
864{
865 struct uli526x_board_info *db = netdev_priv(dev);
866
867 ULI526X_DBUG(0, "uli526x_get_stats", 0);
868 return &db->stats;
869}
870
871
872/*
873 * Set ULI526X multicast address
874 */
875
876static void uli526x_set_filter_mode(struct net_device * dev)
877{
 878 struct uli526x_board_info *db = netdev_priv(dev);
879 unsigned long flags;
880
881 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
882 spin_lock_irqsave(&db->lock, flags);
883
884 if (dev->flags & IFF_PROMISC) {
885 ULI526X_DBUG(0, "Enable PROM Mode", 0);
886 db->cr6_data |= CR6_PM | CR6_PBF;
887 update_cr6(db->cr6_data, db->ioaddr);
888 spin_unlock_irqrestore(&db->lock, flags);
889 return;
890 }
891
892 if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
893 ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
894 db->cr6_data &= ~(CR6_PM | CR6_PBF);
895 db->cr6_data |= CR6_PAM;
896 spin_unlock_irqrestore(&db->lock, flags);
897 return;
898 }
899
900 ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
901 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
902 spin_unlock_irqrestore(&db->lock, flags);
903}
904
905static void
906ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
907{
908 ecmd->supported = (SUPPORTED_10baseT_Half |
909 SUPPORTED_10baseT_Full |
910 SUPPORTED_100baseT_Half |
911 SUPPORTED_100baseT_Full |
912 SUPPORTED_Autoneg |
913 SUPPORTED_MII);
914
915 ecmd->advertising = (ADVERTISED_10baseT_Half |
916 ADVERTISED_10baseT_Full |
917 ADVERTISED_100baseT_Half |
918 ADVERTISED_100baseT_Full |
919 ADVERTISED_Autoneg |
920 ADVERTISED_MII);
921
922
923 ecmd->port = PORT_MII;
924 ecmd->phy_address = db->phy_addr;
925
926 ecmd->transceiver = XCVR_EXTERNAL;
927
928 ecmd->speed = 10;
929 ecmd->duplex = DUPLEX_HALF;
930
931 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
932 {
933 ecmd->speed = 100;
934 }
935 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
936 {
937 ecmd->duplex = DUPLEX_FULL;
938 }
939 if(db->link_failed)
940 {
941 ecmd->speed = -1;
942 ecmd->duplex = -1;
943 }
944
945 if (db->media_mode & ULI526X_AUTO)
946 {
947 ecmd->autoneg = AUTONEG_ENABLE;
948 }
949}
950
951static void netdev_get_drvinfo(struct net_device *dev,
952 struct ethtool_drvinfo *info)
953{
954 struct uli526x_board_info *np = netdev_priv(dev);
955
956 strcpy(info->driver, DRV_NAME);
957 strcpy(info->version, DRV_VERSION);
958 if (np->pdev)
959 strcpy(info->bus_info, pci_name(np->pdev));
960 else
961 sprintf(info->bus_info, "EISA 0x%lx %d",
962 dev->base_addr, dev->irq);
963}
964
965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
966 struct uli526x_board_info *np = netdev_priv(dev);
967
968 ULi_ethtool_gset(np, cmd);
969
970 return 0;
971}
972
973static u32 netdev_get_link(struct net_device *dev) {
974 struct uli526x_board_info *np = netdev_priv(dev);
975
976 if(np->link_failed)
977 return 0;
978 else
979 return 1;
980}
981
982static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
983{
984 wol->supported = WAKE_PHY | WAKE_MAGIC;
985 wol->wolopts = 0;
986}
987
988static struct ethtool_ops netdev_ethtool_ops = {
989 .get_drvinfo = netdev_get_drvinfo,
990 .get_settings = netdev_get_settings,
991 .get_link = netdev_get_link,
992 .get_wol = uli526x_get_wol,
993};
994
995/*
996 * A periodic timer routine
997 * Dynamic media sense, allocate Rx buffer...
998 */
999
1000static void uli526x_timer(unsigned long data)
1001{
1002 u32 tmp_cr8;
1003 unsigned char tmp_cr12=0;
1004 struct net_device *dev = (struct net_device *) data;
1005 struct uli526x_board_info *db = netdev_priv(dev);
1006 unsigned long flags;
1007 u8 TmpSpeed=10;
1008
1009 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1010 spin_lock_irqsave(&db->lock, flags);
1011
1012
1013 /* Dynamic reset ULI526X : system error or transmit time-out */
1014 tmp_cr8 = inl(db->ioaddr + DCR8);
1015 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1016 db->reset_cr8++;
1017 db->wait_reset = 1;
1018 }
1019 db->interval_rx_cnt = 0;
1020
1021 /* TX polling kick monitor */
1022 if ( db->tx_packet_cnt &&
1023 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
1024 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1025
1026 // TX Timeout
1027 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
1028 db->reset_TXtimeout++;
1029 db->wait_reset = 1;
1030 printk( "%s: Tx timeout - resetting\n",
1031 dev->name);
1032 }
1033 }
1034
1035 if (db->wait_reset) {
1036 ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1037 db->reset_count++;
1038 uli526x_dynamic_reset(dev);
1039 db->timer.expires = ULI526X_TIMER_WUT;
1040 add_timer(&db->timer);
1041 spin_unlock_irqrestore(&db->lock, flags);
1042 return;
1043 }
1044
1045 /* Link status check, Dynamic media type change */
1046 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
1047 tmp_cr12 = 3;
1048
1049 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1050 /* Link Failed */
1051 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1052 netif_carrier_off(dev);
1053 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1054 db->link_failed = 1;
1055
1056 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 1057 /* AUTO mode doesn't need this */
1058 if ( !(db->media_mode & 0x8) )
1059 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1060
1061 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1062 if (db->media_mode & ULI526X_AUTO) {
1063 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1064 update_cr6(db->cr6_data, db->ioaddr);
1065 }
1066 } else
1067 if ((tmp_cr12 & 0x3) && db->link_failed) {
 1068 ULI526X_DBUG(0, "Link OK", tmp_cr12);
1069 db->link_failed = 0;
1070
1071 /* Auto Sense Speed */
1072 if ( (db->media_mode & ULI526X_AUTO) &&
1073 uli526x_sense_speed(db) )
1074 db->link_failed = 1;
1075 uli526x_process_mode(db);
1076
1077 if(db->link_failed==0)
1078 {
1079 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
1080 {
1081 TmpSpeed = 100;
1082 }
1083 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
1084 {
1085 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
1086 }
1087 else
1088 {
1089 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1090 }
1091 netif_carrier_on(dev);
1092 }
1093 /* SHOW_MEDIA_TYPE(db->op_mode); */
1094 }
1095 else if(!(tmp_cr12 & 0x3) && db->link_failed)
1096 {
1097 if(db->init==1)
1098 {
1099 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1100 netif_carrier_off(dev);
1101 }
1102 }
1103 db->init=0;
1104
1105 /* Timer active again */
1106 db->timer.expires = ULI526X_TIMER_WUT;
1107 add_timer(&db->timer);
1108 spin_unlock_irqrestore(&db->lock, flags);
1109}
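/*
 * The tmp_cr12 handling above is indirect: link state is derived from
 * MII register 5 (the autoneg link partner ability word) rather than
 * from CSR12, and tmp_cr12 is simply forced to 3 when any partner
 * ability bit (0x01e0) is present, seemingly to preserve the shape of
 * older CSR12-based link tests.
 */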
1110
1111
1112/*
1113 * Dynamic reset the ULI526X board
1114 * Stop ULI526X board
1115 * Free Tx/Rx allocated memory
1116 * Reset ULI526X board
1117 * Re-initialize ULI526X board
1118 */
1119
1120static void uli526x_dynamic_reset(struct net_device *dev)
1121{
1122 struct uli526x_board_info *db = netdev_priv(dev);
1123
1124 ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
1125
 1126 /* Stop MAC controller */
1127 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1128 update_cr6(db->cr6_data, dev->base_addr);
1129 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1130 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1131
1132 /* Disable upper layer interface */
1133 netif_stop_queue(dev);
1134
1135 /* Free Rx Allocate buffer */
1136 uli526x_free_rxbuffer(db);
1137
1138 /* system variable init */
1139 db->tx_packet_cnt = 0;
1140 db->rx_avail_cnt = 0;
1141 db->link_failed = 1;
1142 db->init=1;
1143 db->wait_reset = 0;
1144
1145 /* Re-initialize ULI526X board */
1146 uli526x_init(dev);
1147
1148 /* Restart upper layer interface */
1149 netif_wake_queue(dev);
1150}
1151
1152
1153/*
1154 * free all allocated rx buffer
1155 */
1156
1157static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
1158{
1159 ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
1160
1161 /* free allocated rx buffer */
1162 while (db->rx_avail_cnt) {
1163 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1164 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1165 db->rx_avail_cnt--;
1166 }
1167}
1168
1169
1170/*
1171 * Reuse the SK buffer
1172 */
1173
1174static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
1175{
1176 struct rx_desc *rxptr = db->rx_insert_ptr;
1177
1178 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1179 rxptr->rx_skb_ptr = skb;
1180 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1181 wmb();
1182 rxptr->rdes0 = cpu_to_le32(0x80000000);
1183 db->rx_avail_cnt++;
1184 db->rx_insert_ptr = rxptr->next_rx_desc;
1185 } else
1186 ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1187}
1188
1189
1190/*
 1191 * Initialize the transmit/receive descriptors
 1192 * using a chained structure, and allocate Tx/Rx buffers
1193 */
1194
1195static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
1196{
1197 struct tx_desc *tmp_tx;
1198 struct rx_desc *tmp_rx;
1199 unsigned char *tmp_buf;
1200 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1201 dma_addr_t tmp_buf_dma;
1202 int i;
1203
1204 ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
1205
1206 /* tx descriptor start pointer */
1207 db->tx_insert_ptr = db->first_tx_desc;
1208 db->tx_remove_ptr = db->first_tx_desc;
1209 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1210
1211 /* rx descriptor start pointer */
1212 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1213 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1214 db->rx_insert_ptr = db->first_rx_desc;
1215 db->rx_ready_ptr = db->first_rx_desc;
1216 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1217
1218 /* Init Transmit chain */
1219 tmp_buf = db->buf_pool_start;
1220 tmp_buf_dma = db->buf_pool_dma_start;
1221 tmp_tx_dma = db->first_tx_desc_dma;
1222 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1223 tmp_tx->tx_buf_ptr = tmp_buf;
1224 tmp_tx->tdes0 = cpu_to_le32(0);
1225 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1226 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1227 tmp_tx_dma += sizeof(struct tx_desc);
1228 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1229 tmp_tx->next_tx_desc = tmp_tx + 1;
1230 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1231 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1232 }
1233 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1234 tmp_tx->next_tx_desc = db->first_tx_desc;
1235
1236 /* Init Receive descriptor chain */
1237 tmp_rx_dma=db->first_rx_desc_dma;
1238 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1239 tmp_rx->rdes0 = cpu_to_le32(0);
1240 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1241 tmp_rx_dma += sizeof(struct rx_desc);
1242 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1243 tmp_rx->next_rx_desc = tmp_rx + 1;
1244 }
1245 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1246 tmp_rx->next_rx_desc = db->first_rx_desc;
1247
1248 /* pre-allocate Rx buffer */
1249 allocate_rx_buffer(db);
1250}
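/*
 * Resulting layout: a single coherent pool holds TX_DESC_CNT tx_desc
 * entries followed by RX_DESC_CNT rx_desc entries. Each descriptor's
 * tdes3/rdes3 field carries the bus address of the next descriptor and
 * the last one chains back to the first, so both queues form hardware
 * rings walked through the next_tx_desc/next_rx_desc shadow pointers.
 */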
1251
1252
1253/*
 1254 * Update the CR6 value:
 1255 * write the new operation mode and give the chip time to settle
1256 */
1257
1258static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1259{
1260
1261 outl(cr6_data, ioaddr + DCR6);
1262 udelay(5);
1263}
1264
1265
1266/*
1267 * Send a setup frame for M5261/M5263
1268 * This setup frame initialize ULI526X address filter mode
1269 */
1270
1271static void send_filter_frame(struct net_device *dev, int mc_cnt)
1272{
1273 struct uli526x_board_info *db = netdev_priv(dev);
1274 struct dev_mc_list *mcptr;
1275 struct tx_desc *txptr;
1276 u16 * addrptr;
1277 u32 * suptr;
1278 int i;
1279
1280 ULI526X_DBUG(0, "send_filter_frame()", 0);
1281
1282 txptr = db->tx_insert_ptr;
1283 suptr = (u32 *) txptr->tx_buf_ptr;
1284
1285 /* Node address */
1286 addrptr = (u16 *) dev->dev_addr;
1287 *suptr++ = addrptr[0];
1288 *suptr++ = addrptr[1];
1289 *suptr++ = addrptr[2];
1290
1291 /* broadcast address */
1292 *suptr++ = 0xffff;
1293 *suptr++ = 0xffff;
1294 *suptr++ = 0xffff;
1295
 1296 /* append the multicast addresses */
1297 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1298 addrptr = (u16 *) mcptr->dmi_addr;
1299 *suptr++ = addrptr[0];
1300 *suptr++ = addrptr[1];
1301 *suptr++ = addrptr[2];
1302 }
1303
1304 for (; i<14; i++) {
1305 *suptr++ = 0xffff;
1306 *suptr++ = 0xffff;
1307 *suptr++ = 0xffff;
1308 }
1309
1310 /* prepare the setup frame */
1311 db->tx_insert_ptr = txptr->next_tx_desc;
1312 txptr->tdes1 = cpu_to_le32(0x890000c0);
1313
1314 /* Resource Check and Send the setup packet */
1315 if (db->tx_packet_cnt < TX_DESC_CNT) {
 1316 /* Tx resource available */
1317 db->tx_packet_cnt++;
1318 txptr->tdes0 = cpu_to_le32(0x80000000);
1319 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1320 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1321 update_cr6(db->cr6_data, dev->base_addr);
1322 dev->trans_start = jiffies;
1323 } else
1324 printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
1325}
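send_filter_frame() lays out a tulip-style perfect-filter setup frame: sixteen address slots of three 16-bit words each, with slot 0 holding the node address, slot 1 the broadcast address, the multicast list next, and broadcast padding in the remainder. A hedged sketch of the per-slot packing (pack_filter_slot() is illustrative, not a driver function):

	/*
	 * Sketch: pack one 6-byte MAC address into a perfect-filter slot.
	 * Each 16-bit half of the address occupies the low half of its own
	 * 32-bit buffer word, matching the stores in send_filter_frame().
	 */
	static u32 *pack_filter_slot(u32 *slot, const u8 *mac)
	{
		const u16 *half = (const u16 *)mac;

		*slot++ = half[0];
		*slot++ = half[1];
		*slot++ = half[2];
		return slot;
	}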
1326
1327
1328/*
1329 * Allocate Rx buffers,
1330 * refilling as many Rx descriptors as possible
1331 */
1332
1333static void allocate_rx_buffer(struct uli526x_board_info *db)
1334{
1335 struct rx_desc *rxptr;
1336 struct sk_buff *skb;
1337
1338 rxptr = db->rx_insert_ptr;
1339
1340 while(db->rx_avail_cnt < RX_DESC_CNT) {
1341 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1342 break;
1343 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1344 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1345 wmb();
1346 rxptr->rdes0 = cpu_to_le32(0x80000000);
1347 rxptr = rxptr->next_rx_desc;
1348 db->rx_avail_cnt++;
1349 }
1350
1351 db->rx_insert_ptr = rxptr;
1352}
1353
1354
1355/*
1356 * Read one data word from the serial ROM
1357 */
1358
1359static u16 read_srom_word(long ioaddr, int offset)
1360{
1361 int i;
1362 u16 srom_data = 0;
1363 long cr9_ioaddr = ioaddr + DCR9;
1364
1365 outl(CR9_SROM_READ, cr9_ioaddr);
1366 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1367
1368 /* Send the Read Command 110b */
1369 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1370 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1371 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1372
1373 /* Send the offset */
1374 for (i = 5; i >= 0; i--) {
1375 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1376 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1377 }
1378
1379 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1380
1381 for (i = 16; i > 0; i--) {
1382 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1383 udelay(5);
1384 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1385 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1386 udelay(5);
1387 }
1388
1389 outl(CR9_SROM_READ, cr9_ioaddr);
1390 return srom_data;
1391}
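read_srom_word() bit-bangs a 93C46-style serial EEPROM through CR9: raise chip select, clock out the 110b read opcode and a 6-bit word offset, then clock 16 data bits in MSB-first. A usage sketch under that assumption (the 64-word size is typical of this EEPROM family, not confirmed by the excerpt):

	/* Sketch: read the whole 64-word SROM into a caller's buffer. */
	static void srom_dump(long ioaddr, u16 *buf)
	{
		int i;

		for (i = 0; i < 64; i++)
			buf[i] = read_srom_word(ioaddr, i);
	}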
1392
1393
1394/*
1395 * Auto-sense the media mode
1396 */
1397
1398static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1399{
1400 u8 ErrFlag = 0;
1401 u16 phy_mode;
1402
1403 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1404 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1405
1406 if ( (phy_mode & 0x24) == 0x24 ) {
1407
1408 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
1409		if (phy_mode & 0x8000)
1410			phy_mode = 0x8000;
1411		else if (phy_mode & 0x4000)
1412			phy_mode = 0x4000;
1413		else if (phy_mode & 0x2000)
1414 phy_mode = 0x2000;
1415 else
1416 phy_mode = 0x1000;
1417
1418 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1419 switch (phy_mode) {
1420 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1421 case 0x2000: db->op_mode = ULI526X_10MFD; break;
1422 case 0x4000: db->op_mode = ULI526X_100MHF; break;
1423 case 0x8000: db->op_mode = ULI526X_100MFD; break;
1424 default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
1425 }
1426 } else {
1427 db->op_mode = ULI526X_10MHF;
1428 ULI526X_DBUG(0, "Link Failed :", phy_mode);
1429 ErrFlag = 1;
1430 }
1431
1432 return ErrFlag;
1433}
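Two details in uli526x_sense_speed() are easy to miss: BMSR (register 1) is read twice because its link bit is latched-low, so the first read flushes a stale failure, and `(reg5 & 0x01e0) << 7` lifts the link partner's ability bits (ANLPAR bits 5 through 8) so that testing from 0x8000 downward selects the fastest common mode. The same decode with named masks (constants follow IEEE 802.3 clause 22; best_common_mode() is illustrative):

	/* Sketch: pick the best advertised mode from ANLPAR (PHY reg 5). */
	#define ANLPAR_100FULL	0x0100	/* bit 8: 100BASE-TX full duplex */
	#define ANLPAR_100HALF	0x0080	/* bit 7: 100BASE-TX half duplex */
	#define ANLPAR_10FULL	0x0040	/* bit 6: 10BASE-T full duplex   */
	#define ANLPAR_10HALF	0x0020	/* bit 5: 10BASE-T half duplex   */

	static u8 best_common_mode(u16 anlpar)
	{
		if (anlpar & ANLPAR_100FULL)
			return ULI526X_100MFD;
		if (anlpar & ANLPAR_100HALF)
			return ULI526X_100MHF;
		if (anlpar & ANLPAR_10FULL)
			return ULI526X_10MFD;
		return ULI526X_10MHF;
	}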
1434
1435
1436/*
1437 * Set 10/100 phyxcer capability
1438 * AUTO mode : phyxcer register4 is NIC capability
1439 * Force mode: phyxcer register4 is the force media
1440 */
1441
1442static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1443{
1444 u16 phy_reg;
1445
1446 /* Phyxcer capability setting */
1447 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1448
1449 if (db->media_mode & ULI526X_AUTO) {
1450 /* AUTO Mode */
1451 phy_reg |= db->PHY_reg4;
1452 } else {
1453 /* Force Mode */
1454 switch(db->media_mode) {
1455 case ULI526X_10MHF: phy_reg |= 0x20; break;
1456 case ULI526X_10MFD: phy_reg |= 0x40; break;
1457 case ULI526X_100MHF: phy_reg |= 0x80; break;
1458 case ULI526X_100MFD: phy_reg |= 0x100; break;
1459 }
1460
1461 }
1462
1463 /* Write new capability to Phyxcer Reg4 */
1464	if (!(phy_reg & 0x01e0)) {
1465		phy_reg |= db->PHY_reg4;
1466		db->media_mode |= ULI526X_AUTO;
1467 }
1468 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1469
1470 /* Restart Auto-Negotiation */
1471 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1472 udelay(50);
1473}
1474
1475
1476/*
1477 * Process op-mode
1478 * AUTO mode : PHY controller in Auto-negotiation Mode
1479 * Force mode: PHY controller in force mode with HUB
1480 * N-way force capability with SWITCH
1481 */
1482
1483static void uli526x_process_mode(struct uli526x_board_info *db)
1484{
1485 u16 phy_reg;
1486
1487 /* Full Duplex Mode Check */
1488 if (db->op_mode & 0x4)
1489 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1490 else
1491 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1492
1493 update_cr6(db->cr6_data, db->ioaddr);
1494
1495	/* Force-mode handling for the 10/100M phyxcer */
1496 if ( !(db->media_mode & 0x8)) {
1497		/* Force Mode */
1498 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1499 if ( !(phy_reg & 0x1) ) {
1500			/* partner without N-Way capability */
1501 phy_reg = 0x0;
1502 switch(db->op_mode) {
1503 case ULI526X_10MHF: phy_reg = 0x0; break;
1504 case ULI526X_10MFD: phy_reg = 0x100; break;
1505 case ULI526X_100MHF: phy_reg = 0x2000; break;
1506 case ULI526X_100MFD: phy_reg = 0x2100; break;
1507 }
1508 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1509 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1510 }
1511 }
1512}
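The raw BMCR values in the force-mode path decode into two standard clause-22 control bits, 0x2000 for 100 Mb/s (bit 13) and 0x100 for full duplex (bit 8); the repeated phy_write() is a safety margin on the bit-banged bus. The same table using the <linux/mii.h> names (a sketch, not a change to the driver):

	#include <linux/mii.h>

	/* Sketch: the force-mode BMCR values with symbolic names. */
	static u16 force_mode_bmcr(u8 op_mode)
	{
		switch (op_mode) {
		case ULI526X_10MFD:	return BMCR_FULLDPLX;
		case ULI526X_100MHF:	return BMCR_SPEED100;
		case ULI526X_100MFD:	return BMCR_SPEED100 | BMCR_FULLDPLX;
		default:		return 0;	/* ULI526X_10MHF */
		}
	}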
1513
1514
1515/*
1516 * Write a word to Phy register
1517 */
1518
1519static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1520{
1521 u16 i;
1522 unsigned long ioaddr;
1523
1524	if (chip_id == PCI_ULI5263_ID)
1525 {
1526 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1527 return;
1528 }
1529 /* M5261/M5263 Chip */
1530 ioaddr = iobase + DCR9;
1531
1532	/* Send 35 synchronization clocks (preamble) to the PHY controller */
1533 for (i = 0; i < 35; i++)
1534 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1535
1536 /* Send start command(01) to Phy */
1537 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1538 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1539
1540 /* Send write command(01) to Phy */
1541 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1542 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1543
1544 /* Send Phy address */
1545 for (i = 0x10; i > 0; i = i >> 1)
1546 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1547
1548 /* Send register address */
1549 for (i = 0x10; i > 0; i = i >> 1)
1550 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1551
1552	/* write turnaround */
1553 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1554 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1555
1556	/* Write a data word to the PHY controller */
1557 for ( i = 0x8000; i > 0; i >>= 1)
1558 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1559
1560}
1561
1562
1563/*
1564 * Read a data word from a PHY register
1565 */
1566
1567static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1568{
1569 int i;
1570 u16 phy_data;
1571 unsigned long ioaddr;
1572
1573	if (chip_id == PCI_ULI5263_ID)
1574 return phy_readby_cr10(iobase, phy_addr, offset);
1575 /* M5261/M5263 Chip */
1576 ioaddr = iobase + DCR9;
1577
1578 /* Send 33 synchronization clock to Phy controller */
1579 for (i = 0; i < 35; i++)
1580 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1581
1582 /* Send start command(01) to Phy */
1583 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1584 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1585
1586 /* Send read command(10) to Phy */
1587 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1588 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1589
1590 /* Send Phy address */
1591 for (i = 0x10; i > 0; i = i >> 1)
1592 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1593
1594 /* Send register address */
1595 for (i = 0x10; i > 0; i = i >> 1)
1596 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1597
1598 /* Skip transition state */
1599 phy_read_1bit(ioaddr, chip_id);
1600
1601 /* read 16bit data */
1602 for (phy_data = 0, i = 0; i < 16; i++) {
1603 phy_data <<= 1;
1604 phy_data |= phy_read_1bit(ioaddr, chip_id);
1605 }
1606
1607 return phy_data;
1608}
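phy_write() and phy_read() together bit-bang a standard clause-22 MDIO frame: a preamble of ones, start bits 01, a 2-bit opcode (01 write, 10 read), five PHY-address bits, five register bits, a turnaround, then sixteen data bits MSB-first. A small usage sketch reading the PHY identifier from registers 2 and 3 (read_phy_id() is illustrative):

	/* Sketch: fetch the 32-bit PHY identifier over the MDIO helpers. */
	static u32 read_phy_id(struct uli526x_board_info *db)
	{
		u16 id1 = phy_read(db->ioaddr, db->phy_addr, 2, db->chip_id);
		u16 id2 = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);

		return ((u32)id1 << 16) | id2;
	}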
1609
1610static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1611{
1612	unsigned long ioaddr, cr10_value;
1613
1614	ioaddr = iobase + DCR10;
1615	cr10_value = phy_addr;
1616	cr10_value = (cr10_value << 5) + offset;
1617	cr10_value = (cr10_value << 16) + 0x08000000;
1618	outl(cr10_value, ioaddr);
1619	udelay(1);
1620	while (1)
1621	{
1622		cr10_value = inl(ioaddr);
1623		if (cr10_value & 0x10000000)
1624			break;
1625	}
1626	return (cr10_value & 0x0ffff);
1627}
1628
1629static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
1630{
1631	unsigned long ioaddr, cr10_value;
1632
1633	ioaddr = iobase + DCR10;
1634	cr10_value = phy_addr;
1635	cr10_value = (cr10_value << 5) + offset;
1636	cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1637	outl(cr10_value, ioaddr);
1638	udelay(1);
1639}
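On the M5263, CR10 replaces the bit-banged bus with a single command word. From the two helpers above, the packing is: PHY address in bits 25:21, register offset in bits 20:16, data in bits 15:0, with bit 27 starting a read (completion polled on bit 28) and bit 26 starting a write. Spelled out with named fields (macro names are assumptions, not from the driver):

	/* Sketch: CR10 command-word layout used by the helpers above. */
	#define CR10_PHY_ADDR(a)	(((u32)(a) & 0x1f) << 21)
	#define CR10_REG_OFF(r)		(((u32)(r) & 0x1f) << 16)
	#define CR10_OP_READ		0x08000000	/* bit 27: start read  */
	#define CR10_OP_WRITE		0x04000000	/* bit 26: start write */
	#define CR10_READ_DONE		0x10000000	/* bit 28: read ready  */

	/* e.g. reading register 1 of the PHY at address 1:
	 *	outl(CR10_PHY_ADDR(1) | CR10_REG_OFF(1) | CR10_OP_READ, ioaddr);
	 */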
1640/*
1641 * Write one data bit to the PHY controller
1642 */
1643
1644static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1645{
1646 outl(phy_data , ioaddr); /* MII Clock Low */
1647 udelay(1);
1648 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1649 udelay(1);
1650 outl(phy_data , ioaddr); /* MII Clock Low */
1651 udelay(1);
1652}
1653
1654
1655/*
1656 * Read one data bit from the PHY controller
1657 */
1658
1659static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1660{
1661 u16 phy_data;
1662
1663 outl(0x50000 , ioaddr);
1664 udelay(1);
1665 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1666 outl(0x40000 , ioaddr);
1667 udelay(1);
1668
1669 return phy_data;
1670}
1671
1672
1673static struct pci_device_id uli526x_pci_tbl[] = {
1674 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1675 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1676 { 0, }
1677};
1678MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
1679
1680
1681static struct pci_driver uli526x_driver = {
1682 .name = "uli526x",
1683 .id_table = uli526x_pci_tbl,
1684 .probe = uli526x_init_one,
1685 .remove = __devexit_p(uli526x_remove_one),
1686};
1687
1688MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1689MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1690MODULE_LICENSE("GPL");
1691
1692MODULE_PARM(debug, "i");
1693MODULE_PARM(mode, "i");
1694MODULE_PARM(cr6set, "i");
1695MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1696MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
1697
1698/* Description:
1699 * When the user loads the module with insmod, the kernel invokes
1700 * init_module() to register this driver's services.
1701 */
1702
1703static int __init uli526x_init_module(void)
1704{
1705 int rc;
1706
1707 printk(version);
1708 printed_version = 1;
1709
1710 ULI526X_DBUG(0, "init_module() ", debug);
1711
1712 if (debug)
1713 uli526x_debug = debug; /* set debug flag */
1714 if (cr6set)
1715 uli526x_cr6_user_set = cr6set;
1716
1717 switch(mode) {
1718 case ULI526X_10MHF:
1719 case ULI526X_100MHF:
1720 case ULI526X_10MFD:
1721 case ULI526X_100MFD:
1722 uli526x_media_mode = mode;
1723 break;
1724	default: uli526x_media_mode = ULI526X_AUTO;
1725 break;
1726 }
1727
1728 rc = pci_module_init(&uli526x_driver);
1729 if (rc < 0)
1730 return rc;
1731
1732 return 0;
1733}
1734
1735
1736/*
1737 * Description:
1738 * When the user removes the module with rmmod, the kernel invokes
1739 * cleanup_module() to unregister all registered services.
1740 */
1741
1742static void __exit uli526x_cleanup_module(void)
1743{
1744	ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
1745 pci_unregister_driver(&uli526x_driver);
1746}
1747
1748module_init(uli526x_init_module);
1749module_exit(uli526x_cleanup_module);