Diffstat (limited to 'drivers/net/chelsio')
-rw-r--r--  drivers/net/chelsio/Makefile             |   11
-rw-r--r--  drivers/net/chelsio/common.h             |  314
-rw-r--r--  drivers/net/chelsio/cphy.h               |  148
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h           |  145
-rw-r--r--  drivers/net/chelsio/cxgb2.c              | 1256
-rw-r--r--  drivers/net/chelsio/elmer0.h             |  151
-rw-r--r--  drivers/net/chelsio/espi.c               |  346
-rw-r--r--  drivers/net/chelsio/espi.h               |   68
-rw-r--r--  drivers/net/chelsio/gmac.h               |  134
-rw-r--r--  drivers/net/chelsio/mv88x201x.c          |  252
-rw-r--r--  drivers/net/chelsio/pm3393.c             |  826
-rw-r--r--  drivers/net/chelsio/regs.h               |  468
-rw-r--r--  drivers/net/chelsio/sge.c                | 1684
-rw-r--r--  drivers/net/chelsio/sge.h                |  105
-rw-r--r--  drivers/net/chelsio/subr.c               |  812
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h  |  213
16 files changed, 6933 insertions, 0 deletions
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 000000000000..91e927827c43
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
#
# Chelsio 10Gb NIC driver for Linux.
#

obj-$(CONFIG_CHELSIO_T1) += cxgb.o

EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)


cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o

diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 000000000000..bf3e7b6a7a18
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
/*****************************************************************************
 *                                                                           *
 * File: common.h                                                            *
 * $Revision: 1.21 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#ifndef _CXGB_COMMON_H_
#define _CXGB_COMMON_H_

#include <linux/config.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/pci_ids.h>

#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.1.1"
#define PFX DRV_NAME ": "

#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

#define SUPPORTED_PAUSE       (1 << 13)
#define SUPPORTED_LOOPBACK    (1 << 15)

#define ADVERTISED_PAUSE      (1 << 13)
#define ADVERTISED_ASYM_PAUSE (1 << 14)

typedef struct adapter adapter_t;

void t1_elmer0_ext_intr(adapter_t *adapter);
void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
                     int speed, int duplex, int fc);

struct t1_rx_mode {
        struct net_device *dev;
        u32 idx;
        struct dev_mc_list *list;
};

#define t1_rx_mode_promisc(rm)  (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
#define t1_rx_mode_mc_cnt(rm)   (rm->dev->mc_count)

static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
{
        u8 *addr = NULL;

        if (rm->idx++ < rm->dev->mc_count) {
                addr = rm->list->dmi_addr;
                rm->list = rm->list->next;
        }
        return addr;
}

#define MAX_NPORTS 4

#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff

enum {
        CHBT_BOARD_N110,
        CHBT_BOARD_N210
};

enum {
        CHBT_TERM_T1,
        CHBT_TERM_T2
};

enum {
        CHBT_MAC_PM3393,
};

enum {
        CHBT_PHY_88X2010,
};

enum {
        PAUSE_RX      = 1 << 0,
        PAUSE_TX      = 1 << 1,
        PAUSE_AUTONEG = 1 << 2
};

/* Revisions of T1 chip */
enum {
        TERM_T1A = 0,
        TERM_T1B = 1,
        TERM_T2  = 3
};

struct sge_params {
        unsigned int cmdQ_size[2];
        unsigned int freelQ_size[2];
        unsigned int large_buf_capacity;
        unsigned int rx_coalesce_usecs;
        unsigned int last_rx_coalesce_raw;
        unsigned int default_rx_coalesce_usecs;
        unsigned int sample_interval_usecs;
        unsigned int coalesce_enable;
        unsigned int polling;
};

struct chelsio_pci_params {
        unsigned short speed;
        unsigned char  width;
        unsigned char  is_pcix;
};

struct adapter_params {
        struct sge_params sge;
        struct chelsio_pci_params pci;

        const struct board_info *brd_info;

        unsigned int   nports;          /* # of ethernet ports */
        unsigned int   stats_update_period;
        unsigned short chip_revision;
        unsigned char  chip_version;
};

struct link_config {
        unsigned int   supported;        /* link capabilities */
        unsigned int   advertising;      /* advertised capabilities */
        unsigned short requested_speed;  /* speed user has requested */
        unsigned short speed;            /* actual link speed */
        unsigned char  requested_duplex; /* duplex user has requested */
        unsigned char  duplex;           /* actual link duplex */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
};

struct cmac;
struct cphy;

struct port_info {
        struct net_device *dev;
        struct cmac *mac;
        struct cphy *phy;
        struct link_config link_config;
        struct net_device_stats netstats;
};

struct sge;
struct peespi;

struct adapter {
        u8 __iomem *regs;
        struct pci_dev *pdev;
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;

        const char *name;
        int msg_enable;
        u32 mmio_len;

        struct work_struct ext_intr_handler_task;
        struct adapter_params params;

        struct vlan_group *vlan_grp;

        /* Terminator modules. */
        struct sge    *sge;
        struct peespi *espi;

        struct port_info port[MAX_NPORTS];
        struct work_struct stats_update_task;
        struct timer_list stats_update_timer;

        struct semaphore mib_mutex;
        spinlock_t tpi_lock;
        spinlock_t work_lock;
        /* guards async operations */
        spinlock_t async_lock ____cacheline_aligned;
        u32 slow_intr_mask;
};

enum {                                           /* adapter flags */
        FULL_INIT_DONE     = 1 << 0,
        TSO_CAPABLE        = 1 << 2,
        TCP_CSUM_CAPABLE   = 1 << 3,
        UDP_CSUM_CAPABLE   = 1 << 4,
        VLAN_ACCEL_CAPABLE = 1 << 5,
        RX_CSUM_ENABLED    = 1 << 6,
};

struct mdio_ops;
struct gmac;
struct gphy;

struct board_info {
        unsigned char    board;
        unsigned char    port_number;
        unsigned long    caps;
        unsigned char    chip_term;
        unsigned char    chip_mac;
        unsigned char    chip_phy;
        unsigned int     clock_core;
        unsigned int     clock_mc3;
        unsigned int     clock_mc4;
        unsigned int     espi_nports;
        unsigned int     clock_cspi;
        unsigned int     clock_elmer0;
        unsigned char    mdio_mdien;
        unsigned char    mdio_mdiinv;
        unsigned char    mdio_mdc;
        unsigned char    mdio_phybaseaddr;
        struct gmac      *gmac;
        struct gphy      *gphy;
        struct mdio_ops  *mdio_ops;
        const char       *desc;
};

extern struct pci_device_id t1_pci_tbl[];

static inline int adapter_matches_type(const adapter_t *adapter,
                                       int version, int revision)
{
        return adapter->params.chip_version == version &&
               adapter->params.chip_revision == revision;
}

#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)

/* Returns true if an adapter supports VLAN acceleration and TSO */
static inline int vlan_tso_capable(const adapter_t *adapter)
{
        return !t1_is_T1B(adapter);
}

#define for_each_port(adapter, iter) \
        for (iter = 0; iter < (adapter)->params.nports; ++iter)

#define board_info(adapter) ((adapter)->params.brd_info)
#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)

static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
{
        return board_info(adap)->clock_core / 1000000;
}

extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);

extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
extern int elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);

extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
extern const struct board_info *t1_get_board_info(unsigned int board_id);
extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
                                                           unsigned short ssid);
extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
                            struct adapter_params *p);
extern int t1_init_hw_modules(adapter_t *adapter);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);

extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);

#endif /* _CXGB_COMMON_H_ */
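The t1_rx_mode structure, its accessor macros, and t1_get_next_mcaddr() together form the interface the MAC modules use to walk the multicast list without poking at net_device internals. A minimal sketch of a consumer built only on what this header provides (the function name and the 64-bin hash filter are hypothetical; ether_crc() comes from the linux/crc32.h include above, and the real consumers live in the MAC modules such as pm3393.c):

/* Hypothetical consumer of struct t1_rx_mode; illustrative only. */
static void example_set_mc_filter(struct t1_rx_mode *rm)
{
        u8 *addr;

        if (t1_rx_mode_promisc(rm) || t1_rx_mode_allmulti(rm))
                return;         /* hash filter is bypassed in these modes */

        /* rm->idx and rm->list track the walk; each call yields one address */
        while ((addr = t1_get_next_mcaddr(rm)) != NULL) {
                u32 bin = ether_crc(ETH_ALEN, addr) >> 26;  /* top 6 bits */
                /* ... set bit `bin` in a 64-entry hardware hash filter ... */
                (void)bin;
        }
}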
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 000000000000..3412342f7345
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
/*****************************************************************************
 *                                                                           *
 * File: cphy.h                                                              *
 * $Revision: 1.7 $                                                          *
 * $Date: 2005/06/21 18:29:47 $                                              *
 * Description:                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#ifndef _CXGB_CPHY_H_
#define _CXGB_CPHY_H_

#include "common.h"

struct mdio_ops {
        void (*init)(adapter_t *adapter, const struct board_info *bi);
        int  (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
                     int reg_addr, unsigned int *val);
        int  (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
                      int reg_addr, unsigned int val);
};

/* PHY interrupt types */
enum {
        cphy_cause_link_change = 0x1,
        cphy_cause_error = 0x2
};

struct cphy;

/* PHY operations */
struct cphy_ops {
        void (*destroy)(struct cphy *);
        int (*reset)(struct cphy *, int wait);

        int (*interrupt_enable)(struct cphy *);
        int (*interrupt_disable)(struct cphy *);
        int (*interrupt_clear)(struct cphy *);
        int (*interrupt_handler)(struct cphy *);

        int (*autoneg_enable)(struct cphy *);
        int (*autoneg_disable)(struct cphy *);
        int (*autoneg_restart)(struct cphy *);

        int (*advertise)(struct cphy *phy, unsigned int advertise_map);
        int (*set_loopback)(struct cphy *, int on);
        int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
        int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
                               int *duplex, int *fc);
};

/* A PHY instance */
struct cphy {
        int addr;                      /* PHY address */
        adapter_t *adapter;            /* associated adapter */
        struct cphy_ops *ops;          /* PHY operations */
        int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
                         int reg_addr, unsigned int *val);
        int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
                          int reg_addr, unsigned int val);
        struct cphy_instance *instance;
};

/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
                            unsigned int *valp)
{
        return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
}

static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
                             unsigned int val)
{
        return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
}

static inline int simple_mdio_read(struct cphy *cphy, int reg,
                                   unsigned int *valp)
{
        return mdio_read(cphy, 0, reg, valp);
}

static inline int simple_mdio_write(struct cphy *cphy, int reg,
                                    unsigned int val)
{
        return mdio_write(cphy, 0, reg, val);
}

/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
                             int phy_addr, struct cphy_ops *phy_ops,
                             struct mdio_ops *mdio_ops)
{
        phy->adapter = adapter;
        phy->addr    = phy_addr;
        phy->ops     = phy_ops;
        if (mdio_ops) {
                phy->mdio_read  = mdio_ops->read;
                phy->mdio_write = mdio_ops->write;
        }
}

/* Operations of the PHY-instance factory */
struct gphy {
        /* Construct a PHY instance with the given PHY address */
        struct cphy *(*create)(adapter_t *adapter, int phy_addr,
                               struct mdio_ops *mdio_ops);

        /*
         * Reset the PHY chip. This resets the whole PHY chip, not individual
         * ports.
         */
        int (*reset)(adapter_t *adapter);
};

extern struct gphy t1_mv88x201x_ops;
extern struct gphy t1_dummy_phy_ops;

#endif /* _CXGB_CPHY_H_ */
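Taken together, cphy_init() and the wrappers above define how a concrete PHY module plugs in: the gphy factory allocates a cphy, wires it to the board's MDIO routines, and from then on all register access goes through mdio_read()/mdio_write(). A sketch under those assumptions (my_phy_ops, my_phy_create and the register numbers are hypothetical, and the usual kernel headers are assumed; the in-tree mv88x201x.c follows the same general shape):

static struct cphy_ops my_phy_ops;      /* hypothetical; filled in elsewhere */

/* Allocate a cphy and wire it to the board's MDIO routines. */
static struct cphy *my_phy_create(adapter_t *adapter, int phy_addr,
                                  struct mdio_ops *mdio_ops)
{
        struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);

        if (cphy) {
                memset(cphy, 0, sizeof(*cphy));
                cphy_init(cphy, adapter, phy_addr, &my_phy_ops, mdio_ops);
        }
        return cphy;
}

/* After cphy_init(), register access goes through the wrappers, e.g.: */
static int my_phy_read_id(struct cphy *cphy, unsigned int *id)
{
        return mdio_read(cphy, 1 /* PMA/PMD MMD */, 2 /* ID register */, id);
}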
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 000000000000..27925e487bcf
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
/*****************************************************************************
 *                                                                           *
 * File: cpl5_cmd.h                                                          *
 * $Revision: 1.6 $                                                          *
 * $Date: 2005/06/21 18:29:47 $                                              *
 * Description:                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#ifndef _CXGB_CPL5_CMD_H_
#define _CXGB_CPL5_CMD_H_

#include <asm/byteorder.h>

#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
#error "Adjust your <asm/byteorder.h> defines"
#endif

enum CPL_opcode {
        CPL_RX_PKT     = 0xAD,
        CPL_TX_PKT     = 0xB2,
        CPL_TX_PKT_LSO = 0xB6,
};

enum {                /* TX_PKT_LSO ethernet types */
        CPL_ETH_II,
        CPL_ETH_II_VLAN,
        CPL_ETH_802_3,
        CPL_ETH_802_3_VLAN
};

struct cpl_rx_data {
        u32 rsvd0;
        u32 len;
        u32 seq;
        u16 urg;
        u8  rsvd1;
        u8  status;
};

/*
 * We want this header's alignment to be no more stringent than 2-byte aligned.
 * All fields are u8 or u16 except for the length. However that field is not
 * used so we break it into 2 16-bit parts to easily meet our alignment needs.
 */
struct cpl_tx_pkt {
        u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 iff:4;
        u8 ip_csum_dis:1;
        u8 l4_csum_dis:1;
        u8 vlan_valid:1;
        u8 rsvd:1;
#else
        u8 rsvd:1;
        u8 vlan_valid:1;
        u8 l4_csum_dis:1;
        u8 ip_csum_dis:1;
        u8 iff:4;
#endif
        u16 vlan;
        u16 len_hi;
        u16 len_lo;
};

struct cpl_tx_pkt_lso {
        u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 iff:4;
        u8 ip_csum_dis:1;
        u8 l4_csum_dis:1;
        u8 vlan_valid:1;
        u8 rsvd:1;
#else
        u8 rsvd:1;
        u8 vlan_valid:1;
        u8 l4_csum_dis:1;
        u8 ip_csum_dis:1;
        u8 iff:4;
#endif
        u16 vlan;
        u32 len;

        u32 rsvd2;
        u8 rsvd3;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 tcp_hdr_words:4;
        u8 ip_hdr_words:4;
#else
        u8 ip_hdr_words:4;
        u8 tcp_hdr_words:4;
#endif
        u16 eth_type_mss;
};

struct cpl_rx_pkt {
        u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 iff:4;
        u8 csum_valid:1;
        u8 bad_pkt:1;
        u8 vlan_valid:1;
        u8 rsvd:1;
#else
        u8 rsvd:1;
        u8 vlan_valid:1;
        u8 bad_pkt:1;
        u8 csum_valid:1;
        u8 iff:4;
#endif
        u16 csum;
        u16 vlan;
        u16 len;
};

#endif /* _CXGB_CPL5_CMD_H_ */
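The comment above struct cpl_tx_pkt is the key to its odd length encoding: splitting len into len_hi/len_lo keeps the whole header 2-byte aligned, and since the hardware does not consume the field the driver never has to reassemble it. A sketch of how a transmit path might fill the header under that scheme (a hypothetical helper; the real fill code lives in the SGE transmit path in sge.c):

/* Hypothetical fill helper; illustrative of the layout above only. */
static void example_fill_tx_pkt(struct cpl_tx_pkt *cpl, unsigned int iff)
{
        cpl->opcode = CPL_TX_PKT;
        cpl->iff = iff;              /* egress interface */
        cpl->ip_csum_dis = 1;        /* checksum offload off in this sketch */
        cpl->l4_csum_dis = 1;
        cpl->vlan_valid = 0;         /* no VLAN tag insertion */
        cpl->len_hi = 0;             /* length halves unused by the hardware, */
        cpl->len_lo = 0;             /* per the alignment comment above */
}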
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 000000000000..349ebe783ed6
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "espi.h"

#ifdef work_struct
#include <linux/tqueue.h>
#define INIT_WORK INIT_TQUEUE
#define schedule_work schedule_task
#define flush_scheduled_work flush_scheduled_tasks

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        del_timer_sync(&ap->stats_update_timer);
        flush_scheduled_tasks();
}

/*
 * Stats update timer for 2.4. It schedules a task to do the actual update as
 * we need to access MAC statistics in process context.
 */
static void mac_stats_timer(unsigned long data)
{
        struct adapter *ap = (struct adapter *)data;

        schedule_task(&ap->stats_update_task);
}
#else
#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}
#endif

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH 16384U
#define MAX_TX_BUFFERS_LOW 1536U
#define MIN_FL_ENTRIES 32

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

MODULE_PARM(dflt_msg_enable, "i");
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");


static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        rm.idx = 0;
        rm.list = dev->mc_list;
        mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                        case SPEED_10000: s = "10Gbps"; break;
                        case SPEED_1000:  s = "1000Mbps"; break;
                        case SPEED_100:   s = "100Mbps"; break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
                     int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);

        }
}

static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->flags & TSO_CAPABLE)
                t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);
        if ((err = request_irq(adapter->pdev->irq,
                               t1_select_intr_handler(adapter), SA_SHIRQ,
                               adapter->name, adapter))) {
                goto out_err;
        }
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
 out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->RxFCSErrors;
        ns->rx_frame_errors = pstats->RxAlignErrors;
        ns->rx_fifo_errors = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",

        "TSO",
        "VLANextractions",
        "VLANinsertions",
        "RxCsumGood",
        "TxCsumOffload",
387 "RxDrops"
388
389 "respQ_empty",
390 "respQ_overflow",
391 "freelistQ_empty",
392 "pkt_too_big",
393 "pkt_mismatch",
394 "cmdQ_full0",
395 "cmdQ_full1",
396 "tx_ipfrags",
397 "tx_reg_pkts",
398 "tx_lso_pkts",
399 "tx_do_cksum",
400
401 "espi_DIP2ParityErr",
402 "espi_DIP4Err",
403 "espi_RxDrops",
404 "espi_TxDrops",
405 "espi_RxOvfl",
406 "espi_ParityErr"
407};
408
409#define T2_REGMAP_SIZE (3 * 1024)
410
411static int get_regs_len(struct net_device *dev)
412{
413 return T2_REGMAP_SIZE;
414}
415
416static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
417{
418 struct adapter *adapter = dev->priv;
419
420 strcpy(info->driver, DRV_NAME);
421 strcpy(info->version, DRV_VERSION);
422 strcpy(info->fw_version, "N/A");
423 strcpy(info->bus_info, pci_name(adapter->pdev));
424}
425
426static int get_stats_count(struct net_device *dev)
427{
428 return ARRAY_SIZE(stats_strings);
429}
430
431static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
432{
433 if (stringset == ETH_SS_STATS)
434 memcpy(data, stats_strings, sizeof(stats_strings));
435}
436
437static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
438 u64 *data)
439{
440 struct adapter *adapter = dev->priv;
441 struct cmac *mac = adapter->port[dev->if_port].mac;
442 const struct cmac_statistics *s;
443 const struct sge_port_stats *ss;
444 const struct sge_intr_counts *t;
445
446 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
447 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
448 t = t1_sge_get_intr_counts(adapter->sge);
449
450 *data++ = s->TxOctetsOK;
451 *data++ = s->TxOctetsBad;
452 *data++ = s->TxUnicastFramesOK;
453 *data++ = s->TxMulticastFramesOK;
454 *data++ = s->TxBroadcastFramesOK;
455 *data++ = s->TxPauseFrames;
456 *data++ = s->TxFramesWithDeferredXmissions;
457 *data++ = s->TxLateCollisions;
458 *data++ = s->TxTotalCollisions;
459 *data++ = s->TxFramesAbortedDueToXSCollisions;
460 *data++ = s->TxUnderrun;
461 *data++ = s->TxLengthErrors;
462 *data++ = s->TxInternalMACXmitError;
463 *data++ = s->TxFramesWithExcessiveDeferral;
464 *data++ = s->TxFCSErrors;
465
466 *data++ = s->RxOctetsOK;
467 *data++ = s->RxOctetsBad;
468 *data++ = s->RxUnicastFramesOK;
469 *data++ = s->RxMulticastFramesOK;
470 *data++ = s->RxBroadcastFramesOK;
471 *data++ = s->RxPauseFrames;
472 *data++ = s->RxFCSErrors;
473 *data++ = s->RxAlignErrors;
474 *data++ = s->RxSymbolErrors;
475 *data++ = s->RxDataErrors;
476 *data++ = s->RxSequenceErrors;
477 *data++ = s->RxRuntErrors;
478 *data++ = s->RxJabberErrors;
479 *data++ = s->RxInternalMACRcvError;
480 *data++ = s->RxInRangeLengthErrors;
481 *data++ = s->RxOutOfRangeLengthField;
482 *data++ = s->RxFrameTooLongErrors;
483
484 *data++ = ss->tso;
485 *data++ = ss->vlan_xtract;
486 *data++ = ss->vlan_insert;
487 *data++ = ss->rx_cso_good;
488 *data++ = ss->tx_cso;
489 *data++ = ss->rx_drops;
490
491 *data++ = (u64)t->respQ_empty;
492 *data++ = (u64)t->respQ_overflow;
493 *data++ = (u64)t->freelistQ_empty;
494 *data++ = (u64)t->pkt_too_big;
495 *data++ = (u64)t->pkt_mismatch;
496 *data++ = (u64)t->cmdQ_full[0];
497 *data++ = (u64)t->cmdQ_full[1];
498 *data++ = (u64)t->tx_ipfrags;
499 *data++ = (u64)t->tx_reg_pkts;
500 *data++ = (u64)t->tx_lso_pkts;
501 *data++ = (u64)t->tx_do_cksum;
502}
503
504static inline void reg_block_dump(struct adapter *ap, void *buf,
505 unsigned int start, unsigned int end)
506{
507 u32 *p = buf + start;
508
509 for ( ; start <= end; start += sizeof(u32))
510 *p++ = readl(ap->regs + start);
511}
512
513static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
514 void *buf)
515{
516 struct adapter *ap = dev->priv;
517
518 /*
519 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
520 */
521 regs->version = 2;
522
523 memset(buf, 0, T2_REGMAP_SIZE);
524 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
525}
526
527static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
528{
529 struct adapter *adapter = dev->priv;
530 struct port_info *p = &adapter->port[dev->if_port];
531
532 cmd->supported = p->link_config.supported;
533 cmd->advertising = p->link_config.advertising;
534
535 if (netif_carrier_ok(dev)) {
536 cmd->speed = p->link_config.speed;
537 cmd->duplex = p->link_config.duplex;
538 } else {
539 cmd->speed = -1;
540 cmd->duplex = -1;
541 }
542
543 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
544 cmd->phy_address = p->phy->addr;
545 cmd->transceiver = XCVR_EXTERNAL;
546 cmd->autoneg = p->link_config.autoneg;
547 cmd->maxtxpkt = 0;
548 cmd->maxrxpkt = 0;
549 return 0;
550}
551
552static int speed_duplex_to_caps(int speed, int duplex)
553{
554 int cap = 0;
555
556 switch (speed) {
557 case SPEED_10:
558 if (duplex == DUPLEX_FULL)
559 cap = SUPPORTED_10baseT_Full;
560 else
561 cap = SUPPORTED_10baseT_Half;
562 break;
563 case SPEED_100:
564 if (duplex == DUPLEX_FULL)
565 cap = SUPPORTED_100baseT_Full;
566 else
567 cap = SUPPORTED_100baseT_Half;
568 break;
569 case SPEED_1000:
570 if (duplex == DUPLEX_FULL)
571 cap = SUPPORTED_1000baseT_Full;
572 else
573 cap = SUPPORTED_1000baseT_Half;
574 break;
575 case SPEED_10000:
576 if (duplex == DUPLEX_FULL)
577 cap = SUPPORTED_10000baseT_Full;
578 }
579 return cap;
580}
581
582#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
583 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
584 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
585 ADVERTISED_10000baseT_Full)
586
587static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
588{
589 struct adapter *adapter = dev->priv;
590 struct port_info *p = &adapter->port[dev->if_port];
591 struct link_config *lc = &p->link_config;
592
593 if (!(lc->supported & SUPPORTED_Autoneg))
594 return -EOPNOTSUPP; /* can't change speed/duplex */
595
596 if (cmd->autoneg == AUTONEG_DISABLE) {
597 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
598
599 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
600 return -EINVAL;
601 lc->requested_speed = cmd->speed;
602 lc->requested_duplex = cmd->duplex;
603 lc->advertising = 0;
604 } else {
605 cmd->advertising &= ADVERTISED_MASK;
606 if (cmd->advertising & (cmd->advertising - 1))
607 cmd->advertising = lc->supported;
608 cmd->advertising &= lc->supported;
609 if (!cmd->advertising)
610 return -EINVAL;
611 lc->requested_speed = SPEED_INVALID;
612 lc->requested_duplex = DUPLEX_INVALID;
613 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
614 }
615 lc->autoneg = cmd->autoneg;
616 if (netif_running(dev))
617 t1_link_start(p->phy, p->mac, lc);
618 return 0;
619}
620
621static void get_pauseparam(struct net_device *dev,
622 struct ethtool_pauseparam *epause)
623{
624 struct adapter *adapter = dev->priv;
625 struct port_info *p = &adapter->port[dev->if_port];
626
627 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
628 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
629 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
630}
631
632static int set_pauseparam(struct net_device *dev,
633 struct ethtool_pauseparam *epause)
634{
635 struct adapter *adapter = dev->priv;
636 struct port_info *p = &adapter->port[dev->if_port];
637 struct link_config *lc = &p->link_config;
638
639 if (epause->autoneg == AUTONEG_DISABLE)
640 lc->requested_fc = 0;
641 else if (lc->supported & SUPPORTED_Autoneg)
642 lc->requested_fc = PAUSE_AUTONEG;
643 else
644 return -EINVAL;
645
646 if (epause->rx_pause)
647 lc->requested_fc |= PAUSE_RX;
648 if (epause->tx_pause)
649 lc->requested_fc |= PAUSE_TX;
650 if (lc->autoneg == AUTONEG_ENABLE) {
651 if (netif_running(dev))
652 t1_link_start(p->phy, p->mac, lc);
653 } else {
654 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
655 if (netif_running(dev))
656 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
657 lc->fc);
658 }
659 return 0;
660}
661
662static u32 get_rx_csum(struct net_device *dev)
663{
664 struct adapter *adapter = dev->priv;
665
666 return (adapter->flags & RX_CSUM_ENABLED) != 0;
667}
668
669static int set_rx_csum(struct net_device *dev, u32 data)
670{
671 struct adapter *adapter = dev->priv;
672
673 if (data)
674 adapter->flags |= RX_CSUM_ENABLED;
675 else
676 adapter->flags &= ~RX_CSUM_ENABLED;
677 return 0;
678}
679
680static int set_tso(struct net_device *dev, u32 value)
681{
682 struct adapter *adapter = dev->priv;
683
684 if (!(adapter->flags & TSO_CAPABLE))
685 return value ? -EOPNOTSUPP : 0;
686 return ethtool_op_set_tso(dev, value);
687}
688
689static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
690{
691 struct adapter *adapter = dev->priv;
692 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
693
694 e->rx_max_pending = MAX_RX_BUFFERS;
695 e->rx_mini_max_pending = 0;
696 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
697 e->tx_max_pending = MAX_CMDQ_ENTRIES;
698
699 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
700 e->rx_mini_pending = 0;
701 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
702 e->tx_pending = adapter->params.sge.cmdQ_size[0];
703}
704
705static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
706{
707 struct adapter *adapter = dev->priv;
708 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
709
710 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
711 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
712 e->tx_pending > MAX_CMDQ_ENTRIES ||
713 e->rx_pending < MIN_FL_ENTRIES ||
714 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
715 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
716 return -EINVAL;
717
718 if (adapter->flags & FULL_INIT_DONE)
719 return -EBUSY;
720
721 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
722 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
723 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
724 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
725 MAX_CMDQ1_ENTRIES : e->tx_pending;
726 return 0;
727}
728
729static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
730{
731 struct adapter *adapter = dev->priv;
732
733 /*
734 * If RX coalescing is requested we use NAPI, otherwise interrupts.
735 * This choice can be made only when all ports and the TOE are off.
736 */
737 if (adapter->open_device_map == 0)
738 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
739
740 if (adapter->params.sge.polling) {
741 adapter->params.sge.rx_coalesce_usecs = 0;
742 } else {
743 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
744 }
745 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
746 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
747 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
748 return 0;
749}
750
751static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
752{
753 struct adapter *adapter = dev->priv;
754
755 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
756 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
757 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
758 return 0;
759}
760
761static int get_eeprom_len(struct net_device *dev)
762{
763 return EEPROM_SIZE;
764}
765
766#define EEPROM_MAGIC(ap) \
767 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
768
769static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
770 u8 *data)
771{
772 int i;
773 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
774 struct adapter *adapter = dev->priv;
775
776 e->magic = EEPROM_MAGIC(adapter);
777 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
778 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
779 memcpy(data, buf + e->offset, e->len);
780 return 0;
781}
782
783static struct ethtool_ops t1_ethtool_ops = {
784 .get_settings = get_settings,
785 .set_settings = set_settings,
786 .get_drvinfo = get_drvinfo,
787 .get_msglevel = get_msglevel,
788 .set_msglevel = set_msglevel,
789 .get_ringparam = get_sge_param,
790 .set_ringparam = set_sge_param,
791 .get_coalesce = get_coalesce,
792 .set_coalesce = set_coalesce,
793 .get_eeprom_len = get_eeprom_len,
794 .get_eeprom = get_eeprom,
795 .get_pauseparam = get_pauseparam,
796 .set_pauseparam = set_pauseparam,
797 .get_rx_csum = get_rx_csum,
798 .set_rx_csum = set_rx_csum,
799 .get_tx_csum = ethtool_op_get_tx_csum,
800 .set_tx_csum = ethtool_op_set_tx_csum,
801 .get_sg = ethtool_op_get_sg,
802 .set_sg = ethtool_op_set_sg,
803 .get_link = ethtool_op_get_link,
804 .get_strings = get_strings,
805 .get_stats_count = get_stats_count,
806 .get_ethtool_stats = get_stats,
807 .get_regs_len = get_regs_len,
808 .get_regs = get_regs,
809 .get_tso = ethtool_op_get_tso,
810 .set_tso = set_tso,
811};
812
813static void cxgb_proc_cleanup(struct adapter *adapter,
814 struct proc_dir_entry *dir)
815{
816 const char *name;
817 name = adapter->name;
818 remove_proc_entry(name, dir);
819}
820//#define chtoe_setup_toedev(adapter) NULL
821#define update_mtu_tab(adapter)
822#define write_smt_entry(adapter, idx)
823
824static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
825{
826 struct adapter *adapter = dev->priv;
827 struct mii_ioctl_data *data = if_mii(req);
828
829 switch (cmd) {
830 case SIOCGMIIPHY:
831 data->phy_id = adapter->port[dev->if_port].phy->addr;
832 /* FALLTHRU */
833 case SIOCGMIIREG: {
834 struct cphy *phy = adapter->port[dev->if_port].phy;
835 u32 val;
836
837 if (!phy->mdio_read)
838 return -EOPNOTSUPP;
839 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
840 &val);
841 data->val_out = val;
842 break;
843 }
844 case SIOCSMIIREG: {
845 struct cphy *phy = adapter->port[dev->if_port].phy;
846
847 if (!capable(CAP_NET_ADMIN))
848 return -EPERM;
849 if (!phy->mdio_write)
850 return -EOPNOTSUPP;
851 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
852 data->val_in);
853 break;
854 }
855
856 default:
857 return -EOPNOTSUPP;
858 }
859 return 0;
860}
861
862static int t1_change_mtu(struct net_device *dev, int new_mtu)
863{
864 int ret;
865 struct adapter *adapter = dev->priv;
866 struct cmac *mac = adapter->port[dev->if_port].mac;
867
868 if (!mac->ops->set_mtu)
869 return -EOPNOTSUPP;
870 if (new_mtu < 68)
871 return -EINVAL;
872 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
873 return ret;
874 dev->mtu = new_mtu;
875 return 0;
876}
877
878static int t1_set_mac_addr(struct net_device *dev, void *p)
879{
880 struct adapter *adapter = dev->priv;
881 struct cmac *mac = adapter->port[dev->if_port].mac;
882 struct sockaddr *addr = p;
883
884 if (!mac->ops->macaddress_set)
885 return -EOPNOTSUPP;
886
887 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
888 mac->ops->macaddress_set(mac, dev->dev_addr);
889 return 0;
890}
891
892#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
893static void vlan_rx_register(struct net_device *dev,
894 struct vlan_group *grp)
895{
896 struct adapter *adapter = dev->priv;
897
898 spin_lock_irq(&adapter->async_lock);
899 adapter->vlan_grp = grp;
900 t1_set_vlan_accel(adapter, grp != NULL);
901 spin_unlock_irq(&adapter->async_lock);
902}
903
904static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
905{
906 struct adapter *adapter = dev->priv;
907
908 spin_lock_irq(&adapter->async_lock);
909 if (adapter->vlan_grp)
910 adapter->vlan_grp->vlan_devices[vid] = NULL;
911 spin_unlock_irq(&adapter->async_lock);
912}
913#endif
914
915#ifdef CONFIG_NET_POLL_CONTROLLER
916static void t1_netpoll(struct net_device *dev)
917{
918 unsigned long flags;
919 struct adapter *adapter = dev->priv;
920
921 local_irq_save(flags);
922 t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
923 local_irq_restore(flags);
924}
925#endif
926
927/*
928 * Periodic accumulation of MAC statistics. This is used only if the MAC
929 * does not have any other way to prevent stats counter overflow.
930 */
931static void mac_stats_task(void *data)
932{
933 int i;
934 struct adapter *adapter = data;
935
936 for_each_port(adapter, i) {
937 struct port_info *p = &adapter->port[i];
938
939 if (netif_running(p->dev))
940 p->mac->ops->statistics_update(p->mac,
941 MAC_STATS_UPDATE_FAST);
942 }
943
944 /* Schedule the next statistics update if any port is active. */
945 spin_lock(&adapter->work_lock);
946 if (adapter->open_device_map & PORT_MASK)
947 schedule_mac_stats_update(adapter,
948 adapter->params.stats_update_period);
949 spin_unlock(&adapter->work_lock);
950}
951
952/*
953 * Processes elmer0 external interrupts in process context.
954 */
955static void ext_intr_task(void *data)
956{
957 struct adapter *adapter = data;
958
959 elmer0_ext_intr_handler(adapter);
960
961 /* Now reenable external interrupts */
962 spin_lock_irq(&adapter->async_lock);
963 adapter->slow_intr_mask |= F_PL_INTR_EXT;
964 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
965 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
966 adapter->regs + A_PL_ENABLE);
967 spin_unlock_irq(&adapter->async_lock);
968}
969
970/*
971 * Interrupt-context handler for elmer0 external interrupts.
972 */
973void t1_elmer0_ext_intr(struct adapter *adapter)
974{
975 /*
976 * Schedule a task to handle external interrupts as we require
977 * a process context. We disable EXT interrupts in the interim
978 * and let the task reenable them when it's done.
979 */
980 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
981 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
982 adapter->regs + A_PL_ENABLE);
983 schedule_work(&adapter->ext_intr_handler_task);
984}
985
986void t1_fatal_err(struct adapter *adapter)
987{
988 if (adapter->flags & FULL_INIT_DONE) {
989 t1_sge_stop(adapter->sge);
990 t1_interrupts_disable(adapter);
991 }
992 CH_ALERT("%s: encountered fatal error, operation suspended\n",
993 adapter->name);
994}
995
996static int __devinit init_one(struct pci_dev *pdev,
997 const struct pci_device_id *ent)
998{
999 static int version_printed;
1000
1001 int i, err, pci_using_dac = 0;
1002 unsigned long mmio_start, mmio_len;
1003 const struct board_info *bi;
1004 struct adapter *adapter = NULL;
1005 struct port_info *pi;
1006
1007 if (!version_printed) {
1008 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1009 DRV_VERSION);
1010 ++version_printed;
1011 }
1012
1013 err = pci_enable_device(pdev);
1014 if (err)
1015 return err;
1016
1017 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1018 CH_ERR("%s: cannot find PCI device memory base address\n",
1019 pci_name(pdev));
1020 err = -ENODEV;
1021 goto out_disable_pdev;
1022 }
1023
1024 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1025 pci_using_dac = 1;
1026
1027 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                        CH_ERR("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_MODULE_OWNER(netdev);
                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev->priv;
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                CH_ERR("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;  /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        init_MUTEX(&adapter->mib_mutex);
                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task, adapter);
                        INIT_WORK(&adapter->stats_update_task, mac_stats_task,
                                  adapter);
#ifdef work_struct
                        init_timer(&adapter->stats_update_timer);
                        adapter->stats_update_timer.function = mac_stats_timer;
                        adapter->stats_update_timer.data =
                                (unsigned long)adapter;
#endif

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->priv = adapter;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
                netdev->features |= NETIF_F_LLTX;

                adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
                        adapter->flags |= VLAN_ACCEL_CAPABLE;
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->vlan_rx_register = vlan_rx_register;
                        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
                        adapter->flags |= TSO_CAPABLE;
                        netdev->features |= NETIF_F_TSO;
                }

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
                        sizeof(struct cpl_tx_pkt_lso) :
                        sizeof(struct cpl_tx_pkt);
                netdev->get_stats = t1_get_stats;
                netdev->set_multicast_list = t1_set_rxmode;
                netdev->do_ioctl = t1_ioctl;
                netdev->change_mtu = t1_change_mtu;
                netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = t1_netpoll;
#endif
                netdev->weight = 64;

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go. If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully. However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        CH_WARN("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                CH_ERR("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);
        return 0;

 out_release_adapter_res:
        t1_free_sw_modules(adapter);
 out_free_dev:
        if (adapter) {
                if (adapter->regs) iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev) {
                                cxgb_proc_cleanup(adapter, proc_root_driver);
                                kfree(adapter->port[i].dev);
                        }
        }
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                int i;
                struct adapter *adapter = dev->priv;

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i].dev);

                t1_free_sw_modules(adapter);
                iounmap(adapter->regs);
                while (--i >= 0)
                        if (adapter->port[i].dev) {
                                cxgb_proc_cleanup(adapter, proc_root_driver);
                                kfree(adapter->port[i].dev);
                        }
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                t1_sw_reset(pdev);
        }
}

static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        return pci_module_init(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);
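One detail of get_eeprom() above that is easy to miss: t1_seeprom_read() returns whole 32-bit words, so the loop rounds the requested offset down to a word boundary, reads words until the window is covered, and only then copies out the exact byte range. The same arithmetic in a standalone form (seeprom_read_word() and its fake data are hypothetical stand-ins for t1_seeprom_read(); only the offset handling is the point):

/* Standalone illustration of the aligned-window copy in get_eeprom(). */
#include <stdint.h>
#include <string.h>

#define EEPROM_SIZE 32

static void seeprom_read_word(unsigned int addr, uint32_t *val)
{
        *val = addr * 0x01010101u;      /* fake word, derived from address */
}

/* Caller guarantees offset + len <= EEPROM_SIZE, as the ethtool path does. */
static void read_window(unsigned int offset, unsigned int len, uint8_t *out)
{
        uint8_t buf[EEPROM_SIZE] __attribute__((aligned(4)));
        unsigned int i;

        /* Round the start down to a word boundary, then read whole words
         * until the byte window [offset, offset + len) is covered.
         */
        for (i = offset & ~3u; i < offset + len; i += sizeof(uint32_t))
                seeprom_read_word(i, (uint32_t *)&buf[i]);
        memcpy(out, buf + offset, len);
}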
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 000000000000..5590cb2dac19
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
1/*****************************************************************************
2 * *
3 * File: elmer0.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 22:49:43 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ELMER0_H_
40#define _CXGB_ELMER0_H_
41
42/* ELMER0 registers */
43#define A_ELMER0_VERSION 0x100000
44#define A_ELMER0_PHY_CFG 0x100004
45#define A_ELMER0_INT_ENABLE 0x100008
46#define A_ELMER0_INT_CAUSE 0x10000c
47#define A_ELMER0_GPI_CFG 0x100010
48#define A_ELMER0_GPI_STAT 0x100014
49#define A_ELMER0_GPO 0x100018
50#define A_ELMER0_PORT0_MI1_CFG 0x400000
51
52#define S_MI1_MDI_ENABLE 0
53#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
54#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
55
56#define S_MI1_MDI_INVERT 1
57#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
58#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
59
60#define S_MI1_PREAMBLE_ENABLE 2
61#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
62#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
63
64#define S_MI1_SOF 3
65#define M_MI1_SOF 0x3
66#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
67#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
68
69#define S_MI1_CLK_DIV 5
70#define M_MI1_CLK_DIV 0xff
71#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
72#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
73
74#define A_ELMER0_PORT0_MI1_ADDR 0x400004
75
76#define S_MI1_REG_ADDR 0
77#define M_MI1_REG_ADDR 0x1f
78#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
79#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
80
81#define S_MI1_PHY_ADDR 5
82#define M_MI1_PHY_ADDR 0x1f
83#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
84#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
85
86#define A_ELMER0_PORT0_MI1_DATA 0x400008
87
88#define S_MI1_DATA 0
89#define M_MI1_DATA 0xffff
90#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
91#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
92
93#define A_ELMER0_PORT0_MI1_OP 0x40000c
94
95#define S_MI1_OP 0
96#define M_MI1_OP 0x3
97#define V_MI1_OP(x) ((x) << S_MI1_OP)
98#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
99
100#define S_MI1_ADDR_AUTOINC 2
101#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
102#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
103
104#define S_MI1_OP_BUSY 31
105#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
106#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
107
108#define A_ELMER0_PORT1_MI1_CFG 0x500000
109#define A_ELMER0_PORT1_MI1_ADDR 0x500004
110#define A_ELMER0_PORT1_MI1_DATA 0x500008
111#define A_ELMER0_PORT1_MI1_OP 0x50000c
112#define A_ELMER0_PORT2_MI1_CFG 0x600000
113#define A_ELMER0_PORT2_MI1_ADDR 0x600004
114#define A_ELMER0_PORT2_MI1_DATA 0x600008
115#define A_ELMER0_PORT2_MI1_OP 0x60000c
116#define A_ELMER0_PORT3_MI1_CFG 0x700000
117#define A_ELMER0_PORT3_MI1_ADDR 0x700004
118#define A_ELMER0_PORT3_MI1_DATA 0x700008
119#define A_ELMER0_PORT3_MI1_OP 0x70000c
120
121/* Simple bit definitions for the GPI and GPO registers. */
122#define ELMER0_GP_BIT0 0x0001
123#define ELMER0_GP_BIT1 0x0002
124#define ELMER0_GP_BIT2 0x0004
125#define ELMER0_GP_BIT3 0x0008
126#define ELMER0_GP_BIT4 0x0010
127#define ELMER0_GP_BIT5 0x0020
128#define ELMER0_GP_BIT6 0x0040
129#define ELMER0_GP_BIT7 0x0080
130#define ELMER0_GP_BIT8 0x0100
131#define ELMER0_GP_BIT9 0x0200
132#define ELMER0_GP_BIT10 0x0400
133#define ELMER0_GP_BIT11 0x0800
134#define ELMER0_GP_BIT12 0x1000
135#define ELMER0_GP_BIT13 0x2000
136#define ELMER0_GP_BIT14 0x4000
137#define ELMER0_GP_BIT15 0x8000
138#define ELMER0_GP_BIT16 0x10000
139#define ELMER0_GP_BIT17 0x20000
140#define ELMER0_GP_BIT18 0x40000
141#define ELMER0_GP_BIT19 0x80000
142
143#define MI1_OP_DIRECT_WRITE 1
144#define MI1_OP_DIRECT_READ 2
145
146#define MI1_OP_INDIRECT_ADDRESS 0
147#define MI1_OP_INDIRECT_WRITE 1
148#define MI1_OP_INDIRECT_READ_INC 2
149#define MI1_OP_INDIRECT_READ 3
150
151#endif /* _CXGB_ELMER0_H_ */
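The S_/M_/V_/G_ macros above follow the driver's standard pattern for register fields: S_* gives the bit offset, M_* the field mask, V_*(x) shifts a value into place, and G_*(x) extracts it again. A minimal illustration (the values are made up, not a recommended MI1 configuration):

	u32 cfg = V_MI1_CLK_DIV(0x20) | V_MI1_SOF(1) | F_MI1_PREAMBLE_ENABLE;
	u32 div = G_MI1_CLK_DIV(cfg);	/* extracts 0x20 */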
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 000000000000..230642571c92
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
1/*****************************************************************************
2 * *
3 * File: espi.c *
4 * $Revision: 1.14 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "espi.h"
43
44struct peespi {
45 adapter_t *adapter;
46 struct espi_intr_counts intr_cnt;
47 u32 misc_ctrl;
48 spinlock_t lock;
49};
50
51#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
52 F_RAMPARITYERR | F_DIP2PARITYERR)
53#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
54 | F_MONITORED_INTERFACE)
55
56#define TRICN_CNFG 14
57#define TRICN_CMD_READ 0x11
58#define TRICN_CMD_WRITE 0x21
59#define TRICN_CMD_ATTEMPTS 10
60
61static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
62 int ch_addr, int reg_offset, u32 wr_data)
63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65
66 writel(V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE),
71 adapter->regs + A_ESPI_CMD_ADDR);
72 writel(0, adapter->regs + A_ESPI_GOSTAT);
73
74 do {
75 busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
76 } while (busy && --attempts);
77
78 if (busy)
79 CH_ERR("%s: TRICN write timed out\n", adapter->name);
80
81 return busy;
82}
83
84/* 1. Deassert rx_reset_core. */
85/* 2. Program TRICN_CNFG registers. */
86/* 3. Deassert rx_reset_link */
87static int tricn_init(adapter_t *adapter)
88{
89 int i = 0;
90 int sme = 1;
91 int stat = 0;
92 int timeout = 0;
93 int is_ready = 0;
94 int dynamic_deskew = 0;
95
96 if (dynamic_deskew)
97 sme = 0;
98
99
100	/* 1 */
101	timeout = 1000;
102	do {
103		stat = readl(adapter->regs + A_ESPI_RX_RESET);
104		is_ready = (stat & 0x4);
105		timeout--;
106		udelay(5);
107	} while (!is_ready && timeout > 0);
108	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
109	if (!is_ready)
110	{
111		CH_ERR("ESPI: tricn_init() timed out\n");
112		t1_fatal_err(adapter);
113	}
114
115 /* 2 */
116 if (sme) {
117 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
119 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
120 }
121	for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
122	for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
123	for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
124	tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
125	tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
126	tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
127	tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
128	tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
129
130 /* 3 */
131 writel(0x3, adapter->regs + A_ESPI_RX_RESET);
132
133 return 0;
134}
135
136void t1_espi_intr_enable(struct peespi *espi)
137{
138 u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
139
140 /*
141 * Cannot enable ESPI interrupts on T1B because HW asserts the
142 * interrupt incorrectly, namely the driver gets ESPI interrupts
143 * but no data is actually dropped (can verify this reading the ESPI
144 * drop registers). Also, once the ESPI interrupt is asserted it
145 * cannot be cleared (HW bug).
146 */
147 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
148 writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
149 writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
150}
151
152void t1_espi_intr_clear(struct peespi *espi)
153{
154 writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
155 writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
156}
157
158void t1_espi_intr_disable(struct peespi *espi)
159{
160 u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
161
162 writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
163 writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
164}
165
166int t1_espi_intr_handler(struct peespi *espi)
167{
168 u32 cnt;
169 u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
170
171 if (status & F_DIP4ERR)
172 espi->intr_cnt.DIP4_err++;
173 if (status & F_RXDROP)
174 espi->intr_cnt.rx_drops++;
175 if (status & F_TXDROP)
176 espi->intr_cnt.tx_drops++;
177 if (status & F_RXOVERFLOW)
178 espi->intr_cnt.rx_ovflw++;
179 if (status & F_RAMPARITYERR)
180 espi->intr_cnt.parity_err++;
181 if (status & F_DIP2PARITYERR) {
182 espi->intr_cnt.DIP2_parity_err++;
183
184 /*
185 * Must read the error count to clear the interrupt
186 * that it causes.
187 */
188 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
189 }
190
191 /*
192 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
193 * write the status as is.
194 */
195 if (status && t1_is_T1B(espi->adapter))
196 status = 1;
197 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
198 return 0;
199}
200
201const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
202{
203 return &espi->intr_cnt;
204}
205
206static void espi_setup_for_pm3393(adapter_t *adapter)
207{
208 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
209
210 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
211 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
212 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
213 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
214 writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
215 writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
216 writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
217 writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
218 writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
219}
220
221/* T2 Init part -- */
222/* 1. Set T_ESPI_MISCCTRL_ADDR */
223/* 2. Init ESPI registers. */
224/* 3. Init TriCN Hard Macro */
225int t1_espi_init(struct peespi *espi, int mac_type, int nports)
226{
227 u32 cnt;
228
229 u32 status_enable_extra = 0;
230 adapter_t *adapter = espi->adapter;
231 u32 status, burstval = 0x800100;
232
233 /* Disable ESPI training. MACs that can handle it enable it below. */
234 writel(0, adapter->regs + A_ESPI_TRAIN);
235
236 if (is_T2(adapter)) {
237 writel(V_OUT_OF_SYNC_COUNT(4) |
238 V_DIP2_PARITY_ERR_THRES(3) |
239 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
240 if (nports == 4) {
241 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
242 burstval = 0x200040;
243 }
244 }
245 writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
246
247 switch (mac_type) {
248 case CHBT_MAC_PM3393:
249 espi_setup_for_pm3393(adapter);
250 break;
251 default:
252 return -1;
253 }
254
255 /*
256	 * Make sure any pending interrupts from the ESPI are
257	 * cleared before enabling the interrupt.
258 */
259 writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
260 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
261 if (status & F_DIP2PARITYERR) {
262 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
263 }
264
265 /*
266 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
267 * write the status as is.
268 */
269 if (status && t1_is_T1B(espi->adapter))
270 status = 1;
271 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
272
273 writel(status_enable_extra | F_RXSTATUSENABLE,
274 adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
275
276 if (is_T2(adapter)) {
277 tricn_init(adapter);
278 /*
279 * Always position the control at the 1st port egress IN
280 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
281 */
282 espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
283 & ~MON_MASK) | (F_MONITORED_DIRECTION
284 | F_MONITORED_INTERFACE);
285 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
286 spin_lock_init(&espi->lock);
287 }
288
289 return 0;
290}
291
292void t1_espi_destroy(struct peespi *espi)
293{
294 kfree(espi);
295}
296
297struct peespi *t1_espi_create(adapter_t *adapter)
298{
299	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
300
301	if (espi) {
302		memset(espi, 0, sizeof(*espi));
303		espi->adapter = adapter;
304	}
305	return espi;
306}
307
308void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
309{
310 struct peespi *espi = adapter->espi;
311
312 if (!is_T2(adapter))
313 return;
314 spin_lock(&espi->lock);
315 espi->misc_ctrl = (val & ~MON_MASK) |
316 (espi->misc_ctrl & MON_MASK);
317 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
318 spin_unlock(&espi->lock);
319}
320
321u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
322{
323 u32 sel;
324
325 struct peespi *espi = adapter->espi;
326
327 if (!is_T2(adapter))
328 return 0;
329 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
330	if (!wait) {
331		if (!spin_trylock(&espi->lock))
332			return 0;
333	} else {
334		spin_lock(&espi->lock);
335	}
336	if (sel != (espi->misc_ctrl & MON_MASK)) {
337		writel((espi->misc_ctrl & ~MON_MASK) | sel,
338		       adapter->regs + A_ESPI_MISC_CONTROL);
339		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
340		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
341	} else {
342		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
343	}
344 spin_unlock(&espi->lock);
345 return sel;
346}
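tricn_write() above shows the driver's bounded busy-wait idiom: issue a command, then poll a BUSY flag a fixed number of times before declaring a timeout. Factored into a hypothetical helper (a sketch to make the pattern explicit, not part of this driver):

	static int poll_until_clear(void __iomem *reg, u32 busy_bit, int attempts)
	{
		u32 busy;

		do {
			busy = readl(reg) & busy_bit;
		} while (busy && --attempts);

		return busy;	/* nonzero means still busy after all attempts */
	}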
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 000000000000..c90e37f8457c
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
1/*****************************************************************************
2 * *
3 * File: espi.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ESPI_H_
40#define _CXGB_ESPI_H_
41
42#include "common.h"
43
44struct espi_intr_counts {
45 unsigned int DIP4_err;
46 unsigned int rx_drops;
47 unsigned int tx_drops;
48 unsigned int rx_ovflw;
49 unsigned int parity_err;
50 unsigned int DIP2_parity_err;
51};
52
53struct peespi;
54
55struct peespi *t1_espi_create(adapter_t *adapter);
56void t1_espi_destroy(struct peespi *espi);
57int t1_espi_init(struct peespi *espi, int mac_type, int nports);
58
59void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *);
63const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
64
65void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
66u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
67
68#endif /* _CXGB_ESPI_H_ */
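Taken together, these entry points imply a create/init/enable lifecycle. A hedged sketch of the call order, following how espi.c itself uses them (error handling elided; CHBT_MAC_PM3393 is the one MAC type t1_espi_init() currently accepts):

	struct peespi *espi = t1_espi_create(adapter);

	if (espi && t1_espi_init(espi, CHBT_MAC_PM3393, 1) == 0) {
		t1_espi_intr_clear(espi);
		t1_espi_intr_enable(espi);
	}
	/* ... and on teardown ... */
	t1_espi_intr_disable(espi);
	t1_espi_destroy(espi);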
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 000000000000..746b0eeea964
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
1/*****************************************************************************
2 * *
3 * File: gmac.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef _CXGB_GMAC_H_
41#define _CXGB_GMAC_H_
42
43#include "common.h"
44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
47
48struct cmac_statistics {
49 /* Transmit */
50 u64 TxOctetsOK;
51 u64 TxOctetsBad;
52 u64 TxUnicastFramesOK;
53 u64 TxMulticastFramesOK;
54 u64 TxBroadcastFramesOK;
55 u64 TxPauseFrames;
56 u64 TxFramesWithDeferredXmissions;
57 u64 TxLateCollisions;
58 u64 TxTotalCollisions;
59 u64 TxFramesAbortedDueToXSCollisions;
60 u64 TxUnderrun;
61 u64 TxLengthErrors;
62 u64 TxInternalMACXmitError;
63 u64 TxFramesWithExcessiveDeferral;
64 u64 TxFCSErrors;
65
66 /* Receive */
67 u64 RxOctetsOK;
68 u64 RxOctetsBad;
69 u64 RxUnicastFramesOK;
70 u64 RxMulticastFramesOK;
71 u64 RxBroadcastFramesOK;
72 u64 RxPauseFrames;
73 u64 RxFCSErrors;
74 u64 RxAlignErrors;
75 u64 RxSymbolErrors;
76 u64 RxDataErrors;
77 u64 RxSequenceErrors;
78 u64 RxRuntErrors;
79 u64 RxJabberErrors;
80 u64 RxInternalMACRcvError;
81 u64 RxInRangeLengthErrors;
82 u64 RxOutOfRangeLengthField;
83 u64 RxFrameTooLongErrors;
84};
85
86struct cmac_ops {
87 void (*destroy)(struct cmac *);
88 int (*reset)(struct cmac *);
89 int (*interrupt_enable)(struct cmac *);
90 int (*interrupt_disable)(struct cmac *);
91 int (*interrupt_clear)(struct cmac *);
92 int (*interrupt_handler)(struct cmac *);
93
94 int (*enable)(struct cmac *, int);
95 int (*disable)(struct cmac *, int);
96
97 int (*loopback_enable)(struct cmac *);
98 int (*loopback_disable)(struct cmac *);
99
100 int (*set_mtu)(struct cmac *, int mtu);
101 int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
102
103 int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
104 int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
105 int *fc);
106
107 const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
108
109 int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
110 int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
111};
112
113typedef struct _cmac_instance cmac_instance;
114
115struct cmac {
116 struct cmac_statistics stats;
117 adapter_t *adapter;
118 struct cmac_ops *ops;
119 cmac_instance *instance;
120};
121
122struct gmac {
123 unsigned int stats_update_period;
124 struct cmac *(*create)(adapter_t *adapter, int index);
125 int (*reset)(adapter_t *);
126};
127
128extern struct gmac t1_pm3393_ops;
129extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops;
133
134#endif /* _CXGB_GMAC_H_ */
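struct gmac and struct cmac form a small vtable scheme: a struct gmac describes a MAC family (its create hook and full-chip reset), while each cmac instance dispatches through its ops table. A usage sketch (illustrative only, assuming a PM3393-based board):

	struct cmac *mac = t1_pm3393_ops.create(adapter, 0);	/* port 0 */

	if (mac) {
		mac->ops->reset(mac);
		mac->ops->set_mtu(mac, 1500);
		mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	}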
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 000000000000..db5034282782
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
1/*****************************************************************************
2 * *
3 * File: mv88x201x.c *
4 * $Revision: 1.12 $ *
5 * $Date: 2005/04/15 19:27:14 $ *
6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "cphy.h"
41#include "elmer0.h"
42
43/*
44 * The 88x2010 Rev C. requires some link status registers to be read
45 * twice in order to get the right values. Future revisions will fix
46 * this problem and then this macro can disappear.
47 */
48#define MV88x2010_LINK_STATUS_BUGS 1
49
50static int led_init(struct cphy *cphy)
51{
52	/* Set up the LED registers so we can turn the LEDs on/off.
53 * Writing these bits maps control to another
54 * register. mmd(0x1) addr(0x7)
55 */
56 mdio_write(cphy, 0x3, 0x8304, 0xdddd);
57 return 0;
58}
59
60static int led_link(struct cphy *cphy, u32 do_enable)
61{
62 u32 led = 0;
63#define LINK_ENABLE_BIT 0x1
64
65 mdio_read(cphy, 0x1, 0x7, &led);
66
67 if (do_enable & LINK_ENABLE_BIT) {
68 led |= LINK_ENABLE_BIT;
69 mdio_write(cphy, 0x1, 0x7, led);
70 } else {
71 led &= ~LINK_ENABLE_BIT;
72 mdio_write(cphy, 0x1, 0x7, led);
73 }
74 return 0;
75}
76
77/* Port Reset */
78static int mv88x201x_reset(struct cphy *cphy, int wait)
79{
80 /* This can be done through registers. It is not required since
81 * a full chip reset is used.
82 */
83 return 0;
84}
85
86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{
88 u32 elmer;
89
90 /* Enable PHY LASI interrupts. */
91 mdio_write(cphy, 0x1, 0x9002, 0x1);
92
93 /* Enable Marvell interrupts through Elmer0. */
94 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
95 elmer |= ELMER0_GP_BIT6;
96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
97 return 0;
98}
99
100static int mv88x201x_interrupt_disable(struct cphy *cphy)
101{
102 u32 elmer;
103
104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106
107 /* Disable Marvell interrupts through Elmer0. */
108 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
109 elmer &= ~ELMER0_GP_BIT6;
110 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
111 return 0;
112}
113
114static int mv88x201x_interrupt_clear(struct cphy *cphy)
115{
116 u32 elmer;
117 u32 val;
118
119#ifdef MV88x2010_LINK_STATUS_BUGS
120	/* Required to read twice before the clear takes effect. */
121 mdio_read(cphy, 0x1, 0x9003, &val);
122 mdio_read(cphy, 0x1, 0x9004, &val);
123 mdio_read(cphy, 0x1, 0x9005, &val);
124
125 /* Read this register after the others above it else
126 * the register doesn't clear correctly.
127 */
128 mdio_read(cphy, 0x1, 0x1, &val);
129#endif
130
131 /* Clear link status. */
132 mdio_read(cphy, 0x1, 0x1, &val);
133 /* Clear PHY LASI interrupts. */
134 mdio_read(cphy, 0x1, 0x9005, &val);
135
136#ifdef MV88x2010_LINK_STATUS_BUGS
137 /* Do it again. */
138 mdio_read(cphy, 0x1, 0x9003, &val);
139 mdio_read(cphy, 0x1, 0x9004, &val);
140#endif
141
142 /* Clear Marvell interrupts through Elmer0. */
143 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
144 elmer |= ELMER0_GP_BIT6;
145 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
146 return 0;
147}
148
149static int mv88x201x_interrupt_handler(struct cphy *cphy)
150{
151 /* Clear interrupts */
152 mv88x201x_interrupt_clear(cphy);
153
154 /* We have only enabled link change interrupts and so
155 * cphy_cause must be a link change interrupt.
156 */
157 return cphy_cause_link_change;
158}
159
160static int mv88x201x_set_loopback(struct cphy *cphy, int on)
161{
162 return 0;
163}
164
165static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
166 int *speed, int *duplex, int *fc)
167{
168 u32 val = 0;
169#define LINK_STATUS_BIT 0x4
170
171 if (link_ok) {
172 /* Read link status. */
173 mdio_read(cphy, 0x1, 0x1, &val);
174 val &= LINK_STATUS_BIT;
175 *link_ok = (val == LINK_STATUS_BIT);
176 /* Turn on/off Link LED */
177 led_link(cphy, *link_ok);
178 }
179 if (speed)
180 *speed = SPEED_10000;
181 if (duplex)
182 *duplex = DUPLEX_FULL;
183 if (fc)
184 *fc = PAUSE_RX | PAUSE_TX;
185 return 0;
186}
187
188static void mv88x201x_destroy(struct cphy *cphy)
189{
190 kfree(cphy);
191}
192
193static struct cphy_ops mv88x201x_ops = {
194 .destroy = mv88x201x_destroy,
195 .reset = mv88x201x_reset,
196 .interrupt_enable = mv88x201x_interrupt_enable,
197 .interrupt_disable = mv88x201x_interrupt_disable,
198 .interrupt_clear = mv88x201x_interrupt_clear,
199 .interrupt_handler = mv88x201x_interrupt_handler,
200 .get_link_status = mv88x201x_get_link_status,
201 .set_loopback = mv88x201x_set_loopback,
202};
203
204static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
205 struct mdio_ops *mdio_ops)
206{
207 u32 val;
208 struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
209
210 if (!cphy)
211 return NULL;
212 memset(cphy, 0, sizeof(*cphy));
213 cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
214
215 /* Commands the PHY to enable XFP's clock. */
216 mdio_read(cphy, 0x3, 0x8300, &val);
217 mdio_write(cphy, 0x3, 0x8300, val | 1);
218
219 /* Clear link status. Required because of a bug in the PHY. */
220 mdio_read(cphy, 0x1, 0x8, &val);
221 mdio_read(cphy, 0x3, 0x8, &val);
222
223	/* Allows the Link/Ack LEDs to be turned on/off */
224 led_init(cphy);
225 return cphy;
226}
227
228/* Chip Reset */
229static int mv88x201x_phy_reset(adapter_t *adapter)
230{
231 u32 val;
232
233 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
234 val &= ~4;
235 t1_tpi_write(adapter, A_ELMER0_GPO, val);
236 msleep(100);
237
238 t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
239 msleep(1000);
240
241	/* Now let's enable the laser. Delay 100us. */
242 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
243 val |= 0x8000;
244 t1_tpi_write(adapter, A_ELMER0_GPO, val);
245 udelay(100);
246 return 0;
247}
248
249struct gphy t1_mv88x201x_ops = {
250 mv88x201x_phy_create,
251 mv88x201x_phy_reset
252};
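mv88x201x_interrupt_enable() and mv88x201x_interrupt_disable() above share one read-modify-write idiom on the Elmer0 interrupt-enable register. Factored into a hypothetical helper (a sketch to make the pattern explicit, not driver code):

	static void elmer0_int_gate(adapter_t *adapter, u32 bit, int enable)
	{
		u32 val;

		t1_tpi_read(adapter, A_ELMER0_INT_ENABLE, &val);
		if (enable)
			val |= bit;	/* e.g. ELMER0_GP_BIT6 for this PHY */
		else
			val &= ~bit;
		t1_tpi_write(adapter, A_ELMER0_INT_ENABLE, val);
	}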
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 000000000000..04a1404fc65e
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
1/*****************************************************************************
2 * *
3 * File: pm3393.c *
4 * $Revision: 1.16 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "gmac.h"
43#include "elmer0.h"
44#include "suni1x10gexp_regs.h"
45
46/* 802.3ae 10Gb/s MDIO Manageable Device (MMD)
47 */
48enum {
49 MMD_RESERVED,
50 MMD_PMAPMD,
51 MMD_WIS,
52 MMD_PCS,
53 MMD_PHY_XGXS, /* XGMII Extender Sublayer */
54 MMD_DTE_XGXS,
55};
56
57enum {
58 PHY_XGXS_CTRL_1,
59 PHY_XGXS_STATUS_1
60};
61
62#define OFFSET(REG_ADDR) (REG_ADDR << 2)
63
64/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
65#define MAX_FRAME_SIZE 9600
66
67#define IPG 12
68#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
69 SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
70 SUNI1x10GEXP_BITMSK_TXXG_PADEN)
71#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
72 SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
73
74/* Update statistics every 15 minutes */
75#define STATS_TICK_SECS (15 * 60)
76
77enum { /* RMON registers */
78 RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
79 RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
80 RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
81 RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
82 RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
83 RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
84 RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
85 RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
86 RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
87 RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
88 RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
89 RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
90 RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
91
92 TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
93 TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
94 TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
95 TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
96 TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
97 TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
98 TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
99};
100
101struct _cmac_instance {
102 u8 enabled;
103 u8 fc;
104 u8 mac_addr[6];
105};
106
107static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
108{
109 t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
110 return 0;
111}
112
113static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
114{
115 t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
116 return 0;
117}
118
119/* Port reset. */
120static int pm3393_reset(struct cmac *cmac)
121{
122 return 0;
123}
124
125/*
126 * Enable interrupts for the PM3393:
127 *
128 * 1. Enable PM3393 BLOCK interrupts.
129 * 2. Enable the PM3393 Master Interrupt bit (INTE).
130 * 3. Enable ELMER's PM3393 bit.
131 * 4. Enable the Terminator external interrupt.
132 */
133static int pm3393_interrupt_enable(struct cmac *cmac)
134{
135 u32 pl_intr;
136
137 /* PM3393 - Enabling all hardware block interrupts.
138 */
139 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
140 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
141 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
142 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
143
144 /* Don't interrupt on statistics overflow, we are polling */
145 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
146 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
147 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
148 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
149
150 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
151 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
152 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
153 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
154 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
155 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
156 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
157 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
158 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
159
160 /* PM3393 - Global interrupt enable
161 */
162 /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
163 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
164 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
165
166 /* TERMINATOR - PL_INTERUPTS_EXT */
167 pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
168 pl_intr |= F_PL_INTR_EXT;
169 writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
170 return 0;
171}
172
173static int pm3393_interrupt_disable(struct cmac *cmac)
174{
175 u32 elmer;
176
177	/* PM3393 - Disabling all hardware block interrupts. */
178 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
179 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
180 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
181 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
182 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
183 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
184 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
185 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
186 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
187 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
188 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
189 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
190 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
191 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
192 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
193 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
194 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
195
196 /* PM3393 - Global interrupt enable */
197 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
198
199 /* ELMER - External chip interrupts. */
200 t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
201 elmer &= ~ELMER0_GP_BIT1;
202 t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
203
204 /* TERMINATOR - PL_INTERUPTS_EXT */
205 /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
206 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
207 */
208
209 return 0;
210}
211
212static int pm3393_interrupt_clear(struct cmac *cmac)
213{
214 u32 elmer;
215 u32 pl_intr;
216 u32 val32;
217
218 /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
219 * bit WCIMODE=0 for a clear-on-read.
220 */
221 pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
222 pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
223 pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
224 pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
225 pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
226 pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
227 pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
228 pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
229 pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
230 pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
231 pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
232 pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
233 &val32);
234 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
235 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
236
237 /* PM3393 - Global interrupt status
238 */
239 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
240
241 /* ELMER - External chip interrupts.
242 */
243 t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
244 elmer |= ELMER0_GP_BIT1;
245 t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
246
247 /* TERMINATOR - PL_INTERUPTS_EXT
248 */
249 pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
250 pl_intr |= F_PL_INTR_EXT;
251 writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
252
253 return 0;
254}
255
256/* Interrupt handler */
257static int pm3393_interrupt_handler(struct cmac *cmac)
258{
259 u32 master_intr_status;
260	/*
261	 * 1. Read master interrupt register.
262	 * 2. Read BLOCK's interrupt status registers.
263	 * 3. Handle BLOCK interrupts.
264	 */
265 /* Read the master interrupt status register. */
266 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
267 &master_intr_status);
268
269 /* TBD XXX Lets just clear everything for now */
270 pm3393_interrupt_clear(cmac);
271
272 return 0;
273}
274
275static int pm3393_enable(struct cmac *cmac, int which)
276{
277 if (which & MAC_DIRECTION_RX)
278 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
279 (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
280
281 if (which & MAC_DIRECTION_TX) {
282 u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
283
284 if (cmac->instance->fc & PAUSE_RX)
285 val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
286 if (cmac->instance->fc & PAUSE_TX)
287 val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
288 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
289 }
290
291 cmac->instance->enabled |= which;
292 return 0;
293}
294
295static int pm3393_enable_port(struct cmac *cmac, int which)
296{
297 /* Clear port statistics */
298 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
299 SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
300 udelay(2);
301 memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
302
303 pm3393_enable(cmac, which);
304
305 /*
306	 * XXX This should be done by the PHY and preferably not at all.
307 * The PHY doesn't give us link status indication on its own so have
308 * the link management code query it instead.
309 */
310 {
311 extern void link_changed(adapter_t *adapter, int port_id);
312
313 link_changed(cmac->adapter, 0);
314 }
315 return 0;
316}
317
318static int pm3393_disable(struct cmac *cmac, int which)
319{
320 if (which & MAC_DIRECTION_RX)
321 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
322 if (which & MAC_DIRECTION_TX)
323 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
324
325 /*
326 * The disable is graceful. Give the PM3393 time. Can't wait very
327 * long here, we may be holding locks.
328 */
329 udelay(20);
330
331 cmac->instance->enabled &= ~which;
332 return 0;
333}
334
335static int pm3393_loopback_enable(struct cmac *cmac)
336{
337 return 0;
338}
339
340static int pm3393_loopback_disable(struct cmac *cmac)
341{
342 return 0;
343}
344
345static int pm3393_set_mtu(struct cmac *cmac, int mtu)
346{
347 int enabled = cmac->instance->enabled;
348
349 /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
350 mtu += 14 + 4;
351 if (mtu > MAX_FRAME_SIZE)
352 return -EINVAL;
353
354 /* Disable Rx/Tx MAC before configuring it. */
355 if (enabled)
356 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
357
358 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
359 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
360
361 if (enabled)
362 pm3393_enable(cmac, enabled);
363 return 0;
364}
365
366static u32 calc_crc(u8 *b, int len)
367{
368 int i;
369 u32 crc = (u32)~0;
370
371 /* calculate crc one bit at a time */
372 while (len--) {
373 crc ^= *b++;
374 for (i = 0; i < 8; i++) {
375 if (crc & 0x1)
376 crc = (crc >> 1) ^ 0xedb88320;
377 else
378 crc = (crc >> 1);
379 }
380 }
381
382 /* reverse bits */
383 crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
384 crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
385 crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
386 /* swap bytes */
387 crc = (crc >> 16) | (crc << 16);
388 crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
389
390 return crc;
391}
392
393static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
394{
395 int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
396 u32 rx_mode;
397
398 /* Disable MAC RX before reconfiguring it */
399 if (enabled)
400 pm3393_disable(cmac, MAC_DIRECTION_RX);
401
402 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
403 rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
404 SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
405 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
406 (u16)rx_mode);
407
408 if (t1_rx_mode_promisc(rm)) {
409 /* Promiscuous mode. */
410 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
411 }
412 if (t1_rx_mode_allmulti(rm)) {
413 /* Accept all multicast. */
414 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
415 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
416 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
417 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
418 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
419 } else if (t1_rx_mode_mc_cnt(rm)) {
420 /* Accept one or more multicast(s). */
421 u8 *addr;
422 int bit;
423 u16 mc_filter[4] = { 0, };
424
425 while ((addr = t1_get_next_mcaddr(rm))) {
426 bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
427 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
428 }
429 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
430 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
431 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
432 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
433 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
434 }
435
436 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
437
438 if (enabled)
439 pm3393_enable(cmac, MAC_DIRECTION_RX);
440
441 return 0;
442}
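Worked through for one address (values illustrative), the hash math above reduces a multicast MAC to a single bit in the 64-bit filter:

	u8 addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example group */
	u16 mc_filter[4] = { 0, };
	int bit = (calc_crc(addr, 6) >> 23) & 0x3f;	/* CRC bits 28:23 -> bucket 0..63 */

	mc_filter[bit >> 4] |= 1 << (bit & 0xf);	/* 16 buckets per 16-bit register */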
443
444static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
445 int *duplex, int *fc)
446{
447 if (speed)
448 *speed = SPEED_10000;
449 if (duplex)
450 *duplex = DUPLEX_FULL;
451 if (fc)
452 *fc = cmac->instance->fc;
453 return 0;
454}
455
456static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
457 int fc)
458{
459 if (speed >= 0 && speed != SPEED_10000)
460 return -1;
461 if (duplex >= 0 && duplex != DUPLEX_FULL)
462 return -1;
463 if (fc & ~(PAUSE_TX | PAUSE_RX))
464 return -1;
465
466 if (fc != cmac->instance->fc) {
467 cmac->instance->fc = (u8) fc;
468 if (cmac->instance->enabled & MAC_DIRECTION_TX)
469 pm3393_enable(cmac, MAC_DIRECTION_TX);
470 }
471 return 0;
472}
473
474#define RMON_UPDATE(mac, name, stat_name) \
475 { \
476 t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
477 t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
478 t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
479 (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
480 (((u64)val1 & 0xffff) << 16) | \
481 (((u64)val2 & 0xff) << 32) | \
482 ((mac)->stats.stat_name & \
483 (~(u64)0 << 40)); \
484		if (ro & ((u64)1 << \
485		    ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
486 (mac)->stats.stat_name += ((u64)1 << 40); \
487 }
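Expanded for a single counter, the macro splices three 16-bit reads into a 40-bit hardware value and preserves the software-extended bits above bit 39; roughly (a sketch of the expansion, with idx standing for this counter's position in the rollover bitmap ro):

	mac->stats.RxOctetsOK = ((u64)val0 & 0xffff) |
				(((u64)val1 & 0xffff) << 16) |
				(((u64)val2 & 0xff) << 32) |
				(mac->stats.RxOctetsOK & (~(u64)0 << 40));
	if (ro & ((u64)1 << idx))	/* hardware counter wrapped since last read */
		mac->stats.RxOctetsOK += (u64)1 << 40;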
488
489static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
490 int flag)
491{
492 u64 ro;
493 u32 val0, val1, val2, val3;
494
495 /* Snap the counters */
496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
497 SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
498
499 /* Counter rollover, clear on read */
500 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
501 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
502 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
503 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
506
507 /* Rx stats */
508 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
509 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
510 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
511 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
512 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
513 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
514 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
515 RxInternalMACRcvError);
516 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
517 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
518 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
519 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
520 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
521 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
522
523 /* Tx stats */
524 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
525 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
526 TxInternalMACXmitError);
527 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
528 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
529 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
530 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
531 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
532
533 return &mac->stats;
534}
535
536static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
537{
538 memcpy(mac_addr, cmac->instance->mac_addr, 6);
539 return 0;
540}
541
542static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
543{
544 u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
545
546 /*
547 * MAC addr: 00:07:43:00:13:09
548 *
549 * ma[5] = 0x09
550 * ma[4] = 0x13
551 * ma[3] = 0x00
552 * ma[2] = 0x43
553 * ma[1] = 0x07
554 * ma[0] = 0x00
555 *
556 * The PM3393 requires byte swapping and reverse order entry
557 * when programming MAC addresses:
558 *
559 * low_bits[15:0] = ma[1]:ma[0]
560 * mid_bits[31:16] = ma[3]:ma[2]
561 * high_bits[47:32] = ma[5]:ma[4]
562 */
563
564 /* Store local copy */
565 memcpy(cmac->instance->mac_addr, ma, 6);
566
567 lo = ((u32) ma[1] << 8) | (u32) ma[0];
568 mid = ((u32) ma[3] << 8) | (u32) ma[2];
569 hi = ((u32) ma[5] << 8) | (u32) ma[4];
570
571 /* Disable Rx/Tx MAC before configuring it. */
572 if (enabled)
573 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
574
575 /* Set RXXG Station Address */
576 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
577 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
578 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
579
580 /* Set TXXG Station Address */
581 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
582 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
583 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
584
585 /* Setup Exact Match Filter 1 with our MAC address
586 *
587 * Must disable exact match filter before configuring it.
588 */
589 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
590 val &= 0xff0f;
591 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
592
593 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
594 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
595 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
596
597 val |= 0x0090;
598 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
599
600 if (enabled)
601 pm3393_enable(cmac, enabled);
602 return 0;
603}
604
605static void pm3393_destroy(struct cmac *cmac)
606{
607 kfree(cmac);
608}
609
610static struct cmac_ops pm3393_ops = {
611 .destroy = pm3393_destroy,
612 .reset = pm3393_reset,
613 .interrupt_enable = pm3393_interrupt_enable,
614 .interrupt_disable = pm3393_interrupt_disable,
615 .interrupt_clear = pm3393_interrupt_clear,
616 .interrupt_handler = pm3393_interrupt_handler,
617 .enable = pm3393_enable_port,
618 .disable = pm3393_disable,
619 .loopback_enable = pm3393_loopback_enable,
620 .loopback_disable = pm3393_loopback_disable,
621 .set_mtu = pm3393_set_mtu,
622 .set_rx_mode = pm3393_set_rx_mode,
623 .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
624 .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
625 .statistics_update = pm3393_update_statistics,
626 .macaddress_get = pm3393_macaddress_get,
627 .macaddress_set = pm3393_macaddress_set
628};
629
630static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
631{
632 struct cmac *cmac;
633
634 cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
635 if (!cmac)
636 return NULL;
637 memset(cmac, 0, sizeof(*cmac));
638
639 cmac->ops = &pm3393_ops;
640 cmac->instance = (cmac_instance *) (cmac + 1);
641 cmac->adapter = adapter;
642 cmac->instance->fc = PAUSE_TX | PAUSE_RX;
643
644 t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
645 t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
646 t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
647 t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
648 t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
649 t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
650 t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
651 t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
652 t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
653 t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
654 t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
655 t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
656 t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
657 t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
658 t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
659 t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
660 t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
661 t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
662 t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
663 t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
664 t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
665 t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
666
667 t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
668 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
669 t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
670 t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
671 t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
672 t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
673 t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
674 t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
675 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
676 t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
677 t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
678	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
679 t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
680
681 t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
682 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
683 t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
684 t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
685 t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
686 t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
687 t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
688 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
689 t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
690 t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
691	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
692
693 t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
694 t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
695 t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
696 t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
697 t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
698 t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
699
700 t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
701 t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
702
703 t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
704 t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
705
706 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
707 /* For T1 use timer based Mac flow control. */
708 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
709 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
710 t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
711 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
712
713 /* Setup Exact Match Filter 0 to allow broadcast packets.
714 */
715 t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
716 t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
717 t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
718 t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
719 t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
720
721 t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
722 t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
723 t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
724
725 return cmac;
726}
727
728static int pm3393_mac_reset(adapter_t * adapter)
729{
730 u32 val;
731 u32 x;
732 u32 is_pl4_reset_finished;
733 u32 is_pl4_outof_lock;
734 u32 is_xaui_mabc_pll_locked;
735 u32 successful_reset;
736 int i;
737
738 /* The following steps are required to properly reset
739 * the PM3393. This information is provided in the
740 * PM3393 datasheet (Issue 2: November 2002)
741 * section 13.1 -- Device Reset.
742 *
743 * The PM3393 has three types of components that are
744 * individually reset:
745 *
746 * DRESETB - Digital circuitry
747 * PL4_ARESETB - PL4 analog circuitry
748 * XAUI_ARESETB - XAUI bus analog circuitry
749 *
750 * Steps to reset PM3393 using RSTB pin:
751 *
752	 * 1. Assert the RSTB pin low (write 0).
753	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
754	 * 3. Wait until all external clocks and REFSEL are stable.
755	 * 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
756	 * 5. De-assert RSTB (write 1).
757	 * 6. Wait until the internal timers expire, after ~14ms.
758	 *    - This allows the analog clock synthesizer (PL4CSU) to stabilize
759	 *      to the selected reference frequency before the digital portion
760	 *      of the device is allowed to operate.
761	 * 7. Wait at least 200us for the XAUI interface to stabilize.
762	 * 8. Verify that the PM3393 came out of reset successfully.
763	 *    Set the successful-reset flag if everything worked; otherwise
764	 *    retry a few more times.
765	 */
766
767 successful_reset = 0;
768 for (i = 0; i < 3 && !successful_reset; i++) {
769 /* 1 */
770 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
771 val &= ~1;
772 t1_tpi_write(adapter, A_ELMER0_GPO, val);
773
774 /* 2 */
775 msleep(1);
776
777 /* 3 */
778 msleep(1);
779
780 /* 4 */
781 msleep(2 /*1 extra ms for safety */ );
782
783 /* 5 */
784 val |= 1;
785 t1_tpi_write(adapter, A_ELMER0_GPO, val);
786
787 /* 6 */
788 msleep(15 /*1 extra ms for safety */ );
789
790 /* 7 */
791 msleep(1);
792
793 /* 8 */
794
795 /* Has PL4 analog block come out of reset correctly? */
796 t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
797 is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
798
799		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL only locks later in
800		 * the init sequence; figure out why. */
801
802 /* Have all PL4 block clocks locked? */
803 x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
804 /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
805 SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
806 SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
807 SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
808 is_pl4_outof_lock = (val & x);
809
810		/* ??? If this fails, we might be able to software-reset the XAUI
811		 * part and try to recover, saving us from doing another HW reset */
812		/* Has the XAUI MABC PLL circuitry stabilized? */
813 is_xaui_mabc_pll_locked =
814 (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
815
816 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
817 && is_xaui_mabc_pll_locked);
818 }
819 return successful_reset ? 0 : 1;
820}
821
822struct gmac t1_pm3393_ops = {
823 STATS_TICK_SECS,
824 pm3393_mac_create,
825 pm3393_mac_reset
826};
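/* Illustrative sketch only, not part of this patch: a board-init path
 * could consult the ops table above roughly as follows. The hook names
 * and the caller shown here are assumptions inferred from the
 * initializer order (stats tick period, create, reset):
 *
 *	if (mac_ops->reset(adapter))
 *		return -EIO;                     the PM3393 stayed in reset
 *	cmac = mac_ops->create(adapter, 0);
 */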
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 000000000000..b90e11f40d1f
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
1/*****************************************************************************
2 * *
3 * File: regs.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_REGS_H_
40#define _CXGB_REGS_H_
41
42/* SGE registers */
43#define A_SG_CONTROL 0x0
44
45#define S_CMDQ0_ENABLE 0
46#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
47#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
48
49#define S_CMDQ1_ENABLE 1
50#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
51#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
52
53#define S_FL0_ENABLE 2
54#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
55#define F_FL0_ENABLE V_FL0_ENABLE(1U)
56
57#define S_FL1_ENABLE 3
58#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
59#define F_FL1_ENABLE V_FL1_ENABLE(1U)
60
61#define S_CPL_ENABLE 4
62#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
63#define F_CPL_ENABLE V_CPL_ENABLE(1U)
64
65#define S_RESPONSE_QUEUE_ENABLE 5
66#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
67#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
68
69#define S_CMDQ_PRIORITY 6
70#define M_CMDQ_PRIORITY 0x3
71#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
72#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
73
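/* Field-access convention used throughout this file: S_<FIELD> is the bit
 * offset of a field, M_<FIELD> its unshifted mask, V_<FIELD>(x) shifts a
 * value into position, F_<FIELD> is the single-bit form, and G_<FIELD>(x)
 * extracts a field from a register word. A minimal read-modify-write
 * sketch using the CMDQ_PRIORITY field (illustrative only):
 *
 *	u32 v = readl(adapter->regs + A_SG_CONTROL);
 *	v &= ~V_CMDQ_PRIORITY(M_CMDQ_PRIORITY);
 *	v |= V_CMDQ_PRIORITY(2) | F_CMDQ0_ENABLE;
 *	writel(v, adapter->regs + A_SG_CONTROL);
 */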
74#define S_DISABLE_CMDQ1_GTS 9
75#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
76#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
77
78#define S_DISABLE_FL0_GTS 10
79#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
80#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
81
82#define S_DISABLE_FL1_GTS 11
83#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
84#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
85
86#define S_ENABLE_BIG_ENDIAN 12
87#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
88#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
89
90#define S_ISCSI_COALESCE 14
91#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
92#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
93
94#define S_RX_PKT_OFFSET 15
95#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
96
97#define S_VLAN_XTRACT 18
98#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
99#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
100
101#define A_SG_DOORBELL 0x4
102#define A_SG_CMD0BASELWR 0x8
103#define A_SG_CMD0BASEUPR 0xc
104#define A_SG_CMD1BASELWR 0x10
105#define A_SG_CMD1BASEUPR 0x14
106#define A_SG_FL0BASELWR 0x18
107#define A_SG_FL0BASEUPR 0x1c
108#define A_SG_FL1BASELWR 0x20
109#define A_SG_FL1BASEUPR 0x24
110#define A_SG_CMD0SIZE 0x28
111#define A_SG_FL0SIZE 0x2c
112#define A_SG_RSPSIZE 0x30
113#define A_SG_RSPBASELWR 0x34
114#define A_SG_RSPBASEUPR 0x38
115#define A_SG_FLTHRESHOLD 0x3c
116#define A_SG_RSPQUEUECREDIT 0x40
117#define A_SG_SLEEPING 0x48
118#define A_SG_INTRTIMER 0x4c
119#define A_SG_CMD1SIZE 0xb0
120#define A_SG_FL1SIZE 0xb4
121#define A_SG_INT_ENABLE 0xb8
122
123#define S_RESPQ_EXHAUSTED 0
124#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
125#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
126
127#define S_RESPQ_OVERFLOW 1
128#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
129#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
130
131#define S_FL_EXHAUSTED 2
132#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
133#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
134
135#define S_PACKET_TOO_BIG 3
136#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
137#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
138
139#define S_PACKET_MISMATCH 4
140#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
141#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
142
143#define A_SG_INT_CAUSE 0xbc
144#define A_SG_RESPACCUTIMER 0xc0
145
146/* MC3 registers */
147
148#define S_READY 1
149#define V_READY(x) ((x) << S_READY)
150#define F_READY V_READY(1U)
151
152/* MC4 registers */
153
154#define A_MC4_CFG 0x180
155#define S_MC4_SLOW 25
156#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
157#define F_MC4_SLOW V_MC4_SLOW(1U)
158
159/* TPI registers */
160
161#define A_TPI_ADDR 0x280
162#define A_TPI_WR_DATA 0x284
163#define A_TPI_RD_DATA 0x288
164#define A_TPI_CSR 0x28c
165
166#define S_TPIWR 0
167#define V_TPIWR(x) ((x) << S_TPIWR)
168#define F_TPIWR V_TPIWR(1U)
169
170#define S_TPIRDY 1
171#define V_TPIRDY(x) ((x) << S_TPIRDY)
172#define F_TPIRDY V_TPIRDY(1U)
173
174#define A_TPI_PAR 0x29c
175
176#define S_TPIPAR 0
177#define M_TPIPAR 0x7f
178#define V_TPIPAR(x) ((x) << S_TPIPAR)
179#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
180
181/* TP registers */
182
183#define A_TP_IN_CONFIG 0x300
184
185#define S_TP_IN_CSPI_CPL 3
186#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
187#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
188
189#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
190#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
191#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
192
193#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
194#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
195#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
196
197#define S_TP_IN_ESPI_ETHERNET 8
198#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
199#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
200
201#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
202#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
203#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
204
205#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
206#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
207#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
208
209#define S_OFFLOAD_DISABLE 14
210#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
211#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
212
213#define A_TP_OUT_CONFIG 0x304
214
215#define S_TP_OUT_CSPI_CPL 2
216#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
217#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
218
219#define S_TP_OUT_ESPI_ETHERNET 6
220#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
221#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
222
223#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
224#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
225#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
226
227#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
228#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
229#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
230
231#define A_TP_GLOBAL_CONFIG 0x308
232
233#define S_IP_TTL 0
234#define M_IP_TTL 0xff
235#define V_IP_TTL(x) ((x) << S_IP_TTL)
236
237#define S_TCP_CSUM 11
238#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
239#define F_TCP_CSUM V_TCP_CSUM(1U)
240
241#define S_UDP_CSUM 12
242#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
243#define F_UDP_CSUM V_UDP_CSUM(1U)
244
245#define S_IP_CSUM 13
246#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
247#define F_IP_CSUM V_IP_CSUM(1U)
248
249#define S_PATH_MTU 15
250#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
251#define F_PATH_MTU V_PATH_MTU(1U)
252
253#define S_5TUPLE_LOOKUP 17
254#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
255
256#define S_SYN_COOKIE_PARAMETER 26
257#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
258
259#define A_TP_PC_CONFIG 0x348
260#define S_DIS_TX_FILL_WIN_PUSH 12
261#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
262#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
263
264#define S_TP_PC_REV 30
265#define M_TP_PC_REV 0x3
266#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
267#define A_TP_RESET 0x44c
268#define S_TP_RESET 0
269#define V_TP_RESET(x) ((x) << S_TP_RESET)
270#define F_TP_RESET V_TP_RESET(1U)
271
272#define A_TP_INT_ENABLE 0x470
273#define A_TP_INT_CAUSE 0x474
274#define A_TP_TX_DROP_CONFIG 0x4b8
275
276#define S_ENABLE_TX_DROP 31
277#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
278#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
279
280#define S_ENABLE_TX_ERROR 30
281#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
282#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
283
284#define S_DROP_TICKS_CNT 4
285#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
286
287#define S_NUM_PKTS_DROPPED 0
288#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
289
290/* CSPI registers */
291
292#define S_DIP4ERR 0
293#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
294#define F_DIP4ERR V_DIP4ERR(1U)
295
296#define S_RXDROP 1
297#define V_RXDROP(x) ((x) << S_RXDROP)
298#define F_RXDROP V_RXDROP(1U)
299
300#define S_TXDROP 2
301#define V_TXDROP(x) ((x) << S_TXDROP)
302#define F_TXDROP V_TXDROP(1U)
303
304#define S_RXOVERFLOW 3
305#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
306#define F_RXOVERFLOW V_RXOVERFLOW(1U)
307
308#define S_RAMPARITYERR 4
309#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
310#define F_RAMPARITYERR V_RAMPARITYERR(1U)
311
312/* ESPI registers */
313
314#define A_ESPI_SCH_TOKEN0 0x880
315#define A_ESPI_SCH_TOKEN1 0x884
316#define A_ESPI_SCH_TOKEN2 0x888
317#define A_ESPI_SCH_TOKEN3 0x88c
318#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
319#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
320#define A_ESPI_CALENDAR_LENGTH 0x898
321#define A_PORT_CONFIG 0x89c
322
323#define S_RX_NPORTS 0
324#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
325
326#define S_TX_NPORTS 8
327#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
328
329#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
330
331#define S_RXSTATUSENABLE 0
332#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
333#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
334
335#define S_INTEL1010MODE 4
336#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
337#define F_INTEL1010MODE V_INTEL1010MODE(1U)
338
339#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
340#define A_ESPI_TRAIN 0x8ac
341#define A_ESPI_INTR_STATUS 0x8c8
342
343#define S_DIP2PARITYERR 5
344#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
345#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
346
347#define A_ESPI_INTR_ENABLE 0x8cc
348#define A_RX_DROP_THRESHOLD 0x8d0
349#define A_ESPI_RX_RESET 0x8ec
350#define A_ESPI_MISC_CONTROL 0x8f0
351
352#define S_OUT_OF_SYNC_COUNT 0
353#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
354
355#define S_DIP2_PARITY_ERR_THRES 5
356#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
357
358#define S_DIP4_THRES 9
359#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
360
361#define S_MONITORED_PORT_NUM 25
362#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
363
364#define S_MONITORED_DIRECTION 27
365#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
366#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
367
368#define S_MONITORED_INTERFACE 28
369#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
370#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
371
372#define A_ESPI_DIP2_ERR_COUNT 0x8f4
373#define A_ESPI_CMD_ADDR 0x8f8
374
375#define S_WRITE_DATA 0
376#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
377
378#define S_REGISTER_OFFSET 8
379#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
380
381#define S_CHANNEL_ADDR 12
382#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
383
384#define S_MODULE_ADDR 16
385#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
386
387#define S_BUNDLE_ADDR 20
388#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
389
390#define S_SPI4_COMMAND 24
391#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
392
393#define A_ESPI_GOSTAT 0x8fc
394#define S_ESPI_CMD_BUSY 8
395#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
396#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
397
398/* PL registers */
399
400#define A_PL_ENABLE 0xa00
401
402#define S_PL_INTR_SGE_ERR 0
403#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
404#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
405
406#define S_PL_INTR_SGE_DATA 1
407#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
408#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
409
410#define S_PL_INTR_TP 6
411#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
412#define F_PL_INTR_TP V_PL_INTR_TP(1U)
413
414#define S_PL_INTR_ESPI 8
415#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
416#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
417
418#define S_PL_INTR_PCIX 10
419#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
420#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
421
422#define S_PL_INTR_EXT 11
423#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
424#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
425
426#define A_PL_CAUSE 0xa04
427
428/* MC5 registers */
429
430#define A_MC5_CONFIG 0xc04
431
432#define S_TCAM_RESET 1
433#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
434#define F_TCAM_RESET V_TCAM_RESET(1U)
435
436#define S_M_BUS_ENABLE 5
437#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
438#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
439
440/* PCICFG registers */
441
442#define A_PCICFG_PM_CSR 0x44
443#define A_PCICFG_VPD_ADDR 0x4a
444
445#define S_VPD_OP_FLAG 15
446#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
447#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
448
449#define A_PCICFG_VPD_DATA 0x4c
450
451#define A_PCICFG_INTR_ENABLE 0xf4
452#define A_PCICFG_INTR_CAUSE 0xf8
453
454#define A_PCICFG_MODE 0xfc
455
456#define S_PCI_MODE_64BIT 0
457#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
458#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
459
460#define S_PCI_MODE_PCIX 5
461#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
462#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
463
464#define S_PCI_MODE_CLK 6
465#define M_PCI_MODE_CLK 0x3
466#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
467
468#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..53b41d99b00b
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
1/*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
42#include <linux/config.h>
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <linux/pci.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
52#include <linux/ip.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55
56#include "cpl5_cmd.h"
57#include "sge.h"
58#include "regs.h"
59#include "espi.h"
60
61
62#ifdef NETIF_F_TSO
63#include <linux/tcp.h>
64#endif
65
66#define SGE_CMDQ_N 2
67#define SGE_FREELQ_N 2
68#define SGE_CMDQ0_E_N 1024
69#define SGE_CMDQ1_E_N 128
70#define SGE_FREEL_SIZE 4096
71#define SGE_JUMBO_FREEL_SIZE 512
72#define SGE_FREEL_REFILL_THRESH 16
73#define SGE_RESPQ_E_N 1024
74#define SGE_INTRTIMER_NRES 1000
75#define SGE_RX_COPY_THRES 256
76#define SGE_RX_SM_BUF_SIZE 1536
77
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
87
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
101/*
102 * Command queue, receive buffer list, and response queue descriptors.
103 */
104#if defined(__BIG_ENDIAN_BITFIELD)
105struct cmdQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 flags;
109 u32 addr_hi;
110};
111
112struct freelQ_e {
113 u32 addr_lo;
114 u32 len_gen;
115 u32 gen2;
116 u32 addr_hi;
117};
118
119struct respQ_e {
120 u32 Qsleeping : 4;
121 u32 Cmdq1CreditReturn : 5;
122 u32 Cmdq1DmaComplete : 5;
123 u32 Cmdq0CreditReturn : 5;
124 u32 Cmdq0DmaComplete : 5;
125 u32 FreelistQid : 2;
126 u32 CreditValid : 1;
127 u32 DataValid : 1;
128 u32 Offload : 1;
129 u32 Eop : 1;
130 u32 Sop : 1;
131 u32 GenerationBit : 1;
132 u32 BufferLength;
133};
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135struct cmdQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 flags;
140};
141
142struct freelQ_e {
143 u32 len_gen;
144 u32 addr_lo;
145 u32 addr_hi;
146 u32 gen2;
147};
148
149struct respQ_e {
150 u32 BufferLength;
151 u32 GenerationBit : 1;
152 u32 Sop : 1;
153 u32 Eop : 1;
154 u32 Offload : 1;
155 u32 DataValid : 1;
156 u32 CreditValid : 1;
157 u32 FreelistQid : 2;
158 u32 Cmdq0DmaComplete : 5;
159 u32 Cmdq0CreditReturn : 5;
160 u32 Cmdq1DmaComplete : 5;
161 u32 Cmdq1CreditReturn : 5;
162 u32 Qsleeping : 4;
163} ;
164#endif
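/* Note on the GenerationBit fields above: the SGE inverts the generation
 * value each time a ring wraps, so an entry is valid only while its
 * generation bit matches the software shadow (q->genbit). A consumer
 * therefore follows this pattern, cf. process_responses() below:
 *
 *	while (e->GenerationBit == q->genbit) {
 *		... process *e ...
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->genbit ^= 1;
 *		}
 *	}
 */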
165
166/*
167 * SW Context Command and Freelist Queue Descriptors
168 */
169struct cmdQ_ce {
170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len);
173};
174
175struct freelQ_ce {
176 struct sk_buff *skb;
177 DECLARE_PCI_UNMAP_ADDR(dma_addr);
178 DECLARE_PCI_UNMAP_LEN(dma_len);
179};
180
181/*
182 * SW command, freelist and response rings
183 */
184struct cmdQ {
185 unsigned long status; /* HW DMA fetch status */
186 unsigned int in_use; /* # of in-use command descriptors */
187 unsigned int size; /* # of descriptors */
188 unsigned int processed; /* total # of descs HW has processed */
189 unsigned int cleaned; /* total # of descs SW has reclaimed */
190 unsigned int stop_thres; /* SW TX queue suspend threshold */
191 u16 pidx; /* producer index (SW) */
192 u16 cidx; /* consumer index (HW) */
193 u8 genbit; /* current generation (=valid) bit */
194 u8 sop; /* is next entry start of packet? */
195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
198 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
199};
200
201struct freelQ {
202 unsigned int credits; /* # of available RX buffers */
203 unsigned int size; /* free list capacity */
204 u16 pidx; /* producer index (SW) */
205 u16 cidx; /* consumer index (HW) */
206 u16 rx_buffer_size; /* Buffer size on this free list */
207 u16 dma_offset; /* DMA offset to align IP headers */
208 u16 recycleq_idx; /* skb recycle q to use */
209 u8 genbit; /* current generation (=valid) bit */
210 struct freelQ_e *entries; /* HW freelist descriptor Q */
211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
213};
214
215struct respQ {
216 unsigned int credits; /* credits to be returned to SGE */
217 unsigned int size; /* # of response Q descriptors */
218 u16 cidx; /* consumer index (SW) */
219 u8 genbit; /* current generation(=valid) bit */
220 struct respQ_e *entries; /* HW response descriptor Q */
221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
228};
229
230/*
231 * Main SGE data structure
232 *
233 * Interrupts are handled by a single CPU, and on an MP system it is likely
234 * that the application is migrated to another CPU. In that scenario, we try
235 * to separate the RX (in irq context) and TX state in order to decrease
236 * memory contention.
237 */
238struct sge {
239 struct adapter *adapter; /* adapter backpointer */
240 struct net_device *netdev; /* netdevice backpointer */
241 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
242 struct respQ respQ; /* response Q */
243 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246 unsigned int intrtimer_nres; /* no-resource interrupt timer */
247 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
248 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
249 struct timer_list espibug_timer;
250 unsigned int espibug_timeout;
251 struct sk_buff *espibug_skb;
252 u32 sge_control; /* shadow value of sge control reg */
253 struct sge_intr_counts stats;
254 struct sge_port_stats port_stats[MAX_NPORTS];
255 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
256};
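/* Placing cmdQ[] last and marking it ____cacheline_aligned_in_smp gives
 * the TX-side state its own cache line(s), so a sender running on another
 * CPU does not bounce the RX-side fields above.
 */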
257
258/*
259 * PIO to indicate that memory mapped Q contains valid descriptor(s).
260 */
261static inline void doorbell_pio(struct adapter *adapter, u32 val)
262{
263 wmb();
264 writel(val, adapter->regs + A_SG_DOORBELL);
265}
266
267/*
268 * Frees all RX buffers on the freelist Q. The caller must make sure that
269 * the SGE is turned off before calling this function.
270 */
271static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
272{
273 unsigned int cidx = q->cidx;
274
275 while (q->credits--) {
276 struct freelQ_ce *ce = &q->centries[cidx];
277
278 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
279 pci_unmap_len(ce, dma_len),
280 PCI_DMA_FROMDEVICE);
281 dev_kfree_skb(ce->skb);
282 ce->skb = NULL;
283 if (++cidx == q->size)
284 cidx = 0;
285 }
286}
287
288/*
289 * Free RX free list and response queue resources.
290 */
291static void free_rx_resources(struct sge *sge)
292{
293 struct pci_dev *pdev = sge->adapter->pdev;
294 unsigned int size, i;
295
296 if (sge->respQ.entries) {
297 size = sizeof(struct respQ_e) * sge->respQ.size;
298 pci_free_consistent(pdev, size, sge->respQ.entries,
299 sge->respQ.dma_addr);
300 }
301
302 for (i = 0; i < SGE_FREELQ_N; i++) {
303 struct freelQ *q = &sge->freelQ[i];
304
305 if (q->centries) {
306 free_freelQ_buffers(pdev, q);
307 kfree(q->centries);
308 }
309 if (q->entries) {
310 size = sizeof(struct freelQ_e) * q->size;
311 pci_free_consistent(pdev, size, q->entries,
312 q->dma_addr);
313 }
314 }
315}
316
317/*
318 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
319 * response queue.
320 */
321static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
322{
323 struct pci_dev *pdev = sge->adapter->pdev;
324 unsigned int size, i;
325
326 for (i = 0; i < SGE_FREELQ_N; i++) {
327 struct freelQ *q = &sge->freelQ[i];
328
329 q->genbit = 1;
330 q->size = p->freelQ_size[i];
331 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
332 size = sizeof(struct freelQ_e) * q->size;
333 q->entries = (struct freelQ_e *)
334 pci_alloc_consistent(pdev, size, &q->dma_addr);
335 if (!q->entries)
336 goto err_no_mem;
337 memset(q->entries, 0, size);
338 size = sizeof(struct freelQ_ce) * q->size;
339 q->centries = kmalloc(size, GFP_KERNEL);
340 if (!q->centries)
341 goto err_no_mem;
342 memset(q->centries, 0, size);
343 }
344
345 /*
346 * Calculate the buffer sizes for the two free lists. FL0 accommodates
347 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
348 * including all the sk_buff overhead.
349 *
350 * Note: For T2 FL0 and FL1 are reversed.
351 */
352 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
353 sizeof(struct cpl_rx_data) +
354 sge->freelQ[!sge->jumbo_fl].dma_offset;
355 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
356 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
357
358 /*
359 * Setup which skb recycle Q should be used when recycling buffers from
360 * each free list.
361 */
362 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
363 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
364
365 sge->respQ.genbit = 1;
366 sge->respQ.size = SGE_RESPQ_E_N;
367 sge->respQ.credits = 0;
368 size = sizeof(struct respQ_e) * sge->respQ.size;
369 sge->respQ.entries = (struct respQ_e *)
370 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
371 if (!sge->respQ.entries)
372 goto err_no_mem;
373 memset(sge->respQ.entries, 0, size);
374 return 0;
375
376err_no_mem:
377 free_rx_resources(sge);
378 return -ENOMEM;
379}
380
381/*
382 * Reclaims n TX descriptors and frees the buffers associated with them.
383 */
384static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
385{
386 struct cmdQ_ce *ce;
387 struct pci_dev *pdev = sge->adapter->pdev;
388 unsigned int cidx = q->cidx;
389
390 q->in_use -= n;
391 ce = &q->centries[cidx];
392 while (n--) {
393 if (q->sop)
394 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
395 pci_unmap_len(ce, dma_len),
396 PCI_DMA_TODEVICE);
397 else
398 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
399 pci_unmap_len(ce, dma_len),
400 PCI_DMA_TODEVICE);
401 q->sop = 0;
402 if (ce->skb) {
403 dev_kfree_skb(ce->skb);
404 q->sop = 1;
405 }
406 ce++;
407 if (++cidx == q->size) {
408 cidx = 0;
409 ce = q->centries;
410 }
411 }
412 q->cidx = cidx;
413}
414
415/*
416 * Free TX resources.
417 *
418 * Assumes that SGE is stopped and all interrupts are disabled.
419 */
420static void free_tx_resources(struct sge *sge)
421{
422 struct pci_dev *pdev = sge->adapter->pdev;
423 unsigned int size, i;
424
425 for (i = 0; i < SGE_CMDQ_N; i++) {
426 struct cmdQ *q = &sge->cmdQ[i];
427
428 if (q->centries) {
429 if (q->in_use)
430 free_cmdQ_buffers(sge, q, q->in_use);
431 kfree(q->centries);
432 }
433 if (q->entries) {
434 size = sizeof(struct cmdQ_e) * q->size;
435 pci_free_consistent(pdev, size, q->entries,
436 q->dma_addr);
437 }
438 }
439}
440
441/*
442 * Allocates basic TX resources, consisting of memory mapped command Qs.
443 */
444static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
445{
446 struct pci_dev *pdev = sge->adapter->pdev;
447 unsigned int size, i;
448
449 for (i = 0; i < SGE_CMDQ_N; i++) {
450 struct cmdQ *q = &sge->cmdQ[i];
451
452 q->genbit = 1;
453 q->sop = 1;
454 q->size = p->cmdQ_size[i];
455 q->in_use = 0;
456 q->status = 0;
457 q->processed = q->cleaned = 0;
458 q->stop_thres = 0;
459 spin_lock_init(&q->lock);
460 size = sizeof(struct cmdQ_e) * q->size;
461 q->entries = (struct cmdQ_e *)
462 pci_alloc_consistent(pdev, size, &q->dma_addr);
463 if (!q->entries)
464 goto err_no_mem;
465 memset(q->entries, 0, size);
466 size = sizeof(struct cmdQ_ce) * q->size;
467 q->centries = kmalloc(size, GFP_KERNEL);
468 if (!q->centries)
469 goto err_no_mem;
470 memset(q->centries, 0, size);
471 }
472
473 /*
474 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
475 * only. For queue 0 set the stop threshold so we can handle one more
476 * packet from each port, plus reserve an additional 24 entries for
477 * Ethernet packets only. Queue 1 never suspends nor do we reserve
478 * space for Ethernet packets.
479 */
480 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
481 (MAX_SKB_FRAGS + 1);
482 return 0;
483
484err_no_mem:
485 free_tx_resources(sge);
486 return -ENOMEM;
487}
488
489static inline void setup_ring_params(struct adapter *adapter, u64 addr,
490 u32 size, int base_reg_lo,
491 int base_reg_hi, int size_reg)
492{
493 writel((u32)addr, adapter->regs + base_reg_lo);
494 writel(addr >> 32, adapter->regs + base_reg_hi);
495 writel(size, adapter->regs + size_reg);
496}
497
498/*
499 * Enable/disable VLAN acceleration.
500 */
501void t1_set_vlan_accel(struct adapter *adapter, int on_off)
502{
503 struct sge *sge = adapter->sge;
504
505 sge->sge_control &= ~F_VLAN_XTRACT;
506 if (on_off)
507 sge->sge_control |= F_VLAN_XTRACT;
508 if (adapter->open_device_map) {
509 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
510 readl(adapter->regs + A_SG_CONTROL); /* flush */
511 }
512}
513
514/*
515 * Programs the various SGE registers. The engine is not yet enabled, but
516 * sge->sge_control is set up and ready to go.
517 */
518static void configure_sge(struct sge *sge, struct sge_params *p)
519{
520 struct adapter *ap = sge->adapter;
521
522 writel(0, ap->regs + A_SG_CONTROL);
523 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
524 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
525 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
526 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
527 setup_ring_params(ap, sge->freelQ[0].dma_addr,
528 sge->freelQ[0].size, A_SG_FL0BASELWR,
529 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
530 setup_ring_params(ap, sge->freelQ[1].dma_addr,
531 sge->freelQ[1].size, A_SG_FL1BASELWR,
532 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
533
534 /* The threshold comparison uses <. */
535 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
536
537 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
538 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
539 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
540
541 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
542 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
543 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
544 F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
545 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
546
547#if defined(__BIG_ENDIAN_BITFIELD)
548 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
549#endif
550
551 /* Initialize no-resource timer */
552 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
553
554 t1_sge_set_coalesce_params(sge, p);
555}
556
557/*
558 * Return the payload capacity of the jumbo free-list buffers.
559 */
560static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
561{
562 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
563 sge->freelQ[sge->jumbo_fl].dma_offset -
564 sizeof(struct cpl_rx_data);
565}
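/* Worked example: the jumbo free list allocates 16KB buffers less the
 * skb_shared_info overhead (see alloc_rx_resources() above), so the value
 * returned here is roughly
 *	16384 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *	      - dma_offset - sizeof(struct cpl_rx_data);
 * the exact figure depends on the kernel's skb_shared_info layout.
 */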
566
567/*
568 * Frees all SGE related resources and the sge structure itself
569 */
570void t1_sge_destroy(struct sge *sge)
571{
572 if (sge->espibug_skb)
573 kfree_skb(sge->espibug_skb);
574
575 free_tx_resources(sge);
576 free_rx_resources(sge);
577 kfree(sge);
578}
579
580/*
581 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
582 * context Q) until the Q is full or alloc_skb fails.
583 *
584 * It is possible that the generation bits already match, indicating that the
585 * buffer is already valid and nothing needs to be done. This happens when we
586 * copied a received buffer into a new sk_buff during the interrupt processing.
587 *
588 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
589 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
590 * aligned.
591 */
592static void refill_free_list(struct sge *sge, struct freelQ *q)
593{
594 struct pci_dev *pdev = sge->adapter->pdev;
595 struct freelQ_ce *ce = &q->centries[q->pidx];
596 struct freelQ_e *e = &q->entries[q->pidx];
597 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
598
599
600 while (q->credits < q->size) {
601 struct sk_buff *skb;
602 dma_addr_t mapping;
603
604 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
605 if (!skb)
606 break;
607
608 skb_reserve(skb, q->dma_offset);
609 mapping = pci_map_single(pdev, skb->data, dma_len,
610 PCI_DMA_FROMDEVICE);
611 ce->skb = skb;
612 pci_unmap_addr_set(ce, dma_addr, mapping);
613 pci_unmap_len_set(ce, dma_len, dma_len);
614 e->addr_lo = (u32)mapping;
615 e->addr_hi = (u64)mapping >> 32;
616 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
617 wmb();
618 e->gen2 = V_CMD_GEN2(q->genbit);
619
620 e++;
621 ce++;
622 if (++q->pidx == q->size) {
623 q->pidx = 0;
624 q->genbit ^= 1;
625 ce = q->centries;
626 e = q->entries;
627 }
628 q->credits++;
629 }
630
631}
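/* The two-stage generation update above is deliberate: len_gen is written
 * first, wmb() orders the stores, and only then is gen2 set. The SGE
 * treats a free-list descriptor as valid once both generation copies
 * match, so the barrier ensures the DMA address and length are visible
 * before the descriptor can appear valid to hardware.
 */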
632
633/*
634 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
635 * of both rings, we go into 'few interrupt mode' in order to give the system
636 * time to free up resources.
637 */
638static void freelQs_empty(struct sge *sge)
639{
640 struct adapter *adapter = sge->adapter;
641 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
642 u32 irqholdoff_reg;
643
644 refill_free_list(sge, &sge->freelQ[0]);
645 refill_free_list(sge, &sge->freelQ[1]);
646
647 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
648 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
649 irq_reg |= F_FL_EXHAUSTED;
650 irqholdoff_reg = sge->fixed_intrtimer;
651 } else {
652 /* Clear the F_FL_EXHAUSTED interrupts for now */
653 irq_reg &= ~F_FL_EXHAUSTED;
654 irqholdoff_reg = sge->intrtimer_nres;
655 }
656 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
657 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
658
659 /* We reenable the Qs to force a freelist GTS interrupt later */
660 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
661}
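/* The 'few interrupt mode' above works by stretching the interrupt
 * holdoff: while buffers are scarce, F_FL_EXHAUSTED stays masked and
 * A_SG_INTRTIMER is loaded with intrtimer_nres (SGE_INTRTIMER_NRES
 * microseconds' worth of core ticks, computed in configure_sge()),
 * giving the system time to free memory before the next interrupt.
 */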
662
663#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
664#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
665#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
666 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
667
668/*
669 * Disable SGE Interrupts
670 */
671void t1_sge_intr_disable(struct sge *sge)
672{
673 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
674
675 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
676 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
677}
678
679/*
680 * Enable SGE interrupts.
681 */
682void t1_sge_intr_enable(struct sge *sge)
683{
684 u32 en = SGE_INT_ENABLE;
685 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
686
687 if (sge->adapter->flags & TSO_CAPABLE)
688 en &= ~F_PACKET_TOO_BIG;
689 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
690 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
691}
692
693/*
694 * Clear SGE interrupts.
695 */
696void t1_sge_intr_clear(struct sge *sge)
697{
698 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
699 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
700}
701
702/*
703 * SGE 'Error' interrupt handler
704 */
705int t1_sge_intr_error_handler(struct sge *sge)
706{
707 struct adapter *adapter = sge->adapter;
708 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
709
710 if (adapter->flags & TSO_CAPABLE)
711 cause &= ~F_PACKET_TOO_BIG;
712 if (cause & F_RESPQ_EXHAUSTED)
713 sge->stats.respQ_empty++;
714 if (cause & F_RESPQ_OVERFLOW) {
715 sge->stats.respQ_overflow++;
716 CH_ALERT("%s: SGE response queue overflow\n",
717 adapter->name);
718 }
719 if (cause & F_FL_EXHAUSTED) {
720 sge->stats.freelistQ_empty++;
721 freelQs_empty(sge);
722 }
723 if (cause & F_PACKET_TOO_BIG) {
724 sge->stats.pkt_too_big++;
725 CH_ALERT("%s: SGE max packet size exceeded\n",
726 adapter->name);
727 }
728 if (cause & F_PACKET_MISMATCH) {
729 sge->stats.pkt_mismatch++;
730 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
731 }
732 if (cause & SGE_INT_FATAL)
733 t1_fatal_err(adapter);
734
735 writel(cause, adapter->regs + A_SG_INT_CAUSE);
736 return 0;
737}
738
739const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
740{
741 return &sge->stats;
742}
743
744const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
745{
746 return &sge->port_stats[port];
747}
748
749/**
750 * recycle_fl_buf - recycle a free list buffer
751 * @fl: the free list
752 * @idx: index of buffer to recycle
753 *
754 * Recycles the specified buffer on the given free list by adding it at
755 * the next available slot on the list.
756 */
757static void recycle_fl_buf(struct freelQ *fl, int idx)
758{
759 struct freelQ_e *from = &fl->entries[idx];
760 struct freelQ_e *to = &fl->entries[fl->pidx];
761
762 fl->centries[fl->pidx] = fl->centries[idx];
763 to->addr_lo = from->addr_lo;
764 to->addr_hi = from->addr_hi;
765 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
766 wmb();
767 to->gen2 = V_CMD_GEN2(fl->genbit);
768 fl->credits++;
769
770 if (++fl->pidx == fl->size) {
771 fl->pidx = 0;
772 fl->genbit ^= 1;
773 }
774}
775
776/**
777 * get_packet - return the next ingress packet buffer
778 * @pdev: the PCI device that received the packet
779 * @fl: the SGE free list holding the packet
780 * @len: the actual packet length, excluding any SGE padding
781 * @dma_pad: padding at beginning of buffer left by SGE DMA
782 * @skb_pad: padding to be used if the packet is copied
783 * @copy_thres: length threshold under which a packet should be copied
784 * @drop_thres: # of remaining buffers before we start dropping packets
785 *
786 * Get the next packet from a free list and complete setup of the
787 *	sk_buff. If the packet is small we make a copy and recycle the
788 *	original buffer; otherwise we use the original buffer itself. If a
789 *	positive drop threshold is supplied, packets are dropped and their
790 * buffers recycled if (a) the number of remaining buffers is under the
791 * threshold and the packet is too big to copy, or (b) the packet should
792 * be copied but there is no memory for the copy.
793 */
794static inline struct sk_buff *get_packet(struct pci_dev *pdev,
795 struct freelQ *fl, unsigned int len,
796 int dma_pad, int skb_pad,
797 unsigned int copy_thres,
798 unsigned int drop_thres)
799{
800 struct sk_buff *skb;
801 struct freelQ_ce *ce = &fl->centries[fl->cidx];
802
803 if (len < copy_thres) {
804 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
805 if (likely(skb != NULL)) {
806 skb_reserve(skb, skb_pad);
807 skb_put(skb, len);
808 pci_dma_sync_single_for_cpu(pdev,
809 pci_unmap_addr(ce, dma_addr),
810 pci_unmap_len(ce, dma_len),
811 PCI_DMA_FROMDEVICE);
812 memcpy(skb->data, ce->skb->data + dma_pad, len);
813 pci_dma_sync_single_for_device(pdev,
814 pci_unmap_addr(ce, dma_addr),
815 pci_unmap_len(ce, dma_len),
816 PCI_DMA_FROMDEVICE);
817 } else if (!drop_thres)
818 goto use_orig_buf;
819
820 recycle_fl_buf(fl, fl->cidx);
821 return skb;
822 }
823
824 if (fl->credits < drop_thres) {
825 recycle_fl_buf(fl, fl->cidx);
826 return NULL;
827 }
828
829use_orig_buf:
830 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
831 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
832 skb = ce->skb;
833 skb_reserve(skb, dma_pad);
834 skb_put(skb, len);
835 return skb;
836}
837
838/**
839 * unexpected_offload - handle an unexpected offload packet
840 * @adapter: the adapter
841 * @fl: the free list that received the packet
842 *
843 * Called when we receive an unexpected offload packet (e.g., the TOE
844 * function is disabled or the card is a NIC). Prints a message and
845 * recycles the buffer.
846 */
847static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
848{
849 struct freelQ_ce *ce = &fl->centries[fl->cidx];
850 struct sk_buff *skb = ce->skb;
851
852 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
853 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
854 CH_ERR("%s: unexpected offload packet, cmd %u\n",
855 adapter->name, *skb->data);
856 recycle_fl_buf(fl, fl->cidx);
857}
858
859/*
860 * Write the command descriptors to transmit the given skb starting at
861 * descriptor pidx with the given generation.
862 */
863static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
864 unsigned int pidx, unsigned int gen,
865 struct cmdQ *q)
866{
867 dma_addr_t mapping;
868 struct cmdQ_e *e, *e1;
869 struct cmdQ_ce *ce;
870 unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
871
872 mapping = pci_map_single(adapter->pdev, skb->data,
873 skb->len - skb->data_len, PCI_DMA_TODEVICE);
874 ce = &q->centries[pidx];
875 ce->skb = NULL;
876 pci_unmap_addr_set(ce, dma_addr, mapping);
877 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
878
879 flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
880 V_CMD_GEN2(gen);
881 e = &q->entries[pidx];
882 e->addr_lo = (u32)mapping;
883 e->addr_hi = (u64)mapping >> 32;
884 e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
885 for (e1 = e, i = 0; nfrags--; i++) {
886 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
887
888 ce++;
889 e1++;
890 if (++pidx == q->size) {
891 pidx = 0;
892 gen ^= 1;
893 ce = q->centries;
894 e1 = q->entries;
895 }
896
897 mapping = pci_map_page(adapter->pdev, frag->page,
898 frag->page_offset, frag->size,
899 PCI_DMA_TODEVICE);
900 ce->skb = NULL;
901 pci_unmap_addr_set(ce, dma_addr, mapping);
902 pci_unmap_len_set(ce, dma_len, frag->size);
903
904 e1->addr_lo = (u32)mapping;
905 e1->addr_hi = (u64)mapping >> 32;
906 e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
907 e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
908 V_CMD_GEN2(gen);
909 }
910
911 ce->skb = skb;
912 wmb();
913 e->flags = flags;
914}
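/* Ordering note: all fragment descriptors are filled in first, and the
 * SOP descriptor's flags word (carrying F_CMD_SOP and the generation
 * bits) is written last, after wmb(), so the hardware can never observe
 * a partially built descriptor chain.
 */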
915
916/*
917 * Clean up completed Tx buffers.
918 */
919static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
920{
921 unsigned int reclaim = q->processed - q->cleaned;
922
923 if (reclaim) {
924 free_cmdQ_buffers(sge, q, reclaim);
925 q->cleaned += reclaim;
926 }
927}
928
929#ifndef SET_ETHTOOL_OPS
930# define __netif_rx_complete(dev) netif_rx_complete(dev)
931#endif
932
933/*
934 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
935 * ports plus the TOE all multiplexing onto a single response queue; therefore,
936 * accepting new responses cannot depend on the state of any particular port.
937 * So define our own equivalent that omits the netif_running() test.
938 */
939static inline int napi_schedule_prep(struct net_device *dev)
940{
941 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
942}
943
944
945/**
946 * sge_rx - process an ingress ethernet packet
947 * @sge: the sge structure
948 * @fl: the free list that contains the packet buffer
949 * @len: the packet length
950 *
951 *	Process an ingress Ethernet packet and deliver it to the stack.
952 */
953static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
954{
955 struct sk_buff *skb;
956 struct cpl_rx_pkt *p;
957 struct adapter *adapter = sge->adapter;
958
959 sge->stats.ethernet_pkts++;
960 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
961 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
962 SGE_RX_DROP_THRES);
963 if (!skb) {
964 sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
965 return 0;
966 }
967
968 p = (struct cpl_rx_pkt *)skb->data;
969 skb_pull(skb, sizeof(*p));
970 skb->dev = adapter->port[p->iff].dev;
971 skb->dev->last_rx = jiffies;
972 skb->protocol = eth_type_trans(skb, skb->dev);
973 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
974 skb->protocol == htons(ETH_P_IP) &&
975 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
976 sge->port_stats[p->iff].rx_cso_good++;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
978 } else
979 skb->ip_summed = CHECKSUM_NONE;
980
981 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
982 sge->port_stats[p->iff].vlan_xtract++;
983 if (adapter->params.sge.polling)
984 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
985 ntohs(p->vlan));
986 else
987 vlan_hwaccel_rx(skb, adapter->vlan_grp,
988 ntohs(p->vlan));
989 } else if (adapter->params.sge.polling)
990 netif_receive_skb(skb);
991 else
992 netif_rx(skb);
993 return 0;
994}
995
996/*
997 * Returns true if a command queue has enough available descriptors that
998 * we can resume Tx operation after temporarily disabling its packet queue.
999 */
1000static inline int enough_free_Tx_descs(const struct cmdQ *q)
1001{
1002 unsigned int r = q->processed - q->cleaned;
1003
1004 return q->in_use - r < (q->size >> 1);
1005}
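/* Example: with the default 1024-entry cmdQ0 (SGE_CMDQ0_E_N) this resumes
 * Tx once fewer than 512 descriptors are outstanding, where 'outstanding'
 * means in_use minus the descriptors the hardware has already processed
 * but software has not yet reclaimed.
 */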
1006
1007/*
1008 * Called when sufficient space has become available in the SGE command queues
1009 * after the Tx packet schedulers have been suspended to restart the Tx path.
1010 */
1011static void restart_tx_queues(struct sge *sge)
1012{
1013 struct adapter *adap = sge->adapter;
1014
1015 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1016 int i;
1017
1018 for_each_port(adap, i) {
1019 struct net_device *nd = adap->port[i].dev;
1020
1021 if (test_and_clear_bit(nd->if_port,
1022 &sge->stopped_tx_queues) &&
1023 netif_running(nd)) {
1024 sge->stats.cmdQ_restarted[3]++;
1025 netif_wake_queue(nd);
1026 }
1027 }
1028 }
1029}
1030
1031/*
1032 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1033 * information.
1034 */
1035static unsigned int update_tx_info(struct adapter *adapter,
1036 unsigned int flags,
1037 unsigned int pr0)
1038{
1039 struct sge *sge = adapter->sge;
1040 struct cmdQ *cmdq = &sge->cmdQ[0];
1041
1042 cmdq->processed += pr0;
1043
1044 if (flags & F_CMDQ0_ENABLE) {
1045 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1046
1047 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1048 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1049 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1050 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1051 }
1052 flags &= ~F_CMDQ0_ENABLE;
1053 }
1054
1055 if (unlikely(sge->stopped_tx_queues != 0))
1056 restart_tx_queues(sge);
1057
1058 return flags;
1059}
1060
1061/*
1062 * Process SGE responses, up to the supplied budget. Returns the number of
1063 * responses processed. A negative budget is effectively unlimited.
1064 */
1065static int process_responses(struct adapter *adapter, int budget)
1066{
1067 struct sge *sge = adapter->sge;
1068 struct respQ *q = &sge->respQ;
1069 struct respQ_e *e = &q->entries[q->cidx];
1070 int budget_left = budget;
1071 unsigned int flags = 0;
1072 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1073
1074
1075 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1076 flags |= e->Qsleeping;
1077
1078 cmdq_processed[0] += e->Cmdq0CreditReturn;
1079 cmdq_processed[1] += e->Cmdq1CreditReturn;
1080
1081 /* We batch updates to the TX side to avoid cacheline
1082 * ping-pong of TX state information on MP where the sender
1083 * might run on a different CPU than this function...
1084 */
1085 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1086 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1087 cmdq_processed[0] = 0;
1088 }
1089 if (unlikely(cmdq_processed[1] > 16)) {
1090 sge->cmdQ[1].processed += cmdq_processed[1];
1091 cmdq_processed[1] = 0;
1092 }
1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095
1096 if (unlikely(!e->Sop || !e->Eop))
1097 BUG();
1098 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl);
1100 else
1101 sge_rx(sge, fl, e->BufferLength);
1102
1103 /*
1104 * Note: this depends on each packet consuming a
1105 * single free-list buffer; cf. the BUG above.
1106 */
1107 if (++fl->cidx == fl->size)
1108 fl->cidx = 0;
1109 if (unlikely(--fl->credits <
1110 fl->size - SGE_FREEL_REFILL_THRESH))
1111 refill_free_list(sge, fl);
1112 } else
1113 sge->stats.pure_rsps++;
1114
1115 e++;
1116 if (unlikely(++q->cidx == q->size)) {
1117 q->cidx = 0;
1118 q->genbit ^= 1;
1119 e = q->entries;
1120 }
1121 prefetch(e);
1122
1123 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1124 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1125 q->credits = 0;
1126 }
1127 --budget_left;
1128 }
1129
1130 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1131 sge->cmdQ[1].processed += cmdq_processed[1];
1132
1133 budget -= budget_left;
1134 return budget;
1135}
1136
1137/*
1138 * A simpler version of process_responses() that handles only pure (i.e.,
1139 * non-data-carrying) responses. Such responses are too lightweight to justify
1140 * calling a softirq when using NAPI, so we handle them specially in hard
1141 * interrupt context. The function is called with a pointer to a response,
1142 * which the caller must ensure is a valid pure response. Returns 1 if it
1143 * encounters a valid data-carrying response, 0 otherwise.
1144 */
1145static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1146{
1147 struct sge *sge = adapter->sge;
1148 struct respQ *q = &sge->respQ;
1149 unsigned int flags = 0;
1150 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1151
1152 do {
1153 flags |= e->Qsleeping;
1154
1155 cmdq_processed[0] += e->Cmdq0CreditReturn;
1156 cmdq_processed[1] += e->Cmdq1CreditReturn;
1157
1158 e++;
1159 if (unlikely(++q->cidx == q->size)) {
1160 q->cidx = 0;
1161 q->genbit ^= 1;
1162 e = q->entries;
1163 }
1164 prefetch(e);
1165
1166 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1167 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1168 q->credits = 0;
1169 }
1170 sge->stats.pure_rsps++;
1171 } while (e->GenerationBit == q->genbit && !e->DataValid);
1172
1173 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1174 sge->cmdQ[1].processed += cmdq_processed[1];
1175
1176 return e->GenerationBit == q->genbit;
1177}
1178
1179/*
1180 * Handler for new data events when using NAPI. This does not need any locking
1181 * or protection from interrupts as data interrupts are off at this point and
1182 * other adapter interrupts do not interfere.
1183 */
1184static int t1_poll(struct net_device *dev, int *budget)
1185{
1186 struct adapter *adapter = dev->priv;
1187 int effective_budget = min(*budget, dev->quota);
1188
1189 int work_done = process_responses(adapter, effective_budget);
1190 *budget -= work_done;
1191 dev->quota -= work_done;
1192
1193 if (work_done >= effective_budget)
1194 return 1;
1195
1196 __netif_rx_complete(dev);
1197
1198 /*
1199 * Because we don't atomically flush the following write it is
1200 * possible that in very rare cases it can reach the device in a way
1201 * that races with a new response being written plus an error interrupt
1202 * causing the NAPI interrupt handler below to return unhandled status
1203 * to the OS. To protect against this would require flushing the write
1204 * and doing both the write and the flush with interrupts off. Way too
1205 * expensive and unjustifiable given the rarity of the race.
1206 */
1207 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1208 return 0;
1209}
1210
1211/*
1212 * Returns true if the device is already scheduled for polling.
1213 */
1214static inline int napi_is_scheduled(struct net_device *dev)
1215{
1216 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1217}
1218
1219/*
1220 * NAPI version of the main interrupt handler.
1221 */
1222static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
1223{
1224 int handled;
1225 struct adapter *adapter = data;
1226 struct sge *sge = adapter->sge;
1227 struct respQ *q = &adapter->sge->respQ;
1228
1229 /*
1230 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
1231 * handler has control of the response queue and the interrupt handler
1232 * can look at the queue reliably only once it knows NAPI is off.
1233 * We can't wait that long to clear the SGE_DATA interrupt because we
1234 * could race with t1_poll rearming the SGE interrupt, so we need to
1235 * clear the interrupt speculatively and really early on.
1236 */
1237 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1238
1239 spin_lock(&adapter->async_lock);
1240 if (!napi_is_scheduled(sge->netdev)) {
1241 struct respQ_e *e = &q->entries[q->cidx];
1242
1243 if (e->GenerationBit == q->genbit) {
1244 if (e->DataValid ||
1245 process_pure_responses(adapter, e)) {
1246 if (likely(napi_schedule_prep(sge->netdev)))
1247 __netif_rx_schedule(sge->netdev);
1248 else
1249 printk(KERN_CRIT
1250 "NAPI schedule failure!\n");
1251 } else
1252 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1253 handled = 1;
1254 goto unlock;
1255 } else
1256 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1257 } else
1258 if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
1259 printk(KERN_ERR "data interrupt while NAPI running\n");
1260
1261 handled = t1_slow_intr_handler(adapter);
1262 if (!handled)
1263 sge->stats.unhandled_irqs++;
1264 unlock:
1265 spin_unlock(&adapter->async_lock);
1266 return IRQ_RETVAL(handled != 0);
1267}
1268
1269/*
1270 * Main interrupt handler, optimized assuming that we took a 'DATA'
1271 * interrupt.
1272 *
1273 * 1. Clear the interrupt
1274 * 2. Loop while we find valid descriptors and process them; accumulate
1275 * information that can be processed after the loop
1276 * 3. Tell the SGE at which index we stopped processing descriptors
1277 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1278 * outstanding TX buffers waiting, replenish RX buffers, potentially
1279 * reenable upper layers if they were turned off due to lack of TX
1280 * resources which are available again.
1281 * 5. If we took an interrupt but no valid respQ descriptors were found, we
1282 *    let the slow_intr_handler run and do error handling.
1283 */
1284static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1285{
1286 int work_done;
1287 struct respQ_e *e;
1288 struct adapter *adapter = cookie;
1289 struct respQ *Q = &adapter->sge->respQ;
1290
1291 spin_lock(&adapter->async_lock);
1292 e = &Q->entries[Q->cidx];
1293 prefetch(e);
1294
1295 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1296
1297 if (likely(e->GenerationBit == Q->genbit))
1298 work_done = process_responses(adapter, -1);
1299 else
1300 work_done = t1_slow_intr_handler(adapter);
1301
1302 /*
1303 * The unconditional clearing of the PL_CAUSE above may have raced
1304 * with DMA completion and the corresponding generation of a response
1305 * to cause us to miss the resulting data interrupt. The next write
1306 * is also unconditional to recover the missed interrupt and render
1307 * this race harmless.
1308 */
1309 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1310
1311 if (!work_done)
1312 adapter->sge->stats.unhandled_irqs++;
1313 spin_unlock(&adapter->async_lock);
1314 return IRQ_RETVAL(work_done != 0);
1315}
1316
1317intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1318{
1319 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1320}
1321
1322/*
1323 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1324 *
1325 * The code figures out how many entries the sk_buff will require in the
1326 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1327 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1328 * has completed. Then, it doesn't access the global structure anymore, but
1329 * uses the corresponding fields on the stack. In conjunction with a spinlock
1329 * around that code, we can make the function reentrant without holding the
1330 * lock when we actually enqueue (which might be expensive, especially on
1331 * architectures with IO MMUs).
1332 *
1333 * This runs with softirqs disabled.
1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev)
1337{
1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid];
1340 unsigned int credits, pidx, genbit, count;
1341
1342 spin_lock(&q->lock);
1343 reclaim_completed_tx(sge, q);
1344
1345 pidx = q->pidx;
1346 credits = q->size - q->in_use;
1347 count = 1 + skb_shinfo(skb)->nr_frags;
1348
1349 { /* Ethernet packet */
1350 if (unlikely(credits < count)) {
1351 netif_stop_queue(dev);
1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n",
1356 adapter->name);
1357 return 1;
1358 }
1359 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++;
1361 netif_stop_queue(dev);
1362 set_bit(dev->if_port, &sge->stopped_tx_queues);
1363 }
1364 }
1365 q->in_use += count;
1366 genbit = q->genbit;
1367 q->pidx += count;
1368 if (q->pidx >= q->size) {
1369 q->pidx -= q->size;
1370 q->genbit ^= 1;
1371 }
1372 spin_unlock(&q->lock);
1373
1374 write_tx_descs(adapter, skb, pidx, genbit, q);
1375
1376 /*
1377 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1378	 * the doorbell if the Q is asleep. There is a natural race where
1379	 * the hardware goes to sleep just after we check; in that case
1380	 * the interrupt handler will detect the outstanding TX packet
1381	 * and ring the doorbell for us.
1382 */
1383 if (qid)
1384 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1385 else {
1386 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1387 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1388 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 }
1391 }
1392 return 0;
1393}
1394
1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
1396
1397/*
1398 * eth_hdr_len - return the length of an Ethernet header
1399 * @data: pointer to the start of the Ethernet header
1400 *
1401 * Returns the length of an Ethernet header, including optional VLAN tag.
1402 */
1403static inline int eth_hdr_len(const void *data)
1404{
1405 const struct ethhdr *e = data;
1406
1407 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1408}
1409
1410/*
1411 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1412 */
1413int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414{
1415 struct adapter *adapter = dev->priv;
1416 struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
1417 struct sge *sge = adapter->sge;
1418 struct cpl_tx_pkt *cpl;
1419
1420#ifdef NETIF_F_TSO
1421 if (skb_shinfo(skb)->tso_size) {
1422 int eth_type;
1423 struct cpl_tx_pkt_lso *hdr;
1424
1425 st->tso++;
1426
1427 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1428 CPL_ETH_II : CPL_ETH_II_VLAN;
1429
1430 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1431 hdr->opcode = CPL_TX_PKT_LSO;
1432 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1433 hdr->ip_hdr_words = skb->nh.iph->ihl;
1434 hdr->tcp_hdr_words = skb->h.th->doff;
1435 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1436 skb_shinfo(skb)->tso_size));
1437 hdr->len = htonl(skb->len - sizeof(*hdr));
1438 cpl = (struct cpl_tx_pkt *)hdr;
1439 sge->stats.tx_lso_pkts++;
1440 } else
1441#endif
1442 {
1443 /*
1444		 * Packets shorter than ETH_HLEN can break the MAC, so drop
1445		 * them early. Also, we may get oversized packets because some
1446		 * parts of the kernel don't handle our unusual hard_header_len
1447		 * correctly; drop those too.
1448 */
1449 if (unlikely(skb->len < ETH_HLEN ||
1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1451 dev_kfree_skb_any(skb);
1452 return NET_XMIT_SUCCESS;
1453 }
1454
1455 /*
1456 * We are using a non-standard hard_header_len and some kernel
1457 * components, such as pktgen, do not handle it right.
1458 * Complain when this happens but try to fix things up.
1459 */
1460 if (unlikely(skb_headroom(skb) <
1461 dev->hard_header_len - ETH_HLEN)) {
1462 struct sk_buff *orig_skb = skb;
1463
1464 if (net_ratelimit())
1465 printk(KERN_ERR "%s: inadequate headroom in "
1466 "Tx packet\n", dev->name);
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb);
1469 if (!skb)
1470 return -ENOMEM;
1471 }
1472
1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1474 skb->ip_summed == CHECKSUM_HW &&
1475 skb->nh.iph->protocol == IPPROTO_UDP)
1476 if (unlikely(skb_checksum_help(skb, 0))) {
1477 dev_kfree_skb_any(skb);
1478 return -ENOMEM;
1479 }
1480
1481		/* Try to catch a gratuitous ARP here; we'll reuse it later
1482		 * to flush out stuck ESPI packets.
1483		 */
1484 if (unlikely(!adapter->sge->espibug_skb)) {
1485 if (skb->protocol == htons(ETH_P_ARP) &&
1486 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
1487 adapter->sge->espibug_skb = skb;
1488 /* We want to re-use this skb later. We
1489 * simply bump the reference count and it
1490 * will not be freed...
1491 */
1492 skb = skb_get(skb);
1493 }
1494 }
1495
1496 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1497 cpl->opcode = CPL_TX_PKT;
1498 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1499 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1500 /* the length field isn't used so don't bother setting it */
1501
1502 st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
1503 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
1504 sge->stats.tx_reg_pkts++;
1505 }
1506 cpl->iff = dev->if_port;
1507
1508#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1509 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1510 cpl->vlan_valid = 1;
1511 cpl->vlan = htons(vlan_tx_tag_get(skb));
1512 st->vlan_insert++;
1513 } else
1514#endif
1515 cpl->vlan_valid = 0;
1516
1517 dev->trans_start = jiffies;
1518 return t1_sge_tx(skb, adapter, 0, dev);
1519}
1520
1521/*
1522 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1523 */
1524static void sge_tx_reclaim_cb(unsigned long data)
1525{
1526 int i;
1527 struct sge *sge = (struct sge *)data;
1528
1529 for (i = 0; i < SGE_CMDQ_N; ++i) {
1530 struct cmdQ *q = &sge->cmdQ[i];
1531
1532 if (!spin_trylock(&q->lock))
1533 continue;
1534
1535 reclaim_completed_tx(sge, q);
1536 if (i == 0 && q->in_use) /* flush pending credits */
1537 writel(F_CMDQ0_ENABLE,
1538 sge->adapter->regs + A_SG_DOORBELL);
1539
1540 spin_unlock(&q->lock);
1541 }
1542 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1543}
1544
1545/*
1546 * Propagate changes of the SGE coalescing parameters to the HW.
1547 */
1548int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1549{
1550 sge->netdev->poll = t1_poll;
1551 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1552 core_ticks_per_usec(sge->adapter);
1553 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1554 return 0;
1555}
1556
1557/*
1558 * Allocates both RX and TX resources and configures the SGE. However,
1559 * the hardware is not enabled yet.
1560 */
1561int t1_sge_configure(struct sge *sge, struct sge_params *p)
1562{
1563 if (alloc_rx_resources(sge, p))
1564 return -ENOMEM;
1565 if (alloc_tx_resources(sge, p)) {
1566 free_rx_resources(sge);
1567 return -ENOMEM;
1568 }
1569 configure_sge(sge, p);
1570
1571 /*
1572	 * Now that we have sized the free lists, calculate the payload
1573 * capacity of the large buffers. Other parts of the driver use
1574 * this to set the max offload coalescing size so that RX packets
1575 * do not overflow our large buffers.
1576 */
1577 p->large_buf_capacity = jumbo_payload_capacity(sge);
1578 return 0;
1579}
1580
1581/*
1582 * Disables the DMA engine.
1583 */
1584void t1_sge_stop(struct sge *sge)
1585{
1586 writel(0, sge->adapter->regs + A_SG_CONTROL);
1587 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1588 if (is_T2(sge->adapter))
1589 del_timer_sync(&sge->espibug_timer);
1590 del_timer_sync(&sge->tx_reclaim_timer);
1591}
1592
1593/*
1594 * Enables the DMA engine.
1595 */
1596void t1_sge_start(struct sge *sge)
1597{
1598 refill_free_list(sge, &sge->freelQ[0]);
1599 refill_free_list(sge, &sge->freelQ[1]);
1600
1601 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1602 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1603 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1604
1605 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1606
1607 if (is_T2(sge->adapter))
1608 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1609}
1610
1611/*
1612 * Callback for the T2 ESPI 'stuck packet' workaround.
1613 */
1614static void espibug_workaround(void *data)
1615{
1616 struct adapter *adapter = (struct adapter *)data;
1617 struct sge *sge = adapter->sge;
1618
1619 if (netif_running(adapter->port[0].dev)) {
1620 struct sk_buff *skb = sge->espibug_skb;
1621
1622 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
1623
1624 if ((seop & 0xfff0fff) == 0xfff && skb) {
1625 if (!skb->cb[0]) {
1626 u8 ch_mac_addr[ETH_ALEN] =
1627 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
1628 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
1629 ch_mac_addr, ETH_ALEN);
1630 memcpy(skb->data + skb->len - 10, ch_mac_addr,
1631 ETH_ALEN);
1632 skb->cb[0] = 0xff;
1633 }
1634
1635 /* bump the reference count to avoid freeing of the
1636 * skb once the DMA has completed.
1637 */
1638 skb = skb_get(skb);
1639 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
1640 }
1641 }
1642 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1643}
1644
1645/*
1646 * Creates a t1_sge structure and returns suggested resource parameters.
1647 */
1648struct sge * __devinit t1_sge_create(struct adapter *adapter,
1649 struct sge_params *p)
1650{
1651 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
1652
1653 if (!sge)
1654 return NULL;
1655 memset(sge, 0, sizeof(*sge));
1656
1657 sge->adapter = adapter;
1658 sge->netdev = adapter->port[0].dev;
1659 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
1660 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
1661
1662 init_timer(&sge->tx_reclaim_timer);
1663 sge->tx_reclaim_timer.data = (unsigned long)sge;
1664 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
1665
1666 if (is_T2(sge->adapter)) {
1667 init_timer(&sge->espibug_timer);
1668 sge->espibug_timer.function = (void *)&espibug_workaround;
1669 sge->espibug_timer.data = (unsigned long)sge->adapter;
1670 sge->espibug_timeout = 1;
1671 }
1672
1673
1674 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
1675 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
1676 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
1677 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
1678 p->rx_coalesce_usecs = 50;
1679 p->coalesce_enable = 0;
1680 p->sample_interval_usecs = 0;
1681 p->polling = 0;
1682
1683 return sge;
1684}
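
Note on the response-queue loops above (process_pure_responses() and the
DATA path of t1_interrupt()): they synchronize with the hardware through a
generation bit rather than a shared head pointer. The SGE tags each entry
it writes with the queue's current generation, and the consumer flips its
own copy of the bit every time cidx wraps, so an entry is valid exactly
when the tags match. Below is a minimal standalone userspace model of that
protocol; all names (ring, entry, produce, consume) are illustrative and
not part of the driver.

#include <stdio.h>

#define QSIZE 4

struct entry { int data; unsigned gen; };

struct ring {
	struct entry e[QSIZE];
	unsigned pidx, cidx;	/* producer/consumer indices */
	unsigned pgen, cgen;	/* producer/consumer generation bits */
};

static void produce(struct ring *q, int v)
{
	q->e[q->pidx].data = v;
	q->e[q->pidx].gen = q->pgen;	/* tag marks the entry valid */
	if (++q->pidx == QSIZE) {	/* wrap: flip producer genbit */
		q->pidx = 0;
		q->pgen ^= 1;
	}
}

static void consume(struct ring *q)
{
	/* mirrors the do/while loop in process_pure_responses() */
	while (q->e[q->cidx].gen == q->cgen) {
		printf("consumed %d\n", q->e[q->cidx].data);
		if (++q->cidx == QSIZE) {	/* wrap: flip consumer genbit */
			q->cidx = 0;
			q->cgen ^= 1;
		}
	}
}

int main(void)
{
	struct ring q = { .pgen = 1, .cgen = 1 };	/* entries start at gen 0 */

	produce(&q, 10);
	produce(&q, 20);
	consume(&q);		/* prints 10 and 20, stops at gen mismatch */
	produce(&q, 30);
	produce(&q, 40);
	produce(&q, 50);	/* producer wraps and flips its genbit */
	consume(&q);		/* prints 30, 40 and 50 across the wrap */
	return 0;
}
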
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 000000000000..434b25586851
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
1/*****************************************************************************
2 * *
3 * File: sge.h *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/06/21 22:10:55 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_SGE_H_
40#define _CXGB_SGE_H_
41
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <asm/byteorder.h>
45
46#ifndef IRQ_RETVAL
47#define IRQ_RETVAL(x)
48typedef void irqreturn_t;
49#endif
50
51typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
52
53struct sge_intr_counts {
54 unsigned int respQ_empty; /* # times respQ empty */
55 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
56 unsigned int freelistQ_empty; /* # times freelist empty */
57 unsigned int pkt_too_big; /* packet too large (fatal) */
58 unsigned int pkt_mismatch;
59 unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
60	unsigned int cmdQ_restarted[3]; /* # of times cmdQ X was restarted */
61 unsigned int ethernet_pkts; /* # of Ethernet packets received */
62 unsigned int offload_pkts; /* # of offload packets received */
63 unsigned int offload_bundles; /* # of offload pkt bundles delivered */
64 unsigned int pure_rsps; /* # of non-payload responses */
65 unsigned int unhandled_irqs; /* # of unhandled interrupts */
66 unsigned int tx_ipfrags;
67 unsigned int tx_reg_pkts;
68 unsigned int tx_lso_pkts;
69 unsigned int tx_do_cksum;
70};
71
72struct sge_port_stats {
73 unsigned long rx_cso_good; /* # of successful RX csum offloads */
74 unsigned long tx_cso; /* # of TX checksum offloads */
75 unsigned long vlan_xtract; /* # of VLAN tag extractions */
76	unsigned long vlan_insert;  /* # of VLAN tag insertions */
77 unsigned long tso; /* # of TSO requests */
78 unsigned long rx_drops; /* # of packets dropped due to no mem */
79};
80
81struct sk_buff;
82struct net_device;
83struct adapter;
84struct sge_params;
85struct sge;
86
87struct sge *t1_sge_create(struct adapter *, struct sge_params *);
88int t1_sge_configure(struct sge *, struct sge_params *);
89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
90void t1_sge_destroy(struct sge *);
91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
95void t1_set_vlan_accel(struct adapter *adapter, int on_off);
96void t1_sge_start(struct sge *);
97void t1_sge_stop(struct sge *);
98int t1_sge_intr_error_handler(struct sge *);
99void t1_sge_intr_enable(struct sge *);
100void t1_sge_intr_disable(struct sge *);
101void t1_sge_intr_clear(struct sge *);
102const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
103const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
104
105#endif /* _CXGB_SGE_H_ */
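
Taken together with sge.c above, these declarations imply a fixed lifecycle
for the SGE module: create, configure, start, then stop and destroy, with
t1_start_xmit()/t1_sge_tx() and the selected interrupt handler active only
between start and stop. The sketch below shows that call order from a
hypothetical caller; probe_one() is not a driver function, and error
handling is abbreviated.

static int probe_one(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = t1_sge_create(adapter, p);	/* alloc + defaults */

	if (!sge)
		return -ENOMEM;
	if (t1_sge_configure(sge, p)) {		/* allocate rings, program HW */
		t1_sge_destroy(sge);
		return -ENOMEM;
	}
	t1_sge_intr_clear(sge);
	t1_sge_intr_enable(sge);
	t1_sge_start(sge);			/* enable DMA, arm timers */

	/* ... t1_start_xmit() and the interrupt handler run here ... */

	t1_sge_stop(sge);			/* quiesce DMA, kill timers */
	t1_sge_intr_disable(sge);
	t1_sge_destroy(sge);
	return 0;
}
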
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 000000000000..1ebb5d149aef
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
1/*****************************************************************************
2 * *
3 * File: subr.c *
4 * $Revision: 1.27 $ *
5 * $Date: 2005/06/22 01:08:36 $ *
6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "elmer0.h"
42#include "regs.h"
43#include "gmac.h"
44#include "cphy.h"
45#include "sge.h"
46#include "espi.h"
47
48/**
49 * t1_wait_op_done - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. Returns %0 if the operation completes and %1
59 * otherwise.
60 */
61static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
62 int attempts, int delay)
63{
64 while (1) {
65 u32 val = readl(adapter->regs + reg) & mask;
66
67 if (!!val == polarity)
68 return 0;
69 if (--attempts == 0)
70 return 1;
71 if (delay)
72 udelay(delay);
73 }
74}
75
76#define TPI_ATTEMPTS 50
77
78/*
79 * Write a register over the TPI interface (unlocked and locked versions).
80 */
81static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
82{
83 int tpi_busy;
84
85 writel(addr, adapter->regs + A_TPI_ADDR);
86 writel(value, adapter->regs + A_TPI_WR_DATA);
87 writel(F_TPIWR, adapter->regs + A_TPI_CSR);
88
89 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
90 TPI_ATTEMPTS, 3);
91 if (tpi_busy)
92 CH_ALERT("%s: TPI write to 0x%x failed\n",
93 adapter->name, addr);
94 return tpi_busy;
95}
96
97int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
98{
99 int ret;
100
101 spin_lock(&(adapter)->tpi_lock);
102 ret = __t1_tpi_write(adapter, addr, value);
103 spin_unlock(&(adapter)->tpi_lock);
104 return ret;
105}
106
107/*
108 * Read a register over the TPI interface (unlocked and locked versions).
109 */
110static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
111{
112 int tpi_busy;
113
114 writel(addr, adapter->regs + A_TPI_ADDR);
115 writel(0, adapter->regs + A_TPI_CSR);
116
117 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
118 TPI_ATTEMPTS, 3);
119 if (tpi_busy)
120 CH_ALERT("%s: TPI read from 0x%x failed\n",
121 adapter->name, addr);
122 else
123 *valp = readl(adapter->regs + A_TPI_RD_DATA);
124 return tpi_busy;
125}
126
127int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
128{
129 int ret;
130
131 spin_lock(&(adapter)->tpi_lock);
132 ret = __t1_tpi_read(adapter, addr, valp);
133 spin_unlock(&(adapter)->tpi_lock);
134 return ret;
135}
136
137/*
138 * Called when a port's link settings change to propagate the new values to the
139 * associated PHY and MAC. After performing the common tasks it invokes an
140 * OS-specific handler.
141 */
142/* static */ void link_changed(adapter_t *adapter, int port_id)
143{
144 int link_ok, speed, duplex, fc;
145 struct cphy *phy = adapter->port[port_id].phy;
146 struct link_config *lc = &adapter->port[port_id].link_config;
147
148 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
149
150 lc->speed = speed < 0 ? SPEED_INVALID : speed;
151 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
152 if (!(lc->requested_fc & PAUSE_AUTONEG))
153 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
154
155 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
156 /* Set MAC speed, duplex, and flow control to match PHY. */
157 struct cmac *mac = adapter->port[port_id].mac;
158
159 mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
160 lc->fc = (unsigned char)fc;
161 }
162 t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
163}
164
165static int t1_pci_intr_handler(adapter_t *adapter)
166{
167 u32 pcix_cause;
168
169 pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
170
171 if (pcix_cause) {
172 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
173 pcix_cause);
174 t1_fatal_err(adapter); /* PCI errors are fatal */
175 }
176 return 0;
177}
178
179
180/*
181 * Wait until Elmer's MI1 interface is ready for new operations.
182 */
183static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
184{
185 int attempts = 100, busy;
186
187 do {
188 u32 val;
189
190 __t1_tpi_read(adapter, mi1_reg, &val);
191 busy = val & F_MI1_OP_BUSY;
192 if (busy)
193 udelay(10);
194 } while (busy && --attempts);
195 if (busy)
196 CH_ALERT("%s: MDIO operation timed out\n",
197 adapter->name);
198 return busy;
199}
200
201/*
202 * MI1 MDIO initialization.
203 */
204static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
205{
206 u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
207 u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
208 V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
209
210 if (!(bi->caps & SUPPORTED_10000baseT_Full))
211 val |= V_MI1_SOF(1);
212 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
213}
214
215static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
216 int reg_addr, unsigned int *valp)
217{
218 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
219
220 spin_lock(&(adapter)->tpi_lock);
221
222 /* Write the address we want. */
223 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
224 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
225 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
226 MI1_OP_INDIRECT_ADDRESS);
227 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
228
229 /* Write the operation we want. */
230 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
231 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
232
233 /* Read the data. */
234 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
235 spin_unlock(&(adapter)->tpi_lock);
236 return 0;
237}
238
239static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
240 int reg_addr, unsigned int val)
241{
242 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
243
244 spin_lock(&(adapter)->tpi_lock);
245
246 /* Write the address we want. */
247 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
248 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
249 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
250 MI1_OP_INDIRECT_ADDRESS);
251 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
252
253 /* Write the data. */
254 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
255 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
256 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
257 spin_unlock(&(adapter)->tpi_lock);
258 return 0;
259}
260
261static struct mdio_ops mi1_mdio_ext_ops = {
262 mi1_mdio_init,
263 mi1_mdio_ext_read,
264 mi1_mdio_ext_write
265};
266
267enum {
268 CH_BRD_N110_1F,
269 CH_BRD_N210_1F,
270};
271
272static struct board_info t1_board[] = {
273
274{ CHBT_BOARD_N110, 1/*ports#*/,
275 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
276 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
277 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
278 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
279 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
280 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
281 "Chelsio N110 1x10GBaseX NIC" },
282
283{ CHBT_BOARD_N210, 1/*ports#*/,
284 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
285 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
286 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
287 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
288 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
289 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
290 "Chelsio N210 1x10GBaseX NIC" },
291
292};
293
294struct pci_device_id t1_pci_tbl[] = {
295 CH_DEVICE(7, 0, CH_BRD_N110_1F),
296 CH_DEVICE(10, 1, CH_BRD_N210_1F),
297 { 0, }
298};
299
300MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
301
302/*
303 * Return the board_info structure with a given index. Out-of-range indices
304 * return NULL.
305 */
306const struct board_info *t1_get_board_info(unsigned int board_id)
307{
308 return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
309}
310
311struct chelsio_vpd_t {
312 u32 format_version;
313 u8 serial_number[16];
314 u8 mac_base_address[6];
315 u8 pad[2]; /* make multiple-of-4 size requirement explicit */
316};
317
318#define EEPROMSIZE (8 * 1024)
319#define EEPROM_MAX_POLL 4
320
321/*
322 * Read SEEPROM. A zero is written to the flag register when the address is
323 * written to the Control register. The hardware sets the flag to one when
324 * four bytes have been transferred to the Data register.
325 */
326int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
327{
328 int i = EEPROM_MAX_POLL;
329 u16 val;
330
331 if (addr >= EEPROMSIZE || (addr & 3))
332 return -EINVAL;
333
334 pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
335 do {
336 udelay(50);
337 pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
338 } while (!(val & F_VPD_OP_FLAG) && --i);
339
340 if (!(val & F_VPD_OP_FLAG)) {
341 CH_ERR("%s: reading EEPROM address 0x%x failed\n",
342 adapter->name, addr);
343 return -EIO;
344 }
345 pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
346 *data = le32_to_cpu(*data);
347 return 0;
348}
349
350static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
351{
352 int addr, ret = 0;
353
354 for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
355 ret = t1_seeprom_read(adapter, addr,
356 (u32 *)((u8 *)vpd + addr));
357
358 return ret;
359}
360
361/*
362 * Read a port's MAC address from the VPD ROM.
363 */
364static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
365{
366 struct chelsio_vpd_t vpd;
367
368 if (t1_eeprom_vpd_get(adapter, &vpd))
369 return 1;
370 memcpy(mac_addr, vpd.mac_base_address, 5);
371 mac_addr[5] = vpd.mac_base_address[5] + index;
372 return 0;
373}
374
375/*
376 * Set up the MAC/PHY according to the requested link settings.
377 *
378 * If the PHY can auto-negotiate first decide what to advertise, then
379 * enable/disable auto-negotiation as desired and reset.
380 *
381 * If the PHY does not auto-negotiate we just reset it.
382 *
383 * If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
384 * otherwise do it later based on the outcome of auto-negotiation.
385 */
386int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
387{
388 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
389
390 if (lc->supported & SUPPORTED_Autoneg) {
391 lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
392 if (fc) {
393 lc->advertising |= ADVERTISED_ASYM_PAUSE;
394 if (fc == (PAUSE_RX | PAUSE_TX))
395 lc->advertising |= ADVERTISED_PAUSE;
396 }
397 phy->ops->advertise(phy, lc->advertising);
398
399 if (lc->autoneg == AUTONEG_DISABLE) {
400 lc->speed = lc->requested_speed;
401 lc->duplex = lc->requested_duplex;
402 lc->fc = (unsigned char)fc;
403 mac->ops->set_speed_duplex_fc(mac, lc->speed,
404 lc->duplex, fc);
405 /* Also disables autoneg */
406 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
407 phy->ops->reset(phy, 0);
408 } else
409 phy->ops->autoneg_enable(phy); /* also resets PHY */
410 } else {
411 mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
412 lc->fc = (unsigned char)fc;
413 phy->ops->reset(phy, 0);
414 }
415 return 0;
416}
417
418/*
419 * External interrupt handler for boards using elmer0.
420 */
421int elmer0_ext_intr_handler(adapter_t *adapter)
422{
423 struct cphy *phy;
424 int phy_cause;
425 u32 cause;
426
427 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
428
429 switch (board_info(adapter)->board) {
430 case CHBT_BOARD_N210:
431 case CHBT_BOARD_N110:
432 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
433 phy = adapter->port[0].phy;
434 phy_cause = phy->ops->interrupt_handler(phy);
435 if (phy_cause & cphy_cause_link_change)
436 link_changed(adapter, 0);
437 }
438 break;
439 }
440 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
441 return 0;
442}
443
444/* Enables all interrupts. */
445void t1_interrupts_enable(adapter_t *adapter)
446{
447 unsigned int i;
448 u32 pl_intr;
449
450 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
451
452 t1_sge_intr_enable(adapter->sge);
453 if (adapter->espi) {
454 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
455 t1_espi_intr_enable(adapter->espi);
456 }
457
458 /* Enable MAC/PHY interrupts for each port. */
459 for_each_port(adapter, i) {
460 adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
461 adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
462 }
463
464 /* Enable PCIX & external chip interrupts on ASIC boards. */
465 pl_intr = readl(adapter->regs + A_PL_ENABLE);
466
467 /* PCI-X interrupts */
468 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
469 0xffffffff);
470
471 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
472 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
473 writel(pl_intr, adapter->regs + A_PL_ENABLE);
474}
475
476/* Disables all interrupts. */
477void t1_interrupts_disable(adapter_t* adapter)
478{
479 unsigned int i;
480
481 t1_sge_intr_disable(adapter->sge);
482 if (adapter->espi)
483 t1_espi_intr_disable(adapter->espi);
484
485 /* Disable MAC/PHY interrupts for each port. */
486 for_each_port(adapter, i) {
487 adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
488 adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
489 }
490
491 /* Disable PCIX & external chip interrupts. */
492 writel(0, adapter->regs + A_PL_ENABLE);
493
494 /* PCI-X interrupts */
495 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
496
497 adapter->slow_intr_mask = 0;
498}
499
500/* Clears all interrupts */
501void t1_interrupts_clear(adapter_t* adapter)
502{
503 unsigned int i;
504 u32 pl_intr;
505
506
507 t1_sge_intr_clear(adapter->sge);
508 if (adapter->espi)
509 t1_espi_intr_clear(adapter->espi);
510
511 /* Clear MAC/PHY interrupts for each port. */
512 for_each_port(adapter, i) {
513 adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
514 adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
515 }
516
517 /* Enable interrupts for external devices. */
518 pl_intr = readl(adapter->regs + A_PL_CAUSE);
519
520 writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
521 adapter->regs + A_PL_CAUSE);
522
523 /* PCI-X interrupts */
524 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
525}
526
527/*
528 * Slow path interrupt handler for ASICs.
529 */
530int t1_slow_intr_handler(adapter_t *adapter)
531{
532 u32 cause = readl(adapter->regs + A_PL_CAUSE);
533
534 cause &= adapter->slow_intr_mask;
535 if (!cause)
536 return 0;
537 if (cause & F_PL_INTR_SGE_ERR)
538 t1_sge_intr_error_handler(adapter->sge);
539 if (cause & F_PL_INTR_ESPI)
540 t1_espi_intr_handler(adapter->espi);
541 if (cause & F_PL_INTR_PCIX)
542 t1_pci_intr_handler(adapter);
543 if (cause & F_PL_INTR_EXT)
544 t1_elmer0_ext_intr(adapter);
545
546 /* Clear the interrupts just processed. */
547 writel(cause, adapter->regs + A_PL_CAUSE);
548 (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
549 return 1;
550}
551
552/* Pause deadlock avoidance parameters */
553#define DROP_MSEC 16
554#define DROP_PKTS_CNT 1
555
556static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
557{
558 u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
559
560 if (enable)
561 val |= csum_bit;
562 else
563 val &= ~csum_bit;
564 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
565}
566
567void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
568{
569 set_csum_offload(adapter, F_IP_CSUM, enable);
570}
571
572void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
573{
574 set_csum_offload(adapter, F_UDP_CSUM, enable);
575}
576
577void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
578{
579 set_csum_offload(adapter, F_TCP_CSUM, enable);
580}
581
582static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
583{
584 u32 val;
585
586 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
587 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
588 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
589 F_TP_IN_ESPI_CHECK_TCP_CSUM;
590 writel(val, adapter->regs + A_TP_IN_CONFIG);
591 writel(F_TP_OUT_CSPI_CPL |
592 F_TP_OUT_ESPI_ETHERNET |
593 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
594 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
595 adapter->regs + A_TP_OUT_CONFIG);
596
597 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
598 val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
599 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
600
601 /*
602 * Enable pause frame deadlock prevention.
603 */
604 if (is_T2(adapter)) {
605 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
606
607 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
608 V_DROP_TICKS_CNT(drop_ticks) |
609 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
610 adapter->regs + A_TP_TX_DROP_CONFIG);
611 }
612
613 writel(F_TP_RESET, adapter->regs + A_TP_RESET);
614}
615
616int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
617 struct adapter_params *p)
618{
619 p->chip_version = bi->chip_term;
620 if (p->chip_version == CHBT_TERM_T1 ||
621 p->chip_version == CHBT_TERM_T2) {
622 u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
623
624 val = G_TP_PC_REV(val);
625 if (val == 2)
626 p->chip_revision = TERM_T1B;
627 else if (val == 3)
628 p->chip_revision = TERM_T2;
629 else
630 return -1;
631 } else
632 return -1;
633 return 0;
634}
635
636/*
637 * Enable board components other than the Chelsio chip, such as external MAC
638 * and PHY.
639 */
640static int board_init(adapter_t *adapter, const struct board_info *bi)
641{
642 switch (bi->board) {
643 case CHBT_BOARD_N110:
644 case CHBT_BOARD_N210:
645 writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
647 break;
648 }
649 return 0;
650}
651
652/*
653 * Initialize and configure the Terminator HW modules. Note that external
654 * MAC and PHYs are initialized separately.
655 */
656int t1_init_hw_modules(adapter_t *adapter)
657{
658 int err = -EIO;
659 const struct board_info *bi = board_info(adapter);
660
661 if (!bi->clock_mc4) {
662 u32 val = readl(adapter->regs + A_MC4_CFG);
663
664 writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
665 writel(F_M_BUS_ENABLE | F_TCAM_RESET,
666 adapter->regs + A_MC5_CONFIG);
667 }
668
669 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
670 bi->espi_nports))
671 goto out_err;
672
673 t1_tp_reset(adapter, bi->clock_core);
674
675 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
676 if (err)
677 goto out_err;
678
679 err = 0;
680 out_err:
681 return err;
682}
683
684/*
685 * Determine a card's PCI mode.
686 */
687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
688{
689 static unsigned short speed_map[] = { 33, 66, 100, 133 };
690 u32 pci_mode;
691
692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
693 p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
694 p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
695 p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
696}
697
698/*
699 * Release the structures holding the SW per-Terminator-HW-module state.
700 */
701void t1_free_sw_modules(adapter_t *adapter)
702{
703 unsigned int i;
704
705 for_each_port(adapter, i) {
706 struct cmac *mac = adapter->port[i].mac;
707 struct cphy *phy = adapter->port[i].phy;
708
709 if (mac)
710 mac->ops->destroy(mac);
711 if (phy)
712 phy->ops->destroy(phy);
713 }
714
715 if (adapter->sge)
716 t1_sge_destroy(adapter->sge);
717 if (adapter->espi)
718 t1_espi_destroy(adapter->espi);
719}
720
721static void __devinit init_link_config(struct link_config *lc,
722 const struct board_info *bi)
723{
724 lc->supported = bi->caps;
725 lc->requested_speed = lc->speed = SPEED_INVALID;
726 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
727 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
728 if (lc->supported & SUPPORTED_Autoneg) {
729 lc->advertising = lc->supported;
730 lc->autoneg = AUTONEG_ENABLE;
731 lc->requested_fc |= PAUSE_AUTONEG;
732 } else {
733 lc->advertising = 0;
734 lc->autoneg = AUTONEG_DISABLE;
735 }
736}
737
738
739/*
740 * Allocate and initialize the data structures that hold the SW state of
741 * the Terminator HW modules.
742 */
743int __devinit t1_init_sw_modules(adapter_t *adapter,
744 const struct board_info *bi)
745{
746 unsigned int i;
747
748 adapter->params.brd_info = bi;
749 adapter->params.nports = bi->port_number;
750 adapter->params.stats_update_period = bi->gmac->stats_update_period;
751
752 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
753 if (!adapter->sge) {
754 CH_ERR("%s: SGE initialization failed\n",
755 adapter->name);
756 goto error;
757 }
758
759 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
760 CH_ERR("%s: ESPI initialization failed\n",
761 adapter->name);
762 goto error;
763 }
764
765 board_init(adapter, bi);
766 bi->mdio_ops->init(adapter, bi);
767 if (bi->gphy->reset)
768 bi->gphy->reset(adapter);
769 if (bi->gmac->reset)
770 bi->gmac->reset(adapter);
771
772 for_each_port(adapter, i) {
773 u8 hw_addr[6];
774 struct cmac *mac;
775 int phy_addr = bi->mdio_phybaseaddr + i;
776
777 adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
778 bi->mdio_ops);
779 if (!adapter->port[i].phy) {
780 CH_ERR("%s: PHY %d initialization failed\n",
781 adapter->name, i);
782 goto error;
783 }
784
785 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
786 if (!mac) {
787 CH_ERR("%s: MAC %d initialization failed\n",
788 adapter->name, i);
789 goto error;
790 }
791
792 /*
793		 * Get the port's MAC address either from the EEPROM, if one
794		 * exists, or from the address hardcoded in the MAC.
795 */
796 if (vpd_macaddress_get(adapter, i, hw_addr)) {
797 CH_ERR("%s: could not read MAC address from VPD ROM\n",
798 adapter->port[i].dev->name);
799 goto error;
800 }
801 memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
802 init_link_config(&adapter->port[i].link_config, bi);
803 }
804
805 get_pci_mode(adapter, &adapter->params.pci);
806 t1_interrupts_clear(adapter);
807 return 0;
808
809 error:
810 t1_free_sw_modules(adapter);
811 return -1;
812}
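
One detail of t1_link_start() above worth calling out: when the PHY can
auto-negotiate, the requested flow-control bits are folded into the
standard pause advertisement as ASYM_PAUSE whenever any pause direction is
requested, plus PAUSE when both directions are. The standalone sketch below
restates just that mapping; the PAUSE_*/ADVERTISED_* values are
illustrative stand-ins for the driver's and ethtool's constants.

#include <stdio.h>

#define PAUSE_RX		0x1	/* stand-ins for the driver's flags */
#define PAUSE_TX		0x2
#define ADVERTISED_PAUSE	0x2000	/* stand-ins for ethtool's bits */
#define ADVERTISED_ASYM_PAUSE	0x4000

/* Same mapping as the autoneg branch of t1_link_start(). */
static unsigned int pause_advert(unsigned int requested_fc)
{
	unsigned int fc = requested_fc & (PAUSE_RX | PAUSE_TX);
	unsigned int adv = 0;

	if (fc) {
		adv |= ADVERTISED_ASYM_PAUSE;
		if (fc == (PAUSE_RX | PAUSE_TX))
			adv |= ADVERTISED_PAUSE;
	}
	return adv;
}

int main(void)
{
	printf("rx only: %#x\n", pause_advert(PAUSE_RX));		/* asym only */
	printf("rx+tx  : %#x\n", pause_advert(PAUSE_RX | PAUSE_TX));	/* both bits */
	return 0;
}
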
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..81816c2b708a
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
1/*****************************************************************************
2 * *
3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/06/22 00:17:04 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Maintainers: maintainers@chelsio.com *
25 * *
26 * Authors: PMC/SIERRA *
27 * *
28 * History: *
29 * *
30 ****************************************************************************/
31
32#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
33#define _CXGB_SUNI1x10GEXP_REGS_H_
34
35/******************************************************************************/
36/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
37/******************************************************************************/
38/* Refer to the Register Bit Masks below for the naming of each register and */
39/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit            */
40/******************************************************************************/
41
42#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
43#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
44#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
45#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
46#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
47#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
48#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
49#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
50#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
51#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
52#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
53#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
54#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
55#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
56#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
57#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
58#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
59#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
60#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
61#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
62#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
63#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
64#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
65#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
66#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
67#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
68#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
69#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
70#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
71#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
72#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
73#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
74#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
75#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
76#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
77#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
78#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
79#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
80#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
81#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
82#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
83#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
84#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
85#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
86#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
87#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
88#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
89#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
90#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
91#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
92#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
93#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
94#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
95#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
96#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
97#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
98#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
99#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
100#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
101#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
102#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
103#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
104#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
105#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
106#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
107#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
108#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
109#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
110#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
111#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
112#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
113#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
114#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
115#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
116#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
117#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
118#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
119#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
120#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
121
122/******************************************************************************/
123/* -- End register offset definitions -- */
124/******************************************************************************/
125
126/******************************************************************************/
127/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
128/******************************************************************************/
129
130/*----------------------------------------------------------------------------
131 * Register 0x0004: S/UNI-1x10GE-XP Device Status
132 * Bit 9 TOP_SXRA_EXPIRED
133 * Bit 8 TOP_MDIO_BUSY
134 * Bit 7 TOP_DTRB
135 * Bit 6 TOP_EXPIRED
136 * Bit 5 TOP_PAUSED
137 * Bit 4 TOP_PL4_ID_DOOL
138 * Bit 3 TOP_PL4_IS_DOOL
139 * Bit 2 TOP_PL4_ID_ROOL
140 * Bit 1 TOP_PL4_IS_ROOL
141 * Bit 0 TOP_PL4_OUT_ROOL
142 *----------------------------------------------------------------------------*/
143#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
144#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
145#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
146#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
147#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
148#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
149#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
150
151/*----------------------------------------------------------------------------
152 * Register 0x000E:PM3393 Global interrupt enable
153 * Bit 15 TOP_INTE
154 *----------------------------------------------------------------------------*/
155#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
156
157/*----------------------------------------------------------------------------
158 * Register 0x2040: RXXG Configuration 1
159 * Bit 15 RXXG_RXEN
160 * Bit 14 RXXG_ROCF
161 * Bit 13 RXXG_PAD_STRIP
162 * Bit 10 RXXG_PUREP
163 * Bit 9 RXXG_LONGP
164 * Bit 8 RXXG_PARF
165 * Bit 7 RXXG_FLCHK
166 * Bit 5 RXXG_PASS_CTRL
167 * Bit 3 RXXG_CRC_STRIP
168 * Bit 2-0 RXXG_MIFG
169 *----------------------------------------------------------------------------*/
170#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
171#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
172#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
173#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
174
175/*----------------------------------------------------------------------------
176 * Register 0x2070: RXXG Address Filter Control 2
177 * Bit 1 RXXG_PMODE
178 * Bit 0 RXXG_MHASH_EN
179 *----------------------------------------------------------------------------*/
180#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
181#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
182
183/*----------------------------------------------------------------------------
184 * Register 0x2100: MSTAT Control
185 * Bit 2 MSTAT_WRITE
186 * Bit 1 MSTAT_CLEAR
187 * Bit 0 MSTAT_SNAP
188 *----------------------------------------------------------------------------*/
189#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
190#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
191
192/*----------------------------------------------------------------------------
193 * Register 0x3040: TXXG Configuration Register 1
194 * Bit 15 TXXG_TXEN0
195 * Bit 13 TXXG_HOSTPAUSE
196 * Bit 12-7 TXXG_IPGT
197 * Bit 5 TXXG_32BIT_ALIGN
198 * Bit 4 TXXG_CRCEN
199 * Bit 3 TXXG_FCTX
200 * Bit 2 TXXG_FCRX
201 * Bit 1 TXXG_PADEN
202 * Bit 0 TXXG_SPRE
203 *----------------------------------------------------------------------------*/
204#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
205#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
206#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
207#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
208#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
209#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
210#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
211
212#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
213
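
These register offsets and bit masks are consumed by pm3393.c through
16-bit reads and writes routed over the TPI interface in subr.c. The usual
pattern is a read-modify-write that touches one field and leaves the rest
of the register alone. A self-contained model of that pattern follows; the
regfile array and the pmread()/pmwrite() accessors are fakes standing in
for the driver's TPI helpers, and the two constants are copied from the
header above.

#include <stdio.h>

#define SUNI1x10GEXP_REG_RXXG_CONFIG_1	0x2040	/* from the header above */
#define SUNI1x10GEXP_BITMSK_RXXG_RXEN	0x8000

/* Fake 16-bit register file standing in for the PM3393. */
static unsigned short regfile[0x4000];

static unsigned int pmread(unsigned int reg) { return regfile[reg]; }
static void pmwrite(unsigned int reg, unsigned int v)
{
	regfile[reg] = (unsigned short)v;
}

/* Enable or disable the receive MAC without disturbing other RXXG bits. */
static void rxxg_enable(int on)
{
	unsigned int v = pmread(SUNI1x10GEXP_REG_RXXG_CONFIG_1);

	if (on)
		v |= SUNI1x10GEXP_BITMSK_RXXG_RXEN;
	else
		v &= ~SUNI1x10GEXP_BITMSK_RXXG_RXEN;
	pmwrite(SUNI1x10GEXP_REG_RXXG_CONFIG_1, v);
}

int main(void)
{
	rxxg_enable(1);
	printf("RXXG_CONFIG_1 = %#06x\n", pmread(SUNI1x10GEXP_REG_RXXG_CONFIG_1));
	rxxg_enable(0);
	printf("RXXG_CONFIG_1 = %#06x\n", pmread(SUNI1x10GEXP_REG_RXXG_CONFIG_1));
	return 0;
}
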