-rw-r--r--  Documentation/networking/cxgb.txt           322
-rw-r--r--  drivers/net/Kconfig                           19
-rw-r--r--  drivers/net/Makefile                           1
-rw-r--r--  drivers/net/chelsio/Makefile                  12
-rw-r--r--  drivers/net/chelsio/ch_ethtool.h             102
-rw-r--r--  drivers/net/chelsio/common.h                 269
-rw-r--r--  drivers/net/chelsio/cphy.h                   150
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h               145
-rw-r--r--  drivers/net/chelsio/cxgb2.c                 1231
-rw-r--r--  drivers/net/chelsio/cxgb2.h                  122
-rw-r--r--  drivers/net/chelsio/elmer0.h                 157
-rw-r--r--  drivers/net/chelsio/espi.c                   386
-rw-r--r--  drivers/net/chelsio/espi.h                    67
-rw-r--r--  drivers/net/chelsio/gmac.h                   133
-rw-r--r--  drivers/net/chelsio/mv88x201x.c              258
-rw-r--r--  drivers/net/chelsio/osdep.h                  169
-rw-r--r--  drivers/net/chelsio/pm3393.c                 831
-rw-r--r--  drivers/net/chelsio/regs.h                   453
-rw-r--r--  drivers/net/chelsio/sge.c                   1451
-rw-r--r--  drivers/net/chelsio/sge.h                     79
-rw-r--r--  drivers/net/chelsio/subr.c                   831
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h      221
-rw-r--r--  drivers/net/chelsio/tp.c                     188
-rw-r--r--  drivers/net/chelsio/tp.h                     110
24 files changed, 7707 insertions, 0 deletions
diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/cxgb.txt
new file mode 100644
index 000000000000..9f2eb646c6f5
--- /dev/null
+++ b/Documentation/networking/cxgb.txt
@@ -0,0 +1,322 @@
1 Chelsio N210 10Gb Ethernet Network Controller
2
3 Driver Release Notes for Linux
4
5 Version 2.1.0
6
7 March 8, 2005
8
9CONTENTS
10========
11 INTRODUCTION
12 FEATURES
13 PERFORMANCE
14 DRIVER MESSAGES
15 KNOWN ISSUES
16 SUPPORT
17
18
19INTRODUCTION
20============
21
22 This document describes the Linux driver for the Chelsio 10Gb Ethernet Network
23 Controller. This driver supports the Chelsio N210 NIC and is backward
24 compatible with the Chelsio N110 model 10Gb NICs. This driver supports AMD64,
25 EM64T, and x86 systems.
26
27
28FEATURES
29========
30
31 Adaptive Interrupts (adaptive-rx)
32 ---------------------------------
33
34 This feature provides an adaptive algorithm that adjusts the interrupt
35 coalescing parameters, allowing the driver to dynamically adapt the latency
36 settings to achieve the highest performance during various types of network
37 load.
38
39 The interface used to control this feature is ethtool. Please see the
40 ethtool manpage for additional usage information.
41
42 By default, adaptive-rx is disabled.
43 To enable adaptive-rx:
44
45 ethtool -C <interface> adaptive-rx on
46
47 To disable adaptive-rx, use ethtool:
48
49 ethtool -C <interface> adaptive-rx off
50
51 After disabling adaptive-rx, the timer latency value will be set to 50us.
52 You may set the timer latency after disabling adaptive-rx:
53
54 ethtool -C <interface> rx-usecs <microseconds>
55
56 An example to set the timer latency value to 100us on eth0:
57
58 ethtool -C eth0 rx-usecs 100
59
60 You may also provide a timer latency value while disabling adaptive-rx:
61
62 ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>
63
64 If adaptive-rx is disabled and a timer latency value is specified, the timer
65 will be set to the specified value until changed by the user or until
66 adaptive-rx is enabled.
67
68 To view the status of the adaptive-rx and timer latency values:
69
70 ethtool -c <interface>
71
72
73 TCP Segmentation Offloading (TSO) Support
74 -----------------------------------------
75
76 This feature, also known as "large send", enables a system's protocol stack
77 to offload portions of outbound TCP processing to a network interface card,
78 thereby reducing system CPU utilization and enhancing performance.
79
80 The interface used to control this feature is ethtool version 1.8 or higher.
81 Please see the ethtool manpage for additional usage information.
82
83 By default, TSO is enabled.
84 To disable TSO:
85
86 ethtool -K <interface> tso off
87
88 To enable TSO:
89
90 ethtool -K <interface> tso on
91
92 To view the status of TSO:
93
94 ethtool -k <interface>
95
96
97PERFORMANCE
98===========
99
100 The following information is provided as an example of how to change system
101 parameters for "performance tuning" and what values to use. You may or may not
102 want to change these system parameters, depending on your server/workstation
103 application. Doing so is not warranted in any way by Chelsio Communications,
104 and is done at "YOUR OWN RISK". Chelsio will not be held responsible for loss
105 of data or damage to equipment.
106
107 Your distribution may have a different way of doing things, or you may prefer
108 a different method. These commands are shown only to provide an example of
109 what to do and are by no means definitive.
110
111 Any of the following system changes will last only until you reboot
112 your system. You may want to write a script that runs at boot-up which
113 includes the optimal settings for your system.
114
115 Setting PCI Latency Timer:
116 setpci -d 1425:* 0x0c.l=0x0000F800
117
118 Disabling TCP timestamp:
119 sysctl -w net.ipv4.tcp_timestamps=0
120
121 Disabling SACK:
122 sysctl -w net.ipv4.tcp_sack=0
123
124 Setting TCP read buffers (min/default/max):
125 sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
126
127 Setting TCP write buffers (min/pressure/max):
128 sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
129
130 Setting TCP buffer space (min/pressure/max):
131 sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
132
133 Setting large number of incoming connection requests (2.6.x only):
134 sysctl -w net.ipv4.tcp_max_syn_backlog=3000
135
136 Setting maximum receive socket buffer size:
137 sysctl -w net.core.rmem_max=524287
138
139 Setting maximum send socket buffer size:
140 sysctl -w net.core.wmem_max=524287
141
142 Setting default receive socket buffer size:
143 sysctl -w net.core.rmem_default=524287
144
145 Setting default send socket buffer size:
146 sysctl -w net.core.wmem_default=524287
147
148 Setting maximum option memory buffers:
149 sysctl -w net.core.optmem_max=524287
150
151 Setting maximum backlog (# of unprocessed packets before kernel drops):
152 sysctl -w net.core.netdev_max_backlog=300000
153
154 Set smp_affinity (on a multiprocessor system) to a single CPU:
155 echo 00000001 > /proc/irq/<interrupt_number>/smp_affinity
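    The value written to smp_affinity is a hexadecimal CPU bitmask. As an
    additional illustration (this example is not part of the original notes),
    binding the interrupt to CPU 2 instead of CPU 0 would be:
        echo 00000004 > /proc/irq/<interrupt_number>/smp_affinity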
156
157 TCP window size for single connections:
158 The receive buffer (RX_WINDOW) size must be at least as large as the
159 Bandwidth-Delay Product of the communication link between the sender and
160 receiver. Due to variations in RTT, you may want to increase the buffer
161 size up to 2 times the Bandwidth-Delay Product. See page 289 of
162 "TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
163 At 10Gb speeds, use the following formula:
164 RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
165 Example for an RTT of 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000 bytes
166 RX_WINDOW sizes of 256KB - 512KB should be sufficient.
167 Setting the min, max, and default receive buffer (RX_WINDOW) size:
168 sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"
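    As an illustration of the syntax only (the value is an example, not a
    recommendation from these notes), setting all three fields to a 512KB window:
        sysctl -w net.ipv4.tcp_rmem="524288 524288 524288"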
169
170 TCP window size for multiple connections:
171 The receive buffer (RX_WINDOW) size may be calculated the same way as for
172 single connections, but should be divided by the number of connections. The
173 smaller window prevents congestion and facilitates better pacing,
174 especially if/when MAC level flow control does not work well or when it is
175 not supported on the machine. Experimentation may be necessary to attain
176 the correct value. This method is provided as a starting point for the
177 correct receive buffer size.
178 Setting the min, max, and default receive buffer (RX_WINDOW) size is
179 performed in the same manner as for a single connection.
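    As a worked example (the connection count and window size are illustrative
    only), a 512KB aggregate window shared by 8 connections gives
    524288 / 8 = 65536 bytes per connection:
        sysctl -w net.ipv4.tcp_rmem="65536 65536 65536"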
180
181
182DRIVER MESSAGES
183===============
184
185 The following messages are the most common messages logged by syslog. These
186 may be found in /var/log/messages.
187
188 Driver up:
189 Chelsio Network Driver - version 2.1.0
190
191 NIC detected:
192 eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit
193
194 Link up:
195 eth#: link is up at 10 Gbps, full duplex
196
197 Link down:
198 eth#: link is down
199
200
201KNOWN ISSUES
202============
203
204 These issues have been identified during testing. The following information
205 is provided as a workaround for each problem. In some cases, the problem is
206 inherent to Linux or to a particular Linux distribution and/or hardware
207 platform.
208
209 1. Large number of TCP retransmits on a multiprocessor (SMP) system.
210
211 On a system with multiple CPUs, the interrupt (IRQ) for the network
212 controller may be bound to more than one CPU. This will cause TCP
213 retransmits if packet data is split across different CPUs and
214 re-assembled in a different order than expected.
215
216 To eliminate the TCP retransmits, set smp_affinity on the particular
217 interrupt to a single CPU. You can locate the interrupt (IRQ) used on
218 the N110/N210 by using ifconfig:
219 ifconfig <dev_name> | grep Interrupt
220 Set the smp_affinity to a single CPU:
221 echo 1 > /proc/irq/<interrupt_number>/smp_affinity
222
223 It is highly suggested that you do not run the irqbalance daemon on your
224 system, as this will change any smp_affinity setting you have applied.
225 The irqbalance daemon runs on a 10-second interval and binds interrupts
226 to the CPU it determines to be least loaded. To disable this daemon:
227 chkconfig --level 2345 irqbalance off
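    chkconfig only affects subsequent boots; to also stop an irqbalance daemon
    that is already running (illustrative, for init-script based distributions
    such as the RHEL releases mentioned below):
        service irqbalance stop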
228
229 By default, some Linux distributions enable the kernel irqbalance
230 feature, which performs the same function as the daemon. To disable
231 this feature, add the following option to the kernel line in your bootloader:
232 noirqbalance
233
234 Example using the Grub bootloader:
235 title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
236 root (hd0,0)
237 kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
238 initrd /initrd-2.4.21-27.ELsmp.img
239
240 2. After running insmod, the driver is loaded and an incorrect network
241 interface is brought up without running ifup.
242
243 When using 2.4.x kernels, including RHEL kernels, the Linux kernel
244 invokes a script named "hotplug". This script is primarily used to
245 automatically bring up USB devices when they are plugged in; however,
246 the script also attempts to automatically bring up a network interface
247 after loading the kernel module. The hotplug script does this by scanning
248 the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
249 for HWADDR=<mac_address>.
250
251 If the hotplug script does not find the HWADDR within any of the
252 ifcfg-eth# files, it will bring up the device with the next available
253 interface name. If this interface is already configured for a different
254 network card, your new interface will have an incorrect IP address and
255 network settings.
256
257 To solve this issue, you can add the HWADDR=<mac_address> key to the
258 interface config file of your network controller.
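    An illustrative ifcfg entry (the interface name and MAC address below are
    placeholders, not values taken from this driver):
        # /etc/sysconfig/network-scripts/ifcfg-eth1
        DEVICE=eth1
        HWADDR=00:07:43:aa:bb:cc
        ONBOOT=yes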
259
260 To disable this "hotplug" feature, you may add the driver (module name)
261 to the "blacklist" file located in /etc/hotplug. It has been noted that
262 this does not work for network devices because the net.agent script
263 does not use the blacklist file. Simply remove, or rename, the net.agent
264 script located in /etc/hotplug to disable this feature.
265
266 3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
267 on an AMD Opteron system with HyperTransport PCI-X Tunnel chipset.
268
269 If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
270 chipset, you may experience the "133-MHz Mode Split Completion Data
271 Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
272 PCI-X bus.
273
274 AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
275 can provide stale data via split completion cycles to a PCI-X card that
276 is operating at 133 Mhz", causing data corruption.
277
278 AMD provides three workarounds for this problem; however, Chelsio
279 recommends the first option for best performance:
280
281 For 133Mhz secondary bus operation, limit the transaction length and
282 the number of outstanding transactions, via BIOS configuration
283 programming of the PCI-X card, to the following:
284
285 Data Length (bytes): 2k
286 Total allowed outstanding transactions: 1
287
288 Please refer to AMD 8131-HT/PCI-X Errata 26310 Rev 3.08 August 2004,
289 section 56, "133-MHz Mode Split Completion Data Corruption", for more
290 details on this bug and the workarounds suggested by AMD.
291
292
293SUPPORT
294=======
295
296 If you have problems with the software or hardware, please contact our
297 customer support team via email at support@chelsio.com or check our website
298 at http://www.chelsio.com
299
300===============================================================================
301
302 Chelsio Communications
303 370 San Aleso Ave.
304 Suite 100
305 Sunnyvale, CA 94085
306 http://www.chelsio.com
307
308This program is free software; you can redistribute it and/or modify
309it under the terms of the GNU General Public License, version 2, as
310published by the Free Software Foundation.
311
312You should have received a copy of the GNU General Public License along
313with this program; if not, write to the Free Software Foundation, Inc.,
314 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
315
316THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
317WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
318MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
319
320 Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
321
322===============================================================================
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3a0a55b62aaf..8a7928f1d579 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2080,6 +2080,25 @@ endmenu
2080menu "Ethernet (10000 Mbit)"
2081	depends on NETDEVICES && !UML
2082
2083config CHELSIO_T1
2084 tristate "Chelsio 10Gb Ethernet support"
2085 depends on PCI
2086 help
2087 This driver supports the Chelsio N110 and N210 model 10Gb Ethernet
2088 cards. More information about adapter features and performance
2089 tuning is in <file:Documentation/networking/cxgb.txt>.
2090
2091 For general information about Chelsio and our products, visit
2092 our website at <http://www.chelsio.com>.
2093
2094 For customer support, please visit our customer support page at
2095 <http://www.chelsio.com/support.htm>.
2096
2097 Please send feedback to <linux-bugs@chelsio.com>.
2098
2099 To compile this driver as a module, choose M here: the module
2100 will be called cxgb.
2101
2102config IXGB
2103	tristate "Intel(R) PRO/10GbE support"
2104	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6202b10dbb4d..1992166ffba9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ endif
9obj-$(CONFIG_E1000) += e1000/
10obj-$(CONFIG_IBM_EMAC) += ibm_emac/
11obj-$(CONFIG_IXGB) += ixgb/
12obj-$(CONFIG_CHELSIO_T1) += chelsio/
13obj-$(CONFIG_BONDING) += bonding/
14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
15
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 000000000000..ff8c11b3a4e1
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,12 @@
1#
2# Chelsio 10Gb NIC driver for Linux.
3#
4
5obj-$(CONFIG_CHELSIO_T1) += cxgb.o
6
7EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
8
9
10cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o mv88x201x.o
11
12
diff --git a/drivers/net/chelsio/ch_ethtool.h b/drivers/net/chelsio/ch_ethtool.h
new file mode 100644
index 000000000000..c523d24836b5
--- /dev/null
+++ b/drivers/net/chelsio/ch_ethtool.h
@@ -0,0 +1,102 @@
1/*****************************************************************************
2 * *
3 * File: ch_ethtool.h *
4 * $Revision: 1.5 $ *
5 * $Date: 2005/03/23 07:15:58 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef __CHETHTOOL_LINUX_H__
40#define __CHETHTOOL_LINUX_H__
41
42/* TCB size in 32-bit words */
43#define TCB_WORDS (TCB_SIZE / 4)
44
45enum {
46 ETHTOOL_SETREG,
47 ETHTOOL_GETREG,
48 ETHTOOL_SETTPI,
49 ETHTOOL_GETTPI,
50 ETHTOOL_DEVUP,
51 ETHTOOL_GETMTUTAB,
52 ETHTOOL_SETMTUTAB,
53 ETHTOOL_GETMTU,
54 ETHTOOL_SET_PM,
55 ETHTOOL_GET_PM,
56 ETHTOOL_GET_TCAM,
57 ETHTOOL_SET_TCAM,
58 ETHTOOL_GET_TCB,
59 ETHTOOL_READ_TCAM_WORD,
60};
61
62struct ethtool_reg {
63 uint32_t cmd;
64 uint32_t addr;
65 uint32_t val;
66};
67
68struct ethtool_mtus {
69 uint32_t cmd;
70 uint16_t mtus[NMTUS];
71};
72
73struct ethtool_pm {
74 uint32_t cmd;
75 uint32_t tx_pg_sz;
76 uint32_t tx_num_pg;
77 uint32_t rx_pg_sz;
78 uint32_t rx_num_pg;
79 uint32_t pm_total;
80};
81
82struct ethtool_tcam {
83 uint32_t cmd;
84 uint32_t tcam_size;
85 uint32_t nservers;
86 uint32_t nroutes;
87};
88
89struct ethtool_tcb {
90 uint32_t cmd;
91 uint32_t tcb_index;
92 uint32_t tcb_data[TCB_WORDS];
93};
94
95struct ethtool_tcam_word {
96 uint32_t cmd;
97 uint32_t addr;
98 uint32_t buf[3];
99};
100
101#define SIOCCHETHTOOL SIOCDEVPRIVATE
102#endif
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 000000000000..017684ff48dc
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,269 @@
1/*****************************************************************************
2 * *
3 * File: common.h *
4 * $Revision: 1.5 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_COMMON_H
40#define CHELSIO_COMMON_H
41
42#define DIMOF(x) (sizeof(x)/sizeof(x[0]))
43
44#define NMTUS 8
45#define MAX_NPORTS 4
46#define TCB_SIZE 128
47
48enum {
49 CHBT_BOARD_7500,
50 CHBT_BOARD_8000,
51 CHBT_BOARD_CHT101,
52 CHBT_BOARD_CHT110,
53 CHBT_BOARD_CHT210,
54 CHBT_BOARD_CHT204,
55 CHBT_BOARD_N110,
56 CHBT_BOARD_N210,
57 CHBT_BOARD_COUGAR,
58 CHBT_BOARD_6800,
59 CHBT_BOARD_SIMUL
60};
61
62enum {
63 CHBT_TERM_FPGA,
64 CHBT_TERM_T1,
65 CHBT_TERM_T2,
66 CHBT_TERM_T3
67};
68
69enum {
70 CHBT_MAC_CHELSIO_A,
71 CHBT_MAC_IXF1010,
72 CHBT_MAC_PM3393,
73 CHBT_MAC_VSC7321,
74 CHBT_MAC_DUMMY
75};
76
77enum {
78 CHBT_PHY_88E1041,
79 CHBT_PHY_88E1111,
80 CHBT_PHY_88X2010,
81 CHBT_PHY_XPAK,
82 CHBT_PHY_MY3126,
83 CHBT_PHY_DUMMY
84};
85
86enum {
87 PAUSE_RX = 1,
88 PAUSE_TX = 2,
89 PAUSE_AUTONEG = 4
90};
91
92/* Revisions of T1 chip */
93#define TERM_T1A 0
94#define TERM_T1B 1
95#define TERM_T2 3
96
97struct tp_params {
98 unsigned int pm_size;
99 unsigned int cm_size;
100 unsigned int pm_rx_base;
101 unsigned int pm_tx_base;
102 unsigned int pm_rx_pg_size;
103 unsigned int pm_tx_pg_size;
104 unsigned int pm_rx_num_pgs;
105 unsigned int pm_tx_num_pgs;
106 unsigned int use_5tuple_mode;
107};
108
109struct sge_params {
110 unsigned int cmdQ_size[2];
111 unsigned int freelQ_size[2];
112 unsigned int large_buf_capacity;
113 unsigned int rx_coalesce_usecs;
114 unsigned int last_rx_coalesce_raw;
115 unsigned int default_rx_coalesce_usecs;
116 unsigned int sample_interval_usecs;
117 unsigned int coalesce_enable;
118 unsigned int polling;
119};
120
121struct mc5_params {
122 unsigned int mode; /* selects MC5 width */
123 unsigned int nservers; /* size of server region */
124 unsigned int nroutes; /* size of routing region */
125};
126
127/* Default MC5 region sizes */
128#define DEFAULT_SERVER_REGION_LEN 256
129#define DEFAULT_RT_REGION_LEN 1024
130
131struct pci_params {
132 unsigned short speed;
133 unsigned char width;
134 unsigned char is_pcix;
135};
136
137struct adapter_params {
138 struct sge_params sge;
139 struct mc5_params mc5;
140 struct tp_params tp;
141 struct pci_params pci;
142
143 const struct board_info *brd_info;
144
145 unsigned short mtus[NMTUS];
146 unsigned int nports; /* # of ethernet ports */
147 unsigned int stats_update_period;
148 unsigned short chip_revision;
149 unsigned char chip_version;
150 unsigned char is_asic;
151};
152
153struct pci_err_cnt {
154 unsigned int master_parity_err;
155 unsigned int sig_target_abort;
156 unsigned int rcv_target_abort;
157 unsigned int rcv_master_abort;
158 unsigned int sig_sys_err;
159 unsigned int det_parity_err;
160 unsigned int pio_parity_err;
161 unsigned int wf_parity_err;
162 unsigned int rf_parity_err;
163 unsigned int cf_parity_err;
164};
165
166struct link_config {
167 unsigned int supported; /* link capabilities */
168 unsigned int advertising; /* advertised capabilities */
169 unsigned short requested_speed; /* speed user has requested */
170 unsigned short speed; /* actual link speed */
171 unsigned char requested_duplex; /* duplex user has requested */
172 unsigned char duplex; /* actual link duplex */
173 unsigned char requested_fc; /* flow control user has requested */
174 unsigned char fc; /* actual link flow control */
175 unsigned char autoneg; /* autonegotiating? */
176};
177
178#define SPEED_INVALID 0xffff
179#define DUPLEX_INVALID 0xff
180
181struct mdio_ops;
182struct gmac;
183struct gphy;
184
185struct board_info {
186 unsigned char board;
187 unsigned char port_number;
188 unsigned long caps;
189 unsigned char chip_term;
190 unsigned char chip_mac;
191 unsigned char chip_phy;
192 unsigned int clock_core;
193 unsigned int clock_mc3;
194 unsigned int clock_mc4;
195 unsigned int espi_nports;
196 unsigned int clock_cspi;
197 unsigned int clock_elmer0;
198 unsigned char mdio_mdien;
199 unsigned char mdio_mdiinv;
200 unsigned char mdio_mdc;
201 unsigned char mdio_phybaseaddr;
202 struct gmac *gmac;
203 struct gphy *gphy;
204 struct mdio_ops *mdio_ops;
205 const char *desc;
206};
207
208#include "osdep.h"
209
210#ifndef PCI_VENDOR_ID_CHELSIO
211#define PCI_VENDOR_ID_CHELSIO 0x1425
212#endif
213
214extern struct pci_device_id t1_pci_tbl[];
215
216static inline int t1_is_asic(const adapter_t *adapter)
217{
218 return adapter->params.is_asic;
219}
220
221static inline int adapter_matches_type(const adapter_t *adapter,
222 int version, int revision)
223{
224 return adapter->params.chip_version == version &&
225 adapter->params.chip_revision == revision;
226}
227
228#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
229#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
230
231/* Returns true if an adapter supports VLAN acceleration and TSO */
232static inline int vlan_tso_capable(const adapter_t *adapter)
233{
234 return !t1_is_T1B(adapter);
235}
236
237#define for_each_port(adapter, iter) \
238 for (iter = 0; iter < (adapter)->params.nports; ++iter)
239
240#define board_info(adapter) ((adapter)->params.brd_info)
241#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
242
243static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
244{
245 return board_info(adap)->clock_core / 1000000;
246}
247
248int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
249int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
250
251void t1_interrupts_enable(adapter_t *adapter);
252void t1_interrupts_disable(adapter_t *adapter);
253void t1_interrupts_clear(adapter_t *adapter);
254int elmer0_ext_intr_handler(adapter_t *adapter);
255int t1_slow_intr_handler(adapter_t *adapter);
256
257int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
258const struct board_info *t1_get_board_info(unsigned int board_id);
259const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
260 unsigned short ssid);
261int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
262int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
263 struct adapter_params *p);
264int t1_init_hw_modules(adapter_t *adapter);
265int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
266void t1_free_sw_modules(adapter_t *adapter);
267void t1_fatal_err(adapter_t *adapter);
268#endif
269
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 000000000000..1bc2248264c0
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,150 @@
1/*****************************************************************************
2 * *
3 * File: cphy.h *
4 * $Revision: 1.4 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_CPHY_H
40#define CHELSIO_CPHY_H
41
42#include "common.h"
43
44struct mdio_ops {
45 void (*init)(adapter_t *adapter, const struct board_info *bi);
46 int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
47 int reg_addr, unsigned int *val);
48 int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
49 int reg_addr, unsigned int val);
50};
51
52/* PHY interrupt types */
53enum {
54 cphy_cause_link_change = 0x1,
55 cphy_cause_error = 0x2
56};
57
58struct cphy;
59
60/* PHY operations */
61struct cphy_ops {
62 void (*destroy)(struct cphy *);
63 int (*reset)(struct cphy *, int wait);
64
65 int (*interrupt_enable)(struct cphy *);
66 int (*interrupt_disable)(struct cphy *);
67 int (*interrupt_clear)(struct cphy *);
68 int (*interrupt_handler)(struct cphy *);
69
70 int (*autoneg_enable)(struct cphy *);
71 int (*autoneg_disable)(struct cphy *);
72 int (*autoneg_restart)(struct cphy *);
73
74 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
75 int (*set_loopback)(struct cphy *, int on);
76 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
77 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
78 int *duplex, int *fc);
79};
80
81/* A PHY instance */
82struct cphy {
83 int addr; /* PHY address */
84 adapter_t *adapter; /* associated adapter */
85 struct cphy_ops *ops; /* PHY operations */
86 int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
87 int reg_addr, unsigned int *val);
88 int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
89 int reg_addr, unsigned int val);
90 struct cphy_instance *instance;
91};
92
93/* Convenience MDIO read/write wrappers */
94static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
95 unsigned int *valp)
96{
97 return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
98}
99
100static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
101 unsigned int val)
102{
103 return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
104}
105
106static inline int simple_mdio_read(struct cphy *cphy, int reg,
107 unsigned int *valp)
108{
109 return mdio_read(cphy, 0, reg, valp);
110}
111
112static inline int simple_mdio_write(struct cphy *cphy, int reg,
113 unsigned int val)
114{
115 return mdio_write(cphy, 0, reg, val);
116}
117
118/* Convenience initializer */
119static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
120 int phy_addr, struct cphy_ops *phy_ops,
121 struct mdio_ops *mdio_ops)
122{
123 phy->adapter = adapter;
124 phy->addr = phy_addr;
125 phy->ops = phy_ops;
126 if (mdio_ops) {
127 phy->mdio_read = mdio_ops->read;
128 phy->mdio_write = mdio_ops->write;
129 }
130}
131
132/* Operations of the PHY-instance factory */
133struct gphy {
134 /* Construct a PHY instance with the given PHY address */
135 struct cphy *(*create)(adapter_t *adapter, int phy_addr,
136 struct mdio_ops *mdio_ops);
137
138 /*
139 * Reset the PHY chip. This resets the whole PHY chip, not individual
140 * ports.
141 */
142 int (*reset)(adapter_t *adapter);
143};
144
145extern struct gphy t1_my3126_ops;
146extern struct gphy t1_mv88e1xxx_ops;
147extern struct gphy t1_xpak_ops;
148extern struct gphy t1_mv88x201x_ops;
149extern struct gphy t1_dummy_phy_ops;
150#endif
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 000000000000..45e9248979f1
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
1/*****************************************************************************
2 * *
3 * File: cpl5_cmd.h *
4 * $Revision: 1.4 $ *
5 * $Date: 2005/03/23 07:15:58 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CPL5_CMD_H
40#define _CPL5_CMD_H
41
42#include <asm/byteorder.h>
43
44#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
45#error "Adjust your <asm/byteorder.h> defines"
46#endif
47
48enum CPL_opcode {
49 CPL_RX_PKT = 0xAD,
50 CPL_TX_PKT = 0xB2,
51 CPL_TX_PKT_LSO = 0xB6,
52};
53
54enum { /* TX_PKT_LSO ethernet types */
55 CPL_ETH_II,
56 CPL_ETH_II_VLAN,
57 CPL_ETH_802_3,
58 CPL_ETH_802_3_VLAN
59};
60
61struct cpl_rx_data {
62 __u32 rsvd0;
63 __u32 len;
64 __u32 seq;
65 __u16 urg;
66 __u8 rsvd1;
67 __u8 status;
68};
69
70/*
71 * We want this header's alignment to be no more stringent than 2-byte aligned.
72 * All fields are u8 or u16 except for the length. However that field is not
73 * used so we break it into 2 16-bit parts to easily meet our alignment needs.
74 */
75struct cpl_tx_pkt {
76 __u8 opcode;
77#if defined(__LITTLE_ENDIAN_BITFIELD)
78 __u8 iff:4;
79 __u8 ip_csum_dis:1;
80 __u8 l4_csum_dis:1;
81 __u8 vlan_valid:1;
82 __u8 rsvd:1;
83#else
84 __u8 rsvd:1;
85 __u8 vlan_valid:1;
86 __u8 l4_csum_dis:1;
87 __u8 ip_csum_dis:1;
88 __u8 iff:4;
89#endif
90 __u16 vlan;
91 __u16 len_hi;
92 __u16 len_lo;
93};
94
95struct cpl_tx_pkt_lso {
96 __u8 opcode;
97#if defined(__LITTLE_ENDIAN_BITFIELD)
98 __u8 iff:4;
99 __u8 ip_csum_dis:1;
100 __u8 l4_csum_dis:1;
101 __u8 vlan_valid:1;
102 __u8 rsvd:1;
103#else
104 __u8 rsvd:1;
105 __u8 vlan_valid:1;
106 __u8 l4_csum_dis:1;
107 __u8 ip_csum_dis:1;
108 __u8 iff:4;
109#endif
110 __u16 vlan;
111 __u32 len;
112
113 __u32 rsvd2;
114 __u8 rsvd3;
115#if defined(__LITTLE_ENDIAN_BITFIELD)
116 __u8 tcp_hdr_words:4;
117 __u8 ip_hdr_words:4;
118#else
119 __u8 ip_hdr_words:4;
120 __u8 tcp_hdr_words:4;
121#endif
122 __u16 eth_type_mss;
123};
124
125struct cpl_rx_pkt {
126 __u8 opcode;
127#if defined(__LITTLE_ENDIAN_BITFIELD)
128 __u8 iff:4;
129 __u8 csum_valid:1;
130 __u8 bad_pkt:1;
131 __u8 vlan_valid:1;
132 __u8 rsvd:1;
133#else
134 __u8 rsvd:1;
135 __u8 vlan_valid:1;
136 __u8 bad_pkt:1;
137 __u8 csum_valid:1;
138 __u8 iff:4;
139#endif
140 __u16 csum;
141 __u16 vlan;
142 __u16 len;
143};
144
145#endif
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 000000000000..48c4d5acfcd1
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1231 @@
1/*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#include "common.h"
40
41#include <linux/config.h>
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/if_vlan.h>
48#include <linux/mii.h>
49#include <linux/sockios.h>
50#include <linux/proc_fs.h>
51#include <linux/version.h>
52#include <linux/workqueue.h>
53#include <asm/uaccess.h>
54
55#include "ch_ethtool.h"
56#include "cpl5_cmd.h"
57#include "regs.h"
58#include "gmac.h"
59#include "cphy.h"
60#include "sge.h"
61#include "tp.h"
62#include "espi.h"
63
64static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
65{
66 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
67}
68
69static inline void cancel_mac_stats_update(struct adapter *ap)
70{
71 cancel_delayed_work(&ap->stats_update_task);
72}
73
74#if BITS_PER_LONG == 64 && !defined(CONFIG_X86_64)
75# define FMT64 "l"
76#else
77# define FMT64 "ll"
78#endif
79
80# define DRV_TYPE ""
81# define MODULE_DESC "Chelsio Network Driver"
82
83static char driver_name[] = DRV_NAME;
84static char driver_string[] = "Chelsio " DRV_TYPE "Network Driver";
85static char driver_version[] = "2.1.0";
86
87#define PCI_DMA_64BIT ~0ULL
88#define PCI_DMA_32BIT 0xffffffffULL
89
90#define MAX_CMDQ_ENTRIES 16384
91#define MAX_CMDQ1_ENTRIES 1024
92#define MAX_RX_BUFFERS 16384
93#define MAX_RX_JUMBO_BUFFERS 16384
94#define MAX_TX_BUFFERS_HIGH 16384U
95#define MAX_TX_BUFFERS_LOW 1536U
96#define MIN_FL_ENTRIES 32
97
98#define PORT_MASK ((1 << MAX_NPORTS) - 1)
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104/*
105 * The EEPROM is actually bigger but only the first few bytes are used so we
106 * only report those.
107 */
108#define EEPROM_SIZE 32
109
110MODULE_DESCRIPTION(MODULE_DESC);
111MODULE_AUTHOR("Chelsio Communications");
112MODULE_LICENSE("GPL");
113MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
114
115static int dflt_msg_enable = DFLT_MSG_ENABLE;
116
117MODULE_PARM(dflt_msg_enable, "i");
118MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
119
120
121static const char pci_speed[][4] = {
122 "33", "66", "100", "133"
123};
124
125/*
126 * Setup MAC to receive the types of packets we want.
127 */
128static void t1_set_rxmode(struct net_device *dev)
129{
130 struct adapter *adapter = dev->priv;
131 struct cmac *mac = adapter->port[dev->if_port].mac;
132 struct t1_rx_mode rm;
133
134 rm.dev = dev;
135 rm.idx = 0;
136 rm.list = dev->mc_list;
137 mac->ops->set_rx_mode(mac, &rm);
138}
139
140static void link_report(struct port_info *p)
141{
142 if (!netif_carrier_ok(p->dev))
143 printk(KERN_INFO "%s: link is down\n", p->dev->name);
144 else {
145 const char *s = "10 Mbps";
146
147 switch (p->link_config.speed) {
148 case SPEED_10000: s = "10 Gbps"; break;
149 case SPEED_1000: s = "1000 Mbps"; break;
150 case SPEED_100: s = "100 Mbps"; break;
151 }
152
153 printk(KERN_INFO "%s: link is up at %s, %s duplex\n",
154 p->dev->name, s,
155 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
156 }
157}
158
159void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
160 int speed, int duplex, int pause)
161{
162 struct port_info *p = &adapter->port[port_id];
163
164 if (link_stat != netif_carrier_ok(p->dev)) {
165 if (link_stat)
166 netif_carrier_on(p->dev);
167 else
168 netif_carrier_off(p->dev);
169 link_report(p);
170
171 }
172}
173
174static void link_start(struct port_info *p)
175{
176 struct cmac *mac = p->mac;
177
178 mac->ops->reset(mac);
179 if (mac->ops->macaddress_set)
180 mac->ops->macaddress_set(mac, p->dev->dev_addr);
181 t1_set_rxmode(p->dev);
182 t1_link_start(p->phy, mac, &p->link_config);
183 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
184}
185
186static void enable_hw_csum(struct adapter *adapter)
187{
188 if (adapter->flags & TSO_CAPABLE)
189 t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
190 if (adapter->flags & UDP_CSUM_CAPABLE)
191 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
192 t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
193}
194
195/*
196 * Things to do upon first use of a card.
197 * This must run with the rtnl lock held.
198 */
199static int cxgb_up(struct adapter *adapter)
200{
201 int err = 0;
202
203 if (!(adapter->flags & FULL_INIT_DONE)) {
204 err = t1_init_hw_modules(adapter);
205 if (err)
206 goto out_err;
207
208 enable_hw_csum(adapter);
209 adapter->flags |= FULL_INIT_DONE;
210 }
211
212 t1_interrupts_clear(adapter);
213
214 if ((err = request_irq(adapter->pdev->irq, &t1_interrupt, SA_SHIRQ,
215 adapter->name, adapter)))
216 goto out_err;
217
218 t1_sge_start(adapter->sge);
219 t1_interrupts_enable(adapter);
220
221 err = 0;
222 out_err:
223 return err;
224}
225
226/*
227 * Release resources when all the ports have been stopped.
228 */
229static void cxgb_down(struct adapter *adapter)
230{
231 t1_sge_stop(adapter->sge);
232 t1_interrupts_disable(adapter);
233 free_irq(adapter->pdev->irq, adapter);
234}
235
236static int cxgb_open(struct net_device *dev)
237{
238 int err;
239 struct adapter *adapter = dev->priv;
240 int other_ports = adapter->open_device_map & PORT_MASK;
241
242 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
243 return err;
244
245 __set_bit(dev->if_port, &adapter->open_device_map);
246 link_start(&adapter->port[dev->if_port]);
247 netif_start_queue(dev);
248 if (!other_ports && adapter->params.stats_update_period)
249 schedule_mac_stats_update(adapter,
250 adapter->params.stats_update_period);
251 return 0;
252}
253
254static int cxgb_close(struct net_device *dev)
255{
256 struct adapter *adapter = dev->priv;
257 struct port_info *p = &adapter->port[dev->if_port];
258 struct cmac *mac = p->mac;
259
260 netif_stop_queue(dev);
261 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
262 netif_carrier_off(dev);
263
264 clear_bit(dev->if_port, &adapter->open_device_map);
265 if (adapter->params.stats_update_period &&
266 !(adapter->open_device_map & PORT_MASK)) {
267 /* Stop statistics accumulation. */
268 smp_mb__after_clear_bit();
269 spin_lock(&adapter->work_lock); /* sync with update task */
270 spin_unlock(&adapter->work_lock);
271 cancel_mac_stats_update(adapter);
272 }
273
274 if (!adapter->open_device_map)
275 cxgb_down(adapter);
276 return 0;
277}
278
279static struct net_device_stats *t1_get_stats(struct net_device *dev)
280{
281 struct adapter *adapter = dev->priv;
282 struct port_info *p = &adapter->port[dev->if_port];
283 struct net_device_stats *ns = &p->netstats;
284 const struct cmac_statistics *pstats;
285
286 /* Do a full update of the MAC stats */
287 pstats = p->mac->ops->statistics_update(p->mac,
288 MAC_STATS_UPDATE_FULL);
289
290 ns->tx_packets = pstats->TxUnicastFramesOK +
291 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
292
293 ns->rx_packets = pstats->RxUnicastFramesOK +
294 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
295
296 ns->tx_bytes = pstats->TxOctetsOK;
297 ns->rx_bytes = pstats->RxOctetsOK;
298
299 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
300 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
301 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
302 pstats->RxFCSErrors + pstats->RxAlignErrors +
303 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
304 pstats->RxSymbolErrors + pstats->RxRuntErrors;
305
306 ns->multicast = pstats->RxMulticastFramesOK;
307 ns->collisions = pstats->TxTotalCollisions;
308
309 /* detailed rx_errors */
310 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
311 pstats->RxJabberErrors;
312 ns->rx_over_errors = 0;
313 ns->rx_crc_errors = pstats->RxFCSErrors;
314 ns->rx_frame_errors = pstats->RxAlignErrors;
315 ns->rx_fifo_errors = 0;
316 ns->rx_missed_errors = 0;
317
318 /* detailed tx_errors */
319 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
320 ns->tx_carrier_errors = 0;
321 ns->tx_fifo_errors = pstats->TxUnderrun;
322 ns->tx_heartbeat_errors = 0;
323 ns->tx_window_errors = pstats->TxLateCollisions;
324 return ns;
325}
326
327static u32 get_msglevel(struct net_device *dev)
328{
329 struct adapter *adapter = dev->priv;
330
331 return adapter->msg_enable;
332}
333
334static void set_msglevel(struct net_device *dev, u32 val)
335{
336 struct adapter *adapter = dev->priv;
337
338 adapter->msg_enable = val;
339}
340
341static char stats_strings[][ETH_GSTRING_LEN] = {
342 "TxOctetsOK",
343 "TxOctetsBad",
344 "TxUnicastFramesOK",
345 "TxMulticastFramesOK",
346 "TxBroadcastFramesOK",
347 "TxPauseFrames",
348 "TxFramesWithDeferredXmissions",
349 "TxLateCollisions",
350 "TxTotalCollisions",
351 "TxFramesAbortedDueToXSCollisions",
352 "TxUnderrun",
353 "TxLengthErrors",
354 "TxInternalMACXmitError",
355 "TxFramesWithExcessiveDeferral",
356 "TxFCSErrors",
357
358 "RxOctetsOK",
359 "RxOctetsBad",
360 "RxUnicastFramesOK",
361 "RxMulticastFramesOK",
362 "RxBroadcastFramesOK",
363 "RxPauseFrames",
364 "RxFCSErrors",
365 "RxAlignErrors",
366 "RxSymbolErrors",
367 "RxDataErrors",
368 "RxSequenceErrors",
369 "RxRuntErrors",
370 "RxJabberErrors",
371 "RxInternalMACRcvError",
372 "RxInRangeLengthErrors",
373 "RxOutOfRangeLengthField",
374 "RxFrameTooLongErrors"
375};
376
377static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
378{
379 struct adapter *adapter = dev->priv;
380
381 strcpy(info->driver, driver_name);
382 strcpy(info->version, driver_version);
383 strcpy(info->fw_version, "N/A");
384 strcpy(info->bus_info, pci_name(adapter->pdev));
385}
386
387static int get_stats_count(struct net_device *dev)
388{
389 return ARRAY_SIZE(stats_strings);
390}
391
392static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
393{
394 if (stringset == ETH_SS_STATS)
395 memcpy(data, stats_strings, sizeof(stats_strings));
396}
397
398static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
399 u64 *data)
400{
401 struct adapter *adapter = dev->priv;
402 struct cmac *mac = adapter->port[dev->if_port].mac;
403 const struct cmac_statistics *s;
404
405 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
406
407 *data++ = s->TxOctetsOK;
408 *data++ = s->TxOctetsBad;
409 *data++ = s->TxUnicastFramesOK;
410 *data++ = s->TxMulticastFramesOK;
411 *data++ = s->TxBroadcastFramesOK;
412 *data++ = s->TxPauseFrames;
413 *data++ = s->TxFramesWithDeferredXmissions;
414 *data++ = s->TxLateCollisions;
415 *data++ = s->TxTotalCollisions;
416 *data++ = s->TxFramesAbortedDueToXSCollisions;
417 *data++ = s->TxUnderrun;
418 *data++ = s->TxLengthErrors;
419 *data++ = s->TxInternalMACXmitError;
420 *data++ = s->TxFramesWithExcessiveDeferral;
421 *data++ = s->TxFCSErrors;
422
423 *data++ = s->RxOctetsOK;
424 *data++ = s->RxOctetsBad;
425 *data++ = s->RxUnicastFramesOK;
426 *data++ = s->RxMulticastFramesOK;
427 *data++ = s->RxBroadcastFramesOK;
428 *data++ = s->RxPauseFrames;
429 *data++ = s->RxFCSErrors;
430 *data++ = s->RxAlignErrors;
431 *data++ = s->RxSymbolErrors;
432 *data++ = s->RxDataErrors;
433 *data++ = s->RxSequenceErrors;
434 *data++ = s->RxRuntErrors;
435 *data++ = s->RxJabberErrors;
436 *data++ = s->RxInternalMACRcvError;
437 *data++ = s->RxInRangeLengthErrors;
438 *data++ = s->RxOutOfRangeLengthField;
439 *data++ = s->RxFrameTooLongErrors;
440}
441
442static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
443{
444 struct adapter *adapter = dev->priv;
445 struct port_info *p = &adapter->port[dev->if_port];
446
447 cmd->supported = p->link_config.supported;
448 cmd->advertising = p->link_config.advertising;
449
450 if (netif_carrier_ok(dev)) {
451 cmd->speed = p->link_config.speed;
452 cmd->duplex = p->link_config.duplex;
453 } else {
454 cmd->speed = -1;
455 cmd->duplex = -1;
456 }
457
458 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
459 cmd->phy_address = p->phy->addr;
460 cmd->transceiver = XCVR_EXTERNAL;
461 cmd->autoneg = p->link_config.autoneg;
462 cmd->maxtxpkt = 0;
463 cmd->maxrxpkt = 0;
464 return 0;
465}
466
467static int speed_duplex_to_caps(int speed, int duplex)
468{
469 int cap = 0;
470
471 switch (speed) {
472 case SPEED_10:
473 if (duplex == DUPLEX_FULL)
474 cap = SUPPORTED_10baseT_Full;
475 else
476 cap = SUPPORTED_10baseT_Half;
477 break;
478 case SPEED_100:
479 if (duplex == DUPLEX_FULL)
480 cap = SUPPORTED_100baseT_Full;
481 else
482 cap = SUPPORTED_100baseT_Half;
483 break;
484 case SPEED_1000:
485 if (duplex == DUPLEX_FULL)
486 cap = SUPPORTED_1000baseT_Full;
487 else
488 cap = SUPPORTED_1000baseT_Half;
489 break;
490 case SPEED_10000:
491 if (duplex == DUPLEX_FULL)
492 cap = SUPPORTED_10000baseT_Full;
493 }
494 return cap;
495}
496
497#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
498 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
499 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
500 ADVERTISED_10000baseT_Full)
501
502static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
503{
504 struct adapter *adapter = dev->priv;
505 struct port_info *p = &adapter->port[dev->if_port];
506 struct link_config *lc = &p->link_config;
507
508 if (!(lc->supported & SUPPORTED_Autoneg))
509 return -EOPNOTSUPP; /* can't change speed/duplex */
510
511 if (cmd->autoneg == AUTONEG_DISABLE) {
512 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
513
514 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
515 return -EINVAL;
516 lc->requested_speed = cmd->speed;
517 lc->requested_duplex = cmd->duplex;
518 lc->advertising = 0;
519 } else {
520 cmd->advertising &= ADVERTISED_MASK;
521 if (cmd->advertising & (cmd->advertising - 1))
522 cmd->advertising = lc->supported;
523 cmd->advertising &= lc->supported;
524 if (!cmd->advertising)
525 return -EINVAL;
526 lc->requested_speed = SPEED_INVALID;
527 lc->requested_duplex = DUPLEX_INVALID;
528 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
529 }
530 lc->autoneg = cmd->autoneg;
531 if (netif_running(dev))
532 t1_link_start(p->phy, p->mac, lc);
533 return 0;
534}
535
536static void get_pauseparam(struct net_device *dev,
537 struct ethtool_pauseparam *epause)
538{
539 struct adapter *adapter = dev->priv;
540 struct port_info *p = &adapter->port[dev->if_port];
541
542 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
543 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
544 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
545}
546
547static int set_pauseparam(struct net_device *dev,
548 struct ethtool_pauseparam *epause)
549{
550 struct adapter *adapter = dev->priv;
551 struct port_info *p = &adapter->port[dev->if_port];
552 struct link_config *lc = &p->link_config;
553
554 if (epause->autoneg == AUTONEG_DISABLE)
555 lc->requested_fc = 0;
556 else if (lc->supported & SUPPORTED_Autoneg)
557 lc->requested_fc = PAUSE_AUTONEG;
558 else
559 return -EINVAL;
560
561 if (epause->rx_pause)
562 lc->requested_fc |= PAUSE_RX;
563 if (epause->tx_pause)
564 lc->requested_fc |= PAUSE_TX;
565 if (lc->autoneg == AUTONEG_ENABLE) {
566 if (netif_running(dev))
567 t1_link_start(p->phy, p->mac, lc);
568 } else {
569 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
570 if (netif_running(dev))
571 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
572 lc->fc);
573 }
574 return 0;
575}
576
577static u32 get_rx_csum(struct net_device *dev)
578{
579 struct adapter *adapter = dev->priv;
580
581 return (adapter->flags & RX_CSUM_ENABLED) != 0;
582}
583
584static int set_rx_csum(struct net_device *dev, u32 data)
585{
586 struct adapter *adapter = dev->priv;
587
588 if (data)
589 adapter->flags |= RX_CSUM_ENABLED;
590 else
591 adapter->flags &= ~RX_CSUM_ENABLED;
592 return 0;
593}
594
595static int set_tso(struct net_device *dev, u32 value)
596{
597 struct adapter *adapter = dev->priv;
598
599 if (!(adapter->flags & TSO_CAPABLE))
600 return value ? -EOPNOTSUPP : 0;
601 return ethtool_op_set_tso(dev, value);
602}
603
604static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
605{
606 struct adapter *adapter = dev->priv;
607 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
608
609 e->rx_max_pending = MAX_RX_BUFFERS;
610 e->rx_mini_max_pending = 0;
611 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
612 e->tx_max_pending = MAX_CMDQ_ENTRIES;
613
614 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
615 e->rx_mini_pending = 0;
616 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
617 e->tx_pending = adapter->params.sge.cmdQ_size[0];
618}
619
620static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
621{
622 struct adapter *adapter = dev->priv;
623 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
624
625 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
626 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
627 e->tx_pending > MAX_CMDQ_ENTRIES ||
628 e->rx_pending < MIN_FL_ENTRIES ||
629 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
630 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
631 return -EINVAL;
632
633 if (adapter->flags & FULL_INIT_DONE)
634 return -EBUSY;
635
636 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
637 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
638 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
639 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
640 MAX_CMDQ1_ENTRIES : e->tx_pending;
641 return 0;
642}
643
644static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
645{
646 struct adapter *adapter = dev->priv;
647
648 unsigned int sge_coalesce_usecs = 0;
649
650 sge_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
651 sge_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
652 if ( (adapter->params.sge.coalesce_enable && !c->use_adaptive_rx_coalesce) &&
653 (c->rx_coalesce_usecs == sge_coalesce_usecs) ) {
654 adapter->params.sge.rx_coalesce_usecs =
655 adapter->params.sge.default_rx_coalesce_usecs;
656 } else {
657 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
658 }
659
660 adapter->params.sge.last_rx_coalesce_raw = adapter->params.sge.rx_coalesce_usecs;
661 adapter->params.sge.last_rx_coalesce_raw *= (board_info(adapter)->clock_core / 1000000);
662 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
663 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
664 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
665 return 0;
666}
667
668static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
669{
670 struct adapter *adapter = dev->priv;
671
672 if (adapter->params.sge.coalesce_enable) { /* Adaptive algorithm on */
673 c->rx_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
674 c->rx_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
675 } else {
676 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
677 }
678 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
679 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
680 return 0;
681}
682
683static int get_eeprom_len(struct net_device *dev)
684{
685 struct adapter *adapter = dev->priv;
686
687 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
688}
689
690#define EEPROM_MAGIC(ap) \
691 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
692
693static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
694 u8 *data)
695{
696 int i;
697 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
698 struct adapter *adapter = dev->priv;
699
700 e->magic = EEPROM_MAGIC(adapter);
701 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
702 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
703 memcpy(data, buf + e->offset, e->len);
704 return 0;
705}
706
707static struct ethtool_ops t1_ethtool_ops = {
708 .get_settings = get_settings,
709 .set_settings = set_settings,
710 .get_drvinfo = get_drvinfo,
711 .get_msglevel = get_msglevel,
712 .set_msglevel = set_msglevel,
713 .get_ringparam = get_sge_param,
714 .set_ringparam = set_sge_param,
715 .get_coalesce = get_coalesce,
716 .set_coalesce = set_coalesce,
717 .get_eeprom_len = get_eeprom_len,
718 .get_eeprom = get_eeprom,
719 .get_pauseparam = get_pauseparam,
720 .set_pauseparam = set_pauseparam,
721 .get_rx_csum = get_rx_csum,
722 .set_rx_csum = set_rx_csum,
723 .get_tx_csum = ethtool_op_get_tx_csum,
724 .set_tx_csum = ethtool_op_set_tx_csum,
725 .get_sg = ethtool_op_get_sg,
726 .set_sg = ethtool_op_set_sg,
727 .get_link = ethtool_op_get_link,
728 .get_strings = get_strings,
729 .get_stats_count = get_stats_count,
730 .get_ethtool_stats = get_stats,
731 .get_tso = ethtool_op_get_tso,
732 .set_tso = set_tso,
733};
734
735static int ethtool_ioctl(struct net_device *dev, void *useraddr)
736{
737 u32 cmd;
738 struct adapter *adapter = dev->priv;
739
740 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
741 return -EFAULT;
742
743 switch (cmd) {
744 case ETHTOOL_SETREG: {
745 struct ethtool_reg edata;
746
747 if (!capable(CAP_NET_ADMIN))
748 return -EPERM;
749 if (copy_from_user(&edata, useraddr, sizeof(edata)))
750 return -EFAULT;
751 if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
752 return -EINVAL;
753 if (edata.addr == A_ESPI_MISC_CONTROL)
754 t1_espi_set_misc_ctrl(adapter, edata.val);
755 else {
756 if (edata.addr == 0x950)
757 t1_sge_set_ptimeout(adapter, edata.val);
758 else
759 writel(edata.val, adapter->regs + edata.addr);
760 }
761 break;
762 }
763 case ETHTOOL_GETREG: {
764 struct ethtool_reg edata;
765
766 if (copy_from_user(&edata, useraddr, sizeof(edata)))
767 return -EFAULT;
768 if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
769 return -EINVAL;
770 if (edata.addr >= 0x900 && edata.addr <= 0x93c)
771 edata.val = t1_espi_get_mon(adapter, edata.addr, 1);
772 else {
773 if (edata.addr == 0x950)
774 edata.val = t1_sge_get_ptimeout(adapter);
775 else
776 edata.val = readl(adapter->regs + edata.addr);
777 }
778 if (copy_to_user(useraddr, &edata, sizeof(edata)))
779 return -EFAULT;
780 break;
781 }
782 case ETHTOOL_SETTPI: {
783 struct ethtool_reg edata;
784
785 if (!capable(CAP_NET_ADMIN))
786 return -EPERM;
787 if (copy_from_user(&edata, useraddr, sizeof(edata)))
788 return -EFAULT;
789 if ((edata.addr & 3) != 0)
790 return -EINVAL;
791 t1_tpi_write(adapter, edata.addr, edata.val);
792 break;
793 }
794 case ETHTOOL_GETTPI: {
795 struct ethtool_reg edata;
796
797 if (copy_from_user(&edata, useraddr, sizeof(edata)))
798 return -EFAULT;
799 if ((edata.addr & 3) != 0)
800 return -EINVAL;
801 t1_tpi_read(adapter, edata.addr, &edata.val);
802 if (copy_to_user(useraddr, &edata, sizeof(edata)))
803 return -EFAULT;
804 break;
805 }
806 default:
807 return -EOPNOTSUPP;
808 }
809 return 0;
810}
811
812static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
813{
814 struct adapter *adapter = dev->priv;
815 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
816
817 switch (cmd) {
818 case SIOCGMIIPHY:
819 data->phy_id = adapter->port[dev->if_port].phy->addr;
820 /* FALLTHRU */
821 case SIOCGMIIREG: {
822 struct cphy *phy = adapter->port[dev->if_port].phy;
823 u32 val;
824
825 if (!phy->mdio_read) return -EOPNOTSUPP;
826 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
827 &val);
828 data->val_out = val;
829 break;
830 }
831 case SIOCSMIIREG: {
832 struct cphy *phy = adapter->port[dev->if_port].phy;
833
834 if (!capable(CAP_NET_ADMIN)) return -EPERM;
835 if (!phy->mdio_write) return -EOPNOTSUPP;
836 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
837 data->val_in);
838 break;
839 }
840
841 case SIOCCHETHTOOL:
842 return ethtool_ioctl(dev, (void *)req->ifr_data);
843 default:
844 return -EOPNOTSUPP;
845 }
846 return 0;
847}
848
849static int t1_change_mtu(struct net_device *dev, int new_mtu)
850{
851 int ret;
852 struct adapter *adapter = dev->priv;
853 struct cmac *mac = adapter->port[dev->if_port].mac;
854
855 if (!mac->ops->set_mtu)
856 return -EOPNOTSUPP;
857 if (new_mtu < 68)
858 return -EINVAL;
859 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
860 return ret;
861 dev->mtu = new_mtu;
862 return 0;
863}
864
865static int t1_set_mac_addr(struct net_device *dev, void *p)
866{
867 struct adapter *adapter = dev->priv;
868 struct cmac *mac = adapter->port[dev->if_port].mac;
869 struct sockaddr *addr = p;
870
871 if (!mac->ops->macaddress_set)
872 return -EOPNOTSUPP;
873
874 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
875 mac->ops->macaddress_set(mac, dev->dev_addr);
876 return 0;
877}
878
879#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
880static void vlan_rx_register(struct net_device *dev,
881 struct vlan_group *grp)
882{
883 struct adapter *adapter = dev->priv;
884
885 spin_lock_irq(&adapter->async_lock);
886 adapter->vlan_grp = grp;
887 t1_set_vlan_accel(adapter, grp != NULL);
888 spin_unlock_irq(&adapter->async_lock);
889}
890
891static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
892{
893 struct adapter *adapter = dev->priv;
894
895 spin_lock_irq(&adapter->async_lock);
896 if (adapter->vlan_grp)
897 adapter->vlan_grp->vlan_devices[vid] = NULL;
898 spin_unlock_irq(&adapter->async_lock);
899}
900#endif
901
902#ifdef CONFIG_NET_POLL_CONTROLLER
903static void t1_netpoll(struct net_device *dev)
904{
905 struct adapter *adapter = dev->priv;
906
907 t1_interrupt(adapter->pdev->irq, adapter, NULL);
908}
909#endif
910
911/*
912 * Periodic accumulation of MAC statistics. This is used only if the MAC
913 * does not have any other way to prevent stats counter overflow.
914 */
915static void mac_stats_task(void *data)
916{
917 int i;
918 struct adapter *adapter = data;
919
920 for_each_port(adapter, i) {
921 struct port_info *p = &adapter->port[i];
922
923 if (netif_running(p->dev))
924 p->mac->ops->statistics_update(p->mac,
925 MAC_STATS_UPDATE_FAST);
926 }
927
928 /* Schedule the next statistics update if any port is active. */
929 spin_lock(&adapter->work_lock);
930 if (adapter->open_device_map & PORT_MASK)
931 schedule_mac_stats_update(adapter,
932 adapter->params.stats_update_period);
933 spin_unlock(&adapter->work_lock);
934}
935
936/*
937 * Processes elmer0 external interrupts in process context.
938 */
939static void ext_intr_task(void *data)
940{
941 u32 enable;
942 struct adapter *adapter = data;
943
944 elmer0_ext_intr_handler(adapter);
945
946 /* Now reenable external interrupts */
947 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
948 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
949 t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
950 adapter->slow_intr_mask |= F_PL_INTR_EXT;
951}
952
953/*
954 * Interrupt-context handler for elmer0 external interrupts.
955 */
956void t1_elmer0_ext_intr(struct adapter *adapter)
957{
958 u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
959
960 /*
961 * Schedule a task to handle external interrupts as we require
962 * a process context. We disable EXT interrupts in the interim
963 * and let the task reenable them when it's done.
964 */
965 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
966 t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
967 schedule_work(&adapter->ext_intr_handler_task);
968}
969
970void t1_fatal_err(struct adapter *adapter)
971{
972 if (adapter->flags & FULL_INIT_DONE) {
973 t1_sge_stop(adapter->sge);
974 t1_interrupts_disable(adapter);
975 }
976 CH_ALERT("%s: encountered fatal error, operation suspended\n",
977 adapter->name);
978}
979
980
981static int __devinit init_one(struct pci_dev *pdev,
982 const struct pci_device_id *ent)
983{
984 static int version_printed;
985
986 int i, err, pci_using_dac = 0;
987 unsigned long mmio_start, mmio_len;
988 const struct board_info *bi;
989 struct adapter *adapter = NULL;
990 struct port_info *pi;
991
992 if (!version_printed) {
993 printk(KERN_INFO "%s - version %s\n", driver_string,
994 driver_version);
995 ++version_printed;
996 }
997
998 err = pci_enable_device(pdev);
999 if (err)
1000 return err;
1001
1002 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1003 CH_ERR("%s: cannot find PCI device memory base address\n",
1004 pci_name(pdev));
1005 err = -ENODEV;
1006 goto out_disable_pdev;
1007 }
1008
1009 if (!pci_set_dma_mask(pdev, PCI_DMA_64BIT)) {
1010 pci_using_dac = 1;
1011 if (pci_set_consistent_dma_mask(pdev, PCI_DMA_64BIT)) {
1012			CH_ERR("%s: unable to obtain 64-bit DMA for "
1013 "consistent allocations\n", pci_name(pdev));
1014 err = -ENODEV;
1015 goto out_disable_pdev;
1016 }
1017 } else if ((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT)) != 0) {
1018 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1019 goto out_disable_pdev;
1020 }
1021
1022 err = pci_request_regions(pdev, driver_name);
1023 if (err) {
1024 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1025 goto out_disable_pdev;
1026 }
1027
1028 pci_set_master(pdev);
1029
1030 mmio_start = pci_resource_start(pdev, 0);
1031 mmio_len = pci_resource_len(pdev, 0);
1032 bi = t1_get_board_info(ent->driver_data);
1033
1034 for (i = 0; i < bi->port_number; ++i) {
1035 struct net_device *netdev;
1036
1037 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1038 if (!netdev) {
1039 err = -ENOMEM;
1040 goto out_free_dev;
1041 }
1042
1043 SET_MODULE_OWNER(netdev);
1044 SET_NETDEV_DEV(netdev, &pdev->dev);
1045
1046 if (!adapter) {
1047 adapter = netdev->priv;
1048 adapter->pdev = pdev;
1049 adapter->port[0].dev = netdev; /* so we don't leak it */
1050
1051 adapter->regs = ioremap(mmio_start, mmio_len);
1052 if (!adapter->regs) {
1053 CH_ERR("%s: cannot map device registers\n",
1054 pci_name(pdev));
1055 err = -ENOMEM;
1056 goto out_free_dev;
1057 }
1058
1059 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1060 err = -ENODEV; /* Can't handle this chip rev */
1061 goto out_free_dev;
1062 }
1063
1064 adapter->name = pci_name(pdev);
1065 adapter->msg_enable = dflt_msg_enable;
1066 adapter->mmio_len = mmio_len;
1067
1068 init_MUTEX(&adapter->mib_mutex);
1069 spin_lock_init(&adapter->tpi_lock);
1070 spin_lock_init(&adapter->work_lock);
1071 spin_lock_init(&adapter->async_lock);
1072
1073 INIT_WORK(&adapter->ext_intr_handler_task,
1074 ext_intr_task, adapter);
1075 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1076 adapter);
1077
1078 pci_set_drvdata(pdev, netdev);
1079
1080 }
1081
1082 pi = &adapter->port[i];
1083 pi->dev = netdev;
1084 netif_carrier_off(netdev);
1085 netdev->irq = pdev->irq;
1086 netdev->if_port = i;
1087 netdev->mem_start = mmio_start;
1088 netdev->mem_end = mmio_start + mmio_len - 1;
1089 netdev->priv = adapter;
1090 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1091 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1092 if (pci_using_dac)
1093 netdev->features |= NETIF_F_HIGHDMA;
1094 if (vlan_tso_capable(adapter)) {
1095 adapter->flags |= UDP_CSUM_CAPABLE;
1096#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1097 adapter->flags |= VLAN_ACCEL_CAPABLE;
1098 netdev->features |=
1099 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1100 netdev->vlan_rx_register = vlan_rx_register;
1101 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1102#endif
1103 adapter->flags |= TSO_CAPABLE;
1104 netdev->features |= NETIF_F_TSO;
1105 }
1106
1107 netdev->open = cxgb_open;
1108 netdev->stop = cxgb_close;
1109 netdev->hard_start_xmit = t1_start_xmit;
1110 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1111 sizeof(struct cpl_tx_pkt_lso) :
1112 sizeof(struct cpl_tx_pkt);
1113 netdev->get_stats = t1_get_stats;
1114 netdev->set_multicast_list = t1_set_rxmode;
1115 netdev->do_ioctl = t1_ioctl;
1116 netdev->change_mtu = t1_change_mtu;
1117 netdev->set_mac_address = t1_set_mac_addr;
1118#ifdef CONFIG_NET_POLL_CONTROLLER
1119 netdev->poll_controller = t1_netpoll;
1120#endif
1121 netdev->weight = 64;
1122
1123 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1124 }
1125
1126 if (t1_init_sw_modules(adapter, bi) < 0) {
1127 err = -ENODEV;
1128 goto out_free_dev;
1129 }
1130
1131 /*
1132 * The card is now ready to go. If any errors occur during device
1133 * registration we do not fail the whole card but rather proceed only
1134 * with the ports we manage to register successfully. However we must
1135 * register at least one net device.
1136 */
1137 for (i = 0; i < bi->port_number; ++i) {
1138 err = register_netdev(adapter->port[i].dev);
1139 if (err)
1140 CH_WARN("%s: cannot register net device %s, skipping\n",
1141 pci_name(pdev), adapter->port[i].dev->name);
1142 else {
1143 /*
1144 * Change the name we use for messages to the name of
1145 * the first successfully registered interface.
1146 */
1147 if (!adapter->registered_device_map)
1148 adapter->name = adapter->port[i].dev->name;
1149
1150 __set_bit(i, &adapter->registered_device_map);
1151 }
1152 }
1153 if (!adapter->registered_device_map) {
1154 CH_ERR("%s: could not register any net devices\n",
1155 pci_name(pdev));
1156 goto out_release_adapter_res;
1157 }
1158
1159 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1160 bi->desc, adapter->params.chip_revision,
1161 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1162 adapter->params.pci.speed, adapter->params.pci.width);
1163 return 0;
1164
1165 out_release_adapter_res:
1166 t1_free_sw_modules(adapter);
1167 out_free_dev:
1168 if (adapter) {
1169 if (adapter->regs)
1170 iounmap(adapter->regs);
1171 for (i = bi->port_number - 1; i >= 0; --i)
1172 if (adapter->port[i].dev)
1173 free_netdev(adapter->port[i].dev);
1174 }
1175 pci_release_regions(pdev);
1176 out_disable_pdev:
1177 pci_disable_device(pdev);
1178 pci_set_drvdata(pdev, NULL);
1179 return err;
1180}
1181
1182static inline void t1_sw_reset(struct pci_dev *pdev)
1183{
1184 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1185 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1186}
1187
1188static void __devexit remove_one(struct pci_dev *pdev)
1189{
1190 struct net_device *dev = pci_get_drvdata(pdev);
1191
1192 if (dev) {
1193 int i;
1194 struct adapter *adapter = dev->priv;
1195
1196 for_each_port(adapter, i)
1197 if (test_bit(i, &adapter->registered_device_map))
1198 unregister_netdev(adapter->port[i].dev);
1199
1200 t1_free_sw_modules(adapter);
1201 iounmap(adapter->regs);
1202 while (--i >= 0)
1203 if (adapter->port[i].dev)
1204 free_netdev(adapter->port[i].dev);
1205 pci_release_regions(pdev);
1206 pci_disable_device(pdev);
1207 pci_set_drvdata(pdev, NULL);
1208 t1_sw_reset(pdev);
1209 }
1210}
1211
1212static struct pci_driver driver = {
1213 .name = driver_name,
1214 .id_table = t1_pci_tbl,
1215 .probe = init_one,
1216 .remove = __devexit_p(remove_one),
1217};
1218
1219static int __init t1_init_module(void)
1220{
1221 return pci_module_init(&driver);
1222}
1223
1224static void __exit t1_cleanup_module(void)
1225{
1226 pci_unregister_driver(&driver);
1227}
1228
1229module_init(t1_init_module);
1230module_exit(t1_cleanup_module);
1231
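
For readers tracing the coalescing math in set_coalesce()/get_coalesce()
above: the driver keeps the latency in raw core-clock ticks and converts
to microseconds by dividing by (clock_core / 1000000).  A minimal
standalone sketch of that conversion is shown below; the 125 MHz clock
value and the helper names are illustrative assumptions, not part of the
driver.

    #include <stdio.h>

    /* Convert between microseconds and core-clock ticks the way the
     * ethtool coalesce handlers do.  clock_core is in Hz.
     */
    static unsigned int usecs_to_ticks(unsigned int usecs, unsigned int clock_core)
    {
        return usecs * (clock_core / 1000000);
    }

    static unsigned int ticks_to_usecs(unsigned int ticks, unsigned int clock_core)
    {
        return ticks / (clock_core / 1000000);
    }

    int main(void)
    {
        unsigned int clock_core = 125000000;    /* assumed 125 MHz core clock */
        unsigned int raw = usecs_to_ticks(50, clock_core);

        printf("50 us -> %u ticks -> %u us\n", raw,
               ticks_to_usecs(raw, clock_core));
        return 0;
    }
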
diff --git a/drivers/net/chelsio/cxgb2.h b/drivers/net/chelsio/cxgb2.h
new file mode 100644
index 000000000000..6ac326afcf01
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.h
@@ -0,0 +1,122 @@
1/*****************************************************************************
2 * *
3 * File: cxgb2.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef __CXGB_LINUX_H__
40#define __CXGB_LINUX_H__
41
42#include <linux/netdevice.h>
43#include <linux/skbuff.h>
44#include <linux/version.h>
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47
48/* This belongs in if_ether.h */
49#define ETH_P_CPL5 0xf
50
51struct cmac;
52struct cphy;
53
54struct port_info {
55 struct net_device *dev;
56 struct cmac *mac;
57 struct cphy *phy;
58 struct link_config link_config;
59 struct net_device_stats netstats;
60};
61
62struct cxgbdev;
63struct t1_sge;
64struct pemc3;
65struct pemc4;
66struct pemc5;
67struct peulp;
68struct petp;
69struct pecspi;
70struct peespi;
71struct work_struct;
72struct vlan_group;
73
74enum { /* adapter flags */
75 FULL_INIT_DONE = 0x1,
76 USING_MSI = 0x2,
77 TSO_CAPABLE = 0x4,
78 TCP_CSUM_CAPABLE = 0x8,
79 UDP_CSUM_CAPABLE = 0x10,
80 VLAN_ACCEL_CAPABLE = 0x20,
81 RX_CSUM_ENABLED = 0x40,
82};
83
84struct adapter {
85 u8 *regs;
86 struct pci_dev *pdev;
87 unsigned long registered_device_map;
88 unsigned long open_device_map;
89 unsigned int flags;
90
91 const char *name;
92 int msg_enable;
93 u32 mmio_len;
94
95 struct work_struct ext_intr_handler_task;
96 struct adapter_params params;
97
98 struct vlan_group *vlan_grp;
99
100 /* Terminator modules. */
101 struct sge *sge;
102 struct pemc3 *mc3;
103 struct pemc4 *mc4;
104 struct pemc5 *mc5;
105 struct petp *tp;
106 struct pecspi *cspi;
107 struct peespi *espi;
108 struct peulp *ulp;
109
110 struct port_info port[MAX_NPORTS];
111 struct work_struct stats_update_task;
112 struct timer_list stats_update_timer;
113
114 struct semaphore mib_mutex;
115 spinlock_t tpi_lock;
116 spinlock_t work_lock;
117
118 spinlock_t async_lock ____cacheline_aligned; /* guards async operations */
119 u32 slow_intr_mask;
120};
121
122#endif
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 000000000000..08f148643e7f
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,157 @@
1/*****************************************************************************
2 * *
3 * File: elmer0.h *
4 * $Revision: 1.3 $ *
5 * $Date: 2005/03/23 07:15:58 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_ELMER0_H
40#define CHELSIO_ELMER0_H
41
42/* ELMER0 flavors */
43enum {
44 ELMER0_XC2S300E_6FT256_C,
45 ELMER0_XC2S100E_6TQ144_C
46};
47
48/* ELMER0 registers */
49#define A_ELMER0_VERSION 0x100000
50#define A_ELMER0_PHY_CFG 0x100004
51#define A_ELMER0_INT_ENABLE 0x100008
52#define A_ELMER0_INT_CAUSE 0x10000c
53#define A_ELMER0_GPI_CFG 0x100010
54#define A_ELMER0_GPI_STAT 0x100014
55#define A_ELMER0_GPO 0x100018
56#define A_ELMER0_PORT0_MI1_CFG 0x400000
57
58#define S_MI1_MDI_ENABLE 0
59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
60#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
61
62#define S_MI1_MDI_INVERT 1
63#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
64#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
65
66#define S_MI1_PREAMBLE_ENABLE 2
67#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
68#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
69
70#define S_MI1_SOF 3
71#define M_MI1_SOF 0x3
72#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
73#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
74
75#define S_MI1_CLK_DIV 5
76#define M_MI1_CLK_DIV 0xff
77#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
78#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
79
80#define A_ELMER0_PORT0_MI1_ADDR 0x400004
81
82#define S_MI1_REG_ADDR 0
83#define M_MI1_REG_ADDR 0x1f
84#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
85#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
86
87#define S_MI1_PHY_ADDR 5
88#define M_MI1_PHY_ADDR 0x1f
89#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
90#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
91
92#define A_ELMER0_PORT0_MI1_DATA 0x400008
93
94#define S_MI1_DATA 0
95#define M_MI1_DATA 0xffff
96#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
97#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
98
99#define A_ELMER0_PORT0_MI1_OP 0x40000c
100
101#define S_MI1_OP 0
102#define M_MI1_OP 0x3
103#define V_MI1_OP(x) ((x) << S_MI1_OP)
104#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
105
106#define S_MI1_ADDR_AUTOINC 2
107#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
108#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
109
110#define S_MI1_OP_BUSY 31
111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
113
114#define A_ELMER0_PORT1_MI1_CFG 0x500000
115#define A_ELMER0_PORT1_MI1_ADDR 0x500004
116#define A_ELMER0_PORT1_MI1_DATA 0x500008
117#define A_ELMER0_PORT1_MI1_OP 0x50000c
118#define A_ELMER0_PORT2_MI1_CFG 0x600000
119#define A_ELMER0_PORT2_MI1_ADDR 0x600004
120#define A_ELMER0_PORT2_MI1_DATA 0x600008
121#define A_ELMER0_PORT2_MI1_OP 0x60000c
122#define A_ELMER0_PORT3_MI1_CFG 0x700000
123#define A_ELMER0_PORT3_MI1_ADDR 0x700004
124#define A_ELMER0_PORT3_MI1_DATA 0x700008
125#define A_ELMER0_PORT3_MI1_OP 0x70000c
126
127/* Simple bit definition for GPI and GP0 registers. */
128#define ELMER0_GP_BIT0 0x0001
129#define ELMER0_GP_BIT1 0x0002
130#define ELMER0_GP_BIT2 0x0004
131#define ELMER0_GP_BIT3 0x0008
132#define ELMER0_GP_BIT4 0x0010
133#define ELMER0_GP_BIT5 0x0020
134#define ELMER0_GP_BIT6 0x0040
135#define ELMER0_GP_BIT7 0x0080
136#define ELMER0_GP_BIT8 0x0100
137#define ELMER0_GP_BIT9 0x0200
138#define ELMER0_GP_BIT10 0x0400
139#define ELMER0_GP_BIT11 0x0800
140#define ELMER0_GP_BIT12 0x1000
141#define ELMER0_GP_BIT13 0x2000
142#define ELMER0_GP_BIT14 0x4000
143#define ELMER0_GP_BIT15 0x8000
144#define ELMER0_GP_BIT16 0x10000
145#define ELMER0_GP_BIT17 0x20000
146#define ELMER0_GP_BIT18 0x40000
147#define ELMER0_GP_BIT19 0x80000
148
149#define MI1_OP_DIRECT_WRITE 1
150#define MI1_OP_DIRECT_READ 2
151
152#define MI1_OP_INDIRECT_ADDRESS 0
153#define MI1_OP_INDIRECT_WRITE 1
154#define MI1_OP_INDIRECT_READ_INC 2
155#define MI1_OP_INDIRECT_READ 3
156
157#endif
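
The S_/M_/V_/F_/G_ naming above is a shift/mask/insert/flag/extract
convention that recurs throughout these register headers.  The small
standalone sketch below reuses the MI1 address-field macros verbatim to
show how a register word is composed and decoded; the phy/reg values in
main() are arbitrary examples.

    #include <stdio.h>

    /* MI1 address fields copied from elmer0.h; the rest is illustration. */
    #define S_MI1_REG_ADDR    0
    #define M_MI1_REG_ADDR    0x1f
    #define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
    #define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)

    #define S_MI1_PHY_ADDR    5
    #define M_MI1_PHY_ADDR    0x1f
    #define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
    #define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)

    int main(void)
    {
        /* Compose the word a driver would write to A_ELMER0_PORT0_MI1_ADDR. */
        unsigned int addr = V_MI1_PHY_ADDR(0x10) | V_MI1_REG_ADDR(0x1);

        /* Extract the fields back out of the composed word. */
        printf("word 0x%x -> phy %u, reg %u\n", addr,
               G_MI1_PHY_ADDR(addr), G_MI1_REG_ADDR(addr));
        return 0;
    }
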
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 000000000000..7ec2dc7bafac
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,386 @@
1/*****************************************************************************
2 * *
3 * File: espi.c *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "espi.h"
43
44struct peespi {
45 adapter_t *adapter;
46 struct espi_intr_counts intr_cnt;
47 u32 misc_ctrl;
48 spinlock_t lock;
49};
50
51#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
52 F_RAMPARITYERR | F_DIP2PARITYERR)
53#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
54 | F_MONITORED_INTERFACE)
55
56#define TRICN_CNFG 14
57#define TRICN_CMD_READ 0x11
58#define TRICN_CMD_WRITE 0x21
59#define TRICN_CMD_ATTEMPTS 10
60
61static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
62 int ch_addr, int reg_offset, u32 wr_data)
63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65
66 t1_write_reg_4(adapter, A_ESPI_CMD_ADDR, V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE));
71 t1_write_reg_4(adapter, A_ESPI_GOSTAT, 0);
72
73 do {
74 busy = t1_read_reg_4(adapter, A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
75 } while (busy && --attempts);
76
77 if (busy)
78 CH_ERR("%s: TRICN write timed out\n", adapter->name);
79
80 return busy;
81}
82
83/* 1. Deassert rx_reset_core. */
84/* 2. Program TRICN_CNFG registers. */
85/* 3. Deassert rx_reset_link */
86static int tricn_init(adapter_t *adapter)
87{
88 int i = 0;
89 int sme = 1;
90 int stat = 0;
91 int timeout = 0;
92 int is_ready = 0;
93 int dynamic_deskew = 0;
94
95 if (dynamic_deskew)
96 sme = 0;
97
98
99 /* 1 */
100	timeout = 1000;
101	do {
102		stat = t1_read_reg_4(adapter, A_ESPI_RX_RESET);
103		is_ready = (stat & 0x4);
104		timeout--;
105		udelay(5);
106	} while (!is_ready && timeout > 0);
107	t1_write_reg_4(adapter, A_ESPI_RX_RESET, 0x2);
108	if (!is_ready) {
109		CH_ERR("%s: tricn_init timed out waiting for ESPI RX reset\n",
110		       adapter->name);
111		t1_fatal_err(adapter);
112	}
113
114 /* 2 */
115 if (sme) {
116 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
117 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
119 }
120 for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
121 for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
122 for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
123 for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
124 for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
125 for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
126 for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
127 for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
128
129 /* 3 */
130 t1_write_reg_4(adapter, A_ESPI_RX_RESET, 0x3);
131
132 return 0;
133}
134
135void t1_espi_intr_enable(struct peespi *espi)
136{
137 u32 enable, pl_intr = t1_read_reg_4(espi->adapter, A_PL_ENABLE);
138
139 /*
140 * Cannot enable ESPI interrupts on T1B because HW asserts the
141 * interrupt incorrectly, namely the driver gets ESPI interrupts
142	 * but no data is actually dropped (this can be verified by reading
143	 * the ESPI drop registers).  Also, once the ESPI interrupt is
144	 * asserted it cannot be cleared (HW bug).
145 */
146 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
147 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, enable);
148 t1_write_reg_4(espi->adapter, A_PL_ENABLE, pl_intr | F_PL_INTR_ESPI);
149}
150
151void t1_espi_intr_clear(struct peespi *espi)
152{
153 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, 0xffffffff);
154 t1_write_reg_4(espi->adapter, A_PL_CAUSE, F_PL_INTR_ESPI);
155}
156
157void t1_espi_intr_disable(struct peespi *espi)
158{
159 u32 pl_intr = t1_read_reg_4(espi->adapter, A_PL_ENABLE);
160
161 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, 0);
162 t1_write_reg_4(espi->adapter, A_PL_ENABLE, pl_intr & ~F_PL_INTR_ESPI);
163}
164
165int t1_espi_intr_handler(struct peespi *espi)
166{
167 u32 cnt;
168 u32 status = t1_read_reg_4(espi->adapter, A_ESPI_INTR_STATUS);
169
170 if (status & F_DIP4ERR)
171 espi->intr_cnt.DIP4_err++;
172 if (status & F_RXDROP)
173 espi->intr_cnt.rx_drops++;
174 if (status & F_TXDROP)
175 espi->intr_cnt.tx_drops++;
176 if (status & F_RXOVERFLOW)
177 espi->intr_cnt.rx_ovflw++;
178 if (status & F_RAMPARITYERR)
179 espi->intr_cnt.parity_err++;
180 if (status & F_DIP2PARITYERR) {
181 espi->intr_cnt.DIP2_parity_err++;
182
183 /*
184 * Must read the error count to clear the interrupt
185 * that it causes.
186 */
187 cnt = t1_read_reg_4(espi->adapter, A_ESPI_DIP2_ERR_COUNT);
188 }
189
190 /*
191 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
192 * write the status as is.
193 */
194 if (status && t1_is_T1B(espi->adapter))
195 status = 1;
196 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, status);
197 return 0;
198}
199
200static void espi_setup_for_pm3393(adapter_t *adapter)
201{
202 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
203
204 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN0, 0x1f4);
205 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN1, 0x1f4);
206 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN2, 0x1f4);
207 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN3, 0x1f4);
208 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK, 0x100);
209 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK, wmark);
210 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 3);
211 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0x08000008);
212 t1_write_reg_4(adapter, A_PORT_CONFIG,
213 V_RX_NPORTS(1) | V_TX_NPORTS(1));
214}
215
216static void espi_setup_for_vsc7321(adapter_t *adapter)
217{
218 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
219
220 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN0, 0x1f4);
221 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN1, 0x1f4);
222 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN2, 0x1f4);
223 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN3, 0x1f4);
224 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK, 0x100);
225 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK, wmark);
226 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 3);
227 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0x08000008);
228 t1_write_reg_4(adapter, A_PORT_CONFIG,
229 V_RX_NPORTS(1) | V_TX_NPORTS(1));
230}
231
232/*
233 * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
234 */
235static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
236{
237 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 1);
238 if (nports == 4) {
239 if (is_T2(adapter)) {
240 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
241 0xf00);
242 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
243 0x3c0);
244 } else {
245 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
246 0x7ff);
247 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
248 0x1ff);
249 }
250 } else {
251 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
252 0x1fff);
253 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
254 0x7ff);
255 }
256 t1_write_reg_4(adapter, A_PORT_CONFIG,
257 V_RX_NPORTS(nports) | V_TX_NPORTS(nports));
258}
259
260/* T2 Init part -- */
261/* 1. Set T_ESPI_MISCCTRL_ADDR */
262/* 2. Init ESPI registers. */
263/* 3. Init TriCN Hard Macro */
264int t1_espi_init(struct peespi *espi, int mac_type, int nports)
265{
266 u32 status_enable_extra = 0;
267 adapter_t *adapter = espi->adapter;
268 u32 cnt;
269 u32 status, burstval = 0x800100;
270
271 /* Disable ESPI training. MACs that can handle it enable it below. */
272 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0);
273
274 if (is_T2(adapter)) {
275 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL,
276 V_OUT_OF_SYNC_COUNT(4) |
277 V_DIP2_PARITY_ERR_THRES(3) | V_DIP4_THRES(1));
278 if (nports == 4) {
279 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
280 burstval = 0x200040;
281 }
282 }
283 t1_write_reg_4(adapter, A_ESPI_MAXBURST1_MAXBURST2, burstval);
284
285 if (mac_type == CHBT_MAC_PM3393)
286 espi_setup_for_pm3393(adapter);
287 else if (mac_type == CHBT_MAC_VSC7321)
288 espi_setup_for_vsc7321(adapter);
289 else if (mac_type == CHBT_MAC_IXF1010) {
290 status_enable_extra = F_INTEL1010MODE;
291 espi_setup_for_ixf1010(adapter, nports);
292 } else
293 return -1;
294
295 /*
296 * Make sure any pending interrupts from the SPI are
297	 * cleared before enabling the interrupt.
298 */
299 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, ESPI_INTR_MASK);
300 status = t1_read_reg_4(espi->adapter, A_ESPI_INTR_STATUS);
301 if (status & F_DIP2PARITYERR) {
302 cnt = t1_read_reg_4(espi->adapter, A_ESPI_DIP2_ERR_COUNT);
303 }
304
305 /*
306 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
307 * write the status as is.
308 */
309 if (status && t1_is_T1B(espi->adapter))
310 status = 1;
311 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, status);
312
313 t1_write_reg_4(adapter, A_ESPI_FIFO_STATUS_ENABLE,
314 status_enable_extra | F_RXSTATUSENABLE);
315
316 if (is_T2(adapter)) {
317 tricn_init(adapter);
318 /*
319 * Always position the control at the 1st port egress IN
320 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
321 */
322 espi->misc_ctrl = (t1_read_reg_4(adapter, A_ESPI_MISC_CONTROL)
323 & ~MON_MASK) | (F_MONITORED_DIRECTION
324 | F_MONITORED_INTERFACE);
325 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, espi->misc_ctrl);
326 spin_lock_init(&espi->lock);
327 }
328
329 return 0;
330}
331
332void t1_espi_destroy(struct peespi *espi)
333{
334 kfree(espi);
335}
336
337struct peespi *t1_espi_create(adapter_t *adapter)
338{
339 struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
340
341	if (!espi)
342		return NULL;
343	memset(espi, 0, sizeof(*espi));
344	espi->adapter = adapter;
345 return espi;
346}
347
348void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
349{
350 struct peespi *espi = adapter->espi;
351
352 if (!is_T2(adapter))
353 return;
354 spin_lock(&espi->lock);
355 espi->misc_ctrl = (val & ~MON_MASK) |
356 (espi->misc_ctrl & MON_MASK);
357 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, espi->misc_ctrl);
358 spin_unlock(&espi->lock);
359}
360
361u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
362{
363 struct peespi *espi = adapter->espi;
364 u32 sel;
365
366 if (!is_T2(adapter))
367 return 0;
368 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
369	if (!wait) {
370		if (!spin_trylock(&espi->lock))
371			return 0;
372	} else {
373		spin_lock(&espi->lock);
374	}
375	if (sel != (espi->misc_ctrl & MON_MASK)) {
376		t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL,
377			       (espi->misc_ctrl & ~MON_MASK) | sel);
378		sel = t1_read_reg_4(adapter, A_ESPI_SCH_TOKEN3);
379		t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL,
380			       espi->misc_ctrl);
381	} else {
382		sel = t1_read_reg_4(adapter, A_ESPI_SCH_TOKEN3);
383	}
384 spin_unlock(&espi->lock);
385 return sel;
386}
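
tricn_write() above follows a common pattern: issue a command, then poll
a busy bit a bounded number of times rather than waiting forever.  A
stripped-down sketch of that pattern is below; the register accessors are
stand-ins for t1_read_reg_4()/t1_write_reg_4() and the status variable
only simulates hardware completing the command.

    #include <stdio.h>

    #define CMD_BUSY        0x1
    #define CMD_ATTEMPTS    10

    static unsigned int fake_status = CMD_BUSY;

    /* Stand-ins for the MMIO accessors: writing a command "completes"
     * immediately in this model and clears the busy bit.
     */
    static void write_cmd(unsigned int cmd)   { (void)cmd; fake_status = 0; }
    static unsigned int read_status(void)     { return fake_status; }

    static int issue_cmd(unsigned int cmd)
    {
        int busy, attempts = CMD_ATTEMPTS;

        write_cmd(cmd);
        do {
            busy = read_status() & CMD_BUSY;
        } while (busy && --attempts);

        if (busy)
            printf("command 0x%x timed out\n", cmd);
        return busy;    /* non-zero means the poll gave up */
    }

    int main(void)
    {
        if (!issue_cmd(0x21))
            printf("command completed\n");
        return 0;
    }
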
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 000000000000..0f84e8b6399f
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,67 @@
1/*****************************************************************************
2 * *
3 * File: espi.h *
4 * $Revision: 1.4 $ *
5 * $Date: 2005/03/23 07:15:58 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_ESPI_H
40#define CHELSIO_ESPI_H
41
42#include "common.h"
43
44struct espi_intr_counts {
45 unsigned int DIP4_err;
46 unsigned int rx_drops;
47 unsigned int tx_drops;
48 unsigned int rx_ovflw;
49 unsigned int parity_err;
50 unsigned int DIP2_parity_err;
51};
52
53struct peespi;
54
55struct peespi *t1_espi_create(adapter_t *adapter);
56void t1_espi_destroy(struct peespi *espi);
57int t1_espi_init(struct peespi *espi, int mac_type, int nports);
58
59void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *);
63
64void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
65u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
66
67#endif
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 000000000000..24501e2232cc
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,133 @@
1/*****************************************************************************
2 * *
3 * File: gmac.h *
4 * $Revision: 1.3 $ *
5 * $Date: 2005/03/23 07:15:58 $ *
6 * Description: *
7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef CHELSIO_GMAC_H
41#define CHELSIO_GMAC_H
42
43#include "common.h"
44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
47
48struct cmac_statistics {
49 /* Transmit */
50 u64 TxOctetsOK;
51 u64 TxOctetsBad;
52 u64 TxUnicastFramesOK;
53 u64 TxMulticastFramesOK;
54 u64 TxBroadcastFramesOK;
55 u64 TxPauseFrames;
56 u64 TxFramesWithDeferredXmissions;
57 u64 TxLateCollisions;
58 u64 TxTotalCollisions;
59 u64 TxFramesAbortedDueToXSCollisions;
60 u64 TxUnderrun;
61 u64 TxLengthErrors;
62 u64 TxInternalMACXmitError;
63 u64 TxFramesWithExcessiveDeferral;
64 u64 TxFCSErrors;
65
66 /* Receive */
67 u64 RxOctetsOK;
68 u64 RxOctetsBad;
69 u64 RxUnicastFramesOK;
70 u64 RxMulticastFramesOK;
71 u64 RxBroadcastFramesOK;
72 u64 RxPauseFrames;
73 u64 RxFCSErrors;
74 u64 RxAlignErrors;
75 u64 RxSymbolErrors;
76 u64 RxDataErrors;
77 u64 RxSequenceErrors;
78 u64 RxRuntErrors;
79 u64 RxJabberErrors;
80 u64 RxInternalMACRcvError;
81 u64 RxInRangeLengthErrors;
82 u64 RxOutOfRangeLengthField;
83 u64 RxFrameTooLongErrors;
84};
85
86struct cmac_ops {
87 void (*destroy)(struct cmac *);
88 int (*reset)(struct cmac *);
89 int (*interrupt_enable)(struct cmac *);
90 int (*interrupt_disable)(struct cmac *);
91 int (*interrupt_clear)(struct cmac *);
92 int (*interrupt_handler)(struct cmac *);
93
94 int (*enable)(struct cmac *, int);
95 int (*disable)(struct cmac *, int);
96
97 int (*loopback_enable)(struct cmac *);
98 int (*loopback_disable)(struct cmac *);
99
100 int (*set_mtu)(struct cmac *, int mtu);
101 int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
102
103 int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
104 int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
105 int *fc);
106
107 const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
108
109 int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
110 int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
111};
112
113typedef struct _cmac_instance cmac_instance;
114
115struct cmac {
116 struct cmac_statistics stats;
117 adapter_t *adapter;
118 struct cmac_ops *ops;
119 cmac_instance *instance;
120};
121
122struct gmac {
123 unsigned int stats_update_period;
124 struct cmac *(*create)(adapter_t *adapter, int index);
125 int (*reset)(adapter_t *);
126};
127
128extern struct gmac t1_pm3393_ops;
129extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops;
133#endif
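
struct cmac above is driven entirely through its ops table; callers such
as t1_change_mtu() in cxgb2.c check the function pointer before
dispatching.  The standalone sketch below mirrors that pattern with a
cut-down ops structure; the type and function names are illustrative.

    #include <stdio.h>
    #include <stddef.h>

    struct mac;

    /* Minimal ops table, in the spirit of struct cmac_ops. */
    struct mac_ops {
        int (*set_mtu)(struct mac *, int mtu);
    };

    struct mac {
        const struct mac_ops *ops;
        int mtu;
    };

    static int demo_set_mtu(struct mac *m, int mtu)
    {
        m->mtu = mtu;
        return 0;
    }

    static const struct mac_ops demo_ops = { .set_mtu = demo_set_mtu };

    static int change_mtu(struct mac *m, int new_mtu)
    {
        if (!m->ops->set_mtu)
            return -1;              /* the driver returns -EOPNOTSUPP here */
        return m->ops->set_mtu(m, new_mtu);
    }

    int main(void)
    {
        struct mac m = { .ops = &demo_ops, .mtu = 1500 };

        change_mtu(&m, 9000);
        printf("mtu is now %d\n", m.mtu);
        return 0;
    }
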
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 000000000000..f54133af1bce
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,258 @@
1/*****************************************************************************
2 * *
3 * File: mv88x201x.c *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "cphy.h"
41#include "elmer0.h"
42
43/*
44 * The 88x2010 Rev C. requires some link status registers to be read
45 * twice in order to get the right values. Future revisions will fix
46 * this problem and then this macro can disappear.
47 */
48#define MV88x2010_LINK_STATUS_BUGS 1
49
50static int led_init(struct cphy *cphy)
51{
52 /* Setup the LED registers so we can turn on/off.
53 * Writing these bits maps control to another
54 * register. mmd(0x1) addr(0x7)
55 */
56 mdio_write(cphy, 0x3, 0x8304, 0xdddd);
57 return 0;
58}
59
60static int led_link(struct cphy *cphy, u32 do_enable)
61{
62 u32 led = 0;
63#define LINK_ENABLE_BIT 0x1
64
65 mdio_read(cphy, 0x1, 0x7, &led);
66
67 if (do_enable & LINK_ENABLE_BIT) {
68 led |= LINK_ENABLE_BIT;
69 mdio_write(cphy, 0x1, 0x7, led);
70 } else {
71 led &= ~LINK_ENABLE_BIT;
72 mdio_write(cphy, 0x1, 0x7, led);
73 }
74 return 0;
75}
76
77/* Port Reset */
78static int mv88x201x_reset(struct cphy *cphy, int wait)
79{
80 /* This can be done through registers. It is not required since
81 * a full chip reset is used.
82 */
83 return 0;
84}
85
86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{
88 /* Enable PHY LASI interrupts. */
89 mdio_write(cphy, 0x1, 0x9002, 0x1);
90
91 /* Enable Marvell interrupts through Elmer0. */
92 if (t1_is_asic(cphy->adapter)) {
93 u32 elmer;
94
95 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
96 elmer |= ELMER0_GP_BIT6;
97 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
98 }
99 return 0;
100}
101
102static int mv88x201x_interrupt_disable(struct cphy *cphy)
103{
104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106
107 /* Disable Marvell interrupts through Elmer0. */
108 if (t1_is_asic(cphy->adapter)) {
109 u32 elmer;
110
111 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
112 elmer &= ~ELMER0_GP_BIT6;
113 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
114 }
115 return 0;
116}
117
118static int mv88x201x_interrupt_clear(struct cphy *cphy)
119{
120 u32 elmer;
121 u32 val;
122
123#ifdef MV88x2010_LINK_STATUS_BUGS
124	/* Required to read twice before the clear takes effect. */
125 mdio_read(cphy, 0x1, 0x9003, &val);
126 mdio_read(cphy, 0x1, 0x9004, &val);
127 mdio_read(cphy, 0x1, 0x9005, &val);
128
129	/* Read this register after the others above it, otherwise
130 * the register doesn't clear correctly.
131 */
132 mdio_read(cphy, 0x1, 0x1, &val);
133#endif
134
135 /* Clear link status. */
136 mdio_read(cphy, 0x1, 0x1, &val);
137 /* Clear PHY LASI interrupts. */
138 mdio_read(cphy, 0x1, 0x9005, &val);
139
140#ifdef MV88x2010_LINK_STATUS_BUGS
141 /* Do it again. */
142 mdio_read(cphy, 0x1, 0x9003, &val);
143 mdio_read(cphy, 0x1, 0x9004, &val);
144#endif
145
146 /* Clear Marvell interrupts through Elmer0. */
147 if (t1_is_asic(cphy->adapter)) {
148 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
149 elmer |= ELMER0_GP_BIT6;
150 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
151 }
152 return 0;
153}
154
155static int mv88x201x_interrupt_handler(struct cphy *cphy)
156{
157 /* Clear interrupts */
158 mv88x201x_interrupt_clear(cphy);
159
160 /* We have only enabled link change interrupts and so
161 * cphy_cause must be a link change interrupt.
162 */
163 return cphy_cause_link_change;
164}
165
166static int mv88x201x_set_loopback(struct cphy *cphy, int on)
167{
168 return 0;
169}
170
171static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
172 int *speed, int *duplex, int *fc)
173{
174 u32 val = 0;
175#define LINK_STATUS_BIT 0x4
176
177 if (link_ok) {
178 /* Read link status. */
179 mdio_read(cphy, 0x1, 0x1, &val);
180 val &= LINK_STATUS_BIT;
181 *link_ok = (val == LINK_STATUS_BIT);
182 /* Turn on/off Link LED */
183 led_link(cphy, *link_ok);
184 }
185 if (speed)
186 *speed = SPEED_10000;
187 if (duplex)
188 *duplex = DUPLEX_FULL;
189 if (fc)
190 *fc = PAUSE_RX | PAUSE_TX;
191 return 0;
192}
193
194static void mv88x201x_destroy(struct cphy *cphy)
195{
196 kfree(cphy);
197}
198
199static struct cphy_ops mv88x201x_ops = {
200 .destroy = mv88x201x_destroy,
201 .reset = mv88x201x_reset,
202 .interrupt_enable = mv88x201x_interrupt_enable,
203 .interrupt_disable = mv88x201x_interrupt_disable,
204 .interrupt_clear = mv88x201x_interrupt_clear,
205 .interrupt_handler = mv88x201x_interrupt_handler,
206 .get_link_status = mv88x201x_get_link_status,
207 .set_loopback = mv88x201x_set_loopback,
208};
209
210static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
211 struct mdio_ops *mdio_ops)
212{
213 u32 val;
214 struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
215
216 if (!cphy)
217 return NULL;
218 memset(cphy, 0, sizeof(*cphy));
219 cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
220
221 /* Commands the PHY to enable XFP's clock. */
222 mdio_read(cphy, 0x3, 0x8300, &val);
223 mdio_write(cphy, 0x3, 0x8300, val | 1);
224
225 /* Clear link status. Required because of a bug in the PHY. */
226 mdio_read(cphy, 0x1, 0x8, &val);
227 mdio_read(cphy, 0x3, 0x8, &val);
228
229	/* Allows the Link and Ack LEDs to be turned on/off. */
230 led_init(cphy);
231 return cphy;
232}
233
234/* Chip Reset */
235static int mv88x201x_phy_reset(adapter_t *adapter)
236{
237 u32 val;
238
239 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
240 val &= ~4;
241 t1_tpi_write(adapter, A_ELMER0_GPO, val);
242 msleep(100);
243
244 t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
245 msleep(1000);
246
247	/* Now let's enable the laser. Delay 100us. */
248 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
249 val |= 0x8000;
250 t1_tpi_write(adapter, A_ELMER0_GPO, val);
251 udelay(100);
252 return 0;
253}
254
255struct gphy t1_mv88x201x_ops = {
256 mv88x201x_phy_create,
257 mv88x201x_phy_reset
258};
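
led_link() above is a read-modify-write over MDIO: read the LED control
register, set or clear a single bit, and write the word back.  The sketch
below shows the same flow with the MDIO accessors replaced by stand-ins
that keep the register value in a local variable.

    #include <stdio.h>

    #define LINK_ENABLE_BIT 0x1

    static unsigned int led_reg;    /* stands in for MMD 1, register 7 */

    static void mdio_read_stub(unsigned int *val)  { *val = led_reg; }
    static void mdio_write_stub(unsigned int val)  { led_reg = val; }

    static void set_link_led(int enable)
    {
        unsigned int led;

        /* Read, flip one bit, write back -- same shape as led_link(). */
        mdio_read_stub(&led);
        if (enable)
            led |= LINK_ENABLE_BIT;
        else
            led &= ~LINK_ENABLE_BIT;
        mdio_write_stub(led);
    }

    int main(void)
    {
        set_link_led(1);
        printf("led register: 0x%x\n", led_reg);
        set_link_led(0);
        printf("led register: 0x%x\n", led_reg);
        return 0;
    }
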
diff --git a/drivers/net/chelsio/osdep.h b/drivers/net/chelsio/osdep.h
new file mode 100644
index 000000000000..095cb474434f
--- /dev/null
+++ b/drivers/net/chelsio/osdep.h
@@ -0,0 +1,169 @@
1/*****************************************************************************
2 * *
3 * File: osdep.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef __CHELSIO_OSDEP_H
40#define __CHELSIO_OSDEP_H
41
42#include <linux/version.h>
43#include <linux/module.h>
44#include <linux/config.h>
45#include <linux/types.h>
46#include <linux/delay.h>
47#include <linux/pci.h>
48#include <linux/ethtool.h>
49#include <linux/mii.h>
50#include <linux/crc32.h>
51#include <linux/init.h>
52#include <asm/io.h>
53
54#include "cxgb2.h"
55
56#define DRV_NAME "cxgb"
57#define PFX DRV_NAME ": "
58
59#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
60#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
61#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
62
63/*
64 * More powerful macro that selectively prints messages based on msg_enable.
65 * For info and debugging messages.
66 */
67#define CH_MSG(adapter, level, category, fmt, ...) do { \
68 if ((adapter)->msg_enable & NETIF_MSG_##category) \
69 printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
70 ## __VA_ARGS__); \
71} while (0)
72
73#ifdef DEBUG
74# define CH_DBG(adapter, category, fmt, ...) \
75 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
76#else
77# define CH_DBG(adapter, category, fmt, ...)
78#endif
79
80/* Additional NETIF_MSG_* categories */
81#define NETIF_MSG_MMIO 0x8000000
82
83#define CH_DEVICE(devid, ssid, idx) \
84 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
85
86#define SUPPORTED_PAUSE (1 << 13)
87#define SUPPORTED_LOOPBACK (1 << 15)
88
89#define ADVERTISED_PAUSE (1 << 13)
90#define ADVERTISED_ASYM_PAUSE (1 << 14)
91
92/*
93 * Now that we have included the driver's main data structure,
94 * we typedef it to something the rest of the system understands.
95 */
96typedef struct adapter adapter_t;
97
98#define TPI_LOCK(adapter) spin_lock(&(adapter)->tpi_lock)
99#define TPI_UNLOCK(adapter) spin_unlock(&(adapter)->tpi_lock)
100
101void t1_elmer0_ext_intr(adapter_t *adapter);
102void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
103 int speed, int duplex, int fc);
104
105static inline u16 t1_read_reg_2(adapter_t *adapter, u32 reg_addr)
106{
107 u16 val = readw(adapter->regs + reg_addr);
108
109 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr,
110 val);
111 return val;
112}
113
114static inline void t1_write_reg_2(adapter_t *adapter, u32 reg_addr, u16 val)
115{
116 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr,
117 val);
118 writew(val, adapter->regs + reg_addr);
119}
120
121static inline u32 t1_read_reg_4(adapter_t *adapter, u32 reg_addr)
122{
123 u32 val = readl(adapter->regs + reg_addr);
124
125 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr,
126 val);
127 return val;
128}
129
130static inline void t1_write_reg_4(adapter_t *adapter, u32 reg_addr, u32 val)
131{
132 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr,
133 val);
134 writel(val, adapter->regs + reg_addr);
135}
136
137static inline const char *port_name(adapter_t *adapter, int port_idx)
138{
139 return adapter->port[port_idx].dev->name;
140}
141
142static inline void t1_set_hw_addr(adapter_t *adapter, int port_idx,
143 u8 hw_addr[])
144{
145 memcpy(adapter->port[port_idx].dev->dev_addr, hw_addr, ETH_ALEN);
146}
147
148struct t1_rx_mode {
149 struct net_device *dev;
150 u32 idx;
151 struct dev_mc_list *list;
152};
153
154#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
155#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
156#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
157
158static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
159{
160 u8 *addr = 0;
161
162 if (rm->idx++ < rm->dev->mc_count) {
163 addr = rm->list->dmi_addr;
164 rm->list = rm->list->next;
165 }
166 return addr;
167}
168
169#endif
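
t1_get_next_mcaddr() above gives MAC code a simple cursor over the
device's multicast list: each call returns the next address, or NULL once
the list is exhausted.  A standalone sketch of that usage is below;
struct mc_entry stands in for struct dev_mc_list and the addresses are
made-up examples.

    #include <stdio.h>
    #include <stddef.h>

    struct mc_entry {
        unsigned char addr[6];
        struct mc_entry *next;
    };

    struct rx_mode {
        struct mc_entry *list;
    };

    /* Return the next multicast address, or NULL when done. */
    static unsigned char *get_next_mcaddr(struct rx_mode *rm)
    {
        unsigned char *addr = NULL;

        if (rm->list) {
            addr = rm->list->addr;
            rm->list = rm->list->next;
        }
        return addr;
    }

    int main(void)
    {
        struct mc_entry b = { { 1, 0, 0x5e, 0, 0, 2 }, NULL };
        struct mc_entry a = { { 1, 0, 0x5e, 0, 0, 1 }, &b };
        struct rx_mode rm = { &a };
        unsigned char *p;

        /* This is how a MAC's set_rx_mode() would walk the list. */
        while ((p = get_next_mcaddr(&rm)) != NULL)
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   p[0], p[1], p[2], p[3], p[4], p[5]);
        return 0;
    }
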
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 000000000000..17bd20f60d99
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,831 @@
1/*****************************************************************************
2 * *
3 * File: pm3393.c *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "gmac.h"
43#include "elmer0.h"
44#include "suni1x10gexp_regs.h"
45
46/* 802.3ae 10Gb/s MDIO Manageable Device(MMD)
47 */
48#define MMD_RESERVED 0
49#define MMD_PMAPMD 1
50#define MMD_WIS 2
51#define MMD_PCS 3
52#define MMD_PHY_XGXS 4 /* XGMII Extender Sublayer */
53#define MMD_DTE_XGXS 5
54
55#define PHY_XGXS_CTRL_1 0
56#define PHY_XGXS_STATUS_1 1
57
58#define OFFSET(REG_ADDR) (REG_ADDR << 2)
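/*
 * The shift by 2 presumably converts the PM3393's 16-bit register index into
 * the byte offset expected by the t1_tpi_read/t1_tpi_write accessors.
 */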
59
60/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
61#define MAX_FRAME_SIZE 9600
62
63#define IPG 12
64#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
65 SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
66 SUNI1x10GEXP_BITMSK_TXXG_PADEN)
67#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
68 SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
69
70/* Update statistics every 15 minutes */
71#define STATS_TICK_SECS (15 * 60)
72
73enum { /* RMON registers */
74 RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
75 RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
76 RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
77 RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
78 RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
79 RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
80 RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
81 RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
82 RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
83 RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
84 RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
85 RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
86 RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
87
88 TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
89 TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
90 TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
91 TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
92 TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
93 TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
94 TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
95};
96
97struct _cmac_instance {
98 u8 enabled;
99 u8 fc;
100 u8 mac_addr[6];
101};
102
103static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
104{
105 t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
106 return 0;
107}
108
109static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
110{
111 t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
112 return 0;
113}
114
115/* Port reset. */
116static int pm3393_reset(struct cmac *cmac)
117{
118 return 0;
119}
120
121/*
122 * Enable interrupts for the PM3393:
123 *
124 * 1. Enable PM3393 BLOCK interrupts.
125 * 2. Enable PM3393 Master Interrupt bit (INTE).
126 * 3. Enable ELMER's PM3393 bit.
127 * 4. Enable Terminator external interrupt.
128 */

129static int pm3393_interrupt_enable(struct cmac *cmac)
130{
131 u32 pl_intr;
132
133 /* PM3393 - Enabling all hardware block interrupts.
134 */
135 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
136 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
137 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
138 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
139
140 /* Don't interrupt on statistics overflow, we are polling */
141 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
142 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
143 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
144 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
145
146 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
147 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
148 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
149 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
150 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
151 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
152 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
153 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
154 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
155
156 /* PM3393 - Global interrupt enable
157 */
158 /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
159 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
160 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
161
162	/* TERMINATOR - PL_INTERRUPTS_EXT */
163 pl_intr = t1_read_reg_4(cmac->adapter, A_PL_ENABLE);
164 pl_intr |= F_PL_INTR_EXT;
165 t1_write_reg_4(cmac->adapter, A_PL_ENABLE, pl_intr);
166 return 0;
167}
168
169static int pm3393_interrupt_disable(struct cmac *cmac)
170{
171 u32 elmer;
172
173	/* PM3393 - Disabling all HW interrupt blocks. */
174 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
175 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
176 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
177 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
178 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
179 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
180 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
181 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
182 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
183 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
184 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
185 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
186 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
187 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
188 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
189 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
190 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
191
192 /* PM3393 - Global interrupt enable */
193 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
194
195 /* ELMER - External chip interrupts. */
196 t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
197 elmer &= ~ELMER0_GP_BIT1;
198 t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
199
200	/* TERMINATOR - PL_INTERRUPTS_EXT */
201 /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
202 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
203 */
204
205 return 0;
206}
207
208static int pm3393_interrupt_clear(struct cmac *cmac)
209{
210 u32 elmer;
211 u32 pl_intr;
212 u32 val32;
213
214 /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
215 * bit WCIMODE=0 for a clear-on-read.
216 */
217 pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
218 pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
219 pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
220 pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
221 pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
222 pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
223 pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
224 pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
225 pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
226 pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
227 pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
228 pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
229 &val32);
230 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
231 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
232
233 /* PM3393 - Global interrupt status
234 */
235 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
236
237 /* ELMER - External chip interrupts.
238 */
239 t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
240 elmer |= ELMER0_GP_BIT1;
241 t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
242
243	/* TERMINATOR - PL_INTERRUPTS_EXT
244 */
245 pl_intr = t1_read_reg_4(cmac->adapter, A_PL_CAUSE);
246 pl_intr |= F_PL_INTR_EXT;
247 t1_write_reg_4(cmac->adapter, A_PL_CAUSE, pl_intr);
248
249 return 0;
250}
251
252/* Interrupt handler */
253static int pm3393_interrupt_handler(struct cmac *cmac)
254{
255 u32 master_intr_status;
256/*
257 * 1. Read master interrupt register.
258 * 2. Read BLOCK's interrupt status registers.
259 * 3. Handle BLOCK interrupts.
260 */
261 /* Read the master interrupt status register. */
262 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
263 &master_intr_status);
264 CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
265 master_intr_status);
266
267	/* TBD XXX Let's just clear everything for now */
268 pm3393_interrupt_clear(cmac);
269
270 return 0;
271}
272
273static int pm3393_enable(struct cmac *cmac, int which)
274{
275 if (which & MAC_DIRECTION_RX)
276 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
277 (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
278
279 if (which & MAC_DIRECTION_TX) {
280 u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
281
282 if (cmac->instance->fc & PAUSE_RX)
283 val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
284 if (cmac->instance->fc & PAUSE_TX)
285 val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
286 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
287 }
288
289 cmac->instance->enabled |= which;
290 return 0;
291}
292
293static int pm3393_enable_port(struct cmac *cmac, int which)
294{
295 /* Clear port statistics */
296 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
297 SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
298 udelay(2);
299 memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
300
301 pm3393_enable(cmac, which);
302
303 /*
304	 * XXX This should be done by the PHY and preferably not at all.
305 * The PHY doesn't give us link status indication on its own so have
306 * the link management code query it instead.
307 */
308 {
309 extern void link_changed(adapter_t *adapter, int port_id);
310
311 link_changed(cmac->adapter, 0);
312 }
313 return 0;
314}
315
316static int pm3393_disable(struct cmac *cmac, int which)
317{
318 if (which & MAC_DIRECTION_RX)
319 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
320 if (which & MAC_DIRECTION_TX)
321 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
322
323 /*
324 * The disable is graceful. Give the PM3393 time. Can't wait very
325 * long here, we may be holding locks.
326 */
327 udelay(20);
328
329 cmac->instance->enabled &= ~which;
330 return 0;
331}
332
333static int pm3393_loopback_enable(struct cmac *cmac)
334{
335 return 0;
336}
337
338static int pm3393_loopback_disable(struct cmac *cmac)
339{
340 return 0;
341}
342
343static int pm3393_set_mtu(struct cmac *cmac, int mtu)
344{
345 int enabled = cmac->instance->enabled;
346
347	/* MAX_FRAME_SIZE includes the 14-byte Ethernet header and 4-byte FCS, mtu doesn't */
348 mtu += 14 + 4;
349 if (mtu > MAX_FRAME_SIZE)
350 return -EINVAL;
351
352 /* Disable Rx/Tx MAC before configuring it. */
353 if (enabled)
354 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
355
356 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
357 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
358
359 if (enabled)
360 pm3393_enable(cmac, enabled);
361 return 0;
362}
363
364static u32 calc_crc(u8 *b, int len)
365{
366 int i;
367 u32 crc = (u32)~0;
368
369 /* calculate crc one bit at a time */
370 while (len--) {
371 crc ^= *b++;
372 for (i = 0; i < 8; i++) {
373 if (crc & 0x1)
374 crc = (crc >> 1) ^ 0xedb88320;
375 else
376 crc = (crc >> 1);
377 }
378 }
379
380 /* reverse bits */
381 crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
382 crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
383 crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
384 /* swap bytes */
385 crc = (crc >> 16) | (crc << 16);
386 crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
387
388 return crc;
389}
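/*
 * A minimal sketch of how the CRC above selects one of the 64 multicast
 * hash-filter bits.  The helper name is hypothetical; the same computation
 * appears inline in pm3393_set_rx_mode() below.
 */
static inline int pm3393_mc_hash_bit(u8 *addr)
{
	/* bits 23..28 of the CRC select the filter bit */
	return (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;
}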
390
391static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
392{
393 int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
394 u32 rx_mode;
395
396 /* Disable MAC RX before reconfiguring it */
397 if (enabled)
398 pm3393_disable(cmac, MAC_DIRECTION_RX);
399
400 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
401 rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
402 SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
403 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
404 (u16)rx_mode);
405
406 if (t1_rx_mode_promisc(rm)) {
407 /* Promiscuous mode. */
408 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
409 }
410 if (t1_rx_mode_allmulti(rm)) {
411 /* Accept all multicast. */
412 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
413 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
414 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
415 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
416 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
417 } else if (t1_rx_mode_mc_cnt(rm)) {
418 /* Accept one or more multicast(s). */
419 u8 *addr;
420 int bit;
421 u16 mc_filter[4] = { 0, };
422
423 while ((addr = t1_get_next_mcaddr(rm))) {
424 bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
425 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
426 }
427 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
428 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
429 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
430 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
431 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
432 }
433
434 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
435
436 if (enabled)
437 pm3393_enable(cmac, MAC_DIRECTION_RX);
438
439 return 0;
440}
441
442static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
443 int *duplex, int *fc)
444{
445 if (speed)
446 *speed = SPEED_10000;
447 if (duplex)
448 *duplex = DUPLEX_FULL;
449 if (fc)
450 *fc = cmac->instance->fc;
451 return 0;
452}
453
454static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
455 int fc)
456{
457 if (speed >= 0 && speed != SPEED_10000)
458 return -1;
459 if (duplex >= 0 && duplex != DUPLEX_FULL)
460 return -1;
461 if (fc & ~(PAUSE_TX | PAUSE_RX))
462 return -1;
463
464 if (fc != cmac->instance->fc) {
465 cmac->instance->fc = (u8) fc;
466 if (cmac->instance->enabled & MAC_DIRECTION_TX)
467 pm3393_enable(cmac, MAC_DIRECTION_TX);
468 }
469 return 0;
470}
471
472#define RMON_UPDATE(mac, name, stat_name) \
473 { \
474 t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
475 t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
476 t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
477 (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
478 (((u64)val1 & 0xffff) << 16) | \
479 (((u64)val2 & 0xff) << 32) | \
480 ((mac)->stats.stat_name & \
481 (~(u64)0 << 40)); \
482 if (ro & \
483 ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
484 (mac)->stats.stat_name += ((u64)1 << 40); \
485 }
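/*
 * What the macro above assembles: each MSTAT counter is read as three 16-bit
 * TPI registers holding bits [15:0], [31:16] and [39:32].  They are merged
 * into the low 40 bits of the 64-bit software counter, bits [63:40] of the
 * previous value are preserved, and 2^40 is added when the rollover snapshot
 * flags that this counter wrapped since the last update.
 */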
486
487static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
488 int flag)
489{
490 u64 ro;
491 u32 val0, val1, val2, val3;
492
493 /* Snap the counters */
494 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
495 SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
496
497 /* Counter rollover, clear on read */
498 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
499 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
500 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
501 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
502 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
503 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
504
505 /* Rx stats */
506 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
507 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
508 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
509 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
510 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
511 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
512 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
513 RxInternalMACRcvError);
514 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
515 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
516 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
517 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
518 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
519 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
520
521 /* Tx stats */
522 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
523 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
524 TxInternalMACXmitError);
525 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
526 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
527 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
528 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
529 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
530
531 return &mac->stats;
532}
533
534static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
535{
536 memcpy(mac_addr, cmac->instance->mac_addr, 6);
537 return 0;
538}
539
540static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
541{
542 u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
543
544 /*
545 * MAC addr: 00:07:43:00:13:09
546 *
547 * ma[5] = 0x09
548 * ma[4] = 0x13
549 * ma[3] = 0x00
550 * ma[2] = 0x43
551 * ma[1] = 0x07
552 * ma[0] = 0x00
553 *
554 * The PM3393 requires byte swapping and reverse order entry
555 * when programming MAC addresses:
556 *
557 * low_bits[15:0] = ma[1]:ma[0]
558 * mid_bits[31:16] = ma[3]:ma[2]
559 * high_bits[47:32] = ma[5]:ma[4]
560 */
561
562 /* Store local copy */
563 memcpy(cmac->instance->mac_addr, ma, 6);
564
565 lo = ((u32) ma[1] << 8) | (u32) ma[0];
566 mid = ((u32) ma[3] << 8) | (u32) ma[2];
567 hi = ((u32) ma[5] << 8) | (u32) ma[4];
568
569 /* Disable Rx/Tx MAC before configuring it. */
570 if (enabled)
571 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
572
573 /* Set RXXG Station Address */
574 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
575 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
576 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
577
578 /* Set TXXG Station Address */
579 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
580 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
581 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
582
583 /* Setup Exact Match Filter 1 with our MAC address
584 *
585 * Must disable exact match filter before configuring it.
586 */
587 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
588 val &= 0xff0f;
589 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
590
591 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
592 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
593 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
594
595 val |= 0x0090;
596 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
597
598 if (enabled)
599 pm3393_enable(cmac, enabled);
600 return 0;
601}
602
603static void pm3393_destroy(struct cmac *cmac)
604{
605 kfree(cmac);
606}
607
608static struct cmac_ops pm3393_ops = {
609 .destroy = pm3393_destroy,
610 .reset = pm3393_reset,
611 .interrupt_enable = pm3393_interrupt_enable,
612 .interrupt_disable = pm3393_interrupt_disable,
613 .interrupt_clear = pm3393_interrupt_clear,
614 .interrupt_handler = pm3393_interrupt_handler,
615 .enable = pm3393_enable_port,
616 .disable = pm3393_disable,
617 .loopback_enable = pm3393_loopback_enable,
618 .loopback_disable = pm3393_loopback_disable,
619 .set_mtu = pm3393_set_mtu,
620 .set_rx_mode = pm3393_set_rx_mode,
621 .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
622 .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
623 .statistics_update = pm3393_update_statistics,
624 .macaddress_get = pm3393_macaddress_get,
625 .macaddress_set = pm3393_macaddress_set
626};
627
628static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
629{
630 struct cmac *cmac;
631
632 cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
633 if (!cmac)
634 return NULL;
635 memset(cmac, 0, sizeof(*cmac));
636
637 cmac->ops = &pm3393_ops;
638 cmac->instance = (cmac_instance *) (cmac + 1);
639 cmac->adapter = adapter;
640 cmac->instance->fc = PAUSE_TX | PAUSE_RX;
641
642 t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
643 t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
644 t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
645 t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
646 t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
647 t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
648 t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
649 t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
650 t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
651 t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
652 t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
653 t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
654 t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
655 t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
656 t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
657 t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
658 t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
659 t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
660 t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
661 t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
662 t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
663 t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
664
665 t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
666 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
667 t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
668 t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
669 t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
670 t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
671 t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
672 t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
673 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
674 t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
675 t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
676	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
677 t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
678
679 t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
680 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
681 t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
682 t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
683 t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
684 t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
685 t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
686 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
687 t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
688 t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
689	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
690
691 t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
692 t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
693 t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
694 t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
695 t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
696 t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
697
698 t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
699 t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
700
701 t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
702 t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
703
704 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
705	/* For T1 use timer-based MAC flow control. */
706 if (t1_is_T1B(adapter))
707 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
708 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
709 t1_tpi_write(adapter, OFFSET(0x2049), 0x0000); /* # RXXG Cut Through */
710 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
711
712 /* Setup Exact Match Filter 0 to allow broadcast packets.
713 */
714 t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
715 t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
716 t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
717 t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
718 t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
719
720 t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
721 t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
722 t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
723
724 return cmac;
725}
726
727static int pm3393_mac_reset(adapter_t * adapter)
728{
729 u32 val;
730 u32 x;
731 u32 is_pl4_reset_finished;
732 u32 is_pl4_outof_lock;
733 u32 is_xaui_mabc_pll_locked;
734 u32 successful_reset;
735 int i;
736
737 /* The following steps are required to properly reset
738 * the PM3393. This information is provided in the
739 * PM3393 datasheet (Issue 2: November 2002)
740 * section 13.1 -- Device Reset.
741 *
742 * The PM3393 has three types of components that are
743 * individually reset:
744 *
745 * DRESETB - Digital circuitry
746 * PL4_ARESETB - PL4 analog circuitry
747 * XAUI_ARESETB - XAUI bus analog circuitry
748 *
749 * Steps to reset PM3393 using RSTB pin:
750 *
751 * 1. Assert RSTB pin low ( write 0 )
752	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
753	 * 3. Wait until all external clocks and REFSEL are stable.
754	 * 4. Wait a minimum of 1ms (after the external clocks and REFSEL are stable).
755	 * 5. De-assert RSTB ( write 1 )
756	 * 6. Wait until the internal timers expire, after ~14ms.
757	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to the
758	 *      selected reference frequency before allowing the digital
759	 *      portion of the device to operate.
760	 * 7. Wait at least 200us for the XAUI interface to stabilize.
761	 * 8. Verify that the PM3393 came out of reset successfully.
762	 *    Set the successful-reset flag if everything worked, else try again
763	 *    a few more times.
764 */
765
766 successful_reset = 0;
767 for (i = 0; i < 3 && !successful_reset; i++) {
768 /* 1 */
769 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
770 val &= ~1;
771 t1_tpi_write(adapter, A_ELMER0_GPO, val);
772
773 /* 2 */
774 msleep(1);
775
776 /* 3 */
777 msleep(1);
778
779 /* 4 */
780 msleep(2 /*1 extra ms for safety */ );
781
782 /* 5 */
783 val |= 1;
784 t1_tpi_write(adapter, A_ELMER0_GPO, val);
785
786 /* 6 */
787 msleep(15 /*1 extra ms for safety */ );
788
789 /* 7 */
790 msleep(1);
791
792 /* 8 */
793
794 /* Has PL4 analog block come out of reset correctly? */
795 t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
796 is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
797
798		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the
799		 * init sequence; figure out why. */
800
801 /* Have all PL4 block clocks locked? */
802 x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
803 /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
804 SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
805 SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
806 SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
807 is_pl4_outof_lock = (val & x);
808
809		/* ??? If this fails, we might be able to software-reset the XAUI part
810		 * and try to recover, thus saving us from doing another HW reset. */
811		/* Has the XAUI MABC PLL circuitry stabilized? */
812 is_xaui_mabc_pll_locked =
813 (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
814
815 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
816 && is_xaui_mabc_pll_locked);
817
818 CH_DBG(adapter, HW,
819 "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
820 "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
821 i, is_pl4_reset_finished, val, is_pl4_outof_lock,
822 is_xaui_mabc_pll_locked);
823 }
824 return successful_reset ? 0 : 1;
825}
826
827struct gmac t1_pm3393_ops = {
828 STATS_TICK_SECS,
829 pm3393_mac_create,
830 pm3393_mac_reset
831};
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 000000000000..5a70803eb1b6
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,453 @@
1/*****************************************************************************
2 * *
3 * File: regs.h *
4 * $Revision: 1.4 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39/* Do not edit this file */
40
41/* SGE registers */
42#define A_SG_CONTROL 0x0
43
44#define S_CMDQ0_ENABLE 0
45#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
46#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
47
48#define S_CMDQ1_ENABLE 1
49#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
50#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
51
52#define S_FL0_ENABLE 2
53#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
54#define F_FL0_ENABLE V_FL0_ENABLE(1U)
55
56#define S_FL1_ENABLE 3
57#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
58#define F_FL1_ENABLE V_FL1_ENABLE(1U)
59
60#define S_CPL_ENABLE 4
61#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
62#define F_CPL_ENABLE V_CPL_ENABLE(1U)
63
64#define S_RESPONSE_QUEUE_ENABLE 5
65#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
66#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
67
68#define S_CMDQ_PRIORITY 6
69#define M_CMDQ_PRIORITY 0x3
70#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
71#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
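/*
 * Naming convention used throughout this file (as the driver code uses it):
 * for a field FOO, S_FOO is its bit offset, M_FOO its mask, V_FOO(x) places a
 * value into the field, F_FOO is the single-bit flag V_FOO(1U), and G_FOO(x)
 * extracts the field from a register value, e.g.
 *
 *	ctrl = F_CMDQ0_ENABLE | V_CMDQ_PRIORITY(2);
 *	prio = G_CMDQ_PRIORITY(ctrl);
 */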
72
73#define S_DISABLE_CMDQ1_GTS 9
74#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
75#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
76
77#define S_ENABLE_BIG_ENDIAN 12
78#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
79#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
80
81#define S_ISCSI_COALESCE 14
82#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
83#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
84
85#define S_RX_PKT_OFFSET 15
86#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
87
88#define S_VLAN_XTRACT 18
89#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
90#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
91
92#define A_SG_DOORBELL 0x4
93#define A_SG_CMD0BASELWR 0x8
94#define A_SG_CMD0BASEUPR 0xc
95#define A_SG_CMD1BASELWR 0x10
96#define A_SG_CMD1BASEUPR 0x14
97#define A_SG_FL0BASELWR 0x18
98#define A_SG_FL0BASEUPR 0x1c
99#define A_SG_FL1BASELWR 0x20
100#define A_SG_FL1BASEUPR 0x24
101#define A_SG_CMD0SIZE 0x28
102#define A_SG_FL0SIZE 0x2c
103#define A_SG_RSPSIZE 0x30
104#define A_SG_RSPBASELWR 0x34
105#define A_SG_RSPBASEUPR 0x38
106#define A_SG_FLTHRESHOLD 0x3c
107#define A_SG_RSPQUEUECREDIT 0x40
108#define A_SG_SLEEPING 0x48
109#define A_SG_INTRTIMER 0x4c
110#define A_SG_CMD1SIZE 0xb0
111#define A_SG_FL1SIZE 0xb4
112#define A_SG_INT_ENABLE 0xb8
113
114#define S_RESPQ_EXHAUSTED 0
115#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
116#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
117
118#define S_RESPQ_OVERFLOW 1
119#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
120#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
121
122#define S_FL_EXHAUSTED 2
123#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
124#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
125
126#define S_PACKET_TOO_BIG 3
127#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
128#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
129
130#define S_PACKET_MISMATCH 4
131#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
132#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
133
134#define A_SG_INT_CAUSE 0xbc
135
136/* MC3 registers */
137
138#define S_READY 1
139#define V_READY(x) ((x) << S_READY)
140#define F_READY V_READY(1U)
141
142/* MC4 registers */
143
144#define A_MC4_CFG 0x180
145#define S_MC4_SLOW 25
146#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
147#define F_MC4_SLOW V_MC4_SLOW(1U)
148
149/* TPI registers */
150
151#define A_TPI_ADDR 0x280
152#define A_TPI_WR_DATA 0x284
153#define A_TPI_RD_DATA 0x288
154#define A_TPI_CSR 0x28c
155
156#define S_TPIWR 0
157#define V_TPIWR(x) ((x) << S_TPIWR)
158#define F_TPIWR V_TPIWR(1U)
159
160#define S_TPIRDY 1
161#define V_TPIRDY(x) ((x) << S_TPIRDY)
162#define F_TPIRDY V_TPIRDY(1U)
163
164#define A_TPI_PAR 0x29c
165
166#define S_TPIPAR 0
167#define M_TPIPAR 0x7f
168#define V_TPIPAR(x) ((x) << S_TPIPAR)
169#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
170
171/* TP registers */
172
173#define A_TP_IN_CONFIG 0x300
174
175#define S_TP_IN_CSPI_CPL 3
176#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
177#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
178
179#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
180#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
181#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
182
183#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
184#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
185#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
186
187#define S_TP_IN_ESPI_ETHERNET 8
188#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
189#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
190
191#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
192#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
193#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
194
195#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
196#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
197#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
198
199#define S_OFFLOAD_DISABLE 14
200#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
201#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
202
203#define A_TP_OUT_CONFIG 0x304
204
205#define S_TP_OUT_CSPI_CPL 2
206#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
207#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
208
209#define S_TP_OUT_ESPI_ETHERNET 6
210#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
211#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
212
213#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
214#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
215#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
216
217#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
218#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
219#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
220
221#define A_TP_GLOBAL_CONFIG 0x308
222
223#define S_IP_TTL 0
224#define M_IP_TTL 0xff
225#define V_IP_TTL(x) ((x) << S_IP_TTL)
226
227#define S_TCP_CSUM 11
228#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
229#define F_TCP_CSUM V_TCP_CSUM(1U)
230
231#define S_UDP_CSUM 12
232#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
233#define F_UDP_CSUM V_UDP_CSUM(1U)
234
235#define S_IP_CSUM 13
236#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
237#define F_IP_CSUM V_IP_CSUM(1U)
238
239#define S_PATH_MTU 15
240#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
241#define F_PATH_MTU V_PATH_MTU(1U)
242
243#define S_5TUPLE_LOOKUP 17
244#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
245
246#define S_SYN_COOKIE_PARAMETER 26
247#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
248
249#define A_TP_PC_CONFIG 0x348
250#define S_TP_PC_REV 30
251#define M_TP_PC_REV 0x3
252#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
253#define A_TP_RESET 0x44c
254#define S_TP_RESET 0
255#define V_TP_RESET(x) ((x) << S_TP_RESET)
256#define F_TP_RESET V_TP_RESET(1U)
257
258#define A_TP_INT_ENABLE 0x470
259#define A_TP_INT_CAUSE 0x474
260#define A_TP_TX_DROP_CONFIG 0x4b8
261
262#define S_ENABLE_TX_DROP 31
263#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
264#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
265
266#define S_ENABLE_TX_ERROR 30
267#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
268#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
269
270#define S_DROP_TICKS_CNT 4
271#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
272
273#define S_NUM_PKTS_DROPPED 0
274#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
275
276/* CSPI registers */
277
278#define S_DIP4ERR 0
279#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
280#define F_DIP4ERR V_DIP4ERR(1U)
281
282#define S_RXDROP 1
283#define V_RXDROP(x) ((x) << S_RXDROP)
284#define F_RXDROP V_RXDROP(1U)
285
286#define S_TXDROP 2
287#define V_TXDROP(x) ((x) << S_TXDROP)
288#define F_TXDROP V_TXDROP(1U)
289
290#define S_RXOVERFLOW 3
291#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
292#define F_RXOVERFLOW V_RXOVERFLOW(1U)
293
294#define S_RAMPARITYERR 4
295#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
296#define F_RAMPARITYERR V_RAMPARITYERR(1U)
297
298/* ESPI registers */
299
300#define A_ESPI_SCH_TOKEN0 0x880
301#define A_ESPI_SCH_TOKEN1 0x884
302#define A_ESPI_SCH_TOKEN2 0x888
303#define A_ESPI_SCH_TOKEN3 0x88c
304#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
305#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
306#define A_ESPI_CALENDAR_LENGTH 0x898
307#define A_PORT_CONFIG 0x89c
308
309#define S_RX_NPORTS 0
310#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
311
312#define S_TX_NPORTS 8
313#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
314
315#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
316
317#define S_RXSTATUSENABLE 0
318#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
319#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
320
321#define S_INTEL1010MODE 4
322#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
323#define F_INTEL1010MODE V_INTEL1010MODE(1U)
324
325#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
326#define A_ESPI_TRAIN 0x8ac
327#define A_ESPI_INTR_STATUS 0x8c8
328
329#define S_DIP2PARITYERR 5
330#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
331#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
332
333#define A_ESPI_INTR_ENABLE 0x8cc
334#define A_RX_DROP_THRESHOLD 0x8d0
335#define A_ESPI_RX_RESET 0x8ec
336#define A_ESPI_MISC_CONTROL 0x8f0
337
338#define S_OUT_OF_SYNC_COUNT 0
339#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
340
341#define S_DIP2_PARITY_ERR_THRES 5
342#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
343
344#define S_DIP4_THRES 9
345#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
346
347#define S_MONITORED_PORT_NUM 25
348#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
349
350#define S_MONITORED_DIRECTION 27
351#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
352#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
353
354#define S_MONITORED_INTERFACE 28
355#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
356#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
357
358#define A_ESPI_DIP2_ERR_COUNT 0x8f4
359#define A_ESPI_CMD_ADDR 0x8f8
360
361#define S_WRITE_DATA 0
362#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
363
364#define S_REGISTER_OFFSET 8
365#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
366
367#define S_CHANNEL_ADDR 12
368#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
369
370#define S_MODULE_ADDR 16
371#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
372
373#define S_BUNDLE_ADDR 20
374#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
375
376#define S_SPI4_COMMAND 24
377#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
378
379#define A_ESPI_GOSTAT 0x8fc
380#define S_ESPI_CMD_BUSY 8
381#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
382#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
383
384/* PL registers */
385
386#define A_PL_ENABLE 0xa00
387
388#define S_PL_INTR_SGE_ERR 0
389#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
390#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
391
392#define S_PL_INTR_SGE_DATA 1
393#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
394#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
395
396#define S_PL_INTR_TP 6
397#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
398#define F_PL_INTR_TP V_PL_INTR_TP(1U)
399
400#define S_PL_INTR_ESPI 8
401#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
402#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
403
404#define S_PL_INTR_PCIX 10
405#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
406#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
407
408#define S_PL_INTR_EXT 11
409#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
410#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
411
412#define A_PL_CAUSE 0xa04
413
414/* MC5 registers */
415
416#define A_MC5_CONFIG 0xc04
417
418#define S_TCAM_RESET 1
419#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
420#define F_TCAM_RESET V_TCAM_RESET(1U)
421
422#define S_M_BUS_ENABLE 5
423#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
424#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
425
426/* PCICFG registers */
427
428#define A_PCICFG_PM_CSR 0x44
429#define A_PCICFG_VPD_ADDR 0x4a
430
431#define S_VPD_OP_FLAG 15
432#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
433#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
434
435#define A_PCICFG_VPD_DATA 0x4c
436
437#define A_PCICFG_INTR_ENABLE 0xf4
438#define A_PCICFG_INTR_CAUSE 0xf8
439
440#define A_PCICFG_MODE 0xfc
441
442#define S_PCI_MODE_64BIT 0
443#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
444#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
445
446#define S_PCI_MODE_PCIX 5
447#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
448#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
449
450#define S_PCI_MODE_CLK 6
451#define M_PCI_MODE_CLK 0x3
452#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
453
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..bcf8b1e939b0
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1451 @@
1/*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.13 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
42#include <linux/config.h>
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <linux/pci.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
52#include <linux/ip.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55
56#include "cpl5_cmd.h"
57#include "sge.h"
58#include "regs.h"
59#include "espi.h"
60
61#include <linux/tcp.h>
62
63#define SGE_CMDQ_N 2
64#define SGE_FREELQ_N 2
65#define SGE_CMDQ0_E_N 512
66#define SGE_CMDQ1_E_N 128
67#define SGE_FREEL_SIZE 4096
68#define SGE_JUMBO_FREEL_SIZE 512
69#define SGE_FREEL_REFILL_THRESH 16
70#define SGE_RESPQ_E_N 1024
71#define SGE_INTR_BUCKETSIZE 100
72#define SGE_INTR_LATBUCKETS 5
73#define SGE_INTR_MAXBUCKETS 11
74#define SGE_INTRTIMER0 1
75#define SGE_INTRTIMER1 50
76#define SGE_INTRTIMER_NRES 10000
77#define SGE_RX_COPY_THRESHOLD 256
78#define SGE_RX_SM_BUF_SIZE 1536
79
80#define SGE_RESPQ_REPLENISH_THRES ((3 * SGE_RESPQ_E_N) / 4)
81
82#define SGE_RX_OFFSET 2
83#ifndef NET_IP_ALIGN
84# define NET_IP_ALIGN SGE_RX_OFFSET
85#endif
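/*
 * The 2-byte RX offset pads incoming frames so that the IP header following
 * the 14-byte Ethernet header ends up 4-byte aligned; NET_IP_ALIGN expresses
 * the same idea on kernels that define it.
 */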
86
87/*
88 * Memory Mapped HW Command, Freelist and Response Queue Descriptors
89 */
90#if defined(__BIG_ENDIAN_BITFIELD)
91struct cmdQ_e {
92 u32 AddrLow;
93 u32 GenerationBit : 1;
94 u32 BufferLength : 31;
95 u32 RespQueueSelector : 4;
96 u32 ResponseTokens : 12;
97 u32 CmdId : 8;
98 u32 Reserved : 3;
99 u32 TokenValid : 1;
100 u32 Eop : 1;
101 u32 Sop : 1;
102 u32 DataValid : 1;
103 u32 GenerationBit2 : 1;
104 u32 AddrHigh;
105};
106
107struct freelQ_e {
108 u32 AddrLow;
109 u32 GenerationBit : 1;
110 u32 BufferLength : 31;
111 u32 Reserved : 31;
112 u32 GenerationBit2 : 1;
113 u32 AddrHigh;
114};
115
116struct respQ_e {
117 u32 Qsleeping : 4;
118 u32 Cmdq1CreditReturn : 5;
119 u32 Cmdq1DmaComplete : 5;
120 u32 Cmdq0CreditReturn : 5;
121 u32 Cmdq0DmaComplete : 5;
122 u32 FreelistQid : 2;
123 u32 CreditValid : 1;
124 u32 DataValid : 1;
125 u32 Offload : 1;
126 u32 Eop : 1;
127 u32 Sop : 1;
128 u32 GenerationBit : 1;
129 u32 BufferLength;
130};
131
132#elif defined(__LITTLE_ENDIAN_BITFIELD)
133struct cmdQ_e {
134 u32 BufferLength : 31;
135 u32 GenerationBit : 1;
136 u32 AddrLow;
137 u32 AddrHigh;
138 u32 GenerationBit2 : 1;
139 u32 DataValid : 1;
140 u32 Sop : 1;
141 u32 Eop : 1;
142 u32 TokenValid : 1;
143 u32 Reserved : 3;
144 u32 CmdId : 8;
145 u32 ResponseTokens : 12;
146 u32 RespQueueSelector : 4;
147};
148
149struct freelQ_e {
150 u32 BufferLength : 31;
151 u32 GenerationBit : 1;
152 u32 AddrLow;
153 u32 AddrHigh;
154 u32 GenerationBit2 : 1;
155 u32 Reserved : 31;
156};
157
158struct respQ_e {
159 u32 BufferLength;
160 u32 GenerationBit : 1;
161 u32 Sop : 1;
162 u32 Eop : 1;
163 u32 Offload : 1;
164 u32 DataValid : 1;
165 u32 CreditValid : 1;
166 u32 FreelistQid : 2;
167 u32 Cmdq0DmaComplete : 5;
168 u32 Cmdq0CreditReturn : 5;
169 u32 Cmdq1DmaComplete : 5;
170 u32 Cmdq1CreditReturn : 5;
171 u32 Qsleeping : 4;
172} ;
173#endif
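/*
 * The descriptor layouts are spelled out twice, once per bitfield endianness,
 * so that the bit positions seen by the hardware are identical on big- and
 * little-endian hosts.
 */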
174
175/*
176 * SW Context Command and Freelist Queue Descriptors
177 */
178struct cmdQ_ce {
179 struct sk_buff *skb;
180 DECLARE_PCI_UNMAP_ADDR(dma_addr);
181 DECLARE_PCI_UNMAP_LEN(dma_len);
182 unsigned int single;
183};
184
185struct freelQ_ce {
186 struct sk_buff *skb;
187 DECLARE_PCI_UNMAP_ADDR(dma_addr);
188 DECLARE_PCI_UNMAP_LEN(dma_len);
189};
190
191/*
192 * SW Command, Freelist and Response Queue
193 */
194struct cmdQ {
195 atomic_t asleep; /* HW DMA Fetch status */
196 atomic_t credits; /* # available descriptors for TX */
197 atomic_t pio_pidx; /* Variable updated on Doorbell */
198 u16 entries_n; /* # descriptors for TX */
199 u16 pidx; /* producer index (SW) */
200 u16 cidx; /* consumer index (HW) */
201 u8 genbit; /* current generation (=valid) bit */
202 struct cmdQ_e *entries; /* HW command descriptor Q */
203 struct cmdQ_ce *centries; /* SW command context descriptor Q */
204 spinlock_t Qlock; /* Lock to protect cmdQ enqueuing */
205 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
206};
207
208struct freelQ {
209 unsigned int credits; /* # of available RX buffers */
210 unsigned int entries_n; /* free list capacity */
211 u16 pidx; /* producer index (SW) */
212 u16 cidx; /* consumer index (HW) */
213 u16 rx_buffer_size; /* Buffer size on this free list */
214 u16 dma_offset; /* DMA offset to align IP headers */
215 u8 genbit; /* current generation (=valid) bit */
216 struct freelQ_e *entries; /* HW freelist descriptor Q */
217	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
218 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
219};
220
221struct respQ {
222 u16 credits; /* # of available respQ descriptors */
223 u16 credits_pend; /* # of not yet returned descriptors */
224 u16 entries_n; /* # of response Q descriptors */
225 u16 pidx; /* producer index (HW) */
226 u16 cidx; /* consumer index (SW) */
227 u8 genbit; /* current generation(=valid) bit */
228 struct respQ_e *entries; /* HW response descriptor Q */
229 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
230};
231
232/*
233 * Main SGE data structure
234 *
235 * Interrupts are handled by a single CPU, and it is likely that on an MP
236 * system the application is migrated to another CPU. In that scenario, we
237 * try to separate the RX (in irq context) and TX state in order to decrease
238 * memory contention.
239 */
240struct sge {
241 struct adapter *adapter; /* adapter backpointer */
242 struct freelQ freelQ[SGE_FREELQ_N]; /* freelist Q(s) */
243	struct respQ respQ;		/* response Q instantiation */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246	u32 intrtimer[SGE_INTR_MAXBUCKETS]; /* interrupt timer bucket values */
247 u32 currIndex; /* current index into intrtimer[] */
248 u32 intrtimer_nres; /* no resource interrupt timer value */
249 u32 sge_control; /* shadow content of sge control reg */
250 struct sge_intr_counts intr_cnt;
251 struct timer_list ptimer;
252 struct sk_buff *pskb;
253 u32 ptimeout;
254 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned; /* command Q(s)*/
255};
256
257static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
258 unsigned int qid);
259
260/*
261 * PIO to indicate that memory mapped Q contains valid descriptor(s).
262 */
263static inline void doorbell_pio(struct sge *sge, u32 val)
264{
265 wmb();
266 t1_write_reg_4(sge->adapter, A_SG_DOORBELL, val);
267}
268
269/*
270 * Disables the DMA engine.
271 */
272void t1_sge_stop(struct sge *sge)
273{
274 t1_write_reg_4(sge->adapter, A_SG_CONTROL, 0);
275 t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
276 if (is_T2(sge->adapter))
277 del_timer_sync(&sge->ptimer);
278}
279
280static u8 ch_mac_addr[ETH_ALEN] = {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
281static void t1_espi_workaround(void *data)
282{
283 struct adapter *adapter = (struct adapter *)data;
284 struct sge *sge = adapter->sge;
285
286 if (netif_running(adapter->port[0].dev) &&
287 atomic_read(&sge->cmdQ[0].asleep)) {
288
289 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
290
291 if ((seop & 0xfff0fff) == 0xfff && sge->pskb) {
292 struct sk_buff *skb = sge->pskb;
293 if (!skb->cb[0]) {
294 memcpy(skb->data+sizeof(struct cpl_tx_pkt), ch_mac_addr, ETH_ALEN);
295 memcpy(skb->data+skb->len-10, ch_mac_addr, ETH_ALEN);
296
297 skb->cb[0] = 0xff;
298 }
299 t1_sge_tx(skb, adapter,0);
300 }
301 }
302 mod_timer(&adapter->sge->ptimer, jiffies + sge->ptimeout);
303}
304
305/*
306 * Enables the DMA engine.
307 */
308void t1_sge_start(struct sge *sge)
309{
310 t1_write_reg_4(sge->adapter, A_SG_CONTROL, sge->sge_control);
311 t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
312 if (is_T2(sge->adapter)) {
313 init_timer(&sge->ptimer);
314 sge->ptimer.function = (void *)&t1_espi_workaround;
315 sge->ptimer.data = (unsigned long)sge->adapter;
316 sge->ptimer.expires = jiffies + sge->ptimeout;
317 add_timer(&sge->ptimer);
318 }
319}
320
321/*
322 * Creates a t1_sge structure and returns suggested resource parameters.
323 */
324struct sge * __devinit t1_sge_create(struct adapter *adapter,
325 struct sge_params *p)
326{
327 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
328
329 if (!sge)
330 return NULL;
331 memset(sge, 0, sizeof(*sge));
332
333 if (is_T2(adapter))
334 sge->ptimeout = 1; /* finest allowed */
335
336 sge->adapter = adapter;
337 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : SGE_RX_OFFSET;
338 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
339
340 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
341 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
342 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
343 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
344 p->rx_coalesce_usecs = SGE_INTRTIMER1;
345 p->last_rx_coalesce_raw = SGE_INTRTIMER1 *
346 (board_info(sge->adapter)->clock_core / 1000000);
347 p->default_rx_coalesce_usecs = SGE_INTRTIMER1;
348 p->coalesce_enable = 0; /* Turn off adaptive algorithm by default */
349 p->sample_interval_usecs = 0;
350 return sge;
351}
352
353/*
354 * Frees all RX buffers on the freelist Q. The caller must make sure that
355 * the SGE is turned off before calling this function.
356 */
357static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *Q)
358{
359 unsigned int cidx = Q->cidx, credits = Q->credits;
360
361 while (credits--) {
362 struct freelQ_ce *ce = &Q->centries[cidx];
363
364 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
365 pci_unmap_len(ce, dma_len),
366 PCI_DMA_FROMDEVICE);
367 dev_kfree_skb(ce->skb);
368 ce->skb = NULL;
369 if (++cidx == Q->entries_n)
370 cidx = 0;
371 }
372}
373
374/*
375 * Free RX free list and response queue resources.
376 */
377static void free_rx_resources(struct sge *sge)
378{
379 struct pci_dev *pdev = sge->adapter->pdev;
380 unsigned int size, i;
381
382 if (sge->respQ.entries) {
383 size = sizeof(struct respQ_e) * sge->respQ.entries_n;
384 pci_free_consistent(pdev, size, sge->respQ.entries,
385 sge->respQ.dma_addr);
386 }
387
388 for (i = 0; i < SGE_FREELQ_N; i++) {
389 struct freelQ *Q = &sge->freelQ[i];
390
391 if (Q->centries) {
392 free_freelQ_buffers(pdev, Q);
393 kfree(Q->centries);
394 }
395 if (Q->entries) {
396 size = sizeof(struct freelQ_e) * Q->entries_n;
397 pci_free_consistent(pdev, size, Q->entries,
398 Q->dma_addr);
399 }
400 }
401}
402
403/*
404 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
405 * response Q.
406 */
407static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
408{
409 struct pci_dev *pdev = sge->adapter->pdev;
410 unsigned int size, i;
411
412 for (i = 0; i < SGE_FREELQ_N; i++) {
413 struct freelQ *Q = &sge->freelQ[i];
414
415 Q->genbit = 1;
416 Q->entries_n = p->freelQ_size[i];
417 Q->dma_offset = SGE_RX_OFFSET - sge->rx_pkt_pad;
418 size = sizeof(struct freelQ_e) * Q->entries_n;
419 Q->entries = (struct freelQ_e *)
420 pci_alloc_consistent(pdev, size, &Q->dma_addr);
421 if (!Q->entries)
422 goto err_no_mem;
423 memset(Q->entries, 0, size);
424 Q->centries = kcalloc(Q->entries_n, sizeof(struct freelQ_ce),
425 GFP_KERNEL);
426 if (!Q->centries)
427 goto err_no_mem;
428 }
429
430 /*
431 * Calculate the buffer sizes for the two free lists. FL0 accommodates
432	 * regular-sized Ethernet frames; FL1 is sized not to exceed 16K,
433 * including all the sk_buff overhead.
434 *
435 * Note: For T2 FL0 and FL1 are reversed.
436 */
437 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
438 sizeof(struct cpl_rx_data) +
439 sge->freelQ[!sge->jumbo_fl].dma_offset;
440 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
441 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
442
443 sge->respQ.genbit = 1;
444 sge->respQ.entries_n = SGE_RESPQ_E_N;
445 sge->respQ.credits = SGE_RESPQ_E_N;
446 size = sizeof(struct respQ_e) * sge->respQ.entries_n;
447 sge->respQ.entries = (struct respQ_e *)
448 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
449 if (!sge->respQ.entries)
450 goto err_no_mem;
451 memset(sge->respQ.entries, 0, size);
452 return 0;
453
454err_no_mem:
455 free_rx_resources(sge);
456 return -ENOMEM;
457}
458
459/*
460 * Frees 'credits_pend' TX buffers and returns the credits to Q->credits.
461 *
462 * The total size of the freed buffers is accumulated in @*totpayload for
463 * the adaptive algorithm; this function does not initialize that argument.
464 *
465 */
466static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *Q,
467 unsigned int credits_pend, unsigned int *totpayload)
468{
469 struct pci_dev *pdev = sge->adapter->pdev;
470 struct sk_buff *skb;
471 struct cmdQ_ce *ce, *cq = Q->centries;
472 unsigned int entries_n = Q->entries_n, cidx = Q->cidx,
473 i = credits_pend;
474
475
476 ce = &cq[cidx];
477 while (i--) {
478 if (ce->single)
479 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
480 pci_unmap_len(ce, dma_len),
481 PCI_DMA_TODEVICE);
482 else
483 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
484 pci_unmap_len(ce, dma_len),
485 PCI_DMA_TODEVICE);
486 if (totpayload)
487 *totpayload += pci_unmap_len(ce, dma_len);
488
489 skb = ce->skb;
490 if (skb)
491 dev_kfree_skb_irq(skb);
492
493 ce++;
494 if (++cidx == entries_n) {
495 cidx = 0;
496 ce = cq;
497 }
498 }
499
500 Q->cidx = cidx;
501 atomic_add(credits_pend, &Q->credits);
502}
503
504/*
505 * Free TX resources.
506 *
507 * Assumes that SGE is stopped and all interrupts are disabled.
508 */
509static void free_tx_resources(struct sge *sge)
510{
511 struct pci_dev *pdev = sge->adapter->pdev;
512 unsigned int size, i;
513
514 for (i = 0; i < SGE_CMDQ_N; i++) {
515 struct cmdQ *Q = &sge->cmdQ[i];
516
517 if (Q->centries) {
518 unsigned int pending = Q->entries_n -
519 atomic_read(&Q->credits);
520
521 if (pending)
522 free_cmdQ_buffers(sge, Q, pending, NULL);
523 kfree(Q->centries);
524 }
525 if (Q->entries) {
526 size = sizeof(struct cmdQ_e) * Q->entries_n;
527 pci_free_consistent(pdev, size, Q->entries,
528 Q->dma_addr);
529 }
530 }
531}
532
533/*
534 * Allocates basic TX resources, consisting of memory mapped command Qs.
535 */
536static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
537{
538 struct pci_dev *pdev = sge->adapter->pdev;
539 unsigned int size, i;
540
541 for (i = 0; i < SGE_CMDQ_N; i++) {
542 struct cmdQ *Q = &sge->cmdQ[i];
543
544 Q->genbit = 1;
545 Q->entries_n = p->cmdQ_size[i];
546 atomic_set(&Q->credits, Q->entries_n);
547 atomic_set(&Q->asleep, 1);
548 spin_lock_init(&Q->Qlock);
549 size = sizeof(struct cmdQ_e) * Q->entries_n;
550 Q->entries = (struct cmdQ_e *)
551 pci_alloc_consistent(pdev, size, &Q->dma_addr);
552 if (!Q->entries)
553 goto err_no_mem;
554 memset(Q->entries, 0, size);
555 Q->centries = kcalloc(Q->entries_n, sizeof(struct cmdQ_ce),
556 GFP_KERNEL);
557 if (!Q->centries)
558 goto err_no_mem;
559 }
560
561 return 0;
562
563err_no_mem:
564 free_tx_resources(sge);
565 return -ENOMEM;
566}
567
568static inline void setup_ring_params(struct adapter *adapter, u64 addr,
569 u32 size, int base_reg_lo,
570 int base_reg_hi, int size_reg)
571{
572 t1_write_reg_4(adapter, base_reg_lo, (u32)addr);
573 t1_write_reg_4(adapter, base_reg_hi, addr >> 32);
574 t1_write_reg_4(adapter, size_reg, size);
575}
576
577/*
578 * Enable/disable VLAN acceleration.
579 */
580void t1_set_vlan_accel(struct adapter *adapter, int on_off)
581{
582 struct sge *sge = adapter->sge;
583
584 sge->sge_control &= ~F_VLAN_XTRACT;
585 if (on_off)
586 sge->sge_control |= F_VLAN_XTRACT;
587 if (adapter->open_device_map) {
588 t1_write_reg_4(adapter, A_SG_CONTROL, sge->sge_control);
589 t1_read_reg_4(adapter, A_SG_CONTROL); /* flush */
590 }
591}
592
593/*
594 * Sets the interrupt latency timer when the adaptive Rx coalescing
595 * is turned off. It does nothing while adaptive coalescing is enabled.
596 *
597 * This routine relies on the fact that the caller has already set
598 * the adaptive policy in adapter->sge_params before calling it.
599 */
600int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
601{
602 if (!p->coalesce_enable) {
603 u32 newTimer = p->rx_coalesce_usecs *
604 (board_info(sge->adapter)->clock_core / 1000000);
605
606 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, newTimer);
607 }
608 return 0;
609}
610
611/*
612 * Programs the various SGE registers. The engine is not enabled yet, but
613 * sge->sge_control is set up and ready to go.
614 */
615static void configure_sge(struct sge *sge, struct sge_params *p)
616{
617 struct adapter *ap = sge->adapter;
618 int i;
619
620 t1_write_reg_4(ap, A_SG_CONTROL, 0);
621 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].entries_n,
622 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
623 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].entries_n,
624 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
625 setup_ring_params(ap, sge->freelQ[0].dma_addr,
626 sge->freelQ[0].entries_n, A_SG_FL0BASELWR,
627 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
628 setup_ring_params(ap, sge->freelQ[1].dma_addr,
629 sge->freelQ[1].entries_n, A_SG_FL1BASELWR,
630 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
631
632 /* The threshold comparison uses <. */
633 t1_write_reg_4(ap, A_SG_FLTHRESHOLD, SGE_RX_SM_BUF_SIZE + 1);
634
635 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.entries_n,
636 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
637 t1_write_reg_4(ap, A_SG_RSPQUEUECREDIT, (u32)sge->respQ.entries_n);
638
639 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
640 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
641 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
642 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
643
644#if defined(__BIG_ENDIAN_BITFIELD)
645 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
646#endif
647
648 /*
649	 * Initialize the SGE Interrupt Timer array:
650 * intrtimer[0] = (SGE_INTRTIMER0) usec
651 * intrtimer[0<i<5] = (SGE_INTRTIMER0 + i*2) usec
652 * intrtimer[4<i<10] = ((i - 3) * 6) usec
653 * intrtimer[10] = (SGE_INTRTIMER1) usec
654 *
655 */
656 sge->intrtimer[0] = board_info(sge->adapter)->clock_core / 1000000;
657 for (i = 1; i < SGE_INTR_LATBUCKETS; ++i) {
658 sge->intrtimer[i] = SGE_INTRTIMER0 + (2 * i);
659 sge->intrtimer[i] *= sge->intrtimer[0];
660 }
661 for (i = SGE_INTR_LATBUCKETS; i < SGE_INTR_MAXBUCKETS - 1; ++i) {
662 sge->intrtimer[i] = (i - 3) * 6;
663 sge->intrtimer[i] *= sge->intrtimer[0];
664 }
665 sge->intrtimer[SGE_INTR_MAXBUCKETS - 1] =
666 sge->intrtimer[0] * SGE_INTRTIMER1;
667 /* Initialize resource timer */
668 sge->intrtimer_nres = sge->intrtimer[0] * SGE_INTRTIMER_NRES;
669 /* Finally finish initialization of intrtimer[0] */
670 sge->intrtimer[0] *= SGE_INTRTIMER0;
671 /* Initialize for a throughput oriented workload */
672 sge->currIndex = SGE_INTR_MAXBUCKETS - 1;
673
674 if (p->coalesce_enable)
675 t1_write_reg_4(ap, A_SG_INTRTIMER,
676 sge->intrtimer[sge->currIndex]);
677 else
678 t1_sge_set_coalesce_params(sge, p);
679}
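/*
 * A rough sketch of the resulting units, assuming the 125 MHz core clock
 * listed for the boards in t1_board[]: intrtimer[0] starts out as
 * clock_core / 1000000 = 125, i.e. core-clock ticks per microsecond, and
 * every bucket entry is then scaled by that factor, so A_SG_INTRTIMER ends
 * up being programmed in core-clock ticks rather than in microseconds.
 */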
680
681/*
682 * Return the payload capacity of the jumbo free-list buffers.
683 */
684static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
685{
686 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
687 sizeof(struct cpl_rx_data) - SGE_RX_OFFSET + sge->rx_pkt_pad;
688}
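/*
 * Put differently, the capacity is the jumbo buffer size minus the
 * cpl_rx_data header and the software alignment offset
 * (SGE_RX_OFFSET - rx_pkt_pad, i.e. the free list's dma_offset).
 */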
689
690/*
691 * Allocates both RX and TX resources and configures the SGE. However,
692 * the hardware is not enabled yet.
693 */
694int t1_sge_configure(struct sge *sge, struct sge_params *p)
695{
696 if (alloc_rx_resources(sge, p))
697 return -ENOMEM;
698 if (alloc_tx_resources(sge, p)) {
699 free_rx_resources(sge);
700 return -ENOMEM;
701 }
702 configure_sge(sge, p);
703
704 /*
705 * Now that we have sized the free lists calculate the payload
706 * capacity of the large buffers. Other parts of the driver use
707 * this to set the max offload coalescing size so that RX packets
708 * do not overflow our large buffers.
709 */
710 p->large_buf_capacity = jumbo_payload_capacity(sge);
711 return 0;
712}
713
714/*
715 * Frees all SGE-related resources and the sge structure itself.
716 */
717void t1_sge_destroy(struct sge *sge)
718{
719 if (sge->pskb)
720 dev_kfree_skb(sge->pskb);
721 free_tx_resources(sge);
722 free_rx_resources(sge);
723 kfree(sge);
724}
725
726/*
727 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
728 * context Q) until the Q is full or alloc_skb fails.
729 *
730 * It is possible that the generation bits already match, indicating that the
731 * buffer is already valid and nothing needs to be done. This happens when we
732 * copied a received buffer into a new sk_buff during the interrupt processing.
733 *
734 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
735 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
736 * aligned.
737 */
738static void refill_free_list(struct sge *sge, struct freelQ *Q)
739{
740 struct pci_dev *pdev = sge->adapter->pdev;
741 struct freelQ_ce *ce = &Q->centries[Q->pidx];
742 struct freelQ_e *e = &Q->entries[Q->pidx];
743 unsigned int dma_len = Q->rx_buffer_size - Q->dma_offset;
744
745
746 while (Q->credits < Q->entries_n) {
747 if (e->GenerationBit != Q->genbit) {
748 struct sk_buff *skb;
749 dma_addr_t mapping;
750
751 skb = alloc_skb(Q->rx_buffer_size, GFP_ATOMIC);
752 if (!skb)
753 break;
754 if (Q->dma_offset)
755 skb_reserve(skb, Q->dma_offset);
756 mapping = pci_map_single(pdev, skb->data, dma_len,
757 PCI_DMA_FROMDEVICE);
758 ce->skb = skb;
759 pci_unmap_addr_set(ce, dma_addr, mapping);
760 pci_unmap_len_set(ce, dma_len, dma_len);
761 e->AddrLow = (u32)mapping;
762 e->AddrHigh = (u64)mapping >> 32;
763 e->BufferLength = dma_len;
764 e->GenerationBit = e->GenerationBit2 = Q->genbit;
765 }
766
767 e++;
768 ce++;
769 if (++Q->pidx == Q->entries_n) {
770 Q->pidx = 0;
771 Q->genbit ^= 1;
772 ce = Q->centries;
773 e = Q->entries;
774 }
775 Q->credits++;
776 }
777
778}
779
780/*
781 * Calls refill_free_list for both freelist Qs. If we cannot
782 * fill at least 1/4 of both Qs, we go into 'few interrupt mode' in order
783 * to give the system time to free up resources.
784 */
785static void freelQs_empty(struct sge *sge)
786{
787 u32 irq_reg = t1_read_reg_4(sge->adapter, A_SG_INT_ENABLE);
788 u32 irqholdoff_reg;
789
790 refill_free_list(sge, &sge->freelQ[0]);
791 refill_free_list(sge, &sge->freelQ[1]);
792
793 if (sge->freelQ[0].credits > (sge->freelQ[0].entries_n >> 2) &&
794 sge->freelQ[1].credits > (sge->freelQ[1].entries_n >> 2)) {
795 irq_reg |= F_FL_EXHAUSTED;
796 irqholdoff_reg = sge->intrtimer[sge->currIndex];
797 } else {
798 /* Clear the F_FL_EXHAUSTED interrupts for now */
799 irq_reg &= ~F_FL_EXHAUSTED;
800 irqholdoff_reg = sge->intrtimer_nres;
801 }
802 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, irqholdoff_reg);
803 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, irq_reg);
804
805 /* We reenable the Qs to force a freelist GTS interrupt later */
806 doorbell_pio(sge, F_FL0_ENABLE | F_FL1_ENABLE);
807}
808
809#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
810#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
811#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
812 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
813
814/*
815 * Disable SGE Interrupts
816 */
817void t1_sge_intr_disable(struct sge *sge)
818{
819 u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);
820
821 t1_write_reg_4(sge->adapter, A_PL_ENABLE, val & ~SGE_PL_INTR_MASK);
822 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, 0);
823}
824
825/*
826 * Enable SGE interrupts.
827 */
828void t1_sge_intr_enable(struct sge *sge)
829{
830 u32 en = SGE_INT_ENABLE;
831 u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);
832
833 if (sge->adapter->flags & TSO_CAPABLE)
834 en &= ~F_PACKET_TOO_BIG;
835 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, en);
836 t1_write_reg_4(sge->adapter, A_PL_ENABLE, val | SGE_PL_INTR_MASK);
837}
838
839/*
840 * Clear SGE interrupts.
841 */
842void t1_sge_intr_clear(struct sge *sge)
843{
844 t1_write_reg_4(sge->adapter, A_PL_CAUSE, SGE_PL_INTR_MASK);
845 t1_write_reg_4(sge->adapter, A_SG_INT_CAUSE, 0xffffffff);
846}
847
848/*
849 * SGE 'Error' interrupt handler
850 */
851int t1_sge_intr_error_handler(struct sge *sge)
852{
853 struct adapter *adapter = sge->adapter;
854 u32 cause = t1_read_reg_4(adapter, A_SG_INT_CAUSE);
855
856 if (adapter->flags & TSO_CAPABLE)
857 cause &= ~F_PACKET_TOO_BIG;
858 if (cause & F_RESPQ_EXHAUSTED)
859 sge->intr_cnt.respQ_empty++;
860 if (cause & F_RESPQ_OVERFLOW) {
861 sge->intr_cnt.respQ_overflow++;
862 CH_ALERT("%s: SGE response queue overflow\n",
863 adapter->name);
864 }
865 if (cause & F_FL_EXHAUSTED) {
866 sge->intr_cnt.freelistQ_empty++;
867 freelQs_empty(sge);
868 }
869 if (cause & F_PACKET_TOO_BIG) {
870 sge->intr_cnt.pkt_too_big++;
871 CH_ALERT("%s: SGE max packet size exceeded\n",
872 adapter->name);
873 }
874 if (cause & F_PACKET_MISMATCH) {
875 sge->intr_cnt.pkt_mismatch++;
876 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
877 }
878 if (cause & SGE_INT_FATAL)
879 t1_fatal_err(adapter);
880
881 t1_write_reg_4(adapter, A_SG_INT_CAUSE, cause);
882 return 0;
883}
884
885/*
886 * The following code is copied from 2.6, where the skb_pull is doing the
887 * right thing and only pulls ETH_HLEN.
888 *
889 * Determine the packet's protocol ID. The rule here is that we
890 * assume 802.3 if the type field is short enough to be a length.
891 * This is normal practice and works for any 'now in use' protocol.
892 */
893static unsigned short sge_eth_type_trans(struct sk_buff *skb,
894 struct net_device *dev)
895{
896 struct ethhdr *eth;
897 unsigned char *rawp;
898
899 skb->mac.raw = skb->data;
900 skb_pull(skb, ETH_HLEN);
901 eth = (struct ethhdr *)skb->mac.raw;
902
903 if (*eth->h_dest&1) {
904 if(memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
905 skb->pkt_type = PACKET_BROADCAST;
906 else
907 skb->pkt_type = PACKET_MULTICAST;
908 }
909
910 /*
911 * This ALLMULTI check should be redundant by 1.4
912 * so don't forget to remove it.
913 *
914	 * Seems you forgot to remove it. All silly devices
915	 * seem to set IFF_PROMISC.
916 */
917
918 else if (1 /*dev->flags&IFF_PROMISC*/)
919 {
920 if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN))
921 skb->pkt_type=PACKET_OTHERHOST;
922 }
923
924 if (ntohs(eth->h_proto) >= 1536)
925 return eth->h_proto;
926
927 rawp = skb->data;
928
929 /*
930 * This is a magic hack to spot IPX packets. Older Novell breaks
931 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
932 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
933 * won't work for fault tolerant netware but does for the rest.
934 */
935 if (*(unsigned short *)rawp == 0xFFFF)
936 return htons(ETH_P_802_3);
937
938 /*
939 * Real 802.2 LLC
940 */
941 return htons(ETH_P_802_2);
942}
943
944/*
945 * Prepare the received buffer and pass it up the stack. If it is small enough
946 * and allocation doesn't fail, we use a new sk_buff and copy the content.
947 */
948static unsigned int t1_sge_rx(struct sge *sge, struct freelQ *Q,
949 unsigned int len, unsigned int offload)
950{
951 struct sk_buff *skb;
952 struct adapter *adapter = sge->adapter;
953 struct freelQ_ce *ce = &Q->centries[Q->cidx];
954
955 if (len <= SGE_RX_COPY_THRESHOLD &&
956 (skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC))) {
957 struct freelQ_e *e;
958 char *src = ce->skb->data;
959
960 pci_dma_sync_single_for_cpu(adapter->pdev,
961 pci_unmap_addr(ce, dma_addr),
962 pci_unmap_len(ce, dma_len),
963 PCI_DMA_FROMDEVICE);
964 if (!offload) {
965 skb_reserve(skb, NET_IP_ALIGN);
966 src += sge->rx_pkt_pad;
967 }
968 memcpy(skb->data, src, len);
969
970 /* Reuse the entry. */
971 e = &Q->entries[Q->cidx];
972 e->GenerationBit ^= 1;
973 e->GenerationBit2 ^= 1;
974 } else {
975 pci_unmap_single(adapter->pdev, pci_unmap_addr(ce, dma_addr),
976 pci_unmap_len(ce, dma_len),
977 PCI_DMA_FROMDEVICE);
978 skb = ce->skb;
979 if (!offload && sge->rx_pkt_pad)
980 __skb_pull(skb, sge->rx_pkt_pad);
981 }
982
983 skb_put(skb, len);
984
985
986 if (unlikely(offload)) {
987 {
988 printk(KERN_ERR
989 "%s: unexpected offloaded packet, cmd %u\n",
990 adapter->name, *skb->data);
991 dev_kfree_skb_any(skb);
992 }
993 } else {
994 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)skb->data;
995
996 skb_pull(skb, sizeof(*p));
997 skb->dev = adapter->port[p->iff].dev;
998 skb->dev->last_rx = jiffies;
999 skb->protocol = sge_eth_type_trans(skb, skb->dev);
1000 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
1001 skb->protocol == htons(ETH_P_IP) &&
1002 (skb->data[9] == IPPROTO_TCP ||
1003 skb->data[9] == IPPROTO_UDP))
1004 skb->ip_summed = CHECKSUM_UNNECESSARY;
1005 else
1006 skb->ip_summed = CHECKSUM_NONE;
1007 if (adapter->vlan_grp && p->vlan_valid)
1008 vlan_hwaccel_rx(skb, adapter->vlan_grp,
1009 ntohs(p->vlan));
1010 else
1011 netif_rx(skb);
1012 }
1013
1014 if (++Q->cidx == Q->entries_n)
1015 Q->cidx = 0;
1016
1017 if (unlikely(--Q->credits < Q->entries_n - SGE_FREEL_REFILL_THRESH))
1018 refill_free_list(sge, Q);
1019 return 1;
1020}
1021
1022
1023/*
1024 * Adaptive interrupt timer logic to keep the CPU utilization to
1025 * manageable levels. Basically, as the Average Packet Size (APS)
1026 * gets higher, the interrupt latency setting gets longer. Every
1027 * SGE_INTR_BUCKETSIZE (of 100B) causes a bump of 2usec to the
1028 * base value of SGE_INTRTIMER0. At large values of payload the
1029 * latency hits the ceiling value of SGE_INTRTIMER1 stored at
1030 * index SGE_INTR_MAXBUCKETS-1 in sge->intrtimer[].
1031 *
1032 * sge->currIndex caches the last index to save unneeded PIOs.
1033 */
1034static inline void update_intr_timer(struct sge *sge, unsigned int avg_payload)
1035{
1036 unsigned int newIndex;
1037
1038 newIndex = avg_payload / SGE_INTR_BUCKETSIZE;
1039 if (newIndex > SGE_INTR_MAXBUCKETS - 1) {
1040 newIndex = SGE_INTR_MAXBUCKETS - 1;
1041 }
1042 /* Save a PIO with this check....maybe */
1043 if (newIndex != sge->currIndex) {
1044 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER,
1045 sge->intrtimer[newIndex]);
1046 sge->currIndex = newIndex;
1047 sge->adapter->params.sge.last_rx_coalesce_raw =
1048 sge->intrtimer[newIndex];
1049 }
1050}
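/*
 * Worked example, purely illustrative: with the 100B bucket size noted
 * above, an average payload of 750B selects intrtimer[7]; averages of
 * SGE_INTR_MAXBUCKETS * SGE_INTR_BUCKETSIZE bytes or more clamp to the
 * ceiling entry intrtimer[SGE_INTR_MAXBUCKETS - 1].
 */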
1051
1052/*
1053 * Returns true if command queue q_num has enough available descriptors that
1054 * we can resume Tx operation after temporarily disabling its packet queue.
1055 */
1056static inline int enough_free_Tx_descs(struct sge *sge, int q_num)
1057{
1058 return atomic_read(&sge->cmdQ[q_num].credits) >
1059 (sge->cmdQ[q_num].entries_n >> 2);
1060}
1061
1062/*
1063 * Main interrupt handler, optimized assuming that we took a 'DATA'
1064 * interrupt.
1065 *
1066 * 1. Clear the interrupt
1067 * 2. Loop while we find valid descriptors and process them; accumulate
1068 * information that can be processed after the loop
1069 * 3. Tell the SGE at which index we stopped processing descriptors
1070 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1071 * outstanding TX buffers waiting, replenish RX buffers, potentially
1072 * reenable upper layers if they were turned off due to lack of TX
1073 * resources which are available again.
1074 * 5. If we took an interrupt but no valid respQ descriptors were found, we
1075 * let the slow_intr_handler run and do error handling.
1076 */
1077irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1078{
1079 struct net_device *netdev;
1080 struct adapter *adapter = cookie;
1081 struct sge *sge = adapter->sge;
1082 struct respQ *Q = &sge->respQ;
1083 unsigned int credits = Q->credits, flags = 0, ret = 0;
1084 unsigned int tot_rxpayload = 0, tot_txpayload = 0, n_rx = 0, n_tx = 0;
1085 unsigned int credits_pend[SGE_CMDQ_N] = { 0, 0 };
1086
1087 struct respQ_e *e = &Q->entries[Q->cidx];
1088 prefetch(e);
1089
1090 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_SGE_DATA);
1091
1092
1093 while (e->GenerationBit == Q->genbit) {
1094 if (--credits < SGE_RESPQ_REPLENISH_THRES) {
1095 u32 n = Q->entries_n - credits - 1;
1096
1097 t1_write_reg_4(adapter, A_SG_RSPQUEUECREDIT, n);
1098 credits += n;
1099 }
1100 if (likely(e->DataValid)) {
1101 if (!e->Sop || !e->Eop)
1102 BUG();
1103 t1_sge_rx(sge, &sge->freelQ[e->FreelistQid],
1104 e->BufferLength, e->Offload);
1105 tot_rxpayload += e->BufferLength;
1106 ++n_rx;
1107 }
1108 flags |= e->Qsleeping;
1109 credits_pend[0] += e->Cmdq0CreditReturn;
1110 credits_pend[1] += e->Cmdq1CreditReturn;
1111
1112#ifdef CONFIG_SMP
1113 /*
1114		 * If enough cmdQ0 buffers have finished DMAing, free them so
1115 * anyone that may be waiting for their release can continue.
1116 * We do this only on MP systems to allow other CPUs to proceed
1117 * promptly. UP systems can wait for the free_cmdQ_buffers()
1118 * calls after this loop as the sole CPU is currently busy in
1119 * this loop.
1120 */
1121 if (unlikely(credits_pend[0] > SGE_FREEL_REFILL_THRESH)) {
1122 free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0],
1123 &tot_txpayload);
1124 n_tx += credits_pend[0];
1125 credits_pend[0] = 0;
1126 }
1127#endif
1128 ret++;
1129 e++;
1130 if (unlikely(++Q->cidx == Q->entries_n)) {
1131 Q->cidx = 0;
1132 Q->genbit ^= 1;
1133 e = Q->entries;
1134 }
1135 }
1136
1137 Q->credits = credits;
1138 t1_write_reg_4(adapter, A_SG_SLEEPING, Q->cidx);
1139
1140 if (credits_pend[0])
1141 free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0], &tot_txpayload);
1142 if (credits_pend[1])
1143 free_cmdQ_buffers(sge, &sge->cmdQ[1], credits_pend[1], &tot_txpayload);
1144
1145 /* Do any coalescing and interrupt latency timer adjustments */
1146 if (adapter->params.sge.coalesce_enable) {
1147 unsigned int avg_txpayload = 0, avg_rxpayload = 0;
1148
1149 n_tx += credits_pend[0] + credits_pend[1];
1150
1151 /*
1152		 * Choose the larger average payload size to increase
1153		 * throughput and reduce both CPU utilization and interrupts/s.
1154		 *
1155		 * Throughput behavior is favored in mixed mode.
1156 */
1157 if (n_tx)
1158 avg_txpayload = tot_txpayload/n_tx;
1159 if (n_rx)
1160 avg_rxpayload = tot_rxpayload/n_rx;
1161
1162 if (n_tx && avg_txpayload > avg_rxpayload){
1163 update_intr_timer(sge, avg_txpayload);
1164 } else if (n_rx) {
1165 update_intr_timer(sge, avg_rxpayload);
1166 }
1167 }
1168
1169 if (flags & F_CMDQ0_ENABLE) {
1170 struct cmdQ *cmdQ = &sge->cmdQ[0];
1171
1172 atomic_set(&cmdQ->asleep, 1);
1173 if (atomic_read(&cmdQ->pio_pidx) != cmdQ->pidx) {
1174 doorbell_pio(sge, F_CMDQ0_ENABLE);
1175 atomic_set(&cmdQ->pio_pidx, cmdQ->pidx);
1176 }
1177 }
1178 if (unlikely(flags & (F_FL0_ENABLE | F_FL1_ENABLE)))
1179 freelQs_empty(sge);
1180
1181 netdev = adapter->port[0].dev;
1182 if (unlikely(netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1183 enough_free_Tx_descs(sge, 0) &&
1184 enough_free_Tx_descs(sge, 1))) {
1185 netif_wake_queue(netdev);
1186 }
1187 if (unlikely(!ret))
1188 ret = t1_slow_intr_handler(adapter);
1189
1190 return IRQ_RETVAL(ret != 0);
1191}
1192
1193/*
1194 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1195 *
1196 * The code figures out how many entries the sk_buff will require in the
1197 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1198 * has complete. Then, it doesn't access the global structure anymore, but
1199 * uses the corresponding fields on the stack. In conjuction with a spinlock
1200 * around that code, we can make the function reentrant without holding the
1201 * lock when we actually enqueue (which might be expensive, especially on
1202 * architectures with IO MMUs).
1203 */
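/*
 * Note on descriptor write ordering: each entry's generation bits are
 * written last, behind a wmb(), so that the SGE, which looks for entries
 * whose generation bits match the queue's current genbit, should never
 * observe a partially written descriptor.
 */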
1204static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1205 unsigned int qid)
1206{
1207 struct sge *sge = adapter->sge;
1208 struct cmdQ *Q = &sge->cmdQ[qid];
1209 struct cmdQ_e *e;
1210 struct cmdQ_ce *ce;
1211 dma_addr_t mapping;
1212 unsigned int credits, pidx, genbit;
1213
1214 unsigned int count = 1 + skb_shinfo(skb)->nr_frags;
1215
1216 /*
1217 * Coming from the timer
1218 */
1219 if ((skb == sge->pskb)) {
1220 /*
1221 * Quit if any cmdQ activities
1222 */
1223 if (!spin_trylock(&Q->Qlock))
1224 return 0;
1225 if (atomic_read(&Q->credits) != Q->entries_n) {
1226 spin_unlock(&Q->Qlock);
1227 return 0;
1228 }
1229 }
1230 else
1231 spin_lock(&Q->Qlock);
1232
1233 genbit = Q->genbit;
1234 pidx = Q->pidx;
1235 credits = atomic_read(&Q->credits);
1236
1237 credits -= count;
1238 atomic_sub(count, &Q->credits);
1239 Q->pidx += count;
1240 if (Q->pidx >= Q->entries_n) {
1241 Q->pidx -= Q->entries_n;
1242 Q->genbit ^= 1;
1243 }
1244
1245 if (unlikely(credits < (MAX_SKB_FRAGS + 1))) {
1246 sge->intr_cnt.cmdQ_full[qid]++;
1247 netif_stop_queue(adapter->port[0].dev);
1248 }
1249 spin_unlock(&Q->Qlock);
1250
1251 mapping = pci_map_single(adapter->pdev, skb->data,
1252 skb->len - skb->data_len, PCI_DMA_TODEVICE);
1253 ce = &Q->centries[pidx];
1254 ce->skb = NULL;
1255 pci_unmap_addr_set(ce, dma_addr, mapping);
1256 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
1257 ce->single = 1;
1258
1259 e = &Q->entries[pidx];
1260 e->Sop = 1;
1261 e->DataValid = 1;
1262 e->BufferLength = skb->len - skb->data_len;
1263 e->AddrHigh = (u64)mapping >> 32;
1264 e->AddrLow = (u32)mapping;
1265
1266 if (--count > 0) {
1267 unsigned int i;
1268
1269 e->Eop = 0;
1270 wmb();
1271 e->GenerationBit = e->GenerationBit2 = genbit;
1272
1273 for (i = 0; i < count; i++) {
1274 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1275
1276 ce++; e++;
1277 if (++pidx == Q->entries_n) {
1278 pidx = 0;
1279 genbit ^= 1;
1280 ce = Q->centries;
1281 e = Q->entries;
1282 }
1283
1284 mapping = pci_map_page(adapter->pdev, frag->page,
1285 frag->page_offset,
1286 frag->size,
1287 PCI_DMA_TODEVICE);
1288 ce->skb = NULL;
1289 pci_unmap_addr_set(ce, dma_addr, mapping);
1290 pci_unmap_len_set(ce, dma_len, frag->size);
1291 ce->single = 0;
1292
1293 e->Sop = 0;
1294 e->DataValid = 1;
1295 e->BufferLength = frag->size;
1296 e->AddrHigh = (u64)mapping >> 32;
1297 e->AddrLow = (u32)mapping;
1298
1299 if (i < count - 1) {
1300 e->Eop = 0;
1301 wmb();
1302 e->GenerationBit = e->GenerationBit2 = genbit;
1303 }
1304 }
1305 }
1306
1307 if (skb != sge->pskb)
1308 ce->skb = skb;
1309 e->Eop = 1;
1310 wmb();
1311 e->GenerationBit = e->GenerationBit2 = genbit;
1312
1313 /*
1314 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1315	 * the doorbell if the Q is asleep. There is a natural race where
1316	 * the hardware goes to sleep just after we check; in that case
1317	 * the interrupt handler will detect the outstanding TX packet
1318	 * and ring the doorbell for us.
1319 */
1320 if (qid) {
1321 doorbell_pio(sge, F_CMDQ1_ENABLE);
1322 } else if (atomic_read(&Q->asleep)) {
1323 atomic_set(&Q->asleep, 0);
1324 doorbell_pio(sge, F_CMDQ0_ENABLE);
1325 atomic_set(&Q->pio_pidx, Q->pidx);
1326 }
1327 return 0;
1328}
1329
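/*
 * eth_type_mss packs the MSS into the low 14 bits of a 16-bit field and
 * the Ethernet encapsulation type (CPL_ETH_II or CPL_ETH_II_VLAN) into
 * the bits above it; the value is sent big-endian via htons().
 */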
1330#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
1331
1332/*
1333 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1334 */
1335int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1336{
1337 struct adapter *adapter = dev->priv;
1338 struct cpl_tx_pkt *cpl;
1339 struct ethhdr *eth;
1340 size_t max_len;
1341
1342 /*
1343 * We are using a non-standard hard_header_len and some kernel
1344 * components, such as pktgen, do not handle it right. Complain
1345 * when this happens but try to fix things up.
1346 */
1347 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1348 struct sk_buff *orig_skb = skb;
1349
1350 if (net_ratelimit())
1351 printk(KERN_ERR
1352 "%s: Tx packet has inadequate headroom\n",
1353 dev->name);
1354 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1355 dev_kfree_skb_any(orig_skb);
1356 if (!skb)
1357 return -ENOMEM;
1358 }
1359
1360 if (skb_shinfo(skb)->tso_size) {
1361 int eth_type;
1362 struct cpl_tx_pkt_lso *hdr;
1363
1364 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1365 CPL_ETH_II : CPL_ETH_II_VLAN;
1366
1367 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1368 hdr->opcode = CPL_TX_PKT_LSO;
1369 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1370 hdr->ip_hdr_words = skb->nh.iph->ihl;
1371 hdr->tcp_hdr_words = skb->h.th->doff;
1372 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1373 skb_shinfo(skb)->tso_size));
1374 hdr->len = htonl(skb->len - sizeof(*hdr));
1375 cpl = (struct cpl_tx_pkt *)hdr;
1376 } else
1377 {
1378 /*
1379 * An Ethernet packet must have at least space for
1380 * the DIX Ethernet header and be no greater than
1381		 * the MTU configured on the device. Otherwise trash the packet.
1382 */
1383 if (skb->len < ETH_HLEN)
1384 goto t1_start_xmit_fail2;
1385 eth = (struct ethhdr *)skb->data;
1386 if (eth->h_proto == htons(ETH_P_8021Q))
1387 max_len = dev->mtu + VLAN_ETH_HLEN;
1388 else
1389 max_len = dev->mtu + ETH_HLEN;
1390 if (skb->len > max_len)
1391 goto t1_start_xmit_fail2;
1392
1393 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1394 skb->ip_summed == CHECKSUM_HW &&
1395 skb->nh.iph->protocol == IPPROTO_UDP &&
1396 skb_checksum_help(skb, 0))
1397 goto t1_start_xmit_fail3;
1398
1399
1400 if (!adapter->sge->pskb) {
1401 if (skb->protocol == htons(ETH_P_ARP) &&
1402 skb->nh.arph->ar_op == htons(ARPOP_REQUEST))
1403 adapter->sge->pskb = skb;
1404 }
1405 cpl = (struct cpl_tx_pkt *)skb_push(skb, sizeof(*cpl));
1406 cpl->opcode = CPL_TX_PKT;
1407 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1408 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1409 /* the length field isn't used so don't bother setting it */
1410 }
1411 cpl->iff = dev->if_port;
1412
1413#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1414 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1415 cpl->vlan_valid = 1;
1416 cpl->vlan = htons(vlan_tx_tag_get(skb));
1417 } else
1418#endif
1419 cpl->vlan_valid = 0;
1420
1421 dev->trans_start = jiffies;
1422 return t1_sge_tx(skb, adapter, 0);
1423
1424t1_start_xmit_fail3:
1425 printk(KERN_INFO "%s: Unable to complete checksum\n", dev->name);
1426 goto t1_start_xmit_fail1;
1427
1428t1_start_xmit_fail2:
1429 printk(KERN_INFO "%s: Invalid packet length %d, dropping\n",
1430 dev->name, skb->len);
1431
1432t1_start_xmit_fail1:
1433 dev_kfree_skb_any(skb);
1434 return 0;
1435}
1436
1437void t1_sge_set_ptimeout(adapter_t *adapter, u32 val)
1438{
1439 struct sge *sge = adapter->sge;
1440
1441 if (is_T2(adapter))
1442 sge->ptimeout = max((u32)((HZ * val) / 1000), (u32)1);
1443}
1444
1445u32 t1_sge_get_ptimeout(adapter_t *adapter)
1446{
1447 struct sge *sge = adapter->sge;
1448
1449 return (is_T2(adapter) ? ((sge->ptimeout * 1000) / HZ) : 0);
1450}
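/*
 * Example of the conversion above, assuming HZ=1000 purely for
 * illustration: t1_sge_set_ptimeout(adapter, 5) stores
 * max((1000 * 5) / 1000, 1) = 5 jiffies in sge->ptimeout, and
 * t1_sge_get_ptimeout() then reports (5 * 1000) / 1000 = 5 ms again.
 */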
1451
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 000000000000..140f896def60
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,79 @@
1/*****************************************************************************
2 * *
3 * File: sge.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CHELSIO_LINUX_SGE_H_
40#define _CHELSIO_LINUX_SGE_H_
41
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <asm/byteorder.h>
45
46struct sge_intr_counts {
47 unsigned int respQ_empty; /* # times respQ empty */
48 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
49 unsigned int freelistQ_empty; /* # times freelist empty */
50 unsigned int pkt_too_big; /* packet too large (fatal) */
51 unsigned int pkt_mismatch;
52 unsigned int cmdQ_full[2]; /* not HW interrupt, host cmdQ[] full */
53};
54
55struct sk_buff;
56struct net_device;
57struct cxgbdev;
58struct adapter;
59struct sge_params;
60struct sge;
61
62struct sge *t1_sge_create(struct adapter *, struct sge_params *);
63int t1_sge_configure(struct sge *, struct sge_params *);
64int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
65void t1_sge_destroy(struct sge *);
66irqreturn_t t1_interrupt(int, void *, struct pt_regs *);
67int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
68void t1_set_vlan_accel(struct adapter *adapter, int on_off);
69void t1_sge_start(struct sge *);
70void t1_sge_stop(struct sge *);
71int t1_sge_intr_error_handler(struct sge *);
72void t1_sge_intr_enable(struct sge *);
73void t1_sge_intr_disable(struct sge *);
74void t1_sge_intr_clear(struct sge *);
75
76void t1_sge_set_ptimeout(adapter_t *adapter, u32 val);
77u32 t1_sge_get_ptimeout(adapter_t *adapter);
78
79#endif /* _CHELSIO_LINUX_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 000000000000..a90a3f95fcac
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,831 @@
1/*****************************************************************************
2 * *
3 * File: subr.c *
4 * $Revision: 1.12 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "elmer0.h"
42#include "regs.h"
43
44#include "gmac.h"
45#include "cphy.h"
46#include "sge.h"
47#include "tp.h"
48#include "espi.h"
49
50/**
51 * t1_wait_op_done - wait until an operation is completed
52 * @adapter: the adapter performing the operation
53 * @reg: the register to check for completion
54 * @mask: a single-bit field within @reg that indicates completion
55 * @polarity: the value of the field when the operation is completed
56 * @attempts: number of check iterations
57 * @delay: delay in usecs between iterations
58 *
59 * Wait until an operation is completed by checking a bit in a register
60 * up to @attempts times. Returns %0 if the operation completes and %1
61 * otherwise.
62 */
63static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
64 int attempts, int delay)
65{
66 while (1) {
67 u32 val = t1_read_reg_4(adapter, reg) & mask;
68
69 if (!!val == polarity)
70 return 0;
71 if (--attempts == 0)
72 return 1;
73 if (delay)
74 udelay(delay);
75 }
76}
77
78#define TPI_ATTEMPTS 50
79
80/*
81 * Write a register over the TPI interface (unlocked and locked versions).
82 */
83static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
84{
85 int tpi_busy;
86
87 t1_write_reg_4(adapter, A_TPI_ADDR, addr);
88 t1_write_reg_4(adapter, A_TPI_WR_DATA, value);
89 t1_write_reg_4(adapter, A_TPI_CSR, F_TPIWR);
90
91 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
92 TPI_ATTEMPTS, 3);
93 if (tpi_busy)
94 CH_ALERT("%s: TPI write to 0x%x failed\n",
95 adapter->name, addr);
96 return tpi_busy;
97}
98
99int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
100{
101 int ret;
102
103 TPI_LOCK(adapter);
104 ret = __t1_tpi_write(adapter, addr, value);
105 TPI_UNLOCK(adapter);
106 return ret;
107}
108
109/*
110 * Read a register over the TPI interface (unlocked and locked versions).
111 */
112static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
113{
114 int tpi_busy;
115
116 t1_write_reg_4(adapter, A_TPI_ADDR, addr);
117 t1_write_reg_4(adapter, A_TPI_CSR, 0);
118
119 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
120 TPI_ATTEMPTS, 3);
121 if (tpi_busy)
122 CH_ALERT("%s: TPI read from 0x%x failed\n",
123 adapter->name, addr);
124 else
125 *valp = t1_read_reg_4(adapter, A_TPI_RD_DATA);
126 return tpi_busy;
127}
128
129int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
130{
131 int ret;
132
133 TPI_LOCK(adapter);
134 ret = __t1_tpi_read(adapter, addr, valp);
135 TPI_UNLOCK(adapter);
136 return ret;
137}
138
139/*
140 * Set a TPI parameter.
141 */
142static void t1_tpi_par(adapter_t *adapter, u32 value)
143{
144 t1_write_reg_4(adapter, A_TPI_PAR, V_TPIPAR(value));
145}
146
147/*
148 * Called when a port's link settings change to propagate the new values to the
149 * associated PHY and MAC. After performing the common tasks it invokes an
150 * OS-specific handler.
151 */
152/* static */ void link_changed(adapter_t *adapter, int port_id)
153{
154 int link_ok, speed, duplex, fc;
155 struct cphy *phy = adapter->port[port_id].phy;
156 struct link_config *lc = &adapter->port[port_id].link_config;
157
158 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
159
160 lc->speed = speed < 0 ? SPEED_INVALID : speed;
161 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
162 if (!(lc->requested_fc & PAUSE_AUTONEG))
163 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
164
165 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
166 /* Set MAC speed, duplex, and flow control to match PHY. */
167 struct cmac *mac = adapter->port[port_id].mac;
168
169 mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
170 lc->fc = (unsigned char)fc;
171 }
172 t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
173}
174
175static int t1_pci_intr_handler(adapter_t *adapter)
176{
177 u32 pcix_cause;
178
179 pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
180
181 if (pcix_cause) {
182 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
183 pcix_cause);
184 t1_fatal_err(adapter); /* PCI errors are fatal */
185 }
186 return 0;
187}
188
189
190/*
191 * Wait until Elmer's MI1 interface is ready for new operations.
192 */
193static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
194{
195 int attempts = 100, busy;
196
197 do {
198 u32 val;
199
200 __t1_tpi_read(adapter, mi1_reg, &val);
201 busy = val & F_MI1_OP_BUSY;
202 if (busy)
203 udelay(10);
204 } while (busy && --attempts);
205 if (busy)
206 CH_ALERT("%s: MDIO operation timed out\n",
207 adapter->name);
208 return busy;
209}
210
211/*
212 * MI1 MDIO initialization.
213 */
214static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
215{
216 u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
217 u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
218 V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
219
220 if (!(bi->caps & SUPPORTED_10000baseT_Full))
221 val |= V_MI1_SOF(1);
222 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
223}
224
225static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
226 int reg_addr, unsigned int *valp)
227{
228 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
229
230 TPI_LOCK(adapter);
231
232 /* Write the address we want. */
233 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
234 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
235 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
236 MI1_OP_INDIRECT_ADDRESS);
237 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
238
239 /* Write the operation we want. */
240 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
241 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
242
243 /* Read the data. */
244 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
245 TPI_UNLOCK(adapter);
246 return 0;
247}
248
249static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
250 int reg_addr, unsigned int val)
251{
252 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
253
254 TPI_LOCK(adapter);
255
256 /* Write the address we want. */
257 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
258 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
259 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
260 MI1_OP_INDIRECT_ADDRESS);
261 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
262
263 /* Write the data. */
264 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
265 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
266 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
267 TPI_UNLOCK(adapter);
268 return 0;
269}
270
271static struct mdio_ops mi1_mdio_ext_ops = {
272 mi1_mdio_init,
273 mi1_mdio_ext_read,
274 mi1_mdio_ext_write
275};
276
277enum {
278 CH_BRD_N110_1F,
279 CH_BRD_N210_1F,
280 CH_BRD_T210_1F,
281};
282
283static struct board_info t1_board[] = {
284
285{ CHBT_BOARD_N110, 1/*ports#*/,
286 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
287 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
288 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
289 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
290 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
291 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
292 "Chelsio N110 1x10GBaseX NIC" },
293
294{ CHBT_BOARD_N210, 1/*ports#*/,
295 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
296 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
297 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
298 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
299 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
300 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
301 "Chelsio N210 1x10GBaseX NIC" },
302
303};
304
305struct pci_device_id t1_pci_tbl[] = {
306 CH_DEVICE(7, 0, CH_BRD_N110_1F),
307 CH_DEVICE(10, 1, CH_BRD_N210_1F),
308 { 0, }
309};
310
311/*
312 * Return the board_info structure with a given index. Out-of-range indices
313 * return NULL.
314 */
315const struct board_info *t1_get_board_info(unsigned int board_id)
316{
317 return board_id < DIMOF(t1_board) ? &t1_board[board_id] : NULL;
318}
319
320struct chelsio_vpd_t {
321 u32 format_version;
322 u8 serial_number[16];
323 u8 mac_base_address[6];
324 u8 pad[2]; /* make multiple-of-4 size requirement explicit */
325};
326
327#define EEPROMSIZE (8 * 1024)
328#define EEPROM_MAX_POLL 4
329
330/*
331 * Read SEEPROM. A zero is written to the flag register when the address is
332 * written to the Control register. The hardware device will set the flag to
333 * one when 4B have been transferred to the Data register.
334 */
335int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
336{
337 int i = EEPROM_MAX_POLL;
338 u16 val;
339
340 if (addr >= EEPROMSIZE || (addr & 3))
341 return -EINVAL;
342
343 pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
344 do {
345 udelay(50);
346 pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
347 } while (!(val & F_VPD_OP_FLAG) && --i);
348
349 if (!(val & F_VPD_OP_FLAG)) {
350 CH_ERR("%s: reading EEPROM address 0x%x failed\n",
351 adapter->name, addr);
352 return -EIO;
353 }
354 pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
355 *data = le32_to_cpu(*data);
356 return 0;
357}
358
359static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
360{
361 int addr, ret = 0;
362
363 for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
364 ret = t1_seeprom_read(adapter, addr,
365 (u32 *)((u8 *)vpd + addr));
366
367 return ret;
368}
369
370/*
371 * Read a port's MAC address from the VPD ROM.
372 */
373static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
374{
375 struct chelsio_vpd_t vpd;
376
377 if (t1_eeprom_vpd_get(adapter, &vpd))
378 return 1;
379 memcpy(mac_addr, vpd.mac_base_address, 5);
380 mac_addr[5] = vpd.mac_base_address[5] + index;
381 return 0;
382}
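/*
 * Per-port addresses are derived by adding the port index to the last
 * octet of the VPD base address, e.g. (placeholder values) a base of
 * xx:xx:xx:00:00:10 yields ...:10 for port 0 and ...:11 for port 1.
 */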
383
384/*
385 * Set up the MAC/PHY according to the requested link settings.
386 *
387 * If the PHY can auto-negotiate, first decide what to advertise, then
388 * enable/disable auto-negotiation as desired and reset.
389 *
390 * If the PHY does not auto-negotiate we just reset it.
391 *
392 * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
393 * otherwise do it later based on the outcome of auto-negotiation.
394 */
395int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
396{
397 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
398
399 if (lc->supported & SUPPORTED_Autoneg) {
400 lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
401 if (fc) {
402 lc->advertising |= ADVERTISED_ASYM_PAUSE;
403 if (fc == (PAUSE_RX | PAUSE_TX))
404 lc->advertising |= ADVERTISED_PAUSE;
405 }
406 phy->ops->advertise(phy, lc->advertising);
407
408 if (lc->autoneg == AUTONEG_DISABLE) {
409 lc->speed = lc->requested_speed;
410 lc->duplex = lc->requested_duplex;
411 lc->fc = (unsigned char)fc;
412 mac->ops->set_speed_duplex_fc(mac, lc->speed,
413 lc->duplex, fc);
414 /* Also disables autoneg */
415 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
416 phy->ops->reset(phy, 0);
417 } else
418 phy->ops->autoneg_enable(phy); /* also resets PHY */
419 } else {
420 mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
421 lc->fc = (unsigned char)fc;
422 phy->ops->reset(phy, 0);
423 }
424 return 0;
425}
426
427/*
428 * External interrupt handler for boards using elmer0.
429 */
430int elmer0_ext_intr_handler(adapter_t *adapter)
431{
432 struct cphy *phy;
433 int phy_cause;
434 u32 cause;
435
436 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
437
438 switch (board_info(adapter)->board) {
439 case CHBT_BOARD_CHT210:
440 case CHBT_BOARD_N210:
441 case CHBT_BOARD_N110:
442 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
443 phy = adapter->port[0].phy;
444 phy_cause = phy->ops->interrupt_handler(phy);
445 if (phy_cause & cphy_cause_link_change)
446 link_changed(adapter, 0);
447 }
448 break;
449 case CHBT_BOARD_8000:
450 case CHBT_BOARD_CHT110:
451 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
452 cause);
453 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
454 struct cmac *mac = adapter->port[0].mac;
455
456 mac->ops->interrupt_handler(mac);
457 }
458 if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
459 u32 mod_detect;
460
461 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
462 CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
463 mod_detect ? "removed" : "inserted");
464 }
465 break;
466 }
467 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
468 return 0;
469}
470
471/* Enables all interrupts. */
472void t1_interrupts_enable(adapter_t *adapter)
473{
474 unsigned int i;
475
476 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
477
478 t1_sge_intr_enable(adapter->sge);
479 t1_tp_intr_enable(adapter->tp);
480 if (adapter->espi) {
481 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
482 t1_espi_intr_enable(adapter->espi);
483 }
484
485 /* Enable MAC/PHY interrupts for each port. */
486 for_each_port(adapter, i) {
487 adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
488 adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
489 }
490
491 /* Enable PCIX & external chip interrupts on ASIC boards. */
492 if (t1_is_asic(adapter)) {
493 u32 pl_intr = t1_read_reg_4(adapter, A_PL_ENABLE);
494
495 /* PCI-X interrupts */
496 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
497 0xffffffff);
498
499 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
500 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
501 t1_write_reg_4(adapter, A_PL_ENABLE, pl_intr);
502 }
503}
504
505/* Disables all interrupts. */
506void t1_interrupts_disable(adapter_t* adapter)
507{
508 unsigned int i;
509
510 t1_sge_intr_disable(adapter->sge);
511 t1_tp_intr_disable(adapter->tp);
512 if (adapter->espi)
513 t1_espi_intr_disable(adapter->espi);
514
515 /* Disable MAC/PHY interrupts for each port. */
516 for_each_port(adapter, i) {
517 adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
518 adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
519 }
520
521 /* Disable PCIX & external chip interrupts. */
522 if (t1_is_asic(adapter))
523 t1_write_reg_4(adapter, A_PL_ENABLE, 0);
524
525 /* PCI-X interrupts */
526 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
527
528 adapter->slow_intr_mask = 0;
529}
530
531/* Clears all interrupts */
532void t1_interrupts_clear(adapter_t* adapter)
533{
534 unsigned int i;
535
536 t1_sge_intr_clear(adapter->sge);
537 t1_tp_intr_clear(adapter->tp);
538 if (adapter->espi)
539 t1_espi_intr_clear(adapter->espi);
540
541 /* Clear MAC/PHY interrupts for each port. */
542 for_each_port(adapter, i) {
543 adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
544 adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
545 }
546
547	/* Clear the interrupt causes for external chips and PCI-X. */
548 if (t1_is_asic(adapter)) {
549 u32 pl_intr = t1_read_reg_4(adapter, A_PL_CAUSE);
550
551 t1_write_reg_4(adapter, A_PL_CAUSE,
552 pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX);
553 }
554
555 /* PCI-X interrupts */
556 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
557}
558
559/*
560 * Slow path interrupt handler for ASICs.
561 */
562static int asic_slow_intr(adapter_t *adapter)
563{
564 u32 cause = t1_read_reg_4(adapter, A_PL_CAUSE);
565
566 cause &= adapter->slow_intr_mask;
567 if (!cause)
568 return 0;
569 if (cause & F_PL_INTR_SGE_ERR)
570 t1_sge_intr_error_handler(adapter->sge);
571 if (cause & F_PL_INTR_TP)
572 t1_tp_intr_handler(adapter->tp);
573 if (cause & F_PL_INTR_ESPI)
574 t1_espi_intr_handler(adapter->espi);
575 if (cause & F_PL_INTR_PCIX)
576 t1_pci_intr_handler(adapter);
577 if (cause & F_PL_INTR_EXT)
578 t1_elmer0_ext_intr(adapter);
579
580 /* Clear the interrupts just processed. */
581 t1_write_reg_4(adapter, A_PL_CAUSE, cause);
582 (void)t1_read_reg_4(adapter, A_PL_CAUSE); /* flush writes */
583 return 1;
584}
585
586int t1_slow_intr_handler(adapter_t *adapter)
587{
588 return asic_slow_intr(adapter);
589}
590
591/* Power sequencing is a work-around for Intel's XPAKs. */
592static void power_sequence_xpak(adapter_t* adapter)
593{
594 u32 mod_detect;
595 u32 gpo;
596
597 /* Check for XPAK */
598 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
599 if (!(ELMER0_GP_BIT5 & mod_detect)) {
600 /* XPAK is present */
601 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
602 gpo |= ELMER0_GP_BIT18;
603 t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
604 }
605}
606
607int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
608 struct adapter_params *p)
609{
610 p->chip_version = bi->chip_term;
611 p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
612 if (p->chip_version == CHBT_TERM_T1 ||
613 p->chip_version == CHBT_TERM_T2 ||
614 p->chip_version == CHBT_TERM_FPGA) {
615 u32 val = t1_read_reg_4(adapter, A_TP_PC_CONFIG);
616
617 val = G_TP_PC_REV(val);
618 if (val == 2)
619 p->chip_revision = TERM_T1B;
620 else if (val == 3)
621 p->chip_revision = TERM_T2;
622 else
623 return -1;
624 } else
625 return -1;
626 return 0;
627}
628
629/*
630 * Enable board components other than the Chelsio chip, such as external MAC
631 * and PHY.
632 */
633static int board_init(adapter_t *adapter, const struct board_info *bi)
634{
635 switch (bi->board) {
636 case CHBT_BOARD_8000:
637 case CHBT_BOARD_N110:
638 case CHBT_BOARD_N210:
639 case CHBT_BOARD_CHT210:
640 case CHBT_BOARD_COUGAR:
641 t1_tpi_par(adapter, 0xf);
642 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
643 break;
644 case CHBT_BOARD_CHT110:
645 t1_tpi_par(adapter, 0xf);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
647
648 /* TBD XXX Might not need. This fixes a problem
649 * described in the Intel SR XPAK errata.
650 */
651 power_sequence_xpak(adapter);
652 break;
653 }
654 return 0;
655}
656
657/*
658 * Initialize and configure the Terminator HW modules. Note that external
659 * MAC and PHYs are initialized separately.
660 */
661int t1_init_hw_modules(adapter_t *adapter)
662{
663 int err = -EIO;
664 const struct board_info *bi = board_info(adapter);
665
666 if (!adapter->mc4) {
667 u32 val = t1_read_reg_4(adapter, A_MC4_CFG);
668
669 t1_write_reg_4(adapter, A_MC4_CFG, val | F_READY | F_MC4_SLOW);
670 t1_write_reg_4(adapter, A_MC5_CONFIG,
671 F_M_BUS_ENABLE | F_TCAM_RESET);
672 }
673
674 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
675 bi->espi_nports))
676 goto out_err;
677
678 if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
679 goto out_err;
680
681 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
682 if (err)
683 goto out_err;
684
685 err = 0;
686 out_err:
687 return err;
688}
689
690/*
691 * Determine a card's PCI mode.
692 */
693static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
694{
695 static unsigned short speed_map[] = { 33, 66, 100, 133 };
696 u32 pci_mode;
697
698 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
699 p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
700 p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
701 p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
702}
703
704/*
705 * Release the structures holding the SW per-Terminator-HW-module state.
706 */
707void t1_free_sw_modules(adapter_t *adapter)
708{
709 unsigned int i;
710
711 for_each_port(adapter, i) {
712 struct cmac *mac = adapter->port[i].mac;
713 struct cphy *phy = adapter->port[i].phy;
714
715 if (mac)
716 mac->ops->destroy(mac);
717 if (phy)
718 phy->ops->destroy(phy);
719 }
720
721 if (adapter->sge)
722 t1_sge_destroy(adapter->sge);
723 if (adapter->tp)
724 t1_tp_destroy(adapter->tp);
725 if (adapter->espi)
726 t1_espi_destroy(adapter->espi);
727}
728
729static void __devinit init_link_config(struct link_config *lc,
730 const struct board_info *bi)
731{
732 lc->supported = bi->caps;
733 lc->requested_speed = lc->speed = SPEED_INVALID;
734 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
735 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
736 if (lc->supported & SUPPORTED_Autoneg) {
737 lc->advertising = lc->supported;
738 lc->autoneg = AUTONEG_ENABLE;
739 lc->requested_fc |= PAUSE_AUTONEG;
740 } else {
741 lc->advertising = 0;
742 lc->autoneg = AUTONEG_DISABLE;
743 }
744}
745
746
747/*
748 * Allocate and initialize the data structures that hold the SW state of
749 * the Terminator HW modules.
750 */
751int __devinit t1_init_sw_modules(adapter_t *adapter,
752 const struct board_info *bi)
753{
754 unsigned int i;
755
756 adapter->params.brd_info = bi;
757 adapter->params.nports = bi->port_number;
758 adapter->params.stats_update_period = bi->gmac->stats_update_period;
759
760 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
761 if (!adapter->sge) {
762 CH_ERR("%s: SGE initialization failed\n",
763 adapter->name);
764 goto error;
765 }
766
767
768
769 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
770 CH_ERR("%s: ESPI initialization failed\n",
771 adapter->name);
772 goto error;
773 }
774
775 adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
776 if (!adapter->tp) {
777 CH_ERR("%s: TP initialization failed\n",
778 adapter->name);
779 goto error;
780 }
781
782 board_init(adapter, bi);
783 bi->mdio_ops->init(adapter, bi);
784 if (bi->gphy->reset)
785 bi->gphy->reset(adapter);
786 if (bi->gmac->reset)
787 bi->gmac->reset(adapter);
788
789 for_each_port(adapter, i) {
790 u8 hw_addr[6];
791 struct cmac *mac;
792 int phy_addr = bi->mdio_phybaseaddr + i;
793
794 adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
795 bi->mdio_ops);
796 if (!adapter->port[i].phy) {
797 CH_ERR("%s: PHY %d initialization failed\n",
798 adapter->name, i);
799 goto error;
800 }
801
802 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
803 if (!mac) {
804 CH_ERR("%s: MAC %d initialization failed\n",
805 adapter->name, i);
806 goto error;
807 }
808
809 /*
810		 * Get the port's MAC address from the EEPROM, if one exists,
811		 * or else use the address hardcoded in the MAC.
812 */
813 if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
814 mac->ops->macaddress_get(mac, hw_addr);
815 else if (vpd_macaddress_get(adapter, i, hw_addr)) {
816 CH_ERR("%s: could not read MAC address from VPD ROM\n",
817 port_name(adapter, i));
818 goto error;
819 }
820 t1_set_hw_addr(adapter, i, hw_addr);
821 init_link_config(&adapter->port[i].link_config, bi);
822 }
823
824 get_pci_mode(adapter, &adapter->params.pci);
825 t1_interrupts_clear(adapter);
826 return 0;
827
828 error:
829 t1_free_sw_modules(adapter);
830 return -1;
831}
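
The allocation/cleanup split above relies on every sub-module pointer starting out NULL, so t1_free_sw_modules() can be called from any point in the error path and only tears down what was actually created. The fragment below is a stand-alone sketch of that unwind pattern in plain C; the struct and helper names are made up for illustration and are not part of the driver.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustration only: the same NULL-initialized, single-cleanup-path idea
     * as t1_init_sw_modules()/t1_free_sw_modules(), with hypothetical names. */
    struct modules {
    	int *sge;
    	int *tp;
    };

    static void free_modules(struct modules *m)
    {
    	free(m->tp);		/* free(NULL) is a no-op, like the NULL checks above */
    	free(m->sge);
    	m->tp = m->sge = NULL;
    }

    static int init_modules(struct modules *m)
    {
    	m->sge = calloc(1, sizeof(*m->sge));
    	if (!m->sge)
    		goto error;
    	m->tp = calloc(1, sizeof(*m->tp));
    	if (!m->tp)
    		goto error;
    	return 0;

    error:
    	free_modules(m);	/* frees only what was allocated so far */
    	return -1;
    }

    int main(void)
    {
    	struct modules m = { NULL, NULL };

    	if (init_modules(&m) == 0)
    		free_modules(&m);
    	return 0;
    }
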
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..98352bdda89b
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,221 @@
1/*****************************************************************************
2 * *
3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.4 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef _SUNI1x10GEXP_REGS_H
41#define _SUNI1x10GEXP_REGS_H
42
43/******************************************************************************/
44/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
45/******************************************************************************/
46/* Refer to the Register Bit Masks below for the naming of each register and  */
47/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit              */
48/******************************************************************************/
49
50#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
51#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
52#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
53#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
54#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
55#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
56#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
57#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
58#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
59#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
60#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
61#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
62#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
63#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
64#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
65#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
66#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
67#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
68#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
69#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
70#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
71#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
72#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
73#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
74#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
75#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
76#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
77#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
78#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
79#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
80#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
81#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
82#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
83#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
84#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
85#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
86#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
87#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
88#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
89#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
90#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
91#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
92#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
93#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
94#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
95#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
96#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
97#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
98#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
99#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
100#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
101#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
102#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
103#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
104#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
105#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
106#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
107#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
108#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
109#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
110#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
111#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
112#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
113#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
114#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
115#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
116#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
117#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
118#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
119#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
120#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
121#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
122#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
123#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
124#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
125#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
126#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
127#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
128#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
129
130/******************************************************************************/
131/* -- End register offset definitions -- */
132/******************************************************************************/
133
134/******************************************************************************/
135/**                  S/UNI-1x10GE-XP REGISTER BIT MASKS                     **/
136/******************************************************************************/
137
138/*----------------------------------------------------------------------------
139 * Register 0x0004: S/UNI-1x10GE-XP Device Status
140 * Bit 9 TOP_SXRA_EXPIRED
141 * Bit 8 TOP_MDIO_BUSY
142 * Bit 7 TOP_DTRB
143 * Bit 6 TOP_EXPIRED
144 * Bit 5 TOP_PAUSED
145 * Bit 4 TOP_PL4_ID_DOOL
146 * Bit 3 TOP_PL4_IS_DOOL
147 * Bit 2 TOP_PL4_ID_ROOL
148 * Bit 1 TOP_PL4_IS_ROOL
149 * Bit 0 TOP_PL4_OUT_ROOL
150 *----------------------------------------------------------------------------*/
151#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
152#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
153#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
154#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
155#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
156#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
157#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
158
159/*----------------------------------------------------------------------------
160 * Register 0x000E: PM3393 Global Interrupt Enable
161 * Bit 15 TOP_INTE
162 *----------------------------------------------------------------------------*/
163#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
164
165/*----------------------------------------------------------------------------
166 * Register 0x2040: RXXG Configuration 1
167 * Bit 15 RXXG_RXEN
168 * Bit 14 RXXG_ROCF
169 * Bit 13 RXXG_PAD_STRIP
170 * Bit 10 RXXG_PUREP
171 * Bit 9 RXXG_LONGP
172 * Bit 8 RXXG_PARF
173 * Bit 7 RXXG_FLCHK
174 * Bit 5 RXXG_PASS_CTRL
175 * Bit 3 RXXG_CRC_STRIP
176 * Bit 2-0 RXXG_MIFG
177 *----------------------------------------------------------------------------*/
178#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
179#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
180#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
181#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
182
183/*----------------------------------------------------------------------------
184 * Register 0x2070: RXXG Address Filter Control 2
185 * Bit 1 RXXG_PMODE
186 * Bit 0 RXXG_MHASH_EN
187 *----------------------------------------------------------------------------*/
188#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
189#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
190
191/*----------------------------------------------------------------------------
192 * Register 0x2100: MSTAT Control
193 * Bit 2 MSTAT_WRITE
194 * Bit 1 MSTAT_CLEAR
195 * Bit 0 MSTAT_SNAP
196 *----------------------------------------------------------------------------*/
197#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
198#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
199
200/*----------------------------------------------------------------------------
201 * Register 0x3040: TXXG Configuration Register 1
202 * Bit 15 TXXG_TXEN0
203 * Bit 13 TXXG_HOSTPAUSE
204 * Bit 12-7 TXXG_IPGT
205 * Bit 5 TXXG_32BIT_ALIGN
206 * Bit 4 TXXG_CRCEN
207 * Bit 3 TXXG_FCTX
208 * Bit 2 TXXG_FCRX
209 * Bit 1 TXXG_PADEN
210 * Bit 0 TXXG_SPRE
211 *----------------------------------------------------------------------------*/
212#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
213#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
214#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
215#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
216#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
217#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
218#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
219
220#endif /* _SUNI1x10GEXP_REGS_H */
221
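
As a quick illustration of how the BITMSK and BITOFF constants above are meant to be combined (a stand-alone sketch; the helper, the IPGT value, and the printed result are illustrative and not taken from the driver):

    #include <stdio.h>

    /* Constants as defined in suni1x10gexp_regs.h above. */
    #define SUNI1x10GEXP_BITMSK_TXXG_TXEN0	0x8000
    #define SUNI1x10GEXP_BITOFF_TXXG_IPGT	7

    /* Hypothetical helper: build a TXXG Configuration 1 value with the
     * transmitter enabled and a given inter-packet gap.  IPGT occupies
     * bits 12-7, which is why it gets a BITOFF (shift) constant rather
     * than a single-bit BITMSK. */
    static unsigned short txxg_config1(unsigned int ipgt)
    {
    	return SUNI1x10GEXP_BITMSK_TXXG_TXEN0 |
    	       (unsigned short)(ipgt << SUNI1x10GEXP_BITOFF_TXXG_IPGT);
    }

    int main(void)
    {
    	/* 0x8000 | (12 << 7) = 0x8600 */
    	printf("0x%04x\n", (unsigned)txxg_config1(12));
    	return 0;
    }
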
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
new file mode 100644
index 000000000000..9ad5c539fd28
--- /dev/null
+++ b/drivers/net/chelsio/tp.c
@@ -0,0 +1,188 @@
1/*****************************************************************************
2 * *
3 * File: tp.c *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * Core ASIC Management. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "tp.h"
43
44struct petp {
45 adapter_t *adapter;
46};
47
48/* Pause deadlock avoidance parameters */
49#define DROP_MSEC 16
50#define DROP_PKTS_CNT 1
51
52
53static void tp_init(adapter_t *ap, const struct tp_params *p,
54 unsigned int tp_clk)
55{
56 if (t1_is_asic(ap)) {
57 u32 val;
58
59 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
60 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
61 if (!p->pm_size)
62 val |= F_OFFLOAD_DISABLE;
63 else
64 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
65 F_TP_IN_ESPI_CHECK_TCP_CSUM;
66 t1_write_reg_4(ap, A_TP_IN_CONFIG, val);
67 t1_write_reg_4(ap, A_TP_OUT_CONFIG, F_TP_OUT_CSPI_CPL |
68 F_TP_OUT_ESPI_ETHERNET |
69 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
70 F_TP_OUT_ESPI_GENERATE_TCP_CSUM);
71 t1_write_reg_4(ap, A_TP_GLOBAL_CONFIG, V_IP_TTL(64) |
72 F_PATH_MTU /* IP DF bit */ |
73 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
74 V_SYN_COOKIE_PARAMETER(29));
75
76 /*
77 * Enable pause frame deadlock prevention.
78 */
79 if (is_T2(ap)) {
80 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
81
82 t1_write_reg_4(ap, A_TP_TX_DROP_CONFIG,
83 F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
84 V_DROP_TICKS_CNT(drop_ticks) |
85 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT));
86 }
87
88 }
89}
90
91void t1_tp_destroy(struct petp *tp)
92{
93 kfree(tp);
94}
95
96struct petp * __devinit t1_tp_create(adapter_t *adapter, struct tp_params *p)
97{
98 struct petp *tp = kmalloc(sizeof(*tp), GFP_KERNEL);
99 if (!tp)
100 return NULL;
101 memset(tp, 0, sizeof(*tp));
102 tp->adapter = adapter;
103
104 return tp;
105}
106
107void t1_tp_intr_enable(struct petp *tp)
108{
109 u32 tp_intr = t1_read_reg_4(tp->adapter, A_PL_ENABLE);
110
111 {
112 /* We don't use any TP interrupts */
113 t1_write_reg_4(tp->adapter, A_TP_INT_ENABLE, 0);
114 t1_write_reg_4(tp->adapter, A_PL_ENABLE,
115 tp_intr | F_PL_INTR_TP);
116 }
117}
118
119void t1_tp_intr_disable(struct petp *tp)
120{
121 u32 tp_intr = t1_read_reg_4(tp->adapter, A_PL_ENABLE);
122
123 {
124 t1_write_reg_4(tp->adapter, A_TP_INT_ENABLE, 0);
125 t1_write_reg_4(tp->adapter, A_PL_ENABLE,
126 tp_intr & ~F_PL_INTR_TP);
127 }
128}
129
130void t1_tp_intr_clear(struct petp *tp)
131{
132 t1_write_reg_4(tp->adapter, A_TP_INT_CAUSE, 0xffffffff);
133 t1_write_reg_4(tp->adapter, A_PL_CAUSE, F_PL_INTR_TP);
134}
135
136int t1_tp_intr_handler(struct petp *tp)
137{
138 u32 cause;
139
140
141 cause = t1_read_reg_4(tp->adapter, A_TP_INT_CAUSE);
142 t1_write_reg_4(tp->adapter, A_TP_INT_CAUSE, cause);
143 return 0;
144}
145
146static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
147{
148 u32 val = t1_read_reg_4(tp->adapter, A_TP_GLOBAL_CONFIG);
149
150 if (enable)
151 val |= csum_bit;
152 else
153 val &= ~csum_bit;
154 t1_write_reg_4(tp->adapter, A_TP_GLOBAL_CONFIG, val);
155}
156
157void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
158{
159 set_csum_offload(tp, F_IP_CSUM, enable);
160}
161
162void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
163{
164 set_csum_offload(tp, F_UDP_CSUM, enable);
165}
166
167void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
168{
169 set_csum_offload(tp, F_TCP_CSUM, enable);
170}
171
172/*
173 * Initialize TP state. tp_params contains initial settings for some TP
174 * parameters, particularly the one-time PM and CM settings.
175 */
176int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
177{
178 int busy = 0;
179 adapter_t *adapter = tp->adapter;
180
181 tp_init(adapter, p, tp_clk);
182 if (!busy)
183 t1_write_reg_4(adapter, A_TP_RESET, F_TP_RESET);
184 else
185 CH_ERR("%s: TP initialization timed out\n",
186 adapter->name);
187 return busy;
188}
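
For a feel for the drop-timer arithmetic in tp_init() above: DROP_MSEC is converted into core-clock ticks as drop_ticks = DROP_MSEC * (tp_clk / 1000). The snippet below simply evaluates that formula for an assumed 125 MHz core clock; the clock figure is an example, not a value taken from this driver.

    #include <stdio.h>

    #define DROP_MSEC 16			/* same value as in tp.c above */

    int main(void)
    {
    	unsigned int tp_clk = 125000000;	/* assumed 125 MHz core clock (example only) */
    	unsigned int drop_ticks = DROP_MSEC * (tp_clk / 1000);

    	/* 16 ms * 125000 ticks/ms = 2000000 ticks before frames are dropped */
    	printf("%u\n", drop_ticks);
    	return 0;
    }
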
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
new file mode 100644
index 000000000000..2ebc5c0d62e7
--- /dev/null
+++ b/drivers/net/chelsio/tp.h
@@ -0,0 +1,110 @@
1/*****************************************************************************
2 * *
3 * File: tp.h *
4 * $Revision: 1.3 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_TP_H
40#define CHELSIO_TP_H
41
42#include "common.h"
43
44#define TP_MAX_RX_COALESCING_SIZE 16224U
45
46struct tp_mib_statistics {
47
48 /* IP */
49 u32 ipInReceive_hi;
50 u32 ipInReceive_lo;
51 u32 ipInHdrErrors_hi;
52 u32 ipInHdrErrors_lo;
53 u32 ipInAddrErrors_hi;
54 u32 ipInAddrErrors_lo;
55 u32 ipInUnknownProtos_hi;
56 u32 ipInUnknownProtos_lo;
57 u32 ipInDiscards_hi;
58 u32 ipInDiscards_lo;
59 u32 ipInDelivers_hi;
60 u32 ipInDelivers_lo;
61 u32 ipOutRequests_hi;
62 u32 ipOutRequests_lo;
63 u32 ipOutDiscards_hi;
64 u32 ipOutDiscards_lo;
65 u32 ipOutNoRoutes_hi;
66 u32 ipOutNoRoutes_lo;
67 u32 ipReasmTimeout;
68 u32 ipReasmReqds;
69 u32 ipReasmOKs;
70 u32 ipReasmFails;
71
72 u32 reserved[8];
73
74 /* TCP */
75 u32 tcpActiveOpens;
76 u32 tcpPassiveOpens;
77 u32 tcpAttemptFails;
78 u32 tcpEstabResets;
79 u32 tcpOutRsts;
80 u32 tcpCurrEstab;
81 u32 tcpInSegs_hi;
82 u32 tcpInSegs_lo;
83 u32 tcpOutSegs_hi;
84 u32 tcpOutSegs_lo;
85 u32 tcpRetransSeg_hi;
86 u32 tcpRetransSeg_lo;
87 u32 tcpInErrs_hi;
88 u32 tcpInErrs_lo;
89 u32 tcpRtoMin;
90 u32 tcpRtoMax;
91};
92
93struct petp;
94struct tp_params;
95
96struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
97void t1_tp_destroy(struct petp *tp);
98
99void t1_tp_intr_disable(struct petp *tp);
100void t1_tp_intr_enable(struct petp *tp);
101void t1_tp_intr_clear(struct petp *tp);
102int t1_tp_intr_handler(struct petp *tp);
103
104void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
105void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
106void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
107void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
108int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
109int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
110#endif