author    Scott Bardone <sbardone@chelsio.com>    2005-06-23 01:40:19 -0400
committer Jeff Garzik <jgarzik@pobox.com>         2005-06-23 01:40:19 -0400
commit    559fb51ba7e66fe298b8355fabde1275b7def35f (patch)
tree      e1de3eb86ea5e6ac8c5f27dc32140a0c2aacc51e /drivers/net/chelsio
parent    a5324343955997d1439f26518ddac567cd5d134b (diff)
Update Chelsio gige net driver.
- Use extern prefix for functions required.
- Removed a lot of wrappers, including t1_read/write_reg_4.
- Removed various macros, using native kernel calls now.
- Enumerated various #defines.
- Removed a lot of shared code which is not currently used in "NIC only" mode.
- Removed dead code.

Documentation/networking/cxgb.txt:
- Updated release notes for version 2.1.1

drivers/net/chelsio/ch_ethtool.h
- removed file, no longer using ETHTOOL namespace.

drivers/net/chelsio/common.h
- moved code from osdep.h to common.h
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/cphy.h
- removed dead code.
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/cxgb2.c
- use DMA_{32,64}BIT_MASK in include/linux/dma-mapping.h.
- removed unused code.
- use printk message for link info resembling drivers/net/mii.c.
- no longer using the MODULE_xxx namespace.
- no longer using "pci_" namespace.
- no longer using ETHTOOL namespace.

drivers/net/chelsio/cxgb2.h
- removed file, merged into common.h

drivers/net/chelsio/elmer0.h
- removed dead code.
- added various enums.
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/espi.c
- removed various macros, using native kernel calls now.
- removed a lot of wrappers, including t1_read/write_reg_4.

drivers/net/chelsio/espi.h
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/gmac.h
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/mv88x201x.c
- changes to sync with Chelsio TOT.

drivers/net/chelsio/osdep.h
- removed file, consolidation. osdep was used to translate wrapper functions
  since our code supports multiple OSs. removed wrappers.

drivers/net/chelsio/pm3393.c
- removed various macros, using native kernel calls now.
- removed a lot of wrappers, including t1_read/write_reg_4.
- removed unused code.

drivers/net/chelsio/regs.h
- added a few register entries for future and current feature support.
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/sge.c
- rewrote large portion of scatter-gather engine to stabilize performance.
- using u8/u16/u32 kernel types instead of __u8/__u16/__u32 compiler types.

drivers/net/chelsio/sge.h
- rewrote large portion of scatter-gather engine to stabilize performance.
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/subr.c
- merged tp.c into subr.c
- removed various macros, using native kernel calls now.
- removed a lot of wrappers, including t1_read/write_reg_4.
- removed unused code.

drivers/net/chelsio/suni1x10gexp_regs.h
- modified copyright and authorship of file.
- added comment to #endif indicating which symbol it closes.

drivers/net/chelsio/tp.c
- removed file, merged into subr.c.

drivers/net/chelsio/tp.h
- removed file.

include/linux/pci_ids.h
- patched to include PCI_VENDOR_ID_CHELSIO 0x1425, removed define from our code.
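[Editorial note: the recurring "removed wrappers" item refers to the driver's
private 32-bit MMIO accessors. A minimal sketch of the pattern, reconstructed
from the removed call sites visible in the diff below (the wrapper bodies are
an assumption; adapter_t and the register offsets come from common.h):

    /*
     * Sketch only, not part of the patch: the old driver wrapped MMIO
     * access in t1_read_reg_4()/t1_write_reg_4(); this commit deletes
     * the wrappers and calls readl()/writel() directly.
     */
    #include <asm/io.h>

    static inline u32 t1_read_reg_4(adapter_t *adapter, u32 reg_addr)
    {
            return readl(adapter->regs + reg_addr);     /* removed wrapper */
    }

    static inline void t1_write_reg_4(adapter_t *adapter, u32 reg_addr,
                                      u32 val)
    {
            writel(val, adapter->regs + reg_addr);      /* removed wrapper */
    }

    /* After the patch, call sites use the native accessor directly,
     * e.g. in ext_intr_task():
     *
     *      writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
     */
]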
Diffstat (limited to 'drivers/net/chelsio')
-rw-r--r--  drivers/net/chelsio/Makefile             |    3
-rw-r--r--  drivers/net/chelsio/ch_ethtool.h         |  102
-rw-r--r--  drivers/net/chelsio/common.h             |  259
-rw-r--r--  drivers/net/chelsio/cphy.h               |   14
-rw-r--r--  drivers/net/chelsio/cpl5_cmd.h           |  118
-rw-r--r--  drivers/net/chelsio/cxgb2.c              |  537
-rw-r--r--  drivers/net/chelsio/cxgb2.h              |  122
-rw-r--r--  drivers/net/chelsio/elmer0.h             |   16
-rw-r--r--  drivers/net/chelsio/espi.c               |  168
-rw-r--r--  drivers/net/chelsio/espi.h               |   11
-rw-r--r--  drivers/net/chelsio/gmac.h               |   11
-rw-r--r--  drivers/net/chelsio/mv88x201x.c          |   36
-rw-r--r--  drivers/net/chelsio/osdep.h              |  169
-rw-r--r--  drivers/net/chelsio/pm3393.c             |   45
-rw-r--r--  drivers/net/chelsio/regs.h               |   21
-rw-r--r--  drivers/net/chelsio/sge.c                | 1859
-rw-r--r--  drivers/net/chelsio/sge.h                |   48
-rw-r--r--  drivers/net/chelsio/subr.c               |  235
-rw-r--r--  drivers/net/chelsio/suni1x10gexp_regs.h  |   20
-rw-r--r--  drivers/net/chelsio/tp.c                 |  188
-rw-r--r--  drivers/net/chelsio/tp.h                 |  110
21 files changed, 1830 insertions(+), 2262 deletions(-)
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
index ff8c11b3a4e1..91e927827c43 100644
--- a/drivers/net/chelsio/Makefile
+++ b/drivers/net/chelsio/Makefile
@@ -7,6 +7,5 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb.o
 EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
 
 
-cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o mv88x201x.o
+cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
-
 
diff --git a/drivers/net/chelsio/ch_ethtool.h b/drivers/net/chelsio/ch_ethtool.h
deleted file mode 100644
index c523d24836b5..000000000000
--- a/drivers/net/chelsio/ch_ethtool.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*****************************************************************************
- *                                                                           *
- * File: ch_ethtool.h                                                        *
- * $Revision: 1.5 $                                                          *
- * $Date: 2005/03/23 07:15:58 $                                              *
- * Description:                                                              *
- *  part of the Chelsio 10Gb Ethernet Driver.                                *
- *                                                                           *
- * This program is free software; you can redistribute it and/or modify      *
- * it under the terms of the GNU General Public License, version 2, as       *
- * published by the Free Software Foundation.                                *
- *                                                                           *
- * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
- *                                                                           *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
- *                                                                           *
- * http://www.chelsio.com                                                    *
- *                                                                           *
- * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
- * All rights reserved.                                                      *
- *                                                                           *
- * Maintainers: maintainers@chelsio.com                                      *
- *                                                                           *
- * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
- *          Tina Yang               <tainay@chelsio.com>                     *
- *          Felix Marti             <felix@chelsio.com>                      *
- *          Scott Bardone           <sbardone@chelsio.com>                   *
- *          Kurt Ottaway            <kottaway@chelsio.com>                   *
- *          Frank DiMambro          <frank@chelsio.com>                      *
- *                                                                           *
- * History:                                                                  *
- *                                                                           *
- ****************************************************************************/
-
-#ifndef __CHETHTOOL_LINUX_H__
-#define __CHETHTOOL_LINUX_H__
-
-/* TCB size in 32-bit words */
-#define TCB_WORDS (TCB_SIZE / 4)
-
-enum {
-	ETHTOOL_SETREG,
-	ETHTOOL_GETREG,
-	ETHTOOL_SETTPI,
-	ETHTOOL_GETTPI,
-	ETHTOOL_DEVUP,
-	ETHTOOL_GETMTUTAB,
-	ETHTOOL_SETMTUTAB,
-	ETHTOOL_GETMTU,
-	ETHTOOL_SET_PM,
-	ETHTOOL_GET_PM,
-	ETHTOOL_GET_TCAM,
-	ETHTOOL_SET_TCAM,
-	ETHTOOL_GET_TCB,
-	ETHTOOL_READ_TCAM_WORD,
-};
-
-struct ethtool_reg {
-	uint32_t cmd;
-	uint32_t addr;
-	uint32_t val;
-};
-
-struct ethtool_mtus {
-	uint32_t cmd;
-	uint16_t mtus[NMTUS];
-};
-
-struct ethtool_pm {
-	uint32_t cmd;
-	uint32_t tx_pg_sz;
-	uint32_t tx_num_pg;
-	uint32_t rx_pg_sz;
-	uint32_t rx_num_pg;
-	uint32_t pm_total;
-};
-
-struct ethtool_tcam {
-	uint32_t cmd;
-	uint32_t tcam_size;
-	uint32_t nservers;
-	uint32_t nroutes;
-};
-
-struct ethtool_tcb {
-	uint32_t cmd;
-	uint32_t tcb_index;
-	uint32_t tcb_data[TCB_WORDS];
-};
-
-struct ethtool_tcam_word {
-	uint32_t cmd;
-	uint32_t addr;
-	uint32_t buf[3];
-};
-
-#define SIOCCHETHTOOL SIOCDEVPRIVATE
-#endif
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 017684ff48dc..f09348802b46 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -1,8 +1,8 @@
 /*****************************************************************************
  *                                                                           *
  * File: common.h                                                            *
- * $Revision: 1.5 $                                                          *
- * $Date: 2005/03/23 07:41:27 $                                              *
+ * $Revision: 1.21 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
  * Description:                                                              *
  *  part of the Chelsio 10Gb Ethernet Driver.                                *
  *                                                                           *
@@ -36,74 +36,101 @@
  *                                                                           *
  ****************************************************************************/
 
-#ifndef CHELSIO_COMMON_H
-#define CHELSIO_COMMON_H
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.1.1"
+#define PFX DRV_NAME ": "
+
+#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
+#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
+#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+void t1_elmer0_ext_intr(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
+		     int speed, int duplex, int fc);
+
+struct t1_rx_mode {
+	struct net_device *dev;
+	u32 idx;
+	struct dev_mc_list *list;
+};
+
+#define t1_rx_mode_promisc(rm)  (rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)   (rm->dev->mc_count)
+
+static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
+{
+	u8 *addr = 0;
 
-#define DIMOF(x) (sizeof(x)/sizeof(x[0]))
+	if (rm->idx++ < rm->dev->mc_count) {
+		addr = rm->list->dmi_addr;
+		rm->list = rm->list->next;
+	}
+	return addr;
+}
+
+#define MAX_NPORTS 4
 
-#define NMTUS      8
-#define MAX_NPORTS 4
-#define TCB_SIZE   128
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
 
 enum {
-	CHBT_BOARD_7500,
-	CHBT_BOARD_8000,
-	CHBT_BOARD_CHT101,
-	CHBT_BOARD_CHT110,
-	CHBT_BOARD_CHT210,
-	CHBT_BOARD_CHT204,
 	CHBT_BOARD_N110,
-	CHBT_BOARD_N210,
-	CHBT_BOARD_COUGAR,
-	CHBT_BOARD_6800,
-	CHBT_BOARD_SIMUL
+	CHBT_BOARD_N210
 };
 
 enum {
-	CHBT_TERM_FPGA,
 	CHBT_TERM_T1,
-	CHBT_TERM_T2,
-	CHBT_TERM_T3
+	CHBT_TERM_T2
 };
 
 enum {
-	CHBT_MAC_CHELSIO_A,
-	CHBT_MAC_IXF1010,
 	CHBT_MAC_PM3393,
-	CHBT_MAC_VSC7321,
-	CHBT_MAC_DUMMY
 };
 
 enum {
-	CHBT_PHY_88E1041,
-	CHBT_PHY_88E1111,
 	CHBT_PHY_88X2010,
-	CHBT_PHY_XPAK,
-	CHBT_PHY_MY3126,
-	CHBT_PHY_DUMMY
 };
 
 enum {
-	PAUSE_RX      = 1,
-	PAUSE_TX      = 2,
-	PAUSE_AUTONEG = 4
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
 };
 
 /* Revisions of T1 chip */
-#define TERM_T1A   0
-#define TERM_T1B   1
-#define TERM_T2    3
-
-struct tp_params {
-	unsigned int pm_size;
-	unsigned int cm_size;
-	unsigned int pm_rx_base;
-	unsigned int pm_tx_base;
-	unsigned int pm_rx_pg_size;
-	unsigned int pm_tx_pg_size;
-	unsigned int pm_rx_num_pgs;
-	unsigned int pm_tx_num_pgs;
-	unsigned int use_5tuple_mode;
+enum {
+	TERM_T1A = 0,
+	TERM_T1B = 1,
+	TERM_T2  = 3
 };
 
 struct sge_params {
@@ -118,17 +145,7 @@ struct sge_params {
 	unsigned int polling;
 };
 
-struct mc5_params {
-	unsigned int mode;       /* selects MC5 width */
-	unsigned int nservers;   /* size of server region */
-	unsigned int nroutes;    /* size of routing region */
-};
-
-/* Default MC5 region sizes */
-#define DEFAULT_SERVER_REGION_LEN 256
-#define DEFAULT_RT_REGION_LEN 1024
-
-struct pci_params {
+struct chelsio_pci_params {
 	unsigned short speed;
 	unsigned char  width;
 	unsigned char  is_pcix;
@@ -136,31 +153,14 @@ struct pci_params {
 
 struct adapter_params {
 	struct sge_params sge;
-	struct mc5_params mc5;
-	struct tp_params  tp;
-	struct pci_params pci;
+	struct chelsio_pci_params pci;
 
 	const struct board_info *brd_info;
 
-	unsigned short mtus[NMTUS];
-	unsigned int   nports;          /* # of ethernet ports */
+	unsigned int   nports;          /* # of ethernet ports */
 	unsigned int   stats_update_period;
 	unsigned short chip_revision;
 	unsigned char  chip_version;
-	unsigned char  is_asic;
-};
-
-struct pci_err_cnt {
-	unsigned int master_parity_err;
-	unsigned int sig_target_abort;
-	unsigned int rcv_target_abort;
-	unsigned int rcv_master_abort;
-	unsigned int sig_sys_err;
-	unsigned int det_parity_err;
-	unsigned int pio_parity_err;
-	unsigned int wf_parity_err;
-	unsigned int rf_parity_err;
-	unsigned int cf_parity_err;
 };
 
 struct link_config {
@@ -175,8 +175,60 @@ struct link_config {
 	unsigned char autoneg;          /* autonegotiating? */
 };
 
-#define SPEED_INVALID 0xffff
-#define DUPLEX_INVALID 0xff
+struct cmac;
+struct cphy;
+
+struct port_info {
+	struct net_device *dev;
+	struct cmac *mac;
+	struct cphy *phy;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+	u8 *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	u32 mmio_len;
+
+	struct work_struct ext_intr_handler_task;
+	struct adapter_params params;
+
+	struct vlan_group *vlan_grp;
+
+	/* Terminator modules. */
+	struct sge    *sge;
+	struct peespi *espi;
+
+	struct port_info port[MAX_NPORTS];
+	struct work_struct stats_update_task;
+	struct timer_list stats_update_timer;
+
+	struct semaphore mib_mutex;
+	spinlock_t tpi_lock;
+	spinlock_t work_lock;
+	/* guards async operations */
+	spinlock_t async_lock ____cacheline_aligned;
+	u32 slow_intr_mask;
+};
+
+enum {                          /* adapter flags */
+	FULL_INIT_DONE     = 1 << 0,
+	TSO_CAPABLE        = 1 << 2,
+	TCP_CSUM_CAPABLE   = 1 << 3,
+	UDP_CSUM_CAPABLE   = 1 << 4,
+	VLAN_ACCEL_CAPABLE = 1 << 5,
+	RX_CSUM_ENABLED    = 1 << 6,
+};
 
 struct mdio_ops;
 struct gmac;
@@ -205,19 +257,8 @@ struct board_info {
 	const char *desc;
 };
 
-#include "osdep.h"
-
-#ifndef PCI_VENDOR_ID_CHELSIO
-#define PCI_VENDOR_ID_CHELSIO 0x1425
-#endif
-
 extern struct pci_device_id t1_pci_tbl[];
 
-static inline int t1_is_asic(const adapter_t *adapter)
-{
-	return adapter->params.is_asic;
-}
-
 static inline int adapter_matches_type(const adapter_t *adapter,
 				       int version, int revision)
 {
@@ -245,25 +286,29 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
 	return board_info(adap)->clock_core / 1000000;
 }
 
-int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
 
-void t1_interrupts_enable(adapter_t *adapter);
-void t1_interrupts_disable(adapter_t *adapter);
-void t1_interrupts_clear(adapter_t *adapter);
-int elmer0_ext_intr_handler(adapter_t *adapter);
-int t1_slow_intr_handler(adapter_t *adapter);
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
 
-int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-const struct board_info *t1_get_board_info(unsigned int board_id);
-const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
-						    unsigned short ssid);
-int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
-int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
-		     struct adapter_params *p);
-int t1_init_hw_modules(adapter_t *adapter);
-int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-void t1_free_sw_modules(adapter_t *adapter);
-void t1_fatal_err(adapter_t *adapter);
-#endif
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+						    unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+			    struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+
+extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
 
+#endif /* _CXGB_COMMON_H_ */
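[Editorial note: the t1_rx_mode struct and t1_get_next_mcaddr() added above
form a small iterator over the net device's multicast list. A hypothetical
consumer loop, as a sketch only (the real walk lives in the MAC code, e.g.
pm3393.c, which is not part of this diffstat excerpt):

    static void walk_mcaddrs(struct t1_rx_mode *rm)
    {
            u8 *addr;

            /* yields each 6-byte multicast address in turn, NULL at end */
            while ((addr = t1_get_next_mcaddr(rm)) != NULL) {
                    /* program addr[0..5] into the MAC's multicast filter */
            }
    }
]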
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 1bc2248264c0..3412342f7345 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -1,8 +1,8 @@
 /*****************************************************************************
  *                                                                           *
  * File: cphy.h                                                              *
- * $Revision: 1.4 $                                                          *
- * $Date: 2005/03/23 07:41:27 $                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
  * Description:                                                              *
  *  part of the Chelsio 10Gb Ethernet Driver.                                *
  *                                                                           *
@@ -36,8 +36,8 @@
  *                                                                           *
  ****************************************************************************/
 
-#ifndef CHELSIO_CPHY_H
-#define CHELSIO_CPHY_H
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
 
 #include "common.h"
 
@@ -142,9 +142,7 @@ struct gphy {
 	int (*reset)(adapter_t *adapter);
 };
 
-extern struct gphy t1_my3126_ops;
-extern struct gphy t1_mv88e1xxx_ops;
-extern struct gphy t1_xpak_ops;
 extern struct gphy t1_mv88x201x_ops;
 extern struct gphy t1_dummy_phy_ops;
-#endif
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 45e9248979f1..27925e487bcf 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -1,8 +1,8 @@
 /*****************************************************************************
  *                                                                           *
  * File: cpl5_cmd.h                                                          *
- * $Revision: 1.4 $                                                          *
- * $Date: 2005/03/23 07:15:58 $                                              *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
  * Description:                                                              *
  *  part of the Chelsio 10Gb Ethernet Driver.                                *
  *                                                                           *
@@ -36,8 +36,8 @@
  *                                                                           *
  ****************************************************************************/
 
-#ifndef _CPL5_CMD_H
-#define _CPL5_CMD_H
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
 
 #include <asm/byteorder.h>
 
@@ -59,12 +59,12 @@ enum { /* TX_PKT_LSO ethernet types */
 };
 
 struct cpl_rx_data {
-	__u32 rsvd0;
-	__u32 len;
-	__u32 seq;
-	__u16 urg;
-	__u8 rsvd1;
-	__u8 status;
+	u32 rsvd0;
+	u32 len;
+	u32 seq;
+	u16 urg;
+	u8 rsvd1;
+	u8 status;
 };
 
 /*
@@ -73,73 +73,73 @@ struct cpl_rx_data {
  * used so we break it into 2 16-bit parts to easily meet our alignment needs.
  */
 struct cpl_tx_pkt {
-	__u8 opcode;
+	u8 opcode;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8 iff:4;
-	__u8 ip_csum_dis:1;
-	__u8 l4_csum_dis:1;
-	__u8 vlan_valid:1;
-	__u8 rsvd:1;
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
 #else
-	__u8 rsvd:1;
-	__u8 vlan_valid:1;
-	__u8 l4_csum_dis:1;
-	__u8 ip_csum_dis:1;
-	__u8 iff:4;
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
 #endif
-	__u16 vlan;
-	__u16 len_hi;
-	__u16 len_lo;
+	u16 vlan;
+	u16 len_hi;
+	u16 len_lo;
 };
 
 struct cpl_tx_pkt_lso {
-	__u8 opcode;
+	u8 opcode;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8 iff:4;
-	__u8 ip_csum_dis:1;
-	__u8 l4_csum_dis:1;
-	__u8 vlan_valid:1;
-	__u8 rsvd:1;
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
 #else
-	__u8 rsvd:1;
-	__u8 vlan_valid:1;
-	__u8 l4_csum_dis:1;
-	__u8 ip_csum_dis:1;
-	__u8 iff:4;
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
 #endif
-	__u16 vlan;
-	__u32 len;
+	u16 vlan;
+	u32 len;
 
-	__u32 rsvd2;
-	__u8 rsvd3;
+	u32 rsvd2;
+	u8 rsvd3;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8 tcp_hdr_words:4;
-	__u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
 #else
-	__u8 ip_hdr_words:4;
-	__u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
 #endif
-	__u16 eth_type_mss;
+	u16 eth_type_mss;
 };
 
 struct cpl_rx_pkt {
-	__u8 opcode;
+	u8 opcode;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8 iff:4;
-	__u8 csum_valid:1;
-	__u8 bad_pkt:1;
-	__u8 vlan_valid:1;
-	__u8 rsvd:1;
+	u8 iff:4;
+	u8 csum_valid:1;
+	u8 bad_pkt:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
 #else
-	__u8 rsvd:1;
-	__u8 vlan_valid:1;
-	__u8 bad_pkt:1;
-	__u8 csum_valid:1;
-	__u8 iff:4;
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 bad_pkt:1;
+	u8 csum_valid:1;
+	u8 iff:4;
 #endif
-	__u16 csum;
-	__u16 vlan;
-	__u16 len;
+	u16 csum;
+	u16 vlan;
+	u16 len;
 };
 
-#endif
+#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 48c4d5acfcd1..28ae478b386d 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -1,8 +1,8 @@
 /*****************************************************************************
  *                                                                           *
  * File: cxgb2.c                                                             *
- * $Revision: 1.11 $                                                         *
- * $Date: 2005/03/23 07:41:27 $                                              *
+ * $Revision: 1.25 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
  * Description:                                                              *
  *  Chelsio 10Gb Ethernet Driver.                                            *
  *                                                                           *
@@ -37,7 +37,6 @@
  ****************************************************************************/
 
 #include "common.h"
-
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -48,44 +47,56 @@
 #include <linux/mii.h>
 #include <linux/sockios.h>
 #include <linux/proc_fs.h>
-#include <linux/version.h>
-#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
 #include <asm/uaccess.h>
 
-#include "ch_ethtool.h"
 #include "cpl5_cmd.h"
 #include "regs.h"
 #include "gmac.h"
 #include "cphy.h"
 #include "sge.h"
-#include "tp.h"
 #include "espi.h"
 
+#ifdef work_struct
+#include <linux/tqueue.h>
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
 {
-	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
 }
 
 static inline void cancel_mac_stats_update(struct adapter *ap)
 {
-	cancel_delayed_work(&ap->stats_update_task);
+	del_timer_sync(&ap->stats_update_timer);
+	flush_scheduled_tasks();
 }
 
-#if BITS_PER_LONG == 64 && !defined(CONFIG_X86_64)
-# define FMT64 "l"
-#else
-# define FMT64 "ll"
-#endif
+/*
+ * Stats update timer for 2.4. It schedules a task to do the actual update as
+ * we need to access MAC statistics in process context.
+ */
+static void mac_stats_timer(unsigned long data)
+{
+	struct adapter *ap = (struct adapter *)data;
 
-# define DRV_TYPE ""
-# define MODULE_DESC "Chelsio Network Driver"
+	schedule_task(&ap->stats_update_task);
+}
+#else
+#include <linux/workqueue.h>
 
-static char driver_name[] = DRV_NAME;
-static char driver_string[] = "Chelsio " DRV_TYPE "Network Driver";
-static char driver_version[] = "2.1.0";
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
 
-#define PCI_DMA_64BIT ~0ULL
-#define PCI_DMA_32BIT 0xffffffffULL
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+#endif
 
 #define MAX_CMDQ_ENTRIES 16384
 #define MAX_CMDQ1_ENTRIES 1024
@@ -107,10 +118,9 @@ static char driver_version[] = "2.1.0";
  */
 #define EEPROM_SIZE 32
 
-MODULE_DESCRIPTION(MODULE_DESC);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR("Chelsio Communications");
 MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
 
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
@@ -140,17 +150,17 @@ static void t1_set_rxmode(struct net_device *dev)
 static void link_report(struct port_info *p)
 {
 	if (!netif_carrier_ok(p->dev))
-		printk(KERN_INFO "%s: link is down\n", p->dev->name);
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
 	else {
-		const char *s = "10 Mbps";
+		const char *s = "10Mbps";
 
 		switch (p->link_config.speed) {
-			case SPEED_10000: s = "10 Gbps"; break;
-			case SPEED_1000:  s = "1000 Mbps"; break;
-			case SPEED_100:   s = "100 Mbps"; break;
+			case SPEED_10000: s = "10Gbps"; break;
+			case SPEED_1000:  s = "1000Mbps"; break;
+			case SPEED_100:   s = "100Mbps"; break;
 		}
 
-		printk(KERN_INFO "%s: link is up at %s, %s duplex\n",
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 		       p->dev->name, s,
 		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 	}
@@ -186,10 +196,8 @@ static void link_start(struct port_info *p)
 static void enable_hw_csum(struct adapter *adapter)
 {
 	if (adapter->flags & TSO_CAPABLE)
-		t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
-	if (adapter->flags & UDP_CSUM_CAPABLE)
-		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
-	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
+		t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
+	t1_tp_set_tcp_checksum_offload(adapter, 1);
 }
 
 /*
@@ -210,15 +218,13 @@ static int cxgb_up(struct adapter *adapter)
 	}
 
 	t1_interrupts_clear(adapter);
-
-	if ((err = request_irq(adapter->pdev->irq, &t1_interrupt, SA_SHIRQ,
-			       adapter->name, adapter)))
+	if ((err = request_irq(adapter->pdev->irq,
+			       t1_select_intr_handler(adapter), SA_SHIRQ,
+			       adapter->name, adapter))) {
 		goto out_err;
-
+	}
 	t1_sge_start(adapter->sge);
 	t1_interrupts_enable(adapter);
-
-	err = 0;
  out_err:
 	return err;
 }
@@ -339,47 +345,80 @@ static void set_msglevel(struct net_device *dev, u32 val)
 }
 
 static char stats_strings[][ETH_GSTRING_LEN] = {
 	"TxOctetsOK",
 	"TxOctetsBad",
 	"TxUnicastFramesOK",
 	"TxMulticastFramesOK",
 	"TxBroadcastFramesOK",
 	"TxPauseFrames",
 	"TxFramesWithDeferredXmissions",
 	"TxLateCollisions",
 	"TxTotalCollisions",
 	"TxFramesAbortedDueToXSCollisions",
 	"TxUnderrun",
 	"TxLengthErrors",
 	"TxInternalMACXmitError",
 	"TxFramesWithExcessiveDeferral",
 	"TxFCSErrors",
 
 	"RxOctetsOK",
 	"RxOctetsBad",
 	"RxUnicastFramesOK",
 	"RxMulticastFramesOK",
 	"RxBroadcastFramesOK",
 	"RxPauseFrames",
 	"RxFCSErrors",
 	"RxAlignErrors",
 	"RxSymbolErrors",
 	"RxDataErrors",
 	"RxSequenceErrors",
 	"RxRuntErrors",
 	"RxJabberErrors",
 	"RxInternalMACRcvError",
 	"RxInRangeLengthErrors",
 	"RxOutOfRangeLengthField",
-	"RxFrameTooLongErrors"
+	"RxFrameTooLongErrors",
+
+	"TSO",
+	"VLANextractions",
+	"VLANinsertions",
+	"RxCsumGood",
+	"TxCsumOffload",
+	"RxDrops"
+
+	"respQ_empty",
+	"respQ_overflow",
+	"freelistQ_empty",
+	"pkt_too_big",
+	"pkt_mismatch",
+	"cmdQ_full0",
+	"cmdQ_full1",
+	"tx_ipfrags",
+	"tx_reg_pkts",
+	"tx_lso_pkts",
+	"tx_do_cksum",
+
+	"espi_DIP2ParityErr",
+	"espi_DIP4Err",
+	"espi_RxDrops",
+	"espi_TxDrops",
+	"espi_RxOvfl",
+	"espi_ParityErr"
 };
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T2_REGMAP_SIZE;
+}
 
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct adapter *adapter = dev->priv;
 
-	strcpy(info->driver, driver_name);
-	strcpy(info->version, driver_version);
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
 	strcpy(info->fw_version, "N/A");
 	strcpy(info->bus_info, pci_name(adapter->pdev));
 }
@@ -401,42 +440,88 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	struct adapter *adapter = dev->priv;
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 	const struct cmac_statistics *s;
+	const struct sge_port_stats *ss;
+	const struct sge_intr_counts *t;
 
 	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+	t = t1_sge_get_intr_counts(adapter->sge);
+
+	*data++ = s->TxOctetsOK;
+	*data++ = s->TxOctetsBad;
+	*data++ = s->TxUnicastFramesOK;
+	*data++ = s->TxMulticastFramesOK;
+	*data++ = s->TxBroadcastFramesOK;
+	*data++ = s->TxPauseFrames;
+	*data++ = s->TxFramesWithDeferredXmissions;
+	*data++ = s->TxLateCollisions;
+	*data++ = s->TxTotalCollisions;
+	*data++ = s->TxFramesAbortedDueToXSCollisions;
+	*data++ = s->TxUnderrun;
+	*data++ = s->TxLengthErrors;
+	*data++ = s->TxInternalMACXmitError;
+	*data++ = s->TxFramesWithExcessiveDeferral;
+	*data++ = s->TxFCSErrors;
+
+	*data++ = s->RxOctetsOK;
+	*data++ = s->RxOctetsBad;
+	*data++ = s->RxUnicastFramesOK;
+	*data++ = s->RxMulticastFramesOK;
+	*data++ = s->RxBroadcastFramesOK;
+	*data++ = s->RxPauseFrames;
+	*data++ = s->RxFCSErrors;
+	*data++ = s->RxAlignErrors;
+	*data++ = s->RxSymbolErrors;
+	*data++ = s->RxDataErrors;
+	*data++ = s->RxSequenceErrors;
+	*data++ = s->RxRuntErrors;
+	*data++ = s->RxJabberErrors;
+	*data++ = s->RxInternalMACRcvError;
+	*data++ = s->RxInRangeLengthErrors;
+	*data++ = s->RxOutOfRangeLengthField;
+	*data++ = s->RxFrameTooLongErrors;
+
+	*data++ = ss->tso;
+	*data++ = ss->vlan_xtract;
+	*data++ = ss->vlan_insert;
+	*data++ = ss->rx_cso_good;
+	*data++ = ss->tx_cso;
+	*data++ = ss->rx_drops;
+
+	*data++ = (u64)t->respQ_empty;
+	*data++ = (u64)t->respQ_overflow;
+	*data++ = (u64)t->freelistQ_empty;
+	*data++ = (u64)t->pkt_too_big;
+	*data++ = (u64)t->pkt_mismatch;
+	*data++ = (u64)t->cmdQ_full[0];
+	*data++ = (u64)t->cmdQ_full[1];
+	*data++ = (u64)t->tx_ipfrags;
+	*data++ = (u64)t->tx_reg_pkts;
+	*data++ = (u64)t->tx_lso_pkts;
+	*data++ = (u64)t->tx_do_cksum;
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = readl(ap->regs + start);
+}
 
-	*data++ = s->TxOctetsOK;
-	*data++ = s->TxOctetsBad;
-	*data++ = s->TxUnicastFramesOK;
-	*data++ = s->TxMulticastFramesOK;
-	*data++ = s->TxBroadcastFramesOK;
-	*data++ = s->TxPauseFrames;
-	*data++ = s->TxFramesWithDeferredXmissions;
-	*data++ = s->TxLateCollisions;
-	*data++ = s->TxTotalCollisions;
-	*data++ = s->TxFramesAbortedDueToXSCollisions;
-	*data++ = s->TxUnderrun;
-	*data++ = s->TxLengthErrors;
-	*data++ = s->TxInternalMACXmitError;
-	*data++ = s->TxFramesWithExcessiveDeferral;
-	*data++ = s->TxFCSErrors;
-
-	*data++ = s->RxOctetsOK;
-	*data++ = s->RxOctetsBad;
-	*data++ = s->RxUnicastFramesOK;
-	*data++ = s->RxMulticastFramesOK;
-	*data++ = s->RxBroadcastFramesOK;
-	*data++ = s->RxPauseFrames;
-	*data++ = s->RxFCSErrors;
-	*data++ = s->RxAlignErrors;
-	*data++ = s->RxSymbolErrors;
-	*data++ = s->RxDataErrors;
-	*data++ = s->RxSequenceErrors;
-	*data++ = s->RxRuntErrors;
-	*data++ = s->RxJabberErrors;
-	*data++ = s->RxInternalMACRcvError;
-	*data++ = s->RxInRangeLengthErrors;
-	*data++ = s->RxOutOfRangeLengthField;
-	*data++ = s->RxFrameTooLongErrors;
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->priv;
+
+	/*
+	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+	 */
+	regs->version = 2;
+
+	memset(buf, 0, T2_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -455,12 +540,12 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		cmd->duplex = -1;
 	}
 
-        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
-        cmd->phy_address = p->phy->addr;
-        cmd->transceiver = XCVR_EXTERNAL;
-        cmd->autoneg = p->link_config.autoneg;
-        cmd->maxtxpkt = 0;
-        cmd->maxrxpkt = 0;
-        return 0;
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
 }
 
@@ -506,7 +591,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct link_config *lc = &p->link_config;
 
 	if (!(lc->supported & SUPPORTED_Autoneg))
-        	return -EOPNOTSUPP;             /* can't change speed/duplex */
+		return -EOPNOTSUPP;             /* can't change speed/duplex */
 
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
@@ -631,7 +716,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 		return -EINVAL;
 
 	if (adapter->flags & FULL_INIT_DONE)
-        	return -EBUSY;
+		return -EBUSY;
 
 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -645,22 +730,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct adapter *adapter = dev->priv;
 
-	unsigned int sge_coalesce_usecs = 0;
+	/*
+	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
+	 * This choice can be made only when all ports and the TOE are off.
+	 */
+	if (adapter->open_device_map == 0)
+		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
 
-	sge_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
-	sge_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
-	if ( (adapter->params.sge.coalesce_enable && !c->use_adaptive_rx_coalesce) &&
-	     (c->rx_coalesce_usecs == sge_coalesce_usecs) ) {
-		adapter->params.sge.rx_coalesce_usecs =
-			adapter->params.sge.default_rx_coalesce_usecs;
+	if (adapter->params.sge.polling) {
+		adapter->params.sge.rx_coalesce_usecs = 0;
 	} else {
 		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 	}
-
-	adapter->params.sge.last_rx_coalesce_raw = adapter->params.sge.rx_coalesce_usecs;
-	adapter->params.sge.last_rx_coalesce_raw *= (board_info(adapter)->clock_core / 1000000);
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
-	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 	return 0;
 }
@@ -669,12 +752,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct adapter *adapter = dev->priv;
 
-	if (adapter->params.sge.coalesce_enable) { /* Adaptive algorithm on */
-		c->rx_coalesce_usecs = adapter->params.sge.last_rx_coalesce_raw;
-		c->rx_coalesce_usecs /= board_info(adapter)->clock_core / 1000000;
-	} else {
-		c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
-	}
+	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 	return 0;
@@ -682,9 +760,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 
 static int get_eeprom_len(struct net_device *dev)
 {
-	struct adapter *adapter = dev->priv;
-
-	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
+	return EEPROM_SIZE;
 }
 
 #define EEPROM_MAGIC(ap) \
@@ -728,118 +804,55 @@ static struct ethtool_ops t1_ethtool_ops = {
 	.get_strings = get_strings,
 	.get_stats_count = get_stats_count,
 	.get_ethtool_stats = get_stats,
+	.get_regs_len = get_regs_len,
+	.get_regs = get_regs,
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = set_tso,
 };
 
-static int ethtool_ioctl(struct net_device *dev, void *useraddr)
-{
-	u32 cmd;
-	struct adapter *adapter = dev->priv;
-
-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
-		return -EFAULT;
-
-	switch (cmd) {
-	case ETHTOOL_SETREG: {
-		struct ethtool_reg edata;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		if (edata.addr == A_ESPI_MISC_CONTROL)
-			t1_espi_set_misc_ctrl(adapter, edata.val);
-		else {
-			if (edata.addr == 0x950)
-				t1_sge_set_ptimeout(adapter, edata.val);
-			else
-				writel(edata.val, adapter->regs + edata.addr);
-		}
-		break;
-	}
-	case ETHTOOL_GETREG: {
-		struct ethtool_reg edata;
-
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		if (edata.addr >= 0x900 && edata.addr <= 0x93c)
-			edata.val = t1_espi_get_mon(adapter, edata.addr, 1);
-		else {
-			if (edata.addr == 0x950)
-				edata.val = t1_sge_get_ptimeout(adapter);
-			else
-				edata.val = readl(adapter->regs + edata.addr);
-		}
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		break;
-	}
-	case ETHTOOL_SETTPI: {
-		struct ethtool_reg edata;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0)
-			return -EINVAL;
-		t1_tpi_write(adapter, edata.addr, edata.val);
-		break;
-	}
-	case ETHTOOL_GETTPI: {
-		struct ethtool_reg edata;
-
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0)
-			return -EINVAL;
-		t1_tpi_read(adapter, edata.addr, &edata.val);
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		break;
-	}
-	default:
-		return -EOPNOTSUPP;
-	}
-	return 0;
+static void cxgb_proc_cleanup(struct adapter *adapter,
+			      struct proc_dir_entry *dir)
+{
+	const char *name;
+	name = adapter->name;
+	remove_proc_entry(name, dir);
 }
+//#define chtoe_setup_toedev(adapter) NULL
+#define update_mtu_tab(adapter)
+#define write_smt_entry(adapter, idx)
 
 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
 	struct adapter *adapter = dev->priv;
 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
 
 	switch (cmd) {
 	case SIOCGMIIPHY:
 		data->phy_id = adapter->port[dev->if_port].phy->addr;
 		/* FALLTHRU */
 	case SIOCGMIIREG: {
 		struct cphy *phy = adapter->port[dev->if_port].phy;
 		u32 val;
 
-		if (!phy->mdio_read) return -EOPNOTSUPP;
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
 		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			       &val);
 		data->val_out = val;
 		break;
 	}
 	case SIOCSMIIREG: {
 		struct cphy *phy = adapter->port[dev->if_port].phy;
 
-		if (!capable(CAP_NET_ADMIN)) return -EPERM;
-		if (!phy->mdio_write) return -EOPNOTSUPP;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
 		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			        data->val_in);
 		break;
 	}
 
-	case SIOCCHETHTOOL:
-		return ethtool_ioctl(dev, (void *)req->ifr_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -853,9 +866,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 
 	if (!mac->ops->set_mtu)
-        	return -EOPNOTSUPP;
+		return -EOPNOTSUPP;
 	if (new_mtu < 68)
-        	return -EINVAL;
+		return -EINVAL;
 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -902,9 +915,12 @@ static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void t1_netpoll(struct net_device *dev)
 {
+	unsigned long flags;
 	struct adapter *adapter = dev->priv;
 
-	t1_interrupt(adapter->pdev->irq, adapter, NULL);
+	local_irq_save(flags);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
+	local_irq_restore(flags);
 }
 #endif
 
@@ -938,16 +954,17 @@ static void mac_stats_task(void *data)
  */
 static void ext_intr_task(void *data)
 {
-	u32 enable;
 	struct adapter *adapter = data;
 
 	elmer0_ext_intr_handler(adapter);
 
 	/* Now reenable external interrupts */
-	t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
-	enable = t1_read_reg_4(adapter, A_PL_ENABLE);
-	t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
+	spin_lock_irq(&adapter->async_lock);
 	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
 }
 
 /*
@@ -955,15 +972,14 @@ static void ext_intr_task(void *data)
  */
 void t1_elmer0_ext_intr(struct adapter *adapter)
 {
-	u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);
-
 	/*
 	 * Schedule a task to handle external interrupts as we require
 	 * a process context. We disable EXT interrupts in the interim
 	 * and let the task reenable them when it's done.
 	 */
 	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
-	t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
 	schedule_work(&adapter->ext_intr_handler_task);
 }
 
@@ -977,7 +993,6 @@ void t1_fatal_err(struct adapter *adapter)
 		adapter->name);
 }
 
-
 static int __devinit init_one(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -990,14 +1005,14 @@ static int __devinit init_one(struct pci_dev *pdev,
 	struct port_info *pi;
 
 	if (!version_printed) {
-		printk(KERN_INFO "%s - version %s\n", driver_string,
-		       driver_version);
+		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
+		       DRV_VERSION);
 		++version_printed;
 	}
 
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		CH_ERR("%s: cannot find PCI device memory base address\n",
@@ -1006,20 +1021,22 @@ static int __devinit init_one(struct pci_dev *pdev,
 		goto out_disable_pdev;
 	}
 
-	if (!pci_set_dma_mask(pdev, PCI_DMA_64BIT)) {
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		pci_using_dac = 1;
-		if (pci_set_consistent_dma_mask(pdev, PCI_DMA_64BIT)) {
+
+		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
 			CH_ERR("%s: unable to obtain 64-bit DMA for"
 			       "consistent allocations\n", pci_name(pdev));
 			err = -ENODEV;
 			goto out_disable_pdev;
 		}
-	} else if ((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT)) != 0) {
+
+	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
 		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
 		goto out_disable_pdev;
 	}
 
-	err = pci_request_regions(pdev, driver_name);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
 		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
 		goto out_disable_pdev;
@@ -1027,7 +1044,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-        mmio_start = pci_resource_start(pdev, 0);
+	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
 	bi = t1_get_board_info(ent->driver_data);
 
@@ -1074,9 +1091,14 @@ static int __devinit init_one(struct pci_dev *pdev,
 				  ext_intr_task, adapter);
 		INIT_WORK(&adapter->stats_update_task, mac_stats_task,
 			  adapter);
+#ifdef work_struct
+		init_timer(&adapter->stats_update_timer);
+		adapter->stats_update_timer.function = mac_stats_timer;
+		adapter->stats_update_timer.data =
+			(unsigned long)adapter;
+#endif
 
 		pci_set_drvdata(pdev, netdev);
-
 	}
 
 	pi = &adapter->port[i];
@@ -1088,11 +1110,12 @@ static int __devinit init_one(struct pci_dev *pdev,
 	netdev->mem_end = mmio_start + mmio_len - 1;
 	netdev->priv = adapter;
 	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_LLTX;
+
 	adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 	if (vlan_tso_capable(adapter)) {
-		adapter->flags |= UDP_CSUM_CAPABLE;
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 		adapter->flags |= VLAN_ACCEL_CAPABLE;
 		netdev->features |=
@@ -1120,7 +1143,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 #endif
 		netdev->weight = 64;
 
-                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
 
 	if (t1_init_sw_modules(adapter, bi) < 0) {
@@ -1147,7 +1170,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 		if (!adapter->registered_device_map)
 			adapter->name = adapter->port[i].dev->name;
 
-                        __set_bit(i, &adapter->registered_device_map);
+			__set_bit(i, &adapter->registered_device_map);
 		}
 	}
 	if (!adapter->registered_device_map) {
@@ -1166,11 +1189,12 @@ static int __devinit init_one(struct pci_dev *pdev,
 	t1_free_sw_modules(adapter);
  out_free_dev:
 	if (adapter) {
-		if (adapter->regs)
-			iounmap(adapter->regs);
+		if (adapter->regs) iounmap(adapter->regs);
 		for (i = bi->port_number - 1; i >= 0; --i)
-			if (adapter->port[i].dev)
-				free_netdev(adapter->port[i].dev);
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				kfree(adapter->port[i].dev);
+			}
 	}
 	pci_release_regions(pdev);
  out_disable_pdev:
@@ -1200,8 +1224,10 @@ static void __devexit remove_one(struct pci_dev *pdev)
1200 t1_free_sw_modules(adapter); 1224 t1_free_sw_modules(adapter);
1201 iounmap(adapter->regs); 1225 iounmap(adapter->regs);
1202 while (--i >= 0) 1226 while (--i >= 0)
1203 if (adapter->port[i].dev) 1227 if (adapter->port[i].dev) {
1204 free_netdev(adapter->port[i].dev); 1228 cxgb_proc_cleanup(adapter, proc_root_driver);
1229 kfree(adapter->port[i].dev);
1230 }
1205 pci_release_regions(pdev); 1231 pci_release_regions(pdev);
1206 pci_disable_device(pdev); 1232 pci_disable_device(pdev);
1207 pci_set_drvdata(pdev, NULL); 1233 pci_set_drvdata(pdev, NULL);
@@ -1210,7 +1236,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
1210} 1236}
1211 1237
1212static struct pci_driver driver = { 1238static struct pci_driver driver = {
1213 .name = driver_name, 1239 .name = DRV_NAME,
1214 .id_table = t1_pci_tbl, 1240 .id_table = t1_pci_tbl,
1215 .probe = init_one, 1241 .probe = init_one,
1216 .remove = __devexit_p(remove_one), 1242 .remove = __devexit_p(remove_one),
@@ -1228,4 +1254,3 @@ static void __exit t1_cleanup_module(void)
1228 1254
1229module_init(t1_init_module); 1255module_init(t1_init_module);
1230module_exit(t1_cleanup_module); 1256module_exit(t1_cleanup_module);
1231
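
The cxgb2.c hunks above add NETIF_F_LLTX alongside the existing scatter-gather and checksum flags. A minimal sketch of what that flag implies for the transmit path, assuming the 2.6-era netdev API; the function name and body are illustrative, not the driver's actual xmit routine:

/*
 * Illustrative only: with NETIF_F_LLTX the core no longer takes
 * dev->xmit_lock around hard_start_xmit, so the driver serializes
 * transmits itself, here on the command queue's own spinlock.
 */
static int sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct cmdQ *q = &adapter->sge->cmdQ[0];

        spin_lock(&q->lock);    /* driver-private TX serialization */
        /* ... reserve descriptors, write cmdQ entries, ring the doorbell ... */
        spin_unlock(&q->lock);
        return 0;
}
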
diff --git a/drivers/net/chelsio/cxgb2.h b/drivers/net/chelsio/cxgb2.h
deleted file mode 100644
index 6ac326afcf01..000000000000
--- a/drivers/net/chelsio/cxgb2.h
+++ /dev/null
@@ -1,122 +0,0 @@
1/*****************************************************************************
2 * *
3 * File: cxgb2.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef __CXGB_LINUX_H__
40#define __CXGB_LINUX_H__
41
42#include <linux/netdevice.h>
43#include <linux/skbuff.h>
44#include <linux/version.h>
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47
48/* This belongs in if_ether.h */
49#define ETH_P_CPL5 0xf
50
51struct cmac;
52struct cphy;
53
54struct port_info {
55 struct net_device *dev;
56 struct cmac *mac;
57 struct cphy *phy;
58 struct link_config link_config;
59 struct net_device_stats netstats;
60};
61
62struct cxgbdev;
63struct t1_sge;
64struct pemc3;
65struct pemc4;
66struct pemc5;
67struct peulp;
68struct petp;
69struct pecspi;
70struct peespi;
71struct work_struct;
72struct vlan_group;
73
74enum { /* adapter flags */
75 FULL_INIT_DONE = 0x1,
76 USING_MSI = 0x2,
77 TSO_CAPABLE = 0x4,
78 TCP_CSUM_CAPABLE = 0x8,
79 UDP_CSUM_CAPABLE = 0x10,
80 VLAN_ACCEL_CAPABLE = 0x20,
81 RX_CSUM_ENABLED = 0x40,
82};
83
84struct adapter {
85 u8 *regs;
86 struct pci_dev *pdev;
87 unsigned long registered_device_map;
88 unsigned long open_device_map;
89 unsigned int flags;
90
91 const char *name;
92 int msg_enable;
93 u32 mmio_len;
94
95 struct work_struct ext_intr_handler_task;
96 struct adapter_params params;
97
98 struct vlan_group *vlan_grp;
99
100 /* Terminator modules. */
101 struct sge *sge;
102 struct pemc3 *mc3;
103 struct pemc4 *mc4;
104 struct pemc5 *mc5;
105 struct petp *tp;
106 struct pecspi *cspi;
107 struct peespi *espi;
108 struct peulp *ulp;
109
110 struct port_info port[MAX_NPORTS];
111 struct work_struct stats_update_task;
112 struct timer_list stats_update_timer;
113
114 struct semaphore mib_mutex;
115 spinlock_t tpi_lock;
116 spinlock_t work_lock;
117
118 spinlock_t async_lock ____cacheline_aligned; /* guards async operations */
119 u32 slow_intr_mask;
120};
121
122#endif
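
The adapter-flags enum in the deleted header replaces loose #defines with enumerated bit values. A short usage sketch; the helper and the ethtool-style toggle are hypothetical, shown only to illustrate how the bits combine:

/*
 * Hypothetical helper: adapter->flags is a plain unsigned int, so
 * ordinary bitwise ops suffice to test and toggle capability bits.
 */
static inline int rx_csum_enabled(const struct adapter *adapter)
{
        return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

/* e.g. toggling from an ethtool set_rx_csum-style handler: */
if (enable)
        adapter->flags |= RX_CSUM_ENABLED;
else
        adapter->flags &= ~RX_CSUM_ENABLED;
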
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 08f148643e7f..5590cb2dac19 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: elmer0.h * 3 * File: elmer0.h *
4 * $Revision: 1.3 $ * 4 * $Revision: 1.6 $ *
5 * $Date: 2005/03/23 07:15:58 $ * 5 * $Date: 2005/06/21 22:49:43 $ *
6 * Description: * 6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. * 7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * * 8 * *
@@ -36,14 +36,8 @@
36 * * 36 * *
37 ****************************************************************************/ 37 ****************************************************************************/
38 38
39#ifndef CHELSIO_ELMER0_H 39#ifndef _CXGB_ELMER0_H_
40#define CHELSIO_ELMER0_H 40#define _CXGB_ELMER0_H_
41
42/* ELMER0 flavors */
43enum {
44 ELMER0_XC2S300E_6FT256_C,
45 ELMER0_XC2S100E_6TQ144_C
46};
47 41
48/* ELMER0 registers */ 42/* ELMER0 registers */
49#define A_ELMER0_VERSION 0x100000 43#define A_ELMER0_VERSION 0x100000
@@ -154,4 +148,4 @@ enum {
154#define MI1_OP_INDIRECT_READ_INC 2 148#define MI1_OP_INDIRECT_READ_INC 2
155#define MI1_OP_INDIRECT_READ 3 149#define MI1_OP_INDIRECT_READ 3
156 150
157#endif 151#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 7ec2dc7bafac..230642571c92 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: espi.c * 3 * File: espi.c *
4 * $Revision: 1.9 $ * 4 * $Revision: 1.14 $ *
5 * $Date: 2005/03/23 07:41:27 $ * 5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: * 6 * Description: *
7 * Ethernet SPI functionality. * 7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -63,15 +63,16 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
63{ 63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS; 64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65 65
66 t1_write_reg_4(adapter, A_ESPI_CMD_ADDR, V_WRITE_DATA(wr_data) | 66 writel(V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) | 67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | 68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) | 69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE)); 70 V_SPI4_COMMAND(TRICN_CMD_WRITE),
71 t1_write_reg_4(adapter, A_ESPI_GOSTAT, 0); 71 adapter->regs + A_ESPI_CMD_ADDR);
72 writel(0, adapter->regs + A_ESPI_GOSTAT);
72 73
73 do { 74 do {
74 busy = t1_read_reg_4(adapter, A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; 75 busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
75 } while (busy && --attempts); 76 } while (busy && --attempts);
76 77
77 if (busy) 78 if (busy)
@@ -99,12 +100,12 @@ static int tricn_init(adapter_t *adapter)
99 /* 1 */ 100 /* 1 */
100 timeout=1000; 101 timeout=1000;
101 do { 102 do {
102 stat = t1_read_reg_4(adapter, A_ESPI_RX_RESET); 103 stat = readl(adapter->regs + A_ESPI_RX_RESET);
103 is_ready = (stat & 0x4); 104 is_ready = (stat & 0x4);
104 timeout--; 105 timeout--;
105 udelay(5); 106 udelay(5);
106 } while (!is_ready || (timeout==0)); 107 } while (!is_ready || (timeout==0));
107 t1_write_reg_4(adapter, A_ESPI_RX_RESET, 0x2); 108 writel(0x2, adapter->regs + A_ESPI_RX_RESET);
108 if (timeout==0) 109 if (timeout==0)
109 { 110 {
110 CH_ERR("ESPI : ERROR : Timeout tricn_init() \n"); 111 CH_ERR("ESPI : ERROR : Timeout tricn_init() \n");
@@ -127,14 +128,14 @@ static int tricn_init(adapter_t *adapter)
127 for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1); 128 for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
128 129
129 /* 3 */ 130 /* 3 */
130 t1_write_reg_4(adapter, A_ESPI_RX_RESET, 0x3); 131 writel(0x3, adapter->regs + A_ESPI_RX_RESET);
131 132
132 return 0; 133 return 0;
133} 134}
134 135
135void t1_espi_intr_enable(struct peespi *espi) 136void t1_espi_intr_enable(struct peespi *espi)
136{ 137{
137 u32 enable, pl_intr = t1_read_reg_4(espi->adapter, A_PL_ENABLE); 138 u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
138 139
139 /* 140 /*
140 * Cannot enable ESPI interrupts on T1B because HW asserts the 141 * Cannot enable ESPI interrupts on T1B because HW asserts the
@@ -144,28 +145,28 @@ void t1_espi_intr_enable(struct peespi *espi)
144 * cannot be cleared (HW bug). 145 * cannot be cleared (HW bug).
145 */ 146 */
146 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK; 147 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
147 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, enable); 148 writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
148 t1_write_reg_4(espi->adapter, A_PL_ENABLE, pl_intr | F_PL_INTR_ESPI); 149 writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
149} 150}
150 151
151void t1_espi_intr_clear(struct peespi *espi) 152void t1_espi_intr_clear(struct peespi *espi)
152{ 153{
153 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, 0xffffffff); 154 writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
154 t1_write_reg_4(espi->adapter, A_PL_CAUSE, F_PL_INTR_ESPI); 155 writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
155} 156}
156 157
157void t1_espi_intr_disable(struct peespi *espi) 158void t1_espi_intr_disable(struct peespi *espi)
158{ 159{
159 u32 pl_intr = t1_read_reg_4(espi->adapter, A_PL_ENABLE); 160 u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
160 161
161 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, 0); 162 writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
162 t1_write_reg_4(espi->adapter, A_PL_ENABLE, pl_intr & ~F_PL_INTR_ESPI); 163 writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
163} 164}
164 165
165int t1_espi_intr_handler(struct peespi *espi) 166int t1_espi_intr_handler(struct peespi *espi)
166{ 167{
167 u32 cnt; 168 u32 cnt;
168 u32 status = t1_read_reg_4(espi->adapter, A_ESPI_INTR_STATUS); 169 u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
169 170
170 if (status & F_DIP4ERR) 171 if (status & F_DIP4ERR)
171 espi->intr_cnt.DIP4_err++; 172 espi->intr_cnt.DIP4_err++;
@@ -184,7 +185,7 @@ int t1_espi_intr_handler(struct peespi *espi)
184 * Must read the error count to clear the interrupt 185 * Must read the error count to clear the interrupt
185 * that it causes. 186 * that it causes.
186 */ 187 */
187 cnt = t1_read_reg_4(espi->adapter, A_ESPI_DIP2_ERR_COUNT); 188 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
188 } 189 }
189 190
190 /* 191 /*
@@ -193,68 +194,28 @@ int t1_espi_intr_handler(struct peespi *espi)
193 */ 194 */
194 if (status && t1_is_T1B(espi->adapter)) 195 if (status && t1_is_T1B(espi->adapter))
195 status = 1; 196 status = 1;
196 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, status); 197 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
197 return 0; 198 return 0;
198} 199}
199 200
200static void espi_setup_for_pm3393(adapter_t *adapter) 201const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
201{ 202{
202 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; 203 return &espi->intr_cnt;
203
204 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN0, 0x1f4);
205 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN1, 0x1f4);
206 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN2, 0x1f4);
207 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN3, 0x1f4);
208 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK, 0x100);
209 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK, wmark);
210 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 3);
211 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0x08000008);
212 t1_write_reg_4(adapter, A_PORT_CONFIG,
213 V_RX_NPORTS(1) | V_TX_NPORTS(1));
214} 204}
215 205
216static void espi_setup_for_vsc7321(adapter_t *adapter) 206static void espi_setup_for_pm3393(adapter_t *adapter)
217{ 207{
218 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; 208 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
219 209
220 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN0, 0x1f4); 210 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
221 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN1, 0x1f4); 211 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
222 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN2, 0x1f4); 212 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
223 t1_write_reg_4(adapter, A_ESPI_SCH_TOKEN3, 0x1f4); 213 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
224 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK, 0x100); 214 writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
225 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK, wmark); 215 writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
226 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 3); 216 writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
227 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0x08000008); 217 writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
228 t1_write_reg_4(adapter, A_PORT_CONFIG, 218 writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
229 V_RX_NPORTS(1) | V_TX_NPORTS(1));
230}
231
232/*
233 * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
234 */
235static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
236{
237 t1_write_reg_4(adapter, A_ESPI_CALENDAR_LENGTH, 1);
238 if (nports == 4) {
239 if (is_T2(adapter)) {
240 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
241 0xf00);
242 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
243 0x3c0);
244 } else {
245 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
246 0x7ff);
247 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
248 0x1ff);
249 }
250 } else {
251 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK,
252 0x1fff);
253 t1_write_reg_4(adapter, A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK,
254 0x7ff);
255 }
256 t1_write_reg_4(adapter, A_PORT_CONFIG,
257 V_RX_NPORTS(nports) | V_TX_NPORTS(nports));
258} 219}
259 220
260 /* T2 Init part -- */ 221 /* T2 Init part -- */
@@ -263,43 +224,42 @@ static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
263/* 3. Init TriCN Hard Macro */ 224/* 3. Init TriCN Hard Macro */
264int t1_espi_init(struct peespi *espi, int mac_type, int nports) 225int t1_espi_init(struct peespi *espi, int mac_type, int nports)
265{ 226{
227 u32 cnt;
228
266 u32 status_enable_extra = 0; 229 u32 status_enable_extra = 0;
267 adapter_t *adapter = espi->adapter; 230 adapter_t *adapter = espi->adapter;
268 u32 cnt;
269 u32 status, burstval = 0x800100; 231 u32 status, burstval = 0x800100;
270 232
271 /* Disable ESPI training. MACs that can handle it enable it below. */ 233 /* Disable ESPI training. MACs that can handle it enable it below. */
272 t1_write_reg_4(adapter, A_ESPI_TRAIN, 0); 234 writel(0, adapter->regs + A_ESPI_TRAIN);
273 235
274 if (is_T2(adapter)) { 236 if (is_T2(adapter)) {
275 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, 237 writel(V_OUT_OF_SYNC_COUNT(4) |
276 V_OUT_OF_SYNC_COUNT(4) | 238 V_DIP2_PARITY_ERR_THRES(3) |
277 V_DIP2_PARITY_ERR_THRES(3) | V_DIP4_THRES(1)); 239 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
278 if (nports == 4) { 240 if (nports == 4) {
279 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */ 241 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
280 burstval = 0x200040; 242 burstval = 0x200040;
281 } 243 }
282 } 244 }
283 t1_write_reg_4(adapter, A_ESPI_MAXBURST1_MAXBURST2, burstval); 245 writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
284 246
285 if (mac_type == CHBT_MAC_PM3393) 247 switch (mac_type) {
248 case CHBT_MAC_PM3393:
286 espi_setup_for_pm3393(adapter); 249 espi_setup_for_pm3393(adapter);
287 else if (mac_type == CHBT_MAC_VSC7321) 250 break;
288 espi_setup_for_vsc7321(adapter); 251 default:
289 else if (mac_type == CHBT_MAC_IXF1010) {
290 status_enable_extra = F_INTEL1010MODE;
291 espi_setup_for_ixf1010(adapter, nports);
292 } else
293 return -1; 252 return -1;
253 }
294 254
295 /* 255 /*
296 * Make sure any pending interrupts from the SPI are 256 * Make sure any pending interrupts from the SPI are
297 * Cleared before enabling the interrupt. 257 * Cleared before enabling the interrupt.
298 */ 258 */
299 t1_write_reg_4(espi->adapter, A_ESPI_INTR_ENABLE, ESPI_INTR_MASK); 259 writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
300 status = t1_read_reg_4(espi->adapter, A_ESPI_INTR_STATUS); 260 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
301 if (status & F_DIP2PARITYERR) { 261 if (status & F_DIP2PARITYERR) {
302 cnt = t1_read_reg_4(espi->adapter, A_ESPI_DIP2_ERR_COUNT); 262 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
303 } 263 }
304 264
305 /* 265 /*
@@ -308,10 +268,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
308 */ 268 */
309 if (status && t1_is_T1B(espi->adapter)) 269 if (status && t1_is_T1B(espi->adapter))
310 status = 1; 270 status = 1;
311 t1_write_reg_4(espi->adapter, A_ESPI_INTR_STATUS, status); 271 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
312 272
313 t1_write_reg_4(adapter, A_ESPI_FIFO_STATUS_ENABLE, 273 writel(status_enable_extra | F_RXSTATUSENABLE,
314 status_enable_extra | F_RXSTATUSENABLE); 274 adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
315 275
316 if (is_T2(adapter)) { 276 if (is_T2(adapter)) {
317 tricn_init(adapter); 277 tricn_init(adapter);
@@ -319,10 +279,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
319 * Always position the control at the 1st port egress IN 279 * Always position the control at the 1st port egress IN
320 * (sop,eop) counter to reduce PIOs for T/N210 workaround. 280 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
321 */ 281 */
322 espi->misc_ctrl = (t1_read_reg_4(adapter, A_ESPI_MISC_CONTROL) 282 espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
323 & ~MON_MASK) | (F_MONITORED_DIRECTION 283 & ~MON_MASK) | (F_MONITORED_DIRECTION
324 | F_MONITORED_INTERFACE); 284 | F_MONITORED_INTERFACE);
325 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, espi->misc_ctrl); 285 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
326 spin_lock_init(&espi->lock); 286 spin_lock_init(&espi->lock);
327 } 287 }
328 288
@@ -354,15 +314,16 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
354 spin_lock(&espi->lock); 314 spin_lock(&espi->lock);
355 espi->misc_ctrl = (val & ~MON_MASK) | 315 espi->misc_ctrl = (val & ~MON_MASK) |
356 (espi->misc_ctrl & MON_MASK); 316 (espi->misc_ctrl & MON_MASK);
357 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, espi->misc_ctrl); 317 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
358 spin_unlock(&espi->lock); 318 spin_unlock(&espi->lock);
359} 319}
360 320
361u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) 321u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
362{ 322{
363 struct peespi *espi = adapter->espi;
364 u32 sel; 323 u32 sel;
365 324
325 struct peespi *espi = adapter->espi;
326
366 if (!is_T2(adapter)) 327 if (!is_T2(adapter))
367 return 0; 328 return 0;
368 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); 329 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
@@ -373,14 +334,13 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
373 else 334 else
374 spin_lock(&espi->lock); 335 spin_lock(&espi->lock);
375 if ((sel != (espi->misc_ctrl & MON_MASK))) { 336 if ((sel != (espi->misc_ctrl & MON_MASK))) {
376 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, 337 writel(((espi->misc_ctrl & ~MON_MASK) | sel),
377 ((espi->misc_ctrl & ~MON_MASK) | sel)); 338 adapter->regs + A_ESPI_MISC_CONTROL);
378 sel = t1_read_reg_4(adapter, A_ESPI_SCH_TOKEN3); 339 sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
379 t1_write_reg_4(adapter, A_ESPI_MISC_CONTROL, 340 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
380 espi->misc_ctrl);
381 } 341 }
382 else 342 else
383 sel = t1_read_reg_4(adapter, A_ESPI_SCH_TOKEN3); 343 sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
384 spin_unlock(&espi->lock); 344 spin_unlock(&espi->lock);
385 return sel; 345 return sel;
386} 346}
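
Every espi.c hunk above applies the same mechanical rewrite: the removed t1_read_reg_4/t1_write_reg_4 wrappers (visible in the deleted osdep.h further down) were thin veneers over readl/writel on adapter->regs, so the calls are now open-coded. A before/after sketch of the pattern, reusing register names from the hunks above:

/* before: wrapper takes (adapter, offset, value) */
t1_write_reg_4(adapter, A_ESPI_TRAIN, 0);
val = t1_read_reg_4(adapter, A_ESPI_GOSTAT);

/* after: writel() takes the value first, which is why every
 * conversion in this diff reorders the operands */
writel(0, adapter->regs + A_ESPI_TRAIN);
val = readl(adapter->regs + A_ESPI_GOSTAT);
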
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
index 0f84e8b6399f..c90e37f8457c 100644
--- a/drivers/net/chelsio/espi.h
+++ b/drivers/net/chelsio/espi.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: espi.h * 3 * File: espi.h *
4 * $Revision: 1.4 $ * 4 * $Revision: 1.7 $ *
5 * $Date: 2005/03/23 07:15:58 $ * 5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: * 6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. * 7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * * 8 * *
@@ -36,8 +36,8 @@
36 * * 36 * *
37 ****************************************************************************/ 37 ****************************************************************************/
38 38
39#ifndef CHELSIO_ESPI_H 39#ifndef _CXGB_ESPI_H_
40#define CHELSIO_ESPI_H 40#define _CXGB_ESPI_H_
41 41
42#include "common.h" 42#include "common.h"
43 43
@@ -60,8 +60,9 @@ void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *); 60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *); 61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *); 62int t1_espi_intr_handler(struct peespi *);
63const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
63 64
64void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val); 65void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
65u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); 66u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
66 67
67#endif 68#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index 24501e2232cc..746b0eeea964 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: gmac.h * 3 * File: gmac.h *
4 * $Revision: 1.3 $ * 4 * $Revision: 1.6 $ *
5 * $Date: 2005/03/23 07:15:58 $ * 5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: * 6 * Description: *
7 * Generic MAC functionality. * 7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -37,8 +37,8 @@
37 * * 37 * *
38 ****************************************************************************/ 38 ****************************************************************************/
39 39
40#ifndef CHELSIO_GMAC_H 40#ifndef _CXGB_GMAC_H_
41#define CHELSIO_GMAC_H 41#define _CXGB_GMAC_H_
42 42
43#include "common.h" 43#include "common.h"
44 44
@@ -130,4 +130,5 @@ extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops; 130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops; 131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops; 132extern struct gmac t1_dummy_mac_ops;
133#endif 133
134#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
index f54133af1bce..db5034282782 100644
--- a/drivers/net/chelsio/mv88x201x.c
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: mv88x201x.c * 3 * File: mv88x201x.c *
4 * $Revision: 1.7 $ * 4 * $Revision: 1.12 $ *
5 * $Date: 2005/03/23 07:15:59 $ * 5 * $Date: 2005/04/15 19:27:14 $ *
6 * Description: * 6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. * 7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -85,33 +85,29 @@ static int mv88x201x_reset(struct cphy *cphy, int wait)
85 85
86static int mv88x201x_interrupt_enable(struct cphy *cphy) 86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{ 87{
88 u32 elmer;
89
88 /* Enable PHY LASI interrupts. */ 90 /* Enable PHY LASI interrupts. */
89 mdio_write(cphy, 0x1, 0x9002, 0x1); 91 mdio_write(cphy, 0x1, 0x9002, 0x1);
90 92
91 /* Enable Marvell interrupts through Elmer0. */ 93 /* Enable Marvell interrupts through Elmer0. */
92 if (t1_is_asic(cphy->adapter)) { 94 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
93 u32 elmer; 95 elmer |= ELMER0_GP_BIT6;
94 96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
95 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
96 elmer |= ELMER0_GP_BIT6;
97 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
98 }
99 return 0; 97 return 0;
100} 98}
101 99
102static int mv88x201x_interrupt_disable(struct cphy *cphy) 100static int mv88x201x_interrupt_disable(struct cphy *cphy)
103{ 101{
102 u32 elmer;
103
104 /* Disable PHY LASI interrupts. */ 104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0); 105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106 106
107 /* Disable Marvell interrupts through Elmer0. */ 107 /* Disable Marvell interrupts through Elmer0. */
108 if (t1_is_asic(cphy->adapter)) { 108 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
109 u32 elmer; 109 elmer &= ~ELMER0_GP_BIT6;
110 110 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
111 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
112 elmer &= ~ELMER0_GP_BIT6;
113 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
114 }
115 return 0; 111 return 0;
116} 112}
117 113
@@ -144,11 +140,9 @@ static int mv88x201x_interrupt_clear(struct cphy *cphy)
144#endif 140#endif
145 141
146 /* Clear Marvell interrupts through Elmer0. */ 142 /* Clear Marvell interrupts through Elmer0. */
147 if (t1_is_asic(cphy->adapter)) { 143 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
148 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 144 elmer |= ELMER0_GP_BIT6;
149 elmer |= ELMER0_GP_BIT6; 145 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
150 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
151 }
152 return 0; 146 return 0;
153} 147}
154 148
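
The three mv88x201x.c hunks drop the t1_is_asic() guard and hoist the elmer variable, leaving the same read-modify-write of the Elmer0 interrupt registers over the TPI bus. The distilled pattern, as a sketch using the helpers the file already calls:

u32 elmer;

/* route the Marvell PHY's LASI interrupt through Elmer0 GP bit 6 */
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
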
diff --git a/drivers/net/chelsio/osdep.h b/drivers/net/chelsio/osdep.h
deleted file mode 100644
index 095cb474434f..000000000000
--- a/drivers/net/chelsio/osdep.h
+++ /dev/null
@@ -1,169 +0,0 @@
1/*****************************************************************************
2 * *
3 * File: osdep.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/03/23 07:41:27 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef __CHELSIO_OSDEP_H
40#define __CHELSIO_OSDEP_H
41
42#include <linux/version.h>
43#include <linux/module.h>
44#include <linux/config.h>
45#include <linux/types.h>
46#include <linux/delay.h>
47#include <linux/pci.h>
48#include <linux/ethtool.h>
49#include <linux/mii.h>
50#include <linux/crc32.h>
51#include <linux/init.h>
52#include <asm/io.h>
53
54#include "cxgb2.h"
55
56#define DRV_NAME "cxgb"
57#define PFX DRV_NAME ": "
58
59#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
60#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
61#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
62
63/*
64 * More powerful macro that selectively prints messages based on msg_enable.
65 * For info and debugging messages.
66 */
67#define CH_MSG(adapter, level, category, fmt, ...) do { \
68 if ((adapter)->msg_enable & NETIF_MSG_##category) \
69 printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
70 ## __VA_ARGS__); \
71} while (0)
72
73#ifdef DEBUG
74# define CH_DBG(adapter, category, fmt, ...) \
75 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
76#else
77# define CH_DBG(fmt, ...)
78#endif
79
80/* Additional NETIF_MSG_* categories */
81#define NETIF_MSG_MMIO 0x8000000
82
83#define CH_DEVICE(devid, ssid, idx) \
84 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
85
86#define SUPPORTED_PAUSE (1 << 13)
87#define SUPPORTED_LOOPBACK (1 << 15)
88
89#define ADVERTISED_PAUSE (1 << 13)
90#define ADVERTISED_ASYM_PAUSE (1 << 14)
91
92/*
93 * Now that we have included the driver's main data structure,
94 * we typedef it to something the rest of the system understands.
95 */
96typedef struct adapter adapter_t;
97
98#define TPI_LOCK(adapter) spin_lock(&(adapter)->tpi_lock)
99#define TPI_UNLOCK(adapter) spin_unlock(&(adapter)->tpi_lock)
100
101void t1_elmer0_ext_intr(adapter_t *adapter);
102void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
103 int speed, int duplex, int fc);
104
105static inline u16 t1_read_reg_2(adapter_t *adapter, u32 reg_addr)
106{
107 u16 val = readw(adapter->regs + reg_addr);
108
109 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr,
110 val);
111 return val;
112}
113
114static inline void t1_write_reg_2(adapter_t *adapter, u32 reg_addr, u16 val)
115{
116 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr,
117 val);
118 writew(val, adapter->regs + reg_addr);
119}
120
121static inline u32 t1_read_reg_4(adapter_t *adapter, u32 reg_addr)
122{
123 u32 val = readl(adapter->regs + reg_addr);
124
125 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr,
126 val);
127 return val;
128}
129
130static inline void t1_write_reg_4(adapter_t *adapter, u32 reg_addr, u32 val)
131{
132 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr,
133 val);
134 writel(val, adapter->regs + reg_addr);
135}
136
137static inline const char *port_name(adapter_t *adapter, int port_idx)
138{
139 return adapter->port[port_idx].dev->name;
140}
141
142static inline void t1_set_hw_addr(adapter_t *adapter, int port_idx,
143 u8 hw_addr[])
144{
145 memcpy(adapter->port[port_idx].dev->dev_addr, hw_addr, ETH_ALEN);
146}
147
148struct t1_rx_mode {
149 struct net_device *dev;
150 u32 idx;
151 struct dev_mc_list *list;
152};
153
154#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
155#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
156#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
157
158static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
159{
160 u8 *addr = 0;
161
162 if (rm->idx++ < rm->dev->mc_count) {
163 addr = rm->list->dmi_addr;
164 rm->list = rm->list->next;
165 }
166 return addr;
167}
168
169#endif
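
Per the changelog, the surviving pieces of osdep.h (DRV_NAME, the CH_* logging macros, the adapter_t typedef) move into common.h. A usage sketch of the msg_enable gating those macros implement; the 'debug' module parameter is an assumption for illustration, not something this diff shows:

/* probe time: seed msg_enable in the style of other net drivers */
adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_LINK);

/* later: skipped at run time unless the LINK category bit is set */
CH_MSG(adapter, INFO, LINK, "link is up\n");
/* expands to roughly:
 *   if (adapter->msg_enable & NETIF_MSG_LINK)
 *           printk(KERN_INFO "cxgb: %s: link is up\n", adapter->name);
 */
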
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 17bd20f60d99..04a1404fc65e 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: pm3393.c * 3 * File: pm3393.c *
4 * $Revision: 1.9 $ * 4 * $Revision: 1.16 $ *
5 * $Date: 2005/03/23 07:41:27 $ * 5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: * 6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. * 7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -45,15 +45,19 @@
45 45
46/* 802.3ae 10Gb/s MDIO Manageable Device(MMD) 46/* 802.3ae 10Gb/s MDIO Manageable Device(MMD)
47 */ 47 */
48#define MMD_RESERVED 0 48enum {
49#define MMD_PMAPMD 1 49 MMD_RESERVED,
50#define MMD_WIS 2 50 MMD_PMAPMD,
51#define MMD_PCS 3 51 MMD_WIS,
52#define MMD_PHY_XGXS 4 /* XGMII Extender Sublayer */ 52 MMD_PCS,
53#define MMD_DTE_XGXS 5 53 MMD_PHY_XGXS, /* XGMII Extender Sublayer */
54 MMD_DTE_XGXS,
55};
54 56
55#define PHY_XGXS_CTRL_1 0 57enum {
56#define PHY_XGXS_STATUS_1 1 58 PHY_XGXS_CTRL_1,
59 PHY_XGXS_STATUS_1
60};
57 61
58#define OFFSET(REG_ADDR) (REG_ADDR << 2) 62#define OFFSET(REG_ADDR) (REG_ADDR << 2)
59 63
@@ -160,9 +164,9 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
160 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ ); 164 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
161 165
162 /* TERMINATOR - PL_INTERUPTS_EXT */ 166 /* TERMINATOR - PL_INTERUPTS_EXT */
163 pl_intr = t1_read_reg_4(cmac->adapter, A_PL_ENABLE); 167 pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
164 pl_intr |= F_PL_INTR_EXT; 168 pl_intr |= F_PL_INTR_EXT;
165 t1_write_reg_4(cmac->adapter, A_PL_ENABLE, pl_intr); 169 writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
166 return 0; 170 return 0;
167} 171}
168 172
@@ -242,9 +246,9 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
242 246
243 /* TERMINATOR - PL_INTERUPTS_EXT 247 /* TERMINATOR - PL_INTERUPTS_EXT
244 */ 248 */
245 pl_intr = t1_read_reg_4(cmac->adapter, A_PL_CAUSE); 249 pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
246 pl_intr |= F_PL_INTR_EXT; 250 pl_intr |= F_PL_INTR_EXT;
247 t1_write_reg_4(cmac->adapter, A_PL_CAUSE, pl_intr); 251 writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
248 252
249 return 0; 253 return 0;
250} 254}
@@ -261,8 +265,6 @@ static int pm3393_interrupt_handler(struct cmac *cmac)
261 /* Read the master interrupt status register. */ 265 /* Read the master interrupt status register. */
262 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, 266 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
263 &master_intr_status); 267 &master_intr_status);
264 CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
265 master_intr_status);
266 268
267 /* TBD XXX Lets just clear everything for now */ 269 /* TBD XXX Lets just clear everything for now */
268 pm3393_interrupt_clear(cmac); 270 pm3393_interrupt_clear(cmac);
@@ -703,10 +705,9 @@ static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
703 705
704 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */ 706 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
705 /* For T1 use timer based Mac flow control. */ 707 /* For T1 use timer based Mac flow control. */
706 if (t1_is_T1B(adapter)) 708 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
707 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
708 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */ 709 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
709 t1_tpi_write(adapter, OFFSET(0x2049), 0x0000); /* # RXXG Cut Through */ 710 t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
710 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */ 711 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
711 712
712 /* Setup Exact Match Filter 0 to allow broadcast packets. 713 /* Setup Exact Match Filter 0 to allow broadcast packets.
@@ -814,12 +815,6 @@ static int pm3393_mac_reset(adapter_t * adapter)
814 815
815 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock 816 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
816 && is_xaui_mabc_pll_locked); 817 && is_xaui_mabc_pll_locked);
817
818 CH_DBG(adapter, HW,
819 "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
820 "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
821 i, is_pl4_reset_finished, val, is_pl4_outof_lock,
822 is_xaui_mabc_pll_locked);
823 } 818 }
824 return successful_reset ? 0 : 1; 819 return successful_reset ? 0 : 1;
825} 820}
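
The MMD_* values converted to enums above are IEEE 802.3ae clause-45 device addresses and keep the same 0..5 numbering the #defines had. A hedged usage sketch, assuming the mdio helper takes the same (cphy, mmd, reg, ...) shape as the mdio_write() calls seen in mv88x201x.c; OFFSET() simply shifts a register index into a byte offset for TPI accesses:

unsigned int val;

/* read XGXS status register 1 from MDIO device address 4 */
mdio_read(cphy, MMD_PHY_XGXS, PHY_XGXS_STATUS_1, &val);

/* TPI accesses address the PM3393 by register index * 4 */
t1_tpi_write(adapter, OFFSET(0x2040), 0x059c);  /* RXXG config */
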
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
index 5a70803eb1b6..b90e11f40d1f 100644
--- a/drivers/net/chelsio/regs.h
+++ b/drivers/net/chelsio/regs.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: regs.h * 3 * File: regs.h *
4 * $Revision: 1.4 $ * 4 * $Revision: 1.8 $ *
5 * $Date: 2005/03/23 07:15:59 $ * 5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: * 6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. * 7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * * 8 * *
@@ -36,7 +36,8 @@
36 * * 36 * *
37 ****************************************************************************/ 37 ****************************************************************************/
38 38
39/* Do not edit this file */ 39#ifndef _CXGB_REGS_H_
40#define _CXGB_REGS_H_
40 41
41/* SGE registers */ 42/* SGE registers */
42#define A_SG_CONTROL 0x0 43#define A_SG_CONTROL 0x0
@@ -74,6 +75,14 @@
74#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) 75#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
75#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) 76#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
76 77
78#define S_DISABLE_FL0_GTS 10
79#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
80#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
81
82#define S_DISABLE_FL1_GTS 11
83#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
84#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
85
77#define S_ENABLE_BIG_ENDIAN 12 86#define S_ENABLE_BIG_ENDIAN 12
78#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) 87#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
79#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) 88#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
@@ -132,6 +141,7 @@
132#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U) 141#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
133 142
134#define A_SG_INT_CAUSE 0xbc 143#define A_SG_INT_CAUSE 0xbc
144#define A_SG_RESPACCUTIMER 0xc0
135 145
136/* MC3 registers */ 146/* MC3 registers */
137 147
@@ -247,6 +257,10 @@
247#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) 257#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
248 258
249#define A_TP_PC_CONFIG 0x348 259#define A_TP_PC_CONFIG 0x348
260#define S_DIS_TX_FILL_WIN_PUSH 12
261#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
262#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
263
250#define S_TP_PC_REV 30 264#define S_TP_PC_REV 30
251#define M_TP_PC_REV 0x3 265#define M_TP_PC_REV 0x3
252#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) 266#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
@@ -451,3 +465,4 @@
451#define M_PCI_MODE_CLK 0x3 465#define M_PCI_MODE_CLK 0x3
452#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) 466#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
453 467
468#endif /* _CXGB_REGS_H_ */
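
The new entries follow the S_/V_/F_/M_/G_ idiom used throughout regs.h: S_x is a bit offset, V_x(v) positions a value, F_x is the single-bit mask, and M_x/G_x mask and extract multi-bit fields. A worked sketch with the fields this diff adds:

u32 ctrl, rev;

/* set the new single-bit fields: (1U << 10) | (1U << 11) */
ctrl = readl(adapter->regs + A_SG_CONTROL);
ctrl |= F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS;
writel(ctrl, adapter->regs + A_SG_CONTROL);

/* extract a multi-bit field: (val >> 30) & 0x3 */
rev = G_TP_PC_REV(readl(adapter->regs + A_TP_PC_CONFIG));
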
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index bcf8b1e939b0..53b41d99b00b 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: sge.c * 3 * File: sge.c *
4 * $Revision: 1.13 $ * 4 * $Revision: 1.26 $ *
5 * $Date: 2005/03/23 07:41:27 $ * 5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: * 6 * Description: *
7 * DMA engine. * 7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -58,59 +58,62 @@
58#include "regs.h" 58#include "regs.h"
59#include "espi.h" 59#include "espi.h"
60 60
61
62#ifdef NETIF_F_TSO
61#include <linux/tcp.h> 63#include <linux/tcp.h>
64#endif
62 65
63#define SGE_CMDQ_N 2 66#define SGE_CMDQ_N 2
64#define SGE_FREELQ_N 2 67#define SGE_FREELQ_N 2
65#define SGE_CMDQ0_E_N 512 68#define SGE_CMDQ0_E_N 1024
66#define SGE_CMDQ1_E_N 128 69#define SGE_CMDQ1_E_N 128
67#define SGE_FREEL_SIZE 4096 70#define SGE_FREEL_SIZE 4096
68#define SGE_JUMBO_FREEL_SIZE 512 71#define SGE_JUMBO_FREEL_SIZE 512
69#define SGE_FREEL_REFILL_THRESH 16 72#define SGE_FREEL_REFILL_THRESH 16
70#define SGE_RESPQ_E_N 1024 73#define SGE_RESPQ_E_N 1024
71#define SGE_INTR_BUCKETSIZE 100 74#define SGE_INTRTIMER_NRES 1000
72#define SGE_INTR_LATBUCKETS 5 75#define SGE_RX_COPY_THRES 256
73#define SGE_INTR_MAXBUCKETS 11
74#define SGE_INTRTIMER0 1
75#define SGE_INTRTIMER1 50
76#define SGE_INTRTIMER_NRES 10000
77#define SGE_RX_COPY_THRESHOLD 256
78#define SGE_RX_SM_BUF_SIZE 1536 76#define SGE_RX_SM_BUF_SIZE 1536
79 77
80#define SGE_RESPQ_REPLENISH_THRES ((3 * SGE_RESPQ_E_N) / 4) 78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
81 87
82#define SGE_RX_OFFSET 2
83#ifndef NET_IP_ALIGN 88#ifndef NET_IP_ALIGN
84# define NET_IP_ALIGN SGE_RX_OFFSET 89# define NET_IP_ALIGN 2
85#endif 90#endif
86 91
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
87/* 101/*
88 * Memory Mapped HW Command, Freelist and Response Queue Descriptors 102 * Command queue, receive buffer list, and response queue descriptors.
89 */ 103 */
90#if defined(__BIG_ENDIAN_BITFIELD) 104#if defined(__BIG_ENDIAN_BITFIELD)
91struct cmdQ_e { 105struct cmdQ_e {
92 u32 AddrLow; 106 u32 addr_lo;
93 u32 GenerationBit : 1; 107 u32 len_gen;
94 u32 BufferLength : 31; 108 u32 flags;
95 u32 RespQueueSelector : 4; 109 u32 addr_hi;
96 u32 ResponseTokens : 12;
97 u32 CmdId : 8;
98 u32 Reserved : 3;
99 u32 TokenValid : 1;
100 u32 Eop : 1;
101 u32 Sop : 1;
102 u32 DataValid : 1;
103 u32 GenerationBit2 : 1;
104 u32 AddrHigh;
105}; 110};
106 111
107struct freelQ_e { 112struct freelQ_e {
108 u32 AddrLow; 113 u32 addr_lo;
109 u32 GenerationBit : 1; 114 u32 len_gen;
110 u32 BufferLength : 31; 115 u32 gen2;
111 u32 Reserved : 31; 116 u32 addr_hi;
112 u32 GenerationBit2 : 1;
113 u32 AddrHigh;
114}; 117};
115 118
116struct respQ_e { 119struct respQ_e {
@@ -128,31 +131,19 @@ struct respQ_e {
128 u32 GenerationBit : 1; 131 u32 GenerationBit : 1;
129 u32 BufferLength; 132 u32 BufferLength;
130}; 133};
131
132#elif defined(__LITTLE_ENDIAN_BITFIELD) 134#elif defined(__LITTLE_ENDIAN_BITFIELD)
133struct cmdQ_e { 135struct cmdQ_e {
134 u32 BufferLength : 31; 136 u32 len_gen;
135 u32 GenerationBit : 1; 137 u32 addr_lo;
136 u32 AddrLow; 138 u32 addr_hi;
137 u32 AddrHigh; 139 u32 flags;
138 u32 GenerationBit2 : 1;
139 u32 DataValid : 1;
140 u32 Sop : 1;
141 u32 Eop : 1;
142 u32 TokenValid : 1;
143 u32 Reserved : 3;
144 u32 CmdId : 8;
145 u32 ResponseTokens : 12;
146 u32 RespQueueSelector : 4;
147}; 140};
148 141
149struct freelQ_e { 142struct freelQ_e {
150 u32 BufferLength : 31; 143 u32 len_gen;
151 u32 GenerationBit : 1; 144 u32 addr_lo;
152 u32 AddrLow; 145 u32 addr_hi;
153 u32 AddrHigh; 146 u32 gen2;
154 u32 GenerationBit2 : 1;
155 u32 Reserved : 31;
156}; 147};
157 148
158struct respQ_e { 149struct respQ_e {
@@ -179,7 +170,6 @@ struct cmdQ_ce {
179 struct sk_buff *skb; 170 struct sk_buff *skb;
180 DECLARE_PCI_UNMAP_ADDR(dma_addr); 171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
181 DECLARE_PCI_UNMAP_LEN(dma_len); 172 DECLARE_PCI_UNMAP_LEN(dma_len);
182 unsigned int single;
183}; 173};
184 174
185struct freelQ_ce { 175struct freelQ_ce {
@@ -189,44 +179,52 @@ struct freelQ_ce {
189}; 179};
190 180
191/* 181/*
192 * SW Command, Freelist and Response Queue 182 * SW command, freelist and response rings
193 */ 183 */
194struct cmdQ { 184struct cmdQ {
195 atomic_t asleep; /* HW DMA Fetch status */ 185 unsigned long status; /* HW DMA fetch status */
196 atomic_t credits; /* # available descriptors for TX */ 186 unsigned int in_use; /* # of in-use command descriptors */
197 atomic_t pio_pidx; /* Variable updated on Doorbell */ 187 unsigned int size; /* # of descriptors */
198 u16 entries_n; /* # descriptors for TX */ 188 unsigned int processed; /* total # of descs HW has processed */
199 u16 pidx; /* producer index (SW) */ 189 unsigned int cleaned; /* total # of descs SW has reclaimed */
200 u16 cidx; /* consumer index (HW) */ 190 unsigned int stop_thres; /* SW TX queue suspend threshold */
201 u8 genbit; /* current generation (=valid) bit */ 191 u16 pidx; /* producer index (SW) */
202 struct cmdQ_e *entries; /* HW command descriptor Q */ 192 u16 cidx; /* consumer index (HW) */
203 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 193 u8 genbit; /* current generation (=valid) bit */
204 spinlock_t Qlock; /* Lock to protect cmdQ enqueuing */ 194 u8 sop; /* is next entry start of packet? */
205 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
198 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
206}; 199};
207 200
208struct freelQ { 201struct freelQ {
209 unsigned int credits; /* # of available RX buffers */ 202 unsigned int credits; /* # of available RX buffers */
210 unsigned int entries_n; /* free list capacity */ 203 unsigned int size; /* free list capacity */
211 u16 pidx; /* producer index (SW) */ 204 u16 pidx; /* producer index (SW) */
212 u16 cidx; /* consumer index (HW) */ 205 u16 cidx; /* consumer index (HW) */
213 u16 rx_buffer_size; /* Buffer size on this free list */ 206 u16 rx_buffer_size; /* Buffer size on this free list */
214 u16 dma_offset; /* DMA offset to align IP headers */ 207 u16 dma_offset; /* DMA offset to align IP headers */
215 u8 genbit; /* current generation (=valid) bit */ 208 u16 recycleq_idx; /* skb recycle q to use */
216 struct freelQ_e *entries; /* HW freelist descriptor Q */ 209 u8 genbit; /* current generation (=valid) bit */
217 struct freelQ_ce *centries; /* SW freelist conext descriptor Q */ 210 struct freelQ_e *entries; /* HW freelist descriptor Q */
218 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ 211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
219}; 213};
220 214
221struct respQ { 215struct respQ {
222 u16 credits; /* # of available respQ descriptors */ 216 unsigned int credits; /* credits to be returned to SGE */
223 u16 credits_pend; /* # of not yet returned descriptors */ 217 unsigned int size; /* # of response Q descriptors */
224 u16 entries_n; /* # of response Q descriptors */ 218 u16 cidx; /* consumer index (SW) */
225 u16 pidx; /* producer index (HW) */ 219 u8 genbit; /* current generation(=valid) bit */
226 u16 cidx; /* consumer index (SW) */
227 u8 genbit; /* current generation(=valid) bit */
228 struct respQ_e *entries; /* HW response descriptor Q */ 220 struct respQ_e *entries; /* HW response descriptor Q */
229 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ 221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
230}; 228};
231 229
232/* 230/*
@@ -239,134 +237,50 @@ struct respQ {
239 */ 237 */
240struct sge { 238struct sge {
241 struct adapter *adapter; /* adapter backpointer */ 239 struct adapter *adapter; /* adapter backpointer */
242 struct freelQ freelQ[SGE_FREELQ_N]; /* freelist Q(s) */ 240 struct net_device *netdev; /* netdevice backpointer */
243 struct respQ respQ; /* response Q instatiation */ 241 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
242 struct respQ respQ; /* response Q */
243 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */ 245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246 u32 intrtimer[SGE_INTR_MAXBUCKETS]; /* ! */ 246 unsigned int intrtimer_nres; /* no-resource interrupt timer */
247 u32 currIndex; /* current index into intrtimer[] */ 247 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
248 u32 intrtimer_nres; /* no resource interrupt timer value */ 248 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
249 u32 sge_control; /* shadow content of sge control reg */ 249 struct timer_list espibug_timer;
250 struct sge_intr_counts intr_cnt; 250 unsigned int espibug_timeout;
251 struct timer_list ptimer; 251 struct sk_buff *espibug_skb;
252 struct sk_buff *pskb; 252 u32 sge_control; /* shadow value of sge control reg */
253 u32 ptimeout; 253 struct sge_intr_counts stats;
254 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned; /* command Q(s)*/ 254 struct sge_port_stats port_stats[MAX_NPORTS];
255 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
255}; 256};
256 257
257static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
258 unsigned int qid);
259
260/* 258/*
261 * PIO to indicate that memory mapped Q contains valid descriptor(s). 259 * PIO to indicate that memory mapped Q contains valid descriptor(s).
262 */ 260 */
263static inline void doorbell_pio(struct sge *sge, u32 val) 261static inline void doorbell_pio(struct adapter *adapter, u32 val)
264{ 262{
265 wmb(); 263 wmb();
266 t1_write_reg_4(sge->adapter, A_SG_DOORBELL, val); 264 writel(val, adapter->regs + A_SG_DOORBELL);
267}
268
269/*
270 * Disables the DMA engine.
271 */
272void t1_sge_stop(struct sge *sge)
273{
274 t1_write_reg_4(sge->adapter, A_SG_CONTROL, 0);
275 t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
276 if (is_T2(sge->adapter))
277 del_timer_sync(&sge->ptimer);
278}
279
280static u8 ch_mac_addr[ETH_ALEN] = {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
281static void t1_espi_workaround(void *data)
282{
283 struct adapter *adapter = (struct adapter *)data;
284 struct sge *sge = adapter->sge;
285
286 if (netif_running(adapter->port[0].dev) &&
287 atomic_read(&sge->cmdQ[0].asleep)) {
288
289 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
290
291 if ((seop & 0xfff0fff) == 0xfff && sge->pskb) {
292 struct sk_buff *skb = sge->pskb;
293 if (!skb->cb[0]) {
294 memcpy(skb->data+sizeof(struct cpl_tx_pkt), ch_mac_addr, ETH_ALEN);
295 memcpy(skb->data+skb->len-10, ch_mac_addr, ETH_ALEN);
296
297 skb->cb[0] = 0xff;
298 }
299 t1_sge_tx(skb, adapter,0);
300 }
301 }
302 mod_timer(&adapter->sge->ptimer, jiffies + sge->ptimeout);
303}
304
305/*
306 * Enables the DMA engine.
307 */
308void t1_sge_start(struct sge *sge)
309{
310 t1_write_reg_4(sge->adapter, A_SG_CONTROL, sge->sge_control);
311 t1_read_reg_4(sge->adapter, A_SG_CONTROL); /* flush write */
312 if (is_T2(sge->adapter)) {
313 init_timer(&sge->ptimer);
314 sge->ptimer.function = (void *)&t1_espi_workaround;
315 sge->ptimer.data = (unsigned long)sge->adapter;
316 sge->ptimer.expires = jiffies + sge->ptimeout;
317 add_timer(&sge->ptimer);
318 }
319}
320
321/*
322 * Creates a t1_sge structure and returns suggested resource parameters.
323 */
324struct sge * __devinit t1_sge_create(struct adapter *adapter,
325 struct sge_params *p)
326{
327 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
328
329 if (!sge)
330 return NULL;
331 memset(sge, 0, sizeof(*sge));
332
333 if (is_T2(adapter))
334 sge->ptimeout = 1; /* finest allowed */
335
336 sge->adapter = adapter;
337 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : SGE_RX_OFFSET;
338 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
339
340 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
341 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
342 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
343 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
344 p->rx_coalesce_usecs = SGE_INTRTIMER1;
345 p->last_rx_coalesce_raw = SGE_INTRTIMER1 *
346 (board_info(sge->adapter)->clock_core / 1000000);
347 p->default_rx_coalesce_usecs = SGE_INTRTIMER1;
348 p->coalesce_enable = 0; /* Turn off adaptive algorithm by default */
349 p->sample_interval_usecs = 0;
350 return sge;
351} 265}
352 266
353/* 267/*
354 * Frees all RX buffers on the freelist Q. The caller must make sure that 268 * Frees all RX buffers on the freelist Q. The caller must make sure that
355 * the SGE is turned off before calling this function. 269 * the SGE is turned off before calling this function.
356 */ 270 */
357static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *Q) 271static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
358{ 272{
359 unsigned int cidx = Q->cidx, credits = Q->credits; 273 unsigned int cidx = q->cidx;
360 274
361 while (credits--) { 275 while (q->credits--) {
362 struct freelQ_ce *ce = &Q->centries[cidx]; 276 struct freelQ_ce *ce = &q->centries[cidx];
363 277
364 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 278 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
365 pci_unmap_len(ce, dma_len), 279 pci_unmap_len(ce, dma_len),
366 PCI_DMA_FROMDEVICE); 280 PCI_DMA_FROMDEVICE);
367 dev_kfree_skb(ce->skb); 281 dev_kfree_skb(ce->skb);
368 ce->skb = NULL; 282 ce->skb = NULL;
369 if (++cidx == Q->entries_n) 283 if (++cidx == q->size)
370 cidx = 0; 284 cidx = 0;
371 } 285 }
372} 286}
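
free_freelQ_buffers() above is the unmap side of the DECLARE_PCI_UNMAP_ADDR/LEN bookkeeping. For contrast, a sketch of the matching fill side; the refill path itself is not in this excerpt, and q, pdev, skb and len follow the names used by the surrounding code:

/* fill side, sketched: map the skb and record what unmap will need */
struct freelQ_ce *ce = &q->centries[q->pidx];
dma_addr_t mapping;

mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
ce->skb = skb;
pci_unmap_addr_set(ce, dma_addr, mapping);
pci_unmap_len_set(ce, dma_len, len);
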
@@ -380,29 +294,29 @@ static void free_rx_resources(struct sge *sge)
380 unsigned int size, i; 294 unsigned int size, i;
381 295
382 if (sge->respQ.entries) { 296 if (sge->respQ.entries) {
383 size = sizeof(struct respQ_e) * sge->respQ.entries_n; 297 size = sizeof(struct respQ_e) * sge->respQ.size;
384 pci_free_consistent(pdev, size, sge->respQ.entries, 298 pci_free_consistent(pdev, size, sge->respQ.entries,
385 sge->respQ.dma_addr); 299 sge->respQ.dma_addr);
386 } 300 }
387 301
388 for (i = 0; i < SGE_FREELQ_N; i++) { 302 for (i = 0; i < SGE_FREELQ_N; i++) {
389 struct freelQ *Q = &sge->freelQ[i]; 303 struct freelQ *q = &sge->freelQ[i];
390 304
391 if (Q->centries) { 305 if (q->centries) {
392 free_freelQ_buffers(pdev, Q); 306 free_freelQ_buffers(pdev, q);
393 kfree(Q->centries); 307 kfree(q->centries);
394 } 308 }
395 if (Q->entries) { 309 if (q->entries) {
396 size = sizeof(struct freelQ_e) * Q->entries_n; 310 size = sizeof(struct freelQ_e) * q->size;
397 pci_free_consistent(pdev, size, Q->entries, 311 pci_free_consistent(pdev, size, q->entries,
398 Q->dma_addr); 312 q->dma_addr);
399 } 313 }
400 } 314 }
401} 315}
402 316
403/* 317/*
404 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a 318 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
405 * response Q. 319 * response queue.
406 */ 320 */
407static int alloc_rx_resources(struct sge *sge, struct sge_params *p) 321static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
408{ 322{
@@ -410,21 +324,22 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
410 unsigned int size, i; 324 unsigned int size, i;
411 325
412 for (i = 0; i < SGE_FREELQ_N; i++) { 326 for (i = 0; i < SGE_FREELQ_N; i++) {
413 struct freelQ *Q = &sge->freelQ[i]; 327 struct freelQ *q = &sge->freelQ[i];
414 328
415 Q->genbit = 1; 329 q->genbit = 1;
416 Q->entries_n = p->freelQ_size[i]; 330 q->size = p->freelQ_size[i];
417 Q->dma_offset = SGE_RX_OFFSET - sge->rx_pkt_pad; 331 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
418 size = sizeof(struct freelQ_e) * Q->entries_n; 332 size = sizeof(struct freelQ_e) * q->size;
419 Q->entries = (struct freelQ_e *) 333 q->entries = (struct freelQ_e *)
420 pci_alloc_consistent(pdev, size, &Q->dma_addr); 334 pci_alloc_consistent(pdev, size, &q->dma_addr);
421 if (!Q->entries) 335 if (!q->entries)
422 goto err_no_mem; 336 goto err_no_mem;
423 memset(Q->entries, 0, size); 337 memset(q->entries, 0, size);
424 Q->centries = kcalloc(Q->entries_n, sizeof(struct freelQ_ce), 338 size = sizeof(struct freelQ_ce) * q->size;
425 GFP_KERNEL); 339 q->centries = kmalloc(size, GFP_KERNEL);
426 if (!Q->centries) 340 if (!q->centries)
427 goto err_no_mem; 341 goto err_no_mem;
342 memset(q->centries, 0, size);
428 } 343 }
429 344
430 /* 345 /*
@@ -440,10 +355,17 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
440 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) - 355 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
441 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 356 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
442 357
358 /*
 359 * Set up which skb recycle Q should be used when recycling buffers from
360 * each free list.
361 */
362 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
363 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
364
443 sge->respQ.genbit = 1; 365 sge->respQ.genbit = 1;
444 sge->respQ.entries_n = SGE_RESPQ_E_N; 366 sge->respQ.size = SGE_RESPQ_E_N;
445 sge->respQ.credits = SGE_RESPQ_E_N; 367 sge->respQ.credits = 0;
446 size = sizeof(struct respQ_e) * sge->respQ.entries_n; 368 size = sizeof(struct respQ_e) * sge->respQ.size;
447 sge->respQ.entries = (struct respQ_e *) 369 sge->respQ.entries = (struct respQ_e *)
448 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 370 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
449 if (!sge->respQ.entries) 371 if (!sge->respQ.entries)
@@ -457,48 +379,37 @@ err_no_mem:
457} 379}
458 380
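Each queue pairs a DMA-coherent descriptor ring with an ordinary kmalloc'ed shadow array of software context entries, and both start zeroed. A hedged sketch of the coherent half (helper name hypothetical), built on the same 2.6-era pci_alloc_consistent() call used above:

	/* Allocate and zero a DMA-coherent descriptor ring, NULL on failure. */
	static void *alloc_ring(struct pci_dev *pdev, unsigned int nelem,
				size_t elem_size, dma_addr_t *phys)
	{
		size_t len = nelem * elem_size;
		void *ring = pci_alloc_consistent(pdev, len, phys);

		if (ring)
			memset(ring, 0, len);	/* hardware expects zeroed rings */
		return ring;
	}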
459/* 381/*
460 * Frees 'credits_pend' TX buffers and returns the credits to Q->credits. 382 * Reclaims n TX descriptors and frees the buffers associated with them.
461 *
462 * The adaptive algorithm receives the total size of the buffers freed
463 * accumulated in @*totpayload. No initialization of this argument here.
464 *
465 */ 383 */
466static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *Q, 384static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
467 unsigned int credits_pend, unsigned int *totpayload)
468{ 385{
386 struct cmdQ_ce *ce;
469 struct pci_dev *pdev = sge->adapter->pdev; 387 struct pci_dev *pdev = sge->adapter->pdev;
470 struct sk_buff *skb; 388 unsigned int cidx = q->cidx;
471 struct cmdQ_ce *ce, *cq = Q->centries;
472 unsigned int entries_n = Q->entries_n, cidx = Q->cidx,
473 i = credits_pend;
474
475 389
476 ce = &cq[cidx]; 390 q->in_use -= n;
477 while (i--) { 391 ce = &q->centries[cidx];
478 if (ce->single) 392 while (n--) {
393 if (q->sop)
479 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 394 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
480 pci_unmap_len(ce, dma_len), 395 pci_unmap_len(ce, dma_len),
481 PCI_DMA_TODEVICE); 396 PCI_DMA_TODEVICE);
482 else 397 else
483 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr), 398 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
484 pci_unmap_len(ce, dma_len), 399 pci_unmap_len(ce, dma_len),
485 PCI_DMA_TODEVICE); 400 PCI_DMA_TODEVICE);
486 if (totpayload) 401 q->sop = 0;
487 *totpayload += pci_unmap_len(ce, dma_len); 402 if (ce->skb) {
488 403 dev_kfree_skb(ce->skb);
489 skb = ce->skb; 404 q->sop = 1;
490 if (skb) 405 }
491 dev_kfree_skb_irq(skb);
492
493 ce++; 406 ce++;
494 if (++cidx == entries_n) { 407 if (++cidx == q->size) {
495 cidx = 0; 408 cidx = 0;
496 ce = cq; 409 ce = q->centries;
497 } 410 }
498 } 411 }
499 412 q->cidx = cidx;
500 Q->cidx = cidx;
501 atomic_add(credits_pend, &Q->credits);
502} 413}
503 414
504/* 415/*
@@ -512,20 +423,17 @@ static void free_tx_resources(struct sge *sge)
512 unsigned int size, i; 423 unsigned int size, i;
513 424
514 for (i = 0; i < SGE_CMDQ_N; i++) { 425 for (i = 0; i < SGE_CMDQ_N; i++) {
515 struct cmdQ *Q = &sge->cmdQ[i]; 426 struct cmdQ *q = &sge->cmdQ[i];
516 427
517 if (Q->centries) { 428 if (q->centries) {
518 unsigned int pending = Q->entries_n - 429 if (q->in_use)
519 atomic_read(&Q->credits); 430 free_cmdQ_buffers(sge, q, q->in_use);
520 431 kfree(q->centries);
521 if (pending)
522 free_cmdQ_buffers(sge, Q, pending, NULL);
523 kfree(Q->centries);
524 } 432 }
525 if (Q->entries) { 433 if (q->entries) {
526 size = sizeof(struct cmdQ_e) * Q->entries_n; 434 size = sizeof(struct cmdQ_e) * q->size;
527 pci_free_consistent(pdev, size, Q->entries, 435 pci_free_consistent(pdev, size, q->entries,
528 Q->dma_addr); 436 q->dma_addr);
529 } 437 }
530 } 438 }
531} 439}
@@ -539,25 +447,38 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
539 unsigned int size, i; 447 unsigned int size, i;
540 448
541 for (i = 0; i < SGE_CMDQ_N; i++) { 449 for (i = 0; i < SGE_CMDQ_N; i++) {
542 struct cmdQ *Q = &sge->cmdQ[i]; 450 struct cmdQ *q = &sge->cmdQ[i];
543 451
544 Q->genbit = 1; 452 q->genbit = 1;
545 Q->entries_n = p->cmdQ_size[i]; 453 q->sop = 1;
546 atomic_set(&Q->credits, Q->entries_n); 454 q->size = p->cmdQ_size[i];
547 atomic_set(&Q->asleep, 1); 455 q->in_use = 0;
548 spin_lock_init(&Q->Qlock); 456 q->status = 0;
549 size = sizeof(struct cmdQ_e) * Q->entries_n; 457 q->processed = q->cleaned = 0;
550 Q->entries = (struct cmdQ_e *) 458 q->stop_thres = 0;
551 pci_alloc_consistent(pdev, size, &Q->dma_addr); 459 spin_lock_init(&q->lock);
552 if (!Q->entries) 460 size = sizeof(struct cmdQ_e) * q->size;
461 q->entries = (struct cmdQ_e *)
462 pci_alloc_consistent(pdev, size, &q->dma_addr);
463 if (!q->entries)
553 goto err_no_mem; 464 goto err_no_mem;
554 memset(Q->entries, 0, size); 465 memset(q->entries, 0, size);
555 Q->centries = kcalloc(Q->entries_n, sizeof(struct cmdQ_ce), 466 size = sizeof(struct cmdQ_ce) * q->size;
556 GFP_KERNEL); 467 q->centries = kmalloc(size, GFP_KERNEL);
557 if (!Q->centries) 468 if (!q->centries)
558 goto err_no_mem; 469 goto err_no_mem;
470 memset(q->centries, 0, size);
559 } 471 }
560 472
473 /*
474 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
475 * only. For queue 0 set the stop threshold so we can handle one more
476 * packet from each port, plus reserve an additional 24 entries for
477 * Ethernet packets only. Queue 1 never suspends nor do we reserve
478 * space for Ethernet packets.
479 */
480 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
481 (MAX_SKB_FRAGS + 1);
561 return 0; 482 return 0;
562 483
563err_no_mem: 484err_no_mem:
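To make the queue-0 threshold concrete (values assumed for illustration only): on a 4 KB-page kernel MAX_SKB_FRAGS is 18, so a dual-port board computes

	stop_thres = nports * (MAX_SKB_FRAGS + 1)
	           = 2 * (18 + 1) = 38

and suspends the Ethernet queue once fewer than 38 descriptors remain, i.e. room for one maximally fragmented packet per port.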
@@ -569,9 +490,9 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
569 u32 size, int base_reg_lo, 490 u32 size, int base_reg_lo,
570 int base_reg_hi, int size_reg) 491 int base_reg_hi, int size_reg)
571{ 492{
572 t1_write_reg_4(adapter, base_reg_lo, (u32)addr); 493 writel((u32)addr, adapter->regs + base_reg_lo);
573 t1_write_reg_4(adapter, base_reg_hi, addr >> 32); 494 writel(addr >> 32, adapter->regs + base_reg_hi);
574 t1_write_reg_4(adapter, size_reg, size); 495 writel(size, adapter->regs + size_reg);
575} 496}
576 497
577/* 498/*
@@ -585,97 +506,52 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
585 if (on_off) 506 if (on_off)
586 sge->sge_control |= F_VLAN_XTRACT; 507 sge->sge_control |= F_VLAN_XTRACT;
587 if (adapter->open_device_map) { 508 if (adapter->open_device_map) {
588 t1_write_reg_4(adapter, A_SG_CONTROL, sge->sge_control); 509 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
589 t1_read_reg_4(adapter, A_SG_CONTROL); /* flush */ 510 readl(adapter->regs + A_SG_CONTROL); /* flush */
590 } 511 }
591} 512}
592 513
593/* 514/*
594 * Sets the interrupt latency timer when the adaptive Rx coalescing
595 * is turned off. Do nothing when it is turned on again.
596 *
597 * This routine relies on the fact that the caller has already set
598 * the adaptive policy in adapter->sge_params before calling it.
599*/
600int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
601{
602 if (!p->coalesce_enable) {
603 u32 newTimer = p->rx_coalesce_usecs *
604 (board_info(sge->adapter)->clock_core / 1000000);
605
606 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, newTimer);
607 }
608 return 0;
609}
610
611/*
612 * Programs the various SGE registers. However, the engine is not yet enabled, 515 * Programs the various SGE registers. However, the engine is not yet enabled,
 613 * but sge->sge_control is set up and ready to go. 516 * but sge->sge_control is set up and ready to go.
614 */ 517 */
615static void configure_sge(struct sge *sge, struct sge_params *p) 518static void configure_sge(struct sge *sge, struct sge_params *p)
616{ 519{
617 struct adapter *ap = sge->adapter; 520 struct adapter *ap = sge->adapter;
618 int i; 521
619 522 writel(0, ap->regs + A_SG_CONTROL);
620 t1_write_reg_4(ap, A_SG_CONTROL, 0); 523 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
621 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].entries_n,
622 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 524 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
623 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].entries_n, 525 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
624 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); 526 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
625 setup_ring_params(ap, sge->freelQ[0].dma_addr, 527 setup_ring_params(ap, sge->freelQ[0].dma_addr,
626 sge->freelQ[0].entries_n, A_SG_FL0BASELWR, 528 sge->freelQ[0].size, A_SG_FL0BASELWR,
627 A_SG_FL0BASEUPR, A_SG_FL0SIZE); 529 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
628 setup_ring_params(ap, sge->freelQ[1].dma_addr, 530 setup_ring_params(ap, sge->freelQ[1].dma_addr,
629 sge->freelQ[1].entries_n, A_SG_FL1BASELWR, 531 sge->freelQ[1].size, A_SG_FL1BASELWR,
630 A_SG_FL1BASEUPR, A_SG_FL1SIZE); 532 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
631 533
632 /* The threshold comparison uses <. */ 534 /* The threshold comparison uses <. */
633 t1_write_reg_4(ap, A_SG_FLTHRESHOLD, SGE_RX_SM_BUF_SIZE + 1); 535 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
634 536
635 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.entries_n, 537 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
636 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); 538 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
637 t1_write_reg_4(ap, A_SG_RSPQUEUECREDIT, (u32)sge->respQ.entries_n); 539 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
638 540
639 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | 541 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
640 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | 542 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
641 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | 543 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
544 F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
642 V_RX_PKT_OFFSET(sge->rx_pkt_pad); 545 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
643 546
644#if defined(__BIG_ENDIAN_BITFIELD) 547#if defined(__BIG_ENDIAN_BITFIELD)
645 sge->sge_control |= F_ENABLE_BIG_ENDIAN; 548 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
646#endif 549#endif
647 550
648 /* 551 /* Initialize no-resource timer */
 649 * Initialize the SGE Interrupt Timer array: 552 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
650 * intrtimer[0] = (SGE_INTRTIMER0) usec 553
651 * intrtimer[0<i<5] = (SGE_INTRTIMER0 + i*2) usec 554 t1_sge_set_coalesce_params(sge, p);
652 * intrtimer[4<i<10] = ((i - 3) * 6) usec
653 * intrtimer[10] = (SGE_INTRTIMER1) usec
654 *
655 */
656 sge->intrtimer[0] = board_info(sge->adapter)->clock_core / 1000000;
657 for (i = 1; i < SGE_INTR_LATBUCKETS; ++i) {
658 sge->intrtimer[i] = SGE_INTRTIMER0 + (2 * i);
659 sge->intrtimer[i] *= sge->intrtimer[0];
660 }
661 for (i = SGE_INTR_LATBUCKETS; i < SGE_INTR_MAXBUCKETS - 1; ++i) {
662 sge->intrtimer[i] = (i - 3) * 6;
663 sge->intrtimer[i] *= sge->intrtimer[0];
664 }
665 sge->intrtimer[SGE_INTR_MAXBUCKETS - 1] =
666 sge->intrtimer[0] * SGE_INTRTIMER1;
667 /* Initialize resource timer */
668 sge->intrtimer_nres = sge->intrtimer[0] * SGE_INTRTIMER_NRES;
669 /* Finally finish initialization of intrtimer[0] */
670 sge->intrtimer[0] *= SGE_INTRTIMER0;
671 /* Initialize for a throughput oriented workload */
672 sge->currIndex = SGE_INTR_MAXBUCKETS - 1;
673
674 if (p->coalesce_enable)
675 t1_write_reg_4(ap, A_SG_INTRTIMER,
676 sge->intrtimer[sge->currIndex]);
677 else
678 t1_sge_set_coalesce_params(sge, p);
679} 555}
680 556
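Note that A_SG_INTRTIMER counts core-clock ticks, not microseconds; core_ticks_per_usec() hides the conversion. A hedged numeric example (clock rate assumed):

	/* Assume a 125 MHz core clock. */
	unsigned int ticks_per_usec = 125000000 / 1000000;	/* = 125 */
	/* A 50 usec holdoff is then programmed as 50 * 125 = 6250 ticks. */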
681/* 557/*
@@ -684,31 +560,8 @@ static void configure_sge(struct sge *sge, struct sge_params *p)
684static inline unsigned int jumbo_payload_capacity(const struct sge *sge) 560static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
685{ 561{
686 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - 562 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
687 sizeof(struct cpl_rx_data) - SGE_RX_OFFSET + sge->rx_pkt_pad; 563 sge->freelQ[sge->jumbo_fl].dma_offset -
688} 564 sizeof(struct cpl_rx_data);
689
690/*
691 * Allocates both RX and TX resources and configures the SGE. However,
692 * the hardware is not enabled yet.
693 */
694int t1_sge_configure(struct sge *sge, struct sge_params *p)
695{
696 if (alloc_rx_resources(sge, p))
697 return -ENOMEM;
698 if (alloc_tx_resources(sge, p)) {
699 free_rx_resources(sge);
700 return -ENOMEM;
701 }
702 configure_sge(sge, p);
703
704 /*
705 * Now that we have sized the free lists calculate the payload
706 * capacity of the large buffers. Other parts of the driver use
707 * this to set the max offload coalescing size so that RX packets
708 * do not overflow our large buffers.
709 */
710 p->large_buf_capacity = jumbo_payload_capacity(sge);
711 return 0;
712} 565}
713 566
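A rough worked example of the capacity calculation, with every size assumed purely for illustration (the real values depend on the architecture and on cpl.h):

	rx_buffer_size = 16 * 1024 - 320   /* aligned skb_shared_info, assumed */
	               = 16064
	capacity       = 16064
	               - 2                 /* dma_offset, i.e. NET_IP_ALIGN */
	               - 8                 /* sizeof(struct cpl_rx_data), assumed */
	               = 16054 bytes of payload per jumbo buffer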
714/* 567/*
@@ -716,8 +569,9 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p)
716 */ 569 */
717void t1_sge_destroy(struct sge *sge) 570void t1_sge_destroy(struct sge *sge)
718{ 571{
719 if (sge->pskb) 572 if (sge->espibug_skb)
720 dev_kfree_skb(sge->pskb); 573 kfree_skb(sge->espibug_skb);
574
721 free_tx_resources(sge); 575 free_tx_resources(sge);
722 free_rx_resources(sge); 576 free_rx_resources(sge);
723 kfree(sge); 577 kfree(sge);
@@ -735,75 +589,75 @@ void t1_sge_destroy(struct sge *sge)
 735 * we specify an RX_OFFSET in order to make sure that the IP header is 4B 589 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
736 * aligned. 590 * aligned.
737 */ 591 */
738static void refill_free_list(struct sge *sge, struct freelQ *Q) 592static void refill_free_list(struct sge *sge, struct freelQ *q)
739{ 593{
740 struct pci_dev *pdev = sge->adapter->pdev; 594 struct pci_dev *pdev = sge->adapter->pdev;
741 struct freelQ_ce *ce = &Q->centries[Q->pidx]; 595 struct freelQ_ce *ce = &q->centries[q->pidx];
742 struct freelQ_e *e = &Q->entries[Q->pidx]; 596 struct freelQ_e *e = &q->entries[q->pidx];
743 unsigned int dma_len = Q->rx_buffer_size - Q->dma_offset; 597 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
744 598
745 599
746 while (Q->credits < Q->entries_n) { 600 while (q->credits < q->size) {
747 if (e->GenerationBit != Q->genbit) { 601 struct sk_buff *skb;
748 struct sk_buff *skb; 602 dma_addr_t mapping;
749 dma_addr_t mapping;
750 603
751 skb = alloc_skb(Q->rx_buffer_size, GFP_ATOMIC); 604 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
752 if (!skb) 605 if (!skb)
753 break; 606 break;
754 if (Q->dma_offset) 607
755 skb_reserve(skb, Q->dma_offset); 608 skb_reserve(skb, q->dma_offset);
756 mapping = pci_map_single(pdev, skb->data, dma_len, 609 mapping = pci_map_single(pdev, skb->data, dma_len,
757 PCI_DMA_FROMDEVICE); 610 PCI_DMA_FROMDEVICE);
758 ce->skb = skb; 611 ce->skb = skb;
759 pci_unmap_addr_set(ce, dma_addr, mapping); 612 pci_unmap_addr_set(ce, dma_addr, mapping);
760 pci_unmap_len_set(ce, dma_len, dma_len); 613 pci_unmap_len_set(ce, dma_len, dma_len);
761 e->AddrLow = (u32)mapping; 614 e->addr_lo = (u32)mapping;
762 e->AddrHigh = (u64)mapping >> 32; 615 e->addr_hi = (u64)mapping >> 32;
763 e->BufferLength = dma_len; 616 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
764 e->GenerationBit = e->GenerationBit2 = Q->genbit; 617 wmb();
765 } 618 e->gen2 = V_CMD_GEN2(q->genbit);
766 619
767 e++; 620 e++;
768 ce++; 621 ce++;
769 if (++Q->pidx == Q->entries_n) { 622 if (++q->pidx == q->size) {
770 Q->pidx = 0; 623 q->pidx = 0;
771 Q->genbit ^= 1; 624 q->genbit ^= 1;
772 ce = Q->centries; 625 ce = q->centries;
773 e = Q->entries; 626 e = q->entries;
774 } 627 }
775 Q->credits++; 628 q->credits++;
776 } 629 }
777 630
778} 631}
779 632
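refill_free_list() publishes each descriptor body first and writes the generation word last, behind wmb(). The consuming side of the same scheme detects new entries by comparing each descriptor's generation bit against the ring's expected bit and flips the expectation on every wrap; a hypothetical condensed loop (this is the shape process_responses() takes below):

	while (e->GenerationBit == q->genbit) {	/* entry is ours */
		handle(e);			/* hypothetical handler */
		if (++q->cidx == q->size) {
			q->cidx = 0;
			q->genbit ^= 1;		/* expect flipped bit after wrap */
		}
		e = &q->entries[q->cidx];
	}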
780/* 633/*
781 * Calls refill_free_list for both freelist Qs. If we cannot 634 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
782 * fill at least 1/4 of both Qs, we go into 'few interrupt mode' in order 635 * of both rings, we go into 'few interrupt mode' in order to give the system
783 * to give the system time to free up resources. 636 * time to free up resources.
784 */ 637 */
785static void freelQs_empty(struct sge *sge) 638static void freelQs_empty(struct sge *sge)
786{ 639{
787 u32 irq_reg = t1_read_reg_4(sge->adapter, A_SG_INT_ENABLE); 640 struct adapter *adapter = sge->adapter;
641 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
788 u32 irqholdoff_reg; 642 u32 irqholdoff_reg;
789 643
790 refill_free_list(sge, &sge->freelQ[0]); 644 refill_free_list(sge, &sge->freelQ[0]);
791 refill_free_list(sge, &sge->freelQ[1]); 645 refill_free_list(sge, &sge->freelQ[1]);
792 646
793 if (sge->freelQ[0].credits > (sge->freelQ[0].entries_n >> 2) && 647 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
794 sge->freelQ[1].credits > (sge->freelQ[1].entries_n >> 2)) { 648 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
795 irq_reg |= F_FL_EXHAUSTED; 649 irq_reg |= F_FL_EXHAUSTED;
796 irqholdoff_reg = sge->intrtimer[sge->currIndex]; 650 irqholdoff_reg = sge->fixed_intrtimer;
797 } else { 651 } else {
798 /* Clear the F_FL_EXHAUSTED interrupts for now */ 652 /* Clear the F_FL_EXHAUSTED interrupts for now */
799 irq_reg &= ~F_FL_EXHAUSTED; 653 irq_reg &= ~F_FL_EXHAUSTED;
800 irqholdoff_reg = sge->intrtimer_nres; 654 irqholdoff_reg = sge->intrtimer_nres;
801 } 655 }
802 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, irqholdoff_reg); 656 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
803 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, irq_reg); 657 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
804 658
805 /* We reenable the Qs to force a freelist GTS interrupt later */ 659 /* We reenable the Qs to force a freelist GTS interrupt later */
806 doorbell_pio(sge, F_FL0_ENABLE | F_FL1_ENABLE); 660 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
807} 661}
808 662
809#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) 663#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
@@ -816,10 +670,10 @@ static void freelQs_empty(struct sge *sge)
816 */ 670 */
817void t1_sge_intr_disable(struct sge *sge) 671void t1_sge_intr_disable(struct sge *sge)
818{ 672{
819 u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE); 673 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
820 674
821 t1_write_reg_4(sge->adapter, A_PL_ENABLE, val & ~SGE_PL_INTR_MASK); 675 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
822 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, 0); 676 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
823} 677}
824 678
825/* 679/*
@@ -828,12 +682,12 @@ void t1_sge_intr_disable(struct sge *sge)
828void t1_sge_intr_enable(struct sge *sge) 682void t1_sge_intr_enable(struct sge *sge)
829{ 683{
830 u32 en = SGE_INT_ENABLE; 684 u32 en = SGE_INT_ENABLE;
831 u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE); 685 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
832 686
833 if (sge->adapter->flags & TSO_CAPABLE) 687 if (sge->adapter->flags & TSO_CAPABLE)
834 en &= ~F_PACKET_TOO_BIG; 688 en &= ~F_PACKET_TOO_BIG;
835 t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, en); 689 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
836 t1_write_reg_4(sge->adapter, A_PL_ENABLE, val | SGE_PL_INTR_MASK); 690 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
837} 691}
838 692
839/* 693/*
@@ -841,8 +695,8 @@ void t1_sge_intr_enable(struct sge *sge)
841 */ 695 */
842void t1_sge_intr_clear(struct sge *sge) 696void t1_sge_intr_clear(struct sge *sge)
843{ 697{
844 t1_write_reg_4(sge->adapter, A_PL_CAUSE, SGE_PL_INTR_MASK); 698 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
845 t1_write_reg_4(sge->adapter, A_SG_INT_CAUSE, 0xffffffff); 699 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
846} 700}
847 701
848/* 702/*
@@ -851,464 +705,673 @@ void t1_sge_intr_clear(struct sge *sge)
851int t1_sge_intr_error_handler(struct sge *sge) 705int t1_sge_intr_error_handler(struct sge *sge)
852{ 706{
853 struct adapter *adapter = sge->adapter; 707 struct adapter *adapter = sge->adapter;
854 u32 cause = t1_read_reg_4(adapter, A_SG_INT_CAUSE); 708 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
855 709
856 if (adapter->flags & TSO_CAPABLE) 710 if (adapter->flags & TSO_CAPABLE)
857 cause &= ~F_PACKET_TOO_BIG; 711 cause &= ~F_PACKET_TOO_BIG;
858 if (cause & F_RESPQ_EXHAUSTED) 712 if (cause & F_RESPQ_EXHAUSTED)
859 sge->intr_cnt.respQ_empty++; 713 sge->stats.respQ_empty++;
860 if (cause & F_RESPQ_OVERFLOW) { 714 if (cause & F_RESPQ_OVERFLOW) {
861 sge->intr_cnt.respQ_overflow++; 715 sge->stats.respQ_overflow++;
862 CH_ALERT("%s: SGE response queue overflow\n", 716 CH_ALERT("%s: SGE response queue overflow\n",
863 adapter->name); 717 adapter->name);
864 } 718 }
865 if (cause & F_FL_EXHAUSTED) { 719 if (cause & F_FL_EXHAUSTED) {
866 sge->intr_cnt.freelistQ_empty++; 720 sge->stats.freelistQ_empty++;
867 freelQs_empty(sge); 721 freelQs_empty(sge);
868 } 722 }
869 if (cause & F_PACKET_TOO_BIG) { 723 if (cause & F_PACKET_TOO_BIG) {
870 sge->intr_cnt.pkt_too_big++; 724 sge->stats.pkt_too_big++;
871 CH_ALERT("%s: SGE max packet size exceeded\n", 725 CH_ALERT("%s: SGE max packet size exceeded\n",
872 adapter->name); 726 adapter->name);
873 } 727 }
874 if (cause & F_PACKET_MISMATCH) { 728 if (cause & F_PACKET_MISMATCH) {
875 sge->intr_cnt.pkt_mismatch++; 729 sge->stats.pkt_mismatch++;
876 CH_ALERT("%s: SGE packet mismatch\n", adapter->name); 730 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
877 } 731 }
878 if (cause & SGE_INT_FATAL) 732 if (cause & SGE_INT_FATAL)
879 t1_fatal_err(adapter); 733 t1_fatal_err(adapter);
880 734
881 t1_write_reg_4(adapter, A_SG_INT_CAUSE, cause); 735 writel(cause, adapter->regs + A_SG_INT_CAUSE);
882 return 0; 736 return 0;
883} 737}
884 738
885/* 739const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
886 * The following code is copied from 2.6, where the skb_pull is doing the 740{
887 * right thing and only pulls ETH_HLEN. 741 return &sge->stats;
742}
743
744const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
745{
746 return &sge->port_stats[port];
747}
748
749/**
750 * recycle_fl_buf - recycle a free list buffer
751 * @fl: the free list
752 * @idx: index of buffer to recycle
888 * 753 *
889 * Determine the packet's protocol ID. The rule here is that we 754 * Recycles the specified buffer on the given free list by adding it at
890 * assume 802.3 if the type field is short enough to be a length. 755 * the next available slot on the list.
891 * This is normal practice and works for any 'now in use' protocol.
892 */ 756 */
893static unsigned short sge_eth_type_trans(struct sk_buff *skb, 757static void recycle_fl_buf(struct freelQ *fl, int idx)
894 struct net_device *dev)
895{ 758{
896 struct ethhdr *eth; 759 struct freelQ_e *from = &fl->entries[idx];
897 unsigned char *rawp; 760 struct freelQ_e *to = &fl->entries[fl->pidx];
898 761
899 skb->mac.raw = skb->data; 762 fl->centries[fl->pidx] = fl->centries[idx];
900 skb_pull(skb, ETH_HLEN); 763 to->addr_lo = from->addr_lo;
901 eth = (struct ethhdr *)skb->mac.raw; 764 to->addr_hi = from->addr_hi;
765 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
766 wmb();
767 to->gen2 = V_CMD_GEN2(fl->genbit);
768 fl->credits++;
902 769
903 if (*eth->h_dest&1) { 770 if (++fl->pidx == fl->size) {
904 if(memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0) 771 fl->pidx = 0;
905 skb->pkt_type = PACKET_BROADCAST; 772 fl->genbit ^= 1;
906 else
907 skb->pkt_type = PACKET_MULTICAST;
908 } 773 }
774}
909 775
910 /* 776/**
911 * This ALLMULTI check should be redundant by 1.4 777 * get_packet - return the next ingress packet buffer
912 * so don't forget to remove it. 778 * @pdev: the PCI device that received the packet
913 * 779 * @fl: the SGE free list holding the packet
914 * Seems, you forgot to remove it. All silly devices 780 * @len: the actual packet length, excluding any SGE padding
915 * seems to set IFF_PROMISC. 781 * @dma_pad: padding at beginning of buffer left by SGE DMA
916 */ 782 * @skb_pad: padding to be used if the packet is copied
783 * @copy_thres: length threshold under which a packet should be copied
784 * @drop_thres: # of remaining buffers before we start dropping packets
785 *
786 * Get the next packet from a free list and complete setup of the
787 * sk_buff. If the packet is small we make a copy and recycle the
788 * original buffer, otherwise we use the original buffer itself. If a
789 * positive drop threshold is supplied packets are dropped and their
790 * buffers recycled if (a) the number of remaining buffers is under the
791 * threshold and the packet is too big to copy, or (b) the packet should
792 * be copied but there is no memory for the copy.
793 */
794static inline struct sk_buff *get_packet(struct pci_dev *pdev,
795 struct freelQ *fl, unsigned int len,
796 int dma_pad, int skb_pad,
797 unsigned int copy_thres,
798 unsigned int drop_thres)
799{
800 struct sk_buff *skb;
801 struct freelQ_ce *ce = &fl->centries[fl->cidx];
802
803 if (len < copy_thres) {
804 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
805 if (likely(skb != NULL)) {
806 skb_reserve(skb, skb_pad);
807 skb_put(skb, len);
808 pci_dma_sync_single_for_cpu(pdev,
809 pci_unmap_addr(ce, dma_addr),
810 pci_unmap_len(ce, dma_len),
811 PCI_DMA_FROMDEVICE);
812 memcpy(skb->data, ce->skb->data + dma_pad, len);
813 pci_dma_sync_single_for_device(pdev,
814 pci_unmap_addr(ce, dma_addr),
815 pci_unmap_len(ce, dma_len),
816 PCI_DMA_FROMDEVICE);
817 } else if (!drop_thres)
818 goto use_orig_buf;
917 819
918 else if (1 /*dev->flags&IFF_PROMISC*/) 820 recycle_fl_buf(fl, fl->cidx);
919 { 821 return skb;
920 if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN))
921 skb->pkt_type=PACKET_OTHERHOST;
922 } 822 }
923 823
924 if (ntohs(eth->h_proto) >= 1536) 824 if (fl->credits < drop_thres) {
925 return eth->h_proto; 825 recycle_fl_buf(fl, fl->cidx);
926 826 return NULL;
927 rawp = skb->data; 827 }
928 828
929 /* 829use_orig_buf:
930 * This is a magic hack to spot IPX packets. Older Novell breaks 830 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
931 * the protocol design and runs IPX over 802.3 without an 802.2 LLC 831 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
932 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This 832 skb = ce->skb;
933 * won't work for fault tolerant netware but does for the rest. 833 skb_reserve(skb, dma_pad);
934 */ 834 skb_put(skb, len);
935 if (*(unsigned short *)rawp == 0xFFFF) 835 return skb;
936 return htons(ETH_P_802_3); 836}
937 837
938 /* 838/**
939 * Real 802.2 LLC 839 * unexpected_offload - handle an unexpected offload packet
940 */ 840 * @adapter: the adapter
941 return htons(ETH_P_802_2); 841 * @fl: the free list that received the packet
842 *
843 * Called when we receive an unexpected offload packet (e.g., the TOE
844 * function is disabled or the card is a NIC). Prints a message and
845 * recycles the buffer.
846 */
847static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
848{
849 struct freelQ_ce *ce = &fl->centries[fl->cidx];
850 struct sk_buff *skb = ce->skb;
851
852 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
853 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
854 CH_ERR("%s: unexpected offload packet, cmd %u\n",
855 adapter->name, *skb->data);
856 recycle_fl_buf(fl, fl->cidx);
942} 857}
943 858
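The two thresholds taken by get_packet() above interact as follows (a summary of the function's branches, not new behavior):

	/*
	 * len < copy_thres, alloc ok            -> copy, recycle original buffer
	 * len < copy_thres, alloc fails,
	 *                   drop_thres == 0     -> fall through to use_orig_buf
	 * len < copy_thres, alloc fails         -> drop: recycle, return NULL
	 * len >= copy_thres,
	 *                   credits < drop_thres -> drop: recycle, return NULL
	 * otherwise                             -> unmap and use the original skb
	 */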
944/* 859/*
945 * Prepare the received buffer and pass it up the stack. If it is small enough 860 * Write the command descriptors to transmit the given skb starting at
946 * and allocation doesn't fail, we use a new sk_buff and copy the content. 861 * descriptor pidx with the given generation.
947 */ 862 */
948static unsigned int t1_sge_rx(struct sge *sge, struct freelQ *Q, 863static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
949 unsigned int len, unsigned int offload) 864 unsigned int pidx, unsigned int gen,
865 struct cmdQ *q)
950{ 866{
951 struct sk_buff *skb; 867 dma_addr_t mapping;
952 struct adapter *adapter = sge->adapter; 868 struct cmdQ_e *e, *e1;
953 struct freelQ_ce *ce = &Q->centries[Q->cidx]; 869 struct cmdQ_ce *ce;
870 unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
871
872 mapping = pci_map_single(adapter->pdev, skb->data,
873 skb->len - skb->data_len, PCI_DMA_TODEVICE);
874 ce = &q->centries[pidx];
875 ce->skb = NULL;
876 pci_unmap_addr_set(ce, dma_addr, mapping);
877 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
954 878
955 if (len <= SGE_RX_COPY_THRESHOLD && 879 flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
956 (skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC))) { 880 V_CMD_GEN2(gen);
957 struct freelQ_e *e; 881 e = &q->entries[pidx];
958 char *src = ce->skb->data; 882 e->addr_lo = (u32)mapping;
883 e->addr_hi = (u64)mapping >> 32;
884 e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
885 for (e1 = e, i = 0; nfrags--; i++) {
886 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
959 887
960 pci_dma_sync_single_for_cpu(adapter->pdev, 888 ce++;
961 pci_unmap_addr(ce, dma_addr), 889 e1++;
962 pci_unmap_len(ce, dma_len), 890 if (++pidx == q->size) {
963 PCI_DMA_FROMDEVICE); 891 pidx = 0;
964 if (!offload) { 892 gen ^= 1;
965 skb_reserve(skb, NET_IP_ALIGN); 893 ce = q->centries;
966 src += sge->rx_pkt_pad; 894 e1 = q->entries;
967 } 895 }
968 memcpy(skb->data, src, len);
969 896
970 /* Reuse the entry. */ 897 mapping = pci_map_page(adapter->pdev, frag->page,
971 e = &Q->entries[Q->cidx]; 898 frag->page_offset, frag->size,
972 e->GenerationBit ^= 1; 899 PCI_DMA_TODEVICE);
973 e->GenerationBit2 ^= 1; 900 ce->skb = NULL;
974 } else { 901 pci_unmap_addr_set(ce, dma_addr, mapping);
975 pci_unmap_single(adapter->pdev, pci_unmap_addr(ce, dma_addr), 902 pci_unmap_len_set(ce, dma_len, frag->size);
976 pci_unmap_len(ce, dma_len), 903
977 PCI_DMA_FROMDEVICE); 904 e1->addr_lo = (u32)mapping;
978 skb = ce->skb; 905 e1->addr_hi = (u64)mapping >> 32;
979 if (!offload && sge->rx_pkt_pad) 906 e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
980 __skb_pull(skb, sge->rx_pkt_pad); 907 e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
908 V_CMD_GEN2(gen);
981 } 909 }
982 910
983 skb_put(skb, len); 911 ce->skb = skb;
912 wmb();
913 e->flags = flags;
914}
984 915
916/*
917 * Clean up completed Tx buffers.
918 */
919static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
920{
921 unsigned int reclaim = q->processed - q->cleaned;
985 922
986 if (unlikely(offload)) { 923 if (reclaim) {
987 { 924 free_cmdQ_buffers(sge, q, reclaim);
988 printk(KERN_ERR 925 q->cleaned += reclaim;
989 "%s: unexpected offloaded packet, cmd %u\n",
990 adapter->name, *skb->data);
991 dev_kfree_skb_any(skb);
992 }
993 } else {
994 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)skb->data;
995
996 skb_pull(skb, sizeof(*p));
997 skb->dev = adapter->port[p->iff].dev;
998 skb->dev->last_rx = jiffies;
999 skb->protocol = sge_eth_type_trans(skb, skb->dev);
1000 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
1001 skb->protocol == htons(ETH_P_IP) &&
1002 (skb->data[9] == IPPROTO_TCP ||
1003 skb->data[9] == IPPROTO_UDP))
1004 skb->ip_summed = CHECKSUM_UNNECESSARY;
1005 else
1006 skb->ip_summed = CHECKSUM_NONE;
1007 if (adapter->vlan_grp && p->vlan_valid)
1008 vlan_hwaccel_rx(skb, adapter->vlan_grp,
1009 ntohs(p->vlan));
1010 else
1011 netif_rx(skb);
1012 } 926 }
927}
1013 928
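The rewritten TX path tracks descriptors with three counters; their invariant, restated for clarity (commentary only, no new driver state):

	/*
	 *   in_use    - descriptors handed to hardware, not yet freed
	 *   processed - descriptors hardware has reported complete
	 *   cleaned   - completed descriptors whose buffers were freed
	 *
	 * So processed - cleaned is the reclaimable backlog, and
	 * in_use - (processed - cleaned) is what is truly outstanding,
	 * which is exactly the quantity enough_free_Tx_descs() tests.
	 */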
1014 if (++Q->cidx == Q->entries_n) 929#ifndef SET_ETHTOOL_OPS
1015 Q->cidx = 0; 930# define __netif_rx_complete(dev) netif_rx_complete(dev)
931#endif
1016 932
1017 if (unlikely(--Q->credits < Q->entries_n - SGE_FREEL_REFILL_THRESH)) 933/*
1018 refill_free_list(sge, Q); 934 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
1019 return 1; 935 * ports plus the TOE all multiplexing onto a single response queue, therefore
936 * accepting new responses cannot depend on the state of any particular port.
937 * So define our own equivalent that omits the netif_running() test.
938 */
939static inline int napi_schedule_prep(struct net_device *dev)
940{
941 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
1020} 942}
1021 943
1022 944
1023/* 945/**
1024 * Adaptive interrupt timer logic to keep the CPU utilization to 946 * sge_rx - process an ingress ethernet packet
1025 * manageable levels. Basically, as the Average Packet Size (APS) 947 * @sge: the sge structure
1026 * gets higher, the interrupt latency setting gets longer. Every 948 * @fl: the free list that contains the packet buffer
1027 * SGE_INTR_BUCKETSIZE (of 100B) causes a bump of 2usec to the 949 * @len: the packet length
1028 * base value of SGE_INTRTIMER0. At large values of payload the
1029 * latency hits the ceiling value of SGE_INTRTIMER1 stored at
1030 * index SGE_INTR_MAXBUCKETS-1 in sge->intrtimer[].
1031 * 950 *
1032 * sge->currIndex caches the last index to save unneeded PIOs. 951 * Process an ingress ethernet packet and deliver it to the stack.
1033 */ 952 */
1034static inline void update_intr_timer(struct sge *sge, unsigned int avg_payload) 953static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1035{ 954{
1036 unsigned int newIndex; 955 struct sk_buff *skb;
956 struct cpl_rx_pkt *p;
957 struct adapter *adapter = sge->adapter;
1037 958
1038 newIndex = avg_payload / SGE_INTR_BUCKETSIZE; 959 sge->stats.ethernet_pkts++;
1039 if (newIndex > SGE_INTR_MAXBUCKETS - 1) { 960 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
1040 newIndex = SGE_INTR_MAXBUCKETS - 1; 961 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
1041 } 962 SGE_RX_DROP_THRES);
1042 /* Save a PIO with this check....maybe */ 963 if (!skb) {
1043 if (newIndex != sge->currIndex) { 964 sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
1044 t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, 965 return 0;
1045 sge->intrtimer[newIndex]);
1046 sge->currIndex = newIndex;
1047 sge->adapter->params.sge.last_rx_coalesce_raw =
1048 sge->intrtimer[newIndex];
1049 } 966 }
967
968 p = (struct cpl_rx_pkt *)skb->data;
969 skb_pull(skb, sizeof(*p));
970 skb->dev = adapter->port[p->iff].dev;
971 skb->dev->last_rx = jiffies;
972 skb->protocol = eth_type_trans(skb, skb->dev);
973 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
974 skb->protocol == htons(ETH_P_IP) &&
975 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
976 sge->port_stats[p->iff].rx_cso_good++;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
978 } else
979 skb->ip_summed = CHECKSUM_NONE;
980
981 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
982 sge->port_stats[p->iff].vlan_xtract++;
983 if (adapter->params.sge.polling)
984 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
985 ntohs(p->vlan));
986 else
987 vlan_hwaccel_rx(skb, adapter->vlan_grp,
988 ntohs(p->vlan));
989 } else if (adapter->params.sge.polling)
990 netif_receive_skb(skb);
991 else
992 netif_rx(skb);
993 return 0;
1050} 994}
1051 995
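The checksum test indexes skb->data[9] because, once eth_type_trans() has pulled the link-layer header, the data pointer sits on the IPv4 header, whose protocol field lives at byte offset 9. A compile-time way to document that assumption (a sketch; place it inside any function, on kernels that provide BUILD_BUG_ON):

	#include <linux/ip.h>

	/* ihl/version(1) + tos(1) + tot_len(2) + id(2) + frag_off(2) + ttl(1) = 9 */
	BUILD_BUG_ON(offsetof(struct iphdr, protocol) != 9);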
1052/* 996/*
1053 * Returns true if command queue q_num has enough available descriptors that 997 * Returns true if a command queue has enough available descriptors that
1054 * we can resume Tx operation after temporarily disabling its packet queue. 998 * we can resume Tx operation after temporarily disabling its packet queue.
1055 */ 999 */
1056static inline int enough_free_Tx_descs(struct sge *sge, int q_num) 1000static inline int enough_free_Tx_descs(const struct cmdQ *q)
1057{ 1001{
1058 return atomic_read(&sge->cmdQ[q_num].credits) > 1002 unsigned int r = q->processed - q->cleaned;
1059 (sge->cmdQ[q_num].entries_n >> 2); 1003
1004 return q->in_use - r < (q->size >> 1);
1060} 1005}
1061 1006
1062/* 1007/*
1063 * Main interrupt handler, optimized assuming that we took a 'DATA' 1008 * Called when sufficient space has become available in the SGE command queues
1064 * interrupt. 1009 * after the Tx packet schedulers have been suspended to restart the Tx path.
1065 *
1066 * 1. Clear the interrupt
1067 * 2. Loop while we find valid descriptors and process them; accumulate
1068 * information that can be processed after the loop
1069 * 3. Tell the SGE at which index we stopped processing descriptors
1070 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1071 * outstanding TX buffers waiting, replenish RX buffers, potentially
1072 * reenable upper layers if they were turned off due to lack of TX
1073 * resources which are available again.
1074 * 5. If we took an interrupt, but no valid respQ descriptors were found we
1075 * let the slow_intr_handler run and do error handling.
1076 */ 1010 */
1077irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs) 1011static void restart_tx_queues(struct sge *sge)
1078{ 1012{
1079 struct net_device *netdev; 1013 struct adapter *adap = sge->adapter;
1080 struct adapter *adapter = cookie;
1081 struct sge *sge = adapter->sge;
1082 struct respQ *Q = &sge->respQ;
1083 unsigned int credits = Q->credits, flags = 0, ret = 0;
1084 unsigned int tot_rxpayload = 0, tot_txpayload = 0, n_rx = 0, n_tx = 0;
1085 unsigned int credits_pend[SGE_CMDQ_N] = { 0, 0 };
1086 1014
1087 struct respQ_e *e = &Q->entries[Q->cidx]; 1015 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1088 prefetch(e); 1016 int i;
1017
1018 for_each_port(adap, i) {
1019 struct net_device *nd = adap->port[i].dev;
1020
1021 if (test_and_clear_bit(nd->if_port,
1022 &sge->stopped_tx_queues) &&
1023 netif_running(nd)) {
1024 sge->stats.cmdQ_restarted[3]++;
1025 netif_wake_queue(nd);
1026 }
1027 }
1028 }
1029}
1030
1031/*
1032 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1033 * information.
1034 */
1035static unsigned int update_tx_info(struct adapter *adapter,
1036 unsigned int flags,
1037 unsigned int pr0)
1038{
1039 struct sge *sge = adapter->sge;
1040 struct cmdQ *cmdq = &sge->cmdQ[0];
1089 1041
1090 t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_SGE_DATA); 1042 cmdq->processed += pr0;
1091 1043
1044 if (flags & F_CMDQ0_ENABLE) {
1045 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1046
1047 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1048 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1049 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1050 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1051 }
1052 flags &= ~F_CMDQ0_ENABLE;
1053 }
1054
1055 if (unlikely(sge->stopped_tx_queues != 0))
1056 restart_tx_queues(sge);
1092 1057
1093 while (e->GenerationBit == Q->genbit) { 1058 return flags;
1094 if (--credits < SGE_RESPQ_REPLENISH_THRES) { 1059}
1095 u32 n = Q->entries_n - credits - 1;
1096 1060
1097 t1_write_reg_4(adapter, A_SG_RSPQUEUECREDIT, n); 1061/*
1098 credits += n; 1062 * Process SGE responses, up to the supplied budget. Returns the number of
1063 * responses processed. A negative budget is effectively unlimited.
1064 */
1065static int process_responses(struct adapter *adapter, int budget)
1066{
1067 struct sge *sge = adapter->sge;
1068 struct respQ *q = &sge->respQ;
1069 struct respQ_e *e = &q->entries[q->cidx];
1070 int budget_left = budget;
1071 unsigned int flags = 0;
1072 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1073
1074
1075 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1076 flags |= e->Qsleeping;
1077
1078 cmdq_processed[0] += e->Cmdq0CreditReturn;
1079 cmdq_processed[1] += e->Cmdq1CreditReturn;
1080
1081 /* We batch updates to the TX side to avoid cacheline
1082 * ping-pong of TX state information on MP where the sender
1083 * might run on a different CPU than this function...
1084 */
1085 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1086 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1087 cmdq_processed[0] = 0;
1088 }
1089 if (unlikely(cmdq_processed[1] > 16)) {
1090 sge->cmdQ[1].processed += cmdq_processed[1];
1091 cmdq_processed[1] = 0;
1099 } 1092 }
1100 if (likely(e->DataValid)) { 1093 if (likely(e->DataValid)) {
1101 if (!e->Sop || !e->Eop) 1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095
1096 if (unlikely(!e->Sop || !e->Eop))
1102 BUG(); 1097 BUG();
1103 t1_sge_rx(sge, &sge->freelQ[e->FreelistQid], 1098 if (unlikely(e->Offload))
1104 e->BufferLength, e->Offload); 1099 unexpected_offload(adapter, fl);
1105 tot_rxpayload += e->BufferLength; 1100 else
1106 ++n_rx; 1101 sge_rx(sge, fl, e->BufferLength);
1107 } 1102
1108 flags |= e->Qsleeping; 1103 /*
1109 credits_pend[0] += e->Cmdq0CreditReturn; 1104 * Note: this depends on each packet consuming a
1110 credits_pend[1] += e->Cmdq1CreditReturn; 1105 * single free-list buffer; cf. the BUG above.
1106 */
1107 if (++fl->cidx == fl->size)
1108 fl->cidx = 0;
1109 if (unlikely(--fl->credits <
1110 fl->size - SGE_FREEL_REFILL_THRESH))
1111 refill_free_list(sge, fl);
1112 } else
1113 sge->stats.pure_rsps++;
1111 1114
1112#ifdef CONFIG_SMP
1113 /*
1114 * If enough cmdQ0 buffers have finished DMAing free them so
1115 * anyone that may be waiting for their release can continue.
1116 * We do this only on MP systems to allow other CPUs to proceed
1117 * promptly. UP systems can wait for the free_cmdQ_buffers()
1118 * calls after this loop as the sole CPU is currently busy in
1119 * this loop.
1120 */
1121 if (unlikely(credits_pend[0] > SGE_FREEL_REFILL_THRESH)) {
1122 free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0],
1123 &tot_txpayload);
1124 n_tx += credits_pend[0];
1125 credits_pend[0] = 0;
1126 }
1127#endif
1128 ret++;
1129 e++; 1115 e++;
1130 if (unlikely(++Q->cidx == Q->entries_n)) { 1116 if (unlikely(++q->cidx == q->size)) {
1131 Q->cidx = 0; 1117 q->cidx = 0;
1132 Q->genbit ^= 1; 1118 q->genbit ^= 1;
1133 e = Q->entries; 1119 e = q->entries;
1120 }
1121 prefetch(e);
1122
1123 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1124 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1125 q->credits = 0;
1134 } 1126 }
1127 --budget_left;
1135 } 1128 }
1136 1129
1137 Q->credits = credits; 1130 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1138 t1_write_reg_4(adapter, A_SG_SLEEPING, Q->cidx); 1131 sge->cmdQ[1].processed += cmdq_processed[1];
1139 1132
1140 if (credits_pend[0]) 1133 budget -= budget_left;
1141 free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0], &tot_txpayload); 1134 return budget;
1142 if (credits_pend[1]) 1135}
1143 free_cmdQ_buffers(sge, &sge->cmdQ[1], credits_pend[1], &tot_txpayload);
1144 1136
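The sign convention on budget matters: per the comment above, a negative budget is effectively unlimited. The two call sites appearing later in the file follow it; condensed:

	/* NAPI path: bounded by the quota handed to t1_poll() */
	work_done = process_responses(adapter, effective_budget);

	/* non-NAPI interrupt path: drain everything that is ready */
	work_done = process_responses(adapter, -1);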
1145 /* Do any coalescing and interrupt latency timer adjustments */ 1137/*
1146 if (adapter->params.sge.coalesce_enable) { 1138 * A simpler version of process_responses() that handles only pure (i.e.,
1147 unsigned int avg_txpayload = 0, avg_rxpayload = 0; 1139 * non data-carrying) responses. Such respones are too light-weight to justify
1140 * calling a softirq when using NAPI, so we handle them specially in hard
1141 * interrupt context. The function is called with a pointer to a response,
1142 * which the caller must ensure is a valid pure response. Returns 1 if it
1143 * encounters a valid data-carrying response, 0 otherwise.
1144 */
1145static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1146{
1147 struct sge *sge = adapter->sge;
1148 struct respQ *q = &sge->respQ;
1149 unsigned int flags = 0;
1150 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1148 1151
1149 n_tx += credits_pend[0] + credits_pend[1]; 1152 do {
1153 flags |= e->Qsleeping;
1150 1154
1151 /* 1155 cmdq_processed[0] += e->Cmdq0CreditReturn;
1152 * Choose larger avg. payload size to increase 1156 cmdq_processed[1] += e->Cmdq1CreditReturn;
1153 * throughput and reduce [CPU util., intr/s.] 1157
1154 * 1158 e++;
1155 * Throughput behavior favored in mixed-mode. 1159 if (unlikely(++q->cidx == q->size)) {
1156 */ 1160 q->cidx = 0;
1157 if (n_tx) 1161 q->genbit ^= 1;
1158 avg_txpayload = tot_txpayload/n_tx; 1162 e = q->entries;
1159 if (n_rx)
1160 avg_rxpayload = tot_rxpayload/n_rx;
1161
1162 if (n_tx && avg_txpayload > avg_rxpayload){
1163 update_intr_timer(sge, avg_txpayload);
1164 } else if (n_rx) {
1165 update_intr_timer(sge, avg_rxpayload);
1166 } 1163 }
1167 } 1164 prefetch(e);
1168
1169 if (flags & F_CMDQ0_ENABLE) {
1170 struct cmdQ *cmdQ = &sge->cmdQ[0];
1171 1165
1172 atomic_set(&cmdQ->asleep, 1); 1166 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1173 if (atomic_read(&cmdQ->pio_pidx) != cmdQ->pidx) { 1167 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1174 doorbell_pio(sge, F_CMDQ0_ENABLE); 1168 q->credits = 0;
1175 atomic_set(&cmdQ->pio_pidx, cmdQ->pidx);
1176 } 1169 }
1177 } 1170 sge->stats.pure_rsps++;
1178 if (unlikely(flags & (F_FL0_ENABLE | F_FL1_ENABLE))) 1171 } while (e->GenerationBit == q->genbit && !e->DataValid);
1179 freelQs_empty(sge);
1180 1172
1181 netdev = adapter->port[0].dev; 1173 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1182 if (unlikely(netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && 1174 sge->cmdQ[1].processed += cmdq_processed[1];
1183 enough_free_Tx_descs(sge, 0) &&
1184 enough_free_Tx_descs(sge, 1))) {
1185 netif_wake_queue(netdev);
1186 }
1187 if (unlikely(!ret))
1188 ret = t1_slow_intr_handler(adapter);
1189 1175
1190 return IRQ_RETVAL(ret != 0); 1176 return e->GenerationBit == q->genbit;
1191} 1177}
1192 1178
1193/* 1179/*
1194 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1180 * Handler for new data events when using NAPI. This does not need any locking
1195 * 1181 * or protection from interrupts as data interrupts are off at this point and
1196 * The code figures out how many entries the sk_buff will require in the 1182 * other adapter interrupts do not interfere.
1197 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1198 * has complete. Then, it doesn't access the global structure anymore, but
1199 * uses the corresponding fields on the stack. In conjuction with a spinlock
1200 * around that code, we can make the function reentrant without holding the
1201 * lock when we actually enqueue (which might be expensive, especially on
1202 * architectures with IO MMUs).
1203 */ 1183 */
1204static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, 1184static int t1_poll(struct net_device *dev, int *budget)
1205 unsigned int qid)
1206{ 1185{
1207 struct sge *sge = adapter->sge; 1186 struct adapter *adapter = dev->priv;
1208 struct cmdQ *Q = &sge->cmdQ[qid]; 1187 int effective_budget = min(*budget, dev->quota);
1209 struct cmdQ_e *e; 1188
1210 struct cmdQ_ce *ce; 1189 int work_done = process_responses(adapter, effective_budget);
1211 dma_addr_t mapping; 1190 *budget -= work_done;
1212 unsigned int credits, pidx, genbit; 1191 dev->quota -= work_done;
1213 1192
1214 unsigned int count = 1 + skb_shinfo(skb)->nr_frags; 1193 if (work_done >= effective_budget)
1194 return 1;
1195
1196 __netif_rx_complete(dev);
1215 1197
1216 /* 1198 /*
1217 * Coming from the timer 1199 * Because we don't atomically flush the following write it is
1200 * possible that in very rare cases it can reach the device in a way
1201 * that races with a new response being written plus an error interrupt
1202 * causing the NAPI interrupt handler below to return unhandled status
1203 * to the OS. To protect against this would require flushing the write
1204 * and doing both the write and the flush with interrupts off. Way too
1205 * expensive and unjustifiable given the rarity of the race.
1218 */ 1206 */
1219 if ((skb == sge->pskb)) { 1207 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1220 /* 1208 return 0;
1221 * Quit if any cmdQ activities 1209}
1222 */
1223 if (!spin_trylock(&Q->Qlock))
1224 return 0;
1225 if (atomic_read(&Q->credits) != Q->entries_n) {
1226 spin_unlock(&Q->Qlock);
1227 return 0;
1228 }
1229 }
1230 else
1231 spin_lock(&Q->Qlock);
1232
1233 genbit = Q->genbit;
1234 pidx = Q->pidx;
1235 credits = atomic_read(&Q->credits);
1236
1237 credits -= count;
1238 atomic_sub(count, &Q->credits);
1239 Q->pidx += count;
1240 if (Q->pidx >= Q->entries_n) {
1241 Q->pidx -= Q->entries_n;
1242 Q->genbit ^= 1;
1243 }
1244 1210
1245 if (unlikely(credits < (MAX_SKB_FRAGS + 1))) { 1211/*
1246 sge->intr_cnt.cmdQ_full[qid]++; 1212 * Returns true if the device is already scheduled for polling.
1247 netif_stop_queue(adapter->port[0].dev); 1213 */
1248 } 1214static inline int napi_is_scheduled(struct net_device *dev)
1249 spin_unlock(&Q->Qlock); 1215{
1216 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1217}
1250 1218
1251 mapping = pci_map_single(adapter->pdev, skb->data, 1219/*
1252 skb->len - skb->data_len, PCI_DMA_TODEVICE); 1220 * NAPI version of the main interrupt handler.
1253 ce = &Q->centries[pidx]; 1221 */
1254 ce->skb = NULL; 1222static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
1255 pci_unmap_addr_set(ce, dma_addr, mapping); 1223{
1256 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); 1224 int handled;
1257 ce->single = 1; 1225 struct adapter *adapter = data;
1226 struct sge *sge = adapter->sge;
1227 struct respQ *q = &adapter->sge->respQ;
1258 1228
1259 e = &Q->entries[pidx]; 1229 /*
1260 e->Sop = 1; 1230 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
1261 e->DataValid = 1; 1231 * handler has control of the response queue and the interrupt handler
1262 e->BufferLength = skb->len - skb->data_len; 1232 * can look at the queue reliably only once it knows NAPI is off.
1263 e->AddrHigh = (u64)mapping >> 32; 1233 * We can't wait that long to clear the SGE_DATA interrupt because we
1264 e->AddrLow = (u32)mapping; 1234 * could race with t1_poll rearming the SGE interrupt, so we need to
1235 * clear the interrupt speculatively and really early on.
1236 */
1237 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1238
1239 spin_lock(&adapter->async_lock);
1240 if (!napi_is_scheduled(sge->netdev)) {
1241 struct respQ_e *e = &q->entries[q->cidx];
1242
1243 if (e->GenerationBit == q->genbit) {
1244 if (e->DataValid ||
1245 process_pure_responses(adapter, e)) {
1246 if (likely(napi_schedule_prep(sge->netdev)))
1247 __netif_rx_schedule(sge->netdev);
1248 else
1249 printk(KERN_CRIT
1250 "NAPI schedule failure!\n");
1251 } else
1252 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1253 handled = 1;
1254 goto unlock;
1255 } else
1256 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1257 } else
1258 if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
1259 printk(KERN_ERR "data interrupt while NAPI running\n");
1260
1261 handled = t1_slow_intr_handler(adapter);
1262 if (!handled)
1263 sge->stats.unhandled_irqs++;
1264 unlock:
1265 spin_unlock(&adapter->async_lock);
1266 return IRQ_RETVAL(handled != 0);
1267}
1265 1268
1266 if (--count > 0) { 1269/*
1267 unsigned int i; 1270 * Main interrupt handler, optimized assuming that we took a 'DATA'
1271 * interrupt.
1272 *
1273 * 1. Clear the interrupt
1274 * 2. Loop while we find valid descriptors and process them; accumulate
1275 * information that can be processed after the loop
1276 * 3. Tell the SGE at which index we stopped processing descriptors
1277 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1278 * outstanding TX buffers waiting, replenish RX buffers, potentially
1279 * reenable upper layers if they were turned off due to lack of TX
1280 * resources which are available again.
1281 * 5. If we took an interrupt, but no valid respQ descriptors were found we
1282 * let the slow_intr_handler run and do error handling.
1283 */
1284static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1285{
1286 int work_done;
1287 struct respQ_e *e;
1288 struct adapter *adapter = cookie;
1289 struct respQ *Q = &adapter->sge->respQ;
1268 1290
1269 e->Eop = 0; 1291 spin_lock(&adapter->async_lock);
1270 wmb(); 1292 e = &Q->entries[Q->cidx];
1271 e->GenerationBit = e->GenerationBit2 = genbit; 1293 prefetch(e);
1272 1294
1273 for (i = 0; i < count; i++) { 1295 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1274 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1275 1296
1276 ce++; e++; 1297 if (likely(e->GenerationBit == Q->genbit))
1277 if (++pidx == Q->entries_n) { 1298 work_done = process_responses(adapter, -1);
1278 pidx = 0; 1299 else
1279 genbit ^= 1; 1300 work_done = t1_slow_intr_handler(adapter);
1280 ce = Q->centries;
1281 e = Q->entries;
1282 }
1283 1301
1284 mapping = pci_map_page(adapter->pdev, frag->page, 1302 /*
1285 frag->page_offset, 1303 * The unconditional clearing of the PL_CAUSE above may have raced
1286 frag->size, 1304 * with DMA completion and the corresponding generation of a response
1287 PCI_DMA_TODEVICE); 1305 * to cause us to miss the resulting data interrupt. The next write
1288 ce->skb = NULL; 1306 * is also unconditional to recover the missed interrupt and render
1289 pci_unmap_addr_set(ce, dma_addr, mapping); 1307 * this race harmless.
1290 pci_unmap_len_set(ce, dma_len, frag->size); 1308 */
1291 ce->single = 0; 1309 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1292 1310
1293 e->Sop = 0; 1311 if (!work_done)
1294 e->DataValid = 1; 1312 adapter->sge->stats.unhandled_irqs++;
1295 e->BufferLength = frag->size; 1313 spin_unlock(&adapter->async_lock);
1296 e->AddrHigh = (u64)mapping >> 32; 1314 return IRQ_RETVAL(work_done != 0);
1297 e->AddrLow = (u32)mapping; 1315}
1298 1316
1299 if (i < count - 1) { 1317intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1300 e->Eop = 0; 1318{
1301 wmb(); 1319 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1302 e->GenerationBit = e->GenerationBit2 = genbit; 1320}
1303 } 1321
1322/*
1323 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1324 *
1325 * The code figures out how many entries the sk_buff will require in the
1326 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1327 * has completed. Then, it doesn't access the global structure anymore, but
1328 * uses the corresponding fields on the stack. In conjunction with a spinlock
1329 * around that code, we can make the function reentrant without holding the
1330 * lock when we actually enqueue (which might be expensive, especially on
1331 * architectures with IO MMUs).
1332 *
1333 * This runs with softirqs disabled.
1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev)
1337{
1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid];
1340 unsigned int credits, pidx, genbit, count;
1341
1342 spin_lock(&q->lock);
1343 reclaim_completed_tx(sge, q);
1344
1345 pidx = q->pidx;
1346 credits = q->size - q->in_use;
1347 count = 1 + skb_shinfo(skb)->nr_frags;
1348
1349 { /* Ethernet packet */
1350 if (unlikely(credits < count)) {
1351 netif_stop_queue(dev);
1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n",
1356 adapter->name);
1357 return 1;
1304 } 1358 }
1359 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++;
1361 netif_stop_queue(dev);
1362 set_bit(dev->if_port, &sge->stopped_tx_queues);
1363 }
1364 }
1365 q->in_use += count;
1366 genbit = q->genbit;
1367 q->pidx += count;
1368 if (q->pidx >= q->size) {
1369 q->pidx -= q->size;
1370 q->genbit ^= 1;
1305 } 1371 }
1372 spin_unlock(&q->lock);
1306 1373
1307 if (skb != sge->pskb) 1374 write_tx_descs(adapter, skb, pidx, genbit, q);
1308 ce->skb = skb;
1309 e->Eop = 1;
1310 wmb();
1311 e->GenerationBit = e->GenerationBit2 = genbit;
1312 1375
1313 /* 1376 /*
1314 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring 1377 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
@@ -1317,12 +1380,14 @@ static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1317 * then the interrupt handler will detect the outstanding TX packet 1380 * then the interrupt handler will detect the outstanding TX packet
1318 * and ring the doorbell for us. 1381 * and ring the doorbell for us.
1319 */ 1382 */
1320 if (qid) { 1383 if (qid)
1321 doorbell_pio(sge, F_CMDQ1_ENABLE); 1384 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1322 } else if (atomic_read(&Q->asleep)) { 1385 else {
1323 atomic_set(&Q->asleep, 0); 1386 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1324 doorbell_pio(sge, F_CMDQ0_ENABLE); 1387 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1325 atomic_set(&Q->pio_pidx, Q->pidx); 1388 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 }
1326 } 1391 }
1327 return 0; 1392 return 0;
1328} 1393}
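
The locking design spelled out in the comment above t1_sge_tx() is worth isolating: the queue lock covers only the credit check and the pidx/genbit bookkeeping, while the descriptor writes and the doorbell run on a private snapshot outside the lock; for cmdQ0 the CMDQ_STAT_RUNNING/LAST_PKT_DB bits additionally let a burst of packets share a single doorbell write. Below is a minimal userspace model of the reserve-then-fill half of that pattern; struct ring and all names here are illustrative stand-ins, not driver API.

/* Reserve ring slots under a short-lived lock, then fill and publish
 * the descriptors outside it. The generation bit flips on each wrap so
 * the consumer can tell fresh entries from stale ones, and the barrier
 * makes the descriptor body visible before the generation bit.
 */
#include <pthread.h>

#define RING_SIZE 256

struct desc { int payload; unsigned gen; };

struct ring {
        pthread_mutex_t lock;
        unsigned pidx, in_use, genbit;
        struct desc d[RING_SIZE];
};

static struct ring txq = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void enqueue(struct ring *q, int payload)
{
        unsigned pidx, genbit;

        pthread_mutex_lock(&q->lock);   /* short critical section */
        pidx = q->pidx;                 /* private snapshot of state */
        genbit = q->genbit;
        q->in_use++;
        if (++q->pidx == RING_SIZE) {   /* wrap: flip the generation */
                q->pidx = 0;
                q->genbit ^= 1;
        }
        pthread_mutex_unlock(&q->lock);

        /* Expensive work (DMA mapping in the driver) happens here,
         * unlocked, touching only the slot reserved above.
         */
        q->d[pidx].payload = payload;
        __sync_synchronize();           /* like the driver's wmb() */
        q->d[pidx].gen = genbit;        /* publish the descriptor */
}
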
@@ -1330,37 +1395,35 @@ static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1330#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) 1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
1331 1396
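
MK_ETH_TYPE_MSS packs the Ethernet framing type into the top two bits of a 16-bit field and the MSS into the low 14 bits, which caps the MSS at 16383. A worked example with a hypothetical type value of 1 and the common TCP/IPv4 MSS of 1460:

        MK_ETH_TYPE_MSS(1, 1460) = (1460 & 0x3FFF) | (1 << 14)
                                 = 0x05B4 | 0x4000
                                 = 0x45B4
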
1332/* 1397/*
1398 * eth_hdr_len - return the length of an Ethernet header
1399 * @data: pointer to the start of the Ethernet header
1400 *
1401 * Returns the length of an Ethernet header, including optional VLAN tag.
1402 */
1403static inline int eth_hdr_len(const void *data)
1404{
1405 const struct ethhdr *e = data;
1406
1407 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1408}
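
eth_hdr_len() lets the oversize check in t1_start_xmit() treat tagged and untagged frames uniformly: ETH_HLEN is 14 bytes and VLAN_ETH_HLEN is 18, so for a standard 1500-byte MTU the accepted maximum works out to:

        max frame = dev->mtu + eth_hdr_len(skb->data)
                  = 1500 + 14 = 1514 bytes (untagged)
                  = 1500 + 18 = 1518 bytes (802.1Q tagged)
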
1409
1410/*
1333 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. 1411 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1334 */ 1412 */
1335int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1413int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1336{ 1414{
1337 struct adapter *adapter = dev->priv; 1415 struct adapter *adapter = dev->priv;
1416 struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
1417 struct sge *sge = adapter->sge;
1338 struct cpl_tx_pkt *cpl; 1418 struct cpl_tx_pkt *cpl;
1339 struct ethhdr *eth;
1340 size_t max_len;
1341
1342 /*
1343 * We are using a non-standard hard_header_len and some kernel
1344 * components, such as pktgen, do not handle it right. Complain
1345 * when this happens but try to fix things up.
1346 */
1347 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1348 struct sk_buff *orig_skb = skb;
1349
1350 if (net_ratelimit())
1351 printk(KERN_ERR
1352 "%s: Tx packet has inadequate headroom\n",
1353 dev->name);
1354 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1355 dev_kfree_skb_any(orig_skb);
1356 if (!skb)
1357 return -ENOMEM;
1358 }
1359 1419
1420#ifdef NETIF_F_TSO
1360 if (skb_shinfo(skb)->tso_size) { 1421 if (skb_shinfo(skb)->tso_size) {
1361 int eth_type; 1422 int eth_type;
1362 struct cpl_tx_pkt_lso *hdr; 1423 struct cpl_tx_pkt_lso *hdr;
1363 1424
1425 st->tso++;
1426
1364 eth_type = skb->nh.raw - skb->data == ETH_HLEN ? 1427 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1365 CPL_ETH_II : CPL_ETH_II_VLAN; 1428 CPL_ETH_II : CPL_ETH_II_VLAN;
1366 1429
@@ -1373,40 +1436,72 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1373 skb_shinfo(skb)->tso_size)); 1436 skb_shinfo(skb)->tso_size));
1374 hdr->len = htonl(skb->len - sizeof(*hdr)); 1437 hdr->len = htonl(skb->len - sizeof(*hdr));
1375 cpl = (struct cpl_tx_pkt *)hdr; 1438 cpl = (struct cpl_tx_pkt *)hdr;
1439 sge->stats.tx_lso_pkts++;
1376 } else 1440 } else
1441#endif
1377 { 1442 {
1378 /* 1443 /*
1379 * An Ethernet packet must have at least space for 1444 * Packets shorter than ETH_HLEN can break the MAC, so drop
1380 * the DIX Ethernet header and be no greater than 1445 * them early. Also, we may get oversized packets because some
1381 * the device set MTU. Otherwise trash the packet. 1446 * parts of the kernel don't handle our unusual hard_header_len
1447 * right; drop those too.
1382 */ 1448 */
1383 if (skb->len < ETH_HLEN) 1449 if (unlikely(skb->len < ETH_HLEN ||
1384 goto t1_start_xmit_fail2; 1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1385 eth = (struct ethhdr *)skb->data; 1451 dev_kfree_skb_any(skb);
1386 if (eth->h_proto == htons(ETH_P_8021Q)) 1452 return NET_XMIT_SUCCESS;
1387 max_len = dev->mtu + VLAN_ETH_HLEN; 1453 }
1388 else 1454
1389 max_len = dev->mtu + ETH_HLEN; 1455 /*
1390 if (skb->len > max_len) 1456 * We are using a non-standard hard_header_len and some kernel
1391 goto t1_start_xmit_fail2; 1457 * components, such as pktgen, do not handle it right.
1458 * Complain when this happens but try to fix things up.
1459 */
1460 if (unlikely(skb_headroom(skb) <
1461 dev->hard_header_len - ETH_HLEN)) {
1462 struct sk_buff *orig_skb = skb;
1463
1464 if (net_ratelimit())
1465 printk(KERN_ERR "%s: inadequate headroom in "
1466 "Tx packet\n", dev->name);
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb);
1469 if (!skb)
1470 return -ENOMEM;
1471 }
1392 1472
1393 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1394 skb->ip_summed == CHECKSUM_HW && 1474 skb->ip_summed == CHECKSUM_HW &&
1395 skb->nh.iph->protocol == IPPROTO_UDP && 1475 skb->nh.iph->protocol == IPPROTO_UDP)
1396 skb_checksum_help(skb, 0)) 1476 if (unlikely(skb_checksum_help(skb, 0))) {
1397 goto t1_start_xmit_fail3; 1477 dev_kfree_skb_any(skb);
1398 1478 return -ENOMEM;
1479 }
1399 1480
1400 if (!adapter->sge->pskb) { 1481 /* Assume this catches the gratuitous ARP; we'll use
1482 * it to flush out stuck ESPI packets.
1483 */
1484 if (unlikely(!adapter->sge->espibug_skb)) {
1401 if (skb->protocol == htons(ETH_P_ARP) && 1485 if (skb->protocol == htons(ETH_P_ARP) &&
1402 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) 1486 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
1403 adapter->sge->pskb = skb; 1487 adapter->sge->espibug_skb = skb;
1488 /* We want to re-use this skb later. We
1489 * simply bump the reference count and it
1490 * will not be freed...
1491 */
1492 skb = skb_get(skb);
1493 }
1404 } 1494 }
1405 cpl = (struct cpl_tx_pkt *)skb_push(skb, sizeof(*cpl)); 1495
1496 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1406 cpl->opcode = CPL_TX_PKT; 1497 cpl->opcode = CPL_TX_PKT;
1407 cpl->ip_csum_dis = 1; /* SW calculates IP csum */ 1498 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1408 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; 1499 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1409 /* the length field isn't used so don't bother setting it */ 1500 /* the length field isn't used so don't bother setting it */
1501
1502 st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
1503 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
1504 sge->stats.tx_reg_pkts++;
1410 } 1505 }
1411 cpl->iff = dev->if_port; 1506 cpl->iff = dev->if_port;
1412 1507
@@ -1414,38 +1509,176 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { 1509 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1415 cpl->vlan_valid = 1; 1510 cpl->vlan_valid = 1;
1416 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1511 cpl->vlan = htons(vlan_tx_tag_get(skb));
1512 st->vlan_insert++;
1417 } else 1513 } else
1418#endif 1514#endif
1419 cpl->vlan_valid = 0; 1515 cpl->vlan_valid = 0;
1420 1516
1421 dev->trans_start = jiffies; 1517 dev->trans_start = jiffies;
1422 return t1_sge_tx(skb, adapter, 0); 1518 return t1_sge_tx(skb, adapter, 0, dev);
1519}
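
The gratuitous-ARP stash above works because skb_get() bumps the skb's reference count: the stack's eventual free on the transmitted buffer only drops the count back to one, leaving the driver's espibug_skb pointer valid for retransmission later. A self-contained model of that keep-alive-by-refcount idea, with invented names:

#include <stdlib.h>

struct buf { int refs; char data[64]; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
        if (--b->refs == 0)
                free(b);                /* last reference frees the buffer */
}

static void transmit(struct buf *b)
{
        /* ... hand to "hardware" ... */
        buf_put(b);                     /* tx completion drops one ref */
}

static struct buf *stash;               /* driver-held reference */

static void stash_and_send(struct buf *b)
{
        stash = b;                      /* keep our reference for reuse */
        transmit(buf_get(b));           /* give the tx path its own ref */
}
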
1423 1520
1424t1_start_xmit_fail3: 1521/*
1425 printk(KERN_INFO "%s: Unable to complete checksum\n", dev->name); 1522 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1426 goto t1_start_xmit_fail1; 1523 */
1524static void sge_tx_reclaim_cb(unsigned long data)
1525{
1526 int i;
1527 struct sge *sge = (struct sge *)data;
1528
1529 for (i = 0; i < SGE_CMDQ_N; ++i) {
1530 struct cmdQ *q = &sge->cmdQ[i];
1531
1532 if (!spin_trylock(&q->lock))
1533 continue;
1427 1534
1428t1_start_xmit_fail2: 1535 reclaim_completed_tx(sge, q);
1429 printk(KERN_INFO "%s: Invalid packet length %d, dropping\n", 1536 if (i == 0 && q->in_use) /* flush pending credits */
1430 dev->name, skb->len); 1537 writel(F_CMDQ0_ENABLE,
1538 sge->adapter->regs + A_SG_DOORBELL);
1431 1539
1432t1_start_xmit_fail1: 1540 spin_unlock(&q->lock);
1433 dev_kfree_skb_any(skb); 1541 }
1542 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1543}
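
Note the spin_trylock() in sge_tx_reclaim_cb(): periodic reclaim is best-effort, so if the transmit path currently holds a queue's lock the timer skips that queue and catches it on the next tick rather than spinning in softirq context. The same best-effort shape in a userspace sketch (names illustrative):

#include <pthread.h>

#define NQUEUES 2

static pthread_mutex_t qlock[NQUEUES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void reclaim_tick(void)
{
        for (int i = 0; i < NQUEUES; i++) {
                if (pthread_mutex_trylock(&qlock[i]) != 0)
                        continue;       /* contended: retry next tick */
                /* reclaim completed entries for queue i here */
                pthread_mutex_unlock(&qlock[i]);
        }
}
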
1544
1545/*
1546 * Propagate changes of the SGE coalescing parameters to the HW.
1547 */
1548int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1549{
1550 sge->netdev->poll = t1_poll;
1551 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1552 core_ticks_per_usec(sge->adapter);
1553 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1434 return 0; 1554 return 0;
1435} 1555}
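
The conversion above is simply microseconds times core ticks per microsecond. With the default rx_coalesce_usecs of 50 (set in t1_sge_create() further down) and a hypothetical 125 MHz core clock, i.e. 125 ticks per microsecond:

        fixed_intrtimer = 50 us * 125 ticks/us = 6250 ticks

so A_SG_INTRTIMER would be written with 6250.
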
1436 1556
1437void t1_sge_set_ptimeout(adapter_t *adapter, u32 val) 1557/*
1558 * Allocates both RX and TX resources and configures the SGE. However,
1559 * the hardware is not enabled yet.
1560 */
1561int t1_sge_configure(struct sge *sge, struct sge_params *p)
1438{ 1562{
1439 struct sge *sge = adapter->sge; 1563 if (alloc_rx_resources(sge, p))
1564 return -ENOMEM;
1565 if (alloc_tx_resources(sge, p)) {
1566 free_rx_resources(sge);
1567 return -ENOMEM;
1568 }
1569 configure_sge(sge, p);
1570
1571 /*
1572 * Now that we have sized the free lists calculate the payload
1573 * capacity of the large buffers. Other parts of the driver use
1574 * this to set the max offload coalescing size so that RX packets
1575 * do not overflow our large buffers.
1576 */
1577 p->large_buf_capacity = jumbo_payload_capacity(sge);
1578 return 0;
1579}
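
t1_sge_configure() follows the staged-allocation discipline: a failure at any stage unwinds the stages before it, so the caller sees either a fully configured SGE or no side effects. The same logic in the goto-unwind style kernel code often adopts once more stages accumulate; this is a stylistic sketch reusing the helpers above, not the committed code:

static int configure_sketch(struct sge *sge, struct sge_params *p)
{
        int err = -ENOMEM;

        if (alloc_rx_resources(sge, p))
                goto out;
        if (alloc_tx_resources(sge, p))
                goto out_free_rx;

        configure_sge(sge, p);
        return 0;

out_free_rx:
        free_rx_resources(sge);         /* unwind stage 1 */
out:
        return err;
}
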
1440 1580
1441 if (is_T2(adapter)) 1581/*
1442 sge->ptimeout = max((u32)((HZ * val) / 1000), (u32)1); 1582 * Disables the DMA engine.
1583 */
1584void t1_sge_stop(struct sge *sge)
1585{
1586 writel(0, sge->adapter->regs + A_SG_CONTROL);
1587 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1588 if (is_T2(sge->adapter))
1589 del_timer_sync(&sge->espibug_timer);
1590 del_timer_sync(&sge->tx_reclaim_timer);
1443} 1591}
1444 1592
1445u32 t1_sge_get_ptimeout(adapter_t *adapter) 1593/*
1594 * Enables the DMA engine.
1595 */
1596void t1_sge_start(struct sge *sge)
1446{ 1597{
1598 refill_free_list(sge, &sge->freelQ[0]);
1599 refill_free_list(sge, &sge->freelQ[1]);
1600
1601 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1602 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1603 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1604
1605 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1606
1607 if (is_T2(sge->adapter))
1608 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1609}
1610
1611/*
1612 * Callback for the T2 ESPI 'stuck packet feature' workaround.
1613 */
1614static void espibug_workaround(void *data)
1615{
1616 struct adapter *adapter = (struct adapter *)data;
1447 struct sge *sge = adapter->sge; 1617 struct sge *sge = adapter->sge;
1448 1618
1449 return (is_T2(adapter) ? ((sge->ptimeout * 1000) / HZ) : 0); 1619 if (netif_running(adapter->port[0].dev)) {
1620 struct sk_buff *skb = sge->espibug_skb;
1621
1622 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
1623
1624 if ((seop & 0xfff0fff) == 0xfff && skb) {
1625 if (!skb->cb[0]) {
1626 u8 ch_mac_addr[ETH_ALEN] =
1627 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
1628 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
1629 ch_mac_addr, ETH_ALEN);
1630 memcpy(skb->data + skb->len - 10, ch_mac_addr,
1631 ETH_ALEN);
1632 skb->cb[0] = 0xff;
1633 }
1634
1635 /* bump the reference count to avoid freeing of the
1636 * skb once the DMA has completed.
1637 */
1638 skb = skb_get(skb);
1639 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
1640 }
1641 }
1642 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1450} 1643}
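
The stuck-packet test (seop & 0xfff0fff) == 0xfff above is a masked compare. The patch does not document the ESPI monitor register's fields, but arithmetically the condition requires:

        bits 11..0  of seop: all ones (0xfff)
        bits 27..16 of seop: all zeros
        bits 15..12 and 31..28: ignored by the mask
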
1451 1644
1645/*
1646 * Creates a t1_sge structure and returns suggested resource parameters.
1647 */
1648struct sge * __devinit t1_sge_create(struct adapter *adapter,
1649 struct sge_params *p)
1650{
1651 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
1652
1653 if (!sge)
1654 return NULL;
1655 memset(sge, 0, sizeof(*sge));
1656
1657 sge->adapter = adapter;
1658 sge->netdev = adapter->port[0].dev;
1659 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
1660 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
1661
1662 init_timer(&sge->tx_reclaim_timer);
1663 sge->tx_reclaim_timer.data = (unsigned long)sge;
1664 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
1665
1666 if (is_T2(sge->adapter)) {
1667 init_timer(&sge->espibug_timer);
1668 sge->espibug_timer.function = (void *)&espibug_workaround;
1669 sge->espibug_timer.data = (unsigned long)sge->adapter;
1670 sge->espibug_timeout = 1;
1671 }
1672
1673
1674 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
1675 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
1676 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
1677 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
1678 p->rx_coalesce_usecs = 50;
1679 p->coalesce_enable = 0;
1680 p->sample_interval_usecs = 0;
1681 p->polling = 0;
1682
1683 return sge;
1684}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 140f896def60..434b25586851 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: sge.h * 3 * File: sge.h *
4 * $Revision: 1.7 $ * 4 * $Revision: 1.11 $ *
5 * $Date: 2005/03/23 07:15:59 $ * 5 * $Date: 2005/06/21 22:10:55 $ *
6 * Description: * 6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. * 7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * * 8 * *
@@ -36,25 +36,50 @@
36 * * 36 * *
37 ****************************************************************************/ 37 ****************************************************************************/
38 38
39#ifndef _CHELSIO_LINUX_SGE_H_ 39#ifndef _CXGB_SGE_H_
40#define _CHELSIO_LINUX_SGE_H_ 40#define _CXGB_SGE_H_
41 41
42#include <linux/types.h> 42#include <linux/types.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <asm/byteorder.h> 44#include <asm/byteorder.h>
45 45
46#ifndef IRQ_RETVAL
47#define IRQ_RETVAL(x)
48typedef void irqreturn_t;
49#endif
50
51typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
52
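
The intr_handler_t typedef exists so t1_select_intr_handler() can return either the NAPI or the plain ISR as an ordinary function pointer suitable for request_irq(). The actual registration lives in cxgb2.c and is not part of this hunk; a hedged sketch of what such a 2.6.12-era call site would look like (SA_SHIRQ being the shared-interrupt flag of the day, and the error label invented):

        err = request_irq(adapter->pdev->irq,
                          t1_select_intr_handler(adapter),
                          SA_SHIRQ, adapter->name, adapter);
        if (err)
                goto out_free_resources;        /* illustrative error path */
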
46struct sge_intr_counts { 53struct sge_intr_counts {
47 unsigned int respQ_empty; /* # times respQ empty */ 54 unsigned int respQ_empty; /* # times respQ empty */
48 unsigned int respQ_overflow; /* # respQ overflow (fatal) */ 55 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
49 unsigned int freelistQ_empty; /* # times freelist empty */ 56 unsigned int freelistQ_empty; /* # times freelist empty */
50 unsigned int pkt_too_big; /* packet too large (fatal) */ 57 unsigned int pkt_too_big; /* packet too large (fatal) */
51 unsigned int pkt_mismatch; 58 unsigned int pkt_mismatch;
52 unsigned int cmdQ_full[2]; /* not HW interrupt, host cmdQ[] full */ 59 unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
60 unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
61 unsigned int ethernet_pkts; /* # of Ethernet packets received */
62 unsigned int offload_pkts; /* # of offload packets received */
63 unsigned int offload_bundles; /* # of offload pkt bundles delivered */
64 unsigned int pure_rsps; /* # of non-payload responses */
65 unsigned int unhandled_irqs; /* # of unhandled interrupts */
66 unsigned int tx_ipfrags;
67 unsigned int tx_reg_pkts;
68 unsigned int tx_lso_pkts;
69 unsigned int tx_do_cksum;
70};
71
72struct sge_port_stats {
73 unsigned long rx_cso_good; /* # of successful RX csum offloads */
74 unsigned long tx_cso; /* # of TX checksum offloads */
75 unsigned long vlan_xtract; /* # of VLAN tag extractions */
76 unsigned long vlan_insert; /* # of VLAN tag insertions */
77 unsigned long tso; /* # of TSO requests */
78 unsigned long rx_drops; /* # of packets dropped due to no mem */
53}; 79};
54 80
55struct sk_buff; 81struct sk_buff;
56struct net_device; 82struct net_device;
57struct cxgbdev;
58struct adapter; 83struct adapter;
59struct sge_params; 84struct sge_params;
60struct sge; 85struct sge;
@@ -63,7 +88,9 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
63int t1_sge_configure(struct sge *, struct sge_params *); 88int t1_sge_configure(struct sge *, struct sge_params *);
64int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); 89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
65void t1_sge_destroy(struct sge *); 90void t1_sge_destroy(struct sge *);
66irqreturn_t t1_interrupt(int, void *, struct pt_regs *); 91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
67int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); 94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
68void t1_set_vlan_accel(struct adapter *adapter, int on_off); 95void t1_set_vlan_accel(struct adapter *adapter, int on_off);
69void t1_sge_start(struct sge *); 96void t1_sge_start(struct sge *);
@@ -72,8 +99,7 @@ int t1_sge_intr_error_handler(struct sge *);
72void t1_sge_intr_enable(struct sge *); 99void t1_sge_intr_enable(struct sge *);
73void t1_sge_intr_disable(struct sge *); 100void t1_sge_intr_disable(struct sge *);
74void t1_sge_intr_clear(struct sge *); 101void t1_sge_intr_clear(struct sge *);
102const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
103const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
75 104
76void t1_sge_set_ptimeout(adapter_t *adapter, u32 val); 105#endif /* _CXGB_SGE_H_ */
77u32 t1_sge_get_ptimeout(adapter_t *adapter);
78
79#endif /* _CHELSIO_LINUX_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index a90a3f95fcac..1ebb5d149aef 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: subr.c * 3 * File: subr.c *
4 * $Revision: 1.12 $ * 4 * $Revision: 1.27 $ *
5 * $Date: 2005/03/23 07:41:27 $ * 5 * $Date: 2005/06/22 01:08:36 $ *
6 * Description: * 6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. * 7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -40,11 +40,9 @@
40#include "common.h" 40#include "common.h"
41#include "elmer0.h" 41#include "elmer0.h"
42#include "regs.h" 42#include "regs.h"
43
44#include "gmac.h" 43#include "gmac.h"
45#include "cphy.h" 44#include "cphy.h"
46#include "sge.h" 45#include "sge.h"
47#include "tp.h"
48#include "espi.h" 46#include "espi.h"
49 47
50/** 48/**
@@ -64,7 +62,7 @@ static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
64 int attempts, int delay) 62 int attempts, int delay)
65{ 63{
66 while (1) { 64 while (1) {
67 u32 val = t1_read_reg_4(adapter, reg) & mask; 65 u32 val = readl(adapter->regs + reg) & mask;
68 66
69 if (!!val == polarity) 67 if (!!val == polarity)
70 return 0; 68 return 0;
@@ -84,9 +82,9 @@ static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
84{ 82{
85 int tpi_busy; 83 int tpi_busy;
86 84
87 t1_write_reg_4(adapter, A_TPI_ADDR, addr); 85 writel(addr, adapter->regs + A_TPI_ADDR);
88 t1_write_reg_4(adapter, A_TPI_WR_DATA, value); 86 writel(value, adapter->regs + A_TPI_WR_DATA);
89 t1_write_reg_4(adapter, A_TPI_CSR, F_TPIWR); 87 writel(F_TPIWR, adapter->regs + A_TPI_CSR);
90 88
91 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, 89 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
92 TPI_ATTEMPTS, 3); 90 TPI_ATTEMPTS, 3);
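
__t1_tpi_write() is the classic indirect register protocol: post the target address and data, kick the command register, then poll a ready bit a bounded number of times. A compact userspace model of the same sequence, with registers reduced to an array and the hardware acknowledgement faked so the sketch is self-contained (real hardware sets the ready bit itself):

#include <stdint.h>

enum { REG_ADDR, REG_DATA, REG_CSR, NREGS };
#define CSR_WR    (1u << 0)             /* start a write  */
#define CSR_RDY   (1u << 1)             /* operation done */
#define ATTEMPTS  3

static volatile uint32_t regs[NREGS];

static int tpi_write(uint32_t addr, uint32_t val)
{
        regs[REG_ADDR] = addr;
        regs[REG_DATA] = val;
        regs[REG_CSR]  = CSR_WR;        /* kick the operation */

        regs[REG_CSR] |= CSR_RDY;       /* fake hw ack, for the sketch only */

        for (int i = 0; i < ATTEMPTS; i++)  /* bounded busy-wait */
                if (regs[REG_CSR] & CSR_RDY)
                        return 0;
        return -1;                      /* timed out */
}
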
@@ -100,9 +98,9 @@ int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
100{ 98{
101 int ret; 99 int ret;
102 100
103 TPI_LOCK(adapter); 101 spin_lock(&(adapter)->tpi_lock);
104 ret = __t1_tpi_write(adapter, addr, value); 102 ret = __t1_tpi_write(adapter, addr, value);
105 TPI_UNLOCK(adapter); 103 spin_unlock(&(adapter)->tpi_lock);
106 return ret; 104 return ret;
107} 105}
108 106
@@ -113,8 +111,8 @@ static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
113{ 111{
114 int tpi_busy; 112 int tpi_busy;
115 113
116 t1_write_reg_4(adapter, A_TPI_ADDR, addr); 114 writel(addr, adapter->regs + A_TPI_ADDR);
117 t1_write_reg_4(adapter, A_TPI_CSR, 0); 115 writel(0, adapter->regs + A_TPI_CSR);
118 116
119 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, 117 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
120 TPI_ATTEMPTS, 3); 118 TPI_ATTEMPTS, 3);
@@ -122,7 +120,7 @@ static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
122 CH_ALERT("%s: TPI read from 0x%x failed\n", 120 CH_ALERT("%s: TPI read from 0x%x failed\n",
123 adapter->name, addr); 121 adapter->name, addr);
124 else 122 else
125 *valp = t1_read_reg_4(adapter, A_TPI_RD_DATA); 123 *valp = readl(adapter->regs + A_TPI_RD_DATA);
126 return tpi_busy; 124 return tpi_busy;
127} 125}
128 126
@@ -130,21 +128,13 @@ int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
130{ 128{
131 int ret; 129 int ret;
132 130
133 TPI_LOCK(adapter); 131 spin_lock(&(adapter)->tpi_lock);
134 ret = __t1_tpi_read(adapter, addr, valp); 132 ret = __t1_tpi_read(adapter, addr, valp);
135 TPI_UNLOCK(adapter); 133 spin_unlock(&(adapter)->tpi_lock);
136 return ret; 134 return ret;
137} 135}
138 136
139/* 137/*
140 * Set a TPI parameter.
141 */
142static void t1_tpi_par(adapter_t *adapter, u32 value)
143{
144 t1_write_reg_4(adapter, A_TPI_PAR, V_TPIPAR(value));
145}
146
147/*
148 * Called when a port's link settings change to propagate the new values to the 138 * Called when a port's link settings change to propagate the new values to the
149 * associated PHY and MAC. After performing the common tasks it invokes an 139 * associated PHY and MAC. After performing the common tasks it invokes an
150 * OS-specific handler. 140 * OS-specific handler.
@@ -227,7 +217,7 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
227{ 217{
228 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); 218 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
229 219
230 TPI_LOCK(adapter); 220 spin_lock(&(adapter)->tpi_lock);
231 221
232 /* Write the address we want. */ 222 /* Write the address we want. */
233 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); 223 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -242,7 +232,7 @@ static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
242 232
243 /* Read the data. */ 233 /* Read the data. */
244 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); 234 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
245 TPI_UNLOCK(adapter); 235 spin_unlock(&(adapter)->tpi_lock);
246 return 0; 236 return 0;
247} 237}
248 238
@@ -251,7 +241,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
251{ 241{
252 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); 242 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
253 243
254 TPI_LOCK(adapter); 244 spin_lock(&(adapter)->tpi_lock);
255 245
256 /* Write the address we want. */ 246 /* Write the address we want. */
257 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); 247 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -264,7 +254,7 @@ static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
264 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); 254 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
265 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); 255 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
266 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); 256 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
267 TPI_UNLOCK(adapter); 257 spin_unlock(&(adapter)->tpi_lock);
268 return 0; 258 return 0;
269} 259}
270 260
@@ -277,7 +267,6 @@ static struct mdio_ops mi1_mdio_ext_ops = {
277enum { 267enum {
278 CH_BRD_N110_1F, 268 CH_BRD_N110_1F,
279 CH_BRD_N210_1F, 269 CH_BRD_N210_1F,
280 CH_BRD_T210_1F,
281}; 270};
282 271
283static struct board_info t1_board[] = { 272static struct board_info t1_board[] = {
@@ -308,13 +297,15 @@ struct pci_device_id t1_pci_tbl[] = {
308 { 0, } 297 { 0, }
309}; 298};
310 299
300MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
301
311/* 302/*
312 * Return the board_info structure with a given index. Out-of-range indices 303 * Return the board_info structure with a given index. Out-of-range indices
313 * return NULL. 304 * return NULL.
314 */ 305 */
315const struct board_info *t1_get_board_info(unsigned int board_id) 306const struct board_info *t1_get_board_info(unsigned int board_id)
316{ 307{
317 return board_id < DIMOF(t1_board) ? &t1_board[board_id] : NULL; 308 return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
318} 309}
319 310
320struct chelsio_vpd_t { 311struct chelsio_vpd_t {
@@ -436,7 +427,6 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
436 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); 427 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
437 428
438 switch (board_info(adapter)->board) { 429 switch (board_info(adapter)->board) {
439 case CHBT_BOARD_CHT210:
440 case CHBT_BOARD_N210: 430 case CHBT_BOARD_N210:
441 case CHBT_BOARD_N110: 431 case CHBT_BOARD_N110:
442 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ 432 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
@@ -446,23 +436,6 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
446 link_changed(adapter, 0); 436 link_changed(adapter, 0);
447 } 437 }
448 break; 438 break;
449 case CHBT_BOARD_8000:
450 case CHBT_BOARD_CHT110:
451 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
452 cause);
453 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
454 struct cmac *mac = adapter->port[0].mac;
455
456 mac->ops->interrupt_handler(mac);
457 }
458 if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
459 u32 mod_detect;
460
461 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
462 CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
463 mod_detect ? "removed" : "inserted");
464 }
465 break;
466 } 439 }
467 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); 440 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
468 return 0; 441 return 0;
@@ -472,11 +445,11 @@ int elmer0_ext_intr_handler(adapter_t *adapter)
472void t1_interrupts_enable(adapter_t *adapter) 445void t1_interrupts_enable(adapter_t *adapter)
473{ 446{
474 unsigned int i; 447 unsigned int i;
448 u32 pl_intr;
475 449
476 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; 450 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
477 451
478 t1_sge_intr_enable(adapter->sge); 452 t1_sge_intr_enable(adapter->sge);
479 t1_tp_intr_enable(adapter->tp);
480 if (adapter->espi) { 453 if (adapter->espi) {
481 adapter->slow_intr_mask |= F_PL_INTR_ESPI; 454 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
482 t1_espi_intr_enable(adapter->espi); 455 t1_espi_intr_enable(adapter->espi);
@@ -489,17 +462,15 @@ void t1_interrupts_enable(adapter_t *adapter)
489 } 462 }
490 463
491 /* Enable PCIX & external chip interrupts on ASIC boards. */ 464 /* Enable PCIX & external chip interrupts on ASIC boards. */
492 if (t1_is_asic(adapter)) { 465 pl_intr = readl(adapter->regs + A_PL_ENABLE);
493 u32 pl_intr = t1_read_reg_4(adapter, A_PL_ENABLE);
494 466
495 /* PCI-X interrupts */ 467 /* PCI-X interrupts */
496 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 468 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
497 0xffffffff); 469 0xffffffff);
498 470
499 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; 471 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
500 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; 472 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
501 t1_write_reg_4(adapter, A_PL_ENABLE, pl_intr); 473 writel(pl_intr, adapter->regs + A_PL_ENABLE);
502 }
503} 474}
504 475
505/* Disables all interrupts. */ 476/* Disables all interrupts. */
@@ -508,7 +479,6 @@ void t1_interrupts_disable(adapter_t* adapter)
508 unsigned int i; 479 unsigned int i;
509 480
510 t1_sge_intr_disable(adapter->sge); 481 t1_sge_intr_disable(adapter->sge);
511 t1_tp_intr_disable(adapter->tp);
512 if (adapter->espi) 482 if (adapter->espi)
513 t1_espi_intr_disable(adapter->espi); 483 t1_espi_intr_disable(adapter->espi);
514 484
@@ -519,8 +489,7 @@ void t1_interrupts_disable(adapter_t* adapter)
519 } 489 }
520 490
521 /* Disable PCIX & external chip interrupts. */ 491 /* Disable PCIX & external chip interrupts. */
522 if (t1_is_asic(adapter)) 492 writel(0, adapter->regs + A_PL_ENABLE);
523 t1_write_reg_4(adapter, A_PL_ENABLE, 0);
524 493
525 /* PCI-X interrupts */ 494 /* PCI-X interrupts */
526 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); 495 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -532,9 +501,10 @@ void t1_interrupts_disable(adapter_t* adapter)
532void t1_interrupts_clear(adapter_t* adapter) 501void t1_interrupts_clear(adapter_t* adapter)
533{ 502{
534 unsigned int i; 503 unsigned int i;
504 u32 pl_intr;
505
535 506
536 t1_sge_intr_clear(adapter->sge); 507 t1_sge_intr_clear(adapter->sge);
537 t1_tp_intr_clear(adapter->tp);
538 if (adapter->espi) 508 if (adapter->espi)
539 t1_espi_intr_clear(adapter->espi); 509 t1_espi_intr_clear(adapter->espi);
540 510
@@ -545,12 +515,10 @@ void t1_interrupts_clear(adapter_t* adapter)
545 } 515 }
546 516
547 /* Enable interrupts for external devices. */ 517 /* Enable interrupts for external devices. */
548 if (t1_is_asic(adapter)) { 518 pl_intr = readl(adapter->regs + A_PL_CAUSE);
549 u32 pl_intr = t1_read_reg_4(adapter, A_PL_CAUSE);
550 519
551 t1_write_reg_4(adapter, A_PL_CAUSE, 520 writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
552 pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX); 521 adapter->regs + A_PL_CAUSE);
553 }
554 522
555 /* PCI-X interrupts */ 523 /* PCI-X interrupts */
556 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); 524 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
@@ -559,17 +527,15 @@ void t1_interrupts_clear(adapter_t* adapter)
559/* 527/*
560 * Slow path interrupt handler for ASICs. 528 * Slow path interrupt handler for ASICs.
561 */ 529 */
562static int asic_slow_intr(adapter_t *adapter) 530int t1_slow_intr_handler(adapter_t *adapter)
563{ 531{
564 u32 cause = t1_read_reg_4(adapter, A_PL_CAUSE); 532 u32 cause = readl(adapter->regs + A_PL_CAUSE);
565 533
566 cause &= adapter->slow_intr_mask; 534 cause &= adapter->slow_intr_mask;
567 if (!cause) 535 if (!cause)
568 return 0; 536 return 0;
569 if (cause & F_PL_INTR_SGE_ERR) 537 if (cause & F_PL_INTR_SGE_ERR)
570 t1_sge_intr_error_handler(adapter->sge); 538 t1_sge_intr_error_handler(adapter->sge);
571 if (cause & F_PL_INTR_TP)
572 t1_tp_intr_handler(adapter->tp);
573 if (cause & F_PL_INTR_ESPI) 539 if (cause & F_PL_INTR_ESPI)
574 t1_espi_intr_handler(adapter->espi); 540 t1_espi_intr_handler(adapter->espi);
575 if (cause & F_PL_INTR_PCIX) 541 if (cause & F_PL_INTR_PCIX)
@@ -578,41 +544,82 @@ static int asic_slow_intr(adapter_t *adapter)
578 t1_elmer0_ext_intr(adapter); 544 t1_elmer0_ext_intr(adapter);
579 545
580 /* Clear the interrupts just processed. */ 546 /* Clear the interrupts just processed. */
581 t1_write_reg_4(adapter, A_PL_CAUSE, cause); 547 writel(cause, adapter->regs + A_PL_CAUSE);
582 (void)t1_read_reg_4(adapter, A_PL_CAUSE); /* flush writes */ 548 (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
583 return 1; 549 return 1;
584} 550}
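
t1_slow_intr_handler() has the canonical slow-path shape, and the trailing readl() matters: PCI writes are posted, so reading A_PL_CAUSE back forces the acknowledgement out to the device before the handler returns. In outline (dispatch_each_set_bit() is a stand-in for the if-cascade above, not a real helper):

        cause = readl(adapter->regs + A_PL_CAUSE) & adapter->slow_intr_mask;
        if (!cause)
                return 0;                       /* not ours */
        dispatch_each_set_bit(cause);           /* SGE, ESPI, PCI-X, external */
        writel(cause, adapter->regs + A_PL_CAUSE);  /* ack what we handled */
        (void)readl(adapter->regs + A_PL_CAUSE);    /* flush the posted write */
        return 1;
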
585 551
586int t1_slow_intr_handler(adapter_t *adapter) 552/* Pause deadlock avoidance parameters */
553#define DROP_MSEC 16
554#define DROP_PKTS_CNT 1
555
556static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
557{
558 u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
559
560 if (enable)
561 val |= csum_bit;
562 else
563 val &= ~csum_bit;
564 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
565}
566
567void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
587{ 568{
588 return asic_slow_intr(adapter); 569 set_csum_offload(adapter, F_IP_CSUM, enable);
589} 570}
590 571
591/* Power sequencing is a work-around for Intel's XPAKs. */ 572void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
592static void power_sequence_xpak(adapter_t* adapter)
593{ 573{
594 u32 mod_detect; 574 set_csum_offload(adapter, F_UDP_CSUM, enable);
595 u32 gpo; 575}
596 576
597 /* Check for XPAK */ 577void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
598 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); 578{
599 if (!(ELMER0_GP_BIT5 & mod_detect)) { 579 set_csum_offload(adapter, F_TCP_CSUM, enable);
600 /* XPAK is present */ 580}
601 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); 581
602 gpo |= ELMER0_GP_BIT18; 582static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
603 t1_tpi_write(adapter, A_ELMER0_GPO, gpo); 583{
584 u32 val;
585
586 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
587 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
588 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
589 F_TP_IN_ESPI_CHECK_TCP_CSUM;
590 writel(val, adapter->regs + A_TP_IN_CONFIG);
591 writel(F_TP_OUT_CSPI_CPL |
592 F_TP_OUT_ESPI_ETHERNET |
593 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
594 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
595 adapter->regs + A_TP_OUT_CONFIG);
596
597 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
598 val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
599 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
600
601 /*
602 * Enable pause frame deadlock prevention.
603 */
604 if (is_T2(adapter)) {
605 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
606
607 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
608 V_DROP_TICKS_CNT(drop_ticks) |
609 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
610 adapter->regs + A_TP_TX_DROP_CONFIG);
604 } 611 }
612
613 writel(F_TP_RESET, adapter->regs + A_TP_RESET);
605} 614}
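
The pause-deadlock knobs translate directly into TP core-clock ticks: drop_ticks = DROP_MSEC * (tp_clk / 1000). Assuming tp_clk is the core clock in Hz and taking a hypothetical 125 MHz part:

        drop_ticks = 16 * (125000000 / 1000) = 2000000 ticks  (16 ms)

with V_NUM_PKTS_DROPPED programmed from DROP_PKTS_CNT = 1.
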
606 615
607int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, 616int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
608 struct adapter_params *p) 617 struct adapter_params *p)
609{ 618{
610 p->chip_version = bi->chip_term; 619 p->chip_version = bi->chip_term;
611 p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
612 if (p->chip_version == CHBT_TERM_T1 || 620 if (p->chip_version == CHBT_TERM_T1 ||
613 p->chip_version == CHBT_TERM_T2 || 621 p->chip_version == CHBT_TERM_T2) {
614 p->chip_version == CHBT_TERM_FPGA) { 622 u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
615 u32 val = t1_read_reg_4(adapter, A_TP_PC_CONFIG);
616 623
617 val = G_TP_PC_REV(val); 624 val = G_TP_PC_REV(val);
618 if (val == 2) 625 if (val == 2)
@@ -633,23 +640,11 @@ int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
633static int board_init(adapter_t *adapter, const struct board_info *bi) 640static int board_init(adapter_t *adapter, const struct board_info *bi)
634{ 641{
635 switch (bi->board) { 642 switch (bi->board) {
636 case CHBT_BOARD_8000:
637 case CHBT_BOARD_N110: 643 case CHBT_BOARD_N110:
638 case CHBT_BOARD_N210: 644 case CHBT_BOARD_N210:
639 case CHBT_BOARD_CHT210: 645 writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
640 case CHBT_BOARD_COUGAR:
641 t1_tpi_par(adapter, 0xf);
642 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
643 break; 647 break;
644 case CHBT_BOARD_CHT110:
645 t1_tpi_par(adapter, 0xf);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
647
648 /* TBD XXX Might not need. This fixes a problem
649 * described in the Intel SR XPAK errata.
650 */
651 power_sequence_xpak(adapter);
652 break;
653 } 648 }
654 return 0; 649 return 0;
655} 650}
@@ -663,20 +658,19 @@ int t1_init_hw_modules(adapter_t *adapter)
663 int err = -EIO; 658 int err = -EIO;
664 const struct board_info *bi = board_info(adapter); 659 const struct board_info *bi = board_info(adapter);
665 660
666 if (!adapter->mc4) { 661 if (!bi->clock_mc4) {
667 u32 val = t1_read_reg_4(adapter, A_MC4_CFG); 662 u32 val = readl(adapter->regs + A_MC4_CFG);
668 663
669 t1_write_reg_4(adapter, A_MC4_CFG, val | F_READY | F_MC4_SLOW); 664 writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
670 t1_write_reg_4(adapter, A_MC5_CONFIG, 665 writel(F_M_BUS_ENABLE | F_TCAM_RESET,
671 F_M_BUS_ENABLE | F_TCAM_RESET); 666 adapter->regs + A_MC5_CONFIG);
672 } 667 }
673 668
674 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, 669 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
675 bi->espi_nports)) 670 bi->espi_nports))
676 goto out_err; 671 goto out_err;
677 672
678 if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) 673 t1_tp_reset(adapter, bi->clock_core);
679 goto out_err;
680 674
681 err = t1_sge_configure(adapter->sge, &adapter->params.sge); 675 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
682 if (err) 676 if (err)
@@ -690,7 +684,7 @@ int t1_init_hw_modules(adapter_t *adapter)
690/* 684/*
691 * Determine a card's PCI mode. 685 * Determine a card's PCI mode.
692 */ 686 */
693static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p) 687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
694{ 688{
695 static unsigned short speed_map[] = { 33, 66, 100, 133 }; 689 static unsigned short speed_map[] = { 33, 66, 100, 133 };
696 u32 pci_mode; 690 u32 pci_mode;
@@ -720,8 +714,6 @@ void t1_free_sw_modules(adapter_t *adapter)
720 714
721 if (adapter->sge) 715 if (adapter->sge)
722 t1_sge_destroy(adapter->sge); 716 t1_sge_destroy(adapter->sge);
723 if (adapter->tp)
724 t1_tp_destroy(adapter->tp);
725 if (adapter->espi) 717 if (adapter->espi)
726 t1_espi_destroy(adapter->espi); 718 t1_espi_destroy(adapter->espi);
727} 719}
@@ -764,21 +756,12 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
764 goto error; 756 goto error;
765 } 757 }
766 758
767
768
769 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { 759 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
770 CH_ERR("%s: ESPI initialization failed\n", 760 CH_ERR("%s: ESPI initialization failed\n",
771 adapter->name); 761 adapter->name);
772 goto error; 762 goto error;
773 } 763 }
774 764
775 adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
776 if (!adapter->tp) {
777 CH_ERR("%s: TP initialization failed\n",
778 adapter->name);
779 goto error;
780 }
781
782 board_init(adapter, bi); 765 board_init(adapter, bi);
783 bi->mdio_ops->init(adapter, bi); 766 bi->mdio_ops->init(adapter, bi);
784 if (bi->gphy->reset) 767 if (bi->gphy->reset)
@@ -810,14 +793,12 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
810 * Get the port's MAC addresses either from the EEPROM if one 793 * Get the port's MAC addresses either from the EEPROM if one
811 * exists or the one hardcoded in the MAC. 794 * exists or the one hardcoded in the MAC.
812 */ 795 */
813 if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) 796 if (vpd_macaddress_get(adapter, i, hw_addr)) {
814 mac->ops->macaddress_get(mac, hw_addr);
815 else if (vpd_macaddress_get(adapter, i, hw_addr)) {
816 CH_ERR("%s: could not read MAC address from VPD ROM\n", 797 CH_ERR("%s: could not read MAC address from VPD ROM\n",
817 port_name(adapter, i)); 798 adapter->port[i].dev->name);
818 goto error; 799 goto error;
819 } 800 }
820 t1_set_hw_addr(adapter, i, hw_addr); 801 memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
821 init_link_config(&adapter->port[i].link_config, bi); 802 init_link_config(&adapter->port[i].link_config, bi);
822 } 803 }
823 804
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
index 98352bdda89b..81816c2b708a 100644
--- a/drivers/net/chelsio/suni1x10gexp_regs.h
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -1,8 +1,8 @@
1/***************************************************************************** 1/*****************************************************************************
2 * * 2 * *
3 * File: suni1x10gexp_regs.h * 3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.4 $ * 4 * $Revision: 1.9 $ *
5 * $Date: 2005/03/23 07:15:59 $ * 5 * $Date: 2005/06/22 00:17:04 $ *
6 * Description: * 6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. * 7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. * 8 * part of the Chelsio 10Gb Ethernet Driver. *
@@ -21,24 +21,16 @@
21 * * 21 * *
22 * http://www.chelsio.com * 22 * http://www.chelsio.com *
23 * * 23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com * 24 * Maintainers: maintainers@chelsio.com *
28 * * 25 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> * 26 * Authors: PMC/SIERRA *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * * 27 * *
36 * History: * 28 * History: *
37 * * 29 * *
38 ****************************************************************************/ 30 ****************************************************************************/
39 31
40#ifndef _SUNI1x10GEXP_REGS_H 32#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
41#define _SUNI1x10GEXP_REGS_H 33#define _CXGB_SUNI1x10GEXP_REGS_H_
42 34
43/******************************************************************************/ 35/******************************************************************************/
44/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ 36/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
@@ -217,5 +209,5 @@
217#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 209#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
218#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 210#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
219 211
220#endif /* _SUNI1x10GEXP_REGS_H */ 212#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
221 213
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
deleted file mode 100644
index 9ad5c539fd28..000000000000
--- a/drivers/net/chelsio/tp.c
+++ /dev/null
@@ -1,188 +0,0 @@
1/*****************************************************************************
2 * *
3 * File: tp.c *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * Core ASIC Management. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "tp.h"
43
44struct petp {
45 adapter_t *adapter;
46};
47
48/* Pause deadlock avoidance parameters */
49#define DROP_MSEC 16
50#define DROP_PKTS_CNT 1
51
52
53static void tp_init(adapter_t *ap, const struct tp_params *p,
54 unsigned int tp_clk)
55{
56 if (t1_is_asic(ap)) {
57 u32 val;
58
59 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
60 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
61 if (!p->pm_size)
62 val |= F_OFFLOAD_DISABLE;
63 else
64 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
65 F_TP_IN_ESPI_CHECK_TCP_CSUM;
66 t1_write_reg_4(ap, A_TP_IN_CONFIG, val);
67 t1_write_reg_4(ap, A_TP_OUT_CONFIG, F_TP_OUT_CSPI_CPL |
68 F_TP_OUT_ESPI_ETHERNET |
69 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
70 F_TP_OUT_ESPI_GENERATE_TCP_CSUM);
71 t1_write_reg_4(ap, A_TP_GLOBAL_CONFIG, V_IP_TTL(64) |
72 F_PATH_MTU /* IP DF bit */ |
73 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
74 V_SYN_COOKIE_PARAMETER(29));
75
76 /*
77 * Enable pause frame deadlock prevention.
78 */
79 if (is_T2(ap)) {
80 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
81
82 t1_write_reg_4(ap, A_TP_TX_DROP_CONFIG,
83 F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
84 V_DROP_TICKS_CNT(drop_ticks) |
85 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT));
86 }
87
88 }
89}
90
91void t1_tp_destroy(struct petp *tp)
92{
93 kfree(tp);
94}
95
96struct petp * __devinit t1_tp_create(adapter_t *adapter, struct tp_params *p)
97{
98 struct petp *tp = kmalloc(sizeof(*tp), GFP_KERNEL);
99 if (!tp)
100 return NULL;
101 memset(tp, 0, sizeof(*tp));
102 tp->adapter = adapter;
103
104 return tp;
105}
106
107void t1_tp_intr_enable(struct petp *tp)
108{
109 u32 tp_intr = t1_read_reg_4(tp->adapter, A_PL_ENABLE);
110
111 {
112 /* We don't use any TP interrupts */
113 t1_write_reg_4(tp->adapter, A_TP_INT_ENABLE, 0);
114 t1_write_reg_4(tp->adapter, A_PL_ENABLE,
115 tp_intr | F_PL_INTR_TP);
116 }
117}
118
119void t1_tp_intr_disable(struct petp *tp)
120{
121 u32 tp_intr = t1_read_reg_4(tp->adapter, A_PL_ENABLE);
122
123 {
124 t1_write_reg_4(tp->adapter, A_TP_INT_ENABLE, 0);
125 t1_write_reg_4(tp->adapter, A_PL_ENABLE,
126 tp_intr & ~F_PL_INTR_TP);
127 }
128}
129
130void t1_tp_intr_clear(struct petp *tp)
131{
132 t1_write_reg_4(tp->adapter, A_TP_INT_CAUSE, 0xffffffff);
133 t1_write_reg_4(tp->adapter, A_PL_CAUSE, F_PL_INTR_TP);
134}
135
136int t1_tp_intr_handler(struct petp *tp)
137{
138 u32 cause;
139
140
141 cause = t1_read_reg_4(tp->adapter, A_TP_INT_CAUSE);
142 t1_write_reg_4(tp->adapter, A_TP_INT_CAUSE, cause);
143 return 0;
144}
145
146static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
147{
148 u32 val = t1_read_reg_4(tp->adapter, A_TP_GLOBAL_CONFIG);
149
150 if (enable)
151 val |= csum_bit;
152 else
153 val &= ~csum_bit;
154 t1_write_reg_4(tp->adapter, A_TP_GLOBAL_CONFIG, val);
155}
156
157void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
158{
159 set_csum_offload(tp, F_IP_CSUM, enable);
160}
161
162void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
163{
164 set_csum_offload(tp, F_UDP_CSUM, enable);
165}
166
167void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
168{
169 set_csum_offload(tp, F_TCP_CSUM, enable);
170}
171
172/*
173 * Initialize TP state. tp_params contains initial settings for some TP
174 * parameters, particularly the one-time PM and CM settings.
175 */
176int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
177{
178 int busy = 0;
179 adapter_t *adapter = tp->adapter;
180
181 tp_init(adapter, p, tp_clk);
182 if (!busy)
183 t1_write_reg_4(adapter, A_TP_RESET, F_TP_RESET);
184 else
185 CH_ERR("%s: TP initialization timed out\n",
186 adapter->name);
187 return busy;
188}
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
deleted file mode 100644
index 2ebc5c0d62e7..000000000000
--- a/drivers/net/chelsio/tp.h
+++ /dev/null
@@ -1,110 +0,0 @@
1/*****************************************************************************
2 * *
3 * File: tp.h *
4 * $Revision: 1.3 $ *
5 * $Date: 2005/03/23 07:15:59 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef CHELSIO_TP_H
40#define CHELSIO_TP_H
41
42#include "common.h"
43
44#define TP_MAX_RX_COALESCING_SIZE 16224U
45
46struct tp_mib_statistics {
47
48 /* IP */
49 u32 ipInReceive_hi;
50 u32 ipInReceive_lo;
51 u32 ipInHdrErrors_hi;
52 u32 ipInHdrErrors_lo;
53 u32 ipInAddrErrors_hi;
54 u32 ipInAddrErrors_lo;
55 u32 ipInUnknownProtos_hi;
56 u32 ipInUnknownProtos_lo;
57 u32 ipInDiscards_hi;
58 u32 ipInDiscards_lo;
59 u32 ipInDelivers_hi;
60 u32 ipInDelivers_lo;
61 u32 ipOutRequests_hi;
62 u32 ipOutRequests_lo;
63 u32 ipOutDiscards_hi;
64 u32 ipOutDiscards_lo;
65 u32 ipOutNoRoutes_hi;
66 u32 ipOutNoRoutes_lo;
67 u32 ipReasmTimeout;
68 u32 ipReasmReqds;
69 u32 ipReasmOKs;
70 u32 ipReasmFails;
71
72 u32 reserved[8];
73
74 /* TCP */
75 u32 tcpActiveOpens;
76 u32 tcpPassiveOpens;
77 u32 tcpAttemptFails;
78 u32 tcpEstabResets;
79 u32 tcpOutRsts;
80 u32 tcpCurrEstab;
81 u32 tcpInSegs_hi;
82 u32 tcpInSegs_lo;
83 u32 tcpOutSegs_hi;
84 u32 tcpOutSegs_lo;
85 u32 tcpRetransSeg_hi;
86 u32 tcpRetransSeg_lo;
87 u32 tcpInErrs_hi;
88 u32 tcpInErrs_lo;
89 u32 tcpRtoMin;
90 u32 tcpRtoMax;
91};
92
93struct petp;
94struct tp_params;
95
96struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
97void t1_tp_destroy(struct petp *tp);
98
99void t1_tp_intr_disable(struct petp *tp);
100void t1_tp_intr_enable(struct petp *tp);
101void t1_tp_intr_clear(struct petp *tp);
102int t1_tp_intr_handler(struct petp *tp);
103
104void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
105void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
106void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
107void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
108int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
109int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
110#endif