Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--  drivers/net/ixgb/Makefile          35
-rw-r--r--  drivers/net/ixgb/ixgb.h           217
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c        607
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h        106
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c   758
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c       1262
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h        801
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h        53
-rw-r--r--  drivers/net/ixgb/ixgb_main.c     2332
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h      63
-rw-r--r--  drivers/net/ixgb/ixgb_param.c     469
11 files changed, 6703 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
new file mode 100644
index 00000000000..0b20c5e62ff
--- /dev/null
+++ b/drivers/net/ixgb/Makefile
@@ -0,0 +1,35 @@
################################################################################
#
# Intel PRO/10GbE Linux driver
# Copyright(c) 1999 - 2008 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################

#
# Makefile for the Intel(R) PRO/10GbE ethernet driver
#

obj-$(CONFIG_IXGB) += ixgb.o

ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
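# Build note (editor's sketch, not part of the original patch): with
# CONFIG_IXGB=m selected in the kernel configuration, this kbuild fragment is
# typically exercised from the top of a configured kernel tree with something
# like:
#
#   make M=drivers/net/ixgb modules
#
# which compiles the five objects above and links them into ixgb.ko.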
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
new file mode 100644
index 00000000000..49e8408f05f
--- /dev/null
+++ b/drivers/net/ixgb/ixgb.h
@@ -0,0 +1,217 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _IXGB_H_ | ||
30 | #define _IXGB_H_ | ||
31 | |||
32 | #include <linux/stddef.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <asm/byteorder.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/mm.h> | ||
38 | #include <linux/errno.h> | ||
39 | #include <linux/ioport.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <linux/skbuff.h> | ||
45 | #include <linux/delay.h> | ||
46 | #include <linux/timer.h> | ||
47 | #include <linux/slab.h> | ||
48 | #include <linux/vmalloc.h> | ||
49 | #include <linux/interrupt.h> | ||
50 | #include <linux/string.h> | ||
51 | #include <linux/pagemap.h> | ||
52 | #include <linux/dma-mapping.h> | ||
53 | #include <linux/bitops.h> | ||
54 | #include <asm/io.h> | ||
55 | #include <asm/irq.h> | ||
56 | #include <linux/capability.h> | ||
57 | #include <linux/in.h> | ||
58 | #include <linux/ip.h> | ||
59 | #include <linux/tcp.h> | ||
60 | #include <linux/udp.h> | ||
61 | #include <net/pkt_sched.h> | ||
62 | #include <linux/list.h> | ||
63 | #include <linux/reboot.h> | ||
64 | #include <net/checksum.h> | ||
65 | |||
66 | #include <linux/ethtool.h> | ||
67 | #include <linux/if_vlan.h> | ||
68 | |||
69 | #define BAR_0 0 | ||
70 | #define BAR_1 1 | ||
71 | #define BAR_5 5 | ||
72 | |||
73 | struct ixgb_adapter; | ||
74 | #include "ixgb_hw.h" | ||
75 | #include "ixgb_ee.h" | ||
76 | #include "ixgb_ids.h" | ||
77 | |||
78 | #define PFX "ixgb: " | ||
79 | |||
80 | #ifdef _DEBUG_DRIVER_ | ||
81 | #define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args) | ||
82 | #else | ||
83 | #define IXGB_DBG(fmt, args...) \ | ||
84 | do { \ | ||
85 | if (0) \ | ||
86 | printk(KERN_DEBUG PFX fmt, ##args); \ | ||
87 | } while (0) | ||
88 | #endif | ||
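/*
 * Usage sketch (hypothetical call site, not part of the patch):
 *
 *	IXGB_DBG("tx ring full, next_to_use = %u\n", next_to_use);
 *
 * When _DEBUG_DRIVER_ is not defined, the "if (0)" wrapper above still lets
 * the compiler type-check the printk() format arguments, but the call is
 * discarded as dead code, so the macro costs nothing in production builds.
 */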
89 | |||
90 | /* TX/RX descriptor defines */ | ||
91 | #define DEFAULT_TXD 256 | ||
92 | #define MAX_TXD 4096 | ||
93 | #define MIN_TXD 64 | ||
94 | |||
/* The hardware cannot reliably support more than 512 descriptors in its
 * descriptor cache; otherwise the ring may become unreliable under heavy
 * receive load. */
98 | #define DEFAULT_RXD 512 | ||
99 | #define MAX_RXD 512 | ||
100 | #define MIN_RXD 64 | ||
101 | |||
102 | /* Supported Rx Buffer Sizes */ | ||
103 | #define IXGB_RXBUFFER_2048 2048 | ||
104 | #define IXGB_RXBUFFER_4096 4096 | ||
105 | #define IXGB_RXBUFFER_8192 8192 | ||
106 | #define IXGB_RXBUFFER_16384 16384 | ||
107 | |||
108 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | ||
109 | #define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */ | ||
110 | |||
111 | /* wrapper around a pointer to a socket buffer, | ||
112 | * so a DMA handle can be stored along with the buffer */ | ||
113 | struct ixgb_buffer { | ||
114 | struct sk_buff *skb; | ||
115 | dma_addr_t dma; | ||
116 | unsigned long time_stamp; | ||
117 | u16 length; | ||
118 | u16 next_to_watch; | ||
119 | u16 mapped_as_page; | ||
120 | }; | ||
121 | |||
122 | struct ixgb_desc_ring { | ||
123 | /* pointer to the descriptor ring memory */ | ||
124 | void *desc; | ||
125 | /* physical address of the descriptor ring */ | ||
126 | dma_addr_t dma; | ||
127 | /* length of descriptor ring in bytes */ | ||
128 | unsigned int size; | ||
129 | /* number of descriptors in the ring */ | ||
130 | unsigned int count; | ||
131 | /* next descriptor to associate a buffer with */ | ||
132 | unsigned int next_to_use; | ||
133 | /* next descriptor to check for DD status bit */ | ||
134 | unsigned int next_to_clean; | ||
135 | /* array of buffer information structs */ | ||
136 | struct ixgb_buffer *buffer_info; | ||
137 | }; | ||
138 | |||
139 | #define IXGB_DESC_UNUSED(R) \ | ||
140 | ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ | ||
141 | (R)->next_to_clean - (R)->next_to_use - 1) | ||
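/*
 * Worked example (illustrative values): for a ring with count = 256,
 * next_to_use = 250 and next_to_clean = 10, next_to_clean is not greater
 * than next_to_use, so count is added back in and IXGB_DESC_UNUSED()
 * evaluates to 256 + 10 - 250 - 1 = 15 descriptors still free for the
 * driver to fill.
 */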
142 | |||
143 | #define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) | ||
144 | #define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc) | ||
145 | #define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc) | ||
146 | #define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc) | ||
147 | |||
148 | /* board specific private data structure */ | ||
149 | |||
150 | struct ixgb_adapter { | ||
151 | struct timer_list watchdog_timer; | ||
152 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | ||
153 | u32 bd_number; | ||
154 | u32 rx_buffer_len; | ||
155 | u32 part_num; | ||
156 | u16 link_speed; | ||
157 | u16 link_duplex; | ||
158 | struct work_struct tx_timeout_task; | ||
159 | |||
160 | /* TX */ | ||
161 | struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; | ||
162 | unsigned int restart_queue; | ||
163 | unsigned long timeo_start; | ||
164 | u32 tx_cmd_type; | ||
165 | u64 hw_csum_tx_good; | ||
166 | u64 hw_csum_tx_error; | ||
167 | u32 tx_int_delay; | ||
168 | u32 tx_timeout_count; | ||
169 | bool tx_int_delay_enable; | ||
170 | bool detect_tx_hung; | ||
171 | |||
172 | /* RX */ | ||
173 | struct ixgb_desc_ring rx_ring; | ||
174 | u64 hw_csum_rx_error; | ||
175 | u64 hw_csum_rx_good; | ||
176 | u32 rx_int_delay; | ||
177 | bool rx_csum; | ||
178 | |||
179 | /* OS defined structs */ | ||
180 | struct napi_struct napi; | ||
181 | struct net_device *netdev; | ||
182 | struct pci_dev *pdev; | ||
183 | |||
184 | /* structs defined in ixgb_hw.h */ | ||
185 | struct ixgb_hw hw; | ||
186 | u16 msg_enable; | ||
187 | struct ixgb_hw_stats stats; | ||
188 | u32 alloc_rx_buff_failed; | ||
189 | bool have_msi; | ||
190 | unsigned long flags; | ||
191 | }; | ||
192 | |||
193 | enum ixgb_state_t { | ||
194 | /* TBD | ||
195 | __IXGB_TESTING, | ||
196 | __IXGB_RESETTING, | ||
197 | */ | ||
198 | __IXGB_DOWN | ||
199 | }; | ||
200 | |||
201 | /* Exported from other modules */ | ||
202 | extern void ixgb_check_options(struct ixgb_adapter *adapter); | ||
203 | extern void ixgb_set_ethtool_ops(struct net_device *netdev); | ||
204 | extern char ixgb_driver_name[]; | ||
205 | extern const char ixgb_driver_version[]; | ||
206 | |||
207 | extern int ixgb_up(struct ixgb_adapter *adapter); | ||
208 | extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); | ||
209 | extern void ixgb_reset(struct ixgb_adapter *adapter); | ||
210 | extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); | ||
211 | extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); | ||
212 | extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); | ||
213 | extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); | ||
214 | extern void ixgb_update_stats(struct ixgb_adapter *adapter); | ||
215 | |||
216 | |||
217 | #endif /* _IXGB_H_ */ | ||
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
new file mode 100644
index 00000000000..38b362b6785
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -0,0 +1,607 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
31 | #include "ixgb_hw.h" | ||
32 | #include "ixgb_ee.h" | ||
33 | /* Local prototypes */ | ||
34 | static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); | ||
35 | |||
36 | static void ixgb_shift_out_bits(struct ixgb_hw *hw, | ||
37 | u16 data, | ||
38 | u16 count); | ||
39 | static void ixgb_standby_eeprom(struct ixgb_hw *hw); | ||
40 | |||
41 | static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); | ||
42 | |||
43 | static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); | ||
44 | |||
45 | /****************************************************************************** | ||
46 | * Raises the EEPROM's clock input. | ||
47 | * | ||
48 | * hw - Struct containing variables accessed by shared code | ||
49 | * eecd_reg - EECD's current value | ||
50 | *****************************************************************************/ | ||
51 | static void | ||
52 | ixgb_raise_clock(struct ixgb_hw *hw, | ||
53 | u32 *eecd_reg) | ||
54 | { | ||
55 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then | ||
56 | * wait 50 microseconds. | ||
57 | */ | ||
58 | *eecd_reg = *eecd_reg | IXGB_EECD_SK; | ||
59 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); | ||
60 | IXGB_WRITE_FLUSH(hw); | ||
61 | udelay(50); | ||
62 | } | ||
63 | |||
64 | /****************************************************************************** | ||
65 | * Lowers the EEPROM's clock input. | ||
66 | * | ||
67 | * hw - Struct containing variables accessed by shared code | ||
68 | * eecd_reg - EECD's current value | ||
69 | *****************************************************************************/ | ||
70 | static void | ||
71 | ixgb_lower_clock(struct ixgb_hw *hw, | ||
72 | u32 *eecd_reg) | ||
73 | { | ||
74 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then | ||
75 | * wait 50 microseconds. | ||
76 | */ | ||
77 | *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; | ||
78 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); | ||
79 | IXGB_WRITE_FLUSH(hw); | ||
80 | udelay(50); | ||
81 | } | ||
82 | |||
83 | /****************************************************************************** | ||
84 | * Shift data bits out to the EEPROM. | ||
85 | * | ||
86 | * hw - Struct containing variables accessed by shared code | ||
87 | * data - data to send to the EEPROM | ||
88 | * count - number of bits to shift out | ||
89 | *****************************************************************************/ | ||
90 | static void | ||
91 | ixgb_shift_out_bits(struct ixgb_hw *hw, | ||
92 | u16 data, | ||
93 | u16 count) | ||
94 | { | ||
95 | u32 eecd_reg; | ||
96 | u32 mask; | ||
97 | |||
98 | /* We need to shift "count" bits out to the EEPROM. So, value in the | ||
99 | * "data" parameter will be shifted out to the EEPROM one bit at a time. | ||
100 | * In order to do this, "data" must be broken down into bits. | ||
101 | */ | ||
102 | mask = 0x01 << (count - 1); | ||
103 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
104 | eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); | ||
105 | do { | ||
106 | /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", | ||
107 | * and then raising and then lowering the clock (the SK bit controls | ||
108 | * the clock input to the EEPROM). A "0" is shifted out to the EEPROM | ||
109 | * by setting "DI" to "0" and then raising and then lowering the clock. | ||
110 | */ | ||
111 | eecd_reg &= ~IXGB_EECD_DI; | ||
112 | |||
113 | if (data & mask) | ||
114 | eecd_reg |= IXGB_EECD_DI; | ||
115 | |||
116 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
117 | IXGB_WRITE_FLUSH(hw); | ||
118 | |||
119 | udelay(50); | ||
120 | |||
121 | ixgb_raise_clock(hw, &eecd_reg); | ||
122 | ixgb_lower_clock(hw, &eecd_reg); | ||
123 | |||
124 | mask = mask >> 1; | ||
125 | |||
126 | } while (mask); | ||
127 | |||
128 | /* We leave the "DI" bit set to "0" when we leave this routine. */ | ||
129 | eecd_reg &= ~IXGB_EECD_DI; | ||
130 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
131 | } | ||
132 | |||
133 | /****************************************************************************** | ||
134 | * Shift data bits in from the EEPROM | ||
135 | * | ||
136 | * hw - Struct containing variables accessed by shared code | ||
137 | *****************************************************************************/ | ||
138 | static u16 | ||
139 | ixgb_shift_in_bits(struct ixgb_hw *hw) | ||
140 | { | ||
141 | u32 eecd_reg; | ||
142 | u32 i; | ||
143 | u16 data; | ||
144 | |||
145 | /* In order to read a register from the EEPROM, we need to shift 16 bits | ||
146 | * in from the EEPROM. Bits are "shifted in" by raising the clock input to | ||
147 | * the EEPROM (setting the SK bit), and then reading the value of the "DO" | ||
148 | * bit. During this "shifting in" process the "DI" bit should always be | ||
149 | * clear.. | ||
150 | */ | ||
151 | |||
152 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
153 | |||
154 | eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); | ||
155 | data = 0; | ||
156 | |||
157 | for (i = 0; i < 16; i++) { | ||
158 | data = data << 1; | ||
159 | ixgb_raise_clock(hw, &eecd_reg); | ||
160 | |||
161 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
162 | |||
163 | eecd_reg &= ~(IXGB_EECD_DI); | ||
164 | if (eecd_reg & IXGB_EECD_DO) | ||
165 | data |= 1; | ||
166 | |||
167 | ixgb_lower_clock(hw, &eecd_reg); | ||
168 | } | ||
169 | |||
170 | return data; | ||
171 | } | ||
172 | |||
173 | /****************************************************************************** | ||
174 | * Prepares EEPROM for access | ||
175 | * | ||
176 | * hw - Struct containing variables accessed by shared code | ||
177 | * | ||
178 | * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This | ||
179 | * function should be called before issuing a command to the EEPROM. | ||
180 | *****************************************************************************/ | ||
181 | static void | ||
182 | ixgb_setup_eeprom(struct ixgb_hw *hw) | ||
183 | { | ||
184 | u32 eecd_reg; | ||
185 | |||
186 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
187 | |||
188 | /* Clear SK and DI */ | ||
189 | eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI); | ||
190 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
191 | |||
192 | /* Set CS */ | ||
193 | eecd_reg |= IXGB_EECD_CS; | ||
194 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
195 | } | ||
196 | |||
197 | /****************************************************************************** | ||
198 | * Returns EEPROM to a "standby" state | ||
199 | * | ||
200 | * hw - Struct containing variables accessed by shared code | ||
201 | *****************************************************************************/ | ||
202 | static void | ||
203 | ixgb_standby_eeprom(struct ixgb_hw *hw) | ||
204 | { | ||
205 | u32 eecd_reg; | ||
206 | |||
207 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
208 | |||
209 | /* Deselect EEPROM */ | ||
210 | eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); | ||
211 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
212 | IXGB_WRITE_FLUSH(hw); | ||
213 | udelay(50); | ||
214 | |||
215 | /* Clock high */ | ||
216 | eecd_reg |= IXGB_EECD_SK; | ||
217 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
218 | IXGB_WRITE_FLUSH(hw); | ||
219 | udelay(50); | ||
220 | |||
221 | /* Select EEPROM */ | ||
222 | eecd_reg |= IXGB_EECD_CS; | ||
223 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
224 | IXGB_WRITE_FLUSH(hw); | ||
225 | udelay(50); | ||
226 | |||
227 | /* Clock low */ | ||
228 | eecd_reg &= ~IXGB_EECD_SK; | ||
229 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
230 | IXGB_WRITE_FLUSH(hw); | ||
231 | udelay(50); | ||
232 | } | ||
233 | |||
234 | /****************************************************************************** | ||
235 | * Raises then lowers the EEPROM's clock pin | ||
236 | * | ||
237 | * hw - Struct containing variables accessed by shared code | ||
238 | *****************************************************************************/ | ||
239 | static void | ||
240 | ixgb_clock_eeprom(struct ixgb_hw *hw) | ||
241 | { | ||
242 | u32 eecd_reg; | ||
243 | |||
244 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
245 | |||
246 | /* Rising edge of clock */ | ||
247 | eecd_reg |= IXGB_EECD_SK; | ||
248 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
249 | IXGB_WRITE_FLUSH(hw); | ||
250 | udelay(50); | ||
251 | |||
252 | /* Falling edge of clock */ | ||
253 | eecd_reg &= ~IXGB_EECD_SK; | ||
254 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
255 | IXGB_WRITE_FLUSH(hw); | ||
256 | udelay(50); | ||
257 | } | ||
258 | |||
259 | /****************************************************************************** | ||
260 | * Terminates a command by lowering the EEPROM's chip select pin | ||
261 | * | ||
262 | * hw - Struct containing variables accessed by shared code | ||
263 | *****************************************************************************/ | ||
264 | static void | ||
265 | ixgb_cleanup_eeprom(struct ixgb_hw *hw) | ||
266 | { | ||
267 | u32 eecd_reg; | ||
268 | |||
269 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
270 | |||
271 | eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI); | ||
272 | |||
273 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | ||
274 | |||
275 | ixgb_clock_eeprom(hw); | ||
276 | } | ||
277 | |||
278 | /****************************************************************************** | ||
279 | * Waits for the EEPROM to finish the current command. | ||
280 | * | ||
281 | * hw - Struct containing variables accessed by shared code | ||
282 | * | ||
283 | * The command is done when the EEPROM's data out pin goes high. | ||
284 | * | ||
285 | * Returns: | ||
286 | * true: EEPROM data pin is high before timeout. | ||
287 | * false: Time expired. | ||
288 | *****************************************************************************/ | ||
289 | static bool | ||
290 | ixgb_wait_eeprom_command(struct ixgb_hw *hw) | ||
291 | { | ||
292 | u32 eecd_reg; | ||
293 | u32 i; | ||
294 | |||
/* Toggle the CS line. This in effect tells the EEPROM to actually execute
296 | * the command in question. | ||
297 | */ | ||
298 | ixgb_standby_eeprom(hw); | ||
299 | |||
/* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will
301 | * signal that the command has been completed by raising the DO signal. | ||
302 | * If DO does not go high in 10 milliseconds, then error out. | ||
303 | */ | ||
304 | for (i = 0; i < 200; i++) { | ||
305 | eecd_reg = IXGB_READ_REG(hw, EECD); | ||
306 | |||
307 | if (eecd_reg & IXGB_EECD_DO) | ||
308 | return true; | ||
309 | |||
310 | udelay(50); | ||
311 | } | ||
312 | ASSERT(0); | ||
313 | return false; | ||
314 | } | ||
315 | |||
316 | /****************************************************************************** | ||
317 | * Verifies that the EEPROM has a valid checksum | ||
318 | * | ||
319 | * hw - Struct containing variables accessed by shared code | ||
320 | * | ||
321 | * Reads the first 64 16 bit words of the EEPROM and sums the values read. | ||
322 | * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is | ||
323 | * valid. | ||
324 | * | ||
325 | * Returns: | ||
326 | * true: Checksum is valid | ||
327 | * false: Checksum is not valid. | ||
328 | *****************************************************************************/ | ||
329 | bool | ||
330 | ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) | ||
331 | { | ||
332 | u16 checksum = 0; | ||
333 | u16 i; | ||
334 | |||
335 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) | ||
336 | checksum += ixgb_read_eeprom(hw, i); | ||
337 | |||
338 | if (checksum == (u16) EEPROM_SUM) | ||
339 | return true; | ||
340 | else | ||
341 | return false; | ||
342 | } | ||
343 | |||
344 | /****************************************************************************** | ||
345 | * Calculates the EEPROM checksum and writes it to the EEPROM | ||
346 | * | ||
347 | * hw - Struct containing variables accessed by shared code | ||
348 | * | ||
349 | * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. | ||
350 | * Writes the difference to word offset 63 of the EEPROM. | ||
351 | *****************************************************************************/ | ||
352 | void | ||
353 | ixgb_update_eeprom_checksum(struct ixgb_hw *hw) | ||
354 | { | ||
355 | u16 checksum = 0; | ||
356 | u16 i; | ||
357 | |||
358 | for (i = 0; i < EEPROM_CHECKSUM_REG; i++) | ||
359 | checksum += ixgb_read_eeprom(hw, i); | ||
360 | |||
361 | checksum = (u16) EEPROM_SUM - checksum; | ||
362 | |||
363 | ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); | ||
364 | } | ||
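/*
 * Illustrative arithmetic: if words 0x00..0x3E sum to 0x1234, the value
 * written to word 0x3F is 0xBABA - 0x1234 = 0xA886, so that a subsequent
 * ixgb_validate_eeprom_checksum() sees the full 64-word sum come out to
 * EEPROM_SUM (0xBABA).
 */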
365 | |||
366 | /****************************************************************************** | ||
367 | * Writes a 16 bit word to a given offset in the EEPROM. | ||
368 | * | ||
369 | * hw - Struct containing variables accessed by shared code | ||
370 | * reg - offset within the EEPROM to be written to | ||
371 | * data - 16 bit word to be written to the EEPROM | ||
372 | * | ||
373 | * If ixgb_update_eeprom_checksum is not called after this function, the | ||
374 | * EEPROM will most likely contain an invalid checksum. | ||
375 | * | ||
376 | *****************************************************************************/ | ||
377 | void | ||
378 | ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) | ||
379 | { | ||
380 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | ||
381 | |||
382 | /* Prepare the EEPROM for writing */ | ||
383 | ixgb_setup_eeprom(hw); | ||
384 | |||
385 | /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode | ||
386 | * plus 4-bit dummy). This puts the EEPROM into write/erase mode. | ||
387 | */ | ||
388 | ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5); | ||
389 | ixgb_shift_out_bits(hw, 0, 4); | ||
390 | |||
391 | /* Prepare the EEPROM */ | ||
392 | ixgb_standby_eeprom(hw); | ||
393 | |||
394 | /* Send the Write command (3-bit opcode + 6-bit addr) */ | ||
395 | ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3); | ||
396 | ixgb_shift_out_bits(hw, offset, 6); | ||
397 | |||
398 | /* Send the data */ | ||
399 | ixgb_shift_out_bits(hw, data, 16); | ||
400 | |||
401 | ixgb_wait_eeprom_command(hw); | ||
402 | |||
403 | /* Recover from write */ | ||
404 | ixgb_standby_eeprom(hw); | ||
405 | |||
406 | /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit | ||
407 | * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase | ||
408 | * mode. | ||
409 | */ | ||
410 | ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5); | ||
411 | ixgb_shift_out_bits(hw, 0, 4); | ||
412 | |||
413 | /* Done with writing */ | ||
414 | ixgb_cleanup_eeprom(hw); | ||
415 | |||
416 | /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ | ||
417 | ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); | ||
418 | } | ||
419 | |||
420 | /****************************************************************************** | ||
421 | * Reads a 16 bit word from the EEPROM. | ||
422 | * | ||
423 | * hw - Struct containing variables accessed by shared code | ||
424 | * offset - offset of 16 bit word in the EEPROM to read | ||
425 | * | ||
426 | * Returns: | ||
427 | * The 16-bit value read from the eeprom | ||
428 | *****************************************************************************/ | ||
429 | u16 | ||
430 | ixgb_read_eeprom(struct ixgb_hw *hw, | ||
431 | u16 offset) | ||
432 | { | ||
433 | u16 data; | ||
434 | |||
435 | /* Prepare the EEPROM for reading */ | ||
436 | ixgb_setup_eeprom(hw); | ||
437 | |||
438 | /* Send the READ command (opcode + addr) */ | ||
439 | ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3); | ||
440 | /* | ||
441 | * We have a 64 word EEPROM, there are 6 address bits | ||
442 | */ | ||
443 | ixgb_shift_out_bits(hw, offset, 6); | ||
444 | |||
445 | /* Read the data */ | ||
446 | data = ixgb_shift_in_bits(hw); | ||
447 | |||
448 | /* End this read operation */ | ||
449 | ixgb_standby_eeprom(hw); | ||
450 | |||
451 | return data; | ||
452 | } | ||
453 | |||
454 | /****************************************************************************** | ||
455 | * Reads eeprom and stores data in shared structure. | ||
456 | * Validates eeprom checksum and eeprom signature. | ||
457 | * | ||
458 | * hw - Struct containing variables accessed by shared code | ||
459 | * | ||
460 | * Returns: | ||
461 | * true: if eeprom read is successful | ||
462 | * false: otherwise. | ||
463 | *****************************************************************************/ | ||
464 | bool | ||
465 | ixgb_get_eeprom_data(struct ixgb_hw *hw) | ||
466 | { | ||
467 | u16 i; | ||
468 | u16 checksum = 0; | ||
469 | struct ixgb_ee_map_type *ee_map; | ||
470 | |||
471 | ENTER(); | ||
472 | |||
473 | ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | ||
474 | |||
475 | pr_debug("Reading eeprom data\n"); | ||
476 | for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { | ||
477 | u16 ee_data; | ||
478 | ee_data = ixgb_read_eeprom(hw, i); | ||
479 | checksum += ee_data; | ||
480 | hw->eeprom[i] = cpu_to_le16(ee_data); | ||
481 | } | ||
482 | |||
483 | if (checksum != (u16) EEPROM_SUM) { | ||
484 | pr_debug("Checksum invalid\n"); | ||
485 | /* clear the init_ctrl_reg_1 to signify that the cache is | ||
486 | * invalidated */ | ||
487 | ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); | ||
488 | return false; | ||
489 | } | ||
490 | |||
491 | if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) | ||
492 | != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { | ||
493 | pr_debug("Signature invalid\n"); | ||
494 | return false; | ||
495 | } | ||
496 | |||
497 | return true; | ||
498 | } | ||
499 | |||
500 | /****************************************************************************** | ||
501 | * Local function to check if the eeprom signature is good | ||
* If the eeprom signature is good, calls ixgb_get_eeprom_data.
503 | * | ||
504 | * hw - Struct containing variables accessed by shared code | ||
505 | * | ||
506 | * Returns: | ||
507 | * true: eeprom signature was good and the eeprom read was successful | ||
508 | * false: otherwise. | ||
509 | ******************************************************************************/ | ||
510 | static bool | ||
511 | ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) | ||
512 | { | ||
513 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | ||
514 | |||
515 | if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) | ||
516 | == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { | ||
517 | return true; | ||
518 | } else { | ||
519 | return ixgb_get_eeprom_data(hw); | ||
520 | } | ||
521 | } | ||
522 | |||
523 | /****************************************************************************** | ||
524 | * return a word from the eeprom | ||
525 | * | ||
526 | * hw - Struct containing variables accessed by shared code | ||
527 | * index - Offset of eeprom word | ||
528 | * | ||
529 | * Returns: | ||
530 | * Word at indexed offset in eeprom, if valid, 0 otherwise. | ||
531 | ******************************************************************************/ | ||
532 | __le16 | ||
533 | ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) | ||
534 | { | ||
535 | |||
536 | if ((index < IXGB_EEPROM_SIZE) && | ||
537 | (ixgb_check_and_get_eeprom_data(hw) == true)) { | ||
538 | return hw->eeprom[index]; | ||
539 | } | ||
540 | |||
541 | return 0; | ||
542 | } | ||
543 | |||
544 | /****************************************************************************** | ||
545 | * return the mac address from EEPROM | ||
546 | * | ||
547 | * hw - Struct containing variables accessed by shared code | ||
548 | * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise | ||
549 | * | ||
550 | * Returns: None. | ||
551 | ******************************************************************************/ | ||
552 | void | ||
553 | ixgb_get_ee_mac_addr(struct ixgb_hw *hw, | ||
554 | u8 *mac_addr) | ||
555 | { | ||
556 | int i; | ||
557 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | ||
558 | |||
559 | ENTER(); | ||
560 | |||
561 | if (ixgb_check_and_get_eeprom_data(hw) == true) { | ||
562 | for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { | ||
563 | mac_addr[i] = ee_map->mac_addr[i]; | ||
564 | } | ||
565 | pr_debug("eeprom mac address = %pM\n", mac_addr); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | |||
570 | /****************************************************************************** | ||
571 | * return the Printed Board Assembly number from EEPROM | ||
572 | * | ||
573 | * hw - Struct containing variables accessed by shared code | ||
574 | * | ||
575 | * Returns: | ||
576 | * PBA number if EEPROM contents are valid, 0 otherwise | ||
577 | ******************************************************************************/ | ||
578 | u32 | ||
579 | ixgb_get_ee_pba_number(struct ixgb_hw *hw) | ||
580 | { | ||
581 | if (ixgb_check_and_get_eeprom_data(hw) == true) | ||
582 | return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) | ||
583 | | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); | ||
584 | |||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | |||
589 | /****************************************************************************** | ||
590 | * return the Device Id from EEPROM | ||
591 | * | ||
592 | * hw - Struct containing variables accessed by shared code | ||
593 | * | ||
594 | * Returns: | ||
595 | * Device Id if EEPROM contents are valid, 0 otherwise | ||
596 | ******************************************************************************/ | ||
597 | u16 | ||
598 | ixgb_get_ee_device_id(struct ixgb_hw *hw) | ||
599 | { | ||
600 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | ||
601 | |||
602 | if (ixgb_check_and_get_eeprom_data(hw) == true) | ||
603 | return le16_to_cpu(ee_map->device_id); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
new file mode 100644
index 00000000000..7ea12652f47
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -0,0 +1,106 @@
/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGB_EE_H_
#define _IXGB_EE_H_

#define IXGB_EEPROM_SIZE    64	/* Size in words */

#define IXGB_ETH_LENGTH_OF_ADDRESS   6

/* EEPROM Commands */
#define EEPROM_READ_OPCODE      0x6	/* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE     0x5	/* EEPROM write opcode */
#define EEPROM_ERASE_OPCODE     0x7	/* EEPROM erase opcode */
#define EEPROM_EWEN_OPCODE      0x13	/* EEPROM erase/write enable */
#define EEPROM_EWDS_OPCODE      0x10	/* EEPROM erase/write disable */

/* EEPROM MAP (Word Offsets) */
#define EEPROM_IA_1_2_REG        0x0000
#define EEPROM_IA_3_4_REG        0x0001
#define EEPROM_IA_5_6_REG        0x0002
#define EEPROM_COMPATIBILITY_REG 0x0003
#define EEPROM_PBA_1_2_REG       0x0008
#define EEPROM_PBA_3_4_REG       0x0009
#define EEPROM_INIT_CONTROL1_REG 0x000A
#define EEPROM_SUBSYS_ID_REG     0x000B
#define EEPROM_SUBVEND_ID_REG    0x000C
#define EEPROM_DEVICE_ID_REG     0x000D
#define EEPROM_VENDOR_ID_REG     0x000E
#define EEPROM_INIT_CONTROL2_REG 0x000F
#define EEPROM_SWDPINS_REG       0x0020
#define EEPROM_CIRCUIT_CTRL_REG  0x0021
#define EEPROM_D0_D3_POWER_REG   0x0022
#define EEPROM_FLASH_VERSION     0x0032
#define EEPROM_CHECKSUM_REG      0x003F

/* Mask bits for fields in Word 0x0a of the EEPROM */

#define EEPROM_ICW1_SIGNATURE_MASK  0xC000
#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000

/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
#define EEPROM_SUM 0xBABA

/* EEPROM Map Sizes (Byte Counts) */
#define PBA_SIZE 4

/* EEPROM Map defines (WORD OFFSETS) */

/* EEPROM structure */
struct ixgb_ee_map_type {
	u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
	__le16 compatibility;
	__le16 reserved1[4];
	__le32 pba_number;
	__le16 init_ctrl_reg_1;
	__le16 subsystem_id;
	__le16 subvendor_id;
	__le16 device_id;
	__le16 vendor_id;
	__le16 init_ctrl_reg_2;
	__le16 oem_reserved[16];
	__le16 swdpins_reg;
	__le16 circuit_ctrl_reg;
	u8 d3_power;
	u8 d0_power;
	__le16 reserved2[28];
	__le16 checksum;
};

/* EEPROM Functions */
u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);

bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);

void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);

void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);

#endif /* IXGB_EE_H */
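The prototypes above are all a caller needs for simple EEPROM access. A minimal
sketch of the usual validate-then-read pattern (editor's illustration, not part
of this patch; the helper name is hypothetical and assumes an already
initialized struct ixgb_hw):

/* Illustrative helper only -- the name is not part of the driver. */
static int example_read_device_id(struct ixgb_hw *hw, u16 *dev_id)
{
	/* The checksum covers words 0x00..0x3F and must sum to 0xBABA. */
	if (!ixgb_validate_eeprom_checksum(hw))
		return -EIO;

	*dev_id = ixgb_read_eeprom(hw, EEPROM_DEVICE_ID_REG);
	return 0;
}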
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
new file mode 100644
index 00000000000..6da890b9534
--- /dev/null
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -0,0 +1,758 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* ethtool support for ixgb */ | ||
30 | |||
31 | #include "ixgb.h" | ||
32 | |||
33 | #include <asm/uaccess.h> | ||
34 | |||
35 | #define IXGB_ALL_RAR_ENTRIES 16 | ||
36 | |||
37 | enum {NETDEV_STATS, IXGB_STATS}; | ||
38 | |||
39 | struct ixgb_stats { | ||
40 | char stat_string[ETH_GSTRING_LEN]; | ||
41 | int type; | ||
42 | int sizeof_stat; | ||
43 | int stat_offset; | ||
44 | }; | ||
45 | |||
46 | #define IXGB_STAT(m) IXGB_STATS, \ | ||
47 | FIELD_SIZEOF(struct ixgb_adapter, m), \ | ||
48 | offsetof(struct ixgb_adapter, m) | ||
49 | #define IXGB_NETDEV_STAT(m) NETDEV_STATS, \ | ||
50 | FIELD_SIZEOF(struct net_device, m), \ | ||
51 | offsetof(struct net_device, m) | ||
52 | |||
53 | static struct ixgb_stats ixgb_gstrings_stats[] = { | ||
54 | {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)}, | ||
55 | {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)}, | ||
56 | {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)}, | ||
57 | {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)}, | ||
58 | {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)}, | ||
59 | {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)}, | ||
60 | {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)}, | ||
61 | {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)}, | ||
62 | {"multicast", IXGB_NETDEV_STAT(stats.multicast)}, | ||
63 | {"collisions", IXGB_NETDEV_STAT(stats.collisions)}, | ||
64 | |||
65 | /* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */ | ||
66 | {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)}, | ||
67 | {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)}, | ||
68 | {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)}, | ||
69 | {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, | ||
70 | {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)}, | ||
71 | {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)}, | ||
72 | {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)}, | ||
73 | {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)}, | ||
74 | {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)}, | ||
75 | {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)}, | ||
76 | {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)}, | ||
77 | {"tx_deferred_ok", IXGB_STAT(stats.dc)}, | ||
78 | {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, | ||
79 | {"tx_restart_queue", IXGB_STAT(restart_queue) }, | ||
80 | {"rx_long_length_errors", IXGB_STAT(stats.roc)}, | ||
81 | {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, | ||
82 | {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, | ||
83 | {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, | ||
84 | {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, | ||
85 | {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, | ||
86 | {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, | ||
87 | {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)}, | ||
88 | {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)}, | ||
89 | {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)}, | ||
90 | {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)}, | ||
91 | {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)} | ||
92 | }; | ||
93 | |||
94 | #define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) | ||
95 | |||
96 | static int | ||
97 | ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
98 | { | ||
99 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
100 | |||
101 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); | ||
102 | ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); | ||
103 | ecmd->port = PORT_FIBRE; | ||
104 | ecmd->transceiver = XCVR_EXTERNAL; | ||
105 | |||
106 | if (netif_carrier_ok(adapter->netdev)) { | ||
107 | ethtool_cmd_speed_set(ecmd, SPEED_10000); | ||
108 | ecmd->duplex = DUPLEX_FULL; | ||
109 | } else { | ||
110 | ethtool_cmd_speed_set(ecmd, -1); | ||
111 | ecmd->duplex = -1; | ||
112 | } | ||
113 | |||
114 | ecmd->autoneg = AUTONEG_DISABLE; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static void ixgb_set_speed_duplex(struct net_device *netdev) | ||
119 | { | ||
120 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
121 | /* be optimistic about our link, since we were up before */ | ||
122 | adapter->link_speed = 10000; | ||
123 | adapter->link_duplex = FULL_DUPLEX; | ||
124 | netif_carrier_on(netdev); | ||
125 | netif_wake_queue(netdev); | ||
126 | } | ||
127 | |||
128 | static int | ||
129 | ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
130 | { | ||
131 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
132 | u32 speed = ethtool_cmd_speed(ecmd); | ||
133 | |||
134 | if (ecmd->autoneg == AUTONEG_ENABLE || | ||
135 | (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) | ||
136 | return -EINVAL; | ||
137 | |||
138 | if (netif_running(adapter->netdev)) { | ||
139 | ixgb_down(adapter, true); | ||
140 | ixgb_reset(adapter); | ||
141 | ixgb_up(adapter); | ||
142 | ixgb_set_speed_duplex(netdev); | ||
143 | } else | ||
144 | ixgb_reset(adapter); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static void | ||
150 | ixgb_get_pauseparam(struct net_device *netdev, | ||
151 | struct ethtool_pauseparam *pause) | ||
152 | { | ||
153 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
154 | struct ixgb_hw *hw = &adapter->hw; | ||
155 | |||
156 | pause->autoneg = AUTONEG_DISABLE; | ||
157 | |||
158 | if (hw->fc.type == ixgb_fc_rx_pause) | ||
159 | pause->rx_pause = 1; | ||
160 | else if (hw->fc.type == ixgb_fc_tx_pause) | ||
161 | pause->tx_pause = 1; | ||
162 | else if (hw->fc.type == ixgb_fc_full) { | ||
163 | pause->rx_pause = 1; | ||
164 | pause->tx_pause = 1; | ||
165 | } | ||
166 | } | ||
167 | |||
168 | static int | ||
169 | ixgb_set_pauseparam(struct net_device *netdev, | ||
170 | struct ethtool_pauseparam *pause) | ||
171 | { | ||
172 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
173 | struct ixgb_hw *hw = &adapter->hw; | ||
174 | |||
175 | if (pause->autoneg == AUTONEG_ENABLE) | ||
176 | return -EINVAL; | ||
177 | |||
178 | if (pause->rx_pause && pause->tx_pause) | ||
179 | hw->fc.type = ixgb_fc_full; | ||
180 | else if (pause->rx_pause && !pause->tx_pause) | ||
181 | hw->fc.type = ixgb_fc_rx_pause; | ||
182 | else if (!pause->rx_pause && pause->tx_pause) | ||
183 | hw->fc.type = ixgb_fc_tx_pause; | ||
184 | else if (!pause->rx_pause && !pause->tx_pause) | ||
185 | hw->fc.type = ixgb_fc_none; | ||
186 | |||
187 | if (netif_running(adapter->netdev)) { | ||
188 | ixgb_down(adapter, true); | ||
189 | ixgb_up(adapter); | ||
190 | ixgb_set_speed_duplex(netdev); | ||
191 | } else | ||
192 | ixgb_reset(adapter); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static u32 | ||
198 | ixgb_get_rx_csum(struct net_device *netdev) | ||
199 | { | ||
200 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
201 | |||
202 | return adapter->rx_csum; | ||
203 | } | ||
204 | |||
205 | static int | ||
206 | ixgb_set_rx_csum(struct net_device *netdev, u32 data) | ||
207 | { | ||
208 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
209 | |||
210 | adapter->rx_csum = data; | ||
211 | |||
212 | if (netif_running(netdev)) { | ||
213 | ixgb_down(adapter, true); | ||
214 | ixgb_up(adapter); | ||
215 | ixgb_set_speed_duplex(netdev); | ||
216 | } else | ||
217 | ixgb_reset(adapter); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static u32 | ||
222 | ixgb_get_tx_csum(struct net_device *netdev) | ||
223 | { | ||
224 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | ||
225 | } | ||
226 | |||
227 | static int | ||
228 | ixgb_set_tx_csum(struct net_device *netdev, u32 data) | ||
229 | { | ||
230 | if (data) | ||
231 | netdev->features |= NETIF_F_HW_CSUM; | ||
232 | else | ||
233 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static int | ||
239 | ixgb_set_tso(struct net_device *netdev, u32 data) | ||
240 | { | ||
241 | if (data) | ||
242 | netdev->features |= NETIF_F_TSO; | ||
243 | else | ||
244 | netdev->features &= ~NETIF_F_TSO; | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static u32 | ||
249 | ixgb_get_msglevel(struct net_device *netdev) | ||
250 | { | ||
251 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
252 | return adapter->msg_enable; | ||
253 | } | ||
254 | |||
255 | static void | ||
256 | ixgb_set_msglevel(struct net_device *netdev, u32 data) | ||
257 | { | ||
258 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
259 | adapter->msg_enable = data; | ||
260 | } | ||
261 | #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ | ||
262 | |||
263 | static int | ||
264 | ixgb_get_regs_len(struct net_device *netdev) | ||
265 | { | ||
266 | #define IXGB_REG_DUMP_LEN 136*sizeof(u32) | ||
267 | return IXGB_REG_DUMP_LEN; | ||
268 | } | ||
269 | |||
270 | static void | ||
271 | ixgb_get_regs(struct net_device *netdev, | ||
272 | struct ethtool_regs *regs, void *p) | ||
273 | { | ||
274 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
275 | struct ixgb_hw *hw = &adapter->hw; | ||
276 | u32 *reg = p; | ||
277 | u32 *reg_start = reg; | ||
278 | u8 i; | ||
279 | |||
280 | /* the 1 (one) below indicates an attempt at versioning, if the | ||
281 | * interface in ethtool or the driver changes, this 1 should be | ||
282 | * incremented */ | ||
283 | regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; | ||
284 | |||
285 | /* General Registers */ | ||
286 | *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */ | ||
287 | *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */ | ||
288 | *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */ | ||
289 | *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */ | ||
290 | *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */ | ||
291 | |||
292 | /* Interrupt */ | ||
293 | *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */ | ||
294 | *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */ | ||
295 | *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */ | ||
296 | *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */ | ||
297 | |||
298 | /* Receive */ | ||
299 | *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */ | ||
300 | *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */ | ||
301 | *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */ | ||
302 | *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */ | ||
303 | *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */ | ||
304 | *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */ | ||
305 | *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */ | ||
306 | *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */ | ||
307 | *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */ | ||
308 | *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */ | ||
309 | *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ | ||
310 | *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ | ||
311 | |||
312 | /* there are 16 RAR entries in hardware, we only use 3 */ | ||
313 | for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { | ||
314 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ | ||
315 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ | ||
316 | } | ||
317 | |||
318 | /* Transmit */ | ||
319 | *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */ | ||
320 | *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */ | ||
321 | *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */ | ||
322 | *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */ | ||
323 | *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */ | ||
324 | *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */ | ||
325 | *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */ | ||
326 | *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */ | ||
327 | *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */ | ||
328 | *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */ | ||
329 | |||
330 | /* Physical */ | ||
331 | *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */ | ||
332 | *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */ | ||
333 | *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */ | ||
334 | *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */ | ||
335 | *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */ | ||
336 | *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */ | ||
337 | *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */ | ||
338 | *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */ | ||
339 | *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */ | ||
340 | *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */ | ||
341 | *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */ | ||
342 | *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */ | ||
343 | *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */ | ||
344 | |||
345 | /* Statistics */ | ||
346 | *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */ | ||
347 | *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */ | ||
348 | *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */ | ||
349 | *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */ | ||
350 | *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */ | ||
351 | *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */ | ||
352 | *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */ | ||
353 | *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */ | ||
354 | *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */ | ||
355 | *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */ | ||
356 | *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */ | ||
357 | *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */ | ||
358 | *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */ | ||
359 | *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */ | ||
360 | *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */ | ||
361 | *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */ | ||
362 | *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */ | ||
363 | *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */ | ||
364 | *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */ | ||
365 | *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */ | ||
366 | *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */ | ||
367 | *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */ | ||
368 | *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */ | ||
369 | *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */ | ||
370 | *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */ | ||
371 | *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */ | ||
372 | *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */ | ||
373 | *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */ | ||
374 | *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */ | ||
375 | *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */ | ||
376 | *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */ | ||
377 | *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */ | ||
378 | *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */ | ||
379 | *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */ | ||
380 | *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */ | ||
381 | *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */ | ||
382 | *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */ | ||
383 | *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */ | ||
384 | *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */ | ||
385 | *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */ | ||
386 | *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */ | ||
387 | *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */ | ||
388 | *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */ | ||
389 | *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */ | ||
390 | *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */ | ||
391 | *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */ | ||
392 | *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */ | ||
393 | *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */ | ||
394 | *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */ | ||
395 | *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */ | ||
396 | *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */ | ||
397 | *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */ | ||
398 | *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */ | ||
399 | *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */ | ||
400 | *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */ | ||
401 | *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */ | ||
402 | *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */ | ||
403 | *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */ | ||
404 | *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ | ||
405 | *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ | ||
406 | |||
407 | regs->len = (reg - reg_start) * sizeof(u32); | ||
408 | } | ||
409 | |||
410 | static int | ||
411 | ixgb_get_eeprom_len(struct net_device *netdev) | ||
412 | { | ||
413 | /* return size in bytes */ | ||
414 | return IXGB_EEPROM_SIZE << 1; | ||
415 | } | ||
416 | |||
417 | static int | ||
418 | ixgb_get_eeprom(struct net_device *netdev, | ||
419 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
420 | { | ||
421 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
422 | struct ixgb_hw *hw = &adapter->hw; | ||
423 | __le16 *eeprom_buff; | ||
424 | int i, max_len, first_word, last_word; | ||
425 | int ret_val = 0; | ||
426 | |||
427 | if (eeprom->len == 0) { | ||
428 | ret_val = -EINVAL; | ||
429 | goto geeprom_error; | ||
430 | } | ||
431 | |||
432 | eeprom->magic = hw->vendor_id | (hw->device_id << 16); | ||
433 | |||
434 | max_len = ixgb_get_eeprom_len(netdev); | ||
435 | |||
436 | if (eeprom->offset > eeprom->offset + eeprom->len) { | ||
437 | ret_val = -EINVAL; | ||
438 | goto geeprom_error; | ||
439 | } | ||
440 | |||
441 | if ((eeprom->offset + eeprom->len) > max_len) | ||
442 | eeprom->len = (max_len - eeprom->offset); | ||
443 | |||
444 | first_word = eeprom->offset >> 1; | ||
445 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
446 | |||
447 | eeprom_buff = kmalloc(sizeof(__le16) * | ||
448 | (last_word - first_word + 1), GFP_KERNEL); | ||
449 | if (!eeprom_buff) | ||
450 | return -ENOMEM; | ||
451 | |||
452 | /* note the eeprom was good because the driver loaded */ | ||
453 | for (i = 0; i <= (last_word - first_word); i++) | ||
454 | eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); | ||
455 | |||
456 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); | ||
457 | kfree(eeprom_buff); | ||
458 | |||
459 | geeprom_error: | ||
460 | return ret_val; | ||
461 | } | ||
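
Note: ethtool hands ixgb_get_eeprom() a byte offset and length, while the EEPROM is read one 16-bit word at a time, so the code above maps the byte range onto words with first_word = offset >> 1 and skips the unused low byte of the first word via (offset & 1). A minimal stand-alone sketch of that index math (the helper names below are hypothetical, not part of the driver):

#include <stdio.h>

/* Hypothetical illustration of the word indexing used in ixgb_get_eeprom():
 * a byte offset/length is turned into a span of 16-bit word reads. */
static void eeprom_word_span(unsigned offset, unsigned len)
{
	unsigned first_word = offset >> 1;
	unsigned last_word  = (offset + len - 1) >> 1;
	unsigned words      = last_word - first_word + 1;

	printf("bytes [%u, %u) -> words [%u, %u], %u word reads, copy starts at byte %u\n",
	       offset, offset + len, first_word, last_word, words, offset & 1);
}

int main(void)
{
	eeprom_word_span(3, 5);	/* odd start: skips the low byte of word 1 */
	eeprom_word_span(0, 4);	/* aligned: words 0..1 */
	return 0;
}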
462 | |||
463 | static int | ||
464 | ixgb_set_eeprom(struct net_device *netdev, | ||
465 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
466 | { | ||
467 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
468 | struct ixgb_hw *hw = &adapter->hw; | ||
469 | u16 *eeprom_buff; | ||
470 | void *ptr; | ||
471 | int max_len, first_word, last_word; | ||
472 | u16 i; | ||
473 | |||
474 | if (eeprom->len == 0) | ||
475 | return -EINVAL; | ||
476 | |||
477 | if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) | ||
478 | return -EFAULT; | ||
479 | |||
480 | max_len = ixgb_get_eeprom_len(netdev); | ||
481 | |||
482 | if (eeprom->offset > eeprom->offset + eeprom->len) | ||
483 | return -EINVAL; | ||
484 | |||
485 | if ((eeprom->offset + eeprom->len) > max_len) | ||
486 | eeprom->len = (max_len - eeprom->offset); | ||
487 | |||
488 | first_word = eeprom->offset >> 1; | ||
489 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | ||
490 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | ||
491 | if (!eeprom_buff) | ||
492 | return -ENOMEM; | ||
493 | |||
494 | ptr = (void *)eeprom_buff; | ||
495 | |||
496 | if (eeprom->offset & 1) { | ||
497 | /* need read/modify/write of first changed EEPROM word */ | ||
498 | /* only the second byte of the word is being modified */ | ||
499 | eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); | ||
500 | ptr++; | ||
501 | } | ||
502 | if ((eeprom->offset + eeprom->len) & 1) { | ||
503 | /* need read/modify/write of last changed EEPROM word */ | ||
504 | /* only the first byte of the word is being modified */ | ||
505 | eeprom_buff[last_word - first_word] | ||
506 | = ixgb_read_eeprom(hw, last_word); | ||
507 | } | ||
508 | |||
509 | memcpy(ptr, bytes, eeprom->len); | ||
510 | for (i = 0; i <= (last_word - first_word); i++) | ||
511 | ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); | ||
512 | |||
513 | /* Update the checksum over the first part of the EEPROM if needed */ | ||
514 | if (first_word <= EEPROM_CHECKSUM_REG) | ||
515 | ixgb_update_eeprom_checksum(hw); | ||
516 | |||
517 | kfree(eeprom_buff); | ||
518 | return 0; | ||
519 | } | ||
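
When the requested byte range does not start or end on a word boundary, ixgb_set_eeprom() first reads the bordering EEPROM words so the untouched byte of each is preserved, then overlays the caller's bytes and writes whole words back. A small sketch of that read-modify-write idea, using a plain array in place of the real EEPROM (illustrative only, not driver code):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Fake 16-bit "EEPROM" so the partial-word update can be shown
 * without any hardware access. */
static uint16_t fake_eeprom[4] = { 0x1122, 0x3344, 0x5566, 0x7788 };

static void write_bytes(unsigned offset, const uint8_t *bytes, unsigned len)
{
	unsigned first_word = offset >> 1;
	unsigned last_word  = (offset + len - 1) >> 1;
	uint16_t buf[4];

	/* Read-modify-write: preserve the byte we are not changing in the
	 * first and last words, as the driver does above. */
	if (offset & 1)
		buf[0] = fake_eeprom[first_word];
	if ((offset + len) & 1)
		buf[last_word - first_word] = fake_eeprom[last_word];

	memcpy((uint8_t *)buf + (offset & 1), bytes, len);

	for (unsigned i = 0; i <= last_word - first_word; i++)
		fake_eeprom[first_word + i] = buf[i];
}

int main(void)
{
	const uint8_t patch[3] = { 0xAA, 0xBB, 0xCC };

	write_bytes(1, patch, 3);	/* partially touches word 0, fully covers word 1 */
	for (int i = 0; i < 4; i++)
		printf("word %d = 0x%04X\n", i, fake_eeprom[i]);
	return 0;
}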
520 | |||
521 | static void | ||
522 | ixgb_get_drvinfo(struct net_device *netdev, | ||
523 | struct ethtool_drvinfo *drvinfo) | ||
524 | { | ||
525 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
526 | |||
527 | strncpy(drvinfo->driver, ixgb_driver_name, 32); | ||
528 | strncpy(drvinfo->version, ixgb_driver_version, 32); | ||
529 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
530 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | ||
531 | drvinfo->n_stats = IXGB_STATS_LEN; | ||
532 | drvinfo->regdump_len = ixgb_get_regs_len(netdev); | ||
533 | drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); | ||
534 | } | ||
535 | |||
536 | static void | ||
537 | ixgb_get_ringparam(struct net_device *netdev, | ||
538 | struct ethtool_ringparam *ring) | ||
539 | { | ||
540 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
541 | struct ixgb_desc_ring *txdr = &adapter->tx_ring; | ||
542 | struct ixgb_desc_ring *rxdr = &adapter->rx_ring; | ||
543 | |||
544 | ring->rx_max_pending = MAX_RXD; | ||
545 | ring->tx_max_pending = MAX_TXD; | ||
546 | ring->rx_mini_max_pending = 0; | ||
547 | ring->rx_jumbo_max_pending = 0; | ||
548 | ring->rx_pending = rxdr->count; | ||
549 | ring->tx_pending = txdr->count; | ||
550 | ring->rx_mini_pending = 0; | ||
551 | ring->rx_jumbo_pending = 0; | ||
552 | } | ||
553 | |||
554 | static int | ||
555 | ixgb_set_ringparam(struct net_device *netdev, | ||
556 | struct ethtool_ringparam *ring) | ||
557 | { | ||
558 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
559 | struct ixgb_desc_ring *txdr = &adapter->tx_ring; | ||
560 | struct ixgb_desc_ring *rxdr = &adapter->rx_ring; | ||
561 | struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; | ||
562 | int err; | ||
563 | |||
564 | tx_old = adapter->tx_ring; | ||
565 | rx_old = adapter->rx_ring; | ||
566 | |||
567 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | ||
568 | return -EINVAL; | ||
569 | |||
570 | if (netif_running(adapter->netdev)) | ||
571 | ixgb_down(adapter, true); | ||
572 | |||
573 | 	rxdr->count = max(ring->rx_pending, (u32)MIN_RXD); | 	||
574 | 	rxdr->count = min(rxdr->count, (u32)MAX_RXD); | 	||
575 | 	rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); | 	||
576 | 	 | 	||
577 | 	txdr->count = max(ring->tx_pending, (u32)MIN_TXD); | 	||
578 | 	txdr->count = min(txdr->count, (u32)MAX_TXD); | 	||
579 | 	txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); | 	||
580 | |||
581 | if (netif_running(adapter->netdev)) { | ||
582 | /* Try to get new resources before deleting old */ | ||
583 | if ((err = ixgb_setup_rx_resources(adapter))) | ||
584 | goto err_setup_rx; | ||
585 | if ((err = ixgb_setup_tx_resources(adapter))) | ||
586 | goto err_setup_tx; | ||
587 | |||
588 | /* save the new, restore the old in order to free it, | ||
589 | * then restore the new back again */ | ||
590 | |||
591 | rx_new = adapter->rx_ring; | ||
592 | tx_new = adapter->tx_ring; | ||
593 | adapter->rx_ring = rx_old; | ||
594 | adapter->tx_ring = tx_old; | ||
595 | ixgb_free_rx_resources(adapter); | ||
596 | ixgb_free_tx_resources(adapter); | ||
597 | adapter->rx_ring = rx_new; | ||
598 | adapter->tx_ring = tx_new; | ||
599 | if ((err = ixgb_up(adapter))) | ||
600 | return err; | ||
601 | ixgb_set_speed_duplex(netdev); | ||
602 | } | ||
603 | |||
604 | return 0; | ||
605 | err_setup_tx: | ||
606 | ixgb_free_rx_resources(adapter); | ||
607 | err_setup_rx: | ||
608 | adapter->rx_ring = rx_old; | ||
609 | adapter->tx_ring = tx_old; | ||
610 | ixgb_up(adapter); | ||
611 | return err; | ||
612 | } | ||
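
The requested ring sizes are clamped to the hardware limits and rounded up to the required descriptor multiple before the rings are reallocated. A compact sketch of that clamping, with illustrative limits standing in for MIN_RXD/MAX_RXD and the required multiple (the real values come from ixgb.h):

#include <stdio.h>
#include <stdint.h>

/* Illustrative limits only. */
#define EX_MIN_DESC	64u
#define EX_MAX_DESC	4096u
#define EX_MULTIPLE	8u

/* Round up to a power-of-two multiple, mirroring the kernel's ALIGN(). */
static uint32_t align_up(uint32_t v, uint32_t m)
{
	return (v + m - 1) & ~(m - 1);
}

static uint32_t clamp_ring(uint32_t requested)
{
	uint32_t count = requested;

	if (count < EX_MIN_DESC)
		count = EX_MIN_DESC;
	if (count > EX_MAX_DESC)
		count = EX_MAX_DESC;
	return align_up(count, EX_MULTIPLE);
}

int main(void)
{
	printf("%u -> %u\n", 1000u, clamp_ring(1000u));	/* already aligned */
	printf("%u -> %u\n", 70u, clamp_ring(70u));	/* rounded up to 72 */
	printf("%u -> %u\n", 10u, clamp_ring(10u));	/* raised to the minimum */
	return 0;
}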
613 | |||
614 | static int | ||
615 | ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) | ||
616 | { | ||
617 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
618 | |||
619 | switch (state) { | ||
620 | case ETHTOOL_ID_ACTIVE: | ||
621 | return 2; | ||
622 | |||
623 | case ETHTOOL_ID_ON: | ||
624 | ixgb_led_on(&adapter->hw); | ||
625 | break; | ||
626 | |||
627 | case ETHTOOL_ID_OFF: | ||
628 | case ETHTOOL_ID_INACTIVE: | ||
629 | ixgb_led_off(&adapter->hw); | ||
630 | } | ||
631 | |||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static int | ||
636 | ixgb_get_sset_count(struct net_device *netdev, int sset) | ||
637 | { | ||
638 | switch (sset) { | ||
639 | case ETH_SS_STATS: | ||
640 | return IXGB_STATS_LEN; | ||
641 | default: | ||
642 | return -EOPNOTSUPP; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | static void | ||
647 | ixgb_get_ethtool_stats(struct net_device *netdev, | ||
648 | struct ethtool_stats *stats, u64 *data) | ||
649 | { | ||
650 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
651 | int i; | ||
652 | char *p = NULL; | ||
653 | |||
654 | ixgb_update_stats(adapter); | ||
655 | for (i = 0; i < IXGB_STATS_LEN; i++) { | ||
656 | switch (ixgb_gstrings_stats[i].type) { | ||
657 | case NETDEV_STATS: | ||
658 | p = (char *) netdev + | ||
659 | ixgb_gstrings_stats[i].stat_offset; | ||
660 | break; | ||
661 | case IXGB_STATS: | ||
662 | p = (char *) adapter + | ||
663 | ixgb_gstrings_stats[i].stat_offset; | ||
664 | break; | ||
665 | } | ||
666 | |||
667 | data[i] = (ixgb_gstrings_stats[i].sizeof_stat == | ||
668 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | static void | ||
673 | ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | ||
674 | { | ||
675 | int i; | ||
676 | |||
677 | 	switch (stringset) { | 	||
678 | case ETH_SS_STATS: | ||
679 | for (i = 0; i < IXGB_STATS_LEN; i++) { | ||
680 | memcpy(data + i * ETH_GSTRING_LEN, | ||
681 | ixgb_gstrings_stats[i].stat_string, | ||
682 | ETH_GSTRING_LEN); | ||
683 | } | ||
684 | break; | ||
685 | } | ||
686 | } | ||
687 | |||
688 | static int ixgb_set_flags(struct net_device *netdev, u32 data) | ||
689 | { | ||
690 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
691 | bool need_reset; | ||
692 | int rc; | ||
693 | |||
694 | /* | ||
695 | * Tx VLAN insertion does not work per HW design when Rx stripping is | ||
696 | * disabled. Disable txvlan when rxvlan is turned off, and enable | ||
697 | * rxvlan when txvlan is turned on. | ||
698 | */ | ||
699 | if (!(data & ETH_FLAG_RXVLAN) && | ||
700 | (netdev->features & NETIF_F_HW_VLAN_TX)) | ||
701 | data &= ~ETH_FLAG_TXVLAN; | ||
702 | else if (data & ETH_FLAG_TXVLAN) | ||
703 | data |= ETH_FLAG_RXVLAN; | ||
704 | |||
705 | need_reset = (data & ETH_FLAG_RXVLAN) != | ||
706 | (netdev->features & NETIF_F_HW_VLAN_RX); | ||
707 | |||
708 | rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | | ||
709 | ETH_FLAG_TXVLAN); | ||
710 | if (rc) | ||
711 | return rc; | ||
712 | |||
713 | if (need_reset) { | ||
714 | if (netif_running(netdev)) { | ||
715 | ixgb_down(adapter, true); | ||
716 | ixgb_up(adapter); | ||
717 | ixgb_set_speed_duplex(netdev); | ||
718 | } else | ||
719 | ixgb_reset(adapter); | ||
720 | } | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static const struct ethtool_ops ixgb_ethtool_ops = { | ||
726 | .get_settings = ixgb_get_settings, | ||
727 | .set_settings = ixgb_set_settings, | ||
728 | .get_drvinfo = ixgb_get_drvinfo, | ||
729 | .get_regs_len = ixgb_get_regs_len, | ||
730 | .get_regs = ixgb_get_regs, | ||
731 | .get_link = ethtool_op_get_link, | ||
732 | .get_eeprom_len = ixgb_get_eeprom_len, | ||
733 | .get_eeprom = ixgb_get_eeprom, | ||
734 | .set_eeprom = ixgb_set_eeprom, | ||
735 | .get_ringparam = ixgb_get_ringparam, | ||
736 | .set_ringparam = ixgb_set_ringparam, | ||
737 | .get_pauseparam = ixgb_get_pauseparam, | ||
738 | .set_pauseparam = ixgb_set_pauseparam, | ||
739 | .get_rx_csum = ixgb_get_rx_csum, | ||
740 | .set_rx_csum = ixgb_set_rx_csum, | ||
741 | .get_tx_csum = ixgb_get_tx_csum, | ||
742 | .set_tx_csum = ixgb_set_tx_csum, | ||
743 | .set_sg = ethtool_op_set_sg, | ||
744 | .get_msglevel = ixgb_get_msglevel, | ||
745 | .set_msglevel = ixgb_set_msglevel, | ||
746 | .set_tso = ixgb_set_tso, | ||
747 | .get_strings = ixgb_get_strings, | ||
748 | .set_phys_id = ixgb_set_phys_id, | ||
749 | .get_sset_count = ixgb_get_sset_count, | ||
750 | .get_ethtool_stats = ixgb_get_ethtool_stats, | ||
751 | .get_flags = ethtool_op_get_flags, | ||
752 | .set_flags = ixgb_set_flags, | ||
753 | }; | ||
754 | |||
755 | void ixgb_set_ethtool_ops(struct net_device *netdev) | ||
756 | { | ||
757 | SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops); | ||
758 | } | ||
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c new file mode 100644 index 00000000000..3d61a9e4faf --- /dev/null +++ b/drivers/net/ixgb/ixgb_hw.c | |||
@@ -0,0 +1,1262 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* ixgb_hw.c | ||
30 | * Shared functions for accessing and configuring the adapter | ||
31 | */ | ||
32 | |||
33 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
34 | |||
35 | #include "ixgb_hw.h" | ||
36 | #include "ixgb_ids.h" | ||
37 | |||
38 | #include <linux/etherdevice.h> | ||
39 | |||
40 | /* Local function prototypes */ | ||
41 | |||
42 | static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); | ||
43 | |||
44 | static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value); | ||
45 | |||
46 | static void ixgb_get_bus_info(struct ixgb_hw *hw); | ||
47 | |||
48 | static bool ixgb_link_reset(struct ixgb_hw *hw); | ||
49 | |||
50 | static void ixgb_optics_reset(struct ixgb_hw *hw); | ||
51 | |||
52 | static void ixgb_optics_reset_bcm(struct ixgb_hw *hw); | ||
53 | |||
54 | static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw); | ||
55 | |||
56 | static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw); | ||
57 | |||
58 | static void ixgb_clear_vfta(struct ixgb_hw *hw); | ||
59 | |||
60 | static void ixgb_init_rx_addrs(struct ixgb_hw *hw); | ||
61 | |||
62 | static u16 ixgb_read_phy_reg(struct ixgb_hw *hw, | ||
63 | u32 reg_address, | ||
64 | u32 phy_address, | ||
65 | u32 device_type); | ||
66 | |||
67 | static bool ixgb_setup_fc(struct ixgb_hw *hw); | ||
68 | |||
69 | static bool mac_addr_valid(u8 *mac_addr); | ||
70 | |||
71 | static u32 ixgb_mac_reset(struct ixgb_hw *hw) | ||
72 | { | ||
73 | u32 ctrl_reg; | ||
74 | |||
75 | ctrl_reg = IXGB_CTRL0_RST | | ||
76 | IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ | ||
77 | IXGB_CTRL0_SDP2_DIR | | ||
78 | IXGB_CTRL0_SDP1_DIR | | ||
79 | IXGB_CTRL0_SDP0_DIR | | ||
80 | IXGB_CTRL0_SDP3 | /* Initial value 1101 */ | ||
81 | IXGB_CTRL0_SDP2 | | ||
82 | IXGB_CTRL0_SDP0; | ||
83 | |||
84 | #ifdef HP_ZX1 | ||
85 | /* Workaround for 82597EX reset errata */ | ||
86 | IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg); | ||
87 | #else | ||
88 | IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); | ||
89 | #endif | ||
90 | |||
91 | /* Delay a few ms just to allow the reset to complete */ | ||
92 | msleep(IXGB_DELAY_AFTER_RESET); | ||
93 | ctrl_reg = IXGB_READ_REG(hw, CTRL0); | ||
94 | #ifdef DBG | ||
95 | /* Make sure the self-clearing global reset bit did self clear */ | ||
96 | ASSERT(!(ctrl_reg & IXGB_CTRL0_RST)); | ||
97 | #endif | ||
98 | |||
99 | if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) { | ||
100 | ctrl_reg = /* Enable interrupt from XFP and SerDes */ | ||
101 | IXGB_CTRL1_GPI0_EN | | ||
102 | IXGB_CTRL1_SDP6_DIR | | ||
103 | IXGB_CTRL1_SDP7_DIR | | ||
104 | IXGB_CTRL1_SDP6 | | ||
105 | IXGB_CTRL1_SDP7; | ||
106 | IXGB_WRITE_REG(hw, CTRL1, ctrl_reg); | ||
107 | ixgb_optics_reset_bcm(hw); | ||
108 | } | ||
109 | |||
110 | if (hw->phy_type == ixgb_phy_type_txn17401) | ||
111 | ixgb_optics_reset(hw); | ||
112 | |||
113 | return ctrl_reg; | ||
114 | } | ||
115 | |||
116 | /****************************************************************************** | ||
117 | * Reset the transmit and receive units; mask and clear all interrupts. | ||
118 | * | ||
119 | * hw - Struct containing variables accessed by shared code | ||
120 | *****************************************************************************/ | ||
121 | bool | ||
122 | ixgb_adapter_stop(struct ixgb_hw *hw) | ||
123 | { | ||
124 | u32 ctrl_reg; | ||
125 | u32 icr_reg; | ||
126 | |||
127 | ENTER(); | ||
128 | |||
129 | /* If we are stopped or resetting exit gracefully and wait to be | ||
130 | * started again before accessing the hardware. | ||
131 | */ | ||
132 | if (hw->adapter_stopped) { | ||
133 | pr_debug("Exiting because the adapter is already stopped!!!\n"); | ||
134 | return false; | ||
135 | } | ||
136 | |||
137 | /* Set the Adapter Stopped flag so other driver functions stop | ||
138 | * touching the Hardware. | ||
139 | */ | ||
140 | hw->adapter_stopped = true; | ||
141 | |||
142 | /* Clear interrupt mask to stop board from generating interrupts */ | ||
143 | pr_debug("Masking off all interrupts\n"); | ||
144 | IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF); | ||
145 | |||
146 | /* Disable the Transmit and Receive units. Then delay to allow | ||
147 | * any pending transactions to complete before we hit the MAC with | ||
148 | * the global reset. | ||
149 | */ | ||
150 | IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); | ||
151 | IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); | ||
152 | IXGB_WRITE_FLUSH(hw); | ||
153 | msleep(IXGB_DELAY_BEFORE_RESET); | ||
154 | |||
155 | /* Issue a global reset to the MAC. This will reset the chip's | ||
156 | 	 * transmit, receive, DMA, and link units.  It will not affect | 	||
157 | * the current PCI configuration. The global reset bit is self- | ||
158 | * clearing, and should clear within a microsecond. | ||
159 | */ | ||
160 | pr_debug("Issuing a global reset to MAC\n"); | ||
161 | |||
162 | ctrl_reg = ixgb_mac_reset(hw); | ||
163 | |||
164 | /* Clear interrupt mask to stop board from generating interrupts */ | ||
165 | pr_debug("Masking off all interrupts\n"); | ||
166 | IXGB_WRITE_REG(hw, IMC, 0xffffffff); | ||
167 | |||
168 | /* Clear any pending interrupt events. */ | ||
169 | icr_reg = IXGB_READ_REG(hw, ICR); | ||
170 | |||
171 | return ctrl_reg & IXGB_CTRL0_RST; | ||
172 | } | ||
173 | |||
174 | |||
175 | /****************************************************************************** | ||
176 | * Identifies the vendor of the optics module on the adapter. The SR adapters | ||
177 | * support two different types of XPAK optics, so it is necessary to determine | ||
178 | * which optics are present before applying any optics-specific workarounds. | ||
179 | * | ||
180 | * hw - Struct containing variables accessed by shared code. | ||
181 | * | ||
182 | * Returns: the vendor of the XPAK optics module. | ||
183 | *****************************************************************************/ | ||
184 | static ixgb_xpak_vendor | ||
185 | ixgb_identify_xpak_vendor(struct ixgb_hw *hw) | ||
186 | { | ||
187 | u32 i; | ||
188 | u16 vendor_name[5]; | ||
189 | ixgb_xpak_vendor xpak_vendor; | ||
190 | |||
191 | ENTER(); | ||
192 | |||
193 | /* Read the first few bytes of the vendor string from the XPAK NVR | ||
194 | * registers. These are standard XENPAK/XPAK registers, so all XPAK | ||
195 | * devices should implement them. */ | ||
196 | for (i = 0; i < 5; i++) { | ||
197 | vendor_name[i] = ixgb_read_phy_reg(hw, | ||
198 | MDIO_PMA_PMD_XPAK_VENDOR_NAME | ||
199 | + i, IXGB_PHY_ADDRESS, | ||
200 | MDIO_MMD_PMAPMD); | ||
201 | } | ||
202 | |||
203 | /* Determine the actual vendor */ | ||
204 | if (vendor_name[0] == 'I' && | ||
205 | vendor_name[1] == 'N' && | ||
206 | vendor_name[2] == 'T' && | ||
207 | vendor_name[3] == 'E' && vendor_name[4] == 'L') { | ||
208 | xpak_vendor = ixgb_xpak_vendor_intel; | ||
209 | } else { | ||
210 | xpak_vendor = ixgb_xpak_vendor_infineon; | ||
211 | } | ||
212 | |||
213 | return xpak_vendor; | ||
214 | } | ||
215 | |||
216 | /****************************************************************************** | ||
217 | * Determine the physical layer module on the adapter. | ||
218 | * | ||
219 | * hw - Struct containing variables accessed by shared code. The device_id | ||
220 | * field must be (correctly) populated before calling this routine. | ||
221 | * | ||
222 | * Returns: the phy type of the adapter. | ||
223 | *****************************************************************************/ | ||
224 | static ixgb_phy_type | ||
225 | ixgb_identify_phy(struct ixgb_hw *hw) | ||
226 | { | ||
227 | ixgb_phy_type phy_type; | ||
228 | ixgb_xpak_vendor xpak_vendor; | ||
229 | |||
230 | ENTER(); | ||
231 | |||
232 | /* Infer the transceiver/phy type from the device id */ | ||
233 | switch (hw->device_id) { | ||
234 | case IXGB_DEVICE_ID_82597EX: | ||
235 | pr_debug("Identified TXN17401 optics\n"); | ||
236 | phy_type = ixgb_phy_type_txn17401; | ||
237 | break; | ||
238 | |||
239 | case IXGB_DEVICE_ID_82597EX_SR: | ||
240 | /* The SR adapters carry two different types of XPAK optics | ||
241 | * modules; read the vendor identifier to determine the exact | ||
242 | * type of optics. */ | ||
243 | xpak_vendor = ixgb_identify_xpak_vendor(hw); | ||
244 | if (xpak_vendor == ixgb_xpak_vendor_intel) { | ||
245 | pr_debug("Identified TXN17201 optics\n"); | ||
246 | phy_type = ixgb_phy_type_txn17201; | ||
247 | } else { | ||
248 | pr_debug("Identified G6005 optics\n"); | ||
249 | phy_type = ixgb_phy_type_g6005; | ||
250 | } | ||
251 | break; | ||
252 | case IXGB_DEVICE_ID_82597EX_LR: | ||
253 | pr_debug("Identified G6104 optics\n"); | ||
254 | phy_type = ixgb_phy_type_g6104; | ||
255 | break; | ||
256 | case IXGB_DEVICE_ID_82597EX_CX4: | ||
257 | pr_debug("Identified CX4\n"); | ||
258 | xpak_vendor = ixgb_identify_xpak_vendor(hw); | ||
259 | if (xpak_vendor == ixgb_xpak_vendor_intel) { | ||
260 | pr_debug("Identified TXN17201 optics\n"); | ||
261 | phy_type = ixgb_phy_type_txn17201; | ||
262 | } else { | ||
263 | pr_debug("Identified G6005 optics\n"); | ||
264 | phy_type = ixgb_phy_type_g6005; | ||
265 | } | ||
266 | break; | ||
267 | default: | ||
268 | pr_debug("Unknown physical layer module\n"); | ||
269 | phy_type = ixgb_phy_type_unknown; | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | /* update phy type for sun specific board */ | ||
274 | if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) | ||
275 | phy_type = ixgb_phy_type_bcm; | ||
276 | |||
277 | return phy_type; | ||
278 | } | ||
279 | |||
280 | /****************************************************************************** | ||
281 | * Performs basic configuration of the adapter. | ||
282 | * | ||
283 | * hw - Struct containing variables accessed by shared code | ||
284 | * | ||
285 | * Resets the controller. | ||
286 | * Reads and validates the EEPROM. | ||
287 | * Initializes the receive address registers. | ||
288 | * Initializes the multicast table. | ||
289 | * Clears all on-chip counters. | ||
290 | * Calls routine to setup flow control settings. | ||
291 | * Leaves the transmit and receive units disabled and uninitialized. | ||
292 | * | ||
293 | * Returns: | ||
294 | * true if successful, | ||
295 | * false if unrecoverable problems were encountered. | ||
296 | *****************************************************************************/ | ||
297 | bool | ||
298 | ixgb_init_hw(struct ixgb_hw *hw) | ||
299 | { | ||
300 | u32 i; | ||
301 | u32 ctrl_reg; | ||
302 | bool status; | ||
303 | |||
304 | ENTER(); | ||
305 | |||
306 | /* Issue a global reset to the MAC. This will reset the chip's | ||
307 | 	 * transmit, receive, DMA, and link units.  It will not affect | 	||
308 | * the current PCI configuration. The global reset bit is self- | ||
309 | * clearing, and should clear within a microsecond. | ||
310 | */ | ||
311 | pr_debug("Issuing a global reset to MAC\n"); | ||
312 | |||
313 | ctrl_reg = ixgb_mac_reset(hw); | ||
314 | |||
315 | pr_debug("Issuing an EE reset to MAC\n"); | ||
316 | #ifdef HP_ZX1 | ||
317 | /* Workaround for 82597EX reset errata */ | ||
318 | IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST); | ||
319 | #else | ||
320 | IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST); | ||
321 | #endif | ||
322 | |||
323 | /* Delay a few ms just to allow the reset to complete */ | ||
324 | msleep(IXGB_DELAY_AFTER_EE_RESET); | ||
325 | |||
326 | if (!ixgb_get_eeprom_data(hw)) | ||
327 | return false; | ||
328 | |||
329 | /* Use the device id to determine the type of phy/transceiver. */ | ||
330 | hw->device_id = ixgb_get_ee_device_id(hw); | ||
331 | hw->phy_type = ixgb_identify_phy(hw); | ||
332 | |||
333 | /* Setup the receive addresses. | ||
334 | * Receive Address Registers (RARs 0 - 15). | ||
335 | */ | ||
336 | ixgb_init_rx_addrs(hw); | ||
337 | |||
338 | /* | ||
339 | * Check that a valid MAC address has been set. | ||
340 | * If it is not valid, we fail hardware init. | ||
341 | */ | ||
342 | if (!mac_addr_valid(hw->curr_mac_addr)) { | ||
343 | pr_debug("MAC address invalid after ixgb_init_rx_addrs\n"); | ||
344 | return(false); | ||
345 | } | ||
346 | |||
347 | /* tell the routines in this file they can access hardware again */ | ||
348 | hw->adapter_stopped = false; | ||
349 | |||
350 | /* Fill in the bus_info structure */ | ||
351 | ixgb_get_bus_info(hw); | ||
352 | |||
353 | /* Zero out the Multicast HASH table */ | ||
354 | pr_debug("Zeroing the MTA\n"); | ||
355 | for (i = 0; i < IXGB_MC_TBL_SIZE; i++) | ||
356 | IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); | ||
357 | |||
358 | /* Zero out the VLAN Filter Table Array */ | ||
359 | ixgb_clear_vfta(hw); | ||
360 | |||
361 | /* Zero all of the hardware counters */ | ||
362 | ixgb_clear_hw_cntrs(hw); | ||
363 | |||
364 | /* Call a subroutine to setup flow control. */ | ||
365 | status = ixgb_setup_fc(hw); | ||
366 | |||
367 | /* 82597EX errata: Call check-for-link in case lane deskew is locked */ | ||
368 | ixgb_check_for_link(hw); | ||
369 | |||
370 | return status; | ||
371 | } | ||
372 | |||
373 | /****************************************************************************** | ||
374 | * Initializes receive address filters. | ||
375 | * | ||
376 | * hw - Struct containing variables accessed by shared code | ||
377 | * | ||
378 | * Places the MAC address in receive address register 0 and clears the rest | ||
379 | * of the receive address registers. Clears the multicast table. Assumes | ||
380 | * the receiver is in reset when the routine is called. | ||
381 | *****************************************************************************/ | ||
382 | static void | ||
383 | ixgb_init_rx_addrs(struct ixgb_hw *hw) | ||
384 | { | ||
385 | u32 i; | ||
386 | |||
387 | ENTER(); | ||
388 | |||
389 | /* | ||
390 | * If the current mac address is valid, assume it is a software override | ||
391 | * to the permanent address. | ||
392 | * Otherwise, use the permanent address from the eeprom. | ||
393 | */ | ||
394 | if (!mac_addr_valid(hw->curr_mac_addr)) { | ||
395 | |||
396 | /* Get the MAC address from the eeprom for later reference */ | ||
397 | ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); | ||
398 | |||
399 | pr_debug("Keeping Permanent MAC Addr = %pM\n", | ||
400 | hw->curr_mac_addr); | ||
401 | } else { | ||
402 | |||
403 | /* Setup the receive address. */ | ||
404 | pr_debug("Overriding MAC Address in RAR[0]\n"); | ||
405 | pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr); | ||
406 | |||
407 | ixgb_rar_set(hw, hw->curr_mac_addr, 0); | ||
408 | } | ||
409 | |||
410 | /* Zero out the other 15 receive addresses. */ | ||
411 | pr_debug("Clearing RAR[1-15]\n"); | ||
412 | for (i = 1; i < IXGB_RAR_ENTRIES; i++) { | ||
413 | /* Write high reg first to disable the AV bit first */ | ||
414 | IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | ||
415 | IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /****************************************************************************** | ||
420 | * Updates the MAC's list of multicast addresses. | ||
421 | * | ||
422 | * hw - Struct containing variables accessed by shared code | ||
423 | * mc_addr_list - the list of new multicast addresses | ||
424 | * mc_addr_count - number of addresses | ||
425 | * pad - number of bytes between addresses in the list | ||
426 | * | ||
427 | * The given list replaces any existing list. Clears the last 15 receive | ||
428 | * address registers and the multicast table. Uses receive address registers | ||
429 | * for the first 15 multicast addresses, and hashes the rest into the | ||
430 | * multicast table. | ||
431 | *****************************************************************************/ | ||
432 | void | ||
433 | ixgb_mc_addr_list_update(struct ixgb_hw *hw, | ||
434 | u8 *mc_addr_list, | ||
435 | u32 mc_addr_count, | ||
436 | u32 pad) | ||
437 | { | ||
438 | u32 hash_value; | ||
439 | u32 i; | ||
440 | u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ | ||
441 | u8 *mca; | ||
442 | |||
443 | ENTER(); | ||
444 | |||
445 | /* Set the new number of MC addresses that we are being requested to use. */ | ||
446 | hw->num_mc_addrs = mc_addr_count; | ||
447 | |||
448 | /* Clear RAR[1-15] */ | ||
449 | pr_debug("Clearing RAR[1-15]\n"); | ||
450 | for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { | ||
451 | IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | ||
452 | IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | ||
453 | } | ||
454 | |||
455 | /* Clear the MTA */ | ||
456 | pr_debug("Clearing MTA\n"); | ||
457 | for (i = 0; i < IXGB_MC_TBL_SIZE; i++) | ||
458 | IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); | ||
459 | |||
460 | /* Add the new addresses */ | ||
461 | mca = mc_addr_list; | ||
462 | for (i = 0; i < mc_addr_count; i++) { | ||
463 | pr_debug("Adding the multicast addresses:\n"); | ||
464 | pr_debug("MC Addr #%d = %pM\n", i, mca); | ||
465 | |||
466 | 		/* Place this multicast address in the RAR if there is room, | 	||
467 | * else put it in the MTA | ||
468 | */ | ||
469 | if (rar_used_count < IXGB_RAR_ENTRIES) { | ||
470 | ixgb_rar_set(hw, mca, rar_used_count); | ||
471 | 			pr_debug("Added a multicast address to RAR[%d]\n", rar_used_count); | 	||
472 | rar_used_count++; | ||
473 | } else { | ||
474 | hash_value = ixgb_hash_mc_addr(hw, mca); | ||
475 | |||
476 | pr_debug("Hash value = 0x%03X\n", hash_value); | ||
477 | |||
478 | ixgb_mta_set(hw, hash_value); | ||
479 | } | ||
480 | |||
481 | mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad; | ||
482 | } | ||
483 | |||
484 | pr_debug("MC Update Complete\n"); | ||
485 | } | ||
486 | |||
487 | /****************************************************************************** | ||
488 | * Hashes an address to determine its location in the multicast table | ||
489 | * | ||
490 | * hw - Struct containing variables accessed by shared code | ||
491 | * mc_addr - the multicast address to hash | ||
492 | * | ||
493 | * Returns: | ||
494 | * The hash value | ||
495 | *****************************************************************************/ | ||
496 | static u32 | ||
497 | ixgb_hash_mc_addr(struct ixgb_hw *hw, | ||
498 | u8 *mc_addr) | ||
499 | { | ||
500 | u32 hash_value = 0; | ||
501 | |||
502 | ENTER(); | ||
503 | |||
504 | /* The portion of the address that is used for the hash table is | ||
505 | * determined by the mc_filter_type setting. | ||
506 | */ | ||
507 | switch (hw->mc_filter_type) { | ||
508 | /* [0] [1] [2] [3] [4] [5] | ||
509 | * 01 AA 00 12 34 56 | ||
510 | * LSB MSB - According to H/W docs */ | ||
511 | case 0: | ||
512 | /* [47:36] i.e. 0x563 for above example address */ | ||
513 | hash_value = | ||
514 | ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); | ||
515 | break; | ||
516 | case 1: /* [46:35] i.e. 0xAC6 for above example address */ | ||
517 | hash_value = | ||
518 | ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); | ||
519 | break; | ||
520 | case 2: /* [45:34] i.e. 0x5D8 for above example address */ | ||
521 | hash_value = | ||
522 | ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); | ||
523 | break; | ||
524 | case 3: /* [43:32] i.e. 0x634 for above example address */ | ||
525 | hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); | ||
526 | break; | ||
527 | default: | ||
528 | /* Invalid mc_filter_type, what should we do? */ | ||
529 | pr_debug("MC filter type param set incorrectly\n"); | ||
530 | ASSERT(0); | ||
531 | break; | ||
532 | } | ||
533 | |||
534 | hash_value &= 0xFFF; | ||
535 | return hash_value; | ||
536 | } | ||
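
For the default filter type 0, the hash is taken from bits [47:36] of the address, i.e. the top four bits of byte 4 combined with all of byte 5, giving a 12-bit table index. A quick user-space check of that bit extraction against the documented example address 01:AA:00:12:34:56 (expected hash 0x563), as a hedged stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

/* Filter type 0 from ixgb_hash_mc_addr(): bits [47:36] of the address,
 * with byte 5 as the most significant byte per the hardware docs. */
static uint32_t hash_type0(const uint8_t *mc_addr)
{
	return (((uint32_t)mc_addr[4] >> 4) |
		((uint32_t)mc_addr[5] << 4)) & 0xFFF;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	printf("hash = 0x%03X\n", hash_type0(addr));	/* prints 0x563 */
	return 0;
}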
537 | |||
538 | /****************************************************************************** | ||
539 | * Sets the bit in the multicast table corresponding to the hash value. | ||
540 | * | ||
541 | * hw - Struct containing variables accessed by shared code | ||
542 | * hash_value - Multicast address hash value | ||
543 | *****************************************************************************/ | ||
544 | static void | ||
545 | ixgb_mta_set(struct ixgb_hw *hw, | ||
546 | u32 hash_value) | ||
547 | { | ||
548 | u32 hash_bit, hash_reg; | ||
549 | u32 mta_reg; | ||
550 | |||
551 | /* The MTA is a register array of 128 32-bit registers. | ||
552 | * It is treated like an array of 4096 bits. We want to set | ||
553 | * bit BitArray[hash_value]. So we figure out what register | ||
554 | * the bit is in, read it, OR in the new bit, then write | ||
555 | * back the new value. The register is determined by the | ||
556 | * upper 7 bits of the hash value and the bit within that | ||
557 | * register are determined by the lower 5 bits of the value. | ||
558 | */ | ||
559 | hash_reg = (hash_value >> 5) & 0x7F; | ||
560 | hash_bit = hash_value & 0x1F; | ||
561 | |||
562 | mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg); | ||
563 | |||
564 | mta_reg |= (1 << hash_bit); | ||
565 | |||
566 | IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg); | ||
567 | } | ||
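
The 12-bit hash is then split into a register index (upper 7 bits) and a bit position (lower 5 bits), since the 4096-bit table is laid out as 128 32-bit words. A tiny sketch of that decomposition, with no register access (hypothetical helper, for illustration):

#include <stdio.h>
#include <stdint.h>

/* Split a 12-bit multicast hash into the MTA word and bit used by
 * ixgb_mta_set(): 128 x 32-bit words cover the 4096-bit table. */
static void mta_position(uint32_t hash_value, uint32_t *reg, uint32_t *bit)
{
	*reg = (hash_value >> 5) & 0x7F;
	*bit = hash_value & 0x1F;
}

int main(void)
{
	uint32_t reg, bit;

	mta_position(0x563, &reg, &bit);
	printf("hash 0x563 -> MTA[%u], bit %u (mask 0x%08X)\n",
	       reg, bit, 1u << bit);	/* MTA[43], bit 3 */
	return 0;
}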
568 | |||
569 | /****************************************************************************** | ||
570 | * Puts an ethernet address into a receive address register. | ||
571 | * | ||
572 | * hw - Struct containing variables accessed by shared code | ||
573 | * addr - Address to put into receive address register | ||
574 | * index - Receive address register to write | ||
575 | *****************************************************************************/ | ||
576 | void | ||
577 | ixgb_rar_set(struct ixgb_hw *hw, | ||
578 | u8 *addr, | ||
579 | u32 index) | ||
580 | { | ||
581 | u32 rar_low, rar_high; | ||
582 | |||
583 | ENTER(); | ||
584 | |||
585 | /* HW expects these in little endian so we reverse the byte order | ||
586 | * from network order (big endian) to little endian | ||
587 | */ | ||
588 | rar_low = ((u32) addr[0] | | ||
589 | ((u32)addr[1] << 8) | | ||
590 | ((u32)addr[2] << 16) | | ||
591 | ((u32)addr[3] << 24)); | ||
592 | |||
593 | rar_high = ((u32) addr[4] | | ||
594 | ((u32)addr[5] << 8) | | ||
595 | IXGB_RAH_AV); | ||
596 | |||
597 | IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | ||
598 | IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | ||
599 | } | ||
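
The six address bytes are packed little-endian into the RAR pair: bytes 0-3 go into the low register and bytes 4-5 into the high register together with the address-valid bit. A stand-alone sketch of that packing (the valid-bit value below is illustrative, not the real IXGB_RAH_AV definition):

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_RAH_AV	0x80000000u	/* illustrative "address valid" bit */

/* Pack a MAC address the way ixgb_rar_set() does: network byte order
 * in, little-endian register pair out. */
static void pack_rar(const uint8_t *addr, uint32_t *rar_low, uint32_t *rar_high)
{
	*rar_low  = (uint32_t)addr[0] |
		    ((uint32_t)addr[1] << 8) |
		    ((uint32_t)addr[2] << 16) |
		    ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] |
		    ((uint32_t)addr[5] << 8) |
		    EXAMPLE_RAH_AV;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0xA0, 0xC9, 0x12, 0x34, 0x56 };
	uint32_t lo, hi;

	pack_rar(mac, &lo, &hi);
	printf("RAL = 0x%08X, RAH = 0x%08X\n", lo, hi);
	return 0;
}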
600 | |||
601 | /****************************************************************************** | ||
602 | * Writes a value to the specified offset in the VLAN filter table. | ||
603 | * | ||
604 | * hw - Struct containing variables accessed by shared code | ||
605 |  * offset - Offset in VLAN filter table to write | 	||
606 | * value - Value to write into VLAN filter table | ||
607 | *****************************************************************************/ | ||
608 | void | ||
609 | ixgb_write_vfta(struct ixgb_hw *hw, | ||
610 | u32 offset, | ||
611 | u32 value) | ||
612 | { | ||
613 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); | ||
614 | } | ||
615 | |||
616 | /****************************************************************************** | ||
617 |  * Clears the VLAN filter table | 	||
618 | * | ||
619 | * hw - Struct containing variables accessed by shared code | ||
620 | *****************************************************************************/ | ||
621 | static void | ||
622 | ixgb_clear_vfta(struct ixgb_hw *hw) | ||
623 | { | ||
624 | u32 offset; | ||
625 | |||
626 | for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) | ||
627 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); | ||
628 | } | ||
629 | |||
630 | /****************************************************************************** | ||
631 | * Configures the flow control settings based on SW configuration. | ||
632 | * | ||
633 | * hw - Struct containing variables accessed by shared code | ||
634 | *****************************************************************************/ | ||
635 | |||
636 | static bool | ||
637 | ixgb_setup_fc(struct ixgb_hw *hw) | ||
638 | { | ||
639 | u32 ctrl_reg; | ||
640 | u32 pap_reg = 0; /* by default, assume no pause time */ | ||
641 | bool status = true; | ||
642 | |||
643 | ENTER(); | ||
644 | |||
645 | /* Get the current control reg 0 settings */ | ||
646 | ctrl_reg = IXGB_READ_REG(hw, CTRL0); | ||
647 | |||
648 | /* Clear the Receive Pause Enable and Transmit Pause Enable bits */ | ||
649 | ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); | ||
650 | |||
651 | /* The possible values of the "flow_control" parameter are: | ||
652 | * 0: Flow control is completely disabled | ||
653 | * 1: Rx flow control is enabled (we can receive pause frames | ||
654 | * but not send pause frames). | ||
655 | * 2: Tx flow control is enabled (we can send pause frames | ||
656 | * but we do not support receiving pause frames). | ||
657 | * 3: Both Rx and TX flow control (symmetric) are enabled. | ||
658 | * other: Invalid. | ||
659 | */ | ||
660 | switch (hw->fc.type) { | ||
661 | case ixgb_fc_none: /* 0 */ | ||
662 | /* Set CMDC bit to disable Rx Flow control */ | ||
663 | ctrl_reg |= (IXGB_CTRL0_CMDC); | ||
664 | break; | ||
665 | case ixgb_fc_rx_pause: /* 1 */ | ||
666 | /* RX Flow control is enabled, and TX Flow control is | ||
667 | * disabled. | ||
668 | */ | ||
669 | ctrl_reg |= (IXGB_CTRL0_RPE); | ||
670 | break; | ||
671 | case ixgb_fc_tx_pause: /* 2 */ | ||
672 | /* TX Flow control is enabled, and RX Flow control is | ||
673 | 		 * disabled by a software override. | 	||
674 | */ | ||
675 | ctrl_reg |= (IXGB_CTRL0_TPE); | ||
676 | pap_reg = hw->fc.pause_time; | ||
677 | break; | ||
678 | case ixgb_fc_full: /* 3 */ | ||
679 | /* Flow control (both RX and TX) is enabled by a software | ||
680 | 		 * override. | 	||
681 | */ | ||
682 | ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); | ||
683 | pap_reg = hw->fc.pause_time; | ||
684 | break; | ||
685 | default: | ||
686 | /* We should never get here. The value should be 0-3. */ | ||
687 | pr_debug("Flow control param set incorrectly\n"); | ||
688 | ASSERT(0); | ||
689 | break; | ||
690 | } | ||
691 | |||
692 | /* Write the new settings */ | ||
693 | IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); | ||
694 | |||
695 | if (pap_reg != 0) | ||
696 | IXGB_WRITE_REG(hw, PAP, pap_reg); | ||
697 | |||
698 | /* Set the flow control receive threshold registers. Normally, | ||
699 | * these registers will be set to a default threshold that may be | ||
700 | * adjusted later by the driver's runtime code. However, if the | ||
701 | 	 * ability to transmit pause frames is not enabled, then these | 	||
702 | * registers will be set to 0. | ||
703 | */ | ||
704 | if (!(hw->fc.type & ixgb_fc_tx_pause)) { | ||
705 | IXGB_WRITE_REG(hw, FCRTL, 0); | ||
706 | IXGB_WRITE_REG(hw, FCRTH, 0); | ||
707 | } else { | ||
708 | /* We need to set up the Receive Threshold high and low water | ||
709 | * marks as well as (optionally) enabling the transmission of XON | ||
710 | * frames. */ | ||
711 | if (hw->fc.send_xon) { | ||
712 | IXGB_WRITE_REG(hw, FCRTL, | ||
713 | (hw->fc.low_water | IXGB_FCRTL_XONE)); | ||
714 | } else { | ||
715 | IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water); | ||
716 | } | ||
717 | IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water); | ||
718 | } | ||
719 | return status; | ||
720 | } | ||
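
The four flow-control settings map onto just two CTRL0 bits (RPE for receiving pause frames, TPE for sending them), with the pause time written only when transmit pause is enabled. A small table-driven sketch of that mapping, using placeholder bit values rather than the real IXGB_CTRL0_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit values; the real IXGB_CTRL0_RPE/TPE come from ixgb_hw.h. */
#define EX_RPE	0x1u	/* receive pause enable */
#define EX_TPE	0x2u	/* transmit pause enable */

/* 0 = none, 1 = rx pause, 2 = tx pause, 3 = full, as described above. */
static uint32_t fc_bits(int fc_type)
{
	switch (fc_type) {
	case 1:  return EX_RPE;
	case 2:  return EX_TPE;
	case 3:  return EX_RPE | EX_TPE;
	default: return 0;
	}
}

int main(void)
{
	for (int t = 0; t <= 3; t++)
		printf("fc type %d -> ctrl bits 0x%X%s\n", t, fc_bits(t),
		       (fc_bits(t) & EX_TPE) ? " (pause time also written)" : "");
	return 0;
}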
721 | |||
722 | /****************************************************************************** | ||
723 | * Reads a word from a device over the Management Data Interface (MDI) bus. | ||
724 | * This interface is used to manage Physical layer devices. | ||
725 | * | ||
726 | * hw - Struct containing variables accessed by hw code | ||
727 | * reg_address - Offset of device register being read. | ||
728 | * phy_address - Address of device on MDI. | ||
729 | * | ||
730 | * Returns: Data word (16 bits) from MDI device. | ||
731 | * | ||
732 | * The 82597EX has support for several MDI access methods. This routine | ||
733 | * uses the new protocol MDI Single Command and Address Operation. | ||
734 | * This requires that first an address cycle command is sent, followed by a | ||
735 | * read command. | ||
736 | *****************************************************************************/ | ||
737 | static u16 | ||
738 | ixgb_read_phy_reg(struct ixgb_hw *hw, | ||
739 | u32 reg_address, | ||
740 | u32 phy_address, | ||
741 | u32 device_type) | ||
742 | { | ||
743 | u32 i; | ||
744 | u32 data; | ||
745 | u32 command = 0; | ||
746 | |||
747 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); | ||
748 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); | ||
749 | ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); | ||
750 | |||
751 | /* Setup and write the address cycle command */ | ||
752 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | | ||
753 | (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | | ||
754 | (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | | ||
755 | (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); | ||
756 | |||
757 | IXGB_WRITE_REG(hw, MSCA, command); | ||
758 | |||
759 | /************************************************************** | ||
760 | ** Check every 10 usec to see if the address cycle completed | ||
761 | ** The COMMAND bit will clear when the operation is complete. | ||
762 | ** This may take as long as 64 usecs (we'll wait 100 usecs max) | ||
763 | ** from the CPU Write to the Ready bit assertion. | ||
764 | **************************************************************/ | ||
765 | |||
766 | for (i = 0; i < 10; i++) | ||
767 | { | ||
768 | udelay(10); | ||
769 | |||
770 | command = IXGB_READ_REG(hw, MSCA); | ||
771 | |||
772 | if ((command & IXGB_MSCA_MDI_COMMAND) == 0) | ||
773 | break; | ||
774 | } | ||
775 | |||
776 | ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); | ||
777 | |||
778 | /* Address cycle complete, setup and write the read command */ | ||
779 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | | ||
780 | (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | | ||
781 | (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | | ||
782 | (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND)); | ||
783 | |||
784 | IXGB_WRITE_REG(hw, MSCA, command); | ||
785 | |||
786 | /************************************************************** | ||
787 | ** Check every 10 usec to see if the read command completed | ||
788 | ** The COMMAND bit will clear when the operation is complete. | ||
789 | ** The read may take as long as 64 usecs (we'll wait 100 usecs max) | ||
790 | ** from the CPU Write to the Ready bit assertion. | ||
791 | **************************************************************/ | ||
792 | |||
793 | for (i = 0; i < 10; i++) | ||
794 | { | ||
795 | udelay(10); | ||
796 | |||
797 | command = IXGB_READ_REG(hw, MSCA); | ||
798 | |||
799 | if ((command & IXGB_MSCA_MDI_COMMAND) == 0) | ||
800 | break; | ||
801 | } | ||
802 | |||
803 | ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); | ||
804 | |||
805 | /* Operation is complete, get the data from the MDIO Read/Write Data | ||
806 | * register and return. | ||
807 | */ | ||
808 | data = IXGB_READ_REG(hw, MSRWD); | ||
809 | data >>= IXGB_MSRWD_READ_DATA_SHIFT; | ||
810 | return((u16) data); | ||
811 | } | ||
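
Each MDI access above is two MSCA writes (an address cycle, then the read or write command), and after each one the COMMAND bit is polled every 10 microseconds until it clears, for roughly 100 microseconds at most. A generic sketch of that poll-until-clear pattern with a stubbed register read (the stub and names are hypothetical, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EX_MDI_COMMAND_BIT	0x40000000u	/* illustrative busy bit */

/* Stub standing in for a register read; pretends the command bit
 * clears after three polls. */
static uint32_t read_msca_stub(void)
{
	static int polls;
	return (++polls < 3) ? EX_MDI_COMMAND_BIT : 0;
}

/* Poll-until-clear loop matching the shape used in ixgb_read_phy_reg():
 * up to 10 polls, nominally 10 us apart (the delay is elided here). */
static bool wait_mdi_idle(void)
{
	for (int i = 0; i < 10; i++) {
		/* udelay(10) would go here in the driver. */
		if (!(read_msca_stub() & EX_MDI_COMMAND_BIT))
			return true;
	}
	return false;
}

int main(void)
{
	printf("MDI command %s\n", wait_mdi_idle() ? "completed" : "timed out");
	return 0;
}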
812 | |||
813 | /****************************************************************************** | ||
814 | * Writes a word to a device over the Management Data Interface (MDI) bus. | ||
815 | * This interface is used to manage Physical layer devices. | ||
816 | * | ||
817 | * hw - Struct containing variables accessed by hw code | ||
818 | * reg_address - Offset of device register being read. | ||
819 | * phy_address - Address of device on MDI. | ||
820 | * device_type - Also known as the Device ID or DID. | ||
821 | * data - 16-bit value to be written | ||
822 | * | ||
823 | * Returns: void. | ||
824 | * | ||
825 | * The 82597EX has support for several MDI access methods. This routine | ||
826 | * uses the new protocol MDI Single Command and Address Operation. | ||
827 | * This requires that first an address cycle command is sent, followed by a | ||
828 | * write command. | ||
829 | *****************************************************************************/ | ||
830 | static void | ||
831 | ixgb_write_phy_reg(struct ixgb_hw *hw, | ||
832 | u32 reg_address, | ||
833 | u32 phy_address, | ||
834 | u32 device_type, | ||
835 | u16 data) | ||
836 | { | ||
837 | u32 i; | ||
838 | u32 command = 0; | ||
839 | |||
840 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); | ||
841 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); | ||
842 | ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); | ||
843 | |||
844 | /* Put the data in the MDIO Read/Write Data register */ | ||
845 | IXGB_WRITE_REG(hw, MSRWD, (u32)data); | ||
846 | |||
847 | /* Setup and write the address cycle command */ | ||
848 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | | ||
849 | (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | | ||
850 | (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | | ||
851 | (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); | ||
852 | |||
853 | IXGB_WRITE_REG(hw, MSCA, command); | ||
854 | |||
855 | /************************************************************** | ||
856 | ** Check every 10 usec to see if the address cycle completed | ||
857 | ** The COMMAND bit will clear when the operation is complete. | ||
858 | ** This may take as long as 64 usecs (we'll wait 100 usecs max) | ||
859 | ** from the CPU Write to the Ready bit assertion. | ||
860 | **************************************************************/ | ||
861 | |||
862 | for (i = 0; i < 10; i++) | ||
863 | { | ||
864 | udelay(10); | ||
865 | |||
866 | command = IXGB_READ_REG(hw, MSCA); | ||
867 | |||
868 | if ((command & IXGB_MSCA_MDI_COMMAND) == 0) | ||
869 | break; | ||
870 | } | ||
871 | |||
872 | ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); | ||
873 | |||
874 | /* Address cycle complete, setup and write the write command */ | ||
875 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | | ||
876 | (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | | ||
877 | (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | | ||
878 | (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND)); | ||
879 | |||
880 | IXGB_WRITE_REG(hw, MSCA, command); | ||
881 | |||
882 | /************************************************************** | ||
883 | 	** Check every 10 usec to see if the write command completed | 	||
884 | ** The COMMAND bit will clear when the operation is complete. | ||
885 | ** The write may take as long as 64 usecs (we'll wait 100 usecs max) | ||
886 | ** from the CPU Write to the Ready bit assertion. | ||
887 | **************************************************************/ | ||
888 | |||
889 | for (i = 0; i < 10; i++) | ||
890 | { | ||
891 | udelay(10); | ||
892 | |||
893 | command = IXGB_READ_REG(hw, MSCA); | ||
894 | |||
895 | if ((command & IXGB_MSCA_MDI_COMMAND) == 0) | ||
896 | break; | ||
897 | } | ||
898 | |||
899 | ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); | ||
900 | |||
901 | /* Operation is complete, return. */ | ||
902 | } | ||
903 | |||
904 | /****************************************************************************** | ||
905 | * Checks to see if the link status of the hardware has changed. | ||
906 | * | ||
907 | * hw - Struct containing variables accessed by hw code | ||
908 | * | ||
909 | * Called by any function that needs to check the link status of the adapter. | ||
910 | *****************************************************************************/ | ||
911 | void | ||
912 | ixgb_check_for_link(struct ixgb_hw *hw) | ||
913 | { | ||
914 | u32 status_reg; | ||
915 | u32 xpcss_reg; | ||
916 | |||
917 | ENTER(); | ||
918 | |||
919 | xpcss_reg = IXGB_READ_REG(hw, XPCSS); | ||
920 | status_reg = IXGB_READ_REG(hw, STATUS); | ||
921 | |||
922 | if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && | ||
923 | (status_reg & IXGB_STATUS_LU)) { | ||
924 | hw->link_up = true; | ||
925 | } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && | ||
926 | (status_reg & IXGB_STATUS_LU)) { | ||
927 | pr_debug("XPCSS Not Aligned while Status:LU is set\n"); | ||
928 | hw->link_up = ixgb_link_reset(hw); | ||
929 | } else { | ||
930 | /* | ||
931 | * 82597EX errata. Since the lane deskew problem may prevent | ||
932 | * link, reset the link before reporting link down. | ||
933 | */ | ||
934 | hw->link_up = ixgb_link_reset(hw); | ||
935 | } | ||
936 | /* Anything else for 10 Gig?? */ | ||
937 | } | ||
938 | |||
939 | /****************************************************************************** | ||
940 | * Check for a bad link condition that may have occurred. | ||
941 | * The indication is that the RFC / LFC registers may be incrementing | ||
942 | * continually. A full adapter reset is required to recover. | ||
943 | * | ||
944 | * hw - Struct containing variables accessed by hw code | ||
945 | * | ||
946 | * Called by any function that needs to check the link status of the adapter. | ||
947 | *****************************************************************************/ | ||
948 | bool ixgb_check_for_bad_link(struct ixgb_hw *hw) | ||
949 | { | ||
950 | u32 newLFC, newRFC; | ||
951 | bool bad_link_returncode = false; | ||
952 | |||
953 | if (hw->phy_type == ixgb_phy_type_txn17401) { | ||
954 | newLFC = IXGB_READ_REG(hw, LFC); | ||
955 | newRFC = IXGB_READ_REG(hw, RFC); | ||
956 | if ((hw->lastLFC + 250 < newLFC) | ||
957 | || (hw->lastRFC + 250 < newRFC)) { | ||
958 | pr_debug("BAD LINK! too many LFC/RFC since last check\n"); | ||
959 | bad_link_returncode = true; | ||
960 | } | ||
961 | hw->lastLFC = newLFC; | ||
962 | hw->lastRFC = newRFC; | ||
963 | } | ||
964 | |||
965 | return bad_link_returncode; | ||
966 | } | ||
967 | |||
968 | /****************************************************************************** | ||
969 | * Clears all hardware statistics counters. | ||
970 | * | ||
971 | * hw - Struct containing variables accessed by shared code | ||
972 | *****************************************************************************/ | ||
973 | static void | ||
974 | ixgb_clear_hw_cntrs(struct ixgb_hw *hw) | ||
975 | { | ||
976 | volatile u32 temp_reg; | ||
977 | |||
978 | ENTER(); | ||
979 | |||
980 | /* if we are stopped or resetting exit gracefully */ | ||
981 | if (hw->adapter_stopped) { | ||
982 | pr_debug("Exiting because the adapter is stopped!!!\n"); | ||
983 | return; | ||
984 | } | ||
985 | |||
986 | temp_reg = IXGB_READ_REG(hw, TPRL); | ||
987 | temp_reg = IXGB_READ_REG(hw, TPRH); | ||
988 | temp_reg = IXGB_READ_REG(hw, GPRCL); | ||
989 | temp_reg = IXGB_READ_REG(hw, GPRCH); | ||
990 | temp_reg = IXGB_READ_REG(hw, BPRCL); | ||
991 | temp_reg = IXGB_READ_REG(hw, BPRCH); | ||
992 | temp_reg = IXGB_READ_REG(hw, MPRCL); | ||
993 | temp_reg = IXGB_READ_REG(hw, MPRCH); | ||
994 | temp_reg = IXGB_READ_REG(hw, UPRCL); | ||
995 | temp_reg = IXGB_READ_REG(hw, UPRCH); | ||
996 | temp_reg = IXGB_READ_REG(hw, VPRCL); | ||
997 | temp_reg = IXGB_READ_REG(hw, VPRCH); | ||
998 | temp_reg = IXGB_READ_REG(hw, JPRCL); | ||
999 | temp_reg = IXGB_READ_REG(hw, JPRCH); | ||
1000 | temp_reg = IXGB_READ_REG(hw, GORCL); | ||
1001 | temp_reg = IXGB_READ_REG(hw, GORCH); | ||
1002 | temp_reg = IXGB_READ_REG(hw, TORL); | ||
1003 | temp_reg = IXGB_READ_REG(hw, TORH); | ||
1004 | temp_reg = IXGB_READ_REG(hw, RNBC); | ||
1005 | temp_reg = IXGB_READ_REG(hw, RUC); | ||
1006 | temp_reg = IXGB_READ_REG(hw, ROC); | ||
1007 | temp_reg = IXGB_READ_REG(hw, RLEC); | ||
1008 | temp_reg = IXGB_READ_REG(hw, CRCERRS); | ||
1009 | temp_reg = IXGB_READ_REG(hw, ICBC); | ||
1010 | temp_reg = IXGB_READ_REG(hw, ECBC); | ||
1011 | temp_reg = IXGB_READ_REG(hw, MPC); | ||
1012 | temp_reg = IXGB_READ_REG(hw, TPTL); | ||
1013 | temp_reg = IXGB_READ_REG(hw, TPTH); | ||
1014 | temp_reg = IXGB_READ_REG(hw, GPTCL); | ||
1015 | temp_reg = IXGB_READ_REG(hw, GPTCH); | ||
1016 | temp_reg = IXGB_READ_REG(hw, BPTCL); | ||
1017 | temp_reg = IXGB_READ_REG(hw, BPTCH); | ||
1018 | temp_reg = IXGB_READ_REG(hw, MPTCL); | ||
1019 | temp_reg = IXGB_READ_REG(hw, MPTCH); | ||
1020 | temp_reg = IXGB_READ_REG(hw, UPTCL); | ||
1021 | temp_reg = IXGB_READ_REG(hw, UPTCH); | ||
1022 | temp_reg = IXGB_READ_REG(hw, VPTCL); | ||
1023 | temp_reg = IXGB_READ_REG(hw, VPTCH); | ||
1024 | temp_reg = IXGB_READ_REG(hw, JPTCL); | ||
1025 | temp_reg = IXGB_READ_REG(hw, JPTCH); | ||
1026 | temp_reg = IXGB_READ_REG(hw, GOTCL); | ||
1027 | temp_reg = IXGB_READ_REG(hw, GOTCH); | ||
1028 | temp_reg = IXGB_READ_REG(hw, TOTL); | ||
1029 | temp_reg = IXGB_READ_REG(hw, TOTH); | ||
1030 | temp_reg = IXGB_READ_REG(hw, DC); | ||
1031 | temp_reg = IXGB_READ_REG(hw, PLT64C); | ||
1032 | temp_reg = IXGB_READ_REG(hw, TSCTC); | ||
1033 | temp_reg = IXGB_READ_REG(hw, TSCTFC); | ||
1034 | temp_reg = IXGB_READ_REG(hw, IBIC); | ||
1035 | temp_reg = IXGB_READ_REG(hw, RFC); | ||
1036 | temp_reg = IXGB_READ_REG(hw, LFC); | ||
1037 | temp_reg = IXGB_READ_REG(hw, PFRC); | ||
1038 | temp_reg = IXGB_READ_REG(hw, PFTC); | ||
1039 | temp_reg = IXGB_READ_REG(hw, MCFRC); | ||
1040 | temp_reg = IXGB_READ_REG(hw, MCFTC); | ||
1041 | temp_reg = IXGB_READ_REG(hw, XONRXC); | ||
1042 | temp_reg = IXGB_READ_REG(hw, XONTXC); | ||
1043 | temp_reg = IXGB_READ_REG(hw, XOFFRXC); | ||
1044 | temp_reg = IXGB_READ_REG(hw, XOFFTXC); | ||
1045 | temp_reg = IXGB_READ_REG(hw, RJC); | ||
1046 | } | ||
1047 | |||
1048 | /****************************************************************************** | ||
1049 | * Turns on the software controllable LED | ||
1050 | * | ||
1051 | * hw - Struct containing variables accessed by shared code | ||
1052 | *****************************************************************************/ | ||
1053 | void | ||
1054 | ixgb_led_on(struct ixgb_hw *hw) | ||
1055 | { | ||
1056 | u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); | ||
1057 | |||
1058 | /* To turn on the LED, clear software-definable pin 0 (SDP0). */ | ||
1059 | ctrl0_reg &= ~IXGB_CTRL0_SDP0; | ||
1060 | IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); | ||
1061 | } | ||
1062 | |||
1063 | /****************************************************************************** | ||
1064 | * Turns off the software controllable LED | ||
1065 | * | ||
1066 | * hw - Struct containing variables accessed by shared code | ||
1067 | *****************************************************************************/ | ||
1068 | void | ||
1069 | ixgb_led_off(struct ixgb_hw *hw) | ||
1070 | { | ||
1071 | u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); | ||
1072 | |||
1073 | /* To turn off the LED, set software-definable pin 0 (SDP0). */ | ||
1074 | ctrl0_reg |= IXGB_CTRL0_SDP0; | ||
1075 | IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); | ||
1076 | } | ||
1077 | |||
1078 | /****************************************************************************** | ||
1079 | * Gets the current PCI bus type, speed, and width of the hardware | ||
1080 | * | ||
1081 | * hw - Struct containing variables accessed by shared code | ||
1082 | *****************************************************************************/ | ||
1083 | static void | ||
1084 | ixgb_get_bus_info(struct ixgb_hw *hw) | ||
1085 | { | ||
1086 | u32 status_reg; | ||
1087 | |||
1088 | status_reg = IXGB_READ_REG(hw, STATUS); | ||
1089 | |||
1090 | hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ? | ||
1091 | ixgb_bus_type_pcix : ixgb_bus_type_pci; | ||
1092 | |||
1093 | if (hw->bus.type == ixgb_bus_type_pci) { | ||
1094 | hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ? | ||
1095 | ixgb_bus_speed_66 : ixgb_bus_speed_33; | ||
1096 | } else { | ||
1097 | switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) { | ||
1098 | case IXGB_STATUS_PCIX_SPD_66: | ||
1099 | hw->bus.speed = ixgb_bus_speed_66; | ||
1100 | break; | ||
1101 | case IXGB_STATUS_PCIX_SPD_100: | ||
1102 | hw->bus.speed = ixgb_bus_speed_100; | ||
1103 | break; | ||
1104 | case IXGB_STATUS_PCIX_SPD_133: | ||
1105 | hw->bus.speed = ixgb_bus_speed_133; | ||
1106 | break; | ||
1107 | default: | ||
1108 | hw->bus.speed = ixgb_bus_speed_reserved; | ||
1109 | break; | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ? | ||
1114 | ixgb_bus_width_64 : ixgb_bus_width_32; | ||
1115 | } | ||
1116 | |||
1117 | /****************************************************************************** | ||
1118 | * Tests a MAC address to ensure it is a valid Individual Address | ||
1119 | * | ||
1120 | * mac_addr - pointer to MAC address. | ||
1121 | * | ||
1122 | *****************************************************************************/ | ||
1123 | static bool | ||
1124 | mac_addr_valid(u8 *mac_addr) | ||
1125 | { | ||
1126 | bool is_valid = true; | ||
1127 | ENTER(); | ||
1128 | |||
1129 | /* Make sure it is not a multicast address */ | ||
1130 | if (is_multicast_ether_addr(mac_addr)) { | ||
1131 | pr_debug("MAC address is multicast\n"); | ||
1132 | is_valid = false; | ||
1133 | } | ||
1134 | /* Not a broadcast address */ | ||
1135 | else if (is_broadcast_ether_addr(mac_addr)) { | ||
1136 | pr_debug("MAC address is broadcast\n"); | ||
1137 | is_valid = false; | ||
1138 | } | ||
1139 | /* Reject the zero address */ | ||
1140 | else if (is_zero_ether_addr(mac_addr)) { | ||
1141 | pr_debug("MAC address is all zeros\n"); | ||
1142 | is_valid = false; | ||
1143 | } | ||
1144 | return is_valid; | ||
1145 | } | ||
1146 | |||
1147 | /****************************************************************************** | ||
1148 | * Resets the 10GbE link. Waits the settle time and returns the state of | ||
1149 | * the link. | ||
1150 | * | ||
1151 | * hw - Struct containing variables accessed by shared code | ||
1152 | *****************************************************************************/ | ||
1153 | static bool | ||
1154 | ixgb_link_reset(struct ixgb_hw *hw) | ||
1155 | { | ||
1156 | bool link_status = false; | ||
1157 | u8 wait_retries = MAX_RESET_ITERATIONS; | ||
1158 | u8 lrst_retries = MAX_RESET_ITERATIONS; | ||
1159 | |||
1160 | do { | ||
1161 | /* Reset the link */ | ||
1162 | IXGB_WRITE_REG(hw, CTRL0, | ||
1163 | IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST); | ||
1164 | |||
1165 | /* Wait for link-up and lane re-alignment */ | ||
1166 | do { | ||
1167 | udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET); | ||
1168 | link_status = | ||
1169 | ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) | ||
1170 | && (IXGB_READ_REG(hw, XPCSS) & | ||
1171 | IXGB_XPCSS_ALIGN_STATUS)) ? true : false; | ||
1172 | } while (!link_status && --wait_retries); | ||
1173 | |||
1174 | } while (!link_status && --lrst_retries); | ||
1175 | |||
1176 | return link_status; | ||
1177 | } | ||
1178 | |||
1179 | /****************************************************************************** | ||
1180 | * Resets the 10GbE optics module. | ||
1181 | * | ||
1182 | * hw - Struct containing variables accessed by shared code | ||
1183 | *****************************************************************************/ | ||
1184 | static void | ||
1185 | ixgb_optics_reset(struct ixgb_hw *hw) | ||
1186 | { | ||
1187 | if (hw->phy_type == ixgb_phy_type_txn17401) { | ||
1188 | u16 mdio_reg; | ||
1189 | |||
1190 | ixgb_write_phy_reg(hw, | ||
1191 | MDIO_CTRL1, | ||
1192 | IXGB_PHY_ADDRESS, | ||
1193 | MDIO_MMD_PMAPMD, | ||
1194 | MDIO_CTRL1_RESET); | ||
1195 | |||
1196 | mdio_reg = ixgb_read_phy_reg(hw, | ||
1197 | MDIO_CTRL1, | ||
1198 | IXGB_PHY_ADDRESS, | ||
1199 | MDIO_MMD_PMAPMD); | ||
1200 | } | ||
1201 | } | ||
1202 | |||
1203 | /****************************************************************************** | ||
1204 | * Resets the 10GbE optics module for Sun variant NIC. | ||
1205 | * | ||
1206 | * hw - Struct containing variables accessed by shared code | ||
1207 | *****************************************************************************/ | ||
1208 | |||
1209 | #define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803 | ||
1210 | #define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164 | ||
1211 | #define IXGB_BCM8704_USER_CTRL_REG 0xC800 | ||
1212 | #define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF | ||
1213 | #define IXGB_BCM8704_USER_DEV3_ADDR 0x0003 | ||
1214 | #define IXGB_SUN_PHY_ADDRESS 0x0000 | ||
1215 | #define IXGB_SUN_PHY_RESET_DELAY 305 | ||
1216 | |||
1217 | static void | ||
1218 | ixgb_optics_reset_bcm(struct ixgb_hw *hw) | ||
1219 | { | ||
1220 | u32 ctrl = IXGB_READ_REG(hw, CTRL0); | ||
1221 | ctrl &= ~IXGB_CTRL0_SDP2; | ||
1222 | ctrl |= IXGB_CTRL0_SDP3; | ||
1223 | IXGB_WRITE_REG(hw, CTRL0, ctrl); | ||
1224 | IXGB_WRITE_FLUSH(hw); | ||
1225 | |||
1226 | /* SerDes needs extra delay */ | ||
1227 | msleep(IXGB_SUN_PHY_RESET_DELAY); | ||
1228 | |||
1229 | /* Broadcom 7408L configuration */ | ||
1230 | /* Reference clock config */ | ||
1231 | ixgb_write_phy_reg(hw, | ||
1232 | IXGB_BCM8704_USER_PMD_TX_CTRL_REG, | ||
1233 | IXGB_SUN_PHY_ADDRESS, | ||
1234 | IXGB_BCM8704_USER_DEV3_ADDR, | ||
1235 | IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL); | ||
1236 | /* we must read the registers twice */ | ||
1237 | ixgb_read_phy_reg(hw, | ||
1238 | IXGB_BCM8704_USER_PMD_TX_CTRL_REG, | ||
1239 | IXGB_SUN_PHY_ADDRESS, | ||
1240 | IXGB_BCM8704_USER_DEV3_ADDR); | ||
1241 | ixgb_read_phy_reg(hw, | ||
1242 | IXGB_BCM8704_USER_PMD_TX_CTRL_REG, | ||
1243 | IXGB_SUN_PHY_ADDRESS, | ||
1244 | IXGB_BCM8704_USER_DEV3_ADDR); | ||
1245 | |||
1246 | ixgb_write_phy_reg(hw, | ||
1247 | IXGB_BCM8704_USER_CTRL_REG, | ||
1248 | IXGB_SUN_PHY_ADDRESS, | ||
1249 | IXGB_BCM8704_USER_DEV3_ADDR, | ||
1250 | IXGB_BCM8704_USER_CTRL_REG_VAL); | ||
1251 | ixgb_read_phy_reg(hw, | ||
1252 | IXGB_BCM8704_USER_CTRL_REG, | ||
1253 | IXGB_SUN_PHY_ADDRESS, | ||
1254 | IXGB_BCM8704_USER_DEV3_ADDR); | ||
1255 | ixgb_read_phy_reg(hw, | ||
1256 | IXGB_BCM8704_USER_CTRL_REG, | ||
1257 | IXGB_SUN_PHY_ADDRESS, | ||
1258 | IXGB_BCM8704_USER_DEV3_ADDR); | ||
1259 | |||
1260 | /* SerDes needs extra delay */ | ||
1261 | msleep(IXGB_SUN_PHY_RESET_DELAY); | ||
1262 | } | ||
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h new file mode 100644 index 00000000000..873d32b89fb --- /dev/null +++ b/drivers/net/ixgb/ixgb_hw.h | |||
@@ -0,0 +1,801 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _IXGB_HW_H_ | ||
30 | #define _IXGB_HW_H_ | ||
31 | |||
32 | #include <linux/mdio.h> | ||
33 | |||
34 | #include "ixgb_osdep.h" | ||
35 | |||
36 | /* Enums */ | ||
37 | typedef enum { | ||
38 | ixgb_mac_unknown = 0, | ||
39 | ixgb_82597, | ||
40 | ixgb_num_macs | ||
41 | } ixgb_mac_type; | ||
42 | |||
43 | /* Types of physical layer modules */ | ||
44 | typedef enum { | ||
45 | ixgb_phy_type_unknown = 0, | ||
46 | ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */ | ||
47 | ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */ | ||
48 | ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */ | ||
49 | ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */ | ||
50 | ixgb_phy_type_bcm /* SUN specific board */ | ||
51 | } ixgb_phy_type; | ||
52 | |||
53 | /* XPAK transceiver vendors, for the SR adapters */ | ||
54 | typedef enum { | ||
55 | ixgb_xpak_vendor_intel, | ||
56 | ixgb_xpak_vendor_infineon | ||
57 | } ixgb_xpak_vendor; | ||
58 | |||
59 | /* Media Types */ | ||
60 | typedef enum { | ||
61 | ixgb_media_type_unknown = 0, | ||
62 | ixgb_media_type_fiber = 1, | ||
63 | ixgb_media_type_copper = 2, | ||
64 | ixgb_num_media_types | ||
65 | } ixgb_media_type; | ||
66 | |||
67 | /* Flow Control Settings */ | ||
68 | typedef enum { | ||
69 | ixgb_fc_none = 0, | ||
70 | ixgb_fc_rx_pause = 1, | ||
71 | ixgb_fc_tx_pause = 2, | ||
72 | ixgb_fc_full = 3, | ||
73 | ixgb_fc_default = 0xFF | ||
74 | } ixgb_fc_type; | ||
75 | |||
76 | /* PCI bus types */ | ||
77 | typedef enum { | ||
78 | ixgb_bus_type_unknown = 0, | ||
79 | ixgb_bus_type_pci, | ||
80 | ixgb_bus_type_pcix | ||
81 | } ixgb_bus_type; | ||
82 | |||
83 | /* PCI bus speeds */ | ||
84 | typedef enum { | ||
85 | ixgb_bus_speed_unknown = 0, | ||
86 | ixgb_bus_speed_33, | ||
87 | ixgb_bus_speed_66, | ||
88 | ixgb_bus_speed_100, | ||
89 | ixgb_bus_speed_133, | ||
90 | ixgb_bus_speed_reserved | ||
91 | } ixgb_bus_speed; | ||
92 | |||
93 | /* PCI bus widths */ | ||
94 | typedef enum { | ||
95 | ixgb_bus_width_unknown = 0, | ||
96 | ixgb_bus_width_32, | ||
97 | ixgb_bus_width_64 | ||
98 | } ixgb_bus_width; | ||
99 | |||
100 | #define IXGB_ETH_LENGTH_OF_ADDRESS 6 | ||
101 | |||
102 | #define IXGB_EEPROM_SIZE 64 /* Size in words */ | ||
103 | |||
104 | #define SPEED_10000 10000 | ||
105 | #define FULL_DUPLEX 2 | ||
106 | |||
107 | #define MIN_NUMBER_OF_DESCRIPTORS 8 | ||
108 | #define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */ | ||
109 | |||
110 | #define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */ | ||
111 | #define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */ | ||
112 | #define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */ | ||
113 | |||
114 | #define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */ | ||
115 | /* NOTE: this is MICROSECONDS */ | ||
116 | #define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */ | ||
117 | |||
118 | /* General Registers */ | ||
119 | #define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */ | ||
120 | #define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */ | ||
121 | #define IXGB_STATUS 0x00010 /* Device Status Register - RO */ | ||
122 | #define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */ | ||
123 | #define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */ | ||
124 | |||
125 | /* Interrupt */ | ||
126 | #define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */ | ||
127 | #define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */ | ||
128 | #define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */ | ||
129 | #define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */ | ||
130 | |||
131 | /* Receive */ | ||
132 | #define IXGB_RCTL 0x00100 /* RX Control - RW */ | ||
133 | #define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */ | ||
134 | #define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */ | ||
135 | #define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */ | ||
136 | #define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */ | ||
137 | #define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */ | ||
138 | #define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */ | ||
139 | #define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */ | ||
140 | #define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */ | ||
141 | #define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */ | ||
142 | #define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */ | ||
143 | #define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */ | ||
144 | #define IXGB_RA 0x00180 /* Receive Address Array Base - RW */ | ||
145 | #define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */ | ||
146 | #define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */ | ||
147 | #define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */ | ||
148 | #define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */ | ||
149 | #define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8 | ||
150 | |||
151 | /* Transmit */ | ||
152 | #define IXGB_TCTL 0x00600 /* TX Control - RW */ | ||
153 | #define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */ | ||
154 | #define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */ | ||
155 | #define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */ | ||
156 | #define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */ | ||
157 | #define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */ | ||
158 | #define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */ | ||
159 | #define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */ | ||
160 | #define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */ | ||
161 | #define IXGB_PAP 0x00640 /* Pause and Pace - RW */ | ||
162 | #define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8 | ||
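/*
 * Illustrative sketch (not part of the driver): ring sizes handed to the
 * hardware must respect MIN/MAX_NUMBER_OF_DESCRIPTORS and the
 * REQ_*_DESCRIPTOR_MULTIPLE granularity defined above. One way a requested
 * count could be normalized -- the helper name and exact rounding policy are
 * assumptions for illustration, not the driver's own parameter handling:
 */
static inline u32 ixgb_round_desc_count(u32 requested)
{
	/* clamp into the supported range, then round up to a multiple of 8 */
	u32 count = clamp_t(u32, requested, MIN_NUMBER_OF_DESCRIPTORS,
			    MAX_NUMBER_OF_DESCRIPTORS);

	return ALIGN(count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}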
163 | |||
164 | /* Physical */ | ||
165 | #define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */ | ||
166 | #define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */ | ||
167 | #define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */ | ||
168 | #define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */ | ||
169 | #define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */ | ||
170 | #define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */ | ||
171 | #define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */ | ||
172 | #define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */ | ||
173 | #define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */ | ||
174 | #define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */ | ||
175 | #define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */ | ||
176 | #define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */ | ||
177 | #define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */ | ||
178 | |||
179 | /* Wake-up */ | ||
180 | #define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */ | ||
181 | #define IXGB_WUS 0x00810 /* Wake Up Status - RO */ | ||
182 | #define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */ | ||
183 | #define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */ | ||
184 | #define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */ | ||
185 | |||
186 | /* Statistics */ | ||
187 | #define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */ | ||
188 | #define IXGB_TPRH 0x02004 /* Total Packets Received (High) */ | ||
189 | #define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */ | ||
190 | #define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */ | ||
191 | #define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */ | ||
192 | #define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */ | ||
193 | #define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */ | ||
194 | #define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */ | ||
195 | #define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */ | ||
196 | #define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */ | ||
197 | #define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */ | ||
198 | #define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */ | ||
199 | #define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */ | ||
200 | #define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */ | ||
201 | #define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */ | ||
202 | #define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */ | ||
203 | #define IXGB_TORL 0x02040 /* Total Octets Received (Low) */ | ||
204 | #define IXGB_TORH 0x02044 /* Total Octets Received (High) */ | ||
205 | #define IXGB_RNBC 0x02048 /* Receive No Buffers Count */ | ||
206 | #define IXGB_RUC 0x02050 /* Receive Undersize Count */ | ||
207 | #define IXGB_ROC 0x02058 /* Receive Oversize Count */ | ||
208 | #define IXGB_RLEC 0x02060 /* Receive Length Error Count */ | ||
209 | #define IXGB_CRCERRS 0x02068 /* CRC Error Count */ | ||
210 | #define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */ | ||
211 | #define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */ | ||
212 | #define IXGB_MPC 0x02080 /* Missed Packets Count */ | ||
213 | #define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */ | ||
214 | #define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */ | ||
215 | #define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */ | ||
216 | #define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */ | ||
217 | #define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */ | ||
218 | #define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */ | ||
219 | #define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */ | ||
220 | #define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */ | ||
221 | #define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */ | ||
222 | #define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */ | ||
223 | #define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */ | ||
224 | #define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */ | ||
225 | #define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */ | ||
226 | #define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */ | ||
227 | #define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */ | ||
228 | #define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */ | ||
229 | #define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */ | ||
230 | #define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */ | ||
231 | #define IXGB_DC 0x02148 /* Defer Count */ | ||
232 | #define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */ | ||
233 | #define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */ | ||
234 | #define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */ | ||
235 | #define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */ | ||
236 | #define IXGB_RFC 0x02188 /* Remote Fault Count */ | ||
237 | #define IXGB_LFC 0x02190 /* Local Fault Count */ | ||
238 | #define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */ | ||
239 | #define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */ | ||
240 | #define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */ | ||
241 | #define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */ | ||
242 | #define IXGB_XONRXC 0x021B8 /* XON Received Count */ | ||
243 | #define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */ | ||
244 | #define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */ | ||
245 | #define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */ | ||
246 | #define IXGB_RJC 0x021D8 /* Receive Jabber Count */ | ||
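/*
 * Illustrative sketch (not part of the driver): most of the statistics above
 * are exposed as 32-bit low/high register pairs (TORL/TORH, GORCL/GORCH, ...).
 * One way to view such a pair as a single 64-bit value -- the helper name is
 * an assumption, and the driver itself keeps the low and high words in
 * separate ixgb_hw_stats fields rather than combining them like this:
 */
static inline u64 ixgb_read_total_octets_rx(struct ixgb_hw *hw)
{
	u64 low  = IXGB_READ_REG(hw, TORL);	/* Total Octets Received (Low)  */
	u64 high = IXGB_READ_REG(hw, TORH);	/* Total Octets Received (High) */

	return (high << 32) | low;
}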
247 | |||
248 | /* CTRL0 Bit Masks */ | ||
249 | #define IXGB_CTRL0_LRST 0x00000008 | ||
250 | #define IXGB_CTRL0_JFE 0x00000010 | ||
251 | #define IXGB_CTRL0_XLE 0x00000020 | ||
252 | #define IXGB_CTRL0_MDCS 0x00000040 | ||
253 | #define IXGB_CTRL0_CMDC 0x00000080 | ||
254 | #define IXGB_CTRL0_SDP0 0x00040000 | ||
255 | #define IXGB_CTRL0_SDP1 0x00080000 | ||
256 | #define IXGB_CTRL0_SDP2 0x00100000 | ||
257 | #define IXGB_CTRL0_SDP3 0x00200000 | ||
258 | #define IXGB_CTRL0_SDP0_DIR 0x00400000 | ||
259 | #define IXGB_CTRL0_SDP1_DIR 0x00800000 | ||
260 | #define IXGB_CTRL0_SDP2_DIR 0x01000000 | ||
261 | #define IXGB_CTRL0_SDP3_DIR 0x02000000 | ||
262 | #define IXGB_CTRL0_RST 0x04000000 | ||
263 | #define IXGB_CTRL0_RPE 0x08000000 | ||
264 | #define IXGB_CTRL0_TPE 0x10000000 | ||
265 | #define IXGB_CTRL0_VME 0x40000000 | ||
266 | |||
267 | /* CTRL1 Bit Masks */ | ||
268 | #define IXGB_CTRL1_GPI0_EN 0x00000001 | ||
269 | #define IXGB_CTRL1_GPI1_EN 0x00000002 | ||
270 | #define IXGB_CTRL1_GPI2_EN 0x00000004 | ||
271 | #define IXGB_CTRL1_GPI3_EN 0x00000008 | ||
272 | #define IXGB_CTRL1_SDP4 0x00000010 | ||
273 | #define IXGB_CTRL1_SDP5 0x00000020 | ||
274 | #define IXGB_CTRL1_SDP6 0x00000040 | ||
275 | #define IXGB_CTRL1_SDP7 0x00000080 | ||
276 | #define IXGB_CTRL1_SDP4_DIR 0x00000100 | ||
277 | #define IXGB_CTRL1_SDP5_DIR 0x00000200 | ||
278 | #define IXGB_CTRL1_SDP6_DIR 0x00000400 | ||
279 | #define IXGB_CTRL1_SDP7_DIR 0x00000800 | ||
280 | #define IXGB_CTRL1_EE_RST 0x00002000 | ||
281 | #define IXGB_CTRL1_RO_DIS 0x00020000 | ||
282 | #define IXGB_CTRL1_PCIXHM_MASK 0x00C00000 | ||
283 | #define IXGB_CTRL1_PCIXHM_1_2 0x00000000 | ||
284 | #define IXGB_CTRL1_PCIXHM_5_8 0x00400000 | ||
285 | #define IXGB_CTRL1_PCIXHM_3_4 0x00800000 | ||
286 | #define IXGB_CTRL1_PCIXHM_7_8 0x00C00000 | ||
287 | |||
288 | /* STATUS Bit Masks */ | ||
289 | #define IXGB_STATUS_LU 0x00000002 | ||
290 | #define IXGB_STATUS_AIP 0x00000004 | ||
291 | #define IXGB_STATUS_TXOFF 0x00000010 | ||
292 | #define IXGB_STATUS_XAUIME 0x00000020 | ||
293 | #define IXGB_STATUS_RES 0x00000040 | ||
294 | #define IXGB_STATUS_RIS 0x00000080 | ||
295 | #define IXGB_STATUS_RIE 0x00000100 | ||
296 | #define IXGB_STATUS_RLF 0x00000200 | ||
297 | #define IXGB_STATUS_RRF 0x00000400 | ||
298 | #define IXGB_STATUS_PCI_SPD 0x00000800 | ||
299 | #define IXGB_STATUS_BUS64 0x00001000 | ||
300 | #define IXGB_STATUS_PCIX_MODE 0x00002000 | ||
301 | #define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000 | ||
302 | #define IXGB_STATUS_PCIX_SPD_66 0x00000000 | ||
303 | #define IXGB_STATUS_PCIX_SPD_100 0x00004000 | ||
304 | #define IXGB_STATUS_PCIX_SPD_133 0x00008000 | ||
305 | #define IXGB_STATUS_REV_ID_MASK 0x000F0000 | ||
306 | #define IXGB_STATUS_REV_ID_SHIFT 16 | ||
307 | |||
308 | /* EECD Bit Masks */ | ||
309 | #define IXGB_EECD_SK 0x00000001 | ||
310 | #define IXGB_EECD_CS 0x00000002 | ||
311 | #define IXGB_EECD_DI 0x00000004 | ||
312 | #define IXGB_EECD_DO 0x00000008 | ||
313 | #define IXGB_EECD_FWE_MASK 0x00000030 | ||
314 | #define IXGB_EECD_FWE_DIS 0x00000010 | ||
315 | #define IXGB_EECD_FWE_EN 0x00000020 | ||
316 | |||
317 | /* MFS */ | ||
318 | #define IXGB_MFS_SHIFT 16 | ||
319 | |||
320 | /* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */ | ||
321 | #define IXGB_INT_TXDW 0x00000001 | ||
322 | #define IXGB_INT_TXQE 0x00000002 | ||
323 | #define IXGB_INT_LSC 0x00000004 | ||
324 | #define IXGB_INT_RXSEQ 0x00000008 | ||
325 | #define IXGB_INT_RXDMT0 0x00000010 | ||
326 | #define IXGB_INT_RXO 0x00000040 | ||
327 | #define IXGB_INT_RXT0 0x00000080 | ||
328 | #define IXGB_INT_AUTOSCAN 0x00000200 | ||
329 | #define IXGB_INT_GPI0 0x00000800 | ||
330 | #define IXGB_INT_GPI1 0x00001000 | ||
331 | #define IXGB_INT_GPI2 0x00002000 | ||
332 | #define IXGB_INT_GPI3 0x00004000 | ||
333 | |||
334 | /* RCTL Bit Masks */ | ||
335 | #define IXGB_RCTL_RXEN 0x00000002 | ||
336 | #define IXGB_RCTL_SBP 0x00000004 | ||
337 | #define IXGB_RCTL_UPE 0x00000008 | ||
338 | #define IXGB_RCTL_MPE 0x00000010 | ||
339 | #define IXGB_RCTL_RDMTS_MASK 0x00000300 | ||
340 | #define IXGB_RCTL_RDMTS_1_2 0x00000000 | ||
341 | #define IXGB_RCTL_RDMTS_1_4 0x00000100 | ||
342 | #define IXGB_RCTL_RDMTS_1_8 0x00000200 | ||
343 | #define IXGB_RCTL_MO_MASK 0x00003000 | ||
344 | #define IXGB_RCTL_MO_47_36 0x00000000 | ||
345 | #define IXGB_RCTL_MO_46_35 0x00001000 | ||
346 | #define IXGB_RCTL_MO_45_34 0x00002000 | ||
347 | #define IXGB_RCTL_MO_43_32 0x00003000 | ||
348 | #define IXGB_RCTL_MO_SHIFT 12 | ||
349 | #define IXGB_RCTL_BAM 0x00008000 | ||
350 | #define IXGB_RCTL_BSIZE_MASK 0x00030000 | ||
351 | #define IXGB_RCTL_BSIZE_2048 0x00000000 | ||
352 | #define IXGB_RCTL_BSIZE_4096 0x00010000 | ||
353 | #define IXGB_RCTL_BSIZE_8192 0x00020000 | ||
354 | #define IXGB_RCTL_BSIZE_16384 0x00030000 | ||
355 | #define IXGB_RCTL_VFE 0x00040000 | ||
356 | #define IXGB_RCTL_CFIEN 0x00080000 | ||
357 | #define IXGB_RCTL_CFI 0x00100000 | ||
358 | #define IXGB_RCTL_RPDA_MASK 0x00600000 | ||
359 | #define IXGB_RCTL_RPDA_MC_MAC 0x00000000 | ||
360 | #define IXGB_RCTL_MC_ONLY 0x00400000 | ||
361 | #define IXGB_RCTL_CFF 0x00800000 | ||
362 | #define IXGB_RCTL_SECRC 0x04000000 | ||
363 | #define IXGB_RDT_FPDB 0x80000000 | ||
364 | |||
365 | #define IXGB_RCTL_IDLE_RX_UNIT 0 | ||
366 | |||
367 | /* FCRTL Bit Masks */ | ||
368 | #define IXGB_FCRTL_XONE 0x80000000 | ||
369 | |||
370 | /* RXDCTL Bit Masks */ | ||
371 | #define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF | ||
372 | #define IXGB_RXDCTL_PTHRESH_SHIFT 0 | ||
373 | #define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00 | ||
374 | #define IXGB_RXDCTL_HTHRESH_SHIFT 9 | ||
375 | #define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000 | ||
376 | #define IXGB_RXDCTL_WTHRESH_SHIFT 18 | ||
377 | |||
378 | /* RAIDC Bit Masks */ | ||
379 | #define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F | ||
380 | #define IXGB_RAIDC_DELAY_MASK 0x000FF800 | ||
381 | #define IXGB_RAIDC_DELAY_SHIFT 11 | ||
382 | #define IXGB_RAIDC_POLL_MASK 0x1FF00000 | ||
383 | #define IXGB_RAIDC_POLL_SHIFT 20 | ||
384 | #define IXGB_RAIDC_RXT_GATE 0x40000000 | ||
385 | #define IXGB_RAIDC_EN 0x80000000 | ||
386 | |||
387 | #define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220 | ||
388 | #define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244 | ||
389 | #define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122 | ||
390 | #define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61 | ||
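/*
 * Illustrative note (not part of the driver): each of the four poll constants
 * above satisfies value * interrupts_per_second == 1,220,000, so a poll value
 * for some other target rate could be derived as below. The helper name and
 * the idea of computing it at runtime are assumptions for illustration only.
 */
static inline u32 ixgb_raidc_poll_value(u32 ints_per_sec)
{
	return 1220000 / ints_per_sec;	/* e.g. 5000 ints/s -> 244 */
}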
391 | |||
392 | /* RXCSUM Bit Masks */ | ||
393 | #define IXGB_RXCSUM_IPOFL 0x00000100 | ||
394 | #define IXGB_RXCSUM_TUOFL 0x00000200 | ||
395 | |||
396 | /* RAH Bit Masks */ | ||
397 | #define IXGB_RAH_ASEL_MASK 0x00030000 | ||
398 | #define IXGB_RAH_ASEL_DEST 0x00000000 | ||
399 | #define IXGB_RAH_ASEL_SRC 0x00010000 | ||
400 | #define IXGB_RAH_AV 0x80000000 | ||
401 | |||
402 | /* TCTL Bit Masks */ | ||
403 | #define IXGB_TCTL_TCE 0x00000001 | ||
404 | #define IXGB_TCTL_TXEN 0x00000002 | ||
405 | #define IXGB_TCTL_TPDE 0x00000004 | ||
406 | |||
407 | #define IXGB_TCTL_IDLE_TX_UNIT 0 | ||
408 | |||
409 | /* TXDCTL Bit Masks */ | ||
410 | #define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F | ||
411 | #define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00 | ||
412 | #define IXGB_TXDCTL_HTHRESH_SHIFT 8 | ||
413 | #define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000 | ||
414 | #define IXGB_TXDCTL_WTHRESH_SHIFT 16 | ||
415 | |||
416 | /* TSPMT Bit Masks */ | ||
417 | #define IXGB_TSPMT_TSMT_MASK 0x0000FFFF | ||
418 | #define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000 | ||
419 | #define IXGB_TSPMT_TSPBP_SHIFT 16 | ||
420 | |||
421 | /* PAP Bit Masks */ | ||
422 | #define IXGB_PAP_TXPC_MASK 0x0000FFFF | ||
423 | #define IXGB_PAP_TXPV_MASK 0x000F0000 | ||
424 | #define IXGB_PAP_TXPV_10G 0x00000000 | ||
425 | #define IXGB_PAP_TXPV_1G 0x00010000 | ||
426 | #define IXGB_PAP_TXPV_2G 0x00020000 | ||
427 | #define IXGB_PAP_TXPV_3G 0x00030000 | ||
428 | #define IXGB_PAP_TXPV_4G 0x00040000 | ||
429 | #define IXGB_PAP_TXPV_5G 0x00050000 | ||
430 | #define IXGB_PAP_TXPV_6G 0x00060000 | ||
431 | #define IXGB_PAP_TXPV_7G 0x00070000 | ||
432 | #define IXGB_PAP_TXPV_8G 0x00080000 | ||
433 | #define IXGB_PAP_TXPV_9G 0x00090000 | ||
434 | #define IXGB_PAP_TXPV_WAN 0x000F0000 | ||
435 | |||
436 | /* PCSC1 Bit Masks */ | ||
437 | #define IXGB_PCSC1_LOOPBACK 0x00004000 | ||
438 | |||
439 | /* PCSC2 Bit Masks */ | ||
440 | #define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003 | ||
441 | #define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001 | ||
442 | |||
443 | /* PCSS1 Bit Masks */ | ||
444 | #define IXGB_PCSS1_LOCAL_FAULT 0x00000080 | ||
445 | #define IXGB_PCSS1_RX_LINK_STATUS 0x00000004 | ||
446 | |||
447 | /* PCSS2 Bit Masks */ | ||
448 | #define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000 | ||
449 | #define IXGB_PCSS2_DEV_PRES 0x00004000 | ||
450 | #define IXGB_PCSS2_TX_LF 0x00000800 | ||
451 | #define IXGB_PCSS2_RX_LF 0x00000400 | ||
452 | #define IXGB_PCSS2_10GBW 0x00000004 | ||
453 | #define IXGB_PCSS2_10GBX 0x00000002 | ||
454 | #define IXGB_PCSS2_10GBR 0x00000001 | ||
455 | |||
456 | /* XPCSS Bit Masks */ | ||
457 | #define IXGB_XPCSS_ALIGN_STATUS 0x00001000 | ||
458 | #define IXGB_XPCSS_PATTERN_TEST 0x00000800 | ||
459 | #define IXGB_XPCSS_LANE_3_SYNC 0x00000008 | ||
460 | #define IXGB_XPCSS_LANE_2_SYNC 0x00000004 | ||
461 | #define IXGB_XPCSS_LANE_1_SYNC 0x00000002 | ||
462 | #define IXGB_XPCSS_LANE_0_SYNC 0x00000001 | ||
463 | |||
464 | /* XPCSTC Bit Masks */ | ||
465 | #define IXGB_XPCSTC_BERT_TRIG 0x00200000 | ||
466 | #define IXGB_XPCSTC_BERT_SST 0x00100000 | ||
467 | #define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000 | ||
468 | #define IXGB_XPCSTC_BERT_PSZ_SHIFT 17 | ||
469 | #define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003 | ||
470 | #define IXGB_XPCSTC_BERT_PSZ_68 0x00000001 | ||
471 | #define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000 | ||
472 | |||
473 | /* MSCA bit Masks */ | ||
474 | /* New Protocol Address */ | ||
475 | #define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF | ||
476 | #define IXGB_MSCA_NP_ADDR_SHIFT 0 | ||
477 | /* Either Device Type or Register Address, depending on ST_CODE */ | ||
478 | #define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000 | ||
479 | #define IXGB_MSCA_DEV_TYPE_SHIFT 16 | ||
480 | #define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000 | ||
481 | #define IXGB_MSCA_PHY_ADDR_SHIFT 21 | ||
482 | #define IXGB_MSCA_OP_CODE_MASK 0x0C000000 | ||
483 | /* OP_CODE == 00, Address cycle, New Protocol */ | ||
484 | /* OP_CODE == 01, Write operation */ | ||
485 | /* OP_CODE == 10, Read operation */ | ||
486 | /* OP_CODE == 11, Read, auto increment, New Protocol */ | ||
487 | #define IXGB_MSCA_ADDR_CYCLE 0x00000000 | ||
488 | #define IXGB_MSCA_WRITE 0x04000000 | ||
489 | #define IXGB_MSCA_READ 0x08000000 | ||
490 | #define IXGB_MSCA_READ_AUTOINC 0x0C000000 | ||
491 | #define IXGB_MSCA_OP_CODE_SHIFT 26 | ||
492 | #define IXGB_MSCA_ST_CODE_MASK 0x30000000 | ||
493 | /* ST_CODE == 00, New Protocol */ | ||
494 | /* ST_CODE == 01, Old Protocol */ | ||
495 | #define IXGB_MSCA_NEW_PROTOCOL 0x00000000 | ||
496 | #define IXGB_MSCA_OLD_PROTOCOL 0x10000000 | ||
497 | #define IXGB_MSCA_ST_CODE_SHIFT 28 | ||
498 | /* Initiate command, self-clearing when command completes */ | ||
499 | #define IXGB_MSCA_MDI_COMMAND 0x40000000 | ||
500 | /* MDI In Progress Enable. */ | ||
501 | #define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000 | ||
502 | |||
503 | /* MSRWD bit masks */ | ||
504 | #define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF | ||
505 | #define IXGB_MSRWD_WRITE_DATA_SHIFT 0 | ||
506 | #define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000 | ||
507 | #define IXGB_MSRWD_READ_DATA_SHIFT 16 | ||
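/*
 * Illustrative sketch (not part of the driver): composing a "new protocol"
 * MDIO read from the MSCA/MSRWD fields above -- an address cycle to latch the
 * register address, then a read cycle, each kicked off by MDI_COMMAND. The
 * completion polling is elided and the helper name is an assumption; the
 * driver's own ixgb_read_phy_reg() is the authoritative implementation.
 */
static u16 ixgb_mdio_read_sketch(struct ixgb_hw *hw, u32 reg_address,
				 u32 phy_address, u32 device_type)
{
	u32 command;

	/* Address cycle: latch the 16-bit register address in the PHY */
	command = (reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
		  (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		  (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		  IXGB_MSCA_ADDR_CYCLE |
		  IXGB_MSCA_NEW_PROTOCOL |
		  IXGB_MSCA_MDI_COMMAND;
	IXGB_WRITE_REG(hw, MSCA, command);
	/* ... poll until IXGB_MSCA_MDI_COMMAND self-clears ... */

	/* Read cycle: fetch the data for the address latched above */
	command = (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
		  (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
		  IXGB_MSCA_READ |
		  IXGB_MSCA_NEW_PROTOCOL |
		  IXGB_MSCA_MDI_COMMAND;
	IXGB_WRITE_REG(hw, MSCA, command);
	/* ... poll until IXGB_MSCA_MDI_COMMAND self-clears ... */

	return (u16)(IXGB_READ_REG(hw, MSRWD) >> IXGB_MSRWD_READ_DATA_SHIFT);
}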
508 | |||
509 | /* Definitions for the optics devices on the MDIO bus. */ | ||
510 | #define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */ | ||
511 | |||
512 | #define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */ | ||
513 | |||
514 | /* Vendor-specific MDIO registers */ | ||
515 | #define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */ | ||
516 | #define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */ | ||
517 | |||
518 | #define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80 | ||
519 | #define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00 | ||
520 | #define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */ | ||
521 | |||
522 | /* Layout of a single receive descriptor. The controller assumes that this | ||
523 | * structure is packed into 16 bytes, which is a safe assumption with most | ||
524 | * compilers. However, some compilers may insert padding between the fields, | ||
525 | * in which case the structure must be packed in some compiler-specific | ||
526 | * manner. */ | ||
527 | struct ixgb_rx_desc { | ||
528 | __le64 buff_addr; | ||
529 | __le16 length; | ||
530 | __le16 reserved; | ||
531 | u8 status; | ||
532 | u8 errors; | ||
533 | __le16 special; | ||
534 | }; | ||
535 | |||
536 | #define IXGB_RX_DESC_STATUS_DD 0x01 | ||
537 | #define IXGB_RX_DESC_STATUS_EOP 0x02 | ||
538 | #define IXGB_RX_DESC_STATUS_IXSM 0x04 | ||
539 | #define IXGB_RX_DESC_STATUS_VP 0x08 | ||
540 | #define IXGB_RX_DESC_STATUS_TCPCS 0x20 | ||
541 | #define IXGB_RX_DESC_STATUS_IPCS 0x40 | ||
542 | #define IXGB_RX_DESC_STATUS_PIF 0x80 | ||
543 | |||
544 | #define IXGB_RX_DESC_ERRORS_CE 0x01 | ||
545 | #define IXGB_RX_DESC_ERRORS_SE 0x02 | ||
546 | #define IXGB_RX_DESC_ERRORS_P 0x08 | ||
547 | #define IXGB_RX_DESC_ERRORS_TCPE 0x20 | ||
548 | #define IXGB_RX_DESC_ERRORS_IPE 0x40 | ||
549 | #define IXGB_RX_DESC_ERRORS_RXE 0x80 | ||
550 | |||
551 | #define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | ||
552 | #define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ | ||
553 | #define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ | ||
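/*
 * Illustrative sketch (not part of the driver): a compile-time check of the
 * packed 16-byte layout described above, plus decoding the VLAN ID and
 * priority out of the "special" word using the masks just defined. The helper
 * name is an assumption; it only makes sense for descriptors whose status has
 * the VP (VLAN packet) bit set.
 */
static inline void ixgb_rx_desc_decode_vlan(const struct ixgb_rx_desc *rx_desc,
					    u16 *vlan_id, u8 *priority)
{
	u16 special = le16_to_cpu(rx_desc->special);

	BUILD_BUG_ON(sizeof(struct ixgb_rx_desc) != 16);

	*vlan_id  = special & IXGB_RX_DESC_SPECIAL_VLAN_MASK;
	*priority = (special & IXGB_RX_DESC_SPECIAL_PRI_MASK) >>
		    IXGB_RX_DESC_SPECIAL_PRI_SHIFT;
}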
554 | |||
555 | /* Layout of a single transmit descriptor. The controller assumes that this | ||
556 | * structure is packed into 16 bytes, which is a safe assumption with most | ||
557 | * compilers. However, some compilers may insert padding between the fields, | ||
558 | * in which case the structure must be packed in some compiler-specific | ||
559 | * manner. */ | ||
560 | struct ixgb_tx_desc { | ||
561 | __le64 buff_addr; | ||
562 | __le32 cmd_type_len; | ||
563 | u8 status; | ||
564 | u8 popts; | ||
565 | __le16 vlan; | ||
566 | }; | ||
567 | |||
568 | #define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF | ||
569 | #define IXGB_TX_DESC_TYPE_MASK 0x00F00000 | ||
570 | #define IXGB_TX_DESC_TYPE_SHIFT 20 | ||
571 | #define IXGB_TX_DESC_CMD_MASK 0xFF000000 | ||
572 | #define IXGB_TX_DESC_CMD_SHIFT 24 | ||
573 | #define IXGB_TX_DESC_CMD_EOP 0x01000000 | ||
574 | #define IXGB_TX_DESC_CMD_TSE 0x04000000 | ||
575 | #define IXGB_TX_DESC_CMD_RS 0x08000000 | ||
576 | #define IXGB_TX_DESC_CMD_VLE 0x40000000 | ||
577 | #define IXGB_TX_DESC_CMD_IDE 0x80000000 | ||
578 | |||
579 | #define IXGB_TX_DESC_TYPE 0x00100000 | ||
580 | |||
581 | #define IXGB_TX_DESC_STATUS_DD 0x01 | ||
582 | |||
583 | #define IXGB_TX_DESC_POPTS_IXSM 0x01 | ||
584 | #define IXGB_TX_DESC_POPTS_TXSM 0x02 | ||
585 | #define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ | ||
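/*
 * Illustrative sketch (not part of the driver): the transmit descriptor packs
 * the buffer length, the descriptor type and the command flags into a single
 * cmd_type_len word. A minimal composition for a one-buffer packet that asks
 * for end-of-packet and status write-back (helper name is an assumption):
 */
static inline __le32 ixgb_tx_cmd_type_len(u32 length)
{
	u32 cmd_type_len = (length & IXGB_TX_DESC_LENGTH_MASK) |
			   IXGB_TX_DESC_TYPE |
			   IXGB_TX_DESC_CMD_EOP |
			   IXGB_TX_DESC_CMD_RS;

	return cpu_to_le32(cmd_type_len);
}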
586 | |||
587 | struct ixgb_context_desc { | ||
588 | u8 ipcss; | ||
589 | u8 ipcso; | ||
590 | __le16 ipcse; | ||
591 | u8 tucss; | ||
592 | u8 tucso; | ||
593 | __le16 tucse; | ||
594 | __le32 cmd_type_len; | ||
595 | u8 status; | ||
596 | u8 hdr_len; | ||
597 | __le16 mss; | ||
598 | }; | ||
599 | |||
600 | #define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000 | ||
601 | #define IXGB_CONTEXT_DESC_CMD_IP 0x02000000 | ||
602 | #define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000 | ||
603 | #define IXGB_CONTEXT_DESC_CMD_RS 0x08000000 | ||
604 | #define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000 | ||
605 | |||
606 | #define IXGB_CONTEXT_DESC_TYPE 0x00000000 | ||
607 | |||
608 | #define IXGB_CONTEXT_DESC_STATUS_DD 0x01 | ||
609 | |||
610 | /* Filters */ | ||
611 | #define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ | ||
612 | #define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | ||
613 | #define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */ | ||
614 | |||
615 | #define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0 | ||
616 | #define ENET_HEADER_SIZE 14 | ||
617 | #define ENET_FCS_LENGTH 4 | ||
618 | #define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128 | ||
619 | #define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60 | ||
620 | #define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514 | ||
621 | #define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00 | ||
622 | |||
623 | /* Phy Addresses */ | ||
624 | #define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */ | ||
625 | #define IXGB_XAUII_PHY_ADDR 0x1 /* XAUI transceiver phy address */ | ||
626 | #define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */ | ||
627 | |||
628 | /* This structure takes a 64k flash and maps it for identification commands */ | ||
629 | struct ixgb_flash_buffer { | ||
630 | u8 manufacturer_id; | ||
631 | u8 device_id; | ||
632 | u8 filler1[0x2AA8]; | ||
633 | u8 cmd2; | ||
634 | u8 filler2[0x2AAA]; | ||
635 | u8 cmd1; | ||
636 | u8 filler3[0xAAAA]; | ||
637 | }; | ||
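/*
 * Illustrative sketch (not part of the driver): a compile-time check that the
 * filler sizes above really map the full 64 KB flash window the comment
 * describes (all members are u8 arrays, so no padding is expected).
 */
static inline void ixgb_flash_buffer_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct ixgb_flash_buffer) != 0x10000);
}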
638 | |||
639 | /* Flow control parameters */ | ||
640 | struct ixgb_fc { | ||
641 | u32 high_water; /* Flow Control High-water */ | ||
642 | u32 low_water; /* Flow Control Low-water */ | ||
643 | u16 pause_time; /* Flow Control Pause timer */ | ||
644 | bool send_xon; /* Flow control send XON */ | ||
645 | ixgb_fc_type type; /* Type of flow control */ | ||
646 | }; | ||
647 | |||
648 | /* The historical defaults for the flow control values are given below. */ | ||
649 | #define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ | ||
650 | #define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ | ||
651 | #define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ | ||
652 | |||
653 | /* Phy definitions */ | ||
654 | #define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF | ||
655 | #define IXGB_MAX_PHY_ADDRESS 31 | ||
656 | #define IXGB_MAX_PHY_DEV_TYPE 31 | ||
657 | |||
658 | /* Bus parameters */ | ||
659 | struct ixgb_bus { | ||
660 | ixgb_bus_speed speed; | ||
661 | ixgb_bus_width width; | ||
662 | ixgb_bus_type type; | ||
663 | }; | ||
664 | |||
665 | struct ixgb_hw { | ||
666 | u8 __iomem *hw_addr; /* Base Address of the hardware */ | ||
667 | void *back; /* Pointer to OS-dependent struct */ | ||
668 | struct ixgb_fc fc; /* Flow control parameters */ | ||
669 | struct ixgb_bus bus; /* Bus parameters */ | ||
670 | u32 phy_id; /* Phy Identifier */ | ||
671 | u32 phy_addr; /* XGMII address of Phy */ | ||
672 | ixgb_mac_type mac_type; /* Identifier for MAC controller */ | ||
673 | ixgb_phy_type phy_type; /* Transceiver/phy identifier */ | ||
674 | u32 max_frame_size; /* Maximum frame size supported */ | ||
675 | u32 mc_filter_type; /* Multicast filter hash type */ | ||
676 | u32 num_mc_addrs; /* Number of current Multicast addrs */ | ||
677 | u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ | ||
678 | u32 num_tx_desc; /* Number of Transmit descriptors */ | ||
679 | u32 num_rx_desc; /* Number of Receive descriptors */ | ||
680 | u32 rx_buffer_size; /* Size of Receive buffer */ | ||
681 | bool link_up; /* true if link is valid */ | ||
682 | bool adapter_stopped; /* State of adapter */ | ||
683 | u16 device_id; /* device id from PCI configuration space */ | ||
684 | u16 vendor_id; /* vendor id from PCI configuration space */ | ||
685 | u8 revision_id; /* revision id from PCI configuration space */ | ||
686 | u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ | ||
687 | u16 subsystem_id; /* subsystem id from PCI configuration space */ | ||
688 | u32 bar0; /* Base Address registers */ | ||
689 | u32 bar1; | ||
690 | u32 bar2; | ||
691 | u32 bar3; | ||
692 | u16 pci_cmd_word; /* PCI command register id from PCI configuration space */ | ||
693 | __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ | ||
694 | unsigned long io_base; /* Our I/O mapped location */ | ||
695 | u32 lastLFC; | ||
696 | u32 lastRFC; | ||
697 | }; | ||
698 | |||
699 | /* Statistics reported by the hardware */ | ||
700 | struct ixgb_hw_stats { | ||
701 | u64 tprl; | ||
702 | u64 tprh; | ||
703 | u64 gprcl; | ||
704 | u64 gprch; | ||
705 | u64 bprcl; | ||
706 | u64 bprch; | ||
707 | u64 mprcl; | ||
708 | u64 mprch; | ||
709 | u64 uprcl; | ||
710 | u64 uprch; | ||
711 | u64 vprcl; | ||
712 | u64 vprch; | ||
713 | u64 jprcl; | ||
714 | u64 jprch; | ||
715 | u64 gorcl; | ||
716 | u64 gorch; | ||
717 | u64 torl; | ||
718 | u64 torh; | ||
719 | u64 rnbc; | ||
720 | u64 ruc; | ||
721 | u64 roc; | ||
722 | u64 rlec; | ||
723 | u64 crcerrs; | ||
724 | u64 icbc; | ||
725 | u64 ecbc; | ||
726 | u64 mpc; | ||
727 | u64 tptl; | ||
728 | u64 tpth; | ||
729 | u64 gptcl; | ||
730 | u64 gptch; | ||
731 | u64 bptcl; | ||
732 | u64 bptch; | ||
733 | u64 mptcl; | ||
734 | u64 mptch; | ||
735 | u64 uptcl; | ||
736 | u64 uptch; | ||
737 | u64 vptcl; | ||
738 | u64 vptch; | ||
739 | u64 jptcl; | ||
740 | u64 jptch; | ||
741 | u64 gotcl; | ||
742 | u64 gotch; | ||
743 | u64 totl; | ||
744 | u64 toth; | ||
745 | u64 dc; | ||
746 | u64 plt64c; | ||
747 | u64 tsctc; | ||
748 | u64 tsctfc; | ||
749 | u64 ibic; | ||
750 | u64 rfc; | ||
751 | u64 lfc; | ||
752 | u64 pfrc; | ||
753 | u64 pftc; | ||
754 | u64 mcfrc; | ||
755 | u64 mcftc; | ||
756 | u64 xonrxc; | ||
757 | u64 xontxc; | ||
758 | u64 xoffrxc; | ||
759 | u64 xofftxc; | ||
760 | u64 rjc; | ||
761 | }; | ||
762 | |||
763 | /* Function Prototypes */ | ||
764 | extern bool ixgb_adapter_stop(struct ixgb_hw *hw); | ||
765 | extern bool ixgb_init_hw(struct ixgb_hw *hw); | ||
766 | extern bool ixgb_adapter_start(struct ixgb_hw *hw); | ||
767 | extern void ixgb_check_for_link(struct ixgb_hw *hw); | ||
768 | extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); | ||
769 | |||
770 | extern void ixgb_rar_set(struct ixgb_hw *hw, | ||
771 | u8 *addr, | ||
772 | u32 index); | ||
773 | |||
774 | |||
775 | /* Filters (multicast, vlan, receive) */ | ||
776 | extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, | ||
777 | u8 *mc_addr_list, | ||
778 | u32 mc_addr_count, | ||
779 | u32 pad); | ||
780 | |||
781 | /* Vfta functions */ | ||
782 | extern void ixgb_write_vfta(struct ixgb_hw *hw, | ||
783 | u32 offset, | ||
784 | u32 value); | ||
785 | |||
786 | /* Access functions to eeprom data */ | ||
787 | void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr); | ||
788 | u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw); | ||
789 | u16 ixgb_get_ee_device_id(struct ixgb_hw *hw); | ||
790 | bool ixgb_get_eeprom_data(struct ixgb_hw *hw); | ||
791 | __le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index); | ||
792 | |||
793 | /* Everything else */ | ||
794 | void ixgb_led_on(struct ixgb_hw *hw); | ||
795 | void ixgb_led_off(struct ixgb_hw *hw); | ||
796 | void ixgb_write_pci_cfg(struct ixgb_hw *hw, | ||
797 | u32 reg, | ||
798 | u16 * value); | ||
799 | |||
800 | |||
801 | #endif /* _IXGB_HW_H_ */ | ||
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h new file mode 100644 index 00000000000..2a58847f46e --- /dev/null +++ b/drivers/net/ixgb/ixgb_ids.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _IXGB_IDS_H_ | ||
30 | #define _IXGB_IDS_H_ | ||
31 | |||
32 | /********************************************************************** | ||
33 | ** The Device and Vendor IDs for 10 Gigabit MACs | ||
34 | **********************************************************************/ | ||
35 | |||
36 | #define INTEL_VENDOR_ID 0x8086 | ||
37 | #define INTEL_SUBVENDOR_ID 0x8086 | ||
38 | #define SUN_VENDOR_ID 0x108E | ||
39 | #define SUN_SUBVENDOR_ID 0x108E | ||
40 | |||
41 | #define IXGB_DEVICE_ID_82597EX 0x1048 | ||
42 | #define IXGB_DEVICE_ID_82597EX_SR 0x1A48 | ||
43 | #define IXGB_DEVICE_ID_82597EX_LR 0x1B48 | ||
44 | #define IXGB_SUBDEVICE_ID_A11F 0xA11F | ||
45 | #define IXGB_SUBDEVICE_ID_A01F 0xA01F | ||
46 | |||
47 | #define IXGB_DEVICE_ID_82597EX_CX4 0x109E | ||
48 | #define IXGB_SUBDEVICE_ID_A00C 0xA00C | ||
49 | #define IXGB_SUBDEVICE_ID_A01C 0xA01C | ||
50 | #define IXGB_SUBDEVICE_ID_7036 0x7036 | ||
51 | |||
52 | #endif /* #ifndef _IXGB_IDS_H_ */ | ||
53 | /* End of File */ | ||
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c new file mode 100644 index 00000000000..6a130eb51cf --- /dev/null +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -0,0 +1,2332 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
31 | #include <linux/prefetch.h> | ||
32 | #include "ixgb.h" | ||
33 | |||
34 | char ixgb_driver_name[] = "ixgb"; | ||
35 | static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; | ||
36 | |||
37 | #define DRIVERNAPI "-NAPI" | ||
38 | #define DRV_VERSION "1.0.135-k2" DRIVERNAPI | ||
39 | const char ixgb_driver_version[] = DRV_VERSION; | ||
40 | static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; | ||
41 | |||
42 | #define IXGB_CB_LENGTH 256 | ||
43 | static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; | ||
44 | module_param(copybreak, uint, 0644); | ||
45 | MODULE_PARM_DESC(copybreak, | ||
46 | "Maximum size of packet that is copied to a new buffer on receive"); | ||
47 | |||
48 | /* ixgb_pci_tbl - PCI Device ID Table | ||
49 | * | ||
50 | * Wildcard entries (PCI_ANY_ID) should come last | ||
51 | * Last entry must be all 0s | ||
52 | * | ||
53 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, | ||
54 | * Class, Class Mask, private data (not used) } | ||
55 | */ | ||
56 | static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = { | ||
57 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, | ||
58 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
59 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, | ||
60 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
61 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, | ||
62 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
63 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, | ||
64 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
65 | |||
66 | /* required last entry */ | ||
67 | {0,} | ||
68 | }; | ||
69 | |||
70 | MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); | ||
71 | |||
72 | /* Local Function Prototypes */ | ||
73 | static int ixgb_init_module(void); | ||
74 | static void ixgb_exit_module(void); | ||
75 | static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | ||
76 | static void __devexit ixgb_remove(struct pci_dev *pdev); | ||
77 | static int ixgb_sw_init(struct ixgb_adapter *adapter); | ||
78 | static int ixgb_open(struct net_device *netdev); | ||
79 | static int ixgb_close(struct net_device *netdev); | ||
80 | static void ixgb_configure_tx(struct ixgb_adapter *adapter); | ||
81 | static void ixgb_configure_rx(struct ixgb_adapter *adapter); | ||
82 | static void ixgb_setup_rctl(struct ixgb_adapter *adapter); | ||
83 | static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter); | ||
84 | static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter); | ||
85 | static void ixgb_set_multi(struct net_device *netdev); | ||
86 | static void ixgb_watchdog(unsigned long data); | ||
87 | static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, | ||
88 | struct net_device *netdev); | ||
89 | static struct net_device_stats *ixgb_get_stats(struct net_device *netdev); | ||
90 | static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); | ||
91 | static int ixgb_set_mac(struct net_device *netdev, void *p); | ||
92 | static irqreturn_t ixgb_intr(int irq, void *data); | ||
93 | static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); | ||
94 | |||
95 | static int ixgb_clean(struct napi_struct *, int); | ||
96 | static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); | ||
97 | static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); | ||
98 | |||
99 | static void ixgb_tx_timeout(struct net_device *dev); | ||
100 | static void ixgb_tx_timeout_task(struct work_struct *work); | ||
101 | |||
102 | static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); | ||
103 | static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); | ||
104 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); | ||
105 | static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); | ||
106 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); | ||
107 | |||
108 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
109 | /* for netdump / net console */ | ||
110 | static void ixgb_netpoll(struct net_device *dev); | ||
111 | #endif | ||
112 | |||
113 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, | ||
114 | enum pci_channel_state state); | ||
115 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); | ||
116 | static void ixgb_io_resume (struct pci_dev *pdev); | ||
117 | |||
118 | static struct pci_error_handlers ixgb_err_handler = { | ||
119 | .error_detected = ixgb_io_error_detected, | ||
120 | .slot_reset = ixgb_io_slot_reset, | ||
121 | .resume = ixgb_io_resume, | ||
122 | }; | ||
123 | |||
124 | static struct pci_driver ixgb_driver = { | ||
125 | .name = ixgb_driver_name, | ||
126 | .id_table = ixgb_pci_tbl, | ||
127 | .probe = ixgb_probe, | ||
128 | .remove = __devexit_p(ixgb_remove), | ||
129 | .err_handler = &ixgb_err_handler | ||
130 | }; | ||
131 | |||
132 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | ||
133 | MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); | ||
134 | MODULE_LICENSE("GPL"); | ||
135 | MODULE_VERSION(DRV_VERSION); | ||
136 | |||
137 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | ||
138 | static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; | ||
139 | module_param(debug, int, 0); | ||
140 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
141 | |||
142 | /** | ||
143 | * ixgb_init_module - Driver Registration Routine | ||
144 | * | ||
145 | * ixgb_init_module is the first routine called when the driver is | ||
146 | * loaded. All it does is register with the PCI subsystem. | ||
147 | **/ | ||
148 | |||
149 | static int __init | ||
150 | ixgb_init_module(void) | ||
151 | { | ||
152 | pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version); | ||
153 | pr_info("%s\n", ixgb_copyright); | ||
154 | |||
155 | return pci_register_driver(&ixgb_driver); | ||
156 | } | ||
157 | |||
158 | module_init(ixgb_init_module); | ||
159 | |||
160 | /** | ||
161 | * ixgb_exit_module - Driver Exit Cleanup Routine | ||
162 | * | ||
163 | * ixgb_exit_module is called just before the driver is removed | ||
164 | * from memory. | ||
165 | **/ | ||
166 | |||
167 | static void __exit | ||
168 | ixgb_exit_module(void) | ||
169 | { | ||
170 | pci_unregister_driver(&ixgb_driver); | ||
171 | } | ||
172 | |||
173 | module_exit(ixgb_exit_module); | ||
174 | |||
175 | /** | ||
176 | * ixgb_irq_disable - Mask off interrupt generation on the NIC | ||
177 | * @adapter: board private structure | ||
178 | **/ | ||
179 | |||
180 | static void | ||
181 | ixgb_irq_disable(struct ixgb_adapter *adapter) | ||
182 | { | ||
183 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); | ||
184 | IXGB_WRITE_FLUSH(&adapter->hw); | ||
185 | synchronize_irq(adapter->pdev->irq); | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * ixgb_irq_enable - Enable default interrupt generation settings | ||
190 | * @adapter: board private structure | ||
191 | **/ | ||
192 | |||
193 | static void | ||
194 | ixgb_irq_enable(struct ixgb_adapter *adapter) | ||
195 | { | ||
196 | u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | | ||
197 | IXGB_INT_TXDW | IXGB_INT_LSC; | ||
198 | if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) | ||
199 | val |= IXGB_INT_GPI0; | ||
200 | IXGB_WRITE_REG(&adapter->hw, IMS, val); | ||
201 | IXGB_WRITE_FLUSH(&adapter->hw); | ||
202 | } | ||
203 | |||
204 | int | ||
205 | ixgb_up(struct ixgb_adapter *adapter) | ||
206 | { | ||
207 | struct net_device *netdev = adapter->netdev; | ||
208 | int err, irq_flags = IRQF_SHARED; | ||
209 | int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | ||
210 | struct ixgb_hw *hw = &adapter->hw; | ||
211 | |||
212 | /* hardware has been reset, we need to reload some things */ | ||
213 | |||
214 | ixgb_rar_set(hw, netdev->dev_addr, 0); | ||
215 | ixgb_set_multi(netdev); | ||
216 | |||
217 | ixgb_restore_vlan(adapter); | ||
218 | |||
219 | ixgb_configure_tx(adapter); | ||
220 | ixgb_setup_rctl(adapter); | ||
221 | ixgb_configure_rx(adapter); | ||
222 | ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring)); | ||
223 | |||
224 | /* disable interrupts and get the hardware into a known state */ | ||
225 | IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); | ||
226 | |||
227 | /* only enable MSI if bus is in PCI-X mode */ | ||
228 | if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { | ||
229 | err = pci_enable_msi(adapter->pdev); | ||
230 | if (!err) { | ||
231 | adapter->have_msi = 1; | ||
232 | irq_flags = 0; | ||
233 | } | ||
234 | /* proceed to try to request regular interrupt */ | ||
235 | } | ||
236 | |||
237 | err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags, | ||
238 | netdev->name, netdev); | ||
239 | if (err) { | ||
240 | if (adapter->have_msi) | ||
241 | pci_disable_msi(adapter->pdev); | ||
242 | netif_err(adapter, probe, adapter->netdev, | ||
243 | "Unable to allocate interrupt Error: %d\n", err); | ||
244 | return err; | ||
245 | } | ||
246 | |||
247 | if ((hw->max_frame_size != max_frame) || | ||
248 | (hw->max_frame_size != | ||
249 | (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { | ||
250 | |||
251 | hw->max_frame_size = max_frame; | ||
252 | |||
253 | IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); | ||
254 | |||
255 | if (hw->max_frame_size > | ||
256 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { | ||
257 | u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); | ||
258 | |||
259 | if (!(ctrl0 & IXGB_CTRL0_JFE)) { | ||
260 | ctrl0 |= IXGB_CTRL0_JFE; | ||
261 | IXGB_WRITE_REG(hw, CTRL0, ctrl0); | ||
262 | } | ||
263 | } | ||
264 | } | ||
265 | |||
266 | clear_bit(__IXGB_DOWN, &adapter->flags); | ||
267 | |||
268 | napi_enable(&adapter->napi); | ||
269 | ixgb_irq_enable(adapter); | ||
270 | |||
271 | netif_wake_queue(netdev); | ||
272 | |||
273 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | void | ||
279 | ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) | ||
280 | { | ||
281 | struct net_device *netdev = adapter->netdev; | ||
282 | |||
283 | /* prevent the interrupt handler from restarting watchdog */ | ||
284 | set_bit(__IXGB_DOWN, &adapter->flags); | ||
285 | |||
286 | napi_disable(&adapter->napi); | ||
287 | /* waiting for NAPI to complete can re-enable interrupts */ | ||
288 | ixgb_irq_disable(adapter); | ||
289 | free_irq(adapter->pdev->irq, netdev); | ||
290 | |||
291 | if (adapter->have_msi) | ||
292 | pci_disable_msi(adapter->pdev); | ||
293 | |||
294 | if (kill_watchdog) | ||
295 | del_timer_sync(&adapter->watchdog_timer); | ||
296 | |||
297 | adapter->link_speed = 0; | ||
298 | adapter->link_duplex = 0; | ||
299 | netif_carrier_off(netdev); | ||
300 | netif_stop_queue(netdev); | ||
301 | |||
302 | ixgb_reset(adapter); | ||
303 | ixgb_clean_tx_ring(adapter); | ||
304 | ixgb_clean_rx_ring(adapter); | ||
305 | } | ||
306 | |||
307 | void | ||
308 | ixgb_reset(struct ixgb_adapter *adapter) | ||
309 | { | ||
310 | struct ixgb_hw *hw = &adapter->hw; | ||
311 | |||
312 | ixgb_adapter_stop(hw); | ||
313 | if (!ixgb_init_hw(hw)) | ||
314 | netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n"); | ||
315 | |||
316 | /* restore frame size information */ | ||
317 | IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); | ||
318 | if (hw->max_frame_size > | ||
319 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { | ||
320 | u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); | ||
321 | if (!(ctrl0 & IXGB_CTRL0_JFE)) { | ||
322 | ctrl0 |= IXGB_CTRL0_JFE; | ||
323 | IXGB_WRITE_REG(hw, CTRL0, ctrl0); | ||
324 | } | ||
325 | } | ||
326 | } | ||
327 | |||
328 | static const struct net_device_ops ixgb_netdev_ops = { | ||
329 | .ndo_open = ixgb_open, | ||
330 | .ndo_stop = ixgb_close, | ||
331 | .ndo_start_xmit = ixgb_xmit_frame, | ||
332 | .ndo_get_stats = ixgb_get_stats, | ||
333 | .ndo_set_multicast_list = ixgb_set_multi, | ||
334 | .ndo_validate_addr = eth_validate_addr, | ||
335 | .ndo_set_mac_address = ixgb_set_mac, | ||
336 | .ndo_change_mtu = ixgb_change_mtu, | ||
337 | .ndo_tx_timeout = ixgb_tx_timeout, | ||
338 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, | ||
339 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, | ||
340 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
341 | .ndo_poll_controller = ixgb_netpoll, | ||
342 | #endif | ||
343 | }; | ||
344 | |||
345 | /** | ||
346 | * ixgb_probe - Device Initialization Routine | ||
347 | * @pdev: PCI device information struct | ||
348 | * @ent: entry in ixgb_pci_tbl | ||
349 | * | ||
350 | * Returns 0 on success, negative on failure | ||
351 | * | ||
352 | * ixgb_probe initializes an adapter identified by a pci_dev structure. | ||
353 | * The OS initialization, configuring of the adapter private structure, | ||
354 | * and a hardware reset occur. | ||
355 | **/ | ||
356 | |||
357 | static int __devinit | ||
358 | ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
359 | { | ||
360 | struct net_device *netdev = NULL; | ||
361 | struct ixgb_adapter *adapter; | ||
362 | static int cards_found = 0; | ||
363 | int pci_using_dac; | ||
364 | int i; | ||
365 | int err; | ||
366 | |||
367 | err = pci_enable_device(pdev); | ||
368 | if (err) | ||
369 | return err; | ||
370 | |||
371 | pci_using_dac = 0; | ||
372 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
373 | if (!err) { | ||
374 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
375 | if (!err) | ||
376 | pci_using_dac = 1; | ||
377 | } else { | ||
378 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
379 | if (err) { | ||
380 | err = dma_set_coherent_mask(&pdev->dev, | ||
381 | DMA_BIT_MASK(32)); | ||
382 | if (err) { | ||
383 | pr_err("No usable DMA configuration, aborting\n"); | ||
384 | goto err_dma_mask; | ||
385 | } | ||
386 | } | ||
387 | } | ||
388 | |||
389 | err = pci_request_regions(pdev, ixgb_driver_name); | ||
390 | if (err) | ||
391 | goto err_request_regions; | ||
392 | |||
393 | pci_set_master(pdev); | ||
394 | |||
395 | netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); | ||
396 | if (!netdev) { | ||
397 | err = -ENOMEM; | ||
398 | goto err_alloc_etherdev; | ||
399 | } | ||
400 | |||
401 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
402 | |||
403 | pci_set_drvdata(pdev, netdev); | ||
404 | adapter = netdev_priv(netdev); | ||
405 | adapter->netdev = netdev; | ||
406 | adapter->pdev = pdev; | ||
407 | adapter->hw.back = adapter; | ||
408 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); | ||
409 | |||
410 | adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0); | ||
411 | if (!adapter->hw.hw_addr) { | ||
412 | err = -EIO; | ||
413 | goto err_ioremap; | ||
414 | } | ||
415 | |||
416 | for (i = BAR_1; i <= BAR_5; i++) { | ||
417 | if (pci_resource_len(pdev, i) == 0) | ||
418 | continue; | ||
419 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { | ||
420 | adapter->hw.io_base = pci_resource_start(pdev, i); | ||
421 | break; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | netdev->netdev_ops = &ixgb_netdev_ops; | ||
426 | ixgb_set_ethtool_ops(netdev); | ||
427 | netdev->watchdog_timeo = 5 * HZ; | ||
428 | netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); | ||
429 | |||
430 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | ||
431 | |||
432 | adapter->bd_number = cards_found; | ||
433 | adapter->link_speed = 0; | ||
434 | adapter->link_duplex = 0; | ||
435 | |||
436 | /* setup the private structure */ | ||
437 | |||
438 | err = ixgb_sw_init(adapter); | ||
439 | if (err) | ||
440 | goto err_sw_init; | ||
441 | |||
442 | netdev->features = NETIF_F_SG | | ||
443 | NETIF_F_HW_CSUM | | ||
444 | NETIF_F_HW_VLAN_TX | | ||
445 | NETIF_F_HW_VLAN_RX | | ||
446 | NETIF_F_HW_VLAN_FILTER; | ||
447 | netdev->features |= NETIF_F_TSO; | ||
448 | |||
449 | if (pci_using_dac) { | ||
450 | netdev->features |= NETIF_F_HIGHDMA; | ||
451 | netdev->vlan_features |= NETIF_F_HIGHDMA; | ||
452 | } | ||
453 | |||
454 | /* make sure the EEPROM is good */ | ||
455 | |||
456 | if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { | ||
457 | netif_err(adapter, probe, adapter->netdev, | ||
458 | "The EEPROM Checksum Is Not Valid\n"); | ||
459 | err = -EIO; | ||
460 | goto err_eeprom; | ||
461 | } | ||
462 | |||
463 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); | ||
464 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | ||
465 | |||
466 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
467 | netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); | ||
468 | err = -EIO; | ||
469 | goto err_eeprom; | ||
470 | } | ||
471 | |||
472 | adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); | ||
473 | |||
474 | init_timer(&adapter->watchdog_timer); | ||
475 | adapter->watchdog_timer.function = ixgb_watchdog; | ||
476 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
477 | |||
478 | INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); | ||
479 | |||
480 | strcpy(netdev->name, "eth%d"); | ||
481 | err = register_netdev(netdev); | ||
482 | if (err) | ||
483 | goto err_register; | ||
484 | |||
485 | /* carrier off reporting is important to ethtool even BEFORE open */ | ||
486 | netif_carrier_off(netdev); | ||
487 | |||
488 | netif_info(adapter, probe, adapter->netdev, | ||
489 | "Intel(R) PRO/10GbE Network Connection\n"); | ||
490 | ixgb_check_options(adapter); | ||
491 | /* reset the hardware with the new settings */ | ||
492 | |||
493 | ixgb_reset(adapter); | ||
494 | |||
495 | cards_found++; | ||
496 | return 0; | ||
497 | |||
498 | err_register: | ||
499 | err_sw_init: | ||
500 | err_eeprom: | ||
501 | iounmap(adapter->hw.hw_addr); | ||
502 | err_ioremap: | ||
503 | free_netdev(netdev); | ||
504 | err_alloc_etherdev: | ||
505 | pci_release_regions(pdev); | ||
506 | err_request_regions: | ||
507 | err_dma_mask: | ||
508 | pci_disable_device(pdev); | ||
509 | return err; | ||
510 | } | ||
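/*
 * Illustrative sketch only: how probe/remove entry points like the ones above
 * are typically wired into a struct pci_driver. The driver's real PCI id
 * table (referred to as ixgb_pci_tbl in the kernel-doc above) and driver
 * structure are presumably declared earlier in this file; the struct below is
 * a hypothetical example and is not compiled.
 */
#if 0
static struct pci_driver example_ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = __devexit_p(ixgb_remove),
};
#endif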
511 | |||
512 | /** | ||
513 | * ixgb_remove - Device Removal Routine | ||
514 | * @pdev: PCI device information struct | ||
515 | * | ||
516 | * ixgb_remove is called by the PCI subsystem to alert the driver | ||
517 | * that it should release a PCI device. This could be caused by a | ||
518 | * Hot-Plug event, or because the driver is going to be removed from | ||
519 | * memory. | ||
520 | **/ | ||
521 | |||
522 | static void __devexit | ||
523 | ixgb_remove(struct pci_dev *pdev) | ||
524 | { | ||
525 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
526 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
527 | |||
528 | cancel_work_sync(&adapter->tx_timeout_task); | ||
529 | |||
530 | unregister_netdev(netdev); | ||
531 | |||
532 | iounmap(adapter->hw.hw_addr); | ||
533 | pci_release_regions(pdev); | ||
534 | |||
535 | free_netdev(netdev); | ||
536 | pci_disable_device(pdev); | ||
537 | } | ||
538 | |||
539 | /** | ||
540 | * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter) | ||
541 | * @adapter: board private structure to initialize | ||
542 | * | ||
543 | * ixgb_sw_init initializes the Adapter private data structure. | ||
544 | * Fields are initialized based on PCI device information and | ||
545 | * OS network device settings (MTU size). | ||
546 | **/ | ||
547 | |||
548 | static int __devinit | ||
549 | ixgb_sw_init(struct ixgb_adapter *adapter) | ||
550 | { | ||
551 | struct ixgb_hw *hw = &adapter->hw; | ||
552 | struct net_device *netdev = adapter->netdev; | ||
553 | struct pci_dev *pdev = adapter->pdev; | ||
554 | |||
555 | /* PCI config space info */ | ||
556 | |||
557 | hw->vendor_id = pdev->vendor; | ||
558 | hw->device_id = pdev->device; | ||
559 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | ||
560 | hw->subsystem_id = pdev->subsystem_device; | ||
561 | |||
562 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | ||
563 | adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ | ||
564 | |||
565 | if ((hw->device_id == IXGB_DEVICE_ID_82597EX) || | ||
566 | (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || | ||
567 | (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || | ||
568 | (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) | ||
569 | hw->mac_type = ixgb_82597; | ||
570 | else { | ||
571 | /* should never have loaded on this device */ | ||
572 | netif_err(adapter, probe, adapter->netdev, "unsupported device id\n"); | ||
573 | } | ||
574 | |||
575 | /* enable flow control to be programmed */ | ||
576 | hw->fc.send_xon = 1; | ||
577 | |||
578 | set_bit(__IXGB_DOWN, &adapter->flags); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * ixgb_open - Called when a network interface is made active | ||
584 | * @netdev: network interface device structure | ||
585 | * | ||
586 | * Returns 0 on success, negative value on failure | ||
587 | * | ||
588 | * The open entry point is called when a network interface is made | ||
589 | * active by the system (IFF_UP). At this point all resources needed | ||
590 | * for transmit and receive operations are allocated, the interrupt | ||
591 | * handler is registered with the OS, the watchdog timer is started, | ||
592 | * and the stack is notified that the interface is ready. | ||
593 | **/ | ||
594 | |||
595 | static int | ||
596 | ixgb_open(struct net_device *netdev) | ||
597 | { | ||
598 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
599 | int err; | ||
600 | |||
601 | /* allocate transmit descriptors */ | ||
602 | err = ixgb_setup_tx_resources(adapter); | ||
603 | if (err) | ||
604 | goto err_setup_tx; | ||
605 | |||
606 | netif_carrier_off(netdev); | ||
607 | |||
608 | /* allocate receive descriptors */ | ||
609 | |||
610 | err = ixgb_setup_rx_resources(adapter); | ||
611 | if (err) | ||
612 | goto err_setup_rx; | ||
613 | |||
614 | err = ixgb_up(adapter); | ||
615 | if (err) | ||
616 | goto err_up; | ||
617 | |||
618 | netif_start_queue(netdev); | ||
619 | |||
620 | return 0; | ||
621 | |||
622 | err_up: | ||
623 | ixgb_free_rx_resources(adapter); | ||
624 | err_setup_rx: | ||
625 | ixgb_free_tx_resources(adapter); | ||
626 | err_setup_tx: | ||
627 | ixgb_reset(adapter); | ||
628 | |||
629 | return err; | ||
630 | } | ||
631 | |||
632 | /** | ||
633 | * ixgb_close - Disables a network interface | ||
634 | * @netdev: network interface device structure | ||
635 | * | ||
636 | * Returns 0, this is not allowed to fail | ||
637 | * | ||
638 | * The close entry point is called when an interface is de-activated | ||
639 | * by the OS. The hardware is still under the drivers control, but | ||
640 | * needs to be disabled. A global MAC reset is issued to stop the | ||
641 | * hardware, and all transmit and receive resources are freed. | ||
642 | **/ | ||
643 | |||
644 | static int | ||
645 | ixgb_close(struct net_device *netdev) | ||
646 | { | ||
647 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
648 | |||
649 | ixgb_down(adapter, true); | ||
650 | |||
651 | ixgb_free_tx_resources(adapter); | ||
652 | ixgb_free_rx_resources(adapter); | ||
653 | |||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | /** | ||
658 | * ixgb_setup_tx_resources - allocate Tx resources (Descriptors) | ||
659 | * @adapter: board private structure | ||
660 | * | ||
661 | * Return 0 on success, negative on failure | ||
662 | **/ | ||
663 | |||
664 | int | ||
665 | ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | ||
666 | { | ||
667 | struct ixgb_desc_ring *txdr = &adapter->tx_ring; | ||
668 | struct pci_dev *pdev = adapter->pdev; | ||
669 | int size; | ||
670 | |||
671 | size = sizeof(struct ixgb_buffer) * txdr->count; | ||
672 | txdr->buffer_info = vzalloc(size); | ||
673 | if (!txdr->buffer_info) { | ||
674 | netif_err(adapter, probe, adapter->netdev, | ||
675 | "Unable to allocate transmit descriptor ring memory\n"); | ||
676 | return -ENOMEM; | ||
677 | } | ||
678 | |||
679 | /* round up to nearest 4K */ | ||
680 | |||
681 | txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); | ||
682 | txdr->size = ALIGN(txdr->size, 4096); | ||
683 | |||
684 | txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | ||
685 | GFP_KERNEL); | ||
686 | if (!txdr->desc) { | ||
687 | vfree(txdr->buffer_info); | ||
688 | netif_err(adapter, probe, adapter->netdev, | ||
689 | "Unable to allocate transmit descriptor memory\n"); | ||
690 | return -ENOMEM; | ||
691 | } | ||
692 | memset(txdr->desc, 0, txdr->size); | ||
693 | |||
694 | txdr->next_to_use = 0; | ||
695 | txdr->next_to_clean = 0; | ||
696 | |||
697 | return 0; | ||
698 | } | ||
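/*
 * Sizing sketch for the allocation above, assuming the default ring of 256
 * descriptors and the 16-byte struct ixgb_tx_desc layout:
 *
 *	txdr->size = 256 * 16 = 4096 bytes
 *
 * which is already a 4 KiB multiple, so ALIGN(txdr->size, 4096) is a no-op in
 * the default configuration and only rounds up for ring sizes that are not a
 * multiple of 256 descriptors.
 */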
699 | |||
700 | /** | ||
701 | * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset. | ||
702 | * @adapter: board private structure | ||
703 | * | ||
704 | * Configure the Tx unit of the MAC after a reset. | ||
705 | **/ | ||
706 | |||
707 | static void | ||
708 | ixgb_configure_tx(struct ixgb_adapter *adapter) | ||
709 | { | ||
710 | u64 tdba = adapter->tx_ring.dma; | ||
711 | u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); | ||
712 | u32 tctl; | ||
713 | struct ixgb_hw *hw = &adapter->hw; | ||
714 | |||
715 | /* Setup the Base and Length of the Tx Descriptor Ring | ||
716 | * tx_ring.dma can be either a 32 or 64 bit value | ||
717 | */ | ||
718 | |||
719 | IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | ||
720 | IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32)); | ||
721 | |||
722 | IXGB_WRITE_REG(hw, TDLEN, tdlen); | ||
723 | |||
724 | /* Setup the HW Tx Head and Tail descriptor pointers */ | ||
725 | |||
726 | IXGB_WRITE_REG(hw, TDH, 0); | ||
727 | IXGB_WRITE_REG(hw, TDT, 0); | ||
728 | |||
729 | /* don't set up txdctl; it induces performance problems if configured | ||
730 | * incorrectly */ | ||
731 | /* Set the Tx Interrupt Delay register */ | ||
732 | |||
733 | IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay); | ||
734 | |||
735 | /* Program the Transmit Control Register */ | ||
736 | |||
737 | tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; | ||
738 | IXGB_WRITE_REG(hw, TCTL, tctl); | ||
739 | |||
740 | /* Setup Transmit Descriptor Settings for this adapter */ | ||
741 | adapter->tx_cmd_type = | ||
742 | IXGB_TX_DESC_TYPE | | ||
743 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); | ||
744 | } | ||
745 | |||
746 | /** | ||
747 | * ixgb_setup_rx_resources - allocate Rx resources (Descriptors) | ||
748 | * @adapter: board private structure | ||
749 | * | ||
750 | * Returns 0 on success, negative on failure | ||
751 | **/ | ||
752 | |||
753 | int | ||
754 | ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | ||
755 | { | ||
756 | struct ixgb_desc_ring *rxdr = &adapter->rx_ring; | ||
757 | struct pci_dev *pdev = adapter->pdev; | ||
758 | int size; | ||
759 | |||
760 | size = sizeof(struct ixgb_buffer) * rxdr->count; | ||
761 | rxdr->buffer_info = vzalloc(size); | ||
762 | if (!rxdr->buffer_info) { | ||
763 | netif_err(adapter, probe, adapter->netdev, | ||
764 | "Unable to allocate receive descriptor ring\n"); | ||
765 | return -ENOMEM; | ||
766 | } | ||
767 | |||
768 | /* Round up to nearest 4K */ | ||
769 | |||
770 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | ||
771 | rxdr->size = ALIGN(rxdr->size, 4096); | ||
772 | |||
773 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | ||
774 | GFP_KERNEL); | ||
775 | |||
776 | if (!rxdr->desc) { | ||
777 | vfree(rxdr->buffer_info); | ||
778 | netif_err(adapter, probe, adapter->netdev, | ||
779 | "Unable to allocate receive descriptors\n"); | ||
780 | return -ENOMEM; | ||
781 | } | ||
782 | memset(rxdr->desc, 0, rxdr->size); | ||
783 | |||
784 | rxdr->next_to_clean = 0; | ||
785 | rxdr->next_to_use = 0; | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | /** | ||
791 | * ixgb_setup_rctl - configure the receive control register | ||
792 | * @adapter: Board private structure | ||
793 | **/ | ||
794 | |||
795 | static void | ||
796 | ixgb_setup_rctl(struct ixgb_adapter *adapter) | ||
797 | { | ||
798 | u32 rctl; | ||
799 | |||
800 | rctl = IXGB_READ_REG(&adapter->hw, RCTL); | ||
801 | |||
802 | rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); | ||
803 | |||
804 | rctl |= | ||
805 | IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | | ||
806 | IXGB_RCTL_RXEN | IXGB_RCTL_CFF | | ||
807 | (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); | ||
808 | |||
809 | rctl |= IXGB_RCTL_SECRC; | ||
810 | |||
811 | if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048) | ||
812 | rctl |= IXGB_RCTL_BSIZE_2048; | ||
813 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096) | ||
814 | rctl |= IXGB_RCTL_BSIZE_4096; | ||
815 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192) | ||
816 | rctl |= IXGB_RCTL_BSIZE_8192; | ||
817 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384) | ||
818 | rctl |= IXGB_RCTL_BSIZE_16384; | ||
819 | |||
820 | IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); | ||
821 | } | ||
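/*
 * Example of the buffer-size bucketing above: with a standard 1500-byte MTU,
 * rx_buffer_len is set in ixgb_sw_init to max_frame_size + 8 for the errata
 * pad, i.e. 1518 + 8 = 1526 bytes, which falls into the first bucket, so
 * IXGB_RCTL_BSIZE_2048 is selected.
 */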
822 | |||
823 | /** | ||
824 | * ixgb_configure_rx - Configure 82597 Receive Unit after Reset. | ||
825 | * @adapter: board private structure | ||
826 | * | ||
827 | * Configure the Rx unit of the MAC after a reset. | ||
828 | **/ | ||
829 | |||
830 | static void | ||
831 | ixgb_configure_rx(struct ixgb_adapter *adapter) | ||
832 | { | ||
833 | u64 rdba = adapter->rx_ring.dma; | ||
834 | u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); | ||
835 | struct ixgb_hw *hw = &adapter->hw; | ||
836 | u32 rctl; | ||
837 | u32 rxcsum; | ||
838 | |||
839 | /* make sure receives are disabled while setting up the descriptors */ | ||
840 | |||
841 | rctl = IXGB_READ_REG(hw, RCTL); | ||
842 | IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN); | ||
843 | |||
844 | /* set the Receive Delay Timer Register */ | ||
845 | |||
846 | IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay); | ||
847 | |||
848 | /* Setup the Base and Length of the Rx Descriptor Ring */ | ||
849 | |||
850 | IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | ||
851 | IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32)); | ||
852 | |||
853 | IXGB_WRITE_REG(hw, RDLEN, rdlen); | ||
854 | |||
855 | /* Setup the HW Rx Head and Tail Descriptor Pointers */ | ||
856 | IXGB_WRITE_REG(hw, RDH, 0); | ||
857 | IXGB_WRITE_REG(hw, RDT, 0); | ||
858 | |||
859 | /* due to the hardware errata with RXDCTL, we are unable to use any of | ||
860 | * the performance enhancing features of it without causing other | ||
861 | * subtle bugs, some of the bugs could include receive length | ||
862 | * corruption at high data rates (WTHRESH > 0) and/or receive | ||
863 | * descriptor ring irregularities (particularly in hardware cache) */ | ||
864 | IXGB_WRITE_REG(hw, RXDCTL, 0); | ||
865 | |||
866 | /* Enable Receive Checksum Offload for TCP and UDP */ | ||
867 | if (adapter->rx_csum) { | ||
868 | rxcsum = IXGB_READ_REG(hw, RXCSUM); | ||
869 | rxcsum |= IXGB_RXCSUM_TUOFL; | ||
870 | IXGB_WRITE_REG(hw, RXCSUM, rxcsum); | ||
871 | } | ||
872 | |||
873 | /* Enable Receives */ | ||
874 | |||
875 | IXGB_WRITE_REG(hw, RCTL, rctl); | ||
876 | } | ||
877 | |||
878 | /** | ||
879 | * ixgb_free_tx_resources - Free Tx Resources | ||
880 | * @adapter: board private structure | ||
881 | * | ||
882 | * Free all transmit software resources | ||
883 | **/ | ||
884 | |||
885 | void | ||
886 | ixgb_free_tx_resources(struct ixgb_adapter *adapter) | ||
887 | { | ||
888 | struct pci_dev *pdev = adapter->pdev; | ||
889 | |||
890 | ixgb_clean_tx_ring(adapter); | ||
891 | |||
892 | vfree(adapter->tx_ring.buffer_info); | ||
893 | adapter->tx_ring.buffer_info = NULL; | ||
894 | |||
895 | dma_free_coherent(&pdev->dev, adapter->tx_ring.size, | ||
896 | adapter->tx_ring.desc, adapter->tx_ring.dma); | ||
897 | |||
898 | adapter->tx_ring.desc = NULL; | ||
899 | } | ||
900 | |||
901 | static void | ||
902 | ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, | ||
903 | struct ixgb_buffer *buffer_info) | ||
904 | { | ||
905 | if (buffer_info->dma) { | ||
906 | if (buffer_info->mapped_as_page) | ||
907 | dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, | ||
908 | buffer_info->length, DMA_TO_DEVICE); | ||
909 | else | ||
910 | dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, | ||
911 | buffer_info->length, DMA_TO_DEVICE); | ||
912 | buffer_info->dma = 0; | ||
913 | } | ||
914 | |||
915 | if (buffer_info->skb) { | ||
916 | dev_kfree_skb_any(buffer_info->skb); | ||
917 | buffer_info->skb = NULL; | ||
918 | } | ||
919 | buffer_info->time_stamp = 0; | ||
920 | /* these fields must always be initialized in tx | ||
921 | * buffer_info->length = 0; | ||
922 | * buffer_info->next_to_watch = 0; */ | ||
923 | } | ||
924 | |||
925 | /** | ||
926 | * ixgb_clean_tx_ring - Free Tx Buffers | ||
927 | * @adapter: board private structure | ||
928 | **/ | ||
929 | |||
930 | static void | ||
931 | ixgb_clean_tx_ring(struct ixgb_adapter *adapter) | ||
932 | { | ||
933 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
934 | struct ixgb_buffer *buffer_info; | ||
935 | unsigned long size; | ||
936 | unsigned int i; | ||
937 | |||
938 | /* Free all the Tx ring sk_buffs */ | ||
939 | |||
940 | for (i = 0; i < tx_ring->count; i++) { | ||
941 | buffer_info = &tx_ring->buffer_info[i]; | ||
942 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | ||
943 | } | ||
944 | |||
945 | size = sizeof(struct ixgb_buffer) * tx_ring->count; | ||
946 | memset(tx_ring->buffer_info, 0, size); | ||
947 | |||
948 | /* Zero out the descriptor ring */ | ||
949 | |||
950 | memset(tx_ring->desc, 0, tx_ring->size); | ||
951 | |||
952 | tx_ring->next_to_use = 0; | ||
953 | tx_ring->next_to_clean = 0; | ||
954 | |||
955 | IXGB_WRITE_REG(&adapter->hw, TDH, 0); | ||
956 | IXGB_WRITE_REG(&adapter->hw, TDT, 0); | ||
957 | } | ||
958 | |||
959 | /** | ||
960 | * ixgb_free_rx_resources - Free Rx Resources | ||
961 | * @adapter: board private structure | ||
962 | * | ||
963 | * Free all receive software resources | ||
964 | **/ | ||
965 | |||
966 | void | ||
967 | ixgb_free_rx_resources(struct ixgb_adapter *adapter) | ||
968 | { | ||
969 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | ||
970 | struct pci_dev *pdev = adapter->pdev; | ||
971 | |||
972 | ixgb_clean_rx_ring(adapter); | ||
973 | |||
974 | vfree(rx_ring->buffer_info); | ||
975 | rx_ring->buffer_info = NULL; | ||
976 | |||
977 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | ||
978 | rx_ring->dma); | ||
979 | |||
980 | rx_ring->desc = NULL; | ||
981 | } | ||
982 | |||
983 | /** | ||
984 | * ixgb_clean_rx_ring - Free Rx Buffers | ||
985 | * @adapter: board private structure | ||
986 | **/ | ||
987 | |||
988 | static void | ||
989 | ixgb_clean_rx_ring(struct ixgb_adapter *adapter) | ||
990 | { | ||
991 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | ||
992 | struct ixgb_buffer *buffer_info; | ||
993 | struct pci_dev *pdev = adapter->pdev; | ||
994 | unsigned long size; | ||
995 | unsigned int i; | ||
996 | |||
997 | /* Free all the Rx ring sk_buffs */ | ||
998 | |||
999 | for (i = 0; i < rx_ring->count; i++) { | ||
1000 | buffer_info = &rx_ring->buffer_info[i]; | ||
1001 | if (buffer_info->dma) { | ||
1002 | dma_unmap_single(&pdev->dev, | ||
1003 | buffer_info->dma, | ||
1004 | buffer_info->length, | ||
1005 | DMA_FROM_DEVICE); | ||
1006 | buffer_info->dma = 0; | ||
1007 | buffer_info->length = 0; | ||
1008 | } | ||
1009 | |||
1010 | if (buffer_info->skb) { | ||
1011 | dev_kfree_skb(buffer_info->skb); | ||
1012 | buffer_info->skb = NULL; | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | size = sizeof(struct ixgb_buffer) * rx_ring->count; | ||
1017 | memset(rx_ring->buffer_info, 0, size); | ||
1018 | |||
1019 | /* Zero out the descriptor ring */ | ||
1020 | |||
1021 | memset(rx_ring->desc, 0, rx_ring->size); | ||
1022 | |||
1023 | rx_ring->next_to_clean = 0; | ||
1024 | rx_ring->next_to_use = 0; | ||
1025 | |||
1026 | IXGB_WRITE_REG(&adapter->hw, RDH, 0); | ||
1027 | IXGB_WRITE_REG(&adapter->hw, RDT, 0); | ||
1028 | } | ||
1029 | |||
1030 | /** | ||
1031 | * ixgb_set_mac - Change the Ethernet Address of the NIC | ||
1032 | * @netdev: network interface device structure | ||
1033 | * @p: pointer to an address structure | ||
1034 | * | ||
1035 | * Returns 0 on success, negative on failure | ||
1036 | **/ | ||
1037 | |||
1038 | static int | ||
1039 | ixgb_set_mac(struct net_device *netdev, void *p) | ||
1040 | { | ||
1041 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1042 | struct sockaddr *addr = p; | ||
1043 | |||
1044 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1045 | return -EADDRNOTAVAIL; | ||
1046 | |||
1047 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1048 | |||
1049 | ixgb_rar_set(&adapter->hw, addr->sa_data, 0); | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1054 | /** | ||
1055 | * ixgb_set_multi - Multicast and Promiscuous mode set | ||
1056 | * @netdev: network interface device structure | ||
1057 | * | ||
1058 | * The set_multi entry point is called whenever the multicast address | ||
1059 | * list or the network interface flags are updated. This routine is | ||
1060 | * responsible for configuring the hardware for proper multicast, | ||
1061 | * promiscuous mode, and all-multi behavior. | ||
1062 | **/ | ||
1063 | |||
1064 | static void | ||
1065 | ixgb_set_multi(struct net_device *netdev) | ||
1066 | { | ||
1067 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1068 | struct ixgb_hw *hw = &adapter->hw; | ||
1069 | struct netdev_hw_addr *ha; | ||
1070 | u32 rctl; | ||
1071 | int i; | ||
1072 | |||
1073 | /* Check for Promiscuous and All Multicast modes */ | ||
1074 | |||
1075 | rctl = IXGB_READ_REG(hw, RCTL); | ||
1076 | |||
1077 | if (netdev->flags & IFF_PROMISC) { | ||
1078 | rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); | ||
1079 | /* disable VLAN filtering */ | ||
1080 | rctl &= ~IXGB_RCTL_CFIEN; | ||
1081 | rctl &= ~IXGB_RCTL_VFE; | ||
1082 | } else { | ||
1083 | if (netdev->flags & IFF_ALLMULTI) { | ||
1084 | rctl |= IXGB_RCTL_MPE; | ||
1085 | rctl &= ~IXGB_RCTL_UPE; | ||
1086 | } else { | ||
1087 | rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); | ||
1088 | } | ||
1089 | /* enable VLAN filtering */ | ||
1090 | rctl |= IXGB_RCTL_VFE; | ||
1091 | rctl &= ~IXGB_RCTL_CFIEN; | ||
1092 | } | ||
1093 | |||
1094 | if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { | ||
1095 | rctl |= IXGB_RCTL_MPE; | ||
1096 | IXGB_WRITE_REG(hw, RCTL, rctl); | ||
1097 | } else { | ||
1098 | u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * | ||
1099 | IXGB_ETH_LENGTH_OF_ADDRESS]; | ||
1100 | |||
1101 | IXGB_WRITE_REG(hw, RCTL, rctl); | ||
1102 | |||
1103 | i = 0; | ||
1104 | netdev_for_each_mc_addr(ha, netdev) | ||
1105 | memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], | ||
1106 | ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS); | ||
1107 | |||
1108 | ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); | ||
1109 | } | ||
1110 | |||
1111 | if (netdev->features & NETIF_F_HW_VLAN_RX) | ||
1112 | ixgb_vlan_strip_enable(adapter); | ||
1113 | else | ||
1114 | ixgb_vlan_strip_disable(adapter); | ||
1115 | |||
1116 | } | ||
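/*
 * Note on the on-stack mta[] scratch array above: assuming the usual values
 * of IXGB_MAX_NUM_MULTICAST_ADDRESSES (128) and IXGB_ETH_LENGTH_OF_ADDRESS
 * (6), it occupies 128 * 6 = 768 bytes of stack, packed with the multicast
 * addresses before being handed to ixgb_mc_addr_list_update().
 */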
1117 | |||
1118 | /** | ||
1119 | * ixgb_watchdog - Timer Call-back | ||
1120 | * @data: pointer to our adapter cast into an unsigned long | ||
1121 | **/ | ||
1122 | |||
1123 | static void | ||
1124 | ixgb_watchdog(unsigned long data) | ||
1125 | { | ||
1126 | struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; | ||
1127 | struct net_device *netdev = adapter->netdev; | ||
1128 | struct ixgb_desc_ring *txdr = &adapter->tx_ring; | ||
1129 | |||
1130 | ixgb_check_for_link(&adapter->hw); | ||
1131 | |||
1132 | if (ixgb_check_for_bad_link(&adapter->hw)) { | ||
1133 | /* force the reset path */ | ||
1134 | netif_stop_queue(netdev); | ||
1135 | } | ||
1136 | |||
1137 | if (adapter->hw.link_up) { | ||
1138 | if (!netif_carrier_ok(netdev)) { | ||
1139 | netdev_info(netdev, | ||
1140 | "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n", | ||
1141 | (adapter->hw.fc.type == ixgb_fc_full) ? | ||
1142 | "RX/TX" : | ||
1143 | (adapter->hw.fc.type == ixgb_fc_rx_pause) ? | ||
1144 | "RX" : | ||
1145 | (adapter->hw.fc.type == ixgb_fc_tx_pause) ? | ||
1146 | "TX" : "None"); | ||
1147 | adapter->link_speed = 10000; | ||
1148 | adapter->link_duplex = FULL_DUPLEX; | ||
1149 | netif_carrier_on(netdev); | ||
1150 | } | ||
1151 | } else { | ||
1152 | if (netif_carrier_ok(netdev)) { | ||
1153 | adapter->link_speed = 0; | ||
1154 | adapter->link_duplex = 0; | ||
1155 | netdev_info(netdev, "NIC Link is Down\n"); | ||
1156 | netif_carrier_off(netdev); | ||
1157 | } | ||
1158 | } | ||
1159 | |||
1160 | ixgb_update_stats(adapter); | ||
1161 | |||
1162 | if (!netif_carrier_ok(netdev)) { | ||
1163 | if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { | ||
1164 | /* We've lost link, so the controller stops DMA, | ||
1165 | * but we've got queued Tx work that's never going | ||
1166 | * to get done, so reset controller to flush Tx. | ||
1167 | * (Do the reset outside of interrupt context). */ | ||
1168 | schedule_work(&adapter->tx_timeout_task); | ||
1169 | /* return immediately since reset is imminent */ | ||
1170 | return; | ||
1171 | } | ||
1172 | } | ||
1173 | |||
1174 | /* Force detection of hung controller every watchdog period */ | ||
1175 | adapter->detect_tx_hung = true; | ||
1176 | |||
1177 | /* generate an interrupt to force clean up of any stragglers */ | ||
1178 | IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW); | ||
1179 | |||
1180 | /* Reset the timer */ | ||
1181 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1182 | } | ||
1183 | |||
1184 | #define IXGB_TX_FLAGS_CSUM 0x00000001 | ||
1185 | #define IXGB_TX_FLAGS_VLAN 0x00000002 | ||
1186 | #define IXGB_TX_FLAGS_TSO 0x00000004 | ||
1187 | |||
1188 | static int | ||
1189 | ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | ||
1190 | { | ||
1191 | struct ixgb_context_desc *context_desc; | ||
1192 | unsigned int i; | ||
1193 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | ||
1194 | u16 ipcse, tucse, mss; | ||
1195 | int err; | ||
1196 | |||
1197 | if (likely(skb_is_gso(skb))) { | ||
1198 | struct ixgb_buffer *buffer_info; | ||
1199 | struct iphdr *iph; | ||
1200 | |||
1201 | if (skb_header_cloned(skb)) { | ||
1202 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1203 | if (err) | ||
1204 | return err; | ||
1205 | } | ||
1206 | |||
1207 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1208 | mss = skb_shinfo(skb)->gso_size; | ||
1209 | iph = ip_hdr(skb); | ||
1210 | iph->tot_len = 0; | ||
1211 | iph->check = 0; | ||
1212 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
1213 | iph->daddr, 0, | ||
1214 | IPPROTO_TCP, 0); | ||
1215 | ipcss = skb_network_offset(skb); | ||
1216 | ipcso = (void *)&(iph->check) - (void *)skb->data; | ||
1217 | ipcse = skb_transport_offset(skb) - 1; | ||
1218 | tucss = skb_transport_offset(skb); | ||
1219 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | ||
1220 | tucse = 0; | ||
1221 | |||
1222 | i = adapter->tx_ring.next_to_use; | ||
1223 | context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); | ||
1224 | buffer_info = &adapter->tx_ring.buffer_info[i]; | ||
1225 | WARN_ON(buffer_info->dma != 0); | ||
1226 | |||
1227 | context_desc->ipcss = ipcss; | ||
1228 | context_desc->ipcso = ipcso; | ||
1229 | context_desc->ipcse = cpu_to_le16(ipcse); | ||
1230 | context_desc->tucss = tucss; | ||
1231 | context_desc->tucso = tucso; | ||
1232 | context_desc->tucse = cpu_to_le16(tucse); | ||
1233 | context_desc->mss = cpu_to_le16(mss); | ||
1234 | context_desc->hdr_len = hdr_len; | ||
1235 | context_desc->status = 0; | ||
1236 | context_desc->cmd_type_len = cpu_to_le32( | ||
1237 | IXGB_CONTEXT_DESC_TYPE | ||
1238 | | IXGB_CONTEXT_DESC_CMD_TSE | ||
1239 | | IXGB_CONTEXT_DESC_CMD_IP | ||
1240 | | IXGB_CONTEXT_DESC_CMD_TCP | ||
1241 | | IXGB_CONTEXT_DESC_CMD_IDE | ||
1242 | | (skb->len - (hdr_len))); | ||
1243 | |||
1244 | |||
1245 | if (++i == adapter->tx_ring.count) i = 0; | ||
1246 | adapter->tx_ring.next_to_use = i; | ||
1247 | |||
1248 | return 1; | ||
1249 | } | ||
1250 | |||
1251 | return 0; | ||
1252 | } | ||
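/*
 * Worked example for the context-descriptor offsets computed above, assuming
 * an untagged Ethernet + IPv4 + TCP frame with no IP or TCP options (14-byte
 * Ethernet header, 20-byte IP header, 20-byte TCP header):
 *
 *	ipcss   = 14  (start of the IP header)
 *	ipcso   = 24  (IP checksum field, 14 + 10)
 *	ipcse   = 33  (last byte of the IP header)
 *	tucss   = 34  (start of the TCP header)
 *	tucso   = 50  (TCP checksum field, 34 + 16)
 *	hdr_len = 54  (14 + 20 + 20, the headers replicated per segment)
 */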
1253 | |||
1254 | static bool | ||
1255 | ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | ||
1256 | { | ||
1257 | struct ixgb_context_desc *context_desc; | ||
1258 | unsigned int i; | ||
1259 | u8 css, cso; | ||
1260 | |||
1261 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
1262 | struct ixgb_buffer *buffer_info; | ||
1263 | css = skb_checksum_start_offset(skb); | ||
1264 | cso = css + skb->csum_offset; | ||
1265 | |||
1266 | i = adapter->tx_ring.next_to_use; | ||
1267 | context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); | ||
1268 | buffer_info = &adapter->tx_ring.buffer_info[i]; | ||
1269 | WARN_ON(buffer_info->dma != 0); | ||
1270 | |||
1271 | context_desc->tucss = css; | ||
1272 | context_desc->tucso = cso; | ||
1273 | context_desc->tucse = 0; | ||
1274 | /* zero out any previously existing data in one instruction */ | ||
1275 | *(u32 *)&(context_desc->ipcss) = 0; | ||
1276 | context_desc->status = 0; | ||
1277 | context_desc->hdr_len = 0; | ||
1278 | context_desc->mss = 0; | ||
1279 | context_desc->cmd_type_len = | ||
1280 | cpu_to_le32(IXGB_CONTEXT_DESC_TYPE | ||
1281 | | IXGB_TX_DESC_CMD_IDE); | ||
1282 | |||
1283 | if (++i == adapter->tx_ring.count) i = 0; | ||
1284 | adapter->tx_ring.next_to_use = i; | ||
1285 | |||
1286 | return true; | ||
1287 | } | ||
1288 | |||
1289 | return false; | ||
1290 | } | ||
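/*
 * Example for the checksum-offload offsets above: for an IPv4/TCP frame with
 * no options, css (the checksum start) is the transport header offset, 34,
 * and cso = css + skb->csum_offset = 34 + 16 = 50; for UDP the checksum field
 * sits 6 bytes into the header, giving cso = 40.
 */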
1291 | |||
1292 | #define IXGB_MAX_TXD_PWR 14 | ||
1293 | #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR) | ||
1294 | |||
1295 | static int | ||
1296 | ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | ||
1297 | unsigned int first) | ||
1298 | { | ||
1299 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
1300 | struct pci_dev *pdev = adapter->pdev; | ||
1301 | struct ixgb_buffer *buffer_info; | ||
1302 | int len = skb_headlen(skb); | ||
1303 | unsigned int offset = 0, size, count = 0, i; | ||
1304 | unsigned int mss = skb_shinfo(skb)->gso_size; | ||
1305 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1306 | unsigned int f; | ||
1307 | |||
1308 | i = tx_ring->next_to_use; | ||
1309 | |||
1310 | while (len) { | ||
1311 | buffer_info = &tx_ring->buffer_info[i]; | ||
1312 | size = min(len, IXGB_MAX_DATA_PER_TXD); | ||
1313 | /* Workaround for premature desc write-backs | ||
1314 | * in TSO mode. Append 4-byte sentinel desc */ | ||
1315 | if (unlikely(mss && !nr_frags && size == len && size > 8)) | ||
1316 | size -= 4; | ||
1317 | |||
1318 | buffer_info->length = size; | ||
1319 | WARN_ON(buffer_info->dma != 0); | ||
1320 | buffer_info->time_stamp = jiffies; | ||
1321 | buffer_info->mapped_as_page = false; | ||
1322 | buffer_info->dma = dma_map_single(&pdev->dev, | ||
1323 | skb->data + offset, | ||
1324 | size, DMA_TO_DEVICE); | ||
1325 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | ||
1326 | goto dma_error; | ||
1327 | buffer_info->next_to_watch = 0; | ||
1328 | |||
1329 | len -= size; | ||
1330 | offset += size; | ||
1331 | count++; | ||
1332 | if (len) { | ||
1333 | i++; | ||
1334 | if (i == tx_ring->count) | ||
1335 | i = 0; | ||
1336 | } | ||
1337 | } | ||
1338 | |||
1339 | for (f = 0; f < nr_frags; f++) { | ||
1340 | struct skb_frag_struct *frag; | ||
1341 | |||
1342 | frag = &skb_shinfo(skb)->frags[f]; | ||
1343 | len = frag->size; | ||
1344 | offset = frag->page_offset; | ||
1345 | |||
1346 | while (len) { | ||
1347 | i++; | ||
1348 | if (i == tx_ring->count) | ||
1349 | i = 0; | ||
1350 | |||
1351 | buffer_info = &tx_ring->buffer_info[i]; | ||
1352 | size = min(len, IXGB_MAX_DATA_PER_TXD); | ||
1353 | |||
1354 | /* Workaround for premature desc write-backs | ||
1355 | * in TSO mode. Append 4-byte sentinel desc */ | ||
1356 | if (unlikely(mss && (f == (nr_frags - 1)) | ||
1357 | && size == len && size > 8)) | ||
1358 | size -= 4; | ||
1359 | |||
1360 | buffer_info->length = size; | ||
1361 | buffer_info->time_stamp = jiffies; | ||
1362 | buffer_info->mapped_as_page = true; | ||
1363 | buffer_info->dma = | ||
1364 | dma_map_page(&pdev->dev, frag->page, | ||
1365 | offset, size, DMA_TO_DEVICE); | ||
1366 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | ||
1367 | goto dma_error; | ||
1368 | buffer_info->next_to_watch = 0; | ||
1369 | |||
1370 | len -= size; | ||
1371 | offset += size; | ||
1372 | count++; | ||
1373 | } | ||
1374 | } | ||
1375 | tx_ring->buffer_info[i].skb = skb; | ||
1376 | tx_ring->buffer_info[first].next_to_watch = i; | ||
1377 | |||
1378 | return count; | ||
1379 | |||
1380 | dma_error: | ||
1381 | dev_err(&pdev->dev, "TX DMA map failed\n"); | ||
1382 | buffer_info->dma = 0; | ||
1383 | if (count) | ||
1384 | count--; | ||
1385 | |||
1386 | while (count--) { | ||
1387 | if (i == 0) | ||
1388 | i += tx_ring->count; | ||
1389 | i--; | ||
1390 | buffer_info = &tx_ring->buffer_info[i]; | ||
1391 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | ||
1392 | } | ||
1393 | |||
1394 | return 0; | ||
1395 | } | ||
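/*
 * Mapping example: with IXGB_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, a
 * 20000-byte linear (non-paged) skb is split across two descriptors of 16384
 * and 3616 bytes; each page fragment is split the same way in the second
 * loop above.
 */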
1396 | |||
1397 | static void | ||
1398 | ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | ||
1399 | { | ||
1400 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
1401 | struct ixgb_tx_desc *tx_desc = NULL; | ||
1402 | struct ixgb_buffer *buffer_info; | ||
1403 | u32 cmd_type_len = adapter->tx_cmd_type; | ||
1404 | u8 status = 0; | ||
1405 | u8 popts = 0; | ||
1406 | unsigned int i; | ||
1407 | |||
1408 | if (tx_flags & IXGB_TX_FLAGS_TSO) { | ||
1409 | cmd_type_len |= IXGB_TX_DESC_CMD_TSE; | ||
1410 | popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); | ||
1411 | } | ||
1412 | |||
1413 | if (tx_flags & IXGB_TX_FLAGS_CSUM) | ||
1414 | popts |= IXGB_TX_DESC_POPTS_TXSM; | ||
1415 | |||
1416 | if (tx_flags & IXGB_TX_FLAGS_VLAN) | ||
1417 | cmd_type_len |= IXGB_TX_DESC_CMD_VLE; | ||
1418 | |||
1419 | i = tx_ring->next_to_use; | ||
1420 | |||
1421 | while (count--) { | ||
1422 | buffer_info = &tx_ring->buffer_info[i]; | ||
1423 | tx_desc = IXGB_TX_DESC(*tx_ring, i); | ||
1424 | tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | ||
1425 | tx_desc->cmd_type_len = | ||
1426 | cpu_to_le32(cmd_type_len | buffer_info->length); | ||
1427 | tx_desc->status = status; | ||
1428 | tx_desc->popts = popts; | ||
1429 | tx_desc->vlan = cpu_to_le16(vlan_id); | ||
1430 | |||
1431 | if (++i == tx_ring->count) i = 0; | ||
1432 | } | ||
1433 | |||
1434 | tx_desc->cmd_type_len |= | ||
1435 | cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS); | ||
1436 | |||
1437 | /* Force memory writes to complete before letting h/w | ||
1438 | * know there are new descriptors to fetch. (Only | ||
1439 | * applicable for weak-ordered memory model archs, | ||
1440 | * such as IA-64). */ | ||
1441 | wmb(); | ||
1442 | |||
1443 | tx_ring->next_to_use = i; | ||
1444 | IXGB_WRITE_REG(&adapter->hw, TDT, i); | ||
1445 | } | ||
1446 | |||
1447 | static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size) | ||
1448 | { | ||
1449 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1450 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
1451 | |||
1452 | netif_stop_queue(netdev); | ||
1453 | /* Herbert's original patch had: | ||
1454 | * smp_mb__after_netif_stop_queue(); | ||
1455 | * but since that doesn't exist yet, just open code it. */ | ||
1456 | smp_mb(); | ||
1457 | |||
1458 | /* We need to check again in case another CPU has just | ||
1459 | * made room available. */ | ||
1460 | if (likely(IXGB_DESC_UNUSED(tx_ring) < size)) | ||
1461 | return -EBUSY; | ||
1462 | |||
1463 | /* A reprieve! */ | ||
1464 | netif_start_queue(netdev); | ||
1465 | ++adapter->restart_queue; | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | static int ixgb_maybe_stop_tx(struct net_device *netdev, | ||
1470 | struct ixgb_desc_ring *tx_ring, int size) | ||
1471 | { | ||
1472 | if (likely(IXGB_DESC_UNUSED(tx_ring) >= size)) | ||
1473 | return 0; | ||
1474 | return __ixgb_maybe_stop_tx(netdev, size); | ||
1475 | } | ||
1476 | |||
1477 | |||
1478 | /* Tx Descriptors needed, worst case */ | ||
1479 | #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ | ||
1480 | (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) | ||
1481 | #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \ | ||
1482 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \ | ||
1483 | + 1 /* one more needed for sentinel TSO workaround */ | ||
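/*
 * Worst-case arithmetic for DESC_NEEDED, assuming 4 KiB pages and the usual
 * MAX_SKB_FRAGS of 18 for that page size: TXD_USE_COUNT(16384) = 1 for the
 * linear data, 18 * TXD_USE_COUNT(4096) = 18 for the fragments, plus 1
 * context descriptor and 1 TSO sentinel, i.e. roughly 21 descriptors must be
 * free before a transmit is attempted.
 */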
1484 | |||
1485 | static netdev_tx_t | ||
1486 | ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
1487 | { | ||
1488 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1489 | unsigned int first; | ||
1490 | unsigned int tx_flags = 0; | ||
1491 | int vlan_id = 0; | ||
1492 | int count = 0; | ||
1493 | int tso; | ||
1494 | |||
1495 | if (test_bit(__IXGB_DOWN, &adapter->flags)) { | ||
1496 | dev_kfree_skb(skb); | ||
1497 | return NETDEV_TX_OK; | ||
1498 | } | ||
1499 | |||
1500 | if (skb->len <= 0) { | ||
1501 | dev_kfree_skb(skb); | ||
1502 | return NETDEV_TX_OK; | ||
1503 | } | ||
1504 | |||
1505 | if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, | ||
1506 | DESC_NEEDED))) | ||
1507 | return NETDEV_TX_BUSY; | ||
1508 | |||
1509 | if (vlan_tx_tag_present(skb)) { | ||
1510 | tx_flags |= IXGB_TX_FLAGS_VLAN; | ||
1511 | vlan_id = vlan_tx_tag_get(skb); | ||
1512 | } | ||
1513 | |||
1514 | first = adapter->tx_ring.next_to_use; | ||
1515 | |||
1516 | tso = ixgb_tso(adapter, skb); | ||
1517 | if (tso < 0) { | ||
1518 | dev_kfree_skb(skb); | ||
1519 | return NETDEV_TX_OK; | ||
1520 | } | ||
1521 | |||
1522 | if (likely(tso)) | ||
1523 | tx_flags |= IXGB_TX_FLAGS_TSO; | ||
1524 | else if (ixgb_tx_csum(adapter, skb)) | ||
1525 | tx_flags |= IXGB_TX_FLAGS_CSUM; | ||
1526 | |||
1527 | count = ixgb_tx_map(adapter, skb, first); | ||
1528 | |||
1529 | if (count) { | ||
1530 | ixgb_tx_queue(adapter, count, vlan_id, tx_flags); | ||
1531 | /* Make sure there is space in the ring for the next send. */ | ||
1532 | ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); | ||
1533 | |||
1534 | } else { | ||
1535 | dev_kfree_skb_any(skb); | ||
1536 | adapter->tx_ring.buffer_info[first].time_stamp = 0; | ||
1537 | adapter->tx_ring.next_to_use = first; | ||
1538 | } | ||
1539 | |||
1540 | return NETDEV_TX_OK; | ||
1541 | } | ||
1542 | |||
1543 | /** | ||
1544 | * ixgb_tx_timeout - Respond to a Tx Hang | ||
1545 | * @netdev: network interface device structure | ||
1546 | **/ | ||
1547 | |||
1548 | static void | ||
1549 | ixgb_tx_timeout(struct net_device *netdev) | ||
1550 | { | ||
1551 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1552 | |||
1553 | /* Do the reset outside of interrupt context */ | ||
1554 | schedule_work(&adapter->tx_timeout_task); | ||
1555 | } | ||
1556 | |||
1557 | static void | ||
1558 | ixgb_tx_timeout_task(struct work_struct *work) | ||
1559 | { | ||
1560 | struct ixgb_adapter *adapter = | ||
1561 | container_of(work, struct ixgb_adapter, tx_timeout_task); | ||
1562 | |||
1563 | adapter->tx_timeout_count++; | ||
1564 | ixgb_down(adapter, true); | ||
1565 | ixgb_up(adapter); | ||
1566 | } | ||
1567 | |||
1568 | /** | ||
1569 | * ixgb_get_stats - Get System Network Statistics | ||
1570 | * @netdev: network interface device structure | ||
1571 | * | ||
1572 | * Returns the address of the device statistics structure. | ||
1573 | * The statistics are actually updated from the timer callback. | ||
1574 | **/ | ||
1575 | |||
1576 | static struct net_device_stats * | ||
1577 | ixgb_get_stats(struct net_device *netdev) | ||
1578 | { | ||
1579 | return &netdev->stats; | ||
1580 | } | ||
1581 | |||
1582 | /** | ||
1583 | * ixgb_change_mtu - Change the Maximum Transfer Unit | ||
1584 | * @netdev: network interface device structure | ||
1585 | * @new_mtu: new value for maximum frame size | ||
1586 | * | ||
1587 | * Returns 0 on success, negative on failure | ||
1588 | **/ | ||
1589 | |||
1590 | static int | ||
1591 | ixgb_change_mtu(struct net_device *netdev, int new_mtu) | ||
1592 | { | ||
1593 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1594 | int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | ||
1595 | int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | ||
1596 | |||
1597 | /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ | ||
1598 | if ((new_mtu < 68) || | ||
1599 | (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { | ||
1600 | netif_err(adapter, probe, adapter->netdev, | ||
1601 | "Invalid MTU setting %d\n", new_mtu); | ||
1602 | return -EINVAL; | ||
1603 | } | ||
1604 | |||
1605 | if (old_max_frame == max_frame) | ||
1606 | return 0; | ||
1607 | |||
1608 | if (netif_running(netdev)) | ||
1609 | ixgb_down(adapter, true); | ||
1610 | |||
1611 | adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ | ||
1612 | |||
1613 | netdev->mtu = new_mtu; | ||
1614 | |||
1615 | if (netif_running(netdev)) | ||
1616 | ixgb_up(adapter); | ||
1617 | |||
1618 | return 0; | ||
1619 | } | ||
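/*
 * Example for the bounds check above, assuming the usual
 * IXGB_MAX_JUMBO_FRAME_SIZE of 0x3F00 (16128) bytes: a requested MTU of 9000
 * gives max_frame = 9000 + 14 + 4 = 9018, well within the limit, so the
 * interface is bounced (if running) with rx_buffer_len = 9026; the largest
 * accepted MTU works out to 16128 + 4 - 18 = 16114 bytes.
 */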
1620 | |||
1621 | /** | ||
1622 | * ixgb_update_stats - Update the board statistics counters. | ||
1623 | * @adapter: board private structure | ||
1624 | **/ | ||
1625 | |||
1626 | void | ||
1627 | ixgb_update_stats(struct ixgb_adapter *adapter) | ||
1628 | { | ||
1629 | struct net_device *netdev = adapter->netdev; | ||
1630 | struct pci_dev *pdev = adapter->pdev; | ||
1631 | |||
1632 | /* Prevent stats update while adapter is being reset */ | ||
1633 | if (pci_channel_offline(pdev)) | ||
1634 | return; | ||
1635 | |||
1636 | if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || | ||
1637 | (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { | ||
1638 | u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); | ||
1639 | u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); | ||
1640 | u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); | ||
1641 | u64 bcast = ((u64)bcast_h << 32) | bcast_l; | ||
1642 | |||
1643 | multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); | ||
1644 | /* fix up multicast stats by removing broadcasts */ | ||
1645 | if (multi >= bcast) | ||
1646 | multi -= bcast; | ||
1647 | |||
1648 | adapter->stats.mprcl += (multi & 0xFFFFFFFF); | ||
1649 | adapter->stats.mprch += (multi >> 32); | ||
1650 | adapter->stats.bprcl += bcast_l; | ||
1651 | adapter->stats.bprch += bcast_h; | ||
1652 | } else { | ||
1653 | adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); | ||
1654 | adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); | ||
1655 | adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); | ||
1656 | adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); | ||
1657 | } | ||
1658 | adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); | ||
1659 | adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); | ||
1660 | adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); | ||
1661 | adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); | ||
1662 | adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); | ||
1663 | adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); | ||
1664 | adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); | ||
1665 | adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); | ||
1666 | adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL); | ||
1667 | adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); | ||
1668 | adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); | ||
1669 | adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); | ||
1670 | adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); | ||
1671 | adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); | ||
1672 | adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); | ||
1673 | adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); | ||
1674 | adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); | ||
1675 | adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); | ||
1676 | adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS); | ||
1677 | adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); | ||
1678 | adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); | ||
1679 | adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); | ||
1680 | adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); | ||
1681 | adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); | ||
1682 | adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); | ||
1683 | adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); | ||
1684 | adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); | ||
1685 | adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); | ||
1686 | adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); | ||
1687 | adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); | ||
1688 | adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); | ||
1689 | adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); | ||
1690 | adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); | ||
1691 | adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); | ||
1692 | adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); | ||
1693 | adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); | ||
1694 | adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); | ||
1695 | adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); | ||
1696 | adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); | ||
1697 | adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); | ||
1698 | adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); | ||
1699 | adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); | ||
1700 | adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); | ||
1701 | adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); | ||
1702 | adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); | ||
1703 | adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); | ||
1704 | adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); | ||
1705 | adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); | ||
1706 | adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); | ||
1707 | adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); | ||
1708 | adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC); | ||
1709 | adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); | ||
1710 | adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); | ||
1711 | adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); | ||
1712 | adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); | ||
1713 | adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); | ||
1714 | |||
1715 | /* Fill out the OS statistics structure */ | ||
1716 | |||
1717 | netdev->stats.rx_packets = adapter->stats.gprcl; | ||
1718 | netdev->stats.tx_packets = adapter->stats.gptcl; | ||
1719 | netdev->stats.rx_bytes = adapter->stats.gorcl; | ||
1720 | netdev->stats.tx_bytes = adapter->stats.gotcl; | ||
1721 | netdev->stats.multicast = adapter->stats.mprcl; | ||
1722 | netdev->stats.collisions = 0; | ||
1723 | |||
1724 | /* ignore RLEC as it reports errors for padded (<64bytes) frames | ||
1725 | * with a length in the type/len field */ | ||
1726 | netdev->stats.rx_errors = | ||
1727 | /* adapter->stats.rnbc + */ adapter->stats.crcerrs + | ||
1728 | adapter->stats.ruc + | ||
1729 | adapter->stats.roc /*+ adapter->stats.rlec */ + | ||
1730 | adapter->stats.icbc + | ||
1731 | adapter->stats.ecbc + adapter->stats.mpc; | ||
1732 | |||
1733 | /* see above | ||
1734 | * netdev->stats.rx_length_errors = adapter->stats.rlec; | ||
1735 | */ | ||
1736 | |||
1737 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; | ||
1738 | netdev->stats.rx_fifo_errors = adapter->stats.mpc; | ||
1739 | netdev->stats.rx_missed_errors = adapter->stats.mpc; | ||
1740 | netdev->stats.rx_over_errors = adapter->stats.mpc; | ||
1741 | |||
1742 | netdev->stats.tx_errors = 0; | ||
1743 | netdev->stats.rx_frame_errors = 0; | ||
1744 | netdev->stats.tx_aborted_errors = 0; | ||
1745 | netdev->stats.tx_carrier_errors = 0; | ||
1746 | netdev->stats.tx_fifo_errors = 0; | ||
1747 | netdev->stats.tx_heartbeat_errors = 0; | ||
1748 | netdev->stats.tx_window_errors = 0; | ||
1749 | } | ||
1750 | |||
1751 | #define IXGB_MAX_INTR 10 | ||
1752 | /** | ||
1753 | * ixgb_intr - Interrupt Handler | ||
1754 | * @irq: interrupt number | ||
1755 | * @data: pointer to a network interface device structure | ||
1756 | **/ | ||
1757 | |||
1758 | static irqreturn_t | ||
1759 | ixgb_intr(int irq, void *data) | ||
1760 | { | ||
1761 | struct net_device *netdev = data; | ||
1762 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
1763 | struct ixgb_hw *hw = &adapter->hw; | ||
1764 | u32 icr = IXGB_READ_REG(hw, ICR); | ||
1765 | |||
1766 | if (unlikely(!icr)) | ||
1767 | return IRQ_NONE; /* Not our interrupt */ | ||
1768 | |||
1769 | if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) | ||
1770 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) | ||
1771 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
1772 | |||
1773 | if (napi_schedule_prep(&adapter->napi)) { | ||
1774 | |||
1775 | /* Disable interrupts and register for poll. The flush | ||
1776 | of the posted write is intentionally left out. | ||
1777 | */ | ||
1778 | |||
1779 | IXGB_WRITE_REG(&adapter->hw, IMC, ~0); | ||
1780 | __napi_schedule(&adapter->napi); | ||
1781 | } | ||
1782 | return IRQ_HANDLED; | ||
1783 | } | ||
1784 | |||
1785 | /** | ||
1786 | * ixgb_clean - NAPI Rx polling callback | ||
1787 | * @napi: NAPI struct embedded in the board private structure | ||
1788 | **/ | ||
1789 | |||
1790 | static int | ||
1791 | ixgb_clean(struct napi_struct *napi, int budget) | ||
1792 | { | ||
1793 | struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); | ||
1794 | int work_done = 0; | ||
1795 | |||
1796 | ixgb_clean_tx_irq(adapter); | ||
1797 | ixgb_clean_rx_irq(adapter, &work_done, budget); | ||
1798 | |||
1799 | /* If budget not fully consumed, exit the polling mode */ | ||
1800 | if (work_done < budget) { | ||
1801 | napi_complete(napi); | ||
1802 | if (!test_bit(__IXGB_DOWN, &adapter->flags)) | ||
1803 | ixgb_irq_enable(adapter); | ||
1804 | } | ||
1805 | |||
1806 | return work_done; | ||
1807 | } | ||
1808 | |||
1809 | /** | ||
1810 | * ixgb_clean_tx_irq - Reclaim resources after transmit completes | ||
1811 | * @adapter: board private structure | ||
1812 | **/ | ||
1813 | |||
1814 | static bool | ||
1815 | ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | ||
1816 | { | ||
1817 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
1818 | struct net_device *netdev = adapter->netdev; | ||
1819 | struct ixgb_tx_desc *tx_desc, *eop_desc; | ||
1820 | struct ixgb_buffer *buffer_info; | ||
1821 | unsigned int i, eop; | ||
1822 | bool cleaned = false; | ||
1823 | |||
1824 | i = tx_ring->next_to_clean; | ||
1825 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
1826 | eop_desc = IXGB_TX_DESC(*tx_ring, eop); | ||
1827 | |||
1828 | while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { | ||
1829 | |||
1830 | rmb(); /* read buffer_info after eop_desc */ | ||
1831 | for (cleaned = false; !cleaned; ) { | ||
1832 | tx_desc = IXGB_TX_DESC(*tx_ring, i); | ||
1833 | buffer_info = &tx_ring->buffer_info[i]; | ||
1834 | |||
1835 | if (tx_desc->popts & | ||
1836 | (IXGB_TX_DESC_POPTS_TXSM | | ||
1837 | IXGB_TX_DESC_POPTS_IXSM)) | ||
1838 | adapter->hw_csum_tx_good++; | ||
1839 | |||
1840 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | ||
1841 | |||
1842 | *(u32 *)&(tx_desc->status) = 0; | ||
1843 | |||
1844 | cleaned = (i == eop); | ||
1845 | if (++i == tx_ring->count) i = 0; | ||
1846 | } | ||
1847 | |||
1848 | eop = tx_ring->buffer_info[i].next_to_watch; | ||
1849 | eop_desc = IXGB_TX_DESC(*tx_ring, eop); | ||
1850 | } | ||
1851 | |||
1852 | tx_ring->next_to_clean = i; | ||
1853 | |||
1854 | if (unlikely(cleaned && netif_carrier_ok(netdev) && | ||
1855 | IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) { | ||
1856 | /* Make sure that anybody stopping the queue after this | ||
1857 | * sees the new next_to_clean. */ | ||
1858 | smp_mb(); | ||
1859 | |||
1860 | if (netif_queue_stopped(netdev) && | ||
1861 | !(test_bit(__IXGB_DOWN, &adapter->flags))) { | ||
1862 | netif_wake_queue(netdev); | ||
1863 | ++adapter->restart_queue; | ||
1864 | } | ||
1865 | } | ||
1866 | |||
1867 | if (adapter->detect_tx_hung) { | ||
1868 | /* detect a transmit hang in hardware; this serializes the | ||
1869 | * check with the clearing of time_stamp and movement of i */ | ||
1870 | adapter->detect_tx_hung = false; | ||
1871 | if (tx_ring->buffer_info[eop].time_stamp && | ||
1872 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) | ||
1873 | && !(IXGB_READ_REG(&adapter->hw, STATUS) & | ||
1874 | IXGB_STATUS_TXOFF)) { | ||
1875 | /* detected Tx unit hang */ | ||
1876 | netif_err(adapter, drv, adapter->netdev, | ||
1877 | "Detected Tx Unit Hang\n" | ||
1878 | " TDH <%x>\n" | ||
1879 | " TDT <%x>\n" | ||
1880 | " next_to_use <%x>\n" | ||
1881 | " next_to_clean <%x>\n" | ||
1882 | "buffer_info[next_to_clean]\n" | ||
1883 | " time_stamp <%lx>\n" | ||
1884 | " next_to_watch <%x>\n" | ||
1885 | " jiffies <%lx>\n" | ||
1886 | " next_to_watch.status <%x>\n", | ||
1887 | IXGB_READ_REG(&adapter->hw, TDH), | ||
1888 | IXGB_READ_REG(&adapter->hw, TDT), | ||
1889 | tx_ring->next_to_use, | ||
1890 | tx_ring->next_to_clean, | ||
1891 | tx_ring->buffer_info[eop].time_stamp, | ||
1892 | eop, | ||
1893 | jiffies, | ||
1894 | eop_desc->status); | ||
1895 | netif_stop_queue(netdev); | ||
1896 | } | ||
1897 | } | ||
1898 | |||
1899 | return cleaned; | ||
1900 | } | ||
1901 | |||
1902 | /** | ||
1903 | * ixgb_rx_checksum - Receive Checksum Offload for 82597. | ||
1904 | * @adapter: board private structure | ||
1905 | * @rx_desc: receive descriptor | ||
1906 |  * @skb: socket buffer with received data | ||
1907 | **/ | ||
1908 | |||
1909 | static void | ||
1910 | ixgb_rx_checksum(struct ixgb_adapter *adapter, | ||
1911 | struct ixgb_rx_desc *rx_desc, | ||
1912 | struct sk_buff *skb) | ||
1913 | { | ||
1914 | /* Ignore Checksum bit is set OR | ||
1915 | * TCP Checksum has not been calculated | ||
1916 | */ | ||
1917 | if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || | ||
1918 | (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { | ||
1919 | skb_checksum_none_assert(skb); | ||
1920 | return; | ||
1921 | } | ||
1922 | |||
1923 | /* At this point we know the hardware did the TCP checksum */ | ||
1924 | /* now look at the TCP checksum error bit */ | ||
1925 | if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { | ||
1926 | /* let the stack verify checksum errors */ | ||
1927 | skb_checksum_none_assert(skb); | ||
1928 | adapter->hw_csum_rx_error++; | ||
1929 | } else { | ||
1930 | /* TCP checksum is good */ | ||
1931 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1932 | adapter->hw_csum_rx_good++; | ||
1933 | } | ||
1934 | } | ||
1935 | |||
1936 | /* | ||
1937 | * this should improve performance for small packets with large amounts | ||
1938 | * of reassembly being done in the stack | ||
1939 | */ | ||
1940 | static void ixgb_check_copybreak(struct net_device *netdev, | ||
1941 | struct ixgb_buffer *buffer_info, | ||
1942 | u32 length, struct sk_buff **skb) | ||
1943 | { | ||
1944 | struct sk_buff *new_skb; | ||
1945 | |||
1946 | if (length > copybreak) | ||
1947 | return; | ||
1948 | |||
1949 | new_skb = netdev_alloc_skb_ip_align(netdev, length); | ||
1950 | if (!new_skb) | ||
1951 | return; | ||
1952 | |||
1953 | skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, | ||
1954 | (*skb)->data - NET_IP_ALIGN, | ||
1955 | length + NET_IP_ALIGN); | ||
1956 | /* save the skb in buffer_info as good */ | ||
1957 | buffer_info->skb = *skb; | ||
1958 | *skb = new_skb; | ||
1959 | } | ||
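/* Walk-through of the helper above: for receives no longer than the
 * copybreak module parameter (declared earlier in this file), the payload is
 * copied into a freshly allocated, IP-aligned skb; the original full-sized
 * skb is stashed back in buffer_info->skb so ixgb_alloc_rx_buffers() can
 * recycle it with skb_trim(skb, 0) instead of allocating a new buffer. */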
1960 | |||
1961 | /** | ||
1962 |  * ixgb_clean_rx_irq - Send received data up the network stack | ||
1963 | * @adapter: board private structure | ||
1964 | **/ | ||
1965 | |||
1966 | static bool | ||
1967 | ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) | ||
1968 | { | ||
1969 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | ||
1970 | struct net_device *netdev = adapter->netdev; | ||
1971 | struct pci_dev *pdev = adapter->pdev; | ||
1972 | struct ixgb_rx_desc *rx_desc, *next_rxd; | ||
1973 | struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; | ||
1974 | u32 length; | ||
1975 | unsigned int i, j; | ||
1976 | int cleaned_count = 0; | ||
1977 | bool cleaned = false; | ||
1978 | |||
1979 | i = rx_ring->next_to_clean; | ||
1980 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | ||
1981 | buffer_info = &rx_ring->buffer_info[i]; | ||
1982 | |||
1983 | while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { | ||
1984 | struct sk_buff *skb; | ||
1985 | u8 status; | ||
1986 | |||
1987 | if (*work_done >= work_to_do) | ||
1988 | break; | ||
1989 | |||
1990 | (*work_done)++; | ||
1991 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | ||
1992 | status = rx_desc->status; | ||
1993 | skb = buffer_info->skb; | ||
1994 | buffer_info->skb = NULL; | ||
1995 | |||
1996 | prefetch(skb->data - NET_IP_ALIGN); | ||
1997 | |||
1998 | if (++i == rx_ring->count) | ||
1999 | i = 0; | ||
2000 | next_rxd = IXGB_RX_DESC(*rx_ring, i); | ||
2001 | prefetch(next_rxd); | ||
2002 | |||
2003 | j = i + 1; | ||
2004 | if (j == rx_ring->count) | ||
2005 | j = 0; | ||
2006 | next2_buffer = &rx_ring->buffer_info[j]; | ||
2007 | prefetch(next2_buffer); | ||
2008 | |||
2009 | next_buffer = &rx_ring->buffer_info[i]; | ||
2010 | |||
2011 | cleaned = true; | ||
2012 | cleaned_count++; | ||
2013 | |||
2014 | dma_unmap_single(&pdev->dev, | ||
2015 | buffer_info->dma, | ||
2016 | buffer_info->length, | ||
2017 | DMA_FROM_DEVICE); | ||
2018 | buffer_info->dma = 0; | ||
2019 | |||
2020 | length = le16_to_cpu(rx_desc->length); | ||
2021 | rx_desc->length = 0; | ||
2022 | |||
2023 | if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { | ||
2024 | |||
2025 | /* All receives must fit into a single buffer */ | ||
2026 | |||
2027 | IXGB_DBG("Receive packet consumed multiple buffers " | ||
2028 | "length<%x>\n", length); | ||
2029 | |||
2030 | dev_kfree_skb_irq(skb); | ||
2031 | goto rxdesc_done; | ||
2032 | } | ||
2033 | |||
2034 | if (unlikely(rx_desc->errors & | ||
2035 | (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | | ||
2036 | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { | ||
2037 | dev_kfree_skb_irq(skb); | ||
2038 | goto rxdesc_done; | ||
2039 | } | ||
2040 | |||
2041 | ixgb_check_copybreak(netdev, buffer_info, length, &skb); | ||
2042 | |||
2043 | /* Good Receive */ | ||
2044 | skb_put(skb, length); | ||
2045 | |||
2046 | /* Receive Checksum Offload */ | ||
2047 | ixgb_rx_checksum(adapter, rx_desc, skb); | ||
2048 | |||
2049 | skb->protocol = eth_type_trans(skb, netdev); | ||
2050 | if (status & IXGB_RX_DESC_STATUS_VP) | ||
2051 | __vlan_hwaccel_put_tag(skb, | ||
2052 | le16_to_cpu(rx_desc->special)); | ||
2053 | |||
2054 | netif_receive_skb(skb); | ||
2055 | |||
2056 | rxdesc_done: | ||
2057 | /* clean up descriptor, might be written over by hw */ | ||
2058 | rx_desc->status = 0; | ||
2059 | |||
2060 | /* return some buffers to hardware, one at a time is too slow */ | ||
2061 | if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) { | ||
2062 | ixgb_alloc_rx_buffers(adapter, cleaned_count); | ||
2063 | cleaned_count = 0; | ||
2064 | } | ||
2065 | |||
2066 | /* use prefetched values */ | ||
2067 | rx_desc = next_rxd; | ||
2068 | buffer_info = next_buffer; | ||
2069 | } | ||
2070 | |||
2071 | rx_ring->next_to_clean = i; | ||
2072 | |||
2073 | cleaned_count = IXGB_DESC_UNUSED(rx_ring); | ||
2074 | if (cleaned_count) | ||
2075 | ixgb_alloc_rx_buffers(adapter, cleaned_count); | ||
2076 | |||
2077 | return cleaned; | ||
2078 | } | ||
2079 | |||
2080 | /** | ||
2081 | * ixgb_alloc_rx_buffers - Replace used receive buffers | ||
2082 | * @adapter: address of board private structure | ||
2083 | **/ | ||
2084 | |||
2085 | static void | ||
2086 | ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) | ||
2087 | { | ||
2088 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | ||
2089 | struct net_device *netdev = adapter->netdev; | ||
2090 | struct pci_dev *pdev = adapter->pdev; | ||
2091 | struct ixgb_rx_desc *rx_desc; | ||
2092 | struct ixgb_buffer *buffer_info; | ||
2093 | struct sk_buff *skb; | ||
2094 | unsigned int i; | ||
2095 | long cleancount; | ||
2096 | |||
2097 | i = rx_ring->next_to_use; | ||
2098 | buffer_info = &rx_ring->buffer_info[i]; | ||
2099 | cleancount = IXGB_DESC_UNUSED(rx_ring); | ||
2100 | |||
2101 | |||
2102 | /* leave three descriptors unused */ | ||
2103 | while (--cleancount > 2 && cleaned_count--) { | ||
2104 | /* recycle! it's good for you */ | ||
2105 | skb = buffer_info->skb; | ||
2106 | if (skb) { | ||
2107 | skb_trim(skb, 0); | ||
2108 | goto map_skb; | ||
2109 | } | ||
2110 | |||
2111 | skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len); | ||
2112 | if (unlikely(!skb)) { | ||
2113 | /* Better luck next round */ | ||
2114 | adapter->alloc_rx_buff_failed++; | ||
2115 | break; | ||
2116 | } | ||
2117 | |||
2118 | buffer_info->skb = skb; | ||
2119 | buffer_info->length = adapter->rx_buffer_len; | ||
2120 | map_skb: | ||
2121 | buffer_info->dma = dma_map_single(&pdev->dev, | ||
2122 | skb->data, | ||
2123 | adapter->rx_buffer_len, | ||
2124 | DMA_FROM_DEVICE); | ||
2125 | |||
2126 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | ||
2127 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | ||
2128 | /* guarantee DD bit not set now before h/w gets descriptor | ||
2129 | * this is the rest of the workaround for h/w double | ||
2130 | * writeback. */ | ||
2131 | rx_desc->status = 0; | ||
2132 | |||
2133 | |||
2134 | if (++i == rx_ring->count) i = 0; | ||
2135 | buffer_info = &rx_ring->buffer_info[i]; | ||
2136 | } | ||
2137 | |||
2138 | if (likely(rx_ring->next_to_use != i)) { | ||
2139 | rx_ring->next_to_use = i; | ||
2140 | if (unlikely(i-- == 0)) | ||
2141 | i = (rx_ring->count - 1); | ||
2142 | |||
2143 | /* Force memory writes to complete before letting h/w | ||
2144 | * know there are new descriptors to fetch. (Only | ||
2145 | * applicable for weak-ordered memory model archs, such | ||
2146 | * as IA-64). */ | ||
2147 | wmb(); | ||
2148 | IXGB_WRITE_REG(&adapter->hw, RDT, i); | ||
2149 | } | ||
2150 | } | ||
2151 | |||
2152 | static void | ||
2153 | ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) | ||
2154 | { | ||
2155 | u32 ctrl; | ||
2156 | |||
2157 | /* enable VLAN tag insert/strip */ | ||
2158 | ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); | ||
2159 | ctrl |= IXGB_CTRL0_VME; | ||
2160 | IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); | ||
2161 | } | ||
2162 | |||
2163 | static void | ||
2164 | ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) | ||
2165 | { | ||
2166 | u32 ctrl; | ||
2167 | |||
2168 | /* disable VLAN tag insert/strip */ | ||
2169 | ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); | ||
2170 | ctrl &= ~IXGB_CTRL0_VME; | ||
2171 | IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); | ||
2172 | } | ||
2173 | |||
2174 | static void | ||
2175 | ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
2176 | { | ||
2177 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
2178 | u32 vfta, index; | ||
2179 | |||
2180 | /* add VID to filter table */ | ||
2181 | |||
2182 | index = (vid >> 5) & 0x7F; | ||
2183 | vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); | ||
2184 | vfta |= (1 << (vid & 0x1F)); | ||
2185 | ixgb_write_vfta(&adapter->hw, index, vfta); | ||
2186 | set_bit(vid, adapter->active_vlans); | ||
2187 | } | ||
2188 | |||
2189 | static void | ||
2190 | ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
2191 | { | ||
2192 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
2193 | u32 vfta, index; | ||
2194 | |||
2195 | /* remove VID from filter table */ | ||
2196 | |||
2197 | index = (vid >> 5) & 0x7F; | ||
2198 | vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); | ||
2199 | vfta &= ~(1 << (vid & 0x1F)); | ||
2200 | ixgb_write_vfta(&adapter->hw, index, vfta); | ||
2201 | clear_bit(vid, adapter->active_vlans); | ||
2202 | } | ||
2203 | |||
2204 | static void | ||
2205 | ixgb_restore_vlan(struct ixgb_adapter *adapter) | ||
2206 | { | ||
2207 | u16 vid; | ||
2208 | |||
2209 | for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) | ||
2210 | ixgb_vlan_rx_add_vid(adapter->netdev, vid); | ||
2211 | } | ||
2212 | |||
2213 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2214 | /* | ||
2215 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2216 | * without having to re-enable interrupts. It's not called while | ||
2217 | * the interrupt routine is executing. | ||
2218 | */ | ||
2219 | |||
2220 | static void ixgb_netpoll(struct net_device *dev) | ||
2221 | { | ||
2222 | struct ixgb_adapter *adapter = netdev_priv(dev); | ||
2223 | |||
2224 | disable_irq(adapter->pdev->irq); | ||
2225 | ixgb_intr(adapter->pdev->irq, dev); | ||
2226 | enable_irq(adapter->pdev->irq); | ||
2227 | } | ||
2228 | #endif | ||
2229 | |||
2230 | /** | ||
2231 | * ixgb_io_error_detected() - called when PCI error is detected | ||
2232 |  * @pdev: pointer to pci device with error | ||
2233 |  * @state: pci channel state after error | ||
2234 | * | ||
2235 | * This callback is called by the PCI subsystem whenever | ||
2236 | * a PCI bus error is detected. | ||
2237 | */ | ||
2238 | static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, | ||
2239 | enum pci_channel_state state) | ||
2240 | { | ||
2241 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2242 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
2243 | |||
2244 | netif_device_detach(netdev); | ||
2245 | |||
2246 | if (state == pci_channel_io_perm_failure) | ||
2247 | return PCI_ERS_RESULT_DISCONNECT; | ||
2248 | |||
2249 | if (netif_running(netdev)) | ||
2250 | ixgb_down(adapter, true); | ||
2251 | |||
2252 | pci_disable_device(pdev); | ||
2253 | |||
2254 | /* Request a slot reset. */ | ||
2255 | return PCI_ERS_RESULT_NEED_RESET; | ||
2256 | } | ||
2257 | |||
2258 | /** | ||
2259 | * ixgb_io_slot_reset - called after the pci bus has been reset. | ||
2260 |  * @pdev: pointer to pci device with error | ||
2261 | * | ||
2262 | * This callback is called after the PCI bus has been reset. | ||
2263 | * Basically, this tries to restart the card from scratch. | ||
2264 | * This is a shortened version of the device probe/discovery code, | ||
2265 | * it resembles the first-half of the ixgb_probe() routine. | ||
2266 | */ | ||
2267 | static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) | ||
2268 | { | ||
2269 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2270 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
2271 | |||
2272 | if (pci_enable_device(pdev)) { | ||
2273 | netif_err(adapter, probe, adapter->netdev, | ||
2274 | "Cannot re-enable PCI device after reset\n"); | ||
2275 | return PCI_ERS_RESULT_DISCONNECT; | ||
2276 | } | ||
2277 | |||
2278 | /* Perform card reset only on one instance of the card */ | ||
2279 | if (PCI_FUNC(pdev->devfn) != 0) | ||
2280 | return PCI_ERS_RESULT_RECOVERED; | ||
2281 | |||
2282 | pci_set_master(pdev); | ||
2283 | |||
2284 | netif_carrier_off(netdev); | ||
2285 | netif_stop_queue(netdev); | ||
2286 | ixgb_reset(adapter); | ||
2287 | |||
2288 | /* Make sure the EEPROM is good */ | ||
2289 | if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { | ||
2290 | netif_err(adapter, probe, adapter->netdev, | ||
2291 | "After reset, the EEPROM checksum is not valid\n"); | ||
2292 | return PCI_ERS_RESULT_DISCONNECT; | ||
2293 | } | ||
2294 | ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); | ||
2295 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | ||
2296 | |||
2297 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
2298 | netif_err(adapter, probe, adapter->netdev, | ||
2299 | "After reset, invalid MAC address\n"); | ||
2300 | return PCI_ERS_RESULT_DISCONNECT; | ||
2301 | } | ||
2302 | |||
2303 | return PCI_ERS_RESULT_RECOVERED; | ||
2304 | } | ||
2305 | |||
2306 | /** | ||
2307 |  * ixgb_io_resume - called when it's OK to resume normal operations | ||
2308 |  * @pdev: pointer to pci device with error | ||
2309 | * | ||
2310 |  * The error recovery driver tells us that it's OK to resume | ||
2311 | * normal operation. Implementation resembles the second-half | ||
2312 | * of the ixgb_probe() routine. | ||
2313 | */ | ||
2314 | static void ixgb_io_resume(struct pci_dev *pdev) | ||
2315 | { | ||
2316 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2317 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
2318 | |||
2319 | pci_set_master(pdev); | ||
2320 | |||
2321 | if (netif_running(netdev)) { | ||
2322 | if (ixgb_up(adapter)) { | ||
2323 | pr_err("can't bring device back up after reset\n"); | ||
2324 | return; | ||
2325 | } | ||
2326 | } | ||
2327 | |||
2328 | netif_device_attach(netdev); | ||
2329 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
2330 | } | ||
2331 | |||
2332 | /* ixgb_main.c */ | ||
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h new file mode 100644 index 00000000000..e361185920e --- /dev/null +++ b/drivers/net/ixgb/ixgb_osdep.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | /* glue for the OS independent part of ixgb | ||
30 | * includes register access macros | ||
31 | */ | ||
32 | |||
33 | #ifndef _IXGB_OSDEP_H_ | ||
34 | #define _IXGB_OSDEP_H_ | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/sched.h> | ||
41 | |||
42 | #undef ASSERT | ||
43 | #define ASSERT(x) BUG_ON(!(x)) | ||
44 | |||
45 | #define ENTER() pr_debug("%s\n", __func__); | ||
46 | |||
47 | #define IXGB_WRITE_REG(a, reg, value) ( \ | ||
48 | writel((value), ((a)->hw_addr + IXGB_##reg))) | ||
49 | |||
50 | #define IXGB_READ_REG(a, reg) ( \ | ||
51 | readl((a)->hw_addr + IXGB_##reg)) | ||
52 | |||
53 | #define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \ | ||
54 | writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2)))) | ||
55 | |||
56 | #define IXGB_READ_REG_ARRAY(a, reg, offset) ( \ | ||
57 | readl((a)->hw_addr + IXGB_##reg + ((offset) << 2))) | ||
58 | |||
59 | #define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS) | ||
60 | |||
61 | #define IXGB_MEMCPY memcpy | ||
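/* Illustrative expansion of the accessors above, assuming hw->hw_addr holds
 * the ioremap()'d register base set up during probe:
 *
 *	IXGB_WRITE_REG(hw, RDT, i)          => writel(i, hw->hw_addr + IXGB_RDT)
 *	IXGB_READ_REG_ARRAY(hw, VFTA, idx)  => readl(hw->hw_addr + IXGB_VFTA + (idx << 2))
 */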
62 | |||
63 | #endif /* _IXGB_OSDEP_H_ */ | ||
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c new file mode 100644 index 00000000000..dd7fbeb1f7d --- /dev/null +++ b/drivers/net/ixgb/ixgb_param.c | |||
@@ -0,0 +1,469 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel PRO/10GbE Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
31 | #include "ixgb.h" | ||
32 | |||
33 | /* This is the only thing that needs to be changed to adjust the | ||
34 | * maximum number of ports that the driver can manage. | ||
35 | */ | ||
36 | |||
37 | #define IXGB_MAX_NIC 8 | ||
38 | |||
39 | #define OPTION_UNSET -1 | ||
40 | #define OPTION_DISABLED 0 | ||
41 | #define OPTION_ENABLED 1 | ||
42 | |||
43 | /* All parameters are treated the same, as an integer array of values. | ||
44 | * This macro just reduces the need to repeat the same declaration code | ||
45 | * over and over (plus this helps to avoid typo bugs). | ||
46 | */ | ||
47 | |||
48 | #define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET } | ||
49 | #define IXGB_PARAM(X, desc) \ | ||
50 | static int __devinitdata X[IXGB_MAX_NIC+1] \ | ||
51 | = IXGB_PARAM_INIT; \ | ||
52 | static unsigned int num_##X = 0; \ | ||
53 | module_param_array_named(X, X, int, &num_##X, 0); \ | ||
54 | MODULE_PARM_DESC(X, desc); | ||
55 | |||
56 | /* Transmit Descriptor Count | ||
57 | * | ||
58 | * Valid Range: 64-4096 | ||
59 | * | ||
60 | * Default Value: 256 | ||
61 | */ | ||
62 | |||
63 | IXGB_PARAM(TxDescriptors, "Number of transmit descriptors"); | ||
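/* Sketch of what the IXGB_PARAM() macro above generates for this option:
 *
 *	static int __devinitdata TxDescriptors[IXGB_MAX_NIC + 1] = IXGB_PARAM_INIT;
 *	static unsigned int num_TxDescriptors = 0;
 *	module_param_array_named(TxDescriptors, TxDescriptors, int,
 *	                         &num_TxDescriptors, 0);
 *	MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
 *
 * so each option takes a comma-separated per-adapter list on the module
 * command line, e.g. "modprobe ixgb TxDescriptors=1024,512" for two boards.
 */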
64 | |||
65 | /* Receive Descriptor Count | ||
66 | * | ||
67 | * Valid Range: 64-4096 | ||
68 | * | ||
69 | * Default Value: 1024 | ||
70 | */ | ||
71 | |||
72 | IXGB_PARAM(RxDescriptors, "Number of receive descriptors"); | ||
73 | |||
74 | /* User Specified Flow Control Override | ||
75 | * | ||
76 | * Valid Range: 0-3 | ||
77 | * - 0 - No Flow Control | ||
78 | * - 1 - Rx only, respond to PAUSE frames but do not generate them | ||
79 | * - 2 - Tx only, generate PAUSE frames but ignore them on receive | ||
80 | * - 3 - Full Flow Control Support | ||
81 | * | ||
82 | * Default Value: 2 - Tx only (silicon bug avoidance) | ||
83 | */ | ||
84 | |||
85 | IXGB_PARAM(FlowControl, "Flow Control setting"); | ||
86 | |||
87 | /* XsumRX - Receive Checksum Offload Enable/Disable | ||
88 | * | ||
89 | * Valid Range: 0, 1 | ||
90 | * - 0 - disables all checksum offload | ||
91 | * - 1 - enables receive IP/TCP/UDP checksum offload | ||
92 | * on 82597 based NICs | ||
93 | * | ||
94 | * Default Value: 1 | ||
95 | */ | ||
96 | |||
97 | IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); | ||
98 | |||
99 | /* Transmit Interrupt Delay in units of 0.8192 microseconds | ||
100 | * | ||
101 | * Valid Range: 0-65535 | ||
102 | * | ||
103 | * Default Value: 32 | ||
104 | */ | ||
105 | |||
106 | IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay"); | ||
107 | |||
108 | /* Receive Interrupt Delay in units of 0.8192 microseconds | ||
109 | * | ||
110 | * Valid Range: 0-65535 | ||
111 | * | ||
112 | * Default Value: 72 | ||
113 | */ | ||
114 | |||
115 | IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay"); | ||
116 | |||
117 | /* Receive Flow control high threshold (when we send a pause frame) | ||
118 | * (FCRTH) | ||
119 | * | ||
120 | * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity) | ||
121 | * | ||
122 | * Default Value: 196,608 (0x30000) | ||
123 | */ | ||
124 | |||
125 | IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold"); | ||
126 | |||
127 | /* Receive Flow control low threshold (when we send a resume frame) | ||
128 | * (FCRTL) | ||
129 | * | ||
130 | * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity) | ||
131 | * must be less than high threshold by at least 8 bytes | ||
132 | * | ||
133 | * Default Value: 163,840 (0x28000) | ||
134 | */ | ||
135 | |||
136 | IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); | ||
137 | |||
138 | /* Flow control request timeout (how long to pause the link partner's tx) | ||
139 | * (PAP 15:0) | ||
140 | * | ||
141 | * Valid Range: 1 - 65535 | ||
142 | * | ||
143 | * Default Value: 65535 (0xffff) (we'll send an xon if we recover) | ||
144 | */ | ||
145 | |||
146 | IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); | ||
147 | |||
148 | /* Interrupt Delay Enable | ||
149 | * | ||
150 | * Valid Range: 0, 1 | ||
151 | * | ||
152 | * - 0 - disables transmit interrupt delay | ||
153 |  * - 1 - enables transmit interrupt delay | ||
154 | * | ||
155 | * Default Value: 1 | ||
156 | */ | ||
157 | |||
158 | IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); | ||
159 | |||
160 | |||
161 | #define DEFAULT_TIDV 32 | ||
162 | #define MAX_TIDV 0xFFFF | ||
163 | #define MIN_TIDV 0 | ||
164 | |||
165 | #define DEFAULT_RDTR 72 | ||
166 | #define MAX_RDTR 0xFFFF | ||
167 | #define MIN_RDTR 0 | ||
168 | |||
169 | #define XSUMRX_DEFAULT OPTION_ENABLED | ||
170 | |||
171 | #define DEFAULT_FCRTL 0x28000 | ||
172 | #define DEFAULT_FCRTH 0x30000 | ||
173 | #define MIN_FCRTL 0 | ||
174 | #define MAX_FCRTL 0x3FFE8 | ||
175 | #define MIN_FCRTH 8 | ||
176 | #define MAX_FCRTH 0x3FFF0 | ||
177 | |||
178 | #define MIN_FCPAUSE 1 | ||
179 | #define MAX_FCPAUSE 0xffff | ||
180 | #define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */ | ||
181 | |||
182 | struct ixgb_option { | ||
183 | enum { enable_option, range_option, list_option } type; | ||
184 | const char *name; | ||
185 | const char *err; | ||
186 | int def; | ||
187 | union { | ||
188 | struct { /* range_option info */ | ||
189 | int min; | ||
190 | int max; | ||
191 | } r; | ||
192 | struct { /* list_option info */ | ||
193 | int nr; | ||
194 | const struct ixgb_opt_list { | ||
195 | int i; | ||
196 | const char *str; | ||
197 | } *p; | ||
198 | } l; | ||
199 | } arg; | ||
200 | }; | ||
201 | |||
202 | static int __devinit | ||
203 | ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) | ||
204 | { | ||
205 | if (*value == OPTION_UNSET) { | ||
206 | *value = opt->def; | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | switch (opt->type) { | ||
211 | case enable_option: | ||
212 | switch (*value) { | ||
213 | case OPTION_ENABLED: | ||
214 | pr_info("%s Enabled\n", opt->name); | ||
215 | return 0; | ||
216 | case OPTION_DISABLED: | ||
217 | pr_info("%s Disabled\n", opt->name); | ||
218 | return 0; | ||
219 | } | ||
220 | break; | ||
221 | case range_option: | ||
222 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | ||
223 | pr_info("%s set to %i\n", opt->name, *value); | ||
224 | return 0; | ||
225 | } | ||
226 | break; | ||
227 | case list_option: { | ||
228 | int i; | ||
229 | const struct ixgb_opt_list *ent; | ||
230 | |||
231 | for (i = 0; i < opt->arg.l.nr; i++) { | ||
232 | ent = &opt->arg.l.p[i]; | ||
233 | if (*value == ent->i) { | ||
234 | if (ent->str[0] != '\0') | ||
235 | pr_info("%s\n", ent->str); | ||
236 | return 0; | ||
237 | } | ||
238 | } | ||
239 | } | ||
240 | break; | ||
241 | default: | ||
242 | BUG(); | ||
243 | } | ||
244 | |||
245 | pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); | ||
246 | *value = opt->def; | ||
247 | return -1; | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * ixgb_check_options - Range Checking for Command Line Parameters | ||
252 | * @adapter: board private structure | ||
253 | * | ||
254 | * This routine checks all command line parameters for valid user | ||
255 | * input. If an invalid value is given, or if no user specified | ||
256 | * value exists, a default value is used. The final value is stored | ||
257 | * in a variable in the adapter structure. | ||
258 | **/ | ||
259 | |||
260 | void __devinit | ||
261 | ixgb_check_options(struct ixgb_adapter *adapter) | ||
262 | { | ||
263 | int bd = adapter->bd_number; | ||
264 | if (bd >= IXGB_MAX_NIC) { | ||
265 | pr_notice("Warning: no configuration for board #%i\n", bd); | ||
266 | pr_notice("Using defaults for all values\n"); | ||
267 | } | ||
268 | |||
269 | { /* Transmit Descriptor Count */ | ||
270 | const struct ixgb_option opt = { | ||
271 | .type = range_option, | ||
272 | .name = "Transmit Descriptors", | ||
273 | .err = "using default of " __MODULE_STRING(DEFAULT_TXD), | ||
274 | .def = DEFAULT_TXD, | ||
275 | .arg = { .r = { .min = MIN_TXD, | ||
276 | .max = MAX_TXD}} | ||
277 | }; | ||
278 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | ||
279 | |||
280 | if (num_TxDescriptors > bd) { | ||
281 | tx_ring->count = TxDescriptors[bd]; | ||
282 | ixgb_validate_option(&tx_ring->count, &opt); | ||
283 | } else { | ||
284 | tx_ring->count = opt.def; | ||
285 | } | ||
286 | tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); | ||
287 | } | ||
288 | { /* Receive Descriptor Count */ | ||
289 | const struct ixgb_option opt = { | ||
290 | .type = range_option, | ||
291 | .name = "Receive Descriptors", | ||
292 | .err = "using default of " __MODULE_STRING(DEFAULT_RXD), | ||
293 | .def = DEFAULT_RXD, | ||
294 | .arg = { .r = { .min = MIN_RXD, | ||
295 | .max = MAX_RXD}} | ||
296 | }; | ||
297 | struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; | ||
298 | |||
299 | if (num_RxDescriptors > bd) { | ||
300 | rx_ring->count = RxDescriptors[bd]; | ||
301 | ixgb_validate_option(&rx_ring->count, &opt); | ||
302 | } else { | ||
303 | rx_ring->count = opt.def; | ||
304 | } | ||
305 | rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); | ||
306 | } | ||
307 | { /* Receive Checksum Offload Enable */ | ||
308 | const struct ixgb_option opt = { | ||
309 | .type = enable_option, | ||
310 | .name = "Receive Checksum Offload", | ||
311 | .err = "defaulting to Enabled", | ||
312 | .def = OPTION_ENABLED | ||
313 | }; | ||
314 | |||
315 | if (num_XsumRX > bd) { | ||
316 | unsigned int rx_csum = XsumRX[bd]; | ||
317 | ixgb_validate_option(&rx_csum, &opt); | ||
318 | adapter->rx_csum = rx_csum; | ||
319 | } else { | ||
320 | adapter->rx_csum = opt.def; | ||
321 | } | ||
322 | } | ||
323 | { /* Flow Control */ | ||
324 | |||
325 | static const struct ixgb_opt_list fc_list[] = { | ||
326 | { ixgb_fc_none, "Flow Control Disabled" }, | ||
327 | { ixgb_fc_rx_pause, "Flow Control Receive Only" }, | ||
328 | { ixgb_fc_tx_pause, "Flow Control Transmit Only" }, | ||
329 | { ixgb_fc_full, "Flow Control Enabled" }, | ||
330 | { ixgb_fc_default, "Flow Control Hardware Default" } | ||
331 | }; | ||
332 | |||
333 | static const struct ixgb_option opt = { | ||
334 | .type = list_option, | ||
335 | .name = "Flow Control", | ||
336 | .err = "reading default settings from EEPROM", | ||
337 | .def = ixgb_fc_tx_pause, | ||
338 | .arg = { .l = { .nr = ARRAY_SIZE(fc_list), | ||
339 | .p = fc_list }} | ||
340 | }; | ||
341 | |||
342 | if (num_FlowControl > bd) { | ||
343 | unsigned int fc = FlowControl[bd]; | ||
344 | ixgb_validate_option(&fc, &opt); | ||
345 | adapter->hw.fc.type = fc; | ||
346 | } else { | ||
347 | adapter->hw.fc.type = opt.def; | ||
348 | } | ||
349 | } | ||
350 | { /* Receive Flow Control High Threshold */ | ||
351 | const struct ixgb_option opt = { | ||
352 | .type = range_option, | ||
353 | .name = "Rx Flow Control High Threshold", | ||
354 | .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH), | ||
355 | .def = DEFAULT_FCRTH, | ||
356 | .arg = { .r = { .min = MIN_FCRTH, | ||
357 | .max = MAX_FCRTH}} | ||
358 | }; | ||
359 | |||
360 | if (num_RxFCHighThresh > bd) { | ||
361 | adapter->hw.fc.high_water = RxFCHighThresh[bd]; | ||
362 | ixgb_validate_option(&adapter->hw.fc.high_water, &opt); | ||
363 | } else { | ||
364 | adapter->hw.fc.high_water = opt.def; | ||
365 | } | ||
366 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) | ||
367 | pr_info("Ignoring RxFCHighThresh when no RxFC\n"); | ||
368 | } | ||
369 | { /* Receive Flow Control Low Threshold */ | ||
370 | const struct ixgb_option opt = { | ||
371 | .type = range_option, | ||
372 | .name = "Rx Flow Control Low Threshold", | ||
373 | .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL), | ||
374 | .def = DEFAULT_FCRTL, | ||
375 | .arg = { .r = { .min = MIN_FCRTL, | ||
376 | .max = MAX_FCRTL}} | ||
377 | }; | ||
378 | |||
379 | if (num_RxFCLowThresh > bd) { | ||
380 | adapter->hw.fc.low_water = RxFCLowThresh[bd]; | ||
381 | ixgb_validate_option(&adapter->hw.fc.low_water, &opt); | ||
382 | } else { | ||
383 | adapter->hw.fc.low_water = opt.def; | ||
384 | } | ||
385 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) | ||
386 | pr_info("Ignoring RxFCLowThresh when no RxFC\n"); | ||
387 | } | ||
388 | { /* Flow Control Pause Time Request */ | ||
389 | const struct ixgb_option opt = { | ||
390 | .type = range_option, | ||
391 | .name = "Flow Control Pause Time Request", | ||
392 | .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE), | ||
393 | .def = DEFAULT_FCPAUSE, | ||
394 | .arg = { .r = { .min = MIN_FCPAUSE, | ||
395 | .max = MAX_FCPAUSE}} | ||
396 | }; | ||
397 | |||
398 | if (num_FCReqTimeout > bd) { | ||
399 | unsigned int pause_time = FCReqTimeout[bd]; | ||
400 | ixgb_validate_option(&pause_time, &opt); | ||
401 | adapter->hw.fc.pause_time = pause_time; | ||
402 | } else { | ||
403 | adapter->hw.fc.pause_time = opt.def; | ||
404 | } | ||
405 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause)) | ||
406 | pr_info("Ignoring FCReqTimeout when no RxFC\n"); | ||
407 | } | ||
408 | /* high, low, and spacing check for rx flow control thresholds */ | ||
409 | if (adapter->hw.fc.type & ixgb_fc_tx_pause) { | ||
410 | /* high must be greater than low */ | ||
411 | if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { | ||
412 | /* set defaults */ | ||
413 | pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n"); | ||
414 | adapter->hw.fc.high_water = DEFAULT_FCRTH; | ||
415 | adapter->hw.fc.low_water = DEFAULT_FCRTL; | ||
416 | } | ||
417 | } | ||
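/* With the defaults above this check is satisfied: DEFAULT_FCRTH (0x30000 =
 * 196608 bytes) exceeds DEFAULT_FCRTL (0x28000 = 163840 bytes) by well over
 * the required 8 bytes. */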
418 | { /* Receive Interrupt Delay */ | ||
419 | const struct ixgb_option opt = { | ||
420 | .type = range_option, | ||
421 | .name = "Receive Interrupt Delay", | ||
422 | .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), | ||
423 | .def = DEFAULT_RDTR, | ||
424 | .arg = { .r = { .min = MIN_RDTR, | ||
425 | .max = MAX_RDTR}} | ||
426 | }; | ||
427 | |||
428 | if (num_RxIntDelay > bd) { | ||
429 | adapter->rx_int_delay = RxIntDelay[bd]; | ||
430 | ixgb_validate_option(&adapter->rx_int_delay, &opt); | ||
431 | } else { | ||
432 | adapter->rx_int_delay = opt.def; | ||
433 | } | ||
434 | } | ||
435 | { /* Transmit Interrupt Delay */ | ||
436 | const struct ixgb_option opt = { | ||
437 | .type = range_option, | ||
438 | .name = "Transmit Interrupt Delay", | ||
439 | .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), | ||
440 | .def = DEFAULT_TIDV, | ||
441 | .arg = { .r = { .min = MIN_TIDV, | ||
442 | .max = MAX_TIDV}} | ||
443 | }; | ||
444 | |||
445 | if (num_TxIntDelay > bd) { | ||
446 | adapter->tx_int_delay = TxIntDelay[bd]; | ||
447 | ixgb_validate_option(&adapter->tx_int_delay, &opt); | ||
448 | } else { | ||
449 | adapter->tx_int_delay = opt.def; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | { /* Transmit Interrupt Delay Enable */ | ||
454 | const struct ixgb_option opt = { | ||
455 | .type = enable_option, | ||
456 | .name = "Tx Interrupt Delay Enable", | ||
457 | .err = "defaulting to Enabled", | ||
458 | .def = OPTION_ENABLED | ||
459 | }; | ||
460 | |||
461 | if (num_IntDelayEnable > bd) { | ||
462 | unsigned int ide = IntDelayEnable[bd]; | ||
463 | ixgb_validate_option(&ide, &opt); | ||
464 | adapter->tx_int_delay_enable = ide; | ||
465 | } else { | ||
466 | adapter->tx_int_delay_enable = opt.def; | ||
467 | } | ||
468 | } | ||
469 | } | ||