author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 20:48:54 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 20:48:54 -0400
commit    1f1c2881f673671539b25686df463518d69c4649 (patch)
tree      45f4a79f2371ae4525fd621d4b5820732efa161e /drivers/net/atl1
parent    7608a864e5211df1e3c1948e2719aec7c27b9333 (diff)
parent    c5e3ae8823693b260ce1f217adca8add1bc0b3de (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (37 commits)
forcedeth bug fix: realtek phy
forcedeth bug fix: vitesse phy
forcedeth bug fix: cicada phy
atl1: reorder atl1_main functions
atl1: fix excessively indented code
atl1: cleanup atl1_main
atl1: header file cleanup
atl1: remove irq_sem
cdc-subset to support new vendor/product ID
8139cp: implement the missing dev->tx_timeout
myri10ge: Remove nonsensical limit in the tx done routine
gianfar: kill unused header
EP93XX_ETH must select MII
macb: Add multicast capability
macb: Use generic PHY layer
s390: add barriers to qeth driver
s390: scatter-gather for inbound traffic in qeth driver
eHEA: Introducing support for DLPAR memory add
Fix a potential NULL pointer dereference in free_shared_mem() in drivers/net/s2io.c
[PATCH] softmac: Fix ESSID problem
...
Diffstat (limited to 'drivers/net/atl1')
-rw-r--r--  drivers/net/atl1/atl1.h      |  156
-rw-r--r--  drivers/net/atl1/atl1_main.c | 2176
2 files changed, 1172 insertions, 1160 deletions
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
index df4c1a0071aa..ff4765f6c3de 100644
--- a/drivers/net/atl1/atl1.h
+++ b/drivers/net/atl1/atl1.h
@@ -43,6 +43,7 @@ extern const struct ethtool_ops atl1_ethtool_ops;
43 | struct atl1_adapter; | 43 | struct atl1_adapter; |
44 | 44 | ||
45 | #define ATL1_MAX_INTR 3 | 45 | #define ATL1_MAX_INTR 3 |
46 | #define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ | ||
46 | 47 | ||
47 | #define ATL1_DEFAULT_TPD 256 | 48 | #define ATL1_DEFAULT_TPD 256 |
48 | #define ATL1_MAX_TPD 1024 | 49 | #define ATL1_MAX_TPD 1024 |
@@ -57,29 +58,45 @@ struct atl1_adapter;
57 | #define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) | 58 | #define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) |
58 | 59 | ||
59 | /* | 60 | /* |
61 | * This detached comment is preserved for documentation purposes only. | ||
62 | * It was originally attached to some code that got deleted, but seems | ||
63 | * important enough to keep around... | ||
64 | * | ||
65 | * <begin detached comment> | ||
60 | * Some workarounds require millisecond delays and are run during interrupt | 66 | * Some workarounds require millisecond delays and are run during interrupt |
61 | * context. Most notably, when establishing link, the phy may need tweaking | 67 | * context. Most notably, when establishing link, the phy may need tweaking |
62 | * but cannot process phy register reads/writes faster than millisecond | 68 | * but cannot process phy register reads/writes faster than millisecond |
63 | * intervals...and we establish link due to a "link status change" interrupt. | 69 | * intervals...and we establish link due to a "link status change" interrupt. |
70 | * <end detached comment> | ||
71 | */ | ||
72 | |||
73 | /* | ||
74 | * atl1_ring_header represents a single, contiguous block of DMA space | ||
75 | * mapped for the three descriptor rings (tpd, rfd, rrd) and the two | ||
76 | * message blocks (cmb, smb) described below | ||
64 | */ | 77 | */ |
78 | struct atl1_ring_header { | ||
79 | void *desc; /* virtual address */ | ||
80 | dma_addr_t dma; /* physical address */ | ||
81 | unsigned int size; /* length in bytes */ | ||
82 | }; | ||
65 | 83 | ||
66 | /* | 84 | /* |
67 | * wrapper around a pointer to a socket buffer, | 85 | * atl1_buffer is a wrapper around a pointer to a socket buffer |
68 | * so a DMA handle can be stored along with the buffer | 86 | * so a DMA handle can be stored along with the skb |
69 | */ | 87 | */ |
70 | struct atl1_buffer { | 88 | struct atl1_buffer { |
71 | struct sk_buff *skb; | 89 | struct sk_buff *skb; /* socket buffer */ |
72 | u16 length; | 90 | u16 length; /* rx buffer length */ |
73 | u16 alloced; | 91 | u16 alloced; /* 1 if skb allocated */ |
74 | dma_addr_t dma; | 92 | dma_addr_t dma; |
75 | }; | 93 | }; |
76 | 94 | ||
77 | #define MAX_TX_BUF_LEN 0x3000 /* 12KB */ | 95 | /* transmit packet descriptor (tpd) ring */ |
78 | |||
79 | struct atl1_tpd_ring { | 96 | struct atl1_tpd_ring { |
80 | void *desc; /* pointer to the descriptor ring memory */ | 97 | void *desc; /* descriptor ring virtual address */ |
81 | dma_addr_t dma; /* physical adress of the descriptor ring */ | 98 | dma_addr_t dma; /* descriptor ring physical address */ |
82 | u16 size; /* length of descriptor ring in bytes */ | 99 | u16 size; /* descriptor ring length in bytes */ |
83 | u16 count; /* number of descriptors in the ring */ | 100 | u16 count; /* number of descriptors in the ring */ |
84 | u16 hw_idx; /* hardware index */ | 101 | u16 hw_idx; /* hardware index */ |
85 | atomic_t next_to_clean; | 102 | atomic_t next_to_clean; |
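
The point of the atl1_buffer wrapper above is that every mapped skb carries its own DMA handle, so the unmap path never has to recompute it. Below is a minimal sketch of that pattern, assuming the pre-2.6.27 PCI DMA API this driver uses; example_map_rx_buffer is a hypothetical helper for illustration, not part of the patch:

    /* Hypothetical helper (not in the driver): map an skb for the
     * device and record everything the later unmap will need. */
    static int example_map_rx_buffer(struct pci_dev *pdev,
                                     struct atl1_buffer *buffer_info,
                                     struct sk_buff *skb, u16 len)
    {
            buffer_info->skb = skb;
            buffer_info->length = len;
            buffer_info->alloced = 1;
            buffer_info->dma = pci_map_single(pdev, skb->data, len,
                                              PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(buffer_info->dma))
                    return -ENOMEM;
            return 0;
    }

The driver itself maps with pci_map_page() in atl1_alloc_rx_buffers, but the bookkeeping is the same: skb, length and dma travel together in one atl1_buffer until pci_unmap_page() releases the mapping.
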
@@ -87,36 +104,34 @@ struct atl1_tpd_ring {
87 | struct atl1_buffer *buffer_info; | 104 | struct atl1_buffer *buffer_info; |
88 | }; | 105 | }; |
89 | 106 | ||
107 | /* receive free descriptor (rfd) ring */ | ||
90 | struct atl1_rfd_ring { | 108 | struct atl1_rfd_ring { |
91 | void *desc; | 109 | void *desc; /* descriptor ring virtual address */ |
92 | dma_addr_t dma; | 110 | dma_addr_t dma; /* descriptor ring physical address */ |
93 | u16 size; | 111 | u16 size; /* descriptor ring length in bytes */ |
94 | u16 count; | 112 | u16 count; /* number of descriptors in the ring */ |
95 | atomic_t next_to_use; | 113 | atomic_t next_to_use; |
96 | u16 next_to_clean; | 114 | u16 next_to_clean; |
97 | struct atl1_buffer *buffer_info; | 115 | struct atl1_buffer *buffer_info; |
98 | }; | 116 | }; |
99 | 117 | ||
118 | /* receive return descriptor (rrd) ring */ | ||
100 | struct atl1_rrd_ring { | 119 | struct atl1_rrd_ring { |
101 | void *desc; | 120 | void *desc; /* descriptor ring virtual address */ |
102 | dma_addr_t dma; | 121 | dma_addr_t dma; /* descriptor ring physical address */ |
103 | unsigned int size; | 122 | unsigned int size; /* descriptor ring length in bytes */ |
104 | u16 count; | 123 | u16 count; /* number of descriptors in the ring */ |
105 | u16 next_to_use; | 124 | u16 next_to_use; |
106 | atomic_t next_to_clean; | 125 | atomic_t next_to_clean; |
107 | }; | 126 | }; |
108 | 127 | ||
109 | struct atl1_ring_header { | 128 | /* coalescing message block (cmb) */ |
110 | void *desc; /* pointer to the descriptor ring memory */ | ||
111 | dma_addr_t dma; /* physical adress of the descriptor ring */ | ||
112 | unsigned int size; /* length of descriptor ring in bytes */ | ||
113 | }; | ||
114 | |||
115 | struct atl1_cmb { | 129 | struct atl1_cmb { |
116 | struct coals_msg_block *cmb; | 130 | struct coals_msg_block *cmb; |
117 | dma_addr_t dma; | 131 | dma_addr_t dma; |
118 | }; | 132 | }; |
119 | 133 | ||
134 | /* statistics message block (smb) */ | ||
120 | struct atl1_smb { | 135 | struct atl1_smb { |
121 | struct stats_msg_block *smb; | 136 | struct stats_msg_block *smb; |
122 | dma_addr_t dma; | 137 | dma_addr_t dma; |
@@ -141,24 +156,26 @@ struct atl1_sft_stats {
141 | u64 tx_aborted_errors; | 156 | u64 tx_aborted_errors; |
142 | u64 tx_window_errors; | 157 | u64 tx_window_errors; |
143 | u64 tx_carrier_errors; | 158 | u64 tx_carrier_errors; |
144 | 159 | u64 tx_pause; /* num pause packets transmitted. */ | |
145 | u64 tx_pause; /* num Pause packet transmitted. */ | 160 | u64 excecol; /* num tx packets w/ excessive collisions. */ |
146 | u64 excecol; /* num tx packets aborted due to excessive collisions. */ | 161 | u64 deffer; /* num tx packets deferred */ |
147 | u64 deffer; /* num deferred tx packets */ | 162 | u64 scc; /* num packets subsequently transmitted |
148 | u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */ | 163 | * successfully w/ single prior collision. */ |
149 | u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */ | 164 | u64 mcc; /* num packets subsequently transmitted |
165 | * successfully w/ multiple prior collisions. */ | ||
150 | u64 latecol; /* num tx packets w/ late collisions. */ | 166 | u64 latecol; /* num tx packets w/ late collisions. */ |
151 | u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ | 167 | u64 tx_underun; /* num tx packets aborted due to transmit |
152 | u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */ | 168 | * FIFO underrun, or TRD FIFO underrun */ |
169 | u64 tx_trunc; /* num tx packets truncated due to size | ||
170 | * exceeding MTU, regardless whether truncated | ||
171 | * by the chip or not. (The name doesn't really | ||
172 | * reflect the meaning in this case.) */ | ||
153 | u64 rx_pause; /* num Pause packets received. */ | 173 | u64 rx_pause; /* num Pause packets received. */ |
154 | u64 rx_rrd_ov; | 174 | u64 rx_rrd_ov; |
155 | u64 rx_trunc; | 175 | u64 rx_trunc; |
156 | }; | 176 | }; |
157 | 177 | ||
158 | /* board specific private data structure */ | 178 | /* hardware structure */ |
159 | #define ATL1_REGS_LEN 8 | ||
160 | |||
161 | /* Structure containing variables used by the shared code */ | ||
162 | struct atl1_hw { | 179 | struct atl1_hw { |
163 | u8 __iomem *hw_addr; | 180 | u8 __iomem *hw_addr; |
164 | struct atl1_adapter *back; | 181 | struct atl1_adapter *back; |
@@ -167,24 +184,35 @@ struct atl1_hw {
167 | enum atl1_dma_req_block dmar_block; | 184 | enum atl1_dma_req_block dmar_block; |
168 | enum atl1_dma_req_block dmaw_block; | 185 | enum atl1_dma_req_block dmaw_block; |
169 | u8 preamble_len; | 186 | u8 preamble_len; |
170 | u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */ | 187 | u8 max_retry; /* Retransmission maximum, after which the |
171 | u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */ | 188 | * packet will be discarded */ |
172 | u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */ | 189 | u8 jam_ipg; /* IPG to start JAM for collision based flow |
173 | u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */ | 190 | * control in half-duplex mode. In units of |
191 | * 8-bit time */ | ||
192 | u8 ipgt; /* Desired back to back inter-packet gap. | ||
193 | * The default is 96-bit time */ | ||
194 | u8 min_ifg; /* Minimum number of IFG to enforce in between | ||
195 | * receive frames. Frame gap below such IFP | ||
196 | * is dropped */ | ||
174 | u8 ipgr1; /* 64bit Carrier-Sense window */ | 197 | u8 ipgr1; /* 64bit Carrier-Sense window */ |
175 | u8 ipgr2; /* 96-bit IPG window */ | 198 | u8 ipgr2; /* 96-bit IPG window */ |
176 | u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */ | 199 | u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned |
177 | u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */ | 200 | * burst. Each TPD is 16 bytes long */ |
201 | u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned | ||
202 | * burst. Each RFD is 12 bytes long */ | ||
178 | u8 rfd_fetch_gap; | 203 | u8 rfd_fetch_gap; |
179 | u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */ | 204 | u8 rrd_burst; /* Threshold number of RRDs that can be retired |
205 | * in a burst. Each RRD is 16 bytes long */ | ||
180 | u8 tpd_fetch_th; | 206 | u8 tpd_fetch_th; |
181 | u8 tpd_fetch_gap; | 207 | u8 tpd_fetch_gap; |
182 | u16 tx_jumbo_task_th; | 208 | u16 tx_jumbo_task_th; |
183 | u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is | 209 | u16 txf_burst; /* Number of data bytes to read in a cache- |
184 | 8 bytes long */ | 210 | * aligned burst. Each SRAM entry is 8 bytes */ |
185 | u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */ | 211 | u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN |
212 | * packets should add 4 bytes */ | ||
186 | u16 rx_jumbo_lkah; | 213 | u16 rx_jumbo_lkah; |
187 | u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */ | 214 | u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after |
215 | * every 512ns passes. */ | ||
188 | u16 lcol; /* Collision Window */ | 216 | u16 lcol; /* Collision Window */ |
189 | 217 | ||
190 | u16 cmb_tpd; | 218 | u16 cmb_tpd; |
@@ -194,48 +222,35 @@ struct atl1_hw {
194 | u32 smb_timer; | 222 | u32 smb_timer; |
195 | u16 media_type; | 223 | u16 media_type; |
196 | u16 autoneg_advertised; | 224 | u16 autoneg_advertised; |
197 | u16 pci_cmd_word; | ||
198 | 225 | ||
199 | u16 mii_autoneg_adv_reg; | 226 | u16 mii_autoneg_adv_reg; |
200 | u16 mii_1000t_ctrl_reg; | 227 | u16 mii_1000t_ctrl_reg; |
201 | 228 | ||
202 | u32 mem_rang; | ||
203 | u32 txcw; | ||
204 | u32 max_frame_size; | 229 | u32 max_frame_size; |
205 | u32 min_frame_size; | 230 | u32 min_frame_size; |
206 | u32 mc_filter_type; | ||
207 | u32 num_mc_addrs; | ||
208 | u32 collision_delta; | ||
209 | u32 tx_packet_delta; | ||
210 | u16 phy_spd_default; | ||
211 | 231 | ||
212 | u16 dev_rev; | 232 | u16 dev_rev; |
213 | 233 | ||
214 | /* spi flash */ | 234 | /* spi flash */ |
215 | u8 flash_vendor; | 235 | u8 flash_vendor; |
216 | 236 | ||
217 | u8 dma_fairness; | ||
218 | u8 mac_addr[ETH_ALEN]; | 237 | u8 mac_addr[ETH_ALEN]; |
219 | u8 perm_mac_addr[ETH_ALEN]; | 238 | u8 perm_mac_addr[ETH_ALEN]; |
220 | 239 | ||
221 | /* bool phy_preamble_sup; */ | ||
222 | bool phy_configured; | 240 | bool phy_configured; |
223 | }; | 241 | }; |
224 | 242 | ||
225 | struct atl1_adapter { | 243 | struct atl1_adapter { |
226 | /* OS defined structs */ | ||
227 | struct net_device *netdev; | 244 | struct net_device *netdev; |
228 | struct pci_dev *pdev; | 245 | struct pci_dev *pdev; |
229 | struct net_device_stats net_stats; | 246 | struct net_device_stats net_stats; |
230 | struct atl1_sft_stats soft_stats; | 247 | struct atl1_sft_stats soft_stats; |
231 | |||
232 | struct vlan_group *vlgrp; | 248 | struct vlan_group *vlgrp; |
233 | u32 rx_buffer_len; | 249 | u32 rx_buffer_len; |
234 | u32 wol; | 250 | u32 wol; |
235 | u16 link_speed; | 251 | u16 link_speed; |
236 | u16 link_duplex; | 252 | u16 link_duplex; |
237 | spinlock_t lock; | 253 | spinlock_t lock; |
238 | atomic_t irq_sem; | ||
239 | struct work_struct tx_timeout_task; | 254 | struct work_struct tx_timeout_task; |
240 | struct work_struct link_chg_task; | 255 | struct work_struct link_chg_task; |
241 | struct work_struct pcie_dma_to_rst_task; | 256 | struct work_struct pcie_dma_to_rst_task; |
@@ -243,9 +258,7 @@ struct atl1_adapter {
243 | struct timer_list phy_config_timer; | 258 | struct timer_list phy_config_timer; |
244 | bool phy_timer_pending; | 259 | bool phy_timer_pending; |
245 | 260 | ||
246 | bool mac_disabled; | 261 | /* all descriptor rings' memory */ |
247 | |||
248 | /* All descriptor rings' memory */ | ||
249 | struct atl1_ring_header ring_header; | 262 | struct atl1_ring_header ring_header; |
250 | 263 | ||
251 | /* TX */ | 264 | /* TX */ |
@@ -258,25 +271,16 @@ struct atl1_adapter {
258 | u64 hw_csum_err; | 271 | u64 hw_csum_err; |
259 | u64 hw_csum_good; | 272 | u64 hw_csum_good; |
260 | 273 | ||
261 | u32 gorcl; | 274 | u16 imt; /* interrupt moderator timer (2us resolution) */
262 | u64 gorcl_old; | 275 | u16 ict; /* interrupt clear timer (2us resolution) */
263 | 276 | struct mii_if_info mii; /* MII interface info */ | |
264 | /* Interrupt Moderator timer ( 2us resolution) */ | ||
265 | u16 imt; | ||
266 | /* Interrupt Clear timer (2us resolution) */ | ||
267 | u16 ict; | ||
268 | |||
269 | /* MII interface info */ | ||
270 | struct mii_if_info mii; | ||
271 | 277 | ||
272 | /* structs defined in atl1_hw.h */ | 278 | /* structs defined in atl1_hw.h */ |
273 | u32 bd_number; /* board number */ | 279 | u32 bd_number; /* board number */ |
274 | bool pci_using_64; | 280 | bool pci_using_64; |
275 | struct atl1_hw hw; | 281 | struct atl1_hw hw; |
276 | struct atl1_smb smb; | 282 | struct atl1_smb smb; |
277 | struct atl1_cmb cmb; | 283 | struct atl1_cmb cmb; |
278 | |||
279 | u32 pci_state[16]; | ||
280 | }; | 284 | }; |
281 | 285 | ||
282 | #endif /* _ATL1_H_ */ | 286 | #endif /* _ATL1_H_ */ |
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 501919eb7f5e..4a18b881ae9a 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -38,7 +38,7 @@
38 | * TODO: | 38 | * TODO: |
39 | * Fix TSO; tx performance is horrible with TSO enabled. | 39 | * Fix TSO; tx performance is horrible with TSO enabled. |
40 | * Wake on LAN. | 40 | * Wake on LAN. |
41 | * Add more ethtool functions, including set ring parameters. | 41 | * Add more ethtool functions. |
42 | * Fix abstruse irq enable/disable condition described here: | 42 | * Fix abstruse irq enable/disable condition described here: |
43 | * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 | 43 | * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 |
44 | * | 44 | * |
@@ -158,13 +158,70 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
158 | hw->cmb_tx_timer = 1; /* about 2us */ | 158 | hw->cmb_tx_timer = 1; /* about 2us */ |
159 | hw->smb_timer = 100000; /* about 200ms */ | 159 | hw->smb_timer = 100000; /* about 200ms */ |
160 | 160 | ||
161 | atomic_set(&adapter->irq_sem, 0); | ||
162 | spin_lock_init(&adapter->lock); | 161 | spin_lock_init(&adapter->lock); |
163 | spin_lock_init(&adapter->mb_lock); | 162 | spin_lock_init(&adapter->mb_lock); |
164 | 163 | ||
165 | return 0; | 164 | return 0; |
166 | } | 165 | } |
167 | 166 | ||
167 | static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
168 | { | ||
169 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
170 | u16 result; | ||
171 | |||
172 | atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); | ||
173 | |||
174 | return result; | ||
175 | } | ||
176 | |||
177 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, | ||
178 | int val) | ||
179 | { | ||
180 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
181 | |||
182 | atl1_write_phy_reg(&adapter->hw, reg_num, val); | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * atl1_mii_ioctl - | ||
187 | * @netdev: | ||
188 | * @ifreq: | ||
189 | * @cmd: | ||
190 | */ | ||
191 | static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
192 | { | ||
193 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
194 | unsigned long flags; | ||
195 | int retval; | ||
196 | |||
197 | if (!netif_running(netdev)) | ||
198 | return -EINVAL; | ||
199 | |||
200 | spin_lock_irqsave(&adapter->lock, flags); | ||
201 | retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); | ||
202 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
203 | |||
204 | return retval; | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * atl1_ioctl - | ||
209 | * @netdev: | ||
210 | * @ifreq: | ||
211 | * @cmd: | ||
212 | */ | ||
213 | static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
214 | { | ||
215 | switch (cmd) { | ||
216 | case SIOCGMIIPHY: | ||
217 | case SIOCGMIIREG: | ||
218 | case SIOCSMIIREG: | ||
219 | return atl1_mii_ioctl(netdev, ifr, cmd); | ||
220 | default: | ||
221 | return -EOPNOTSUPP; | ||
222 | } | ||
223 | } | ||
224 | |||
168 | /* | 225 | /* |
169 | * atl1_setup_mem_resources - allocate Tx / RX descriptor resources | 226 | * atl1_setup_mem_resources - allocate Tx / RX descriptor resources |
170 | * @adapter: board private structure | 227 | * @adapter: board private structure |
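
The mdio_read/mdio_write callbacks added above are what let generic_mii_ioctl() service SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG through the kernel's generic MII layer. For that to work, adapter->mii has to be populated at probe time; the following is a hedged sketch of the usual wiring (example_mii_setup and the mask values are illustrative; the real assignments live in atl1_probe, which this excerpt does not show):

    /* Illustrative only: typical mii_if_info setup for a driver that
     * routes MII ioctls through generic_mii_ioctl(). */
    static void example_mii_setup(struct atl1_adapter *adapter,
                                  struct net_device *netdev)
    {
            adapter->mii.dev = netdev;
            adapter->mii.mdio_read = mdio_read;
            adapter->mii.mdio_write = mdio_write;
            adapter->mii.phy_id_mask = 0x1f;
            adapter->mii.reg_num_mask = 0x1f;
            adapter->mii.supports_gmii = 1;
    }

Note that mdio_read above already masks reg_num with 0x1f itself and ignores phy_id, which is consistent with a single-PHY device behind this kind of generic hookup.
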
@@ -188,19 +245,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
188 | goto err_nomem; | 245 | goto err_nomem; |
189 | } | 246 | } |
190 | rfd_ring->buffer_info = | 247 | rfd_ring->buffer_info = |
191 | (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); | 248 | (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); |
192 | 249 | ||
193 | /* real ring DMA buffer */ | 250 | /* real ring DMA buffer |
194 | ring_header->size = size = sizeof(struct tx_packet_desc) * | 251 | * each ring/block may need up to 8 bytes for alignment, hence the |
195 | tpd_ring->count | 252 | * additional 40 bytes tacked onto the end. |
196 | + sizeof(struct rx_free_desc) * rfd_ring->count | 253 | */ |
197 | + sizeof(struct rx_return_desc) * rrd_ring->count | 254 | ring_header->size = size = |
198 | + sizeof(struct coals_msg_block) | 255 | sizeof(struct tx_packet_desc) * tpd_ring->count |
199 | + sizeof(struct stats_msg_block) | 256 | + sizeof(struct rx_free_desc) * rfd_ring->count |
200 | + 40; /* "40: for 8 bytes align" huh? -- CHS */ | 257 | + sizeof(struct rx_return_desc) * rrd_ring->count |
258 | + sizeof(struct coals_msg_block) | ||
259 | + sizeof(struct stats_msg_block) | ||
260 | + 40; | ||
201 | 261 | ||
202 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, | 262 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, |
203 | &ring_header->dma); | 263 | &ring_header->dma); |
204 | if (unlikely(!ring_header->desc)) { | 264 | if (unlikely(!ring_header->desc)) { |
205 | dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); | 265 | dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); |
206 | goto err_nomem; | 266 | goto err_nomem; |
@@ -214,8 +274,6 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
214 | tpd_ring->dma += offset; | 274 | tpd_ring->dma += offset; |
215 | tpd_ring->desc = (u8 *) ring_header->desc + offset; | 275 | tpd_ring->desc = (u8 *) ring_header->desc + offset; |
216 | tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; | 276 | tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; |
217 | atomic_set(&tpd_ring->next_to_use, 0); | ||
218 | atomic_set(&tpd_ring->next_to_clean, 0); | ||
219 | 277 | ||
220 | /* init RFD ring */ | 278 | /* init RFD ring */ |
221 | rfd_ring->dma = tpd_ring->dma + tpd_ring->size; | 279 | rfd_ring->dma = tpd_ring->dma + tpd_ring->size; |
@@ -223,9 +281,7 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
223 | rfd_ring->dma += offset; | 281 | rfd_ring->dma += offset; |
224 | rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); | 282 | rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); |
225 | rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; | 283 | rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; |
226 | rfd_ring->next_to_clean = 0; | 284 | |
227 | /* rfd_ring->next_to_use = rfd_ring->count - 1; */ | ||
228 | atomic_set(&rfd_ring->next_to_use, 0); | ||
229 | 285 | ||
230 | /* init RRD ring */ | 286 | /* init RRD ring */ |
231 | rrd_ring->dma = rfd_ring->dma + rfd_ring->size; | 287 | rrd_ring->dma = rfd_ring->dma + rfd_ring->size; |
@@ -233,23 +289,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
233 | rrd_ring->dma += offset; | 289 | rrd_ring->dma += offset; |
234 | rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); | 290 | rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); |
235 | rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; | 291 | rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; |
236 | rrd_ring->next_to_use = 0; | 292 | |
237 | atomic_set(&rrd_ring->next_to_clean, 0); | ||
238 | 293 | ||
239 | /* init CMB */ | 294 | /* init CMB */ |
240 | adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; | 295 | adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; |
241 | offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; | 296 | offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; |
242 | adapter->cmb.dma += offset; | 297 | adapter->cmb.dma += offset; |
243 | adapter->cmb.cmb = | 298 | adapter->cmb.cmb = (struct coals_msg_block *) |
244 | (struct coals_msg_block *) ((u8 *) rrd_ring->desc + | 299 | ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); |
245 | (rrd_ring->size + offset)); | ||
246 | 300 | ||
247 | /* init SMB */ | 301 | /* init SMB */ |
248 | adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); | 302 | adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); |
249 | offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; | 303 | offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; |
250 | adapter->smb.dma += offset; | 304 | adapter->smb.dma += offset; |
251 | adapter->smb.smb = (struct stats_msg_block *) | 305 | adapter->smb.smb = (struct stats_msg_block *) |
252 | ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); | 306 | ((u8 *) adapter->cmb.cmb + |
307 | (sizeof(struct coals_msg_block) + offset)); | ||
253 | 308 | ||
254 | return ATL1_SUCCESS; | 309 | return ATL1_SUCCESS; |
255 | 310 | ||
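
Each of the five sub-blocks carved out above (tpd, rfd, rrd, cmb, smb) is advanced to the next 8-byte boundary with the same inline idiom, which skips at most 7 bytes; budgeting 8 bytes per sub-block is exactly what the 40 spare bytes added to ring_header->size pay for (5 * 8 = 40), as the comment in the size computation says. A minimal sketch of the idiom, with align8_offset as a hypothetical name:

    /* Hypothetical helper equivalent to the expression repeated five
     * times above: bytes to skip to reach 8-byte alignment. */
    static inline unsigned int align8_offset(dma_addr_t dma)
    {
            return (dma & 0x7) ? (8 - (dma & 0x7)) : 0;
    }

For example, a block landing at an address ending in 0x3 gets offset 5, so the ring starts at the next address ending in 0x8; an already aligned address gets offset 0.
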
@@ -258,559 +313,133 @@ err_nomem:
258 | return -ENOMEM; | 313 | return -ENOMEM; |
259 | } | 314 | } |
260 | 315 | ||
261 | /* | 316 | void atl1_init_ring_ptrs(struct atl1_adapter *adapter) |
262 | * atl1_irq_enable - Enable default interrupt generation settings | ||
263 | * @adapter: board private structure | ||
264 | */ | ||
265 | static void atl1_irq_enable(struct atl1_adapter *adapter) | ||
266 | { | ||
267 | if (likely(!atomic_dec_and_test(&adapter->irq_sem))) | ||
268 | iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); | ||
269 | } | ||
270 | |||
271 | static void atl1_clear_phy_int(struct atl1_adapter *adapter) | ||
272 | { | ||
273 | u16 phy_data; | ||
274 | unsigned long flags; | ||
275 | |||
276 | spin_lock_irqsave(&adapter->lock, flags); | ||
277 | atl1_read_phy_reg(&adapter->hw, 19, &phy_data); | ||
278 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
279 | } | ||
280 | |||
281 | static void atl1_inc_smb(struct atl1_adapter *adapter) | ||
282 | { | ||
283 | struct stats_msg_block *smb = adapter->smb.smb; | ||
284 | |||
285 | /* Fill out the OS statistics structure */ | ||
286 | adapter->soft_stats.rx_packets += smb->rx_ok; | ||
287 | adapter->soft_stats.tx_packets += smb->tx_ok; | ||
288 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; | ||
289 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; | ||
290 | adapter->soft_stats.multicast += smb->rx_mcast; | ||
291 | adapter->soft_stats.collisions += (smb->tx_1_col + | ||
292 | smb->tx_2_col * 2 + | ||
293 | smb->tx_late_col + | ||
294 | smb->tx_abort_col * | ||
295 | adapter->hw.max_retry); | ||
296 | |||
297 | /* Rx Errors */ | ||
298 | adapter->soft_stats.rx_errors += (smb->rx_frag + | ||
299 | smb->rx_fcs_err + | ||
300 | smb->rx_len_err + | ||
301 | smb->rx_sz_ov + | ||
302 | smb->rx_rxf_ov + | ||
303 | smb->rx_rrd_ov + smb->rx_align_err); | ||
304 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | ||
305 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | ||
306 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | ||
307 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | ||
308 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | ||
309 | smb->rx_rxf_ov); | ||
310 | |||
311 | adapter->soft_stats.rx_pause += smb->rx_pause; | ||
312 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | ||
313 | adapter->soft_stats.rx_trunc += smb->rx_sz_ov; | ||
314 | |||
315 | /* Tx Errors */ | ||
316 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | ||
317 | smb->tx_abort_col + | ||
318 | smb->tx_underrun + smb->tx_trunc); | ||
319 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | ||
320 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | ||
321 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | ||
322 | |||
323 | adapter->soft_stats.excecol += smb->tx_abort_col; | ||
324 | adapter->soft_stats.deffer += smb->tx_defer; | ||
325 | adapter->soft_stats.scc += smb->tx_1_col; | ||
326 | adapter->soft_stats.mcc += smb->tx_2_col; | ||
327 | adapter->soft_stats.latecol += smb->tx_late_col; | ||
328 | adapter->soft_stats.tx_underun += smb->tx_underrun; | ||
329 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | ||
330 | adapter->soft_stats.tx_pause += smb->tx_pause; | ||
331 | |||
332 | adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; | ||
333 | adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; | ||
334 | adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; | ||
335 | adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; | ||
336 | adapter->net_stats.multicast = adapter->soft_stats.multicast; | ||
337 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | ||
338 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | ||
339 | adapter->net_stats.rx_over_errors = | ||
340 | adapter->soft_stats.rx_missed_errors; | ||
341 | adapter->net_stats.rx_length_errors = | ||
342 | adapter->soft_stats.rx_length_errors; | ||
343 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | ||
344 | adapter->net_stats.rx_frame_errors = | ||
345 | adapter->soft_stats.rx_frame_errors; | ||
346 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | ||
347 | adapter->net_stats.rx_missed_errors = | ||
348 | adapter->soft_stats.rx_missed_errors; | ||
349 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | ||
350 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | ||
351 | adapter->net_stats.tx_aborted_errors = | ||
352 | adapter->soft_stats.tx_aborted_errors; | ||
353 | adapter->net_stats.tx_window_errors = | ||
354 | adapter->soft_stats.tx_window_errors; | ||
355 | adapter->net_stats.tx_carrier_errors = | ||
356 | adapter->soft_stats.tx_carrier_errors; | ||
357 | } | ||
358 | |||
359 | static void atl1_rx_checksum(struct atl1_adapter *adapter, | ||
360 | struct rx_return_desc *rrd, | ||
361 | struct sk_buff *skb) | ||
362 | { | 317 | { |
363 | skb->ip_summed = CHECKSUM_NONE; | 318 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
364 | 319 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | |
365 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | 320 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; |
366 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | ||
367 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | ||
368 | adapter->hw_csum_err++; | ||
369 | dev_dbg(&adapter->pdev->dev, "rx checksum error\n"); | ||
370 | return; | ||
371 | } | ||
372 | } | ||
373 | 321 | ||
374 | /* not IPv4 */ | 322 | atomic_set(&tpd_ring->next_to_use, 0); |
375 | if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) | 323 | atomic_set(&tpd_ring->next_to_clean, 0); |
376 | /* checksum is invalid, but it's not an IPv4 pkt, so ok */ | ||
377 | return; | ||
378 | 324 | ||
379 | /* IPv4 packet */ | 325 | rfd_ring->next_to_clean = 0; |
380 | if (likely(!(rrd->err_flg & | 326 | atomic_set(&rfd_ring->next_to_use, 0); |
381 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { | ||
382 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
383 | adapter->hw_csum_good++; | ||
384 | return; | ||
385 | } | ||
386 | 327 | ||
387 | /* IPv4, but hardware thinks its checksum is wrong */ | 328 | rrd_ring->next_to_use = 0; |
388 | dev_dbg(&adapter->pdev->dev, | 329 | atomic_set(&rrd_ring->next_to_clean, 0); |
389 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", | ||
390 | rrd->pkt_flg, rrd->err_flg); | ||
391 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
392 | skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); | ||
393 | adapter->hw_csum_err++; | ||
394 | return; | ||
395 | } | 330 | } |
396 | 331 | ||
397 | /* | 332 | /* |
398 | * atl1_alloc_rx_buffers - Replace used receive buffers | 333 | * atl1_clean_rx_ring - Free RFD Buffers |
399 | * @adapter: address of board private structure | 334 | * @adapter: board private structure |
400 | */ | 335 | */ |
401 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | 336 | static void atl1_clean_rx_ring(struct atl1_adapter *adapter) |
402 | { | ||
403 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
404 | struct pci_dev *pdev = adapter->pdev; | ||
405 | struct page *page; | ||
406 | unsigned long offset; | ||
407 | struct atl1_buffer *buffer_info, *next_info; | ||
408 | struct sk_buff *skb; | ||
409 | u16 num_alloc = 0; | ||
410 | u16 rfd_next_to_use, next_next; | ||
411 | struct rx_free_desc *rfd_desc; | ||
412 | |||
413 | next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); | ||
414 | if (++next_next == rfd_ring->count) | ||
415 | next_next = 0; | ||
416 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
417 | next_info = &rfd_ring->buffer_info[next_next]; | ||
418 | |||
419 | while (!buffer_info->alloced && !next_info->alloced) { | ||
420 | if (buffer_info->skb) { | ||
421 | buffer_info->alloced = 1; | ||
422 | goto next; | ||
423 | } | ||
424 | |||
425 | rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); | ||
426 | |||
427 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | ||
428 | if (unlikely(!skb)) { /* Better luck next round */ | ||
429 | adapter->net_stats.rx_dropped++; | ||
430 | break; | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
435 | * this will result in a 16 byte aligned IP header after | ||
436 | * the 14 byte MAC header is removed | ||
437 | */ | ||
438 | skb_reserve(skb, NET_IP_ALIGN); | ||
439 | |||
440 | buffer_info->alloced = 1; | ||
441 | buffer_info->skb = skb; | ||
442 | buffer_info->length = (u16) adapter->rx_buffer_len; | ||
443 | page = virt_to_page(skb->data); | ||
444 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
445 | buffer_info->dma = pci_map_page(pdev, page, offset, | ||
446 | adapter->rx_buffer_len, | ||
447 | PCI_DMA_FROMDEVICE); | ||
448 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
449 | rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); | ||
450 | rfd_desc->coalese = 0; | ||
451 | |||
452 | next: | ||
453 | rfd_next_to_use = next_next; | ||
454 | if (unlikely(++next_next == rfd_ring->count)) | ||
455 | next_next = 0; | ||
456 | |||
457 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
458 | next_info = &rfd_ring->buffer_info[next_next]; | ||
459 | num_alloc++; | ||
460 | } | ||
461 | |||
462 | if (num_alloc) { | ||
463 | /* | ||
464 | * Force memory writes to complete before letting h/w | ||
465 | * know there are new descriptors to fetch. (Only | ||
466 | * applicable for weak-ordered memory model archs, | ||
467 | * such as IA-64). | ||
468 | */ | ||
469 | wmb(); | ||
470 | atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); | ||
471 | } | ||
472 | return num_alloc; | ||
473 | } | ||
474 | |||
475 | static void atl1_intr_rx(struct atl1_adapter *adapter) | ||
476 | { | 337 | { |
477 | int i, count; | ||
478 | u16 length; | ||
479 | u16 rrd_next_to_clean; | ||
480 | u32 value; | ||
481 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 338 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
482 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | 339 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; |
483 | struct atl1_buffer *buffer_info; | 340 | struct atl1_buffer *buffer_info; |
484 | struct rx_return_desc *rrd; | 341 | struct pci_dev *pdev = adapter->pdev; |
485 | struct sk_buff *skb; | 342 | unsigned long size; |
486 | 343 | unsigned int i; | |
487 | count = 0; | ||
488 | |||
489 | rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); | ||
490 | |||
491 | while (1) { | ||
492 | rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); | ||
493 | i = 1; | ||
494 | if (likely(rrd->xsz.valid)) { /* packet valid */ | ||
495 | chk_rrd: | ||
496 | /* check rrd status */ | ||
497 | if (likely(rrd->num_buf == 1)) | ||
498 | goto rrd_ok; | ||
499 | |||
500 | /* rrd seems to be bad */ | ||
501 | if (unlikely(i-- > 0)) { | ||
502 | /* rrd may not be DMAed completely */ | ||
503 | dev_dbg(&adapter->pdev->dev, | ||
504 | "incomplete RRD DMA transfer\n"); | ||
505 | udelay(1); | ||
506 | goto chk_rrd; | ||
507 | } | ||
508 | /* bad rrd */ | ||
509 | dev_dbg(&adapter->pdev->dev, "bad RRD\n"); | ||
510 | /* see if update RFD index */ | ||
511 | if (rrd->num_buf > 1) { | ||
512 | u16 num_buf; | ||
513 | num_buf = | ||
514 | (rrd->xsz.xsum_sz.pkt_size + | ||
515 | adapter->rx_buffer_len - | ||
516 | 1) / adapter->rx_buffer_len; | ||
517 | if (rrd->num_buf == num_buf) { | ||
518 | /* clean alloc flag for bad rrd */ | ||
519 | while (rfd_ring->next_to_clean != | ||
520 | (rrd->buf_indx + num_buf)) { | ||
521 | rfd_ring->buffer_info[rfd_ring-> | ||
522 | next_to_clean].alloced = 0; | ||
523 | if (++rfd_ring->next_to_clean == | ||
524 | rfd_ring->count) { | ||
525 | rfd_ring-> | ||
526 | next_to_clean = 0; | ||
527 | } | ||
528 | } | ||
529 | } | ||
530 | } | ||
531 | |||
532 | /* update rrd */ | ||
533 | rrd->xsz.valid = 0; | ||
534 | if (++rrd_next_to_clean == rrd_ring->count) | ||
535 | rrd_next_to_clean = 0; | ||
536 | count++; | ||
537 | continue; | ||
538 | } else { /* current rrd still not be updated */ | ||
539 | 344 | ||
540 | break; | 345 | /* Free all the Rx ring sk_buffs */ |
541 | } | 346 | for (i = 0; i < rfd_ring->count; i++) { |
542 | rrd_ok: | 347 | buffer_info = &rfd_ring->buffer_info[i]; |
543 | /* clean alloc flag for bad rrd */ | 348 | if (buffer_info->dma) { |
544 | while (rfd_ring->next_to_clean != rrd->buf_indx) { | 349 | pci_unmap_page(pdev, buffer_info->dma, |
545 | rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = | 350 | buffer_info->length, PCI_DMA_FROMDEVICE); |
546 | 0; | 351 | buffer_info->dma = 0; |
547 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
548 | rfd_ring->next_to_clean = 0; | ||
549 | } | 352 | } |
550 | 353 | if (buffer_info->skb) { | |
551 | buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; | 354 | dev_kfree_skb(buffer_info->skb); |
552 | if (++rfd_ring->next_to_clean == rfd_ring->count) | 355 | buffer_info->skb = NULL; |
553 | rfd_ring->next_to_clean = 0; | ||
554 | |||
555 | /* update rrd next to clean */ | ||
556 | if (++rrd_next_to_clean == rrd_ring->count) | ||
557 | rrd_next_to_clean = 0; | ||
558 | count++; | ||
559 | |||
560 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
561 | if (!(rrd->err_flg & | ||
562 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ||
563 | | ERR_FLAG_LEN))) { | ||
564 | /* packet error, don't need upstream */ | ||
565 | buffer_info->alloced = 0; | ||
566 | rrd->xsz.valid = 0; | ||
567 | continue; | ||
568 | } | ||
569 | } | 356 | } |
570 | |||
571 | /* Good Receive */ | ||
572 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
573 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
574 | skb = buffer_info->skb; | ||
575 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | ||
576 | |||
577 | skb_put(skb, length - ETHERNET_FCS_SIZE); | ||
578 | |||
579 | /* Receive Checksum Offload */ | ||
580 | atl1_rx_checksum(adapter, rrd, skb); | ||
581 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
582 | |||
583 | if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { | ||
584 | u16 vlan_tag = (rrd->vlan_tag >> 4) | | ||
585 | ((rrd->vlan_tag & 7) << 13) | | ||
586 | ((rrd->vlan_tag & 8) << 9); | ||
587 | vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); | ||
588 | } else | ||
589 | netif_rx(skb); | ||
590 | |||
591 | /* let protocol layer free skb */ | ||
592 | buffer_info->skb = NULL; | ||
593 | buffer_info->alloced = 0; | ||
594 | rrd->xsz.valid = 0; | ||
595 | |||
596 | adapter->netdev->last_rx = jiffies; | ||
597 | } | 357 | } |
598 | 358 | ||
599 | atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); | 359 | size = sizeof(struct atl1_buffer) * rfd_ring->count; |
600 | 360 | memset(rfd_ring->buffer_info, 0, size); | |
601 | atl1_alloc_rx_buffers(adapter); | ||
602 | 361 | ||
603 | /* update mailbox ? */ | 362 | /* Zero out the descriptor ring */ |
604 | if (count) { | 363 | memset(rfd_ring->desc, 0, rfd_ring->size); |
605 | u32 tpd_next_to_use; | ||
606 | u32 rfd_next_to_use; | ||
607 | u32 rrd_next_to_clean; | ||
608 | 364 | ||
609 | spin_lock(&adapter->mb_lock); | 365 | rfd_ring->next_to_clean = 0; |
366 | atomic_set(&rfd_ring->next_to_use, 0); | ||
610 | 367 | ||
611 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | 368 | rrd_ring->next_to_use = 0; |
612 | rfd_next_to_use = | 369 | atomic_set(&rrd_ring->next_to_clean, 0); |
613 | atomic_read(&adapter->rfd_ring.next_to_use); | ||
614 | rrd_next_to_clean = | ||
615 | atomic_read(&adapter->rrd_ring.next_to_clean); | ||
616 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
617 | MB_RFD_PROD_INDX_SHIFT) | | ||
618 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
619 | MB_RRD_CONS_INDX_SHIFT) | | ||
620 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
621 | MB_TPD_PROD_INDX_SHIFT); | ||
622 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
623 | spin_unlock(&adapter->mb_lock); | ||
624 | } | ||
625 | } | 370 | } |
626 | 371 | ||
627 | static void atl1_intr_tx(struct atl1_adapter *adapter) | 372 | /* |
373 | * atl1_clean_tx_ring - Free Tx Buffers | ||
374 | * @adapter: board private structure | ||
375 | */ | ||
376 | static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | ||
628 | { | 377 | { |
629 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 378 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
630 | struct atl1_buffer *buffer_info; | 379 | struct atl1_buffer *buffer_info; |
631 | u16 sw_tpd_next_to_clean; | 380 | struct pci_dev *pdev = adapter->pdev; |
632 | u16 cmb_tpd_next_to_clean; | 381 | unsigned long size; |
633 | 382 | unsigned int i; | |
634 | sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
635 | cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); | ||
636 | |||
637 | while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { | ||
638 | struct tx_packet_desc *tpd; | ||
639 | 383 | ||
640 | tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); | 384 | /* Free all the Tx ring sk_buffs */ |
641 | buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; | 385 | for (i = 0; i < tpd_ring->count; i++) { |
386 | buffer_info = &tpd_ring->buffer_info[i]; | ||
642 | if (buffer_info->dma) { | 387 | if (buffer_info->dma) { |
643 | pci_unmap_page(adapter->pdev, buffer_info->dma, | 388 | pci_unmap_page(pdev, buffer_info->dma, |
644 | buffer_info->length, PCI_DMA_TODEVICE); | 389 | buffer_info->length, PCI_DMA_TODEVICE); |
645 | buffer_info->dma = 0; | 390 | buffer_info->dma = 0; |
646 | } | 391 | } |
392 | } | ||
647 | 393 | ||
394 | for (i = 0; i < tpd_ring->count; i++) { | ||
395 | buffer_info = &tpd_ring->buffer_info[i]; | ||
648 | if (buffer_info->skb) { | 396 | if (buffer_info->skb) { |
649 | dev_kfree_skb_irq(buffer_info->skb); | 397 | dev_kfree_skb_any(buffer_info->skb); |
650 | buffer_info->skb = NULL; | 398 | buffer_info->skb = NULL; |
651 | } | 399 | } |
652 | tpd->buffer_addr = 0; | ||
653 | tpd->desc.data = 0; | ||
654 | |||
655 | if (++sw_tpd_next_to_clean == tpd_ring->count) | ||
656 | sw_tpd_next_to_clean = 0; | ||
657 | } | 400 | } |
658 | atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); | ||
659 | |||
660 | if (netif_queue_stopped(adapter->netdev) | ||
661 | && netif_carrier_ok(adapter->netdev)) | ||
662 | netif_wake_queue(adapter->netdev); | ||
663 | } | ||
664 | 401 | ||
665 | static void atl1_check_for_link(struct atl1_adapter *adapter) | 402 | size = sizeof(struct atl1_buffer) * tpd_ring->count; |
666 | { | 403 | memset(tpd_ring->buffer_info, 0, size); |
667 | struct net_device *netdev = adapter->netdev; | ||
668 | u16 phy_data = 0; | ||
669 | |||
670 | spin_lock(&adapter->lock); | ||
671 | adapter->phy_timer_pending = false; | ||
672 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
673 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
674 | spin_unlock(&adapter->lock); | ||
675 | |||
676 | /* notify upper layer link down ASAP */ | ||
677 | if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ | ||
678 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
679 | dev_info(&adapter->pdev->dev, "%s link is down\n", | ||
680 | netdev->name); | ||
681 | adapter->link_speed = SPEED_0; | ||
682 | netif_carrier_off(netdev); | ||
683 | netif_stop_queue(netdev); | ||
684 | } | ||
685 | } | ||
686 | schedule_work(&adapter->link_chg_task); | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * atl1_intr - Interrupt Handler | ||
691 | * @irq: interrupt number | ||
692 | * @data: pointer to a network interface device structure | ||
693 | * @pt_regs: CPU registers structure | ||
694 | */ | ||
695 | static irqreturn_t atl1_intr(int irq, void *data) | ||
696 | { | ||
697 | /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/ | ||
698 | struct atl1_adapter *adapter = netdev_priv(data); | ||
699 | u32 status; | ||
700 | u8 update_rx; | ||
701 | int max_ints = 10; | ||
702 | |||
703 | status = adapter->cmb.cmb->int_stats; | ||
704 | if (!status) | ||
705 | return IRQ_NONE; | ||
706 | |||
707 | update_rx = 0; | ||
708 | |||
709 | do { | ||
710 | /* clear CMB interrupt status at once */ | ||
711 | adapter->cmb.cmb->int_stats = 0; | ||
712 | |||
713 | if (status & ISR_GPHY) /* clear phy status */ | ||
714 | atl1_clear_phy_int(adapter); | ||
715 | |||
716 | /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ | ||
717 | iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); | ||
718 | |||
719 | /* check if SMB intr */ | ||
720 | if (status & ISR_SMB) | ||
721 | atl1_inc_smb(adapter); | ||
722 | |||
723 | /* check if PCIE PHY Link down */ | ||
724 | if (status & ISR_PHY_LINKDOWN) { | ||
725 | dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n", | ||
726 | status); | ||
727 | if (netif_running(adapter->netdev)) { /* reset MAC */ | ||
728 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
729 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
730 | return IRQ_HANDLED; | ||
731 | } | ||
732 | } | ||
733 | |||
734 | /* check if DMA read/write error ? */ | ||
735 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { | ||
736 | dev_dbg(&adapter->pdev->dev, | ||
737 | "pcie DMA r/w error (status = 0x%x)\n", | ||
738 | status); | ||
739 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
740 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
741 | return IRQ_HANDLED; | ||
742 | } | ||
743 | |||
744 | /* link event */ | ||
745 | if (status & ISR_GPHY) { | ||
746 | adapter->soft_stats.tx_carrier_errors++; | ||
747 | atl1_check_for_link(adapter); | ||
748 | } | ||
749 | |||
750 | /* transmit event */ | ||
751 | if (status & ISR_CMB_TX) | ||
752 | atl1_intr_tx(adapter); | ||
753 | |||
754 | /* rx exception */ | ||
755 | if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
756 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
757 | ISR_HOST_RRD_OV | ISR_CMB_RX))) { | ||
758 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
759 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
760 | ISR_HOST_RRD_OV)) | ||
761 | dev_dbg(&adapter->pdev->dev, | ||
762 | "rx exception, ISR = 0x%x\n", status); | ||
763 | atl1_intr_rx(adapter); | ||
764 | } | ||
765 | |||
766 | if (--max_ints < 0) | ||
767 | break; | ||
768 | 404 | ||
769 | } while ((status = adapter->cmb.cmb->int_stats)); | 405 | /* Zero out the descriptor ring */ |
406 | memset(tpd_ring->desc, 0, tpd_ring->size); | ||
770 | 407 | ||
771 | /* re-enable Interrupt */ | 408 | atomic_set(&tpd_ring->next_to_use, 0); |
772 | iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); | 409 | atomic_set(&tpd_ring->next_to_clean, 0); |
773 | return IRQ_HANDLED; | ||
774 | } | 410 | } |
775 | 411 | ||
776 | /* | 412 | /* |
777 | * atl1_set_multi - Multicast and Promiscuous mode set | 413 | * atl1_free_ring_resources - Free Tx / RX descriptor Resources |
778 | * @netdev: network interface device structure | 414 | * @adapter: board private structure |
779 | * | 415 | * |
780 | * The set_multi entry point is called whenever the multicast address | 416 | * Free all transmit software resources |
781 | * list or the network interface flags are updated. This routine is | ||
782 | * responsible for configuring the hardware for proper multicast, | ||
783 | * promiscuous mode, and all-multi behavior. | ||
784 | */ | 417 | */ |
785 | static void atl1_set_multi(struct net_device *netdev) | 418 | void atl1_free_ring_resources(struct atl1_adapter *adapter) |
786 | { | 419 | { |
787 | struct atl1_adapter *adapter = netdev_priv(netdev); | 420 | struct pci_dev *pdev = adapter->pdev; |
788 | struct atl1_hw *hw = &adapter->hw; | 421 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
789 | struct dev_mc_list *mc_ptr; | 422 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
790 | u32 rctl; | 423 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; |
791 | u32 hash_value; | 424 | struct atl1_ring_header *ring_header = &adapter->ring_header; |
792 | 425 | ||
793 | /* Check for Promiscuous and All Multicast modes */ | 426 | atl1_clean_tx_ring(adapter); |
794 | rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); | 427 | atl1_clean_rx_ring(adapter); |
795 | if (netdev->flags & IFF_PROMISC) | ||
796 | rctl |= MAC_CTRL_PROMIS_EN; | ||
797 | else if (netdev->flags & IFF_ALLMULTI) { | ||
798 | rctl |= MAC_CTRL_MC_ALL_EN; | ||
799 | rctl &= ~MAC_CTRL_PROMIS_EN; | ||
800 | } else | ||
801 | rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); | ||
802 | 428 | ||
803 | iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); | 429 | kfree(tpd_ring->buffer_info); |
430 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | ||
431 | ring_header->dma); | ||
804 | 432 | ||
805 | /* clear the old settings from the multicast hash table */ | 433 | tpd_ring->buffer_info = NULL; |
806 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | 434 | tpd_ring->desc = NULL; |
807 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | 435 | tpd_ring->dma = 0; |
808 | 436 | ||
809 | /* compute mc addresses' hash value ,and put it into hash table */ | 437 | rfd_ring->buffer_info = NULL; |
810 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | 438 | rfd_ring->desc = NULL; |
811 | hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); | 439 | rfd_ring->dma = 0; |
812 | atl1_hash_set(hw, hash_value); | 440 | |
813 | } | 441 | rrd_ring->desc = NULL; |
442 | rrd_ring->dma = 0; | ||
814 | } | 443 | } |
815 | 444 | ||
816 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) | 445 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) |
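
The mailbox update in atl1_intr_rx above packs three ring indices into a single 32-bit doorbell written to REG_MAILBOX. A sketch of that packing, reusing the MB_* masks and shifts visible in the removed code; example_mailbox_value is a hypothetical helper, not part of the driver:

    /* Hypothetical helper showing the doorbell layout the interrupt
     * path composes: RFD producer, RRD consumer, TPD producer. */
    static u32 example_mailbox_value(u32 tpd_prod, u32 rfd_prod,
                                     u32 rrd_cons)
    {
            return ((rfd_prod & MB_RFD_PROD_INDX_MASK) <<
                                    MB_RFD_PROD_INDX_SHIFT) |
                   ((rrd_cons & MB_RRD_CONS_INDX_MASK) <<
                                    MB_RRD_CONS_INDX_SHIFT) |
                   ((tpd_prod & MB_TPD_PROD_INDX_MASK) <<
                                    MB_TPD_PROD_INDX_SHIFT);
    }

One write thus tells the hardware how far the host has produced RX free and TX descriptors and how far it has consumed RX return descriptors, which is why the code snapshots all three indices and writes them under mb_lock.
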
@@ -851,6 +480,31 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
851 | iowrite32(value, hw->hw_addr + REG_MAC_CTRL); | 480 | iowrite32(value, hw->hw_addr + REG_MAC_CTRL); |
852 | } | 481 | } |
853 | 482 | ||
483 | /* | ||
484 | * atl1_set_mac - Change the Ethernet Address of the NIC | ||
485 | * @netdev: network interface device structure | ||
486 | * @p: pointer to an address structure | ||
487 | * | ||
488 | * Returns 0 on success, negative on failure | ||
489 | */ | ||
490 | static int atl1_set_mac(struct net_device *netdev, void *p) | ||
491 | { | ||
492 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
493 | struct sockaddr *addr = p; | ||
494 | |||
495 | if (netif_running(netdev)) | ||
496 | return -EBUSY; | ||
497 | |||
498 | if (!is_valid_ether_addr(addr->sa_data)) | ||
499 | return -EADDRNOTAVAIL; | ||
500 | |||
501 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
502 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
503 | |||
504 | atl1_set_mac_addr(&adapter->hw); | ||
505 | return 0; | ||
506 | } | ||
507 | |||
854 | static u32 atl1_check_link(struct atl1_adapter *adapter) | 508 | static u32 atl1_check_link(struct atl1_adapter *adapter) |
855 | { | 509 | { |
856 | struct atl1_hw *hw = &adapter->hw; | 510 | struct atl1_hw *hw = &adapter->hw; |
@@ -958,6 +612,103 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
958 | return ATL1_SUCCESS; | 612 | return ATL1_SUCCESS; |
959 | } | 613 | } |
960 | 614 | ||
615 | static void atl1_check_for_link(struct atl1_adapter *adapter) | ||
616 | { | ||
617 | struct net_device *netdev = adapter->netdev; | ||
618 | u16 phy_data = 0; | ||
619 | |||
620 | spin_lock(&adapter->lock); | ||
621 | adapter->phy_timer_pending = false; | ||
622 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
623 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
624 | spin_unlock(&adapter->lock); | ||
625 | |||
626 | /* notify upper layer link down ASAP */ | ||
627 | if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ | ||
628 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
629 | dev_info(&adapter->pdev->dev, "%s link is down\n", | ||
630 | netdev->name); | ||
631 | adapter->link_speed = SPEED_0; | ||
632 | netif_carrier_off(netdev); | ||
633 | netif_stop_queue(netdev); | ||
634 | } | ||
635 | } | ||
636 | schedule_work(&adapter->link_chg_task); | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * atl1_set_multi - Multicast and Promiscuous mode set | ||
641 | * @netdev: network interface device structure | ||
642 | * | ||
643 | * The set_multi entry point is called whenever the multicast address | ||
644 | * list or the network interface flags are updated. This routine is | ||
645 | * responsible for configuring the hardware for proper multicast, | ||
646 | * promiscuous mode, and all-multi behavior. | ||
647 | */ | ||
648 | static void atl1_set_multi(struct net_device *netdev) | ||
649 | { | ||
650 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
651 | struct atl1_hw *hw = &adapter->hw; | ||
652 | struct dev_mc_list *mc_ptr; | ||
653 | u32 rctl; | ||
654 | u32 hash_value; | ||
655 | |||
656 | /* Check for Promiscuous and All Multicast modes */ | ||
657 | rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
658 | if (netdev->flags & IFF_PROMISC) | ||
659 | rctl |= MAC_CTRL_PROMIS_EN; | ||
660 | else if (netdev->flags & IFF_ALLMULTI) { | ||
661 | rctl |= MAC_CTRL_MC_ALL_EN; | ||
662 | rctl &= ~MAC_CTRL_PROMIS_EN; | ||
663 | } else | ||
664 | rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); | ||
665 | |||
666 | iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); | ||
667 | |||
668 | /* clear the old settings from the multicast hash table */ | ||
669 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | ||
670 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | ||
671 | |||
672 | /* compute mc addresses' hash value ,and put it into hash table */ | ||
673 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | ||
674 | hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); | ||
675 | atl1_hash_set(hw, hash_value); | ||
676 | } | ||
677 | } | ||
678 | |||
679 | /* | ||
680 | * atl1_change_mtu - Change the Maximum Transfer Unit | ||
681 | * @netdev: network interface device structure | ||
682 | * @new_mtu: new value for maximum frame size | ||
683 | * | ||
684 | * Returns 0 on success, negative on failure | ||
685 | */ | ||
686 | static int atl1_change_mtu(struct net_device *netdev, int new_mtu) | ||
687 | { | ||
688 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
689 | int old_mtu = netdev->mtu; | ||
690 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
691 | |||
692 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | ||
693 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
694 | dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); | ||
695 | return -EINVAL; | ||
696 | } | ||
697 | |||
698 | adapter->hw.max_frame_size = max_frame; | ||
699 | adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; | ||
700 | adapter->rx_buffer_len = (max_frame + 7) & ~7; | ||
701 | adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
702 | |||
703 | netdev->mtu = new_mtu; | ||
704 | if ((old_mtu != new_mtu) && netif_running(netdev)) { | ||
705 | atl1_down(adapter); | ||
706 | atl1_up(adapter); | ||
707 | } | ||
708 | |||
709 | return 0; | ||
710 | } | ||
711 | |||
961 | static void set_flow_ctrl_old(struct atl1_adapter *adapter) | 712 | static void set_flow_ctrl_old(struct atl1_adapter *adapter) |
962 | { | 713 | { |
963 | u32 hi, lo, value; | 714 | u32 hi, lo, value; |
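
atl1_change_mtu above sizes everything in 8-byte units. As a worked example, assuming ENET_HEADER_SIZE is 14 and ETHERNET_FCS_SIZE is 4 (standard Ethernet values; the macro definitions are not shown in this excerpt), a 1500-byte MTU works out as follows:

    /* Illustrative arithmetic only, mirroring atl1_change_mtu for a
     * 1500-byte MTU (1518-byte max frame). */
    static void example_mtu_sizing(void)
    {
            int max_frame = 1500 + 14 + 4;     /* MTU + header + FCS = 1518 */
            int tx_th = (max_frame + 7) >> 3;  /* 190: frame length in 8-byte units, rounded up */
            int rx_len = (max_frame + 7) & ~7; /* 1520: 1518 rounded up to a multiple of 8 */
            int rx_jumbo = rx_len / 8;         /* 190 */

            (void)tx_th;
            (void)rx_jumbo;
    }

The rounding keeps rx_buffer_len 8-byte aligned, matching the alignment the ring allocator enforces, and rx_jumbo_th ends up expressed in the same 8-byte units as tx_jumbo_task_th.
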
@@ -970,7 +721,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter)
970 | lo = value * 7 / 8; | 721 | lo = value * 7 / 8; |
971 | 722 | ||
972 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | 723 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | |
973 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | 724 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); |
974 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | 725 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); |
975 | 726 | ||
976 | /* RRD Flow Control */ | 727 | /* RRD Flow Control */ |
@@ -980,7 +731,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter) | |||
980 | if (lo < 2) | 731 | if (lo < 2) |
981 | lo = 2; | 732 | lo = 2; |
982 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | 733 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | |
983 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | 734 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); |
984 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | 735 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); |
985 | } | 736 | } |
986 | 737 | ||
@@ -997,7 +748,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) | |||
997 | if (hi < lo) | 748 | if (hi < lo) |
998 | hi = lo + 16; | 749 | hi = lo + 16; |
999 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | 750 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | |
1000 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | 751 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); |
1001 | iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | 752 | iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); |
1002 | 753 | ||
1003 | /* RRD Flow Control */ | 754 | /* RRD Flow Control */ |
@@ -1009,7 +760,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) | |||
1009 | if (hi < lo) | 760 | if (hi < lo) |
1010 | hi = lo + 3; | 761 | hi = lo + 3; |
1011 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | 762 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | |
1012 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | 763 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); |
1013 | iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | 764 | iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); |
1014 | } | 765 | } |
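Both flow-control setters pack a high and a low watermark into a single register with the same mask-and-shift idiom. A hypothetical helper that captures the pattern (illustrative only, not part of the driver):

	/* hypothetical helper: pack two watermarks into one register value */
	static inline u32 pack_pause_thresh(u32 hi, u32 hi_mask, int hi_shift,
					    u32 lo, u32 lo_mask, int lo_shift)
	{
		return ((hi & hi_mask) << hi_shift) | ((lo & lo_mask) << lo_shift);
	}

Each caller would then write the packed value straight to its REG_RXQ_*_PAUSE_THRESH register.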
1015 | 766 | ||
@@ -1058,7 +809,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1058 | value <<= 16; | 809 | value <<= 16; |
1059 | value += adapter->rfd_ring.count; | 810 | value += adapter->rfd_ring.count; |
1060 | iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); | 811 | iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); |
1061 | iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); | 812 | iowrite32(adapter->tpd_ring.count, hw->hw_addr + |
813 | REG_DESC_TPD_RING_SIZE); | ||
1062 | 814 | ||
1063 | /* Load Ptr */ | 815 | /* Load Ptr */ |
1064 | iowrite32(1, hw->hw_addr + REG_LOAD_PTR); | 816 | iowrite32(1, hw->hw_addr + REG_LOAD_PTR); |
@@ -1066,31 +818,31 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1066 | /* config Mailbox */ | 818 | /* config Mailbox */ |
1067 | value = ((atomic_read(&adapter->tpd_ring.next_to_use) | 819 | value = ((atomic_read(&adapter->tpd_ring.next_to_use) |
1068 | & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | | 820 | & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | |
1069 | ((atomic_read(&adapter->rrd_ring.next_to_clean) | 821 | ((atomic_read(&adapter->rrd_ring.next_to_clean) |
1070 | & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | | 822 | & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | |
1071 | ((atomic_read(&adapter->rfd_ring.next_to_use) | 823 | ((atomic_read(&adapter->rfd_ring.next_to_use) |
1072 | & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); | 824 | & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); |
1073 | iowrite32(value, hw->hw_addr + REG_MAILBOX); | 825 | iowrite32(value, hw->hw_addr + REG_MAILBOX); |
1074 | 826 | ||
1075 | /* config IPG/IFG */ | 827 | /* config IPG/IFG */ |
1076 | value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) | 828 | value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) |
1077 | << MAC_IPG_IFG_IPGT_SHIFT) | | 829 | << MAC_IPG_IFG_IPGT_SHIFT) | |
1078 | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) | 830 | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) |
1079 | << MAC_IPG_IFG_MIFG_SHIFT) | | 831 | << MAC_IPG_IFG_MIFG_SHIFT) | |
1080 | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) | 832 | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) |
1081 | << MAC_IPG_IFG_IPGR1_SHIFT) | | 833 | << MAC_IPG_IFG_IPGR1_SHIFT) | |
1082 | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) | 834 | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) |
1083 | << MAC_IPG_IFG_IPGR2_SHIFT); | 835 | << MAC_IPG_IFG_IPGR2_SHIFT); |
1084 | iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); | 836 | iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); |
1085 | 837 | ||
1086 | /* config Half-Duplex Control */ | 838 | /* config Half-Duplex Control */ |
1087 | value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | | 839 | value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | |
1088 | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) | 840 | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) |
1089 | << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | | 841 | << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | |
1090 | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | | 842 | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | |
1091 | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | | 843 | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | |
1092 | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) | 844 | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) |
1093 | << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); | 845 | << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); |
1094 | iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); | 846 | iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); |
1095 | 847 | ||
1096 | /* set Interrupt Moderator Timer */ | 848 | /* set Interrupt Moderator Timer */ |
@@ -1106,10 +858,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1106 | /* jumbo size & rrd retirement timer */ | 858 | /* jumbo size & rrd retirement timer */ |
1107 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) | 859 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) |
1108 | << RXQ_JMBOSZ_TH_SHIFT) | | 860 | << RXQ_JMBOSZ_TH_SHIFT) | |
1109 | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) | 861 | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) |
1110 | << RXQ_JMBO_LKAH_SHIFT) | | 862 | << RXQ_JMBO_LKAH_SHIFT) | |
1111 | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) | 863 | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) |
1112 | << RXQ_RRD_TIMER_SHIFT); | 864 | << RXQ_RRD_TIMER_SHIFT); |
1113 | iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); | 865 | iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); |
1114 | 866 | ||
1115 | /* Flow Control */ | 867 | /* Flow Control */ |
@@ -1128,35 +880,36 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1128 | /* config TXQ */ | 880 | /* config TXQ */ |
1129 | value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) | 881 | value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) |
1130 | << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | | 882 | << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | |
1131 | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) | 883 | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) |
1132 | << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | | 884 | << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | |
1133 | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) | 885 | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) |
1134 | << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; | 886 | << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | |
887 | TXQ_CTRL_EN; | ||
1135 | iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); | 888 | iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); |
1136 | 889 | ||
1137 | /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ | 890 | /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ |
1138 | value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) | 891 | value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) |
1139 | << TX_JUMBO_TASK_TH_SHIFT) | | 892 | << TX_JUMBO_TASK_TH_SHIFT) | |
1140 | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) | 893 | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) |
1141 | << TX_TPD_MIN_IPG_SHIFT); | 894 | << TX_TPD_MIN_IPG_SHIFT); |
1142 | iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); | 895 | iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); |
1143 | 896 | ||
1144 | /* config RXQ */ | 897 | /* config RXQ */ |
1145 | value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) | 898 | value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) |
1146 | << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | | 899 | << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | |
1147 | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) | 900 | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) |
1148 | << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | | 901 | << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | |
1149 | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) | 902 | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) |
1150 | << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | | 903 | << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | |
1151 | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; | 904 | RXQ_CTRL_EN; |
1152 | iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); | 905 | iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); |
1153 | 906 | ||
1154 | /* config DMA Engine */ | 907 | /* config DMA Engine */ |
1155 | value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | 908 | value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) |
1156 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | 909 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | |
1157 | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | 910 | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) |
1158 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | 911 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | |
1159 | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; | 912 | DMA_CTRL_DMAW_EN; |
1160 | value |= (u32) hw->dma_ord; | 913 | value |= (u32) hw->dma_ord; |
1161 | if (atl1_rcb_128 == hw->rcb_value) | 914 | if (atl1_rcb_128 == hw->rcb_value) |
1162 | value |= DMA_CTRL_RCB_VALUE; | 915 | value |= DMA_CTRL_RCB_VALUE; |
@@ -1186,56 +939,495 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1186 | } | 939 | } |
1187 | 940 | ||
1188 | /* | 941 | /* |
942 | * atl1_pcie_patch - Patch for PCIE module | ||
943 | */ | ||
944 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | ||
945 | { | ||
946 | u32 value; | ||
947 | |||
948 | /* much vendor magic here */ | ||
949 | value = 0x6500; | ||
950 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | ||
951 | /* pcie flow control mode change */ | ||
952 | value = ioread32(adapter->hw.hw_addr + 0x1008); | ||
953 | value |= 0x8000; | ||
954 | iowrite32(value, adapter->hw.hw_addr + 0x1008); | ||
955 | } | ||
956 | |||
957 | /* | ||
958 | * After ACPI resume on some VIA motherboards, the Interrupt Disable | ||
959 | * bit (0x400) in the PCI Command register is left set, masking INTx | ||
960 | * interrupts. This function clears that bit to re-enable them. | ||
961 | * Brackett, 2006/03/15 | ||
962 | */ | ||
963 | static void atl1_via_workaround(struct atl1_adapter *adapter) | ||
964 | { | ||
965 | unsigned long value; | ||
966 | |||
967 | value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); | ||
968 | if (value & PCI_COMMAND_INTX_DISABLE) | ||
969 | value &= ~PCI_COMMAND_INTX_DISABLE; | ||
970 | iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); | ||
971 | } | ||
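Note the mixed access widths above: the command word is read with ioread16 into an unsigned long but written back with iowrite32, and the write is issued even when the bit was already clear. A tightened sketch of the same workaround, assuming the PCI_COMMAND mirror behaves as a 16-bit register:

	/* sketch: same workaround with matched 16-bit accesses (assumption) */
	static void atl1_via_workaround_sketch(struct atl1_adapter *adapter)
	{
		u16 cmd = ioread16(adapter->hw.hw_addr + PCI_COMMAND);

		if (cmd & PCI_COMMAND_INTX_DISABLE)
			iowrite16(cmd & ~PCI_COMMAND_INTX_DISABLE,
				  adapter->hw.hw_addr + PCI_COMMAND);
	}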
972 | |||
973 | /* | ||
974 | * atl1_irq_enable - Enable default interrupt generation settings | ||
975 | * @adapter: board private structure | ||
976 | */ | ||
977 | static void atl1_irq_enable(struct atl1_adapter *adapter) | ||
978 | { | ||
979 | iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); | ||
980 | ioread32(adapter->hw.hw_addr + REG_IMR); | ||
981 | } | ||
982 | |||
983 | /* | ||
1189 | * atl1_irq_disable - Mask off interrupt generation on the NIC | 984 | * atl1_irq_disable - Mask off interrupt generation on the NIC |
1190 | * @adapter: board private structure | 985 | * @adapter: board private structure |
1191 | */ | 986 | */ |
1192 | static void atl1_irq_disable(struct atl1_adapter *adapter) | 987 | static void atl1_irq_disable(struct atl1_adapter *adapter) |
1193 | { | 988 | { |
1194 | atomic_inc(&adapter->irq_sem); | ||
1195 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | 989 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); |
1196 | ioread32(adapter->hw.hw_addr + REG_IMR); | 990 | ioread32(adapter->hw.hw_addr + REG_IMR); |
1197 | synchronize_irq(adapter->pdev->irq); | 991 | synchronize_irq(adapter->pdev->irq); |
1198 | } | 992 | } |
1199 | 993 | ||
1200 | static void atl1_vlan_rx_register(struct net_device *netdev, | 994 | static void atl1_clear_phy_int(struct atl1_adapter *adapter) |
1201 | struct vlan_group *grp) | ||
1202 | { | 995 | { |
1203 | struct atl1_adapter *adapter = netdev_priv(netdev); | 996 | u16 phy_data; |
1204 | unsigned long flags; | 997 | unsigned long flags; |
1205 | u32 ctrl; | ||
1206 | 998 | ||
1207 | spin_lock_irqsave(&adapter->lock, flags); | 999 | spin_lock_irqsave(&adapter->lock, flags); |
1208 | /* atl1_irq_disable(adapter); */ | 1000 | atl1_read_phy_reg(&adapter->hw, 19, &phy_data); |
1209 | adapter->vlgrp = grp; | 1001 | spin_unlock_irqrestore(&adapter->lock, flags); |
1002 | } | ||
1210 | 1003 | ||
1211 | if (grp) { | 1004 | static void atl1_inc_smb(struct atl1_adapter *adapter) |
1212 | /* enable VLAN tag insert/strip */ | 1005 | { |
1213 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | 1006 | struct stats_msg_block *smb = adapter->smb.smb; |
1214 | ctrl |= MAC_CTRL_RMV_VLAN; | 1007 | |
1215 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | 1008 | /* Fill out the OS statistics structure */ |
1216 | } else { | 1009 | adapter->soft_stats.rx_packets += smb->rx_ok; |
1217 | /* disable VLAN tag insert/strip */ | 1010 | adapter->soft_stats.tx_packets += smb->tx_ok; |
1218 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | 1011 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; |
1219 | ctrl &= ~MAC_CTRL_RMV_VLAN; | 1012 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; |
1220 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | 1013 | adapter->soft_stats.multicast += smb->rx_mcast; |
1014 | adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + | ||
1015 | smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); | ||
1016 | |||
1017 | /* Rx Errors */ | ||
1018 | adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + | ||
1019 | smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + | ||
1020 | smb->rx_rrd_ov + smb->rx_align_err); | ||
1021 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | ||
1022 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | ||
1023 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | ||
1024 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | ||
1025 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | ||
1026 | smb->rx_rxf_ov); | ||
1027 | |||
1028 | adapter->soft_stats.rx_pause += smb->rx_pause; | ||
1029 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | ||
1030 | adapter->soft_stats.rx_trunc += smb->rx_sz_ov; | ||
1031 | |||
1032 | /* Tx Errors */ | ||
1033 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | ||
1034 | smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); | ||
1035 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | ||
1036 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | ||
1037 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | ||
1038 | |||
1039 | adapter->soft_stats.excecol += smb->tx_abort_col; | ||
1040 | adapter->soft_stats.deffer += smb->tx_defer; | ||
1041 | adapter->soft_stats.scc += smb->tx_1_col; | ||
1042 | adapter->soft_stats.mcc += smb->tx_2_col; | ||
1043 | adapter->soft_stats.latecol += smb->tx_late_col; | ||
1044 | adapter->soft_stats.tx_underun += smb->tx_underrun; | ||
1045 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | ||
1046 | adapter->soft_stats.tx_pause += smb->tx_pause; | ||
1047 | |||
1048 | adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; | ||
1049 | adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; | ||
1050 | adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; | ||
1051 | adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; | ||
1052 | adapter->net_stats.multicast = adapter->soft_stats.multicast; | ||
1053 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | ||
1054 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | ||
1055 | adapter->net_stats.rx_over_errors = | ||
1056 | adapter->soft_stats.rx_missed_errors; | ||
1057 | adapter->net_stats.rx_length_errors = | ||
1058 | adapter->soft_stats.rx_length_errors; | ||
1059 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | ||
1060 | adapter->net_stats.rx_frame_errors = | ||
1061 | adapter->soft_stats.rx_frame_errors; | ||
1062 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | ||
1063 | adapter->net_stats.rx_missed_errors = | ||
1064 | adapter->soft_stats.rx_missed_errors; | ||
1065 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | ||
1066 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | ||
1067 | adapter->net_stats.tx_aborted_errors = | ||
1068 | adapter->soft_stats.tx_aborted_errors; | ||
1069 | adapter->net_stats.tx_window_errors = | ||
1070 | adapter->soft_stats.tx_window_errors; | ||
1071 | adapter->net_stats.tx_carrier_errors = | ||
1072 | adapter->soft_stats.tx_carrier_errors; | ||
1073 | } | ||
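The collision total above weights each SMB counter by the number of collisions it stands for: a single-collision frame counts once, a two-collision frame twice, and an aborted frame is assumed to have burned max_retry attempts. With illustrative values tx_1_col = 10, tx_2_col = 5, tx_late_col = 1, tx_abort_col = 2 and max_retry = 16:

	collisions += 10 + 5 * 2 + 1 + 2 * 16 = 53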
1074 | |||
1075 | /* | ||
1076 | * atl1_get_stats - Get System Network Statistics | ||
1077 | * @netdev: network interface device structure | ||
1078 | * | ||
1079 | * Returns the address of the device statistics structure. | ||
1080 | * The statistics are actually updated from the timer callback. | ||
1081 | */ | ||
1082 | static struct net_device_stats *atl1_get_stats(struct net_device *netdev) | ||
1083 | { | ||
1084 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1085 | return &adapter->net_stats; | ||
1086 | } | ||
1087 | |||
1088 | static void atl1_update_mailbox(struct atl1_adapter *adapter) | ||
1089 | { | ||
1090 | unsigned long flags; | ||
1091 | u32 tpd_next_to_use; | ||
1092 | u32 rfd_next_to_use; | ||
1093 | u32 rrd_next_to_clean; | ||
1094 | u32 value; | ||
1095 | |||
1096 | spin_lock_irqsave(&adapter->mb_lock, flags); | ||
1097 | |||
1098 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1099 | rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); | ||
1100 | rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1101 | |||
1102 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1103 | MB_RFD_PROD_INDX_SHIFT) | | ||
1104 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1105 | MB_RRD_CONS_INDX_SHIFT) | | ||
1106 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1107 | MB_TPD_PROD_INDX_SHIFT); | ||
1108 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1109 | |||
1110 | spin_unlock_irqrestore(&adapter->mb_lock, flags); | ||
1111 | } | ||
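The same three-field packing also appears in atl1_configure and at the bottom of atl1_intr_rx; a hypothetical helper that would factor it out (not something this patch adds):

	/* hypothetical refactor of the repeated mailbox packing */
	static u32 atl1_pack_mailbox(u32 tpd_prod, u32 rrd_cons, u32 rfd_prod)
	{
		return ((rfd_prod & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) |
		       ((rrd_cons & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		       ((tpd_prod & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT);
	}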
1112 | |||
1113 | static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, | ||
1114 | struct rx_return_desc *rrd, u16 offset) | ||
1115 | { | ||
1116 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1117 | |||
1118 | while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { | ||
1119 | rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; | ||
1120 | if (++rfd_ring->next_to_clean == rfd_ring->count) { | ||
1121 | rfd_ring->next_to_clean = 0; | ||
1122 | } | ||
1221 | } | 1123 | } |
1124 | } | ||
1222 | 1125 | ||
1223 | /* atl1_irq_enable(adapter); */ | 1126 | static void atl1_update_rfd_index(struct atl1_adapter *adapter, |
1224 | spin_unlock_irqrestore(&adapter->lock, flags); | 1127 | struct rx_return_desc *rrd) |
1128 | { | ||
1129 | u16 num_buf; | ||
1130 | |||
1131 | num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / | ||
1132 | adapter->rx_buffer_len; | ||
1133 | if (rrd->num_buf == num_buf) | ||
1134 | /* clean alloc flag for bad rrd */ | ||
1135 | atl1_clean_alloc_flag(adapter, rrd, num_buf); | ||
1225 | } | 1136 | } |
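num_buf here is a ceiling division: how many rx buffers a packet of pkt_size bytes spans. For example, with pkt_size = 3000 and rx_buffer_len = 1536:

	num_buf = (3000 + 1536 - 1) / 1536 = 4535 / 1536 = 2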
1226 | 1137 | ||
1227 | static void atl1_restore_vlan(struct atl1_adapter *adapter) | 1138 | static void atl1_rx_checksum(struct atl1_adapter *adapter, |
1139 | struct rx_return_desc *rrd, struct sk_buff *skb) | ||
1228 | { | 1140 | { |
1229 | atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 1141 | struct pci_dev *pdev = adapter->pdev; |
1142 | |||
1143 | skb->ip_summed = CHECKSUM_NONE; | ||
1144 | |||
1145 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
1146 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | ||
1147 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | ||
1148 | adapter->hw_csum_err++; | ||
1149 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1150 | "rx checksum error\n"); | ||
1151 | return; | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | /* not IPv4 */ | ||
1156 | if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) | ||
1157 | /* hw checksum only covers IPv4, so hand it to the stack unverified */ | ||
1158 | return; | ||
1159 | |||
1160 | /* IPv4 packet */ | ||
1161 | if (likely(!(rrd->err_flg & | ||
1162 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { | ||
1163 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1164 | adapter->hw_csum_good++; | ||
1165 | return; | ||
1166 | } | ||
1167 | |||
1168 | /* IPv4, but hardware thinks its checksum is wrong */ | ||
1169 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1170 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", | ||
1171 | rrd->pkt_flg, rrd->err_flg); | ||
1172 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1173 | skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); | ||
1174 | adapter->hw_csum_err++; | ||
1175 | return; | ||
1176 | } | ||
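Condensed, the policy above has four outcomes. A simplified sketch of the decision (the fourth branch in the real code reports CHECKSUM_COMPLETE with the raw 16-bit hardware sum; the sketch leaves that path as CHECKSUM_NONE so the stack rechecks):

	/* sketch: rx checksum policy reduced to NONE vs UNNECESSARY */
	static int rx_csum_policy(u16 pkt_flg, u16 err_flg)
	{
		if ((pkt_flg & PACKET_FLAG_ERR) && (err_flg &
		    (ERR_FLAG_CRC | ERR_FLAG_TRUNC | ERR_FLAG_CODE | ERR_FLAG_OV)))
			return CHECKSUM_NONE;		/* hard rx error */
		if (!(pkt_flg & PACKET_FLAG_IPV4))
			return CHECKSUM_NONE;		/* hw only checks IPv4 */
		if (!(err_flg & (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))
			return CHECKSUM_UNNECESSARY;	/* both sums verified */
		return CHECKSUM_NONE;			/* let the stack recheck */
	}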
1177 | |||
1178 | /* | ||
1179 | * atl1_alloc_rx_buffers - Replace used receive buffers | ||
1180 | * @adapter: address of board private structure | ||
1181 | */ | ||
1182 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | ||
1183 | { | ||
1184 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1185 | struct pci_dev *pdev = adapter->pdev; | ||
1186 | struct page *page; | ||
1187 | unsigned long offset; | ||
1188 | struct atl1_buffer *buffer_info, *next_info; | ||
1189 | struct sk_buff *skb; | ||
1190 | u16 num_alloc = 0; | ||
1191 | u16 rfd_next_to_use, next_next; | ||
1192 | struct rx_free_desc *rfd_desc; | ||
1193 | |||
1194 | next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); | ||
1195 | if (++next_next == rfd_ring->count) | ||
1196 | next_next = 0; | ||
1197 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
1198 | next_info = &rfd_ring->buffer_info[next_next]; | ||
1199 | |||
1200 | while (!buffer_info->alloced && !next_info->alloced) { | ||
1201 | if (buffer_info->skb) { | ||
1202 | buffer_info->alloced = 1; | ||
1203 | goto next; | ||
1204 | } | ||
1205 | |||
1206 | rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); | ||
1207 | |||
1208 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | ||
1209 | if (unlikely(!skb)) { /* Better luck next round */ | ||
1210 | adapter->net_stats.rx_dropped++; | ||
1211 | break; | ||
1212 | } | ||
1213 | |||
1214 | /* | ||
1215 | * Make buffer alignment 2 beyond a 16 byte boundary; | ||
1216 | * this will result in a 16 byte aligned IP header after | ||
1217 | * the 14 byte MAC header is removed | ||
1218 | */ | ||
1219 | skb_reserve(skb, NET_IP_ALIGN); | ||
1220 | |||
1221 | buffer_info->alloced = 1; | ||
1222 | buffer_info->skb = skb; | ||
1223 | buffer_info->length = (u16) adapter->rx_buffer_len; | ||
1224 | page = virt_to_page(skb->data); | ||
1225 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
1226 | buffer_info->dma = pci_map_page(pdev, page, offset, | ||
1227 | adapter->rx_buffer_len, | ||
1228 | PCI_DMA_FROMDEVICE); | ||
1229 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
1230 | rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); | ||
1231 | rfd_desc->coalese = 0; | ||
1232 | |||
1233 | next: | ||
1234 | rfd_next_to_use = next_next; | ||
1235 | if (unlikely(++next_next == rfd_ring->count)) | ||
1236 | next_next = 0; | ||
1237 | |||
1238 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
1239 | next_info = &rfd_ring->buffer_info[next_next]; | ||
1240 | num_alloc++; | ||
1241 | } | ||
1242 | |||
1243 | if (num_alloc) { | ||
1244 | /* | ||
1245 | * Force memory writes to complete before letting h/w | ||
1246 | * know there are new descriptors to fetch. (Only | ||
1247 | * applicable for weak-ordered memory model archs, | ||
1248 | * such as IA-64). | ||
1249 | */ | ||
1250 | wmb(); | ||
1251 | atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); | ||
1252 | } | ||
1253 | return num_alloc; | ||
1254 | } | ||
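The NET_IP_ALIGN reserve works out as follows: dev_alloc_skb returns data on (at least) a 16-byte boundary, so reserving 2 bytes places the 14-byte Ethernet header at offset 2 and the IP header that follows lands back on a 16-byte boundary:

	skb->data after reserve : 16n + 2
	Ethernet header         : 16n + 2  .. 16n + 15   (14 bytes)
	IP header               : 16n + 16               (16-byte aligned)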
1255 | |||
1256 | static void atl1_intr_rx(struct atl1_adapter *adapter) | ||
1257 | { | ||
1258 | int i, count; | ||
1259 | u16 length; | ||
1260 | u16 rrd_next_to_clean; | ||
1261 | u32 value; | ||
1262 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1263 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1264 | struct atl1_buffer *buffer_info; | ||
1265 | struct rx_return_desc *rrd; | ||
1266 | struct sk_buff *skb; | ||
1267 | |||
1268 | count = 0; | ||
1269 | |||
1270 | rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); | ||
1271 | |||
1272 | while (1) { | ||
1273 | rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); | ||
1274 | i = 1; | ||
1275 | if (likely(rrd->xsz.valid)) { /* packet valid */ | ||
1276 | chk_rrd: | ||
1277 | /* check rrd status */ | ||
1278 | if (likely(rrd->num_buf == 1)) | ||
1279 | goto rrd_ok; | ||
1280 | |||
1281 | /* rrd seems to be bad */ | ||
1282 | if (unlikely(i-- > 0)) { | ||
1283 | /* rrd may not be DMAed completely */ | ||
1284 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1285 | "incomplete RRD DMA transfer\n"); | ||
1286 | udelay(1); | ||
1287 | goto chk_rrd; | ||
1288 | } | ||
1289 | /* bad rrd */ | ||
1290 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1291 | "bad RRD\n"); | ||
1292 | /* see if we should update the RFD index */ | ||
1293 | if (rrd->num_buf > 1) | ||
1294 | atl1_update_rfd_index(adapter, rrd); | ||
1295 | |||
1296 | /* update rrd */ | ||
1297 | rrd->xsz.valid = 0; | ||
1298 | if (++rrd_next_to_clean == rrd_ring->count) | ||
1299 | rrd_next_to_clean = 0; | ||
1300 | count++; | ||
1301 | continue; | ||
1302 | } else { /* current rrd has not been updated yet */ | ||
1303 | |||
1304 | break; | ||
1305 | } | ||
1306 | rrd_ok: | ||
1307 | /* clean alloc flag for bad rrd */ | ||
1308 | atl1_clean_alloc_flag(adapter, rrd, 0); | ||
1309 | |||
1310 | buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; | ||
1311 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
1312 | rfd_ring->next_to_clean = 0; | ||
1313 | |||
1314 | /* update rrd next to clean */ | ||
1315 | if (++rrd_next_to_clean == rrd_ring->count) | ||
1316 | rrd_next_to_clean = 0; | ||
1317 | count++; | ||
1318 | |||
1319 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
1320 | if (!(rrd->err_flg & | ||
1321 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ||
1322 | | ERR_FLAG_LEN))) { | ||
1323 | /* packet error; no need to pass it up the stack */ | ||
1324 | buffer_info->alloced = 0; | ||
1325 | rrd->xsz.valid = 0; | ||
1326 | continue; | ||
1327 | } | ||
1328 | } | ||
1329 | |||
1330 | /* Good Receive */ | ||
1331 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
1332 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
1333 | skb = buffer_info->skb; | ||
1334 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | ||
1335 | |||
1336 | skb_put(skb, length - ETHERNET_FCS_SIZE); | ||
1337 | |||
1338 | /* Receive Checksum Offload */ | ||
1339 | atl1_rx_checksum(adapter, rrd, skb); | ||
1340 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
1341 | |||
1342 | if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { | ||
1343 | u16 vlan_tag = (rrd->vlan_tag >> 4) | | ||
1344 | ((rrd->vlan_tag & 7) << 13) | | ||
1345 | ((rrd->vlan_tag & 8) << 9); | ||
1346 | vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); | ||
1347 | } else | ||
1348 | netif_rx(skb); | ||
1349 | |||
1350 | /* let protocol layer free skb */ | ||
1351 | buffer_info->skb = NULL; | ||
1352 | buffer_info->alloced = 0; | ||
1353 | rrd->xsz.valid = 0; | ||
1354 | |||
1355 | adapter->netdev->last_rx = jiffies; | ||
1356 | } | ||
1357 | |||
1358 | atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); | ||
1359 | |||
1360 | atl1_alloc_rx_buffers(adapter); | ||
1361 | |||
1362 | /* update the mailbox if any descriptors were consumed */ | ||
1363 | if (count) { | ||
1364 | u32 tpd_next_to_use; | ||
1365 | u32 rfd_next_to_use; | ||
1366 | u32 rrd_next_to_clean; | ||
1367 | |||
1368 | spin_lock(&adapter->mb_lock); | ||
1369 | |||
1370 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1371 | rfd_next_to_use = | ||
1372 | atomic_read(&adapter->rfd_ring.next_to_use); | ||
1373 | rrd_next_to_clean = | ||
1374 | atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1375 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1376 | MB_RFD_PROD_INDX_SHIFT) | | ||
1377 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1378 | MB_RRD_CONS_INDX_SHIFT) | | ||
1379 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1380 | MB_TPD_PROD_INDX_SHIFT); | ||
1381 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1382 | spin_unlock(&adapter->mb_lock); | ||
1383 | } | ||
1384 | } | ||
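Reading the VLAN swizzle in the loop above literally: the upper twelve bits of the hardware tag become the VID, the low three bits become the priority, and bit 3 becomes CFI. For a hardware tag of 0xABC5 (VID 0xABC, priority 5, CFI 0):

	(0xABC5 >> 4)      = 0x0ABC	/* VID -> TCI bits 11..0 */
	(0xABC5 & 7) << 13 = 0xA000	/* priority -> bits 15..13 */
	(0xABC5 & 8) << 9  = 0x0000	/* CFI -> bit 12 */
	vlan_tag           = 0xAABC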
1385 | |||
1386 | static void atl1_intr_tx(struct atl1_adapter *adapter) | ||
1387 | { | ||
1388 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1389 | struct atl1_buffer *buffer_info; | ||
1390 | u16 sw_tpd_next_to_clean; | ||
1391 | u16 cmb_tpd_next_to_clean; | ||
1392 | |||
1393 | sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
1394 | cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); | ||
1395 | |||
1396 | while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { | ||
1397 | struct tx_packet_desc *tpd; | ||
1398 | |||
1399 | tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); | ||
1400 | buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; | ||
1401 | if (buffer_info->dma) { | ||
1402 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
1403 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1404 | buffer_info->dma = 0; | ||
1405 | } | ||
1406 | |||
1407 | if (buffer_info->skb) { | ||
1408 | dev_kfree_skb_irq(buffer_info->skb); | ||
1409 | buffer_info->skb = NULL; | ||
1410 | } | ||
1411 | tpd->buffer_addr = 0; | ||
1412 | tpd->desc.data = 0; | ||
1413 | |||
1414 | if (++sw_tpd_next_to_clean == tpd_ring->count) | ||
1415 | sw_tpd_next_to_clean = 0; | ||
1416 | } | ||
1417 | atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); | ||
1418 | |||
1419 | if (netif_queue_stopped(adapter->netdev) | ||
1420 | && netif_carrier_ok(adapter->netdev)) | ||
1421 | netif_wake_queue(adapter->netdev); | ||
1230 | } | 1422 | } |
1231 | 1423 | ||
1232 | static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) | 1424 | static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) |
1233 | { | 1425 | { |
1234 | u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); | 1426 | u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); |
1235 | u16 next_to_use = atomic_read(&tpd_ring->next_to_use); | 1427 | u16 next_to_use = atomic_read(&tpd_ring->next_to_use); |
1236 | return ((next_to_clean > | 1428 | return ((next_to_clean > next_to_use) ? |
1237 | next_to_use) ? next_to_clean - next_to_use - | 1429 | next_to_clean - next_to_use - 1 : |
1238 | 1 : tpd_ring->count + next_to_clean - next_to_use - 1); | 1430 | tpd_ring->count + next_to_clean - next_to_use - 1); |
1239 | } | 1431 | } |
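tpd_avail is the classic ring-buffer free count, with one slot deliberately kept unused so that a full ring and an empty ring are distinguishable. Two worked cases with count = 256:

	next_to_clean = 100, next_to_use = 50  ->  100 - 50 - 1       = 49 free
	next_to_clean = 10,  next_to_use = 250 ->  256 + 10 - 250 - 1 = 15 free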
1240 | 1432 | ||
1241 | static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | 1433 | static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, |
@@ -1258,9 +1450,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1258 | iph->tot_len = 0; | 1450 | iph->tot_len = 0; |
1259 | iph->check = 0; | 1451 | iph->check = 0; |
1260 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 1452 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1261 | iph->daddr, 0, | 1453 | iph->daddr, 0, IPPROTO_TCP, 0); |
1262 | IPPROTO_TCP, | ||
1263 | 0); | ||
1264 | ipofst = skb_network_offset(skb); | 1454 | ipofst = skb_network_offset(skb); |
1265 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ | 1455 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ |
1266 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; | 1456 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; |
@@ -1268,7 +1458,8 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1268 | tso->tsopl |= (iph->ihl & | 1458 | tso->tsopl |= (iph->ihl & |
1269 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; | 1459 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; |
1270 | tso->tsopl |= (tcp_hdrlen(skb) & | 1460 | tso->tsopl |= (tcp_hdrlen(skb) & |
1271 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; | 1461 | TSO_PARAM_TCPHDRLEN_MASK) << |
1462 | TSO_PARAM_TCPHDRLEN_SHIFT; | ||
1272 | tso->tsopl |= (skb_shinfo(skb)->gso_size & | 1463 | tso->tsopl |= (skb_shinfo(skb)->gso_size & |
1273 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; | 1464 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; |
1274 | tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; | 1465 | tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; |
@@ -1281,7 +1472,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1281 | } | 1472 | } |
1282 | 1473 | ||
1283 | static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | 1474 | static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, |
1284 | struct csum_param *csum) | 1475 | struct csum_param *csum) |
1285 | { | 1476 | { |
1286 | u8 css, cso; | 1477 | u8 css, cso; |
1287 | 1478 | ||
@@ -1289,7 +1480,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1289 | cso = skb_transport_offset(skb); | 1480 | cso = skb_transport_offset(skb); |
1290 | css = cso + skb->csum_offset; | 1481 | css = cso + skb->csum_offset; |
1291 | if (unlikely(cso & 0x1)) { | 1482 | if (unlikely(cso & 0x1)) { |
1292 | dev_dbg(&adapter->pdev->dev, | 1483 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
1293 | "payload offset not an even number\n"); | 1484 | "payload offset not an even number\n"); |
1294 | return -1; | 1485 | return -1; |
1295 | } | 1486 | } |
@@ -1304,8 +1495,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1304 | return true; | 1495 | return true; |
1305 | } | 1496 | } |
1306 | 1497 | ||
1307 | static void atl1_tx_map(struct atl1_adapter *adapter, | 1498 | static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, |
1308 | struct sk_buff *skb, bool tcp_seg) | 1499 | bool tcp_seg) |
1309 | { | 1500 | { |
1310 | /* We enter this function holding a spinlock. */ | 1501 | /* We enter this function holding a spinlock. */ |
1311 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1502 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
@@ -1342,26 +1533,25 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1342 | 1533 | ||
1343 | if (first_buf_len > proto_hdr_len) { | 1534 | if (first_buf_len > proto_hdr_len) { |
1344 | len12 = first_buf_len - proto_hdr_len; | 1535 | len12 = first_buf_len - proto_hdr_len; |
1345 | m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | 1536 | m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / |
1537 | ATL1_MAX_TX_BUF_LEN; | ||
1346 | for (i = 0; i < m; i++) { | 1538 | for (i = 0; i < m; i++) { |
1347 | buffer_info = | 1539 | buffer_info = |
1348 | &tpd_ring->buffer_info[tpd_next_to_use]; | 1540 | &tpd_ring->buffer_info[tpd_next_to_use]; |
1349 | buffer_info->skb = NULL; | 1541 | buffer_info->skb = NULL; |
1350 | buffer_info->length = | 1542 | buffer_info->length = |
1351 | (MAX_TX_BUF_LEN >= | 1543 | (ATL1_MAX_TX_BUF_LEN >= |
1352 | len12) ? MAX_TX_BUF_LEN : len12; | 1544 | len12) ? ATL1_MAX_TX_BUF_LEN : len12; |
1353 | len12 -= buffer_info->length; | 1545 | len12 -= buffer_info->length; |
1354 | page = virt_to_page(skb->data + | 1546 | page = virt_to_page(skb->data + |
1355 | (proto_hdr_len + | 1547 | (proto_hdr_len + |
1356 | i * MAX_TX_BUF_LEN)); | 1548 | i * ATL1_MAX_TX_BUF_LEN)); |
1357 | offset = (unsigned long)(skb->data + | 1549 | offset = (unsigned long)(skb->data + |
1358 | (proto_hdr_len + | 1550 | (proto_hdr_len + |
1359 | i * MAX_TX_BUF_LEN)) & | 1551 | i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; |
1360 | ~PAGE_MASK; | 1552 | buffer_info->dma = pci_map_page(adapter->pdev, |
1361 | buffer_info->dma = | 1553 | page, offset, buffer_info->length, |
1362 | pci_map_page(adapter->pdev, page, offset, | 1554 | PCI_DMA_TODEVICE); |
1363 | buffer_info->length, | ||
1364 | PCI_DMA_TODEVICE); | ||
1365 | if (++tpd_next_to_use == tpd_ring->count) | 1555 | if (++tpd_next_to_use == tpd_ring->count) |
1366 | tpd_next_to_use = 0; | 1556 | tpd_next_to_use = 0; |
1367 | } | 1557 | } |
@@ -1372,8 +1562,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1372 | page = virt_to_page(skb->data); | 1562 | page = virt_to_page(skb->data); |
1373 | offset = (unsigned long)skb->data & ~PAGE_MASK; | 1563 | offset = (unsigned long)skb->data & ~PAGE_MASK; |
1374 | buffer_info->dma = pci_map_page(adapter->pdev, page, | 1564 | buffer_info->dma = pci_map_page(adapter->pdev, page, |
1375 | offset, first_buf_len, | 1565 | offset, first_buf_len, PCI_DMA_TODEVICE); |
1376 | PCI_DMA_TODEVICE); | ||
1377 | if (++tpd_next_to_use == tpd_ring->count) | 1566 | if (++tpd_next_to_use == tpd_ring->count) |
1378 | tpd_next_to_use = 0; | 1567 | tpd_next_to_use = 0; |
1379 | } | 1568 | } |
@@ -1385,19 +1574,19 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1385 | frag = &skb_shinfo(skb)->frags[f]; | 1574 | frag = &skb_shinfo(skb)->frags[f]; |
1386 | lenf = frag->size; | 1575 | lenf = frag->size; |
1387 | 1576 | ||
1388 | m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | 1577 | m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; |
1389 | for (i = 0; i < m; i++) { | 1578 | for (i = 0; i < m; i++) { |
1390 | buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; | 1579 | buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; |
1391 | if (unlikely(buffer_info->skb)) | 1580 | if (unlikely(buffer_info->skb)) |
1392 | BUG(); | 1581 | BUG(); |
1393 | buffer_info->skb = NULL; | 1582 | buffer_info->skb = NULL; |
1394 | buffer_info->length = | 1583 | buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? |
1395 | (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf; | 1584 | ATL1_MAX_TX_BUF_LEN : lenf; |
1396 | lenf -= buffer_info->length; | 1585 | lenf -= buffer_info->length; |
1397 | buffer_info->dma = | 1586 | buffer_info->dma = pci_map_page(adapter->pdev, |
1398 | pci_map_page(adapter->pdev, frag->page, | 1587 | frag->page, |
1399 | frag->page_offset + i * MAX_TX_BUF_LEN, | 1588 | frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), |
1400 | buffer_info->length, PCI_DMA_TODEVICE); | 1589 | buffer_info->length, PCI_DMA_TODEVICE); |
1401 | 1590 | ||
1402 | if (++tpd_next_to_use == tpd_ring->count) | 1591 | if (++tpd_next_to_use == tpd_ring->count) |
1403 | tpd_next_to_use = 0; | 1592 | tpd_next_to_use = 0; |
@@ -1409,7 +1598,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1409 | } | 1598 | } |
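The ATL1_MAX_TX_BUF_LEN splitting in atl1_tx_map is the same ceiling division used on the rx side. A 20000-byte payload span with ATL1_MAX_TX_BUF_LEN = 0x3000 (12288 bytes) maps to:

	m = (20000 + 12288 - 1) / 12288 = 2	/* buffers of 12288 and 7712 bytes */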
1410 | 1599 | ||
1411 | static void atl1_tx_queue(struct atl1_adapter *adapter, int count, | 1600 | static void atl1_tx_queue(struct atl1_adapter *adapter, int count, |
1412 | union tpd_descr *descr) | 1601 | union tpd_descr *descr) |
1413 | { | 1602 | { |
1414 | /* We enter this function holding a spinlock. */ | 1603 | /* We enter this function holding a spinlock. */ |
1415 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1604 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
@@ -1453,31 +1642,6 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count, | |||
1453 | atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); | 1642 | atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); |
1454 | } | 1643 | } |
1455 | 1644 | ||
1456 | static void atl1_update_mailbox(struct atl1_adapter *adapter) | ||
1457 | { | ||
1458 | unsigned long flags; | ||
1459 | u32 tpd_next_to_use; | ||
1460 | u32 rfd_next_to_use; | ||
1461 | u32 rrd_next_to_clean; | ||
1462 | u32 value; | ||
1463 | |||
1464 | spin_lock_irqsave(&adapter->mb_lock, flags); | ||
1465 | |||
1466 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1467 | rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); | ||
1468 | rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1469 | |||
1470 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1471 | MB_RFD_PROD_INDX_SHIFT) | | ||
1472 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1473 | MB_RRD_CONS_INDX_SHIFT) | | ||
1474 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1475 | MB_TPD_PROD_INDX_SHIFT); | ||
1476 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1477 | |||
1478 | spin_unlock_irqrestore(&adapter->mb_lock, flags); | ||
1479 | } | ||
1480 | |||
1481 | static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 1645 | static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
1482 | { | 1646 | { |
1483 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1647 | struct atl1_adapter *adapter = netdev_priv(netdev); |
@@ -1513,8 +1677,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1513 | for (f = 0; f < nr_frags; f++) { | 1677 | for (f = 0; f < nr_frags; f++) { |
1514 | frag_size = skb_shinfo(skb)->frags[f].size; | 1678 | frag_size = skb_shinfo(skb)->frags[f].size; |
1515 | if (frag_size) | 1679 | if (frag_size) |
1516 | count += | 1680 | count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / |
1517 | (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | 1681 | ATL1_MAX_TX_BUF_LEN; |
1518 | } | 1682 | } |
1519 | 1683 | ||
1520 | /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ | 1684 | /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ |
@@ -1530,7 +1694,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1530 | /* need additional TPD ? */ | 1694 | /* need additional TPD ? */ |
1531 | if (proto_hdr_len != len) | 1695 | if (proto_hdr_len != len) |
1532 | count += (len - proto_hdr_len + | 1696 | count += (len - proto_hdr_len + |
1533 | MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | 1697 | ATL1_MAX_TX_BUF_LEN - 1) / |
1698 | ATL1_MAX_TX_BUF_LEN; | ||
1534 | } | 1699 | } |
1535 | } | 1700 | } |
1536 | 1701 | ||
@@ -1538,7 +1703,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1538 | if (!spin_trylock(&adapter->lock)) { | 1703 | if (!spin_trylock(&adapter->lock)) { |
1539 | /* Can't get lock - tell upper layer to requeue */ | 1704 | /* Can't get lock - tell upper layer to requeue */ |
1540 | local_irq_restore(flags); | 1705 | local_irq_restore(flags); |
1541 | dev_dbg(&adapter->pdev->dev, "tx locked\n"); | 1706 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); |
1542 | return NETDEV_TX_LOCKED; | 1707 | return NETDEV_TX_LOCKED; |
1543 | } | 1708 | } |
1544 | 1709 | ||
@@ -1546,7 +1711,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1546 | /* not enough descriptors */ | 1711 | /* not enough descriptors */ |
1547 | netif_stop_queue(netdev); | 1712 | netif_stop_queue(netdev); |
1548 | spin_unlock_irqrestore(&adapter->lock, flags); | 1713 | spin_unlock_irqrestore(&adapter->lock, flags); |
1549 | dev_dbg(&adapter->pdev->dev, "tx busy\n"); | 1714 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); |
1550 | return NETDEV_TX_BUSY; | 1715 | return NETDEV_TX_BUSY; |
1551 | } | 1716 | } |
1552 | 1717 | ||
@@ -1588,131 +1753,208 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1588 | } | 1753 | } |
1589 | 1754 | ||
1590 | /* | 1755 | /* |
1591 | * atl1_get_stats - Get System Network Statistics | 1756 | * atl1_intr - Interrupt Handler |
1592 | * @netdev: network interface device structure | 1757 | * @irq: interrupt number |
1593 | * | 1758 | * @data: pointer to a network interface device structure |
1594 | * Returns the address of the device statistics structure. | 1759 | * @pt_regs: CPU registers structure |
1595 | * The statistics are actually updated from the timer callback. | ||
1596 | */ | 1760 | */ |
1597 | static struct net_device_stats *atl1_get_stats(struct net_device *netdev) | 1761 | static irqreturn_t atl1_intr(int irq, void *data) |
1598 | { | 1762 | { |
1599 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1763 | struct atl1_adapter *adapter = netdev_priv(data); |
1600 | return &adapter->net_stats; | 1764 | u32 status; |
1601 | } | 1765 | u8 update_rx; |
1766 | int max_ints = 10; | ||
1602 | 1767 | ||
1603 | /* | 1768 | status = adapter->cmb.cmb->int_stats; |
1604 | * atl1_clean_rx_ring - Free RFD Buffers | 1769 | if (!status) |
1605 | * @adapter: board private structure | 1770 | return IRQ_NONE; |
1606 | */ | ||
1607 | static void atl1_clean_rx_ring(struct atl1_adapter *adapter) | ||
1608 | { | ||
1609 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1610 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1611 | struct atl1_buffer *buffer_info; | ||
1612 | struct pci_dev *pdev = adapter->pdev; | ||
1613 | unsigned long size; | ||
1614 | unsigned int i; | ||
1615 | 1771 | ||
1616 | /* Free all the Rx ring sk_buffs */ | 1772 | update_rx = 0; |
1617 | for (i = 0; i < rfd_ring->count; i++) { | 1773 | |
1618 | buffer_info = &rfd_ring->buffer_info[i]; | 1774 | do { |
1619 | if (buffer_info->dma) { | 1775 | /* clear CMB interrupt status at once */ |
1620 | pci_unmap_page(pdev, | 1776 | adapter->cmb.cmb->int_stats = 0; |
1621 | buffer_info->dma, | 1777 | |
1622 | buffer_info->length, | 1778 | if (status & ISR_GPHY) /* clear phy status */ |
1623 | PCI_DMA_FROMDEVICE); | 1779 | atl1_clear_phy_int(adapter); |
1624 | buffer_info->dma = 0; | 1780 | |
1781 | /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ | ||
1782 | iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); | ||
1783 | |||
1784 | /* check if SMB intr */ | ||
1785 | if (status & ISR_SMB) | ||
1786 | atl1_inc_smb(adapter); | ||
1787 | |||
1788 | /* check if PCIE PHY Link down */ | ||
1789 | if (status & ISR_PHY_LINKDOWN) { | ||
1790 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1791 | "pcie phy link down %x\n", status); | ||
1792 | if (netif_running(adapter->netdev)) { /* reset MAC */ | ||
1793 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1794 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
1795 | return IRQ_HANDLED; | ||
1796 | } | ||
1625 | } | 1797 | } |
1626 | if (buffer_info->skb) { | 1798 | |
1627 | dev_kfree_skb(buffer_info->skb); | 1799 | /* check if DMA read/write error ? */ |
1628 | buffer_info->skb = NULL; | 1800 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { |
1801 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1802 | "pcie DMA r/w error (status = 0x%x)\n", | ||
1803 | status); | ||
1804 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1805 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
1806 | return IRQ_HANDLED; | ||
1629 | } | 1807 | } |
1630 | } | ||
1631 | 1808 | ||
1632 | size = sizeof(struct atl1_buffer) * rfd_ring->count; | 1809 | /* link event */ |
1633 | memset(rfd_ring->buffer_info, 0, size); | 1810 | if (status & ISR_GPHY) { |
1811 | adapter->soft_stats.tx_carrier_errors++; | ||
1812 | atl1_check_for_link(adapter); | ||
1813 | } | ||
1634 | 1814 | ||
1635 | /* Zero out the descriptor ring */ | 1815 | /* transmit event */ |
1636 | memset(rfd_ring->desc, 0, rfd_ring->size); | 1816 | if (status & ISR_CMB_TX) |
1817 | atl1_intr_tx(adapter); | ||
1637 | 1818 | ||
1638 | rfd_ring->next_to_clean = 0; | 1819 | /* rx exception */ |
1639 | atomic_set(&rfd_ring->next_to_use, 0); | 1820 | if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | |
1821 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
1822 | ISR_HOST_RRD_OV | ISR_CMB_RX))) { | ||
1823 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
1824 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
1825 | ISR_HOST_RRD_OV)) | ||
1826 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1827 | "rx exception, ISR = 0x%x\n", status); | ||
1828 | atl1_intr_rx(adapter); | ||
1829 | } | ||
1640 | 1830 | ||
1641 | rrd_ring->next_to_use = 0; | 1831 | if (--max_ints < 0) |
1642 | atomic_set(&rrd_ring->next_to_clean, 0); | 1832 | break; |
1833 | |||
1834 | } while ((status = adapter->cmb.cmb->int_stats)); | ||
1835 | |||
1836 | /* re-enable Interrupt */ | ||
1837 | iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); | ||
1838 | return IRQ_HANDLED; | ||
1643 | } | 1839 | } |
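The handler caps itself at ten passes (max_ints) so a stream of back-to-back CMB events cannot livelock the CPU in interrupt context. Stripped of the device specifics, the loop shape is (helper names hypothetical):

	/* sketch: bounded interrupt-service loop */
	int budget = 10;
	do {
		ack_status(status);		/* clear what we will handle */
		service_events(status);		/* tx/rx/link/error work */
		if (--budget < 0)
			break;			/* bail out; pending events re-fire */
	} while ((status = read_status()) != 0);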
1644 | 1840 | ||
1645 | /* | 1841 | /* |
1646 | * atl1_clean_tx_ring - Free Tx Buffers | 1842 | * atl1_watchdog - Timer Call-back |
1647 | * @adapter: board private structure | 1843 | * @data: pointer to netdev cast into an unsigned long |
1648 | */ | 1844 | */ |
1649 | static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | 1845 | static void atl1_watchdog(unsigned long data) |
1650 | { | 1846 | { |
1651 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1847 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; |
1652 | struct atl1_buffer *buffer_info; | ||
1653 | struct pci_dev *pdev = adapter->pdev; | ||
1654 | unsigned long size; | ||
1655 | unsigned int i; | ||
1656 | 1848 | ||
1657 | /* Free all the Tx ring sk_buffs */ | 1849 | /* Reset the timer */ |
1658 | for (i = 0; i < tpd_ring->count; i++) { | 1850 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
1659 | buffer_info = &tpd_ring->buffer_info[i]; | 1851 | } |
1660 | if (buffer_info->dma) { | ||
1661 | pci_unmap_page(pdev, buffer_info->dma, | ||
1662 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1663 | buffer_info->dma = 0; | ||
1664 | } | ||
1665 | } | ||
1666 | 1852 | ||
1667 | for (i = 0; i < tpd_ring->count; i++) { | 1853 | /* |
1668 | buffer_info = &tpd_ring->buffer_info[i]; | 1854 | * atl1_phy_config - Timer Call-back |
1669 | if (buffer_info->skb) { | 1855 | * @data: pointer to netdev cast into an unsigned long |
1670 | dev_kfree_skb_any(buffer_info->skb); | 1856 | */ |
1671 | buffer_info->skb = NULL; | 1857 | static void atl1_phy_config(unsigned long data) |
1672 | } | 1858 | { |
1673 | } | 1859 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; |
1860 | struct atl1_hw *hw = &adapter->hw; | ||
1861 | unsigned long flags; | ||
1674 | 1862 | ||
1675 | size = sizeof(struct atl1_buffer) * tpd_ring->count; | 1863 | spin_lock_irqsave(&adapter->lock, flags); |
1676 | memset(tpd_ring->buffer_info, 0, size); | 1864 | adapter->phy_timer_pending = false; |
1865 | atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
1866 | atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); | ||
1867 | atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); | ||
1868 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1869 | } | ||
1677 | 1870 | ||
1678 | /* Zero out the descriptor ring */ | 1871 | /* |
1679 | memset(tpd_ring->desc, 0, tpd_ring->size); | 1872 | * atl1_tx_timeout - Respond to a Tx Hang |
1873 | * @netdev: network interface device structure | ||
1874 | */ | ||
1875 | static void atl1_tx_timeout(struct net_device *netdev) | ||
1876 | { | ||
1877 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1878 | /* Do the reset outside of interrupt context */ | ||
1879 | schedule_work(&adapter->tx_timeout_task); | ||
1880 | } | ||
1680 | 1881 | ||
1681 | atomic_set(&tpd_ring->next_to_use, 0); | 1882 | /* |
1682 | atomic_set(&tpd_ring->next_to_clean, 0); | 1883 | * Orphaned vendor comment left intact here: |
1884 | * <vendor comment> | ||
1885 | * If TPD Buffer size equals 0, PCIE DMAR_TO_INT | ||
1886 | * will assert. We do soft reset <0x1400=1> according | ||
1887 | * to the SPEC. BUT, it seems that the PCIE or DMA | ||
1888 | * state-machine will not be reset. DMAR_TO_INT will | ||
1889 | * assert again and again. | ||
1890 | * </vendor comment> | ||
1891 | */ | ||
1892 | static void atl1_tx_timeout_task(struct work_struct *work) | ||
1893 | { | ||
1894 | struct atl1_adapter *adapter = | ||
1895 | container_of(work, struct atl1_adapter, tx_timeout_task); | ||
1896 | struct net_device *netdev = adapter->netdev; | ||
1897 | |||
1898 | netif_device_detach(netdev); | ||
1899 | atl1_down(adapter); | ||
1900 | atl1_up(adapter); | ||
1901 | netif_device_attach(netdev); | ||
1683 | } | 1902 | } |
1684 | 1903 | ||
1685 | /* | 1904 | /* |
1686 | * atl1_free_ring_resources - Free Tx / RX descriptor Resources | 1905 | * atl1_link_chg_task - deal with link change event Out of interrupt context |
1687 | * @adapter: board private structure | ||
1688 | * | ||
1689 | * Free all transmit software resources | ||
1690 | */ | 1906 | */ |
1691 | void atl1_free_ring_resources(struct atl1_adapter *adapter) | 1907 | static void atl1_link_chg_task(struct work_struct *work) |
1692 | { | 1908 | { |
1693 | struct pci_dev *pdev = adapter->pdev; | 1909 | struct atl1_adapter *adapter = |
1694 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1910 | container_of(work, struct atl1_adapter, link_chg_task); |
1695 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 1911 | unsigned long flags; |
1696 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1697 | struct atl1_ring_header *ring_header = &adapter->ring_header; | ||
1698 | 1912 | ||
1699 | atl1_clean_tx_ring(adapter); | 1913 | spin_lock_irqsave(&adapter->lock, flags); |
1700 | atl1_clean_rx_ring(adapter); | 1914 | atl1_check_link(adapter); |
1915 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1916 | } | ||
1701 | 1917 | ||
1702 | kfree(tpd_ring->buffer_info); | 1918 | static void atl1_vlan_rx_register(struct net_device *netdev, |
1703 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | 1919 | struct vlan_group *grp) |
1704 | ring_header->dma); | 1920 | { |
1921 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1922 | unsigned long flags; | ||
1923 | u32 ctrl; | ||
1705 | 1924 | ||
1706 | tpd_ring->buffer_info = NULL; | 1925 | spin_lock_irqsave(&adapter->lock, flags); |
1707 | tpd_ring->desc = NULL; | 1926 | /* atl1_irq_disable(adapter); */ |
1708 | tpd_ring->dma = 0; | 1927 | adapter->vlgrp = grp; |
1709 | 1928 | ||
1710 | rfd_ring->buffer_info = NULL; | 1929 | if (grp) { |
1711 | rfd_ring->desc = NULL; | 1930 | /* enable VLAN tag insert/strip */ |
1712 | rfd_ring->dma = 0; | 1931 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); |
1932 | ctrl |= MAC_CTRL_RMV_VLAN; | ||
1933 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1934 | } else { | ||
1935 | /* disable VLAN tag insert/strip */ | ||
1936 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1937 | ctrl &= ~MAC_CTRL_RMV_VLAN; | ||
1938 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1939 | } | ||
1713 | 1940 | ||
1714 | rrd_ring->desc = NULL; | 1941 | /* atl1_irq_enable(adapter); */ |
1715 | rrd_ring->dma = 0; | 1942 | spin_unlock_irqrestore(&adapter->lock, flags); |
1943 | } | ||
1944 | |||
1945 | static void atl1_restore_vlan(struct atl1_adapter *adapter) | ||
1946 | { | ||
1947 | atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); | ||
1948 | } | ||
1949 | |||
1950 | int atl1_reset(struct atl1_adapter *adapter) | ||
1951 | { | ||
1952 | int ret; | ||
1953 | |||
1954 | ret = atl1_reset_hw(&adapter->hw); | ||
1955 | if (ret != ATL1_SUCCESS) | ||
1956 | return ret; | ||
1957 | return atl1_init_hw(&adapter->hw); | ||
1716 | } | 1958 | } |
1717 | 1959 | ||
1718 | s32 atl1_up(struct atl1_adapter *adapter) | 1960 | s32 atl1_up(struct atl1_adapter *adapter) |
@@ -1723,6 +1965,7 @@ s32 atl1_up(struct atl1_adapter *adapter) | |||
1723 | 1965 | ||
1724 | /* hardware has been reset, we need to reload some things */ | 1966 | /* hardware has been reset, we need to reload some things */ |
1725 | atl1_set_multi(netdev); | 1967 | atl1_set_multi(netdev); |
1968 | atl1_init_ring_ptrs(adapter); | ||
1726 | atl1_restore_vlan(adapter); | 1969 | atl1_restore_vlan(adapter); |
1727 | err = atl1_alloc_rx_buffers(adapter); | 1970 | err = atl1_alloc_rx_buffers(adapter); |
1728 | if (unlikely(!err)) /* no RX BUFFER allocated */ | 1971 | if (unlikely(!err)) /* no RX BUFFER allocated */ |
@@ -1750,11 +1993,6 @@ s32 atl1_up(struct atl1_adapter *adapter) | |||
1750 | atl1_check_link(adapter); | 1993 | atl1_check_link(adapter); |
1751 | return 0; | 1994 | return 0; |
1752 | 1995 | ||
1753 | /* FIXME: unreachable code! -- CHS */ | ||
1754 | /* free irq disable any interrupt */ | ||
1755 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1756 | free_irq(adapter->pdev->irq, netdev); | ||
1757 | |||
1758 | err_up: | 1996 | err_up: |
1759 | pci_disable_msi(adapter->pdev); | 1997 | pci_disable_msi(adapter->pdev); |
1760 | /* free rx_buffers */ | 1998 | /* free rx_buffers */ |
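
The err_up label is the standard kernel goto-unwind idiom: each failure point jumps to a label that releases only what has been acquired so far. A condensed sketch of the shape of atl1_up()'s error path (details elided; note that atl1_alloc_rx_buffers() returns the number of buffers allocated, so zero is the failure case):

	s32 sketch_up(struct atl1_adapter *adapter)
	{
		int err = atl1_alloc_rx_buffers(adapter);
		if (unlikely(!err)) {		/* no RX buffer allocated */
			err = -ENOMEM;		/* error code assumed */
			goto err_up;
		}
		/* ... configure hardware, request the irq, start the queue ... */
		return 0;

	err_up:
		pci_disable_msi(adapter->pdev);
		atl1_clean_rx_ring(adapter);	/* free any rx buffers */
		return err;
	}
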
@@ -1786,172 +2024,6 @@ void atl1_down(struct atl1_adapter *adapter) | |||
1786 | } | 2024 | } |
1787 | 2025 | ||
1788 | /* | 2026 | /* |
1789 | * atl1_change_mtu - Change the Maximum Transfer Unit | ||
1790 | * @netdev: network interface device structure | ||
1791 | * @new_mtu: new value for maximum frame size | ||
1792 | * | ||
1793 | * Returns 0 on success, negative on failure | ||
1794 | */ | ||
1795 | static int atl1_change_mtu(struct net_device *netdev, int new_mtu) | ||
1796 | { | ||
1797 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1798 | int old_mtu = netdev->mtu; | ||
1799 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
1800 | |||
1801 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | ||
1802 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
1803 | dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); | ||
1804 | return -EINVAL; | ||
1805 | } | ||
1806 | |||
1807 | adapter->hw.max_frame_size = max_frame; | ||
1808 | adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; | ||
1809 | adapter->rx_buffer_len = (max_frame + 7) & ~7; | ||
1810 | adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
1811 | |||
1812 | netdev->mtu = new_mtu; | ||
1813 | if ((old_mtu != new_mtu) && netif_running(netdev)) { | ||
1814 | atl1_down(adapter); | ||
1815 | atl1_up(adapter); | ||
1816 | } | ||
1817 | |||
1818 | return 0; | ||
1819 | } | ||
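
The threshold arithmetic above is easiest to check with the default MTU. Assuming the named constants are the standard Ethernet sizes (14-byte header, 4-byte FCS), a small user-space check:

	#include <stdio.h>

	int main(void)
	{
		int new_mtu = 1500;
		int max_frame = new_mtu + 14 + 4;	/* header + FCS sizes assumed */

		printf("max_frame        = %d\n", max_frame);		       /* 1518 */
		printf("tx_jumbo_task_th = %d\n", (max_frame + 7) >> 3);       /* 190: 8-byte units */
		printf("rx_buffer_len    = %d\n", (max_frame + 7) & ~7);       /* 1520: aligned up to 8 */
		printf("rx_jumbo_th      = %d\n", ((max_frame + 7) & ~7) / 8); /* 190 */
		return 0;
	}
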
1820 | |||
1821 | /* | ||
1822 | * atl1_set_mac - Change the Ethernet Address of the NIC | ||
1823 | * @netdev: network interface device structure | ||
1824 | * @p: pointer to an address structure | ||
1825 | * | ||
1826 | * Returns 0 on success, negative on failure | ||
1827 | */ | ||
1828 | static int atl1_set_mac(struct net_device *netdev, void *p) | ||
1829 | { | ||
1830 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1831 | struct sockaddr *addr = p; | ||
1832 | |||
1833 | if (netif_running(netdev)) | ||
1834 | return -EBUSY; | ||
1835 | |||
1836 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1837 | return -EADDRNOTAVAIL; | ||
1838 | |||
1839 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1840 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
1841 | |||
1842 | atl1_set_mac_addr(&adapter->hw); | ||
1843 | return 0; | ||
1844 | } | ||
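
is_valid_ether_addr() is what gates the address change: it rejects the all-zero address and any address with the multicast/group bit set. Roughly equivalent logic, as a sketch of what the helper checks rather than its actual implementation:

	/* Sketch: a usable unicast MAC must be non-zero and must not have
	 * the group bit (bit 0 of the first octet) set. */
	static int sketch_valid_ether_addr(const unsigned char *addr)
	{
		int is_zero = !(addr[0] | addr[1] | addr[2] |
				addr[3] | addr[4] | addr[5]);
		int is_multicast = addr[0] & 0x01;

		return !is_zero && !is_multicast;
	}
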
1845 | |||
1846 | /* | ||
1847 | * atl1_watchdog - Timer Call-back | ||
1848 | * @data: pointer to netdev cast into an unsigned long | ||
1849 | */ | ||
1850 | static void atl1_watchdog(unsigned long data) | ||
1851 | { | ||
1852 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1853 | |||
1854 | /* Reset the timer */ | ||
1855 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1856 | } | ||
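
The watchdog only re-arms itself here; the setup that binds the callback and its adapter argument happens at probe time (assumed; not part of this hunk). A sketch of that wiring with the timer API of this kernel generation:

	#include <linux/timer.h>

	/* Assumed init-time wiring: bind callback and argument, then arm. */
	setup_timer(&adapter->watchdog_timer, atl1_watchdog,
		    (unsigned long)adapter);
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
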
1857 | |||
1858 | static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
1859 | { | ||
1860 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1861 | u16 result; | ||
1862 | |||
1863 | atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); | ||
1864 | |||
1865 | return result; | ||
1866 | } | ||
1867 | |||
1868 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) | ||
1869 | { | ||
1870 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1871 | |||
1872 | atl1_write_phy_reg(&adapter->hw, reg_num, val); | ||
1873 | } | ||
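
These two helpers exist to back the generic MII library: generic_mii_ioctl() (used just below) calls through a struct mii_if_info that points at them. A sketch of the assumed initialization; the 0x1f masks reflect the 5-bit MII register and PHY address spaces, which is also why mdio_read() masks reg_num:

	#include <linux/mii.h>

	/* Assumed init-time wiring for the generic MII library. */
	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = mdio_read;
	adapter->mii.mdio_write = mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = 0x1f;
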
1874 | |||
1875 | /* | ||
1876 | * atl1_mii_ioctl - handle MII ioctls under the adapter lock | ||
1877 | * @netdev: network interface device structure | ||
1878 | * @ifr: pointer to the MII ioctl request | ||
1879 | * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG) | ||
1880 | */ | ||
1881 | static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1882 | { | ||
1883 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1884 | unsigned long flags; | ||
1885 | int retval; | ||
1886 | |||
1887 | if (!netif_running(netdev)) | ||
1888 | return -EINVAL; | ||
1889 | |||
1890 | spin_lock_irqsave(&adapter->lock, flags); | ||
1891 | retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); | ||
1892 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1893 | |||
1894 | return retval; | ||
1895 | } | ||
1896 | |||
1897 | /* | ||
1898 | * atl1_ioctl - dispatch supported ioctl commands | ||
1899 | * @netdev: network interface device structure | ||
1900 | * @ifr: pointer to the ioctl request | ||
1901 | * @cmd: ioctl command; only the MII commands are supported | ||
1902 | */ | ||
1903 | static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1904 | { | ||
1905 | switch (cmd) { | ||
1906 | case SIOCGMIIPHY: | ||
1907 | case SIOCGMIIREG: | ||
1908 | case SIOCSMIIREG: | ||
1909 | return atl1_mii_ioctl(netdev, ifr, cmd); | ||
1910 | default: | ||
1911 | return -EOPNOTSUPP; | ||
1912 | } | ||
1913 | } | ||
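
From user space these commands arrive through an ordinary socket ioctl. A minimal sketch that reads MII register 1 (BMSR) via the path handled above; the interface name "eth0" is an assumption:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills in mii->phy_id */
			mii->reg_num = MII_BMSR;
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR = 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}
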
1914 | |||
1915 | /* | ||
1916 | * atl1_tx_timeout - Respond to a Tx Hang | ||
1917 | * @netdev: network interface device structure | ||
1918 | */ | ||
1919 | static void atl1_tx_timeout(struct net_device *netdev) | ||
1920 | { | ||
1921 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1922 | /* Do the reset outside of interrupt context */ | ||
1923 | schedule_work(&adapter->tx_timeout_task); | ||
1924 | } | ||
1925 | |||
1926 | /* | ||
1927 | * atl1_phy_config - Timer Call-back | ||
1928 | * @data: pointer to netdev cast into an unsigned long | ||
1929 | */ | ||
1930 | static void atl1_phy_config(unsigned long data) | ||
1931 | { | ||
1932 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1933 | struct atl1_hw *hw = &adapter->hw; | ||
1934 | unsigned long flags; | ||
1935 | |||
1936 | spin_lock_irqsave(&adapter->lock, flags); | ||
1937 | adapter->phy_timer_pending = false; | ||
1938 | atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
1939 | atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); | ||
1940 | atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); | ||
1941 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1942 | } | ||
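
atl1_check_link() is the assumed caller here: instead of writing the PHY directly from the interrupt path, it arms this one-shot timer so the register writes happen after a delay and outside irq context. A sketch of that arming pattern, inferred from the phy_timer_pending flag the callback clears:

	/* Assumed caller-side pattern: arm the one-shot PHY config timer. */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);  /* delay assumed */
	}
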
1943 | |||
1944 | int atl1_reset(struct atl1_adapter *adapter) | ||
1945 | { | ||
1946 | int ret; | ||
1947 | |||
1948 | ret = atl1_reset_hw(&adapter->hw); | ||
1949 | if (ret != ATL1_SUCCESS) | ||
1950 | return ret; | ||
1951 | return atl1_init_hw(&adapter->hw); | ||
1952 | } | ||
1953 | |||
1954 | /* | ||
1955 | * atl1_open - Called when a network interface is made active | 2027 | * atl1_open - Called when a network interface is made active |
1956 | * @netdev: network interface device structure | 2028 | * @netdev: network interface device structure |
1957 | * | 2029 | * |
@@ -2003,77 +2075,113 @@ static int atl1_close(struct net_device *netdev) | |||
2003 | return 0; | 2075 | return 0; |
2004 | } | 2076 | } |
2005 | 2077 | ||
2006 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2078 | #ifdef CONFIG_PM |
2007 | static void atl1_poll_controller(struct net_device *netdev) | 2079 | static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) |
2008 | { | ||
2009 | disable_irq(netdev->irq); | ||
2010 | atl1_intr(netdev->irq, netdev); | ||
2011 | enable_irq(netdev->irq); | ||
2012 | } | ||
2013 | #endif | ||
2014 | |||
2015 | /* | ||
2016 | * If the TPD buffer size equals 0, PCIE DMAR_TO_INT | ||
2017 | * will assert. We do a soft reset <0x1400=1> according | ||
2018 | * to the SPEC, BUT it seems that the PCIE or DMA | ||
2019 | * state machine will not be reset, so DMAR_TO_INT | ||
2020 | * asserts again and again. | ||
2021 | */ | ||
2022 | static void atl1_tx_timeout_task(struct work_struct *work) | ||
2023 | { | 2080 | { |
2024 | struct atl1_adapter *adapter = | 2081 | struct net_device *netdev = pci_get_drvdata(pdev); |
2025 | container_of(work, struct atl1_adapter, tx_timeout_task); | 2082 | struct atl1_adapter *adapter = netdev_priv(netdev); |
2026 | struct net_device *netdev = adapter->netdev; | 2083 | struct atl1_hw *hw = &adapter->hw; |
2084 | u32 ctrl = 0; | ||
2085 | u32 wufc = adapter->wol; | ||
2027 | 2086 | ||
2028 | netif_device_detach(netdev); | 2087 | netif_device_detach(netdev); |
2029 | atl1_down(adapter); | 2088 | if (netif_running(netdev)) |
2030 | atl1_up(adapter); | 2089 | atl1_down(adapter); |
2031 | netif_device_attach(netdev); | ||
2032 | } | ||
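
For reference, the soft reset the comment refers to by "<0x1400=1>" amounts to a single register write; in this driver 0x1400 is REG_MASTER_CTRL and bit 0 is the soft-reset request (names assumed here; the actual sequence lives in atl1_reset_hw()):

	/* Sketch of the soft reset described above (register/bit names assumed). */
	iowrite32(MASTER_CTRL_SOFT_RST, adapter->hw.hw_addr + REG_MASTER_CTRL);

Because that reset does not reliably clear the PCIe/DMA state machine, the task above instead tears the interface all the way down and brings it back up.
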
2033 | 2090 | ||
2034 | /* | 2091 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); |
2035 | * atl1_link_chg_task - deal with a link change event outside of interrupt context | 2092 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); |
2036 | */ | 2093 | if (ctrl & BMSR_LSTATUS) |
2037 | static void atl1_link_chg_task(struct work_struct *work) | 2094 | wufc &= ~ATL1_WUFC_LNKC; |
2038 | { | ||
2039 | struct atl1_adapter *adapter = | ||
2040 | container_of(work, struct atl1_adapter, link_chg_task); | ||
2041 | unsigned long flags; | ||
2042 | 2095 | ||
2043 | spin_lock_irqsave(&adapter->lock, flags); | 2096 | /* reduce speed to 10/100M */ |
2044 | atl1_check_link(adapter); | 2097 | if (wufc) { |
2045 | spin_unlock_irqrestore(&adapter->lock, flags); | 2098 | atl1_phy_enter_power_saving(hw); |
2099 | /* on resume, let the driver re-setup the link */ | ||
2100 | hw->phy_configured = false; | ||
2101 | atl1_set_mac_addr(hw); | ||
2102 | atl1_set_multi(netdev); | ||
2103 | |||
2104 | ctrl = 0; | ||
2105 | /* turn on magic packet wol */ | ||
2106 | if (wufc & ATL1_WUFC_MAG) | ||
2107 | ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2108 | |||
2109 | /* turn on Link change WOL */ | ||
2110 | if (wufc & ATL1_WUFC_LNKC) | ||
2111 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
2112 | iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); | ||
2113 | |||
2114 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
2115 | ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
2116 | ctrl &= ~MAC_CTRL_DBG; | ||
2117 | ctrl &= ~MAC_CTRL_PROMIS_EN; | ||
2118 | if (wufc & ATL1_WUFC_MC) | ||
2119 | ctrl |= MAC_CTRL_MC_ALL_EN; | ||
2120 | else | ||
2121 | ctrl &= ~MAC_CTRL_MC_ALL_EN; | ||
2122 | |||
2123 | /* turn on broadcast mode if wake on-BC is enabled */ | ||
2124 | if (wufc & ATL1_WUFC_BC) | ||
2125 | ctrl |= MAC_CTRL_BC_EN; | ||
2126 | else | ||
2127 | ctrl &= ~MAC_CTRL_BC_EN; | ||
2128 | |||
2129 | /* enable RX */ | ||
2130 | ctrl |= MAC_CTRL_RX_EN; | ||
2131 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | ||
2132 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
2133 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
2134 | } else { | ||
2135 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | ||
2136 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2137 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2138 | } | ||
2139 | |||
2140 | pci_save_state(pdev); | ||
2141 | pci_disable_device(pdev); | ||
2142 | |||
2143 | pci_set_power_state(pdev, PCI_D3hot); | ||
2144 | |||
2145 | return 0; | ||
2046 | } | 2146 | } |
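
adapter->wol, read into wufc above, is a mask of ATL1_WUFC_* bits that is normally populated from ethtool's generic WAKE_* flags. A sketch of that mapping as it would appear in a set_wol handler (assumed; the ethtool code is not part of this diff):

	/* Assumed ethtool set_wol mapping onto the ATL1_WUFC_* mask. */
	adapter->wol = 0;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATL1_WUFC_MAG;
	if (wol->wolopts & WAKE_PHY)		/* wake on link change */
		adapter->wol |= ATL1_WUFC_LNKC;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= ATL1_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= ATL1_WUFC_BC;

With magic-packet wake enabled (e.g. ethtool -s eth0 wol g), the suspend path keeps the receiver running at reduced speed and programs REG_WOL_CTRL before entering D3.
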
2047 | 2147 | ||
2048 | /* | 2148 | static int atl1_resume(struct pci_dev *pdev) |
2049 | * atl1_pcie_patch - Patch for PCIE module | ||
2050 | */ | ||
2051 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | ||
2052 | { | 2149 | { |
2053 | u32 value; | 2150 | struct net_device *netdev = pci_get_drvdata(pdev); |
2054 | value = 0x6500; | 2151 | struct atl1_adapter *adapter = netdev_priv(netdev); |
2055 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | 2152 | u32 ret_val; |
2056 | /* pcie flow control mode change */ | 2153 | |
2057 | value = ioread32(adapter->hw.hw_addr + 0x1008); | 2154 | pci_set_power_state(pdev, 0); |
2058 | value |= 0x8000; | 2155 | pci_restore_state(pdev); |
2059 | iowrite32(value, adapter->hw.hw_addr + 0x1008); | 2156 | |
2157 | ret_val = pci_enable_device(pdev); | ||
2158 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2159 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2160 | |||
2161 | iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); | ||
2162 | atl1_reset(adapter); | ||
2163 | |||
2164 | if (netif_running(netdev)) | ||
2165 | atl1_up(adapter); | ||
2166 | netif_device_attach(netdev); | ||
2167 | |||
2168 | atl1_via_workaround(adapter); | ||
2169 | |||
2170 | return 0; | ||
2060 | } | 2171 | } |
2172 | #else | ||
2173 | #define atl1_suspend NULL | ||
2174 | #define atl1_resume NULL | ||
2175 | #endif | ||
2061 | 2176 | ||
2062 | /* | 2177 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2063 | * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 | 2178 | static void atl1_poll_controller(struct net_device *netdev) |
2064 | * in the PCI Command register is found set. | ||
2065 | * This function clears that bit to re-enable INTx interrupts. | ||
2066 | * Brackett, 2006/03/15 | ||
2067 | */ | ||
2068 | static void atl1_via_workaround(struct atl1_adapter *adapter) | ||
2069 | { | 2179 | { |
2070 | unsigned long value; | 2180 | disable_irq(netdev->irq); |
2071 | 2181 | atl1_intr(netdev->irq, netdev); | |
2072 | value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); | 2182 | enable_irq(netdev->irq); |
2073 | if (value & PCI_COMMAND_INTX_DISABLE) | ||
2074 | value &= ~PCI_COMMAND_INTX_DISABLE; | ||
2075 | iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); | ||
2076 | } | 2183 | } |
2184 | #endif | ||
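
atl1_via_workaround() pokes PCI_COMMAND through the BAR-mapped register window, reading 16 bits but writing 32. The same fix expressed with the standard config-space accessors, as a sketch of an equivalent (PCI_COMMAND is a 16-bit register):

	#include <linux/pci.h>

	/* Sketch: equivalent VIA INTx workaround via config-space accessors. */
	static void sketch_via_workaround(struct pci_dev *pdev)
	{
		u16 cmd;

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE) {
			cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, cmd);
		}
	}
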
2077 | 2185 | ||
2078 | /* | 2186 | /* |
2079 | * atl1_probe - Device Initialization Routine | 2187 | * atl1_probe - Device Initialization Routine |
@@ -2087,7 +2195,7 @@ static void atl1_via_workaround(struct atl1_adapter *adapter) | |||
2087 | * and a hardware reset occur. | 2195 | * and a hardware reset occur. |
2088 | */ | 2196 | */ |
2089 | static int __devinit atl1_probe(struct pci_dev *pdev, | 2197 | static int __devinit atl1_probe(struct pci_dev *pdev, |
2090 | const struct pci_device_id *ent) | 2198 | const struct pci_device_id *ent) |
2091 | { | 2199 | { |
2092 | struct net_device *netdev; | 2200 | struct net_device *netdev; |
2093 | struct atl1_adapter *adapter; | 2201 | struct atl1_adapter *adapter; |
@@ -2141,7 +2249,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, | |||
2141 | } | 2249 | } |
2142 | /* get device revision number */ | 2250 | /* get device revision number */ |
2143 | adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + | 2251 | adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + |
2144 | (REG_MASTER_CTRL + 2)); | 2252 | (REG_MASTER_CTRL + 2)); |
2145 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); | 2253 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); |
2146 | 2254 | ||
2147 | /* set default ring resource counts */ | 2255 | /* set default ring resource counts */ |
@@ -2294,7 +2402,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev) | |||
2294 | * address, we need to save the permanent one. | 2402 | * address, we need to save the permanent one. |
2295 | */ | 2403 | */ |
2296 | if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { | 2404 | if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { |
2297 | memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN); | 2405 | memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, |
2406 | ETH_ALEN); | ||
2298 | atl1_set_mac_addr(&adapter->hw); | 2407 | atl1_set_mac_addr(&adapter->hw); |
2299 | } | 2408 | } |
2300 | 2409 | ||
@@ -2306,112 +2415,11 @@ static void __devexit atl1_remove(struct pci_dev *pdev) | |||
2306 | pci_disable_device(pdev); | 2415 | pci_disable_device(pdev); |
2307 | } | 2416 | } |
2308 | 2417 | ||
2309 | #ifdef CONFIG_PM | ||
2310 | static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2311 | { | ||
2312 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2313 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2314 | struct atl1_hw *hw = &adapter->hw; | ||
2315 | u32 ctrl = 0; | ||
2316 | u32 wufc = adapter->wol; | ||
2317 | |||
2318 | netif_device_detach(netdev); | ||
2319 | if (netif_running(netdev)) | ||
2320 | atl1_down(adapter); | ||
2321 | |||
2322 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2323 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2324 | if (ctrl & BMSR_LSTATUS) | ||
2325 | wufc &= ~ATL1_WUFC_LNKC; | ||
2326 | |||
2327 | /* reduce speed to 10/100M */ | ||
2328 | if (wufc) { | ||
2329 | atl1_phy_enter_power_saving(hw); | ||
2331 | /* on resume, let the driver re-setup the link */ | ||
2331 | hw->phy_configured = false; | ||
2332 | atl1_set_mac_addr(hw); | ||
2333 | atl1_set_multi(netdev); | ||
2334 | |||
2335 | ctrl = 0; | ||
2336 | /* turn on magic packet wol */ | ||
2337 | if (wufc & ATL1_WUFC_MAG) | ||
2338 | ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2339 | |||
2340 | /* turn on Link change WOL */ | ||
2341 | if (wufc & ATL1_WUFC_LNKC) | ||
2342 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
2343 | iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); | ||
2344 | |||
2345 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
2346 | ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
2347 | ctrl &= ~MAC_CTRL_DBG; | ||
2348 | ctrl &= ~MAC_CTRL_PROMIS_EN; | ||
2349 | if (wufc & ATL1_WUFC_MC) | ||
2350 | ctrl |= MAC_CTRL_MC_ALL_EN; | ||
2351 | else | ||
2352 | ctrl &= ~MAC_CTRL_MC_ALL_EN; | ||
2353 | |||
2354 | /* turn on broadcast mode if wake on-BC is enabled */ | ||
2355 | if (wufc & ATL1_WUFC_BC) | ||
2356 | ctrl |= MAC_CTRL_BC_EN; | ||
2357 | else | ||
2358 | ctrl &= ~MAC_CTRL_BC_EN; | ||
2359 | |||
2360 | /* enable RX */ | ||
2361 | ctrl |= MAC_CTRL_RX_EN; | ||
2362 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | ||
2363 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
2364 | pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */ | ||
2365 | } else { | ||
2366 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | ||
2367 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2368 | pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | ||
2369 | } | ||
2370 | |||
2371 | pci_save_state(pdev); | ||
2372 | pci_disable_device(pdev); | ||
2373 | |||
2374 | pci_set_power_state(pdev, PCI_D3hot); | ||
2375 | |||
2376 | return 0; | ||
2377 | } | ||
2378 | |||
2379 | static int atl1_resume(struct pci_dev *pdev) | ||
2380 | { | ||
2381 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2382 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2383 | u32 ret_val; | ||
2384 | |||
2385 | pci_set_power_state(pdev, 0); | ||
2386 | pci_restore_state(pdev); | ||
2387 | |||
2388 | ret_val = pci_enable_device(pdev); | ||
2389 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2390 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2391 | |||
2392 | iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); | ||
2393 | atl1_reset(adapter); | ||
2394 | |||
2395 | if (netif_running(netdev)) | ||
2396 | atl1_up(adapter); | ||
2397 | netif_device_attach(netdev); | ||
2398 | |||
2399 | atl1_via_workaround(adapter); | ||
2400 | |||
2401 | return 0; | ||
2402 | } | ||
2403 | #else | ||
2404 | #define atl1_suspend NULL | ||
2405 | #define atl1_resume NULL | ||
2406 | #endif | ||
2407 | |||
2408 | static struct pci_driver atl1_driver = { | 2418 | static struct pci_driver atl1_driver = { |
2409 | .name = atl1_driver_name, | 2419 | .name = atl1_driver_name, |
2410 | .id_table = atl1_pci_tbl, | 2420 | .id_table = atl1_pci_tbl, |
2411 | .probe = atl1_probe, | 2421 | .probe = atl1_probe, |
2412 | .remove = __devexit_p(atl1_remove), | 2422 | .remove = __devexit_p(atl1_remove), |
2413 | /* Power Management Hooks */ | ||
2414 | /* probably broken right now -- CHS */ | ||
2415 | .suspend = atl1_suspend, | 2423 | .suspend = atl1_suspend, |
2416 | .resume = atl1_resume | 2424 | .resume = atl1_resume |
2417 | }; | 2425 | }; |
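
The pci_driver above still needs to be registered with the PCI core; the standard module boilerplate for that follows below this hunk in atl1_main.c, so the following is a sketch of the pattern rather than a quote:

	static int __init atl1_init_module(void)
	{
		return pci_register_driver(&atl1_driver);
	}

	static void __exit atl1_exit_module(void)
	{
		pci_unregister_driver(&atl1_driver);
	}

	module_init(atl1_init_module);
	module_exit(atl1_exit_module);
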