author    Jay Cliburn <jacliburn@bellsouth.net>  2008-02-02 20:50:04 -0500
committer Jeff Garzik <jeff@garzik.org>  2008-03-17 07:49:23 -0400
commit    305282ba19f81e571bd6d2dcc10ebb02e59a06ef (patch)
tree      836ea0e55d46d06e690b9b9cd67ce8a6feda9ce6 /drivers/net/atlx/atl1.c
parent    2e5071bce5ce4037ce852a916e8106811e68677b (diff)
atl1: move common functions to atlx files
The future atl2 driver and the existing atl1 driver can share certain
functions and definitions. Move these shareable functions and definitions
out of atl1-specific files and into atlx.c and atlx.h. Some transitory
hackery will be present until atl2 is merged.

Reduce the number of source files by moving ethtool, hw, and param
functions from separate files into atl1_main.c, then rename it to just
atl1.c. Move all atl1-specific definitions from atl1_hw.h to atl1.h.

Finally, clean up to make checkpatch.pl happy.

Signed-off-by: Chris Snook <csnook@redhat.com>
Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/atlx/atl1.c')
-rw-r--r--  drivers/net/atlx/atl1.c  3417
1 file changed, 3417 insertions, 0 deletions
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
new file mode 100644
index 000000000000..a84c97c75c75
--- /dev/null
+++ b/drivers/net/atlx/atl1.c
@@ -0,0 +1,3417 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 *
26 * Contact Information:
27 * Xiong Huang <xiong_huang@attansic.com>
28 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
29 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
30 *
31 * Chris Snook <csnook@redhat.com>
32 * Jay Cliburn <jcliburn@gmail.com>
33 *
34 * This version is adapted from the Attansic reference driver for
35 * inclusion in the Linux kernel. It is currently under heavy development.
36 * A very incomplete list of things that need to be dealt with:
37 *
38 * TODO:
39 * Fix TSO; tx performance is horrible with TSO enabled.
40 * Wake on LAN.
41 * Add more ethtool functions.
42 * Fix abstruse irq enable/disable condition described here:
43 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
44 *
45 * NEEDS TESTING:
46 * VLAN
47 * multicast
48 * promiscuous mode
49 * interrupt coalescing
50 * SMP torture testing
51 */
52
53#include <asm/atomic.h>
54#include <asm/byteorder.h>
55
56#include <linux/compiler.h>
57#include <linux/crc32.h>
58#include <linux/delay.h>
59#include <linux/dma-mapping.h>
60#include <linux/etherdevice.h>
61#include <linux/hardirq.h>
62#include <linux/if_ether.h>
63#include <linux/if_vlan.h>
64#include <linux/in.h>
65#include <linux/interrupt.h>
66#include <linux/ip.h>
67#include <linux/irqflags.h>
68#include <linux/irqreturn.h>
69#include <linux/jiffies.h>
70#include <linux/mii.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <linux/net.h>
74#include <linux/netdevice.h>
75#include <linux/pci.h>
76#include <linux/pci_ids.h>
77#include <linux/pm.h>
78#include <linux/skbuff.h>
79#include <linux/slab.h>
80#include <linux/spinlock.h>
81#include <linux/string.h>
82#include <linux/tcp.h>
83#include <linux/timer.h>
84#include <linux/types.h>
85#include <linux/workqueue.h>
86
87#include <net/checksum.h>
88
89#include "atl1.h"
90
91/* Temporary hack for merging atl1 and atl2 */
92#include "atlx.c"
93
94/*
95 * atl1_pci_tbl - PCI Device ID Table
96 */
97static const struct pci_device_id atl1_pci_tbl[] = {
98 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
99 /* required last entry */
100 {0,}
101};
102MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
103
104/*
105 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
106 * @adapter: board private structure to initialize
107 *
108 * atl1_sw_init initializes the Adapter private data structure.
109 * Fields are initialized based on PCI device information and
110 * OS network device settings (MTU size).
111 */
112static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
113{
114 struct atl1_hw *hw = &adapter->hw;
115 struct net_device *netdev = adapter->netdev;
116
117 hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
118 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
119
120 adapter->wol = 0;
121 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
122 adapter->ict = 50000; /* 100ms */
123 adapter->link_speed = SPEED_0; /* hardware init */
124 adapter->link_duplex = FULL_DUPLEX;
125
126 hw->phy_configured = false;
127 hw->preamble_len = 7;
128 hw->ipgt = 0x60;
129 hw->min_ifg = 0x50;
130 hw->ipgr1 = 0x40;
131 hw->ipgr2 = 0x60;
132 hw->max_retry = 0xf;
133 hw->lcol = 0x37;
134 hw->jam_ipg = 7;
135 hw->rfd_burst = 8;
136 hw->rrd_burst = 8;
137 hw->rfd_fetch_gap = 1;
138 hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
139 hw->rx_jumbo_lkah = 1;
140 hw->rrd_ret_timer = 16;
141 hw->tpd_burst = 4;
142 hw->tpd_fetch_th = 16;
143 hw->txf_burst = 0x100;
144 hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
145 hw->tpd_fetch_gap = 1;
146 hw->rcb_value = atl1_rcb_64;
147 hw->dma_ord = atl1_dma_ord_enh;
148 hw->dmar_block = atl1_dma_req_256;
149 hw->dmaw_block = atl1_dma_req_256;
150 hw->cmb_rrd = 4;
151 hw->cmb_tpd = 4;
152 hw->cmb_rx_timer = 1; /* about 2us */
153 hw->cmb_tx_timer = 1; /* about 2us */
154 hw->smb_timer = 100000; /* about 200ms */
155
156 spin_lock_init(&adapter->lock);
157 spin_lock_init(&adapter->mb_lock);
158
159 return 0;
160}
161
162static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
163{
164 struct atl1_adapter *adapter = netdev_priv(netdev);
165 u16 result;
166
167 atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
168
169 return result;
170}
171
172static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
173 int val)
174{
175 struct atl1_adapter *adapter = netdev_priv(netdev);
176
177 atl1_write_phy_reg(&adapter->hw, reg_num, val);
178}
179
180/*
181 * atl1_mii_ioctl - MII register access ioctl handler
182 * @netdev: network interface device structure
183 * @ifr: interface request structure carrying the MII register arguments
184 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
185 */
186static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
187{
188 struct atl1_adapter *adapter = netdev_priv(netdev);
189 unsigned long flags;
190 int retval;
191
192 if (!netif_running(netdev))
193 return -EINVAL;
194
195 spin_lock_irqsave(&adapter->lock, flags);
196 retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
197 spin_unlock_irqrestore(&adapter->lock, flags);
198
199 return retval;
200}
201
202/*
203 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
204 * @adapter: board private structure
205 *
206 * Return 0 on success, negative on failure
207 */
208s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
209{
210 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
211 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
212 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
213 struct atl1_ring_header *ring_header = &adapter->ring_header;
214 struct pci_dev *pdev = adapter->pdev;
215 int size;
216 u8 offset = 0;
217
218 size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
219 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
220 if (unlikely(!tpd_ring->buffer_info)) {
221 dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
222 goto err_nomem;
223 }
224 rfd_ring->buffer_info =
225 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
226
227 /*
228 * real ring DMA buffer
229 * each ring/block may need up to 8 bytes for alignment, hence the
230 * additional 40 bytes tacked onto the end.
231 */
232 ring_header->size = size =
233 sizeof(struct tx_packet_desc) * tpd_ring->count
234 + sizeof(struct rx_free_desc) * rfd_ring->count
235 + sizeof(struct rx_return_desc) * rrd_ring->count
236 + sizeof(struct coals_msg_block)
237 + sizeof(struct stats_msg_block)
238 + 40;
239
240 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
241 &ring_header->dma);
242 if (unlikely(!ring_header->desc)) {
243 dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
244 goto err_nomem;
245 }
246
247 memset(ring_header->desc, 0, ring_header->size);
248
249 /* init TPD ring */
250 tpd_ring->dma = ring_header->dma;
251 offset = (tpd_ring->dma & 0x7) ? (8 - (tpd_ring->dma & 0x7)) : 0;
252 tpd_ring->dma += offset;
253 tpd_ring->desc = (u8 *) ring_header->desc + offset;
254 tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
255
256 /* init RFD ring */
257 rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
258 offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
259 rfd_ring->dma += offset;
260 rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
261 rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
262
263
264 /* init RRD ring */
265 rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
266 offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
267 rrd_ring->dma += offset;
268 rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
269 rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
270
271
272 /* init CMB */
273 adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
274 offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
275 adapter->cmb.dma += offset;
276 adapter->cmb.cmb = (struct coals_msg_block *)
277 ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
278
279 /* init SMB */
280 adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
281 offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
282 adapter->smb.dma += offset;
283 adapter->smb.smb = (struct stats_msg_block *)
284 ((u8 *) adapter->cmb.cmb +
285 (sizeof(struct coals_msg_block) + offset));
286
287 return 0;
288
289err_nomem:
290 kfree(tpd_ring->buffer_info);
291 return -ENOMEM;
292}
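[Editor's note: the "up to 8 bytes for alignment, hence the additional 40
bytes" comment above covers five blocks (TPD, RFD, RRD, CMB, SMB), each
rounded up to the next 8-byte boundary. A minimal stand-alone sketch of
that rounding, not part of this commit:]

#include <assert.h>
#include <stdint.h>

/* Round a DMA address up to the next 8-byte boundary; equivalent to the
 * offset = (dma & 0x7) ? (8 - (dma & 0x7)) : 0 idiom used above. */
static uint64_t align8(uint64_t dma)
{
	return (dma + 7) & ~(uint64_t)7;
}

static void align8_example(void)
{
	assert(align8(0x1000) == 0x1000);	/* already aligned */
	assert(align8(0x1003) == 0x1008);	/* 5 padding bytes consumed */
}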
293
294static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
295{
296 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
297 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
298 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
299
300 atomic_set(&tpd_ring->next_to_use, 0);
301 atomic_set(&tpd_ring->next_to_clean, 0);
302
303 rfd_ring->next_to_clean = 0;
304 atomic_set(&rfd_ring->next_to_use, 0);
305
306 rrd_ring->next_to_use = 0;
307 atomic_set(&rrd_ring->next_to_clean, 0);
308}
309
310/*
311 * atl1_clean_rx_ring - Free RFD Buffers
312 * @adapter: board private structure
313 */
314static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
315{
316 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
317 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
318 struct atl1_buffer *buffer_info;
319 struct pci_dev *pdev = adapter->pdev;
320 unsigned long size;
321 unsigned int i;
322
323 /* Free all the Rx ring sk_buffs */
324 for (i = 0; i < rfd_ring->count; i++) {
325 buffer_info = &rfd_ring->buffer_info[i];
326 if (buffer_info->dma) {
327 pci_unmap_page(pdev, buffer_info->dma,
328 buffer_info->length, PCI_DMA_FROMDEVICE);
329 buffer_info->dma = 0;
330 }
331 if (buffer_info->skb) {
332 dev_kfree_skb(buffer_info->skb);
333 buffer_info->skb = NULL;
334 }
335 }
336
337 size = sizeof(struct atl1_buffer) * rfd_ring->count;
338 memset(rfd_ring->buffer_info, 0, size);
339
340 /* Zero out the descriptor ring */
341 memset(rfd_ring->desc, 0, rfd_ring->size);
342
343 rfd_ring->next_to_clean = 0;
344 atomic_set(&rfd_ring->next_to_use, 0);
345
346 rrd_ring->next_to_use = 0;
347 atomic_set(&rrd_ring->next_to_clean, 0);
348}
349
350/*
351 * atl1_clean_tx_ring - Free Tx Buffers
352 * @adapter: board private structure
353 */
354static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
355{
356 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
357 struct atl1_buffer *buffer_info;
358 struct pci_dev *pdev = adapter->pdev;
359 unsigned long size;
360 unsigned int i;
361
362 /* Free all the Tx ring sk_buffs */
363 for (i = 0; i < tpd_ring->count; i++) {
364 buffer_info = &tpd_ring->buffer_info[i];
365 if (buffer_info->dma) {
366 pci_unmap_page(pdev, buffer_info->dma,
367 buffer_info->length, PCI_DMA_TODEVICE);
368 buffer_info->dma = 0;
369 }
370 }
371
372 for (i = 0; i < tpd_ring->count; i++) {
373 buffer_info = &tpd_ring->buffer_info[i];
374 if (buffer_info->skb) {
375 dev_kfree_skb_any(buffer_info->skb);
376 buffer_info->skb = NULL;
377 }
378 }
379
380 size = sizeof(struct atl1_buffer) * tpd_ring->count;
381 memset(tpd_ring->buffer_info, 0, size);
382
383 /* Zero out the descriptor ring */
384 memset(tpd_ring->desc, 0, tpd_ring->size);
385
386 atomic_set(&tpd_ring->next_to_use, 0);
387 atomic_set(&tpd_ring->next_to_clean, 0);
388}
389
390/*
391 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
392 * @adapter: board private structure
393 *
394 * Free all transmit and receive software resources
395 */
396void atl1_free_ring_resources(struct atl1_adapter *adapter)
397{
398 struct pci_dev *pdev = adapter->pdev;
399 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
400 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
401 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
402 struct atl1_ring_header *ring_header = &adapter->ring_header;
403
404 atl1_clean_tx_ring(adapter);
405 atl1_clean_rx_ring(adapter);
406
407 kfree(tpd_ring->buffer_info);
408 pci_free_consistent(pdev, ring_header->size, ring_header->desc,
409 ring_header->dma);
410
411 tpd_ring->buffer_info = NULL;
412 tpd_ring->desc = NULL;
413 tpd_ring->dma = 0;
414
415 rfd_ring->buffer_info = NULL;
416 rfd_ring->desc = NULL;
417 rfd_ring->dma = 0;
418
419 rrd_ring->desc = NULL;
420 rrd_ring->dma = 0;
421}
422
423static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
424{
425 u32 value;
426 struct atl1_hw *hw = &adapter->hw;
427 struct net_device *netdev = adapter->netdev;
428 /* Config MAC CTRL Register */
429 value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
430 /* duplex */
431 if (FULL_DUPLEX == adapter->link_duplex)
432 value |= MAC_CTRL_DUPLX;
433 /* speed */
434 value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
435 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
436 MAC_CTRL_SPEED_SHIFT);
437 /* flow control */
438 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
439 /* PAD & CRC */
440 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
441 /* preamble length */
442 value |= (((u32) adapter->hw.preamble_len
443 & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
444 /* vlan */
445 if (adapter->vlgrp)
446 value |= MAC_CTRL_RMV_VLAN;
447 /* rx checksum
448 if (adapter->rx_csum)
449 value |= MAC_CTRL_RX_CHKSUM_EN;
450 */
451 /* filter mode */
452 value |= MAC_CTRL_BC_EN;
453 if (netdev->flags & IFF_PROMISC)
454 value |= MAC_CTRL_PROMIS_EN;
455 else if (netdev->flags & IFF_ALLMULTI)
456 value |= MAC_CTRL_MC_ALL_EN;
457 /* value |= MAC_CTRL_LOOPBACK; */
458 iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
459}
460
461static u32 atl1_check_link(struct atl1_adapter *adapter)
462{
463 struct atl1_hw *hw = &adapter->hw;
464 struct net_device *netdev = adapter->netdev;
465 u32 ret_val;
466 u16 speed, duplex, phy_data;
467 int reconfig = 0;
468
469 /* MII_BMSR must be read twice; the link status bit is latched */
470 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
471 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
472 if (!(phy_data & BMSR_LSTATUS)) {
473 /* link down */
474 if (netif_carrier_ok(netdev)) {
475 /* old link state: Up */
476 dev_info(&adapter->pdev->dev, "link is down\n");
477 adapter->link_speed = SPEED_0;
478 netif_carrier_off(netdev);
479 netif_stop_queue(netdev);
480 }
481 return 0;
482 }
483
484 /* Link Up */
485 ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
486 if (ret_val)
487 return ret_val;
488
489 switch (hw->media_type) {
490 case MEDIA_TYPE_1000M_FULL:
491 if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
492 reconfig = 1;
493 break;
494 case MEDIA_TYPE_100M_FULL:
495 if (speed != SPEED_100 || duplex != FULL_DUPLEX)
496 reconfig = 1;
497 break;
498 case MEDIA_TYPE_100M_HALF:
499 if (speed != SPEED_100 || duplex != HALF_DUPLEX)
500 reconfig = 1;
501 break;
502 case MEDIA_TYPE_10M_FULL:
503 if (speed != SPEED_10 || duplex != FULL_DUPLEX)
504 reconfig = 1;
505 break;
506 case MEDIA_TYPE_10M_HALF:
507 if (speed != SPEED_10 || duplex != HALF_DUPLEX)
508 reconfig = 1;
509 break;
510 }
511
512 /* link speed/duplex matches the configured media type */
513 if (!reconfig) {
514 if (adapter->link_speed != speed
515 || adapter->link_duplex != duplex) {
516 adapter->link_speed = speed;
517 adapter->link_duplex = duplex;
518 atl1_setup_mac_ctrl(adapter);
519 dev_info(&adapter->pdev->dev,
520 "%s link is up %d Mbps %s\n",
521 netdev->name, adapter->link_speed,
522 adapter->link_duplex == FULL_DUPLEX ?
523 "full duplex" : "half duplex");
524 }
525 if (!netif_carrier_ok(netdev)) {
526 /* Link down -> Up */
527 netif_carrier_on(netdev);
528 netif_wake_queue(netdev);
529 }
530 return 0;
531 }
532
533 /* speed/duplex mismatch: take the link down, then reconfigure the PHY */
534 if (netif_carrier_ok(netdev)) {
535 adapter->link_speed = SPEED_0;
536 netif_carrier_off(netdev);
537 netif_stop_queue(netdev);
538 }
539
540 if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
541 hw->media_type != MEDIA_TYPE_1000M_FULL) {
542 switch (hw->media_type) {
543 case MEDIA_TYPE_100M_FULL:
544 phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
545 MII_CR_RESET;
546 break;
547 case MEDIA_TYPE_100M_HALF:
548 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
549 break;
550 case MEDIA_TYPE_10M_FULL:
551 phy_data =
552 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
553 break;
554 default:
555 /* MEDIA_TYPE_10M_HALF: */
556 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
557 break;
558 }
559 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
560 return 0;
561 }
562
563 /* auto-negotiation: start a timer to reconfigure the PHY */
564 if (!adapter->phy_timer_pending) {
565 adapter->phy_timer_pending = true;
566 mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
567 }
568
569 return 0;
570}
571
572/*
573 * atl1_change_mtu - Change the Maximum Transfer Unit
574 * @netdev: network interface device structure
575 * @new_mtu: new value for maximum frame size
576 *
577 * Returns 0 on success, negative on failure
578 */
579static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
580{
581 struct atl1_adapter *adapter = netdev_priv(netdev);
582 int old_mtu = netdev->mtu;
583 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
584
585 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
586 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
587 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
588 return -EINVAL;
589 }
590
591 adapter->hw.max_frame_size = max_frame;
592 adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
593 adapter->rx_buffer_len = (max_frame + 7) & ~7;
594 adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
595
596 netdev->mtu = new_mtu;
597 if ((old_mtu != new_mtu) && netif_running(netdev)) {
598 atl1_down(adapter);
599 atl1_up(adapter);
600 }
601
602 return 0;
603}
604
605static void set_flow_ctrl_old(struct atl1_adapter *adapter)
606{
607 u32 hi, lo, value;
608
609 /* RFD Flow Control */
610 value = adapter->rfd_ring.count;
611 hi = value / 16;
612 if (hi < 2)
613 hi = 2;
614 lo = value * 7 / 8;
615
616 value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
617 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
618 iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
619
620 /* RRD Flow Control */
621 value = adapter->rrd_ring.count;
622 lo = value / 16;
623 hi = value * 7 / 8;
624 if (lo < 2)
625 lo = 2;
626 value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
627 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
628 iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
629}
630
631static void set_flow_ctrl_new(struct atl1_hw *hw)
632{
633 u32 hi, lo, value;
634
635 /* RXF Flow Control */
636 value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
637 lo = value / 16;
638 if (lo < 192)
639 lo = 192;
640 hi = value * 7 / 8;
641 if (hi < lo)
642 hi = lo + 16;
643 value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
644 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
645 iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
646
647 /* RRD Flow Control */
648 value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
649 lo = value / 8;
650 hi = value * 7 / 8;
651 if (lo < 2)
652 lo = 2;
653 if (hi < lo)
654 hi = lo + 3;
655 value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
656 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
657 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
658}
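[Editor's note: both helpers derive pause watermarks from a capacity
value, pausing near 7/8 occupancy and resuming near 1/16. A stand-alone
sketch of set_flow_ctrl_new()'s RXF arithmetic; only the math is taken
from the code above, the sample length is made up:]

#include <stdio.h>

static void rxf_pause_thresholds(unsigned int rxf_len,
				 unsigned int *hi, unsigned int *lo)
{
	*lo = rxf_len / 16;		/* resume watermark, floor of 192 */
	if (*lo < 192)
		*lo = 192;
	*hi = rxf_len * 7 / 8;		/* pause watermark */
	if (*hi < *lo)
		*hi = *lo + 16;
}

int main(void)
{
	unsigned int hi, lo;

	rxf_pause_thresholds(4096, &hi, &lo);
	printf("hi=%u lo=%u\n", hi, lo);	/* prints hi=3584 lo=256 */
	return 0;
}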
659
660/*
661 * atl1_configure - Configure Transmit & Receive Unit after Reset
662 * @adapter: board private structure
663 *
664 * Configure the Tx/Rx unit of the MAC after a reset.
665 */
666static u32 atl1_configure(struct atl1_adapter *adapter)
667{
668 struct atl1_hw *hw = &adapter->hw;
669 u32 value;
670
671 /* clear interrupt status */
672 iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
673
674 /* set MAC Address */
675 value = (((u32) hw->mac_addr[2]) << 24) |
676 (((u32) hw->mac_addr[3]) << 16) |
677 (((u32) hw->mac_addr[4]) << 8) |
678 (((u32) hw->mac_addr[5]));
679 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
680 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
681 iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
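	/*
	 * [Editor's note, not in the original source] Worked example of
	 * the two writes above: for MAC address 00:11:22:33:44:55,
	 * REG_MAC_STA_ADDR     receives 0x22334455 (bytes 2..5) and
	 * REG_MAC_STA_ADDR + 4 receives 0x00000011 (bytes 0..1).
	 */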
682
683 /* tx / rx ring */
684
685 /* HI base address */
686 iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
687 hw->hw_addr + REG_DESC_BASE_ADDR_HI);
688 /* LO base address */
689 iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
690 hw->hw_addr + REG_DESC_RFD_ADDR_LO);
691 iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
692 hw->hw_addr + REG_DESC_RRD_ADDR_LO);
693 iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
694 hw->hw_addr + REG_DESC_TPD_ADDR_LO);
695 iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
696 hw->hw_addr + REG_DESC_CMB_ADDR_LO);
697 iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
698 hw->hw_addr + REG_DESC_SMB_ADDR_LO);
699
700 /* element count */
701 value = adapter->rrd_ring.count;
702 value <<= 16;
703 value += adapter->rfd_ring.count;
704 iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
705 iowrite32(adapter->tpd_ring.count, hw->hw_addr +
706 REG_DESC_TPD_RING_SIZE);
707
708 /* Load Ptr */
709 iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
710
711 /* config Mailbox */
712 value = ((atomic_read(&adapter->tpd_ring.next_to_use)
713 & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
714 ((atomic_read(&adapter->rrd_ring.next_to_clean)
715 & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
716 ((atomic_read(&adapter->rfd_ring.next_to_use)
717 & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
718 iowrite32(value, hw->hw_addr + REG_MAILBOX);
719
720 /* config IPG/IFG */
721 value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
722 << MAC_IPG_IFG_IPGT_SHIFT) |
723 (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
724 << MAC_IPG_IFG_MIFG_SHIFT) |
725 (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
726 << MAC_IPG_IFG_IPGR1_SHIFT) |
727 (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
728 << MAC_IPG_IFG_IPGR2_SHIFT);
729 iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
730
731 /* config Half-Duplex Control */
732 value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
733 (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
734 << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
735 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
736 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
737 (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
738 << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
739 iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
740
741 /* set Interrupt Moderator Timer */
742 iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
743 iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
744
745 /* set Interrupt Clear Timer */
746 iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
747
748 /* set max frame size hw will accept */
749 iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
750
751 /* jumbo size & rrd retirement timer */
752 value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
753 << RXQ_JMBOSZ_TH_SHIFT) |
754 (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
755 << RXQ_JMBO_LKAH_SHIFT) |
756 (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
757 << RXQ_RRD_TIMER_SHIFT);
758 iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
759
760 /* Flow Control */
761 switch (hw->dev_rev) {
762 case 0x8001:
763 case 0x9001:
764 case 0x9002:
765 case 0x9003:
766 set_flow_ctrl_old(adapter);
767 break;
768 default:
769 set_flow_ctrl_new(hw);
770 break;
771 }
772
773 /* config TXQ */
774 value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
775 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
776 (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
777 << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
778 (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
779 << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
780 TXQ_CTRL_EN;
781 iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
782
783 /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
784 value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
785 << TX_JUMBO_TASK_TH_SHIFT) |
786 (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
787 << TX_TPD_MIN_IPG_SHIFT);
788 iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
789
790 /* config RXQ */
791 value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
792 << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
793 (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
794 << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
795 (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
796 << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
797 RXQ_CTRL_EN;
798 iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
799
800 /* config DMA Engine */
801 value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
802 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
803 ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
804 << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
805 DMA_CTRL_DMAW_EN;
806 value |= (u32) hw->dma_ord;
807 if (atl1_rcb_128 == hw->rcb_value)
808 value |= DMA_CTRL_RCB_VALUE;
809 iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
810
811 /* config CMB / SMB */
812 value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
813 hw->cmb_tpd : adapter->tpd_ring.count;
814 value <<= 16;
815 value |= hw->cmb_rrd;
816 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
817 value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
818 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
819 iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
820
821 /* enable CMB / SMB */
822 value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
823 iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
824
825 value = ioread32(adapter->hw.hw_addr + REG_ISR);
826 if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
827 value = 1; /* config failed */
828 else
829 value = 0;
830
831 /* clear all interrupt status */
832 iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
833 iowrite32(0, adapter->hw.hw_addr + REG_ISR);
834 return value;
835}
836
837/*
838 * atl1_pcie_patch - Patch for PCIE module
839 */
840static void atl1_pcie_patch(struct atl1_adapter *adapter)
841{
842 u32 value;
843
844 /* much vendor magic here */
845 value = 0x6500;
846 iowrite32(value, adapter->hw.hw_addr + 0x12FC);
847 /* pcie flow control mode change */
848 value = ioread32(adapter->hw.hw_addr + 0x1008);
849 value |= 0x8000;
850 iowrite32(value, adapter->hw.hw_addr + 0x1008);
851}
852
853/*
854 * On ACPI resume, some VIA motherboards are left with the Interrupt
855 * Disable bit (0x400) set in the PCI Command register.
856 * This function clears that bit so INTx interrupts work again.
857 * Brackett, 2006/03/15
858 */
859static void atl1_via_workaround(struct atl1_adapter *adapter)
860{
861 unsigned long value;
862
863 value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
864 if (value & PCI_COMMAND_INTX_DISABLE)
865 value &= ~PCI_COMMAND_INTX_DISABLE;
866 iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
867}
868
869static void atl1_inc_smb(struct atl1_adapter *adapter)
870{
871 struct stats_msg_block *smb = adapter->smb.smb;
872
873 /* Fill out the OS statistics structure */
874 adapter->soft_stats.rx_packets += smb->rx_ok;
875 adapter->soft_stats.tx_packets += smb->tx_ok;
876 adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
877 adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
878 adapter->soft_stats.multicast += smb->rx_mcast;
879 adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
880 smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
881
882 /* Rx Errors */
883 adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
884 smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
885 smb->rx_rrd_ov + smb->rx_align_err);
886 adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
887 adapter->soft_stats.rx_length_errors += smb->rx_len_err;
888 adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
889 adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
890 adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
891 smb->rx_rxf_ov);
892
893 adapter->soft_stats.rx_pause += smb->rx_pause;
894 adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
895 adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
896
897 /* Tx Errors */
898 adapter->soft_stats.tx_errors += (smb->tx_late_col +
899 smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
900 adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
901 adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
902 adapter->soft_stats.tx_window_errors += smb->tx_late_col;
903
904 adapter->soft_stats.excecol += smb->tx_abort_col;
905 adapter->soft_stats.deffer += smb->tx_defer;
906 adapter->soft_stats.scc += smb->tx_1_col;
907 adapter->soft_stats.mcc += smb->tx_2_col;
908 adapter->soft_stats.latecol += smb->tx_late_col;
909 adapter->soft_stats.tx_underun += smb->tx_underrun;
910 adapter->soft_stats.tx_trunc += smb->tx_trunc;
911 adapter->soft_stats.tx_pause += smb->tx_pause;
912
913 adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
914 adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
915 adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
916 adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
917 adapter->net_stats.multicast = adapter->soft_stats.multicast;
918 adapter->net_stats.collisions = adapter->soft_stats.collisions;
919 adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
920 adapter->net_stats.rx_over_errors =
921 adapter->soft_stats.rx_missed_errors;
922 adapter->net_stats.rx_length_errors =
923 adapter->soft_stats.rx_length_errors;
924 adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
925 adapter->net_stats.rx_frame_errors =
926 adapter->soft_stats.rx_frame_errors;
927 adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
928 adapter->net_stats.rx_missed_errors =
929 adapter->soft_stats.rx_missed_errors;
930 adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
931 adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
932 adapter->net_stats.tx_aborted_errors =
933 adapter->soft_stats.tx_aborted_errors;
934 adapter->net_stats.tx_window_errors =
935 adapter->soft_stats.tx_window_errors;
936 adapter->net_stats.tx_carrier_errors =
937 adapter->soft_stats.tx_carrier_errors;
938}
939
940static void atl1_update_mailbox(struct atl1_adapter *adapter)
941{
942 unsigned long flags;
943 u32 tpd_next_to_use;
944 u32 rfd_next_to_use;
945 u32 rrd_next_to_clean;
946 u32 value;
947
948 spin_lock_irqsave(&adapter->mb_lock, flags);
949
950 tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
951 rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
952 rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
953
954 value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
955 MB_RFD_PROD_INDX_SHIFT) |
956 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
957 MB_RRD_CONS_INDX_SHIFT) |
958 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
959 MB_TPD_PROD_INDX_SHIFT);
960 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
961
962 spin_unlock_irqrestore(&adapter->mb_lock, flags);
963}
964
965static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
966 struct rx_return_desc *rrd, u16 offset)
967{
968 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
969
970 while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
971 rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
972 if (++rfd_ring->next_to_clean == rfd_ring->count) {
973 rfd_ring->next_to_clean = 0;
974 }
975 }
976}
977
978static void atl1_update_rfd_index(struct atl1_adapter *adapter,
979 struct rx_return_desc *rrd)
980{
981 u16 num_buf;
982
983 num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
984 adapter->rx_buffer_len;
985 if (rrd->num_buf == num_buf)
986 /* clean alloc flag for bad rrd */
987 atl1_clean_alloc_flag(adapter, rrd, num_buf);
988}
989
990static void atl1_rx_checksum(struct atl1_adapter *adapter,
991 struct rx_return_desc *rrd, struct sk_buff *skb)
992{
993 struct pci_dev *pdev = adapter->pdev;
994
995 skb->ip_summed = CHECKSUM_NONE;
996
997 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
998 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
999 ERR_FLAG_CODE | ERR_FLAG_OV)) {
1000 adapter->hw_csum_err++;
1001 dev_printk(KERN_DEBUG, &pdev->dev,
1002 "rx checksum error\n");
1003 return;
1004 }
1005 }
1006
1007 /* not IPv4 */
1008 if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
1009 /* checksum is invalid, but it's not an IPv4 pkt, so ok */
1010 return;
1011
1012 /* IPv4 packet */
1013 if (likely(!(rrd->err_flg &
1014 (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
1015 skb->ip_summed = CHECKSUM_UNNECESSARY;
1016 adapter->hw_csum_good++;
1017 return;
1018 }
1019
1020 /* IPv4, but hardware thinks its checksum is wrong */
1021 dev_printk(KERN_DEBUG, &pdev->dev,
1022 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1023 rrd->pkt_flg, rrd->err_flg);
1024 skb->ip_summed = CHECKSUM_COMPLETE;
1025 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1026 adapter->hw_csum_err++;
1027 return;
1028}
1029
1030/*
1031 * atl1_alloc_rx_buffers - Replace used receive buffers
1032 * @adapter: address of board private structure
1033 */
1034static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1035{
1036 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1037 struct pci_dev *pdev = adapter->pdev;
1038 struct page *page;
1039 unsigned long offset;
1040 struct atl1_buffer *buffer_info, *next_info;
1041 struct sk_buff *skb;
1042 u16 num_alloc = 0;
1043 u16 rfd_next_to_use, next_next;
1044 struct rx_free_desc *rfd_desc;
1045
1046 next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
1047 if (++next_next == rfd_ring->count)
1048 next_next = 0;
1049 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1050 next_info = &rfd_ring->buffer_info[next_next];
1051
1052 while (!buffer_info->alloced && !next_info->alloced) {
1053 if (buffer_info->skb) {
1054 buffer_info->alloced = 1;
1055 goto next;
1056 }
1057
1058 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1059
1060 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1061 if (unlikely(!skb)) {
1062 /* Better luck next round */
1063 adapter->net_stats.rx_dropped++;
1064 break;
1065 }
1066
1067 /*
1068 * Make buffer alignment 2 beyond a 16 byte boundary;
1069 * this results in a 16 byte aligned IP header after
1070 * the 14 byte MAC header is removed.
1071 */
1072 skb_reserve(skb, NET_IP_ALIGN);
1073
1074 buffer_info->alloced = 1;
1075 buffer_info->skb = skb;
1076 buffer_info->length = (u16) adapter->rx_buffer_len;
1077 page = virt_to_page(skb->data);
1078 offset = (unsigned long)skb->data & ~PAGE_MASK;
1079 buffer_info->dma = pci_map_page(pdev, page, offset,
1080 adapter->rx_buffer_len,
1081 PCI_DMA_FROMDEVICE);
1082 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1083 rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
1084 rfd_desc->coalese = 0;
1085
1086next:
1087 rfd_next_to_use = next_next;
1088 if (unlikely(++next_next == rfd_ring->count))
1089 next_next = 0;
1090
1091 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1092 next_info = &rfd_ring->buffer_info[next_next];
1093 num_alloc++;
1094 }
1095
1096 if (num_alloc) {
1097 /*
1098 * Force memory writes to complete before letting h/w
1099 * know there are new descriptors to fetch. (Only
1100 * applicable for weak-ordered memory model archs,
1101 * such as IA-64).
1102 */
1103 wmb();
1104 atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
1105 }
1106 return num_alloc;
1107}
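[Editor's note: skb_reserve(skb, NET_IP_ALIGN) above is the standard
receive-alignment trick. Assuming NET_IP_ALIGN == 2, the usual value,
the buffer is laid out as follows:]

/*
 * skb->data (16-byte aligned by the allocator)
 *   +0..+1    skipped by skb_reserve(2)
 *   +2..+15   14-byte Ethernet header
 *   +16       IP header starts on a 16-byte boundary
 */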
1108
1109static void atl1_intr_rx(struct atl1_adapter *adapter)
1110{
1111 int i, count;
1112 u16 length;
1113 u16 rrd_next_to_clean;
1114 u32 value;
1115 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1116 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1117 struct atl1_buffer *buffer_info;
1118 struct rx_return_desc *rrd;
1119 struct sk_buff *skb;
1120
1121 count = 0;
1122
1123 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
1124
1125 while (1) {
1126 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
1127 i = 1;
1128 if (likely(rrd->xsz.valid)) { /* packet valid */
1129chk_rrd:
1130 /* check rrd status */
1131 if (likely(rrd->num_buf == 1))
1132 goto rrd_ok;
1133
1134 /* rrd seems to be bad */
1135 if (unlikely(i-- > 0)) {
1136 /* rrd may not be DMAed completely */
1137 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1138 "incomplete RRD DMA transfer\n");
1139 udelay(1);
1140 goto chk_rrd;
1141 }
1142 /* bad rrd */
1143 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1144 "bad RRD\n");
1145 /* see if the RFD index needs updating */
1146 if (rrd->num_buf > 1)
1147 atl1_update_rfd_index(adapter, rrd);
1148
1149 /* update rrd */
1150 rrd->xsz.valid = 0;
1151 if (++rrd_next_to_clean == rrd_ring->count)
1152 rrd_next_to_clean = 0;
1153 count++;
1154 continue;
1155 } else { /* current rrd has not been updated yet */
1156
1157 break;
1158 }
1159rrd_ok:
1160 /* clear alloc flags up to this rrd's buffer index */
1161 atl1_clean_alloc_flag(adapter, rrd, 0);
1162
1163 buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
1164 if (++rfd_ring->next_to_clean == rfd_ring->count)
1165 rfd_ring->next_to_clean = 0;
1166
1167 /* update rrd next to clean */
1168 if (++rrd_next_to_clean == rrd_ring->count)
1169 rrd_next_to_clean = 0;
1170 count++;
1171
1172 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
1173 if (!(rrd->err_flg &
1174 (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
1175 | ERR_FLAG_LEN))) {
1176 /* packet error, don't need upstream */
1177 buffer_info->alloced = 0;
1178 rrd->xsz.valid = 0;
1179 continue;
1180 }
1181 }
1182
1183 /* Good Receive */
1184 pci_unmap_page(adapter->pdev, buffer_info->dma,
1185 buffer_info->length, PCI_DMA_FROMDEVICE);
1186 skb = buffer_info->skb;
1187 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
1188
1189 skb_put(skb, length - ETH_FCS_LEN);
1190
1191 /* Receive Checksum Offload */
1192 atl1_rx_checksum(adapter, rrd, skb);
1193 skb->protocol = eth_type_trans(skb, adapter->netdev);
1194
1195 if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
1196 u16 vlan_tag = (rrd->vlan_tag >> 4) |
1197 ((rrd->vlan_tag & 7) << 13) |
1198 ((rrd->vlan_tag & 8) << 9);
1199 vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
1200 } else
1201 netif_rx(skb);
1202
1203 /* let protocol layer free skb */
1204 buffer_info->skb = NULL;
1205 buffer_info->alloced = 0;
1206 rrd->xsz.valid = 0;
1207
1208 adapter->netdev->last_rx = jiffies;
1209 }
1210
1211 atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
1212
1213 atl1_alloc_rx_buffers(adapter);
1214
1215 /* update the mailbox if any descriptors were processed */
1216 if (count) {
1217 u32 tpd_next_to_use;
1218 u32 rfd_next_to_use;
1219
1220 spin_lock(&adapter->mb_lock);
1221
1222 tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
1223 rfd_next_to_use =
1224 atomic_read(&adapter->rfd_ring.next_to_use);
1225 rrd_next_to_clean =
1226 atomic_read(&adapter->rrd_ring.next_to_clean);
1227 value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
1228 MB_RFD_PROD_INDX_SHIFT) |
1229 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
1230 MB_RRD_CONS_INDX_SHIFT) |
1231 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
1232 MB_TPD_PROD_INDX_SHIFT);
1233 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
1234 spin_unlock(&adapter->mb_lock);
1235 }
1236}
1237
1238static void atl1_intr_tx(struct atl1_adapter *adapter)
1239{
1240 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1241 struct atl1_buffer *buffer_info;
1242 u16 sw_tpd_next_to_clean;
1243 u16 cmb_tpd_next_to_clean;
1244
1245 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1246 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
1247
1248 while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
1249 struct tx_packet_desc *tpd;
1250
1251 tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
1252 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
1253 if (buffer_info->dma) {
1254 pci_unmap_page(adapter->pdev, buffer_info->dma,
1255 buffer_info->length, PCI_DMA_TODEVICE);
1256 buffer_info->dma = 0;
1257 }
1258
1259 if (buffer_info->skb) {
1260 dev_kfree_skb_irq(buffer_info->skb);
1261 buffer_info->skb = NULL;
1262 }
1263 tpd->buffer_addr = 0;
1264 tpd->desc.data = 0;
1265
1266 if (++sw_tpd_next_to_clean == tpd_ring->count)
1267 sw_tpd_next_to_clean = 0;
1268 }
1269 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
1270
1271 if (netif_queue_stopped(adapter->netdev)
1272 && netif_carrier_ok(adapter->netdev))
1273 netif_wake_queue(adapter->netdev);
1274}
1275
1276static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
1277{
1278 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1279 u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
1280 return ((next_to_clean > next_to_use) ?
1281 next_to_clean - next_to_use - 1 :
1282 tpd_ring->count + next_to_clean - next_to_use - 1);
1283}
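[Editor's note: atl1_tpd_avail() is the usual circular-ring computation
that keeps one slot unused, so next_to_clean == next_to_use can only
mean "empty". A stand-alone sketch with made-up values:]

#include <assert.h>

static unsigned int ring_avail(unsigned int count,
			       unsigned int next_to_clean,
			       unsigned int next_to_use)
{
	return next_to_clean > next_to_use ?
		next_to_clean - next_to_use - 1 :
		count + next_to_clean - next_to_use - 1;
}

static void ring_avail_example(void)
{
	assert(ring_avail(256, 0, 0) == 255);	/* empty, one slot reserved */
	assert(ring_avail(256, 10, 9) == 0);	/* full, producer caught up */
}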
1284
1285static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1286 struct tso_param *tso)
1287{
1288 /* We enter this function holding a spinlock. */
1289 u8 ipofst;
1290 int err;
1291
1292 if (skb_shinfo(skb)->gso_size) {
1293 if (skb_header_cloned(skb)) {
1294 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1295 if (unlikely(err))
1296 return err;
1297 }
1298
1299 if (skb->protocol == htons(ETH_P_IP)) {
1300 struct iphdr *iph = ip_hdr(skb);
1301
1302 iph->tot_len = 0;
1303 iph->check = 0;
1304 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1305 iph->daddr, 0, IPPROTO_TCP, 0);
1306 ipofst = skb_network_offset(skb);
1307 if (ipofst != ETH_HLEN) /* 802.3 frame */
1308 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
1309
1310 tso->tsopl |= (iph->ihl &
1311 CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
1312 tso->tsopl |= (tcp_hdrlen(skb) &
1313 TSO_PARAM_TCPHDRLEN_MASK) <<
1314 TSO_PARAM_TCPHDRLEN_SHIFT;
1315 tso->tsopl |= (skb_shinfo(skb)->gso_size &
1316 TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
1317 tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
1318 tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
1319 tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
1320 return true;
1321 }
1322 }
1323 return false;
1324}
1325
1326static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1327 struct csum_param *csum)
1328{
1329 u8 css, cso;
1330
1331 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1332 cso = skb_transport_offset(skb);
1333 css = cso + skb->csum_offset;
1334 if (unlikely(cso & 0x1)) {
1335 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1336 "payload offset not an even number\n");
1337 return -1;
1338 }
1339 csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
1340 CSUM_PARAM_PLOADOFFSET_SHIFT;
1341 csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
1342 CSUM_PARAM_XSUMOFFSET_SHIFT;
1343 csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
1344 return true;
1345 }
1346
1347 return true;
1348}
1349
1350static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1351 bool tcp_seg)
1352{
1353 /* We enter this function holding a spinlock. */
1354 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1355 struct atl1_buffer *buffer_info;
1356 struct page *page;
1357 int first_buf_len = skb->len;
1358 unsigned long offset;
1359 unsigned int nr_frags;
1360 unsigned int f;
1361 u16 tpd_next_to_use;
1362 u16 proto_hdr_len;
1363 u16 len12;
1364
1365 first_buf_len -= skb->data_len;
1366 nr_frags = skb_shinfo(skb)->nr_frags;
1367 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1368 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1369 /* this TPD slot must not already own an skb */
1370 BUG_ON(buffer_info->skb);
1371 /* the skb is attached to the last TPD, at the end of this function */
1372 buffer_info->skb = NULL;
1373
1374 if (tcp_seg) {
1375 /* TSO/GSO */
1376 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1377 buffer_info->length = proto_hdr_len;
1378 page = virt_to_page(skb->data);
1379 offset = (unsigned long)skb->data & ~PAGE_MASK;
1380 buffer_info->dma = pci_map_page(adapter->pdev, page,
1381 offset, proto_hdr_len,
1382 PCI_DMA_TODEVICE);
1383
1384 if (++tpd_next_to_use == tpd_ring->count)
1385 tpd_next_to_use = 0;
1386
1387 if (first_buf_len > proto_hdr_len) {
1388 int i, m;
1389
1390 len12 = first_buf_len - proto_hdr_len;
1391 m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
1392 ATL1_MAX_TX_BUF_LEN;
1393 for (i = 0; i < m; i++) {
1394 buffer_info =
1395 &tpd_ring->buffer_info[tpd_next_to_use];
1396 buffer_info->skb = NULL;
1397 buffer_info->length =
1398 (ATL1_MAX_TX_BUF_LEN >=
1399 len12) ? ATL1_MAX_TX_BUF_LEN : len12;
1400 len12 -= buffer_info->length;
1401 page = virt_to_page(skb->data +
1402 (proto_hdr_len +
1403 i * ATL1_MAX_TX_BUF_LEN));
1404 offset = (unsigned long)(skb->data +
1405 (proto_hdr_len +
1406 i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
1407 buffer_info->dma = pci_map_page(adapter->pdev,
1408 page, offset, buffer_info->length,
1409 PCI_DMA_TODEVICE);
1410 if (++tpd_next_to_use == tpd_ring->count)
1411 tpd_next_to_use = 0;
1412 }
1413 }
1414 } else {
1415 /* not TSO/GSO */
1416 buffer_info->length = first_buf_len;
1417 page = virt_to_page(skb->data);
1418 offset = (unsigned long)skb->data & ~PAGE_MASK;
1419 buffer_info->dma = pci_map_page(adapter->pdev, page,
1420 offset, first_buf_len, PCI_DMA_TODEVICE);
1421 if (++tpd_next_to_use == tpd_ring->count)
1422 tpd_next_to_use = 0;
1423 }
1424
1425 for (f = 0; f < nr_frags; f++) {
1426 struct skb_frag_struct *frag;
1427 u16 lenf, i, m;
1428
1429 frag = &skb_shinfo(skb)->frags[f];
1430 lenf = frag->size;
1431
1432 m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
1433 for (i = 0; i < m; i++) {
1434 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1435 /* this TPD slot must not already own an skb */
1436 BUG_ON(buffer_info->skb);
1437 buffer_info->skb = NULL;
1438 buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
1439 ATL1_MAX_TX_BUF_LEN : lenf;
1440 lenf -= buffer_info->length;
1441 buffer_info->dma = pci_map_page(adapter->pdev,
1442 frag->page,
1443 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1444 buffer_info->length, PCI_DMA_TODEVICE);
1445
1446 if (++tpd_next_to_use == tpd_ring->count)
1447 tpd_next_to_use = 0;
1448 }
1449 }
1450
1451 /* last tpd's buffer-info */
1452 buffer_info->skb = skb;
1453}
1454
1455static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1456 union tpd_descr *descr)
1457{
1458 /* We enter this function holding a spinlock. */
1459 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1460 int j;
1461 u32 val;
1462 struct atl1_buffer *buffer_info;
1463 struct tx_packet_desc *tpd;
1464 u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1465
1466 for (j = 0; j < count; j++) {
1467 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1468 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
1469 tpd->desc.csum.csumpu = descr->csum.csumpu;
1470 tpd->desc.csum.csumpl = descr->csum.csumpl;
1471 tpd->desc.tso.tsopu = descr->tso.tsopu;
1472 tpd->desc.tso.tsopl = descr->tso.tsopl;
1473 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1474 tpd->desc.data = descr->data;
1475 tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
1476 CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
1477
1478 val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
1479 TSO_PARAM_SEGMENT_MASK;
1480 if (val && !j)
1481 tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
1482
1483 if (j == (count - 1))
1484 tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;
1485
1486 if (++tpd_next_to_use == tpd_ring->count)
1487 tpd_next_to_use = 0;
1488 }
1489 /*
1490 * Force memory writes to complete before letting h/w
1491 * know there are new descriptors to fetch. (Only
1492 * applicable for weak-ordered memory model archs,
1493 * such as IA-64).
1494 */
1495 wmb();
1496
1497 atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
1498}
1499
1500static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1501{
1502 struct atl1_adapter *adapter = netdev_priv(netdev);
1503 int len = skb->len;
1504 int tso;
1505 int count = 1;
1506 int ret_val;
1507 u32 val;
1508 union tpd_descr param;
1509 u16 frag_size;
1510 u16 vlan_tag;
1511 unsigned long flags;
1512 unsigned int nr_frags = 0;
1513 unsigned int mss = 0;
1514 unsigned int f;
1515 unsigned int proto_hdr_len;
1516
1517 len -= skb->data_len;
1518
1519 if (unlikely(skb->len == 0)) {
1520 dev_kfree_skb_any(skb);
1521 return NETDEV_TX_OK;
1522 }
1523
1524 param.data = 0;
1525 param.tso.tsopu = 0;
1526 param.tso.tsopl = 0;
1527 param.csum.csumpu = 0;
1528 param.csum.csumpl = 0;
1529
1530 /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1531 nr_frags = skb_shinfo(skb)->nr_frags;
1532 for (f = 0; f < nr_frags; f++) {
1533 frag_size = skb_shinfo(skb)->frags[f].size;
1534 if (frag_size)
1535 count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
1536 ATL1_MAX_TX_BUF_LEN;
1537 }
1538
1539 /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1540 mss = skb_shinfo(skb)->gso_size;
1541 if (mss) {
1542 if (skb->protocol == htons(ETH_P_IP)) {
1543 proto_hdr_len = (skb_transport_offset(skb) +
1544 tcp_hdrlen(skb));
1545 if (unlikely(proto_hdr_len > len)) {
1546 dev_kfree_skb_any(skb);
1547 return NETDEV_TX_OK;
1548 }
1549 /* need additional TPD ? */
1550 if (proto_hdr_len != len)
1551 count += (len - proto_hdr_len +
1552 ATL1_MAX_TX_BUF_LEN - 1) /
1553 ATL1_MAX_TX_BUF_LEN;
1554 }
1555 }
1556
1557 if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1558 /* Can't get lock - tell upper layer to requeue */
1559 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
1560 return NETDEV_TX_LOCKED;
1561 }
1562
1563 if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1564 /* not enough descriptors */
1565 netif_stop_queue(netdev);
1566 spin_unlock_irqrestore(&adapter->lock, flags);
1567 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
1568 return NETDEV_TX_BUSY;
1569 }
1570
1571 param.data = 0;
1572
1573 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1574 vlan_tag = vlan_tx_tag_get(skb);
1575 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1576 ((vlan_tag >> 9) & 0x8);
1577 param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
1578 param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
1579 CSUM_PARAM_VALAN_SHIFT;
1580 }
1581
1582 tso = atl1_tso(adapter, skb, &param.tso);
1583 if (tso < 0) {
1584 spin_unlock_irqrestore(&adapter->lock, flags);
1585 dev_kfree_skb_any(skb);
1586 return NETDEV_TX_OK;
1587 }
1588
1589 if (!tso) {
1590 ret_val = atl1_tx_csum(adapter, skb, &param.csum);
1591 if (ret_val < 0) {
1592 spin_unlock_irqrestore(&adapter->lock, flags);
1593 dev_kfree_skb_any(skb);
1594 return NETDEV_TX_OK;
1595 }
1596 }
1597
1598 val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
1599 CSUM_PARAM_SEGMENT_MASK;
1600 atl1_tx_map(adapter, skb, 1 == val);
1601 atl1_tx_queue(adapter, count, &param);
1602 netdev->trans_start = jiffies;
1603 spin_unlock_irqrestore(&adapter->lock, flags);
1604 atl1_update_mailbox(adapter);
1605 return NETDEV_TX_OK;
1606}
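[Editor's note: the VLAN tag shuffles in atl1_xmit_frame() (transmit)
and atl1_intr_rx() (receive) are exact inverses: host tag bits [11:0]
map to hardware bits [15:4], bits [15:13] to [2:0], and bit 12 to bit 3.
A stand-alone sketch verifying the round trip:]

#include <assert.h>
#include <stdint.h>

static uint16_t vtag_to_hw(uint16_t t)		/* transmit-side shuffle */
{
	return (uint16_t)((t << 4) | (t >> 13) | ((t >> 9) & 0x8));
}

static uint16_t vtag_from_hw(uint16_t h)	/* receive-side shuffle */
{
	return (uint16_t)((h >> 4) | ((h & 7) << 13) | ((h & 8) << 9));
}

static void vtag_example(void)
{
	assert(vtag_from_hw(vtag_to_hw(0x1234)) == 0x1234);
}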
1607
1608/*
1609 * atl1_intr - Interrupt Handler
1610 * @irq: interrupt number
1611 * @data: pointer to a network interface device structure
1613 */
1614static irqreturn_t atl1_intr(int irq, void *data)
1615{
1616 struct atl1_adapter *adapter = netdev_priv(data);
1617 u32 status;
1618 u8 update_rx;
1619 int max_ints = 10;
1620
1621 status = adapter->cmb.cmb->int_stats;
1622 if (!status)
1623 return IRQ_NONE;
1624
1625 update_rx = 0;
1626
1627 do {
1628 /* clear CMB interrupt status at once */
1629 adapter->cmb.cmb->int_stats = 0;
1630
1631 if (status & ISR_GPHY) /* clear phy status */
1632 atlx_clear_phy_int(adapter);
1633
1634 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
1635 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
1636
1637 /* check if SMB intr */
1638 if (status & ISR_SMB)
1639 atl1_inc_smb(adapter);
1640
1641 /* check if PCIE PHY Link down */
1642 if (status & ISR_PHY_LINKDOWN) {
1643 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1644 "pcie phy link down %x\n", status);
1645 if (netif_running(adapter->netdev)) { /* reset MAC */
1646 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1647 schedule_work(&adapter->pcie_dma_to_rst_task);
1648 return IRQ_HANDLED;
1649 }
1650 }
1651
1652 /* check for DMA read/write errors */
1653 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1654 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1655 "pcie DMA r/w error (status = 0x%x)\n",
1656 status);
1657 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1658 schedule_work(&adapter->pcie_dma_to_rst_task);
1659 return IRQ_HANDLED;
1660 }
1661
1662 /* link event */
1663 if (status & ISR_GPHY) {
1664 adapter->soft_stats.tx_carrier_errors++;
1665 atl1_check_for_link(adapter);
1666 }
1667
1668 /* transmit event */
1669 if (status & ISR_CMB_TX)
1670 atl1_intr_tx(adapter);
1671
1672 /* rx exception */
1673 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1674 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1675 ISR_HOST_RRD_OV | ISR_CMB_RX))) {
1676 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1677 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1678 ISR_HOST_RRD_OV))
1679 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1680 "rx exception, ISR = 0x%x\n", status);
1681 atl1_intr_rx(adapter);
1682 }
1683
1684 if (--max_ints < 0)
1685 break;
1686
1687 } while ((status = adapter->cmb.cmb->int_stats));
1688
1689 /* re-enable Interrupt */
1690 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
1691 return IRQ_HANDLED;
1692}
1693
1694/*
1695 * atl1_watchdog - Timer Call-back
1696 * @data: pointer to the adapter structure cast into an unsigned long
1697 */
1698static void atl1_watchdog(unsigned long data)
1699{
1700 struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1701
1702 /* Reset the timer */
1703 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1704}
1705
1706/*
1707 * atl1_phy_config - Timer Call-back
1708 * @data: pointer to the adapter structure cast into an unsigned long
1709 */
1710static void atl1_phy_config(unsigned long data)
1711{
1712 struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1713 struct atl1_hw *hw = &adapter->hw;
1714 unsigned long flags;
1715
1716 spin_lock_irqsave(&adapter->lock, flags);
1717 adapter->phy_timer_pending = false;
1718 atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1719 atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
1720 atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1721 spin_unlock_irqrestore(&adapter->lock, flags);
1722}
1723
1724/*
1725 * Orphaned vendor comment left intact here:
1726 * <vendor comment>
1727 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
1728 * will assert. We do soft reset <0x1400=1> according
1729 * with the SPEC. BUT, it seemes that PCIE or DMA
1730 * state-machine will not be reset. DMAR_TO_INT will
1731 * assert again and again.
1732 * </vendor comment>
1733 */
1734static void atl1_tx_timeout_task(struct work_struct *work)
1735{
1736 struct atl1_adapter *adapter =
1737 container_of(work, struct atl1_adapter, tx_timeout_task);
1738 struct net_device *netdev = adapter->netdev;
1739
1740 netif_device_detach(netdev);
1741 atl1_down(adapter);
1742 atl1_up(adapter);
1743 netif_device_attach(netdev);
1744}
1745
1746int atl1_reset(struct atl1_adapter *adapter)
1747{
1748 int ret;
1749 ret = atl1_reset_hw(&adapter->hw);
1750 if (ret)
1751 return ret;
1752 return atl1_init_hw(&adapter->hw);
1753}
1754
1755s32 atl1_up(struct atl1_adapter *adapter)
1756{
1757 struct net_device *netdev = adapter->netdev;
1758 int err;
1759 int irq_flags = IRQF_SAMPLE_RANDOM;
1760
1761 /* hardware has been reset, we need to reload some things */
1762 atlx_set_multi(netdev);
1763 atl1_init_ring_ptrs(adapter);
1764 atlx_restore_vlan(adapter);
1765 err = atl1_alloc_rx_buffers(adapter);
1766 if (unlikely(!err))
1767 		/* zero buffers allocated: atl1_alloc_rx_buffers() returns a count */
1768 return -ENOMEM;
1769
1770 if (unlikely(atl1_configure(adapter))) {
1771 err = -EIO;
1772 goto err_up;
1773 }
1774
1775 err = pci_enable_msi(adapter->pdev);
1776 if (err) {
1777 dev_info(&adapter->pdev->dev,
1778 "Unable to enable MSI: %d\n", err);
1779 irq_flags |= IRQF_SHARED;
1780 }
1781
1782 err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
1783 netdev->name, netdev);
1784 if (unlikely(err))
1785 goto err_up;
1786
1787 mod_timer(&adapter->watchdog_timer, jiffies);
1788 atlx_irq_enable(adapter);
1789 atl1_check_link(adapter);
1790 return 0;
1791
1792err_up:
1793 pci_disable_msi(adapter->pdev);
1794 /* free rx_buffers */
1795 atl1_clean_rx_ring(adapter);
1796 return err;
1797}
1798
1799void atl1_down(struct atl1_adapter *adapter)
1800{
1801 struct net_device *netdev = adapter->netdev;
1802
1803 del_timer_sync(&adapter->watchdog_timer);
1804 del_timer_sync(&adapter->phy_config_timer);
1805 adapter->phy_timer_pending = false;
1806
1807 atlx_irq_disable(adapter);
1808 free_irq(adapter->pdev->irq, netdev);
1809 pci_disable_msi(adapter->pdev);
1810 atl1_reset_hw(&adapter->hw);
1811 adapter->cmb.cmb->int_stats = 0;
1812
1813 adapter->link_speed = SPEED_0;
1814 adapter->link_duplex = -1;
1815 netif_carrier_off(netdev);
1816 netif_stop_queue(netdev);
1817
1818 atl1_clean_tx_ring(adapter);
1819 atl1_clean_rx_ring(adapter);
1820}
1821
1822/*
1823 * atl1_open - Called when a network interface is made active
1824 * @netdev: network interface device structure
1825 *
1826 * Returns 0 on success, negative value on failure
1827 *
1828 * The open entry point is called when a network interface is made
1829 * active by the system (IFF_UP). At this point all resources needed
1830 * for transmit and receive operations are allocated, the interrupt
1831 * handler is registered with the OS, the watchdog timer is started,
1832 * and the stack is notified that the interface is ready.
1833 */
1834static int atl1_open(struct net_device *netdev)
1835{
1836 struct atl1_adapter *adapter = netdev_priv(netdev);
1837 int err;
1838
1839 /* allocate transmit descriptors */
1840 err = atl1_setup_ring_resources(adapter);
1841 if (err)
1842 return err;
1843
1844 err = atl1_up(adapter);
1845 if (err)
1846 goto err_up;
1847
1848 return 0;
1849
1850err_up:
1851 atl1_reset(adapter);
1852 return err;
1853}
1854
1855/*
1856 * atl1_close - Disables a network interface
1857 * @netdev: network interface device structure
1858 *
1859 * Returns 0, this is not allowed to fail
1860 *
1861 * The close entry point is called when an interface is de-activated
 1862  * by the OS. The hardware is still under the driver's control, but
1863 * needs to be disabled. A global MAC reset is issued to stop the
1864 * hardware, and all transmit and receive resources are freed.
1865 */
1866static int atl1_close(struct net_device *netdev)
1867{
1868 struct atl1_adapter *adapter = netdev_priv(netdev);
1869 atl1_down(adapter);
1870 atl1_free_ring_resources(adapter);
1871 return 0;
1872}
1873
1874#ifdef CONFIG_PM
1875static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
1876{
1877 struct net_device *netdev = pci_get_drvdata(pdev);
1878 struct atl1_adapter *adapter = netdev_priv(netdev);
1879 struct atl1_hw *hw = &adapter->hw;
1880 u32 ctrl = 0;
1881 u32 wufc = adapter->wol;
1882
1883 netif_device_detach(netdev);
1884 if (netif_running(netdev))
1885 atl1_down(adapter);
1886
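	/* the BMSR link-status bit is latched low; read it twice to get
	 * the current link state */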
1887 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1888 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1889 if (ctrl & BMSR_LSTATUS)
1890 wufc &= ~ATLX_WUFC_LNKC;
1891
1892 /* reduce speed to 10/100M */
1893 if (wufc) {
1894 atl1_phy_enter_power_saving(hw);
1895 		/* on resume, let the driver re-set up the link */
1896 hw->phy_configured = false;
1897 atl1_set_mac_addr(hw);
1898 atlx_set_multi(netdev);
1899
1900 ctrl = 0;
1901 /* turn on magic packet wol */
1902 if (wufc & ATLX_WUFC_MAG)
1903 ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
1904
1905 /* turn on Link change WOL */
1906 if (wufc & ATLX_WUFC_LNKC)
1907 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1908 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
1909
1910 /* turn on all-multi mode if wake on multicast is enabled */
1911 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
1912 ctrl &= ~MAC_CTRL_DBG;
1913 ctrl &= ~MAC_CTRL_PROMIS_EN;
1914 if (wufc & ATLX_WUFC_MC)
1915 ctrl |= MAC_CTRL_MC_ALL_EN;
1916 else
1917 ctrl &= ~MAC_CTRL_MC_ALL_EN;
1918
1919 		/* turn on broadcast mode if wake-on-broadcast is enabled */
1920 if (wufc & ATLX_WUFC_BC)
1921 ctrl |= MAC_CTRL_BC_EN;
1922 else
1923 ctrl &= ~MAC_CTRL_BC_EN;
1924
1925 /* enable RX */
1926 ctrl |= MAC_CTRL_RX_EN;
1927 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
1928 pci_enable_wake(pdev, PCI_D3hot, 1);
1929 pci_enable_wake(pdev, PCI_D3cold, 1);
1930 } else {
1931 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
1932 pci_enable_wake(pdev, PCI_D3hot, 0);
1933 pci_enable_wake(pdev, PCI_D3cold, 0);
1934 }
1935
1936 pci_save_state(pdev);
1937 pci_disable_device(pdev);
1938
1939 pci_set_power_state(pdev, PCI_D3hot);
1940
1941 return 0;
1942}
1943
1944static int atl1_resume(struct pci_dev *pdev)
1945{
1946 struct net_device *netdev = pci_get_drvdata(pdev);
1947 struct atl1_adapter *adapter = netdev_priv(netdev);
1948 u32 err;
1949
1950 pci_set_power_state(pdev, PCI_D0);
1951 pci_restore_state(pdev);
1952
1954 	err = pci_enable_device(pdev);
	if (err)
		return err;
1955 pci_enable_wake(pdev, PCI_D3hot, 0);
1956 pci_enable_wake(pdev, PCI_D3cold, 0);
1957
1958 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
1959 atl1_reset(adapter);
1960
1961 if (netif_running(netdev))
1962 atl1_up(adapter);
1963 netif_device_attach(netdev);
1964
1965 atl1_via_workaround(adapter);
1966
1967 return 0;
1968}
1969#else
1970#define atl1_suspend NULL
1971#define atl1_resume NULL
1972#endif
1973
1974#ifdef CONFIG_NET_POLL_CONTROLLER
1975static void atl1_poll_controller(struct net_device *netdev)
1976{
1977 disable_irq(netdev->irq);
1978 atl1_intr(netdev->irq, netdev);
1979 enable_irq(netdev->irq);
1980}
1981#endif
1982
1983/*
1984 * atl1_probe - Device Initialization Routine
1985 * @pdev: PCI device information struct
1986 * @ent: entry in atl1_pci_tbl
1987 *
1988 * Returns 0 on success, negative on failure
1989 *
1990 * atl1_probe initializes an adapter identified by a pci_dev structure.
1991 * The OS initialization, configuring of the adapter private structure,
1992 * and a hardware reset occur.
1993 */
1994static int __devinit atl1_probe(struct pci_dev *pdev,
1995 const struct pci_device_id *ent)
1996{
1997 struct net_device *netdev;
1998 struct atl1_adapter *adapter;
1999 	static int cards_found;
2000 int err;
2001
2002 err = pci_enable_device(pdev);
2003 if (err)
2004 return err;
2005
2006 /*
2007 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
2008 * shared register for the high 32 bits, so only a single, aligned,
2009 * 4 GB physical address range can be used at a time.
2010 *
2011 * Supporting 64-bit DMA on this hardware is more trouble than it's
2012 * worth. It is far easier to limit to 32-bit DMA than update
2013 * various kernel subsystems to support the mechanics required by a
2014 * fixed-high-32-bit system.
2015 */
2016 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2017 if (err) {
2018 dev_err(&pdev->dev, "no usable DMA configuration\n");
2019 goto err_dma;
2020 }
2021 /*
2022 * Mark all PCI regions associated with PCI device
2023 	 * pdev as being reserved by owner ATLX_DRIVER_NAME
2024 */
2025 err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2026 if (err)
2027 goto err_request_regions;
2028
2029 /*
2030 * Enables bus-mastering on the device and calls
2031 * pcibios_set_master to do the needed arch specific settings
2032 */
2033 pci_set_master(pdev);
2034
2035 netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2036 if (!netdev) {
2037 err = -ENOMEM;
2038 goto err_alloc_etherdev;
2039 }
2040 SET_NETDEV_DEV(netdev, &pdev->dev);
2041
2042 pci_set_drvdata(pdev, netdev);
2043 adapter = netdev_priv(netdev);
2044 adapter->netdev = netdev;
2045 adapter->pdev = pdev;
2046 adapter->hw.back = adapter;
2047
2048 adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2049 if (!adapter->hw.hw_addr) {
2050 err = -EIO;
2051 goto err_pci_iomap;
2052 }
2053 /* get device revision number */
2054 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2055 (REG_MASTER_CTRL + 2));
2056 dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2057
2058 /* set default ring resource counts */
2059 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2060 adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2061
2062 adapter->mii.dev = netdev;
2063 adapter->mii.mdio_read = mdio_read;
2064 adapter->mii.mdio_write = mdio_write;
2065 adapter->mii.phy_id_mask = 0x1f;
2066 adapter->mii.reg_num_mask = 0x1f;
2067
2068 netdev->open = &atl1_open;
2069 netdev->stop = &atl1_close;
2070 netdev->hard_start_xmit = &atl1_xmit_frame;
2071 netdev->get_stats = &atlx_get_stats;
2072 netdev->set_multicast_list = &atlx_set_multi;
2073 netdev->set_mac_address = &atl1_set_mac;
2074 netdev->change_mtu = &atl1_change_mtu;
2075 netdev->do_ioctl = &atlx_ioctl;
2076 netdev->tx_timeout = &atlx_tx_timeout;
2077 netdev->watchdog_timeo = 5 * HZ;
2078#ifdef CONFIG_NET_POLL_CONTROLLER
2079 netdev->poll_controller = atl1_poll_controller;
2080#endif
2081 netdev->vlan_rx_register = atlx_vlan_rx_register;
2082
2083 netdev->ethtool_ops = &atl1_ethtool_ops;
2084 adapter->bd_number = cards_found;
2085
2086 /* setup the private structure */
2087 err = atl1_sw_init(adapter);
2088 if (err)
2089 goto err_common;
2090
2091 netdev->features = NETIF_F_HW_CSUM;
2092 netdev->features |= NETIF_F_SG;
2093 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2094
2095 /*
2096 * FIXME - Until tso performance gets fixed, disable the feature.
2097 * Enable it with ethtool -K if desired.
2098 */
2099 /* netdev->features |= NETIF_F_TSO; */
2100
2101 netdev->features |= NETIF_F_LLTX;
2102
2103 /*
2104 	 * patch for some early revisions of the L1;
2105 	 * the final revision may not need these
2106 	 * patches
2107 */
2108 /* atl1_pcie_patch(adapter); */
2109
2110 /* really reset GPHY core */
2111 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2112
2113 /*
2114 * reset the controller to
2115 * put the device in a known good starting state
2116 */
2117 if (atl1_reset_hw(&adapter->hw)) {
2118 err = -EIO;
2119 goto err_common;
2120 }
2121
2122 /* copy the MAC address out of the EEPROM */
2123 atl1_read_mac_addr(&adapter->hw);
2124 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2125
2126 if (!is_valid_ether_addr(netdev->dev_addr)) {
2127 err = -EIO;
2128 goto err_common;
2129 }
2130
2131 atl1_check_options(adapter);
2132
2133 /* pre-init the MAC, and setup link */
2134 err = atl1_init_hw(&adapter->hw);
2135 if (err) {
2136 err = -EIO;
2137 goto err_common;
2138 }
2139
2140 atl1_pcie_patch(adapter);
2141 /* assume we have no link for now */
2142 netif_carrier_off(netdev);
2143 netif_stop_queue(netdev);
2144
2145 init_timer(&adapter->watchdog_timer);
2146 adapter->watchdog_timer.function = &atl1_watchdog;
2147 adapter->watchdog_timer.data = (unsigned long)adapter;
2148
2149 init_timer(&adapter->phy_config_timer);
2150 adapter->phy_config_timer.function = &atl1_phy_config;
2151 adapter->phy_config_timer.data = (unsigned long)adapter;
2152 adapter->phy_timer_pending = false;
2153
2154 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2155
2156 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2157
2158 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2159
2160 err = register_netdev(netdev);
2161 if (err)
2162 goto err_common;
2163
2164 cards_found++;
2165 atl1_via_workaround(adapter);
2166 return 0;
2167
2168err_common:
2169 pci_iounmap(pdev, adapter->hw.hw_addr);
2170err_pci_iomap:
2171 free_netdev(netdev);
2172err_alloc_etherdev:
2173 pci_release_regions(pdev);
2174err_dma:
2175err_request_regions:
2176 pci_disable_device(pdev);
2177 return err;
2178}
2179
2180/*
2181 * atl1_remove - Device Removal Routine
2182 * @pdev: PCI device information struct
2183 *
2184 * atl1_remove is called by the PCI subsystem to alert the driver
 2185  * that it should release a PCI device. This could be caused by a
2186 * Hot-Plug event, or because the driver is going to be removed from
2187 * memory.
2188 */
2189static void __devexit atl1_remove(struct pci_dev *pdev)
2190{
2191 struct net_device *netdev = pci_get_drvdata(pdev);
2192 struct atl1_adapter *adapter;
2193 /* Device not available. Return. */
2194 if (!netdev)
2195 return;
2196
2197 adapter = netdev_priv(netdev);
2198
2199 /*
2200 * Some atl1 boards lack persistent storage for their MAC, and get it
2201 * from the BIOS during POST. If we've been messing with the MAC
2202 * address, we need to save the permanent one.
2203 */
2204 if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
2205 memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
2206 ETH_ALEN);
2207 atl1_set_mac_addr(&adapter->hw);
2208 }
2209
2210 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2211 unregister_netdev(netdev);
2212 pci_iounmap(pdev, adapter->hw.hw_addr);
2213 pci_release_regions(pdev);
2214 free_netdev(netdev);
2215 pci_disable_device(pdev);
2216}
2217
2218static struct pci_driver atl1_driver = {
2219 .name = ATLX_DRIVER_NAME,
2220 .id_table = atl1_pci_tbl,
2221 .probe = atl1_probe,
2222 .remove = __devexit_p(atl1_remove),
2223 .suspend = atl1_suspend,
2224 .resume = atl1_resume
2225};
2226
2227/*
2228 * atl1_exit_module - Driver Exit Cleanup Routine
2229 *
2230 * atl1_exit_module is called just before the driver is removed
2231 * from memory.
2232 */
2233static void __exit atl1_exit_module(void)
2234{
2235 pci_unregister_driver(&atl1_driver);
2236}
2237
2238/*
2239 * atl1_init_module - Driver Registration Routine
2240 *
2241 * atl1_init_module is the first routine called when the driver is
2242 * loaded. All it does is register with the PCI subsystem.
2243 */
2244static int __init atl1_init_module(void)
2245{
2246 return pci_register_driver(&atl1_driver);
2247}
2248
2249module_init(atl1_init_module);
2250module_exit(atl1_exit_module);
2251
2252struct atl1_stats {
2253 char stat_string[ETH_GSTRING_LEN];
2254 int sizeof_stat;
2255 int stat_offset;
2256};
2257
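/* expands to the size and offset of member m within struct atl1_adapter */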
2258#define ATL1_STAT(m) \
2259 sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
2260
2261static struct atl1_stats atl1_gstrings_stats[] = {
2262 {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
2263 {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
2264 {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
2265 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
2266 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
2267 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
2268 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
2269 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
2270 {"multicast", ATL1_STAT(soft_stats.multicast)},
2271 {"collisions", ATL1_STAT(soft_stats.collisions)},
2272 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
2273 {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2274 {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
2275 {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
2276 {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
2277 {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2278 {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
2279 {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
2280 {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
2281 {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
2282 {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
2283 {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
2284 {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
2285 {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
2286 {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
2287 {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
2288 {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
2289 {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
2290 {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
2291 {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
2292 {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
2293};
2294
2295static void atl1_get_ethtool_stats(struct net_device *netdev,
2296 struct ethtool_stats *stats, u64 *data)
2297{
2298 struct atl1_adapter *adapter = netdev_priv(netdev);
2299 int i;
2300 char *p;
2301
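	/* copy each stat out at the width (u64 or u32) recorded by ATL1_STAT() */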
2302 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2303 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
2304 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2305 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2306 }
2307
2308}
2309
2310static int atl1_get_sset_count(struct net_device *netdev, int sset)
2311{
2312 switch (sset) {
2313 case ETH_SS_STATS:
2314 return ARRAY_SIZE(atl1_gstrings_stats);
2315 default:
2316 return -EOPNOTSUPP;
2317 }
2318}
2319
2320static int atl1_get_settings(struct net_device *netdev,
2321 struct ethtool_cmd *ecmd)
2322{
2323 struct atl1_adapter *adapter = netdev_priv(netdev);
2324 struct atl1_hw *hw = &adapter->hw;
2325
2326 ecmd->supported = (SUPPORTED_10baseT_Half |
2327 SUPPORTED_10baseT_Full |
2328 SUPPORTED_100baseT_Half |
2329 SUPPORTED_100baseT_Full |
2330 SUPPORTED_1000baseT_Full |
2331 SUPPORTED_Autoneg | SUPPORTED_TP);
2332 ecmd->advertising = ADVERTISED_TP;
2333 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2334 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2335 ecmd->advertising |= ADVERTISED_Autoneg;
2336 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2338 ecmd->advertising |=
2339 (ADVERTISED_10baseT_Half |
2340 ADVERTISED_10baseT_Full |
2341 ADVERTISED_100baseT_Half |
2342 ADVERTISED_100baseT_Full |
2343 ADVERTISED_1000baseT_Full);
2344 } else
2345 ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2346 }
2347 ecmd->port = PORT_TP;
2348 ecmd->phy_address = 0;
2349 ecmd->transceiver = XCVR_INTERNAL;
2350
2351 if (netif_carrier_ok(adapter->netdev)) {
2352 u16 link_speed, link_duplex;
2353 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2354 ecmd->speed = link_speed;
2355 if (link_duplex == FULL_DUPLEX)
2356 ecmd->duplex = DUPLEX_FULL;
2357 else
2358 ecmd->duplex = DUPLEX_HALF;
2359 } else {
2360 ecmd->speed = -1;
2361 ecmd->duplex = -1;
2362 }
2363 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2364 hw->media_type == MEDIA_TYPE_1000M_FULL)
2365 ecmd->autoneg = AUTONEG_ENABLE;
2366 else
2367 ecmd->autoneg = AUTONEG_DISABLE;
2368
2369 return 0;
2370}
2371
2372static int atl1_set_settings(struct net_device *netdev,
2373 struct ethtool_cmd *ecmd)
2374{
2375 struct atl1_adapter *adapter = netdev_priv(netdev);
2376 struct atl1_hw *hw = &adapter->hw;
2377 u16 phy_data;
2378 int ret_val = 0;
2379 u16 old_media_type = hw->media_type;
2380
2381 if (netif_running(adapter->netdev)) {
2382 dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
2383 atl1_down(adapter);
2384 }
2385
2386 if (ecmd->autoneg == AUTONEG_ENABLE)
2387 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
2388 else {
2389 if (ecmd->speed == SPEED_1000) {
2390 if (ecmd->duplex != DUPLEX_FULL) {
2391 dev_warn(&adapter->pdev->dev,
2392 "can't force to 1000M half duplex\n");
2393 ret_val = -EINVAL;
2394 goto exit_sset;
2395 }
2396 hw->media_type = MEDIA_TYPE_1000M_FULL;
2397 } else if (ecmd->speed == SPEED_100) {
2398 if (ecmd->duplex == DUPLEX_FULL)
2399 hw->media_type = MEDIA_TYPE_100M_FULL;
2400 else
2401 hw->media_type = MEDIA_TYPE_100M_HALF;
2402 } else {
2403 if (ecmd->duplex == DUPLEX_FULL)
2404 hw->media_type = MEDIA_TYPE_10M_FULL;
2405 else
2406 hw->media_type = MEDIA_TYPE_10M_HALF;
2407 }
2408 }
2409 switch (hw->media_type) {
2410 case MEDIA_TYPE_AUTO_SENSOR:
2411 ecmd->advertising =
2412 ADVERTISED_10baseT_Half |
2413 ADVERTISED_10baseT_Full |
2414 ADVERTISED_100baseT_Half |
2415 ADVERTISED_100baseT_Full |
2416 ADVERTISED_1000baseT_Full |
2417 ADVERTISED_Autoneg | ADVERTISED_TP;
2418 break;
2419 case MEDIA_TYPE_1000M_FULL:
2420 ecmd->advertising =
2421 ADVERTISED_1000baseT_Full |
2422 ADVERTISED_Autoneg | ADVERTISED_TP;
2423 break;
2424 default:
2425 ecmd->advertising = 0;
2426 break;
2427 }
2428 if (atl1_phy_setup_autoneg_adv(hw)) {
2429 ret_val = -EINVAL;
2430 dev_warn(&adapter->pdev->dev,
2431 "invalid ethtool speed/duplex setting\n");
2432 goto exit_sset;
2433 }
2434 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2435 hw->media_type == MEDIA_TYPE_1000M_FULL)
2436 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2437 else {
2438 switch (hw->media_type) {
2439 case MEDIA_TYPE_100M_FULL:
2440 phy_data =
2441 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
2442 MII_CR_RESET;
2443 break;
2444 case MEDIA_TYPE_100M_HALF:
2445 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2446 break;
2447 case MEDIA_TYPE_10M_FULL:
2448 phy_data =
2449 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
2450 break;
2451 default:
2452 /* MEDIA_TYPE_10M_HALF: */
2453 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2454 break;
2455 }
2456 }
2457 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2458exit_sset:
2459 if (ret_val)
2460 hw->media_type = old_media_type;
2461
2462 if (netif_running(adapter->netdev)) {
2463 dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
2464 atl1_up(adapter);
2465 } else if (!ret_val) {
2466 dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
2467 atl1_reset(adapter);
2468 }
2469 return ret_val;
2470}
2471
2472static void atl1_get_drvinfo(struct net_device *netdev,
2473 struct ethtool_drvinfo *drvinfo)
2474{
2475 struct atl1_adapter *adapter = netdev_priv(netdev);
2476
2477 strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2478 strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2479 sizeof(drvinfo->version));
2480 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2481 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2482 sizeof(drvinfo->bus_info));
2483 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2484}
2485
2486static void atl1_get_wol(struct net_device *netdev,
2487 struct ethtool_wolinfo *wol)
2488{
2489 struct atl1_adapter *adapter = netdev_priv(netdev);
2490
2491 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2492 wol->wolopts = 0;
2493 if (adapter->wol & ATLX_WUFC_EX)
2494 wol->wolopts |= WAKE_UCAST;
2495 if (adapter->wol & ATLX_WUFC_MC)
2496 wol->wolopts |= WAKE_MCAST;
2497 if (adapter->wol & ATLX_WUFC_BC)
2498 wol->wolopts |= WAKE_BCAST;
2499 if (adapter->wol & ATLX_WUFC_MAG)
2500 wol->wolopts |= WAKE_MAGIC;
2502}
2503
2504static int atl1_set_wol(struct net_device *netdev,
2505 struct ethtool_wolinfo *wol)
2506{
2507 struct atl1_adapter *adapter = netdev_priv(netdev);
2508
2509 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2510 return -EOPNOTSUPP;
2511 adapter->wol = 0;
2512 if (wol->wolopts & WAKE_UCAST)
2513 adapter->wol |= ATLX_WUFC_EX;
2514 if (wol->wolopts & WAKE_MCAST)
2515 adapter->wol |= ATLX_WUFC_MC;
2516 if (wol->wolopts & WAKE_BCAST)
2517 adapter->wol |= ATLX_WUFC_BC;
2518 if (wol->wolopts & WAKE_MAGIC)
2519 adapter->wol |= ATLX_WUFC_MAG;
2520 return 0;
2521}
2522
2523static void atl1_get_ringparam(struct net_device *netdev,
2524 struct ethtool_ringparam *ring)
2525{
2526 struct atl1_adapter *adapter = netdev_priv(netdev);
2527 struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2528 struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2529
2530 ring->rx_max_pending = ATL1_MAX_RFD;
2531 ring->tx_max_pending = ATL1_MAX_TPD;
2532 ring->rx_mini_max_pending = 0;
2533 ring->rx_jumbo_max_pending = 0;
2534 ring->rx_pending = rxdr->count;
2535 ring->tx_pending = txdr->count;
2536 ring->rx_mini_pending = 0;
2537 ring->rx_jumbo_pending = 0;
2538}
2539
2540static int atl1_set_ringparam(struct net_device *netdev,
2541 struct ethtool_ringparam *ring)
2542{
2543 struct atl1_adapter *adapter = netdev_priv(netdev);
2544 struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
2545 struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
2546 struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
2547
2548 struct atl1_tpd_ring tpd_old, tpd_new;
2549 struct atl1_rfd_ring rfd_old, rfd_new;
2550 struct atl1_rrd_ring rrd_old, rrd_new;
2551 struct atl1_ring_header rhdr_old, rhdr_new;
2552 int err;
2553
2554 tpd_old = adapter->tpd_ring;
2555 rfd_old = adapter->rfd_ring;
2556 rrd_old = adapter->rrd_ring;
2557 rhdr_old = adapter->ring_header;
2558
2559 if (netif_running(adapter->netdev))
2560 atl1_down(adapter);
2561
2562 rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
2563 rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
2564 rfdr->count;
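	/* (count + 3) & ~3 rounds each ring up to a multiple of 4 entries */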
2565 rfdr->count = (rfdr->count + 3) & ~3;
2566 rrdr->count = rfdr->count;
2567
2568 tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
2569 tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
2570 tpdr->count;
2571 tpdr->count = (tpdr->count + 3) & ~3;
2572
2573 if (netif_running(adapter->netdev)) {
2574 /* try to get new resources before deleting old */
2575 err = atl1_setup_ring_resources(adapter);
2576 if (err)
2577 goto err_setup_ring;
2578
2579 /*
2580 * save the new, restore the old in order to free it,
2581 * then restore the new back again
2582 */
2583
2584 rfd_new = adapter->rfd_ring;
2585 rrd_new = adapter->rrd_ring;
2586 tpd_new = adapter->tpd_ring;
2587 rhdr_new = adapter->ring_header;
2588 adapter->rfd_ring = rfd_old;
2589 adapter->rrd_ring = rrd_old;
2590 adapter->tpd_ring = tpd_old;
2591 adapter->ring_header = rhdr_old;
2592 atl1_free_ring_resources(adapter);
2593 adapter->rfd_ring = rfd_new;
2594 adapter->rrd_ring = rrd_new;
2595 adapter->tpd_ring = tpd_new;
2596 adapter->ring_header = rhdr_new;
2597
2598 err = atl1_up(adapter);
2599 if (err)
2600 return err;
2601 }
2602 return 0;
2603
2604err_setup_ring:
2605 adapter->rfd_ring = rfd_old;
2606 adapter->rrd_ring = rrd_old;
2607 adapter->tpd_ring = tpd_old;
2608 adapter->ring_header = rhdr_old;
2609 atl1_up(adapter);
2610 return err;
2611}
2612
2613static void atl1_get_pauseparam(struct net_device *netdev,
2614 struct ethtool_pauseparam *epause)
2615{
2616 struct atl1_adapter *adapter = netdev_priv(netdev);
2617 struct atl1_hw *hw = &adapter->hw;
2618
2619 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2620 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2621 epause->autoneg = AUTONEG_ENABLE;
2622 } else {
2623 epause->autoneg = AUTONEG_DISABLE;
2624 }
2625 epause->rx_pause = 1;
2626 epause->tx_pause = 1;
2627}
2628
2629static int atl1_set_pauseparam(struct net_device *netdev,
2630 struct ethtool_pauseparam *epause)
2631{
2632 struct atl1_adapter *adapter = netdev_priv(netdev);
2633 struct atl1_hw *hw = &adapter->hw;
2634
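	/* FIXME: this only reports the fixed pause configuration back to
	 * the caller; the requested settings are not applied to hardware */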
2635 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2636 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2637 epause->autoneg = AUTONEG_ENABLE;
2638 } else {
2639 epause->autoneg = AUTONEG_DISABLE;
2640 }
2641
2642 epause->rx_pause = 1;
2643 epause->tx_pause = 1;
2644
2645 return 0;
2646}
2647
2648/* FIXME: is this right? -- CHS */
2649static u32 atl1_get_rx_csum(struct net_device *netdev)
2650{
2651 return 1;
2652}
2653
2654static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2655 u8 *data)
2656{
2657 u8 *p = data;
2658 int i;
2659
2660 switch (stringset) {
2661 case ETH_SS_STATS:
2662 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2663 memcpy(p, atl1_gstrings_stats[i].stat_string,
2664 ETH_GSTRING_LEN);
2665 p += ETH_GSTRING_LEN;
2666 }
2667 break;
2668 }
2669}
2670
2671static int atl1_nway_reset(struct net_device *netdev)
2672{
2673 struct atl1_adapter *adapter = netdev_priv(netdev);
2674 struct atl1_hw *hw = &adapter->hw;
2675
2676 if (netif_running(netdev)) {
2677 u16 phy_data;
2678 atl1_down(adapter);
2679
2680 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2681 hw->media_type == MEDIA_TYPE_1000M_FULL) {
2682 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2683 } else {
2684 switch (hw->media_type) {
2685 case MEDIA_TYPE_100M_FULL:
2686 phy_data = MII_CR_FULL_DUPLEX |
2687 MII_CR_SPEED_100 | MII_CR_RESET;
2688 break;
2689 case MEDIA_TYPE_100M_HALF:
2690 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2691 break;
2692 case MEDIA_TYPE_10M_FULL:
2693 phy_data = MII_CR_FULL_DUPLEX |
2694 MII_CR_SPEED_10 | MII_CR_RESET;
2695 break;
2696 default:
2697 /* MEDIA_TYPE_10M_HALF */
2698 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2699 }
2700 }
2701 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2702 atl1_up(adapter);
2703 }
2704 return 0;
2705}
2706
2707const struct ethtool_ops atl1_ethtool_ops = {
2708 .get_settings = atl1_get_settings,
2709 .set_settings = atl1_set_settings,
2710 .get_drvinfo = atl1_get_drvinfo,
2711 .get_wol = atl1_get_wol,
2712 .set_wol = atl1_set_wol,
2713 .get_ringparam = atl1_get_ringparam,
2714 .set_ringparam = atl1_set_ringparam,
2715 .get_pauseparam = atl1_get_pauseparam,
2716 .set_pauseparam = atl1_set_pauseparam,
2717 .get_rx_csum = atl1_get_rx_csum,
2718 .set_tx_csum = ethtool_op_set_tx_hw_csum,
2719 .get_link = ethtool_op_get_link,
2720 .set_sg = ethtool_op_set_sg,
2721 .get_strings = atl1_get_strings,
2722 .nway_reset = atl1_nway_reset,
2723 .get_ethtool_stats = atl1_get_ethtool_stats,
2724 .get_sset_count = atl1_get_sset_count,
2725 .set_tso = ethtool_op_set_tso,
2726};
2727
2728/*
2729 * Reset the transmit and receive units; mask and clear all interrupts.
2730 * hw - Struct containing variables accessed by shared code
 2731  * return: 0 on success, otherwise the idle status (on error)
2732 */
2733s32 atl1_reset_hw(struct atl1_hw *hw)
2734{
2735 struct pci_dev *pdev = hw->back->pdev;
2736 u32 icr;
2737 int i;
2738
2739 /*
2740 * Clear Interrupt mask to stop board from generating
2741 * interrupts & Clear any pending interrupt events
2742 */
2743 /*
2744 * iowrite32(0, hw->hw_addr + REG_IMR);
2745 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
2746 */
2747
2748 /*
2749 * Issue Soft Reset to the MAC. This will reset the chip's
2750 	 * transmit, receive, and DMA units. It will not affect
2751 * the current PCI configuration. The global reset bit is self-
2752 * clearing, and should clear within a microsecond.
2753 */
2754 iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
2755 ioread32(hw->hw_addr + REG_MASTER_CTRL);
2756
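	/* take the GPHY core back out of reset (probe and remove hold it
	 * in reset by writing 0 to this register) */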
2757 iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
2758 ioread16(hw->hw_addr + REG_PHY_ENABLE);
2759
2760 /* delay about 1ms */
2761 msleep(1);
2762
2763 	/* wait at least 10 ms for all modules to go idle */
2764 for (i = 0; i < 10; i++) {
2765 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
2766 if (!icr)
2767 break;
2768 /* delay 1 ms */
2769 msleep(1);
2770 /* FIXME: still the right way to do this? */
2771 cpu_relax();
2772 }
2773
2774 if (icr) {
2775 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
2776 return icr;
2777 }
2778
2779 return 0;
2780}
2781
2782 /* EEPROM support functions
2783  *
2784  * atl1_check_eeprom_exist
2785  * returns 0 if an EEPROM is present
2786  */
2787static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2788{
2789 u32 value;
2790 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2791 if (value & SPI_FLASH_CTRL_EN_VPD) {
2792 value &= ~SPI_FLASH_CTRL_EN_VPD;
2793 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2794 }
2795
2796 value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
2797 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2798}
2799
2800static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2801{
2802 int i;
2803 u32 control;
2804
2805 if (offset & 3)
2806 		/* address is not dword-aligned */
2807 return false;
2808
2809 iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2810 control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2811 iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2812 ioread32(hw->hw_addr + REG_VPD_CAP);
2813
2814 for (i = 0; i < 10; i++) {
2815 msleep(2);
2816 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2817 if (control & VPD_CAP_VPD_FLAG)
2818 break;
2819 }
2820 if (control & VPD_CAP_VPD_FLAG) {
2821 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2822 return true;
2823 }
2824 /* timeout */
2825 return false;
2826}
2827
2828/*
2829 * Reads the value from a PHY register
2830 * hw - Struct containing variables accessed by shared code
2831 * reg_addr - address of the PHY register to read
2832 */
2833s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
2834{
2835 u32 val;
2836 int i;
2837
2838 val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2839 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
2840 MDIO_CLK_SEL_SHIFT;
2841 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
2842 ioread32(hw->hw_addr + REG_MDIO_CTRL);
2843
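	/* poll until the MDIO state machine clears its START and BUSY bits */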
2844 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2845 udelay(2);
2846 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
2847 if (!(val & (MDIO_START | MDIO_BUSY)))
2848 break;
2849 }
2850 if (!(val & (MDIO_START | MDIO_BUSY))) {
2851 *phy_data = (u16) val;
2852 return 0;
2853 }
2854 return ATLX_ERR_PHY;
2855}
2856
2857#define CUSTOM_SPI_CS_SETUP 2
2858#define CUSTOM_SPI_CLK_HI 2
2859#define CUSTOM_SPI_CLK_LO 2
2860#define CUSTOM_SPI_CS_HOLD 2
2861#define CUSTOM_SPI_CS_HI 3
2862
2863static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
2864{
2865 int i;
2866 u32 value;
2867
2868 iowrite32(0, hw->hw_addr + REG_SPI_DATA);
2869 iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
2870
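	/*
	 * Pack the custom chip-select and clock timing values, plus
	 * instruction slot 1 (presumably the read opcode programmed by
	 * atl1_init_flash_opcode()), into the flash control word.
	 */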
2871 value = SPI_FLASH_CTRL_WAIT_READY |
2872 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2873 SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
2874 SPI_FLASH_CTRL_CLK_HI_MASK) <<
2875 SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
2876 SPI_FLASH_CTRL_CLK_LO_MASK) <<
2877 SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
2878 SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2879 SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
2880 SPI_FLASH_CTRL_CS_HI_MASK) <<
2881 SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
2882 SPI_FLASH_CTRL_INS_SHIFT;
2883
2884 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2885
2886 value |= SPI_FLASH_CTRL_START;
2887 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2888 ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2889
2890 for (i = 0; i < 10; i++) {
2891 msleep(1);
2892 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2893 if (!(value & SPI_FLASH_CTRL_START))
2894 break;
2895 }
2896
2897 if (value & SPI_FLASH_CTRL_START)
2898 return false;
2899
2900 *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
2901
2902 return true;
2903}
2904
2905/*
2906 * get_permanent_address
 2907  * returns 0 if a valid MAC address was obtained
2908 */
2909static int atl1_get_permanent_address(struct atl1_hw *hw)
2910{
2911 u32 addr[2];
2912 u32 i, control;
2913 u16 reg;
2914 u8 eth_addr[ETH_ALEN];
2915 bool key_valid;
2916
2917 if (is_valid_ether_addr(hw->perm_mac_addr))
2918 return 0;
2919
2920 /* init */
2921 addr[0] = addr[1] = 0;
2922
2923 if (!atl1_check_eeprom_exist(hw)) {
2924 reg = 0;
2925 key_valid = false;
2926 /* Read out all EEPROM content */
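		/*
		 * The EEPROM holds dword pairs: a key dword whose low byte
		 * is 0x5A and whose upper 16 bits name a register, followed
		 * by a dword holding that register's value.
		 */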
2927 i = 0;
2928 while (1) {
2929 if (atl1_read_eeprom(hw, i + 0x100, &control)) {
2930 if (key_valid) {
2931 if (reg == REG_MAC_STA_ADDR)
2932 addr[0] = control;
2933 else if (reg == (REG_MAC_STA_ADDR + 4))
2934 addr[1] = control;
2935 key_valid = false;
2936 } else if ((control & 0xff) == 0x5A) {
2937 key_valid = true;
2938 reg = (u16) (control >> 16);
2939 } else
2940 break;
2941 } else
2942 /* read error */
2943 break;
2944 i += 4;
2945 }
2946
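		/* the address registers hold the MAC byte-swapped; swab the
		 * two halves back into wire order */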
2947 *(u32 *) &eth_addr[2] = swab32(addr[0]);
2948 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
2949 if (is_valid_ether_addr(eth_addr)) {
2950 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
2951 return 0;
2952 }
2953 return 1;
2954 }
2955
2956 	/* no EEPROM: see if the MAC address lives in SPI flash */
2957 addr[0] = addr[1] = 0;
2958 reg = 0;
2959 key_valid = false;
2960 i = 0;
2961 while (1) {
2962 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
2963 if (key_valid) {
2964 if (reg == REG_MAC_STA_ADDR)
2965 addr[0] = control;
2966 else if (reg == (REG_MAC_STA_ADDR + 4))
2967 addr[1] = control;
2968 key_valid = false;
2969 } else if ((control & 0xff) == 0x5A) {
2970 key_valid = true;
2971 reg = (u16) (control >> 16);
2972 } else
2973 /* data end */
2974 break;
2975 } else
2976 /* read error */
2977 break;
2978 i += 4;
2979 }
2980
2981 *(u32 *) &eth_addr[2] = swab32(addr[0]);
2982 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
2983 if (is_valid_ether_addr(eth_addr)) {
2984 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
2985 return 0;
2986 }
2987
2988 /*
2989 * On some motherboards, the MAC address is written by the
2990 * BIOS directly to the MAC register during POST, and is
2991 * not stored in eeprom. If all else thus far has failed
2992 * to fetch the permanent MAC address, try reading it directly.
2993 */
2994 addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
2995 addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
2996 *(u32 *) &eth_addr[2] = swab32(addr[0]);
2997 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
2998 if (is_valid_ether_addr(eth_addr)) {
2999 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3000 return 0;
3001 }
3002
3003 return 1;
3004}
3005
3006/*
3007 * Reads the adapter's MAC address from the EEPROM
3008 * hw - Struct containing variables accessed by shared code
3009 */
3010s32 atl1_read_mac_addr(struct atl1_hw *hw)
3011{
3012 u16 i;
3013
3014 if (atl1_get_permanent_address(hw))
3015 random_ether_addr(hw->perm_mac_addr);
3016
3017 for (i = 0; i < ETH_ALEN; i++)
3018 hw->mac_addr[i] = hw->perm_mac_addr[i];
3019 return 0;
3020}
3021
3022/*
3023 * Hashes an address to determine its location in the multicast table
3024 * hw - Struct containing variables accessed by shared code
3025 * mc_addr - the multicast address to hash
3026 *
3027  * atl1_hash_mc_addr
3028  * purpose: compute the hash value for a multicast address
3029  * processing:
3030  *	1. compute the 32-bit CRC of the multicast address
3031  *	2. reverse the CRC bit order, MSB to LSB
3033 */
3034u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3035{
3036 u32 crc32, value = 0;
3037 int i;
3038
3039 crc32 = ether_crc_le(6, mc_addr);
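	/* reverse the bit order of the CRC: bit 0 becomes bit 31, etc. */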
3040 for (i = 0; i < 32; i++)
3041 value |= (((crc32 >> i) & 1) << (31 - i));
3042
3043 return value;
3044}
3045
3046/*
3047 * Sets the bit in the multicast table corresponding to the hash value.
3048 * hw - Struct containing variables accessed by shared code
3049 * hash_value - Multicast address hash value
3050 */
3051void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
3052{
3053 u32 hash_bit, hash_reg;
3054 u32 mta;
3055
3056 /*
3057 * The HASH Table is a register array of 2 32-bit registers.
3058 * It is treated like an array of 64 bits. We want to set
3059 	 * bit BitArray[hash_value]. So we figure out what register
3060 	 * the bit is in, read it, OR in the new bit, then write
3061 	 * back the new value. The register is selected by the most
3062 	 * significant bit of the hash value, and the bit within that
3063 	 * register by the next five bits (30:26).
3064 */
3065 hash_reg = (hash_value >> 31) & 0x1;
3066 hash_bit = (hash_value >> 26) & 0x1F;
3067 mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3068 mta |= (1 << hash_bit);
3069 iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3070}
3071
3072/*
3073 * Writes a value to a PHY register
3074 * hw - Struct containing variables accessed by shared code
3075 * reg_addr - address of the PHY register to write
3076 * data - data to write to the PHY
3077 */
3078s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
3079{
3080 int i;
3081 u32 val;
3082
3083 val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
3084 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
3085 MDIO_SUP_PREAMBLE |
3086 MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
3087 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
3088 ioread32(hw->hw_addr + REG_MDIO_CTRL);
3089
3090 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
3091 udelay(2);
3092 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3093 if (!(val & (MDIO_START | MDIO_BUSY)))
3094 break;
3095 }
3096
3097 if (!(val & (MDIO_START | MDIO_BUSY)))
3098 return 0;
3099
3100 return ATLX_ERR_PHY;
3101}
3102
3103/*
3104  * Force the L001's PHY out of its power-saving state (hardware bug)
3105  * hw - Struct containing variables accessed by shared code
3106  * At power-on the L001's PHY always comes up in the power-saving
3107  * state, in which gigabit link is forbidden
3108 */
3109static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3110{
3111 s32 ret;
3112 ret = atl1_write_phy_reg(hw, 29, 0x0029);
3113 if (ret)
3114 return ret;
3115 return atl1_write_phy_reg(hw, 30, 0);
3116}
3117
3118/*
3119  * TODO: do something or get rid of this
3120 */
3121s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3122{
3123/* s32 ret_val;
3124 * u16 phy_data;
3125 */
3126
3127/*
3128 ret_val = atl1_write_phy_reg(hw, ...);
3129 ret_val = atl1_write_phy_reg(hw, ...);
3130 ....
3131*/
3132 return 0;
3133}
3134
3135/*
3136  * Resets the PHY and makes all configuration take effect
3137  * hw - Struct containing variables accessed by shared code
3138  *
3139  * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
3140 */
3141static s32 atl1_phy_reset(struct atl1_hw *hw)
3142{
3143 struct pci_dev *pdev = hw->back->pdev;
3144 s32 ret_val;
3145 u16 phy_data;
3146
3147 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3148 hw->media_type == MEDIA_TYPE_1000M_FULL)
3149 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3150 else {
3151 switch (hw->media_type) {
3152 case MEDIA_TYPE_100M_FULL:
3153 phy_data =
3154 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3155 MII_CR_RESET;
3156 break;
3157 case MEDIA_TYPE_100M_HALF:
3158 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3159 break;
3160 case MEDIA_TYPE_10M_FULL:
3161 phy_data =
3162 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3163 break;
3164 default:
3165 /* MEDIA_TYPE_10M_HALF: */
3166 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3167 break;
3168 }
3169 }
3170
3171 ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3172 if (ret_val) {
3173 u32 val;
3174 int i;
3175 /* pcie serdes link may be down! */
3176 dev_dbg(&pdev->dev, "pcie phy link down\n");
3177
3178 for (i = 0; i < 25; i++) {
3179 msleep(1);
3180 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3181 if (!(val & (MDIO_START | MDIO_BUSY)))
3182 break;
3183 }
3184
3185 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3186 dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
3187 return ret_val;
3188 }
3189 }
3190 return 0;
3191}
3192
3193/*
3194 * Configures PHY autoneg and flow control advertisement settings
3195 * hw - Struct containing variables accessed by shared code
3196 */
3197s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3198{
3199 s32 ret_val;
3200 s16 mii_autoneg_adv_reg;
3201 s16 mii_1000t_ctrl_reg;
3202
3203 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
3204 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3205
3206 /* Read the MII 1000Base-T Control Register (Address 9). */
3207 mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3208
3209 /*
3210 * First we clear all the 10/100 mb speed bits in the Auto-Neg
3211 * Advertisement Register (Address 4) and the 1000 mb speed bits in
3212 * the 1000Base-T Control Register (Address 9).
3213 */
3214 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3215 mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3216
3217 /*
3218 * Need to parse media_type and set up
3219 * the appropriate PHY registers.
3220 */
3221 switch (hw->media_type) {
3222 case MEDIA_TYPE_AUTO_SENSOR:
3223 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3224 MII_AR_10T_FD_CAPS |
3225 MII_AR_100TX_HD_CAPS |
3226 MII_AR_100TX_FD_CAPS);
3227 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3228 break;
3229
3230 case MEDIA_TYPE_1000M_FULL:
3231 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3232 break;
3233
3234 case MEDIA_TYPE_100M_FULL:
3235 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3236 break;
3237
3238 case MEDIA_TYPE_100M_HALF:
3239 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3240 break;
3241
3242 case MEDIA_TYPE_10M_FULL:
3243 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3244 break;
3245
3246 default:
3247 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3248 break;
3249 }
3250
3251 /* flow control fixed to enable all */
3252 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3253
3254 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3255 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3256
3257 ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3258 if (ret_val)
3259 return ret_val;
3260
3261 ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3262 if (ret_val)
3263 return ret_val;
3264
3265 return 0;
3266}
3267
3268/*
3269 * Configures link settings.
3270 * hw - Struct containing variables accessed by shared code
3271 * Assumes the hardware has previously been reset and the
3272 * transmitter and receiver are not enabled.
3273 */
3274static s32 atl1_setup_link(struct atl1_hw *hw)
3275{
3276 struct pci_dev *pdev = hw->back->pdev;
3277 s32 ret_val;
3278
3279 /*
3280 * Options:
3281 	 * the PHY will advertise the value(s) parsed from
3282 	 * autoneg_advertised and fc;
3283 	 * regardless of the autoneg setting, we do not wait for the link result
3284 */
3285 ret_val = atl1_phy_setup_autoneg_adv(hw);
3286 if (ret_val) {
3287 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
3288 return ret_val;
3289 }
3290 	/* soft-reset the PHY and enable auto-negotiation if needed */
3291 ret_val = atl1_phy_reset(hw);
3292 if (ret_val) {
3293 dev_dbg(&pdev->dev, "error resetting phy\n");
3294 return ret_val;
3295 }
3296 hw->phy_configured = true;
3297 return ret_val;
3298}
3299
3300static void atl1_init_flash_opcode(struct atl1_hw *hw)
3301{
3302 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
3303 		/* unknown vendor: fall back to Atmel (entry 0) */
3304 hw->flash_vendor = 0;
3305
3306 /* Init OP table */
3307 iowrite8(flash_table[hw->flash_vendor].cmd_program,
3308 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
3309 iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
3310 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
3311 iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
3312 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
3313 iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
3314 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
3315 iowrite8(flash_table[hw->flash_vendor].cmd_wren,
3316 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
3317 iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
3318 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
3319 iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
3320 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
3321 iowrite8(flash_table[hw->flash_vendor].cmd_read,
3322 hw->hw_addr + REG_SPI_FLASH_OP_READ);
3323}
3324
3325/*
3326 * Performs basic configuration of the adapter.
3327 * hw - Struct containing variables accessed by shared code
3328 * Assumes that the controller has previously been reset and is in a
3329  * post-reset uninitialized state. Initializes the multicast table
3330  * and calls routines to set up the link.
3331 * Leaves the transmit and receive units disabled and uninitialized.
3332 */
3333s32 atl1_init_hw(struct atl1_hw *hw)
3334{
3335 u32 ret_val = 0;
3336
3337 /* Zero out the Multicast HASH table */
3338 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3339 /* clear the old settings from the multicast hash table */
3340 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3341
3342 atl1_init_flash_opcode(hw);
3343
3344 if (!hw->phy_configured) {
3345 		/* enable the GPHY LinkChange interrupt */
3346 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3347 if (ret_val)
3348 return ret_val;
3349 		/* bring the PHY out of its power-saving state */
3350 ret_val = atl1_phy_leave_power_saving(hw);
3351 if (ret_val)
3352 return ret_val;
3353 /* Call a subroutine to configure the link */
3354 ret_val = atl1_setup_link(hw);
3355 }
3356 return ret_val;
3357}
3358
3359/*
3360 * Detects the current speed and duplex settings of the hardware.
3361 * hw - Struct containing variables accessed by shared code
3362 * speed - Speed of the connection
3363 * duplex - Duplex setting of the connection
3364 */
3365s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3366{
3367 struct pci_dev *pdev = hw->back->pdev;
3368 s32 ret_val;
3369 u16 phy_data;
3370
3371 	/* Read PHY Specific Status Register (17) */
3372 ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3373 if (ret_val)
3374 return ret_val;
3375
3376 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3377 return ATLX_ERR_PHY_RES;
3378
3379 switch (phy_data & MII_ATLX_PSSR_SPEED) {
3380 case MII_ATLX_PSSR_1000MBS:
3381 *speed = SPEED_1000;
3382 break;
3383 case MII_ATLX_PSSR_100MBS:
3384 *speed = SPEED_100;
3385 break;
3386 case MII_ATLX_PSSR_10MBS:
3387 *speed = SPEED_10;
3388 break;
3389 default:
3390 dev_dbg(&pdev->dev, "error getting speed\n");
3391 return ATLX_ERR_PHY_SPEED;
3393 }
3394 if (phy_data & MII_ATLX_PSSR_DPLX)
3395 *duplex = FULL_DUPLEX;
3396 else
3397 *duplex = HALF_DUPLEX;
3398
3399 return 0;
3400}
3401
3402void atl1_set_mac_addr(struct atl1_hw *hw)
3403{
3404 u32 value;
3405 /*
3406 * 00-0B-6A-F6-00-DC
3407 * 0: 6AF600DC 1: 000B
3408 * low dword
3409 */
3410 value = (((u32) hw->mac_addr[2]) << 24) |
3411 (((u32) hw->mac_addr[3]) << 16) |
3412 (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3413 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3414 /* high dword */
3415 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3416 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
3417}