Diffstat (limited to 'drivers/staging/et131x')
-rw-r--r--  drivers/staging/et131x/et1310_address_map.h  1434
-rw-r--r--  drivers/staging/et131x/et1310_eeprom.c         407
-rw-r--r--  drivers/staging/et131x/et1310_mac.c            654
-rw-r--r--  drivers/staging/et131x/et1310_phy.c            979
-rw-r--r--  drivers/staging/et131x/et1310_phy.h            458
-rw-r--r--  drivers/staging/et131x/et1310_pm.c             180
-rw-r--r--  drivers/staging/et131x/et1310_rx.c            1152
-rw-r--r--  drivers/staging/et131x/et1310_rx.h             243
-rw-r--r--  drivers/staging/et131x/et1310_tx.c             797
-rw-r--r--  drivers/staging/et131x/et1310_tx.h             150
-rw-r--r--  drivers/staging/et131x/et131x_adapter.h        243
-rw-r--r--  drivers/staging/et131x/et131x_defs.h           126
-rw-r--r--  drivers/staging/et131x/et131x_initpci.c        848
-rw-r--r--  drivers/staging/et131x/et131x_isr.c            480
-rw-r--r--  drivers/staging/et131x/et131x_netdev.c         686
-rw-r--r--  drivers/staging/et131x/et131x_version.h         74
16 files changed, 8911 insertions, 0 deletions
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
new file mode 100644
index 00000000000..410677ee22b
--- /dev/null
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -0,0 +1,1434 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_address_map.h - Contains the register mapping for the ET1310
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#ifndef _ET1310_ADDRESS_MAP_H_
59#define _ET1310_ADDRESS_MAP_H_
60
61
62/* START OF GLOBAL REGISTER ADDRESS MAP */
63
64/*
65 * 10bit registers
66 *
67 * Tx queue start address reg in global address map at address 0x0000
68 * tx queue end address reg in global address map at address 0x0004
69 * rx queue start address reg in global address map at address 0x0008
70 * rx queue end address reg in global address map at address 0x000C
71 */
72
73/*
74 * structure for power management control status reg in global address map
75 * located at address 0x0010
76 * jagcore_rx_rdy bit 9
77 * jagcore_tx_rdy bit 8
78 * phy_lped_en bit 7
79 * phy_sw_coma bit 6
80 * rxclk_gate bit 5
81 * txclk_gate bit 4
82 * sysclk_gate bit 3
83 * jagcore_rx_en bit 2
84 * jagcore_tx_en bit 1
85 * gigephy_en bit 0
86 */
87
88#define ET_PM_PHY_SW_COMA 0x40
89#define ET_PMCSR_INIT 0x38
90
91/*
92 * Interrupt status reg at address 0x0018
93 */
94
95#define ET_INTR_TXDMA_ISR 0x00000008
96#define ET_INTR_TXDMA_ERR 0x00000010
97#define ET_INTR_RXDMA_XFR_DONE 0x00000020
98#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040
99#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080
100#define ET_INTR_RXDMA_STAT_LOW 0x00000100
101#define ET_INTR_RXDMA_ERR 0x00000200
102#define ET_INTR_WATCHDOG 0x00004000
103#define ET_INTR_WOL 0x00008000
104#define ET_INTR_PHY 0x00010000
105#define ET_INTR_TXMAC 0x00020000
106#define ET_INTR_RXMAC 0x00040000
107#define ET_INTR_MAC_STAT 0x00080000
108#define ET_INTR_SLV_TIMEOUT 0x00100000
109
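The interrupt handling itself lives in et131x_isr.c (listed in the diffstat above, not excerpted here). As a hedged illustration only, these bit masks would be tested against the value read from the interrupt status register at 0x0018, reached through the global register block defined later in this header; the variable names below are made up for the example:

	/* Illustration only: decode a pending interrupt status word */
	u32 status = readl(&etdev->regs->global.int_status);
	bool dma_err = status & (ET_INTR_TXDMA_ERR | ET_INTR_RXDMA_ERR);
	bool phy_event = status & ET_INTR_PHY;
	bool wol_event = status & ET_INTR_WOL;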
110/*
111 * Interrupt mask register at address 0x001C
112 * Interrupt alias clear mask reg at address 0x0020
113 * Interrupt status alias reg at address 0x0024
114 *
115 * Same masks as above
116 */
117
118/*
119 * Software reset reg at address 0x0028
120 * 0: txdma_sw_reset
121 * 1: rxdma_sw_reset
122 * 2: txmac_sw_reset
123 * 3: rxmac_sw_reset
124 * 4: mac_sw_reset
125 * 5: mac_stat_sw_reset
126 * 6: mmc_sw_reset
127 *31: selfclr_disable
128 */
129
130/*
131 * SLV Timer reg at address 0x002C (low 24 bits)
132 */
133
134/*
135 * MSI Configuration reg at address 0x0030
136 */
137
138#define ET_MSI_VECTOR 0x0000001F
139#define ET_MSI_TC 0x00070000
140
141/*
142 * Loopback reg located at address 0x0034
143 */
144
145#define ET_LOOP_MAC 0x00000001
146#define ET_LOOP_DMA 0x00000002
147
148/*
149 * GLOBAL Module of JAGCore Address Mapping
150 * Located at address 0x0000
151 */
152struct global_regs { /* Location: */
153 u32 txq_start_addr; /* 0x0000 */
154 u32 txq_end_addr; /* 0x0004 */
155 u32 rxq_start_addr; /* 0x0008 */
156 u32 rxq_end_addr; /* 0x000C */
157 u32 pm_csr; /* 0x0010 */
158 u32 unused; /* 0x0014 */
159 u32 int_status; /* 0x0018 */
160 u32 int_mask; /* 0x001C */
161 u32 int_alias_clr_en; /* 0x0020 */
162 u32 int_status_alias; /* 0x0024 */
163 u32 sw_reset; /* 0x0028 */
164 u32 slv_timer; /* 0x002C */
165 u32 msi_config; /* 0x0030 */
166 u32 loopback; /* 0x0034 */
167 u32 watchdog_timer; /* 0x0038 */
168};
169
170
171/* START OF TXDMA REGISTER ADDRESS MAP */
172
173/*
174 * txdma control status reg at address 0x1000
175 */
176
177#define ET_TXDMA_CSR_HALT 0x00000001
178#define ET_TXDMA_DROP_TLP 0x00000002
179#define ET_TXDMA_CACHE_THRS 0x000000F0
180#define ET_TXDMA_CACHE_SHIFT 4
181#define ET_TXDMA_SNGL_EPKT 0x00000100
182#define ET_TXDMA_CLASS 0x00001E00
183
184/*
185 * structure for txdma packet ring base address hi reg in txdma address map
186 * located at address 0x1004
187 * Defined earlier (u32)
188 */
189
190/*
191 * structure for txdma packet ring base address low reg in txdma address map
192 * located at address 0x1008
193 * Defined earlier (u32)
194 */
195
196/*
197 * structure for txdma packet ring number of descriptor reg in txdma address
198 * map. Located at address 0x100C
199 *
200 * 31-10: unused
201 * 9-0: pr ndes
202 */
203
204#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
205#define ET_DMA12_WRAP 0x1000
206#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
207#define ET_DMA10_WRAP 0x0400
208#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
209#define ET_DMA4_WRAP 0x0010
210
211#define INDEX12(x) ((x) & ET_DMA12_MASK)
212#define INDEX10(x) ((x) & ET_DMA10_MASK)
213#define INDEX4(x) ((x) & ET_DMA4_MASK)
214
215extern inline void add_10bit(u32 *v, int n)
216{
217 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
218}
219
220extern inline void add_12bit(u32 *v, int n)
221{
222 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
223}
224
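The ring offsets below are "10-bit DMA with wrap": the low ten bits hold a descriptor index and the next bit up is a wrap flag, so a completely full ring (same index, opposite wrap bits) can be told apart from an empty one (identical values). Note that add_10bit()/add_12bit() advance only the index portion and preserve the wrap bit, so the caller flips it on roll-over. A minimal sketch with illustrative names, not code taken from this patch:

	u32 send_idx = 0;		/* bits 9:0 index, bit 10 wrap flag */

	add_10bit(&send_idx, 1);	/* advance producer by one descriptor */
	if (INDEX10(send_idx) == 0)	/* rolled over past the last descriptor */
		send_idx ^= ET_DMA10_WRAP;

	/* empty: producer == consumer including the wrap bit;
	 * full : INDEX10() values match but the wrap bits differ
	 */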
225/*
226 * 10bit DMA with wrap
227 * txdma tx queue write address reg in txdma address map at 0x1010
228 * txdma tx queue write address external reg in txdma address map at 0x1014
229 * txdma tx queue read address reg in txdma address map at 0x1018
230 *
231 * u32
232 * txdma status writeback address hi reg in txdma address map at 0x101C
233 * txdma status writeback address lo reg in txdma address map at 0x1020
234 *
235 * 10bit DMA with wrap
236 * txdma service request reg in txdma address map at 0x1024
237 * structure for txdma service complete reg in txdma address map at 0x1028
238 *
239 * 4bit DMA with wrap
240 * txdma tx descriptor cache read index reg in txdma address map at 0x102C
241 * txdma tx descriptor cache write index reg in txdma address map at 0x1030
242 *
243 * txdma error reg in txdma address map at address 0x1034
244 * 0: PyldResend
245 * 1: PyldRewind
246 * 4: DescrResend
247 * 5: DescrRewind
248 * 8: WrbkResend
249 * 9: WrbkRewind
250 */
251
252/*
253 * Tx DMA Module of JAGCore Address Mapping
254 * Located at address 0x1000
255 */
256struct txdma_regs { /* Location: */
257 u32 csr; /* 0x1000 */
258 u32 pr_base_hi; /* 0x1004 */
259 u32 pr_base_lo; /* 0x1008 */
260 u32 pr_num_des; /* 0x100C */
261 u32 txq_wr_addr; /* 0x1010 */
262 u32 txq_wr_addr_ext; /* 0x1014 */
263 u32 txq_rd_addr; /* 0x1018 */
264 u32 dma_wb_base_hi; /* 0x101C */
265 u32 dma_wb_base_lo; /* 0x1020 */
266 u32 service_request; /* 0x1024 */
267 u32 service_complete; /* 0x1028 */
268 u32 cache_rd_index; /* 0x102C */
269 u32 cache_wr_index; /* 0x1030 */
270 u32 tx_dma_error; /* 0x1034 */
271 u32 desc_abort_cnt; /* 0x1038 */
272 u32 payload_abort_cnt; /* 0x103c */
273 u32 writeback_abort_cnt; /* 0x1040 */
274 u32 desc_timeout_cnt; /* 0x1044 */
275 u32 payload_timeout_cnt; /* 0x1048 */
276 u32 writeback_timeout_cnt; /* 0x104c */
277 u32 desc_error_cnt; /* 0x1050 */
278 u32 payload_error_cnt; /* 0x1054 */
279 u32 writeback_error_cnt; /* 0x1058 */
280 u32 dropped_tlp_cnt; /* 0x105c */
281 u32 new_service_complete; /* 0x1060 */
282 u32 ethernet_packet_cnt; /* 0x1064 */
283};
284
285/* END OF TXDMA REGISTER ADDRESS MAP */
286
287
288/* START OF RXDMA REGISTER ADDRESS MAP */
289
290/*
291 * structure for control status reg in rxdma address map
292 * Located at address 0x2000
293 *
294 * CSR
295 * 0: halt
296 * 1-3: tc
297 * 4: fbr_big_endian
298 * 5: psr_big_endian
299 * 6: pkt_big_endian
300 * 7: dma_big_endian
301 * 8-9: fbr0_size
302 * 10: fbr0_enable
303 * 11-12: fbr1_size
304 * 13: fbr1_enable
305 * 14: unused
306 * 15: pkt_drop_disable
307 * 16: pkt_done_flush
308 * 17: halt_status
309 * 18-31: unused
310 */
311
312
313/*
314 * structure for dma writeback lo reg in rxdma address map
315 * located at address 0x2004
316 * Defined earlier (u32)
317 */
318
319/*
320 * structure for dma writeback hi reg in rxdma address map
321 * located at address 0x2008
322 * Defined earlier (u32)
323 */
324
325/*
326 * structure for number of packets done reg in rxdma address map
327 * located at address 0x200C
328 *
329 * 31-8: unused
330 * 7-0: num done
331 */
332
333/*
334 * structure for max packet time reg in rxdma address map
335 * located at address 0x2010
336 *
337 * 31-18: unused
338 * 17-0: time done
339 */
340
341/*
342 * structure for rx queue read address reg in rxdma address map
343 * located at address 0x2014
344 * Defined earlier (u32)
345 */
346
347/*
348 * structure for rx queue read address external reg in rxdma address map
349 * located at address 0x2018
350 * Defined earlier (u32)
351 */
352
353/*
354 * structure for rx queue write address reg in rxdma address map
355 * located at address 0x201C
356 * Defined earlier (u32)
357 */
358
359/*
360 * structure for packet status ring base address lo reg in rxdma address map
361 * located at address 0x2020
362 * Defined earlier (u32)
363 */
364
365/*
366 * structure for packet status ring base address hi reg in rxdma address map
367 * located at address 0x2024
368 * Defined earlier (u32)
369 */
370
371/*
372 * structure for packet status ring number of descriptors reg in rxdma address
373 * map. Located at address 0x2028
374 *
375 * 31-12: unused
376 * 11-0: psr ndes
377 */
378
379/*
380 * structure for packet status ring available offset reg in rxdma address map
381 * located at address 0x202C
382 *
383 * 31-13: unused
384 * 12: psr avail wrap
385 * 11-0: psr avail
386 */
387
388/*
389 * structure for packet status ring full offset reg in rxdma address map
390 * located at address 0x2030
391 *
392 * 31-13: unused
393 * 12: psr full wrap
394 * 11-0: psr full
395 */
396
397/*
398 * structure for packet status ring access index reg in rxdma address map
399 * located at address 0x2034
400 *
401 * 31-5: unused
402 * 4-0: psr_ai
403 */
404
405/*
406 * structure for packet status ring minimum descriptors reg in rxdma address
407 * map. Located at address 0x2038
408 *
409 * 31-12: unused
410 * 11-0: psr_min
411 */
412
413/*
414 * structure for free buffer ring base lo address reg in rxdma address map
415 * located at address 0x203C
416 * Defined earlier (u32)
417 */
418
419/*
420 * structure for free buffer ring base hi address reg in rxdma address map
421 * located at address 0x2040
422 * Defined earlier (u32)
423 */
424
425/*
426 * structure for free buffer ring number of descriptors reg in rxdma address
427 * map. Located at address 0x2044
428 *
429 * 31-10: unused
430 * 9-0: fbr ndesc
431 */
432
433/*
434 * structure for free buffer ring 0 available offset reg in rxdma address map
435 * located at address 0x2048
436 * Defined earlier (u32)
437 */
438
439/*
440 * structure for free buffer ring 0 full offset reg in rxdma address map
441 * located at address 0x204C
442 * Defined earlier (u32)
443 */
444
445/*
446 * structure for free buffer cache 0 read index reg in rxdma address map
447 * located at address 0x2050
448 *
449 * 31-5: unused
450 * 4-0: fbc rdi
451 */
452
453/*
454 * structure for free buffer ring 0 minimum descriptor reg in rxdma address map
455 * located at address 0x2054
456 *
457 * 31-10: unused
458 * 9-0: fbr min
459 */
460
461/*
462 * structure for free buffer ring 1 base address lo reg in rxdma address map
463 * located at address 0x2058 - 0x205C
464 * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t)
465 */
466
467/*
468 * structure for free buffer ring 1 number of descriptors reg in rxdma address
469 * map. Located at address 0x2060
470 * Defined earlier (RXDMA_FBR_NUM_DES_t)
471 */
472
473/*
474 * structure for free buffer ring 1 available offset reg in rxdma address map
475 * located at address 0x2064
476 * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t)
477 */
478
479/*
480 * structure for free buffer ring 1 full offset reg in rxdma address map
481 * located at address 0x2068
482 * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t)
483 */
484
485/*
486 * structure for free buffer cache 1 read index reg in rxdma address map
487 * located at address 0x206C
488 * Defined Earlier (RXDMA_FBC_RD_INDEX_t)
489 */
490
491/*
492 * structure for free buffer ring 1 minimum descriptor reg in rxdma address map
493 * located at address 0x2070
494 * Defined Earlier (RXDMA_FBR_MIN_DES_t)
495 */
496
497/*
498 * Rx DMA Module of JAGCore Address Mapping
499 * Located at address 0x2000
500 */
501struct rxdma_regs { /* Location: */
502 u32 csr; /* 0x2000 */
503 u32 dma_wb_base_lo; /* 0x2004 */
504 u32 dma_wb_base_hi; /* 0x2008 */
505 u32 num_pkt_done; /* 0x200C */
506 u32 max_pkt_time; /* 0x2010 */
507 u32 rxq_rd_addr; /* 0x2014 */
508 u32 rxq_rd_addr_ext; /* 0x2018 */
509 u32 rxq_wr_addr; /* 0x201C */
510 u32 psr_base_lo; /* 0x2020 */
511 u32 psr_base_hi; /* 0x2024 */
512 u32 psr_num_des; /* 0x2028 */
513 u32 psr_avail_offset; /* 0x202C */
514 u32 psr_full_offset; /* 0x2030 */
515 u32 psr_access_index; /* 0x2034 */
516 u32 psr_min_des; /* 0x2038 */
517 u32 fbr0_base_lo; /* 0x203C */
518 u32 fbr0_base_hi; /* 0x2040 */
519 u32 fbr0_num_des; /* 0x2044 */
520 u32 fbr0_avail_offset; /* 0x2048 */
521 u32 fbr0_full_offset; /* 0x204C */
522 u32 fbr0_rd_index; /* 0x2050 */
523 u32 fbr0_min_des; /* 0x2054 */
524 u32 fbr1_base_lo; /* 0x2058 */
525 u32 fbr1_base_hi; /* 0x205C */
526 u32 fbr1_num_des; /* 0x2060 */
527 u32 fbr1_avail_offset; /* 0x2064 */
528 u32 fbr1_full_offset; /* 0x2068 */
529 u32 fbr1_rd_index; /* 0x206C */
530 u32 fbr1_min_des; /* 0x2070 */
531};
532
533/* END OF RXDMA REGISTER ADDRESS MAP */
534
535
536/* START OF TXMAC REGISTER ADDRESS MAP */
537
538/*
539 * structure for control reg in txmac address map
540 * located at address 0x3000
541 *
542 * bits
543 * 31-8: unused
544 * 7: cklseg_disable
545 * 6: ckbcnt_disable
546 * 5: cksegnum
547 * 4: async_disable
548 * 3: fc_disable
549 * 2: mcif_disable
550 * 1: mif_disable
551 * 0: txmac_en
552 */
553
554/*
555 * structure for shadow pointer reg in txmac address map
556 * located at address 0x3004
557 * 31-27: reserved
558 * 26-16: txq rd ptr
559 * 15-11: reserved
560 * 10-0: txq wr ptr
561 */
562
563/*
564 * structure for error count reg in txmac address map
565 * located at address 0x3008
566 *
567 * 31-12: unused
568 * 11-8: reserved
569 * 7-4: txq_underrun
570 * 3-0: fifo_underrun
571 */
572
573/*
574 * structure for max fill reg in txmac address map
575 * located at address 0x300C
576 * 31-12: unused
577 * 11-0: max fill
578 */
579
580/*
581 * structure for cf parameter reg in txmac address map
582 * located at address 0x3010
583 * 31-16: cfep
584 * 15-0: cfpt
585 */
586
587/*
588 * structure for tx test reg in txmac address map
589 * located at address 0x3014
590 * 31-17: unused
591 * 16: reserved1
592 * 15: txtest_en
593 * 14-11: unused
594 * 10-0: txq test pointer
595 */
596
597/*
598 * structure for error reg in txmac address map
599 * located at address 0x3018
600 *
601 * 31-9: unused
602 * 8: fifo_underrun
603 * 7-6: unused
604 * 5: ctrl2_err
605 * 4: txq_underrun
606 * 3: bcnt_err
607 * 2: lseg_err
608 * 1: segnum_err
609 * 0: seg0_err
610 */
611
612/*
613 * structure for error interrupt reg in txmac address map
614 * located at address 0x301C
615 *
616 * 31-9: unused
617 * 8: fifo_underrun
618 * 7-6: unused
619 * 5: ctrl2_err
620 * 4: txq_underrun
621 * 3: bcnt_err
622 * 2: lseg_err
623 * 1: segnum_err
624 * 0: seg0_err
625 */
626
627/*
628 * structure for error interrupt reg in txmac address map
629 * located at address 0x3020
630 *
631 * 31-2: unused
632 * 1: bp_req
633 * 0: bp_xonxoff
634 */
635
636/*
637 * Tx MAC Module of JAGCore Address Mapping
638 */
639struct txmac_regs { /* Location: */
640 u32 ctl; /* 0x3000 */
641 u32 shadow_ptr; /* 0x3004 */
642 u32 err_cnt; /* 0x3008 */
643 u32 max_fill; /* 0x300C */
644 u32 cf_param; /* 0x3010 */
645 u32 tx_test; /* 0x3014 */
646 u32 err; /* 0x3018 */
647 u32 err_int; /* 0x301C */
648 u32 bp_ctrl; /* 0x3020 */
649};
650
651/* END OF TXMAC REGISTER ADDRESS MAP */
652
653/* START OF RXMAC REGISTER ADDRESS MAP */
654
655/*
656 * structure for rxmac control reg in rxmac address map
657 * located at address 0x4000
658 *
659 * 31-7: reserved
660 * 6: rxmac_int_disable
661 * 5: async_disable
662 * 4: mif_disable
663 * 3: wol_disable
664 * 2: pkt_filter_disable
665 * 1: mcif_disable
666 * 0: rxmac_en
667 */
668
669/*
670 * structure for Wake On Lan Control and CRC 0 reg in rxmac address map
671 * located at address 0x4004
672 * 31-16: crc
673 * 15-12: reserved
674 * 11: ignore_pp
675 * 10: ignore_mp
676 * 9: clr_intr
677 * 8: ignore_link_chg
678 * 7: ignore_uni
679 * 6: ignore_multi
680 * 5: ignore_broad
681 * 4-0: valid_crc 4-0
682 */
683
684/*
685 * structure for CRC 1 and CRC 2 reg in rxmac address map
686 * located at address 0x4008
687 *
688 * 31-16: crc2
689 * 15-0: crc1
690 */
691
692/*
693 * structure for CRC 3 and CRC 4 reg in rxmac address map
694 * located at address 0x400C
695 *
696 * 31-16: crc4
697 * 15-0: crc3
698 */
699
700/*
701 * structure for Wake On Lan Source Address Lo reg in rxmac address map
702 * located at address 0x4010
703 *
704 * 31-24: sa3
705 * 23-16: sa4
706 * 15-8: sa5
707 * 7-0: sa6
708 */
709
710#define ET_WOL_LO_SA3_SHIFT 24
711#define ET_WOL_LO_SA4_SHIFT 16
712#define ET_WOL_LO_SA5_SHIFT 8
713
714/*
715 * structure for Wake On Lan Source Address Hi reg in rxmac address map
716 * located at address 0x4014
717 *
718 * 31-16: reserved
719 * 15-8: sa1
720 * 7-0: sa2
721 */
722
723#define ET_WOL_HI_SA1_SHIFT 8
724
725/*
726 * structure for Wake On Lan mask reg in rxmac address map
727 * located at address 0x4018 - 0x4064
728 * Defined earlier (u32)
729 */
730
731/*
732 * structure for Unicast Packet Filter Address 1 reg in rxmac address map
733 * located at address 0x4068
734 *
735 * 31-24: addr1_3
736 * 23-16: addr1_4
737 * 15-8: addr1_5
738 * 7-0: addr1_6
739 */
740
741#define ET_UNI_PF_ADDR1_3_SHIFT 24
742#define ET_UNI_PF_ADDR1_4_SHIFT 16
743#define ET_UNI_PF_ADDR1_5_SHIFT 8
744
745/*
746 * structure for Unicast Packet Filter Address 2 reg in rxmac address map
747 * located at address 0x406C
748 *
749 * 31-24: addr2_3
750 * 23-16: addr2_4
751 * 15-8: addr2_5
752 * 7-0: addr2_6
753 */
754
755#define ET_UNI_PF_ADDR2_3_SHIFT 24
756#define ET_UNI_PF_ADDR2_4_SHIFT 16
757#define ET_UNI_PF_ADDR2_5_SHIFT 8
758
759/*
760 * structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map
761 * located at address 0x4070
762 *
763 * 31-24: addr2_1
764 * 23-16: addr2_2
765 * 15-8: addr1_1
766 * 7-0: addr1_2
767 */
768
769#define ET_UNI_PF_ADDR2_1_SHIFT 24
770#define ET_UNI_PF_ADDR2_2_SHIFT 16
771#define ET_UNI_PF_ADDR1_1_SHIFT 8
772
773
774/*
775 * structure for Multicast Hash reg in rxmac address map
776 * located at address 0x4074 - 0x4080
777 * Defined earlier (u32)
778 */
779
780/*
781 * structure for Packet Filter Control reg in rxmac address map
782 * located at address 0x4084
783 *
784 * 31-23: unused
785 * 22-16: min_pkt_size
786 * 15-4: unused
787 * 3: filter_frag_en
788 * 2: filter_uni_en
789 * 1: filter_multi_en
790 * 0: filter_broad_en
791 */
792
793/*
794 * structure for Memory Controller Interface Control Max Segment reg in rxmac
795 * address map. Located at address 0x4088
796 *
797 * 31-10: reserved
798 * 9-2: max_size
799 * 1: fc_en
800 * 0: seg_en
801 */
802
803/*
804 * structure for Memory Controller Interface Water Mark reg in rxmac address
805 * map. Located at address 0x408C
806 *
807 * 31-26: unused
808 * 25-16: mark_hi
809 * 15-10: unused
810 * 9-0: mark_lo
811 */
812
813/*
814 * structure for Rx Queue Diagnostic reg in rxmac address map.
815 * located at address 0x4090
816 *
817 * 31-26: reserved
818 * 25-16: rd_ptr
819 * 15-10: reserved
820 * 9-0: wr_ptr
821 */
822
823/*
824 * structure for space available reg in rxmac address map.
825 * located at address 0x4094
826 *
827 * 31-17: reserved
828 * 16: space_avail_en
829 * 15-10: reserved
830 * 9-0: space_avail
831 */
832
833/*
834 * structure for management interface reg in rxmac address map.
835 * located at address 0x4098
836 *
837 * 31-18: reserved
838 * 17: drop_pkt_en
839 * 16-0: drop_pkt_mask
840 */
841
842/*
843 * structure for Error reg in rxmac address map.
844 * located at address 0x409C
845 *
846 * 31-4: unused
847 * 3: mif
848 * 2: async
849 * 1: pkt_filter
850 * 0: mcif
851 */
852
853/*
854 * Rx MAC Module of JAGCore Address Mapping
855 */
856struct rxmac_regs { /* Location: */
857 u32 ctrl; /* 0x4000 */
858 u32 crc0; /* 0x4004 */
859 u32 crc12; /* 0x4008 */
860 u32 crc34; /* 0x400C */
861 u32 sa_lo; /* 0x4010 */
862 u32 sa_hi; /* 0x4014 */
863 u32 mask0_word0; /* 0x4018 */
864 u32 mask0_word1; /* 0x401C */
865 u32 mask0_word2; /* 0x4020 */
866 u32 mask0_word3; /* 0x4024 */
867 u32 mask1_word0; /* 0x4028 */
868 u32 mask1_word1; /* 0x402C */
869 u32 mask1_word2; /* 0x4030 */
870 u32 mask1_word3; /* 0x4034 */
871 u32 mask2_word0; /* 0x4038 */
872 u32 mask2_word1; /* 0x403C */
873 u32 mask2_word2; /* 0x4040 */
874 u32 mask2_word3; /* 0x4044 */
875 u32 mask3_word0; /* 0x4048 */
876 u32 mask3_word1; /* 0x404C */
877 u32 mask3_word2; /* 0x4050 */
878 u32 mask3_word3; /* 0x4054 */
879 u32 mask4_word0; /* 0x4058 */
880 u32 mask4_word1; /* 0x405C */
881 u32 mask4_word2; /* 0x4060 */
882 u32 mask4_word3; /* 0x4064 */
883 u32 uni_pf_addr1; /* 0x4068 */
884 u32 uni_pf_addr2; /* 0x406C */
885 u32 uni_pf_addr3; /* 0x4070 */
886 u32 multi_hash1; /* 0x4074 */
887 u32 multi_hash2; /* 0x4078 */
888 u32 multi_hash3; /* 0x407C */
889 u32 multi_hash4; /* 0x4080 */
890 u32 pf_ctrl; /* 0x4084 */
891 u32 mcif_ctrl_max_seg; /* 0x4088 */
892 u32 mcif_water_mark; /* 0x408C */
893 u32 rxq_diag; /* 0x4090 */
894 u32 space_avail; /* 0x4094 */
895
896 u32 mif_ctrl; /* 0x4098 */
897 u32 err_reg; /* 0x409C */
898};
899
900/* END OF RXMAC REGISTER ADDRESS MAP */
901
902
903/* START OF MAC REGISTER ADDRESS MAP */
904
905/*
906 * structure for configuration #1 reg in mac address map.
907 * located at address 0x5000
908 *
909 * 31: soft reset
910 * 30: sim reset
911 * 29-20: reserved
912 * 19: reset rx mc
913 * 18: reset tx mc
914 * 17: reset rx func
915 * 16: reset tx fnc
916 * 15-9: reserved
917 * 8: loopback
918 * 7-6: reserved
919 * 5: rx flow
920 * 4: tx flow
921 * 3: syncd rx en
922 * 2: rx enable
923 * 1: syncd tx en
924 * 0: tx enable
925 */
926
927#define CFG1_LOOPBACK 0x00000100
928#define CFG1_RX_FLOW 0x00000020
929#define CFG1_TX_FLOW 0x00000010
930#define CFG1_RX_ENABLE 0x00000004
931#define CFG1_TX_ENABLE 0x00000001
932#define CFG1_WAIT 0x0000000A /* RX & TX syncd */
933
934/*
935 * structure for configuration #2 reg in mac address map.
936 * located at address 0x5004
937 * 31-16: reserved
938 * 15-12: preamble
939 * 11-10: reserved
940 * 9-8: if mode
941 * 7-6: reserved
942 * 5: huge frame
943 * 4: length check
944 * 3: undefined
945 * 2: pad crc
946 * 1: crc enable
947 * 0: full duplex
948 */
949
950
951/*
952 * structure for Interpacket gap reg in mac address map.
953 * located at address 0x5008
954 *
955 * 31: reserved
956 * 30-24: non B2B ipg 1
957 * 23: undefined
958 * 22-16: non B2B ipg 2
959 * 15-8: Min ifg enforce
960 * 7-0: B2B ipg
961 *
962 * structure for half duplex reg in mac address map.
963 * located at address 0x500C
964 * 31-24: reserved
965 * 23-20: Alt BEB trunc
966 * 19: Alt BEB enable
967 * 18: BP no backoff
968 * 17: no backoff
969 * 16: excess defer
970 * 15-12: re-xmit max
971 * 11-10: reserved
972 * 9-0: collision window
973 */
974
975/*
976 * structure for Maximum Frame Length reg in mac address map.
977 * located at address 0x5010: bits 0-15 hold the length.
978 */
979
980/*
981 * structure for Reserve 1 reg in mac address map.
982 * located at address 0x5014 - 0x5018
983 * Defined earlier (u32)
984 */
985
986/*
987 * structure for Test reg in mac address map.
988 * located at address 0x501C
989 * test: bits 0-2, rest unused
990 */
991
992/*
993 * structure for MII Management Configuration reg in mac address map.
994 * located at address 0x5020
995 *
996 * 31: reset MII mgmt
997 * 30-6: unused
998 * 5: scan auto increment
999 * 4: preamble suppress
1000 * 3: undefined
1001 * 2-0: mgmt clock reset
1002 */
1003
1004/*
1005 * structure for MII Management Command reg in mac address map.
1006 * located at address 0x5024
1007 * bit 1: scan cycle
1008 * bit 0: read cycle
1009 */
1010
1011/*
1012 * structure for MII Management Address reg in mac address map.
1013 * located at address 0x5028
1014 * 31-13: reserved
1015 * 12-8: phy addr
1016 * 7-5: reserved
1017 * 4-0: register
1018 */
1019
1020#define MII_ADDR(phy, reg) ((phy) << 8 | (reg))
1021
1022/*
1023 * structure for MII Management Control reg in mac address map.
1024 * located at address 0x502C
1025 * 31-16: reserved
1026 * 15-0: phy control
1027 */
1028
1029/*
1030 * structure for MII Management Status reg in mac address map.
1031 * located at address 0x5030
1032 * 31-16: reserved
1033 * 15-0: phy status (read data)
1034 */
1035
1036/*
1037 * structure for MII Management Indicators reg in mac address map.
1038 * located at address 0x5034
1039 * 31-3: reserved
1040 * 2: not valid
1041 * 1: scanning
1042 * 0: busy
1043 */
1044
1045#define MGMT_BUSY 0x00000001 /* busy */
1046#define MGMT_WAIT 0x00000005 /* busy | not valid */
1047
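Taken together, the MII management registers at 0x5020-0x5034 form a small MDIO master: MII_ADDR() selects the PHY and register, bit 0 of the command register starts a read cycle, the indicator register is polled until the busy/not-valid bits (MGMT_WAIT) clear, and the result is read from the status register. The driver's own PHY access helpers are in et1310_phy.c; the routine below is only a hedged sketch of that register flow, not a copy of them:

/* Illustration only, not the driver's own PHY read routine */
static int mii_read_sketch(struct et131x_adapter *etdev,
			   u8 phy, u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &etdev->regs->mac;
	int delay = 0;

	writel(MII_ADDR(phy, reg), &mac->mii_mgmt_addr);
	writel(0x1, &mac->mii_mgmt_cmd);	/* start read cycle */

	do {
		udelay(50);
	} while ((readl(&mac->mii_mgmt_indicator) & MGMT_WAIT) &&
		 ++delay < 50);

	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
	writel(0, &mac->mii_mgmt_cmd);		/* end the cycle */
	return delay < 50 ? 0 : -EIO;
}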
1048/*
1049 * structure for Interface Control reg in mac address map.
1050 * located at address 0x5038
1051 *
1052 * 31: reset if module
1053 * 30-28: reserved
1054 * 27: tbi mode
1055 * 26: ghd mode
1056 * 25: lhd mode
1057 * 24: phy mode
1058 * 23: reset per mii
1059 * 22-17: reserved
1060 * 16: speed
1061 * 15: reset pe100x
1062 * 14-11: reserved
1063 * 10: force quiet
1064 * 9: no cipher
1065 * 8: disable link fail
1066 * 7: reset gpsi
1067 * 6-1: reserved
1068 * 0: enable jabber protection
1069 */
1070
1071/*
1072 * structure for Interface Status reg in mac address map.
1073 * located at address 0x503C
1074 *
1075 * 31-10: reserved
1076 * 9: excess_defer
1077 * 8: clash
1078 * 7: phy_jabber
1079 * 6: phy_link_ok
1080 * 5: phy_full_duplex
1081 * 4: phy_speed
1082 * 3: pe100x_link_fail
1083 * 2: pe10t_loss_carrier
1084 * 1: pe10t_sqe_error
1085 * 0: pe10t_jabber
1086 */
1087
1088/*
1089 * structure for Mac Station Address, Part 1 reg in mac address map.
1090 * located at address 0x5040
1091 *
1092 * 31-24: Octet6
1093 * 23-16: Octet5
1094 * 15-8: Octet4
1095 * 7-0: Octet3
1096 */
1097
1098#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24
1099#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16
1100#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8
1101
1102/*
1103 * structure for Mac Station Address, Part 2 reg in mac address map.
1104 * located at address 0x5044
1105 *
1106 * 31-24: Octet2
1107 * 23-16: Octet1
1108 * 15-0: reserved
1109 */
1110
1111#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24
1112#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16
1113
1114/*
1115 * MAC Module of JAGCore Address Mapping
1116 */
1117struct mac_regs { /* Location: */
1118 u32 cfg1; /* 0x5000 */
1119 u32 cfg2; /* 0x5004 */
1120 u32 ipg; /* 0x5008 */
1121 u32 hfdp; /* 0x500C */
1122 u32 max_fm_len; /* 0x5010 */
1123 u32 rsv1; /* 0x5014 */
1124 u32 rsv2; /* 0x5018 */
1125 u32 mac_test; /* 0x501C */
1126 u32 mii_mgmt_cfg; /* 0x5020 */
1127 u32 mii_mgmt_cmd; /* 0x5024 */
1128 u32 mii_mgmt_addr; /* 0x5028 */
1129 u32 mii_mgmt_ctrl; /* 0x502C */
1130 u32 mii_mgmt_stat; /* 0x5030 */
1131 u32 mii_mgmt_indicator; /* 0x5034 */
1132 u32 if_ctrl; /* 0x5038 */
1133 u32 if_stat; /* 0x503C */
1134 u32 station_addr_1; /* 0x5040 */
1135 u32 station_addr_2; /* 0x5044 */
1136};
1137
1138/* END OF MAC REGISTER ADDRESS MAP */
1139
1140/* START OF MAC STAT REGISTER ADDRESS MAP */
1141
1142/*
1143 * structure for Carry Register One and its Mask Register, located in the mac
1144 * stat address map at addresses 0x6130 and 0x6138.
1145 *
1146 * 31: tr64
1147 * 30: tr127
1148 * 29: tr255
1149 * 28: tr511
1150 * 27: tr1k
1151 * 26: trmax
1152 * 25: trmgv
1153 * 24-17: unused
1154 * 16: rbyt
1155 * 15: rpkt
1156 * 14: rfcs
1157 * 13: rmca
1158 * 12: rbca
1159 * 11: rxcf
1160 * 10: rxpf
1161 * 9: rxuo
1162 * 8: raln
1163 * 7: rflr
1164 * 6: rcde
1165 * 5: rcse
1166 * 4: rund
1167 * 3: rovr
1168 * 2: rfrg
1169 * 1: rjbr
1170 * 0: rdrp
1171 */
1172
1173/*
1174 * structure for Carry Register Two Mask Register reg in mac stat address map.
1175 * located at address 0x613C
1176 *
1177 * 31-20: unused
1178 * 19: tjbr
1179 * 18: tfcs
1180 * 17: txcf
1181 * 16: tovr
1182 * 15: tund
1183 * 14: trfg
1184 * 13: tbyt
1185 * 12: tpkt
1186 * 11: tmca
1187 * 10: tbca
1188 * 9: txpf
1189 * 8: tdfr
1190 * 7: tedf
1191 * 6: tscl
1192 * 5: tmcl
1193 * 4: tlcl
1194 * 3: txcl
1195 * 2: tncl
1196 * 1: tpfh
1197 * 0: tdrp
1198 */
1199
1200/*
1201 * MAC STATS Module of JAGCore Address Mapping
1202 */
1203struct macstat_regs { /* Location: */
1204 u32 pad[32]; /* 0x6000 - 0x607C */
1205
1206 /* Tx/Rx 0-64 Byte Frame Counter */
1207 u32 txrx_0_64_byte_frames; /* 0x6080 */
1208
1209 /* Tx/Rx 65-127 Byte Frame Counter */
1210 u32 txrx_65_127_byte_frames; /* 0x6084 */
1211
1212 /* Tx/Rx 128-255 Byte Frame Counter */
1213 u32 txrx_128_255_byte_frames; /* 0x6088 */
1214
1215 /* Tx/Rx 256-511 Byte Frame Counter */
1216 u32 txrx_256_511_byte_frames; /* 0x608C */
1217
1218 /* Tx/Rx 512-1023 Byte Frame Counter */
1219 u32 txrx_512_1023_byte_frames; /* 0x6090 */
1220
1221 /* Tx/Rx 1024-1518 Byte Frame Counter */
1222 u32 txrx_1024_1518_byte_frames; /* 0x6094 */
1223
1224 /* Tx/Rx 1519-1522 Byte Good VLAN Frame Count */
1225 u32 txrx_1519_1522_gvln_frames; /* 0x6098 */
1226
1227 /* Rx Byte Counter */
1228 u32 rx_bytes; /* 0x609C */
1229
1230 /* Rx Packet Counter */
1231 u32 rx_packets; /* 0x60A0 */
1232
1233 /* Rx FCS Error Counter */
1234 u32 rx_fcs_errs; /* 0x60A4 */
1235
1236 /* Rx Multicast Packet Counter */
1237 u32 rx_multicast_packets; /* 0x60A8 */
1238
1239 /* Rx Broadcast Packet Counter */
1240 u32 rx_broadcast_packets; /* 0x60AC */
1241
1242 /* Rx Control Frame Packet Counter */
1243 u32 rx_control_frames; /* 0x60B0 */
1244
1245 /* Rx Pause Frame Packet Counter */
1246 u32 rx_pause_frames; /* 0x60B4 */
1247
1248 /* Rx Unknown OP Code Counter */
1249 u32 rx_unknown_opcodes; /* 0x60B8 */
1250
1251 /* Rx Alignment Error Counter */
1252 u32 rx_align_errs; /* 0x60BC */
1253
1254 /* Rx Frame Length Error Counter */
1255 u32 rx_frame_len_errs; /* 0x60C0 */
1256
1257 /* Rx Code Error Counter */
1258 u32 rx_code_errs; /* 0x60C4 */
1259
1260 /* Rx Carrier Sense Error Counter */
1261 u32 rx_carrier_sense_errs; /* 0x60C8 */
1262
1263 /* Rx Undersize Packet Counter */
1264 u32 rx_undersize_packets; /* 0x60CC */
1265
1266 /* Rx Oversize Packet Counter */
1267 u32 rx_oversize_packets; /* 0x60D0 */
1268
1269 /* Rx Fragment Counter */
1270 u32 rx_fragment_packets; /* 0x60D4 */
1271
1272 /* Rx Jabber Counter */
1273 u32 rx_jabbers; /* 0x60D8 */
1274
1275 /* Rx Drop */
1276 u32 rx_drops; /* 0x60DC */
1277
1278 /* Tx Byte Counter */
1279 u32 tx_bytes; /* 0x60E0 */
1280
1281 /* Tx Packet Counter */
1282 u32 tx_packets; /* 0x60E4 */
1283
1284 /* Tx Multicast Packet Counter */
1285 u32 tx_multicast_packets; /* 0x60E8 */
1286
1287 /* Tx Broadcast Packet Counter */
1288 u32 tx_broadcast_packets; /* 0x60EC */
1289
1290 /* Tx Pause Control Frame Counter */
1291 u32 tx_pause_frames; /* 0x60F0 */
1292
1293 /* Tx Deferral Packet Counter */
1294 u32 tx_deferred; /* 0x60F4 */
1295
1296 /* Tx Excessive Deferral Packet Counter */
1297 u32 tx_excessive_deferred; /* 0x60F8 */
1298
1299 /* Tx Single Collision Packet Counter */
1300 u32 tx_single_collisions; /* 0x60FC */
1301
1302 /* Tx Multiple Collision Packet Counter */
1303 u32 tx_multiple_collisions; /* 0x6100 */
1304
1305 /* Tx Late Collision Packet Counter */
1306 u32 tx_late_collisions; /* 0x6104 */
1307
1308 /* Tx Excessive Collision Packet Counter */
1309 u32 tx_excessive_collisions; /* 0x6108 */
1310
1311 /* Tx Total Collision Packet Counter */
1312 u32 tx_total_collisions; /* 0x610C */
1313
1314 /* Tx Pause Frame Honored Counter */
1315 u32 tx_pause_honored_frames; /* 0x6110 */
1316
1317 /* Tx Drop Frame Counter */
1318 u32 tx_drops; /* 0x6114 */
1319
1320 /* Tx Jabber Frame Counter */
1321 u32 tx_jabbers; /* 0x6118 */
1322
1323 /* Tx FCS Error Counter */
1324 u32 tx_fcs_errs; /* 0x611C */
1325
1326 /* Tx Control Frame Counter */
1327 u32 tx_control_frames; /* 0x6120 */
1328
1329 /* Tx Oversize Frame Counter */
1330 u32 tx_oversize_frames; /* 0x6124 */
1331
1332 /* Tx Undersize Frame Counter */
1333 u32 tx_undersize_frames; /* 0x6128 */
1334
1335 /* Tx Fragments Frame Counter */
1336 u32 tx_fragments; /* 0x612C */
1337
1338 /* Carry Register One Register */
1339 u32 carry_reg1; /* 0x6130 */
1340
1341 /* Carry Register Two Register */
1342 u32 carry_reg2; /* 0x6134 */
1343
1344 /* Carry Register One Mask Register */
1345 u32 carry_reg1_mask; /* 0x6138 */
1346
1347 /* Carry Register Two Mask Register */
1348 u32 carry_reg2_mask; /* 0x613C */
1349};
1350
1351/* END OF MAC STAT REGISTER ADDRESS MAP */
1352
1353
1354/* START OF MMC REGISTER ADDRESS MAP */
1355
1356/*
1357 * Main Memory Controller Control reg in mmc address map.
1358 * located at address 0x7000
1359 */
1360
1361#define ET_MMC_ENABLE 1
1362#define ET_MMC_ARB_DISABLE 2
1363#define ET_MMC_RXMAC_DISABLE 4
1364#define ET_MMC_TXMAC_DISABLE 8
1365#define ET_MMC_TXDMA_DISABLE 16
1366#define ET_MMC_RXDMA_DISABLE 32
1367#define ET_MMC_FORCE_CE 64
1368
1369/*
1370 * Main Memory Controller Host Memory Access Address reg in mmc
1371 * address map. Located at address 0x7004. Top 16 bits hold the address bits
1372 */
1373
1374#define ET_SRAM_REQ_ACCESS 1
1375#define ET_SRAM_WR_ACCESS 2
1376#define ET_SRAM_IS_CTRL 4
1377
1378/*
1379 * structure for Main Memory Controller Host Memory Access Data reg in mmc
1380 * address map. Located at address 0x7008 - 0x7014
1381 * Defined earlier (u32)
1382 */
1383
1384/*
1385 * Memory Control Module of JAGCore Address Mapping
1386 */
1387struct mmc_regs { /* Location: */
1388 u32 mmc_ctrl; /* 0x7000 */
1389 u32 sram_access; /* 0x7004 */
1390 u32 sram_word1; /* 0x7008 */
1391 u32 sram_word2; /* 0x700C */
1392 u32 sram_word3; /* 0x7010 */
1393 u32 sram_word4; /* 0x7014 */
1394};
1395
1396/* END OF MMC REGISTER ADDRESS MAP */
1397
1398
1399/*
1400 * JAGCore Address Mapping
1401 */
1402struct address_map {
1403 struct global_regs global;
1404 /* unused section of global address map */
1405 u8 unused_global[4096 - sizeof(struct global_regs)];
1406 struct txdma_regs txdma;
1407 /* unused section of txdma address map */
1408 u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
1409 struct rxdma_regs rxdma;
1410 /* unused section of rxdma address map */
1411 u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
1412 struct txmac_regs txmac;
1413 /* unused section of txmac address map */
1414 u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
1415 struct rxmac_regs rxmac;
1416 /* unused section of rxmac address map */
1417 u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
1418 struct mac_regs mac;
1419 /* unused section of mac address map */
1420 u8 unused_mac[4096 - sizeof(struct mac_regs)];
1421 struct macstat_regs macstat;
1422 /* unused section of mac stat address map */
1423 u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
1424 struct mmc_regs mmc;
1425 /* unused section of mmc address map */
1426 u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
1427 /* unused section of address map */
1428 u8 unused_[1015808];
1429
1430 u8 unused_exp_rom[4096]; /* MGS-size TBD */
1431 u8 unused__[524288]; /* unused section of address map */
1432};
1433
1434#endif /* _ET1310_ADDRESS_MAP_H_ */
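struct address_map overlays the device's memory-mapped register BAR, so once the BAR has been ioremapped every register can be reached by structure member instead of a raw offset. A minimal sketch of that pattern, assuming the mapping is kept in a field named regs as it is in the .c files of this patch (the mask-all value and its polarity are assumptions of the example, not taken from this header):

	struct address_map __iomem *regs;
	u32 status, halt;

	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	writel(0xffffffff, &regs->global.int_mask);	/* assumed: 1 = masked */
	status = readl(&regs->global.int_status);	/* 0x0018 in the map */
	halt = readl(&regs->txdma.csr) & ET_TXDMA_CSR_HALT;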
diff --git a/drivers/staging/et131x/et1310_eeprom.c b/drivers/staging/et131x/et1310_eeprom.c
new file mode 100644
index 00000000000..237584001a8
--- /dev/null
+++ b/drivers/staging/et131x/et1310_eeprom.c
@@ -0,0 +1,407 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_eeprom.c - Code used to access the device's EEPROM
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/ctype.h>
70#include <linux/string.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/in.h>
74#include <linux/delay.h>
75#include <linux/bitops.h>
76#include <linux/io.h>
77#include <asm/system.h>
78
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/if_arp.h>
83#include <linux/ioport.h>
84
85#include "et1310_phy.h"
86#include "et131x_adapter.h"
87#include "et131x.h"
88
89/*
90 * EEPROM Defines
91 */
92
93/* LBCIF Register Groups (addressed via 32-bit offsets) */
94#define LBCIF_DWORD0_GROUP 0xAC
95#define LBCIF_DWORD1_GROUP 0xB0
96
97/* LBCIF Registers (addressed via 8-bit offsets) */
98#define LBCIF_ADDRESS_REGISTER 0xAC
99#define LBCIF_DATA_REGISTER 0xB0
100#define LBCIF_CONTROL_REGISTER 0xB1
101#define LBCIF_STATUS_REGISTER 0xB2
102
103/* LBCIF Control Register Bits */
104#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01
105#define LBCIF_CONTROL_PAGE_WRITE 0x02
106#define LBCIF_CONTROL_EEPROM_RELOAD 0x08
107#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20
108#define LBCIF_CONTROL_I2C_WRITE 0x40
109#define LBCIF_CONTROL_LBCIF_ENABLE 0x80
110
111/* LBCIF Status Register Bits */
112#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01
113#define LBCIF_STATUS_I2C_IDLE 0x02
114#define LBCIF_STATUS_ACK_ERROR 0x04
115#define LBCIF_STATUS_GENERAL_ERROR 0x08
116#define LBCIF_STATUS_CHECKSUM_ERROR 0x40
117#define LBCIF_STATUS_EEPROM_PRESENT 0x80
118
119/* Miscellaneous Constraints */
120#define MAX_NUM_REGISTER_POLLS 1000
121#define MAX_NUM_WRITE_RETRIES 2
122
123static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
124{
125 u32 reg;
126 int i;
127
128 /*
129 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
130 * bits 7, 1:0 all equal to 1, at least once after reset.
131 * Subsequent operations need only to check that bits 1:0 are equal
132 * to 1 prior to starting a single byte read/write
133 */
134
135 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
136 /* Read registers grouped in DWORD1 */
137 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
138 return -EIO;
139
140 /* I2C idle and Phy Queue Avail both true */
141 if ((reg & 0x3000) == 0x3000) {
142 if (status)
143 *status = reg;
144 return reg & 0xFF;
145 }
146 }
147 return -ETIMEDOUT;
148}
149
150
151/**
152 * eeprom_write - Write a byte to the ET1310's EEPROM
153 * @etdev: pointer to our private adapter structure
154 * @addr: the address to write
155 * @data: the value to write
156 *
157 * Returns 0 on success, or a negative errno on failure.
158 */
159static int eeprom_write(struct et131x_adapter *etdev, u32 addr, u8 data)
160{
161 struct pci_dev *pdev = etdev->pdev;
162 int index = 0;
163 int retries;
164 int err = 0;
165 int i2c_wack = 0;
166 int writeok = 0;
167 u32 status;
168 u32 val = 0;
169
170 /*
171 * For an EEPROM, an I2C single byte write is defined as a START
172 * condition followed by the device address, EEPROM address, one byte
173 * of data and a STOP condition. The STOP condition will trigger the
174 * EEPROM's internally timed write cycle to the nonvolatile memory.
175 * All inputs are disabled during this write cycle and the EEPROM will
176 * not respond to any access until the internal write is complete.
177 */
178
179 err = eeprom_wait_ready(pdev, NULL);
180 if (err)
181 return err;
182
183 /*
184 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
185 * and bits 1:0 both =0. Bit 5 should be set according to the
186 * type of EEPROM being accessed (1=two byte addressing, 0=one
187 * byte addressing).
188 */
189 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
190 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
191 return -EIO;
192
193 i2c_wack = 1;
194
195 /* Prepare EEPROM address for Step 3 */
196
197 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
198 /* Write the address to the LBCIF Address Register */
199 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
200 break;
201 /*
202 * Write the data to the LBCIF Data Register (the I2C write
203 * will begin).
204 */
205 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
206 break;
207 /*
208 * Monitor bit 1:0 of the LBCIF Status Register. When bits
209 * 1:0 are both equal to 1, the I2C write has completed and the
210 * internal write cycle of the EEPROM is about to start.
211 * (bits 1:0 = 01 is a legal state while waiting for both to
212 * equal 1, but bits 1:0 = 10 is invalid and implies that
213 * something is broken).
214 */
215 err = eeprom_wait_ready(pdev, &status);
216 if (err < 0)
217 return 0;
218
219 /*
220 * Check bit 3 of the LBCIF Status Register. If equal to 1,
221 * an error has occurred. Don't break here if we are revision
222 * 1; this lets us do a blind write to work around the load bug.
223 */
224 if ((status & LBCIF_STATUS_GENERAL_ERROR)
225 && etdev->pdev->revision == 0)
226 break;
227
228 /*
229 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
230 * ACK error has occurred on the address phase of the write.
231 * This could be due to an actual hardware failure or the
232 * EEPROM may still be in its internal write cycle from a
233 * previous write. This write operation was ignored and must be
234 * repeated later.
235 */
236 if (status & LBCIF_STATUS_ACK_ERROR) {
237 /*
238 * This could be due to an actual hardware failure
239 * or the EEPROM may still be in its internal write
240 * cycle from a previous write. This write operation
241 * was ignored and must be repeated later.
242 */
243 udelay(10);
244 continue;
245 }
246
247 writeok = 1;
248 break;
249 }
250
251 /*
252 * Set bit 6 of the LBCIF Control Register = 0.
253 */
254 udelay(10);
255
256 while (i2c_wack) {
257 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
258 LBCIF_CONTROL_LBCIF_ENABLE))
259 writeok = 0;
260
261 /* Do read until internal ACK_ERROR goes away meaning write
262 * completed
263 */
264 do {
265 pci_write_config_dword(pdev,
266 LBCIF_ADDRESS_REGISTER,
267 addr);
268 do {
269 pci_read_config_dword(pdev,
270 LBCIF_DATA_REGISTER, &val);
271 } while ((val & 0x00010000) == 0);
272 } while (val & 0x00040000);
273
274 if ((val & 0xFF00) != 0xC000 || index == 10000)
275 break;
276 index++;
277 }
278 return writeok ? 0 : -EIO;
279}
280
281/**
282 * eeprom_read - Read a byte from the ET1310's EEPROM
283 * @etdev: pointer to our private adapter structure
284 * @addr: the address from which to read
285 * @pdata: a pointer to a byte in which to store the value of the read
288 *
289 * Returns 0 on success, or a negative errno on failure.
290 */
291static int eeprom_read(struct et131x_adapter *etdev, u32 addr, u8 *pdata)
292{
293 struct pci_dev *pdev = etdev->pdev;
294 int err;
295 u32 status;
296
297 /*
298 * A single byte read is similar to the single byte write, with the
299 * exception of the data flow:
300 */
301
302 err = eeprom_wait_ready(pdev, NULL);
303 if (err)
304 return err;
305 /*
306 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
307 * and bits 1:0 both =0. Bit 5 should be set according to the type
308 * of EEPROM being accessed (1=two byte addressing, 0=one byte
309 * addressing).
310 */
311 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
312 LBCIF_CONTROL_LBCIF_ENABLE))
313 return -EIO;
314 /*
315 * Write the address to the LBCIF Address Register (I2C read will
316 * begin).
317 */
318 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
319 return -EIO;
320 /*
321 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
322 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
323 * has occurred).
324 */
325 err = eeprom_wait_ready(pdev, &status);
326 if (err < 0)
327 return err;
328 /*
329 * Regardless of error status, read data byte from LBCIF Data
330 * Register.
331 */
332 *pdata = err;
333 /*
334 * Check bit 2 of the LBCIF Status Register. If = 1,
335 * then an error has occurred.
336 */
337 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
338}
339
340int et131x_init_eeprom(struct et131x_adapter *etdev)
341{
342 struct pci_dev *pdev = etdev->pdev;
343 u8 eestatus;
344
345 /* We first need to check the EEPROM Status code located at offset
346 * 0xB2 of config space
347 */
348 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
349 &eestatus);
350
351 /* THIS IS A WORKAROUND:
352 * I need to call this function twice to get my card in an
353 * LG M1 Express Dual running. I also tried an msleep before this
354 * function, because I thought there could be some timing conditions,
355 * but it didn't work. Calling the whole function twice also works.
356 */
357 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
358 dev_err(&pdev->dev,
359 "Could not read PCI config space for EEPROM Status\n");
360 return -EIO;
361 }
362
363 /* Determine if the error(s) we care about are present. If they are
364 * present we need to fail.
365 */
366 if (eestatus & 0x4C) {
367 int write_failed = 0;
368 if (pdev->revision == 0x01) {
369 int i;
370 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
371
372 /* Re-write the first 4 bytes if we have an eeprom
373 * present and the revision id is 1, this fixes the
374 * corruption seen with 1310 B Silicon
375 */
376 for (i = 0; i < 3; i++)
377 if (eeprom_write(etdev, i, eedata[i]) < 0)
378 write_failed = 1;
379 }
380 if (pdev->revision != 0x01 || write_failed) {
381 dev_err(&pdev->dev,
382 "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
383
384 /* This error could mean that there was an error
385 * reading the eeprom or that the eeprom doesn't exist.
386 * We will treat each case the same and not try to
387 * gather additional information that normally would
388 * come from the eeprom, like MAC Address
389 */
390 etdev->has_eeprom = 0;
391 return -EIO;
392 }
393 }
394 etdev->has_eeprom = 1;
395
396 /* Read the EEPROM for information regarding LED behavior. Refer to
397 * et1310_phy.c, et131x_xcvr_init(), for its use.
398 */
399 eeprom_read(etdev, 0x70, &etdev->eeprom_data[0]);
400 eeprom_read(etdev, 0x71, &etdev->eeprom_data[1]);
401
402 if (etdev->eeprom_data[0] != 0xcd)
403 /* Disable all optional features */
404 etdev->eeprom_data[1] = 0x00;
405
406 return 0;
407}
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
new file mode 100644
index 00000000000..656be4b99cf
--- /dev/null
+++ b/drivers/staging/et131x/et1310_mac.c
@@ -0,0 +1,654 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_mac.c - All code and routines pertaining to the MAC
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/init.h>
62#include <linux/module.h>
63#include <linux/types.h>
64#include <linux/kernel.h>
65
66#include <linux/sched.h>
67#include <linux/ptrace.h>
68#include <linux/ctype.h>
69#include <linux/string.h>
70#include <linux/timer.h>
71#include <linux/interrupt.h>
72#include <linux/in.h>
73#include <linux/delay.h>
74#include <linux/io.h>
75#include <linux/bitops.h>
76#include <linux/pci.h>
77#include <asm/system.h>
78
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/if_arp.h>
83#include <linux/ioport.h>
84#include <linux/crc32.h>
85
86#include "et1310_phy.h"
87#include "et131x_adapter.h"
88#include "et131x.h"
89
90
91#define COUNTER_WRAP_28_BIT 0x10000000
92#define COUNTER_WRAP_22_BIT 0x400000
93#define COUNTER_WRAP_16_BIT 0x10000
94#define COUNTER_WRAP_12_BIT 0x1000
95
96#define COUNTER_MASK_28_BIT (COUNTER_WRAP_28_BIT - 1)
97#define COUNTER_MASK_22_BIT (COUNTER_WRAP_22_BIT - 1)
98#define COUNTER_MASK_16_BIT (COUNTER_WRAP_16_BIT - 1)
99#define COUNTER_MASK_12_BIT (COUNTER_WRAP_12_BIT - 1)
100
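The MAC STAT hardware counters are narrower than 32 bits (12, 16, 22 or 28 bits depending on the counter), so the driver accumulates deltas in software and relies on these masks to survive roll-over; the code that consumes them sits further down in et1310_mac.c, beyond this excerpt. The snippet below is only a hedged sketch of the masking idea, with illustrative names:

/* Illustration only: fold a 12-bit hardware counter into a running total */
static void update_12bit_counter(u32 *total, u32 *last, u32 hw_now)
{
	u32 delta = (hw_now - *last) & COUNTER_MASK_12_BIT;	/* wrap-safe */

	*total += delta;
	*last = hw_now;
}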
101/**
102 * ConfigMACRegs1 - Initialize the first part of MAC regs
103 * @etdev: pointer to our adapter structure
104 */
105void ConfigMACRegs1(struct et131x_adapter *etdev)
106{
107 struct mac_regs __iomem *pMac = &etdev->regs->mac;
108 u32 station1;
109 u32 station2;
110 u32 ipg;
111
112 /* First we need to reset everything. Write to MAC configuration
113 * register 1 to perform reset.
114 */
115 writel(0xC00F0000, &pMac->cfg1);
116
117 /* Next let's configure the MAC Inter-packet gap register */
118 ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
119 ipg |= 0x50 << 8; /* ifg enforce 0x50 */
120 writel(ipg, &pMac->ipg);
121
122 /* Next let's configure the MAC Half Duplex register */
123 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
124 writel(0x00A1F037, &pMac->hfdp);
125
126 /* Next let's configure the MAC Interface Control register */
127 writel(0, &pMac->if_ctrl);
128
129 /* Let's move on to setting up the mii management configuration */
130 writel(0x07, &pMac->mii_mgmt_cfg); /* Clock reset 0x7 */
131
132	/* Next let's configure the MAC Station Address register. These
133 * values are read from the EEPROM during initialization and stored
134 * in the adapter structure. We write what is stored in the adapter
135 * structure to the MAC Station Address registers high and low. This
136 * station address is used for generating and checking pause control
137 * packets.
138 */
139 station2 = (etdev->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
140 (etdev->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
141 station1 = (etdev->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
142 (etdev->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
143 (etdev->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
144 etdev->addr[2];
145 writel(station1, &pMac->station_addr_1);
146 writel(station2, &pMac->station_addr_2);
147
148	/* Max ethernet packet in bytes that will be passed by the MAC without
149 * being truncated. Allow the MAC to pass 4 more than our max packet
150 * size. This is 4 for the Ethernet CRC.
151 *
152 * Packets larger than (RegistryJumboPacket) that do not contain a
153 * VLAN ID will be dropped by the Rx function.
154 */
155 writel(etdev->RegistryJumboPacket + 4, &pMac->max_fm_len);
156
157 /* clear out MAC config reset */
158 writel(0, &pMac->cfg1);
159}
160
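
/*
 * Aside (not part of the driver): ConfigMACRegs1 above packs the six MAC
 * octets into the two 32-bit station-address registers -- octets 1-2 into
 * station_addr_2 and octets 3-6 into station_addr_1, octet 3 in the low
 * byte.  The standalone sketch below only illustrates that byte layout;
 * the real shift values are the ET_MAC_STATION_ADDR*_SHIFT macros in
 * et1310_address_map.h, so the 8/16/24 shifts here are assumed for
 * illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define OC1_SHIFT 8	/* assumed, illustrative values only */
#define OC2_SHIFT 16
#define OC4_SHIFT 8
#define OC5_SHIFT 16
#define OC6_SHIFT 24

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E };
	uint32_t station1, station2;

	/* Octets 1-2 of the MAC address go into station_addr_2 */
	station2 = ((uint32_t)addr[1] << OC2_SHIFT) |
		   ((uint32_t)addr[0] << OC1_SHIFT);
	/* Octets 3-6 go into station_addr_1, octet 3 (addr[2]) in the low byte */
	station1 = ((uint32_t)addr[5] << OC6_SHIFT) |
		   ((uint32_t)addr[4] << OC5_SHIFT) |
		   ((uint32_t)addr[3] << OC4_SHIFT) |
		   addr[2];

	printf("station1 = 0x%08X  station2 = 0x%08X\n",
	       (unsigned)station1, (unsigned)station2);
	return 0;
}
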
161/**
162 * ConfigMacRegs2 - Initialize the second part of MAC regs
163 * @etdev: pointer to our adapter structure
164 */
165void ConfigMACRegs2(struct et131x_adapter *etdev)
166{
167 int32_t delay = 0;
168 struct mac_regs __iomem *pMac = &etdev->regs->mac;
169 u32 cfg1;
170 u32 cfg2;
171 u32 ifctrl;
172 u32 ctl;
173
174 ctl = readl(&etdev->regs->txmac.ctl);
175 cfg1 = readl(&pMac->cfg1);
176 cfg2 = readl(&pMac->cfg2);
177 ifctrl = readl(&pMac->if_ctrl);
178
179 /* Set up the if mode bits */
180 cfg2 &= ~0x300;
181 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
182 cfg2 |= 0x200;
183 /* Phy mode bit */
184 ifctrl &= ~(1 << 24);
185 } else {
186 cfg2 |= 0x100;
187 ifctrl |= (1 << 24);
188 }
189
190 /* We need to enable Rx/Tx */
191 cfg1 |= CFG1_RX_ENABLE|CFG1_TX_ENABLE|CFG1_TX_FLOW;
192 /* Initialize loop back to off */
193 cfg1 &= ~(CFG1_LOOPBACK|CFG1_RX_FLOW);
194 if (etdev->flowcontrol == FLOW_RXONLY || etdev->flowcontrol == FLOW_BOTH)
195 cfg1 |= CFG1_RX_FLOW;
196 writel(cfg1, &pMac->cfg1);
197
198 /* Now we need to initialize the MAC Configuration 2 register */
199 /* preamble 7, check length, huge frame off, pad crc, crc enable
200 full duplex off */
201 cfg2 |= 0x7016;
202 cfg2 &= ~0x0021;
203
204 /* Turn on duplex if needed */
205 if (etdev->duplex_mode)
206 cfg2 |= 0x01;
207
208 ifctrl &= ~(1 << 26);
209 if (!etdev->duplex_mode)
210 ifctrl |= (1<<26); /* Enable ghd */
211
212 writel(ifctrl, &pMac->if_ctrl);
213 writel(cfg2, &pMac->cfg2);
214
215 do {
216 udelay(10);
217 delay++;
218 cfg1 = readl(&pMac->cfg1);
219 } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
220
221 if (delay == 100) {
222 dev_warn(&etdev->pdev->dev,
223 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
224 cfg1);
225 }
226
227 /* Enable TXMAC */
228 ctl |= 0x09; /* TX mac enable, FC disable */
229 writel(ctl, &etdev->regs->txmac.ctl);
230
231 /* Ready to start the RXDMA/TXDMA engine */
232 if (etdev->flags & fMP_ADAPTER_LOWER_POWER) {
233 et131x_rx_dma_enable(etdev);
234 et131x_tx_dma_enable(etdev);
235 }
236}
237
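
/*
 * Aside (not part of the driver): the cfg1 wait loop above is one of several
 * bounded busy-waits in this file (the MII read/write paths below use the
 * same shape).  A minimal kernel-style sketch of the pattern, assuming only
 * readl()/udelay() from <linux/io.h>/<linux/delay.h> and the usual
 * -ETIMEDOUT return code:
 */
static int poll_bits_set(void __iomem *reg, u32 mask, int max_tries)
{
	int tries;

	for (tries = 0; tries < max_tries; tries++) {
		if ((readl(reg) & mask) == mask)
			return 0;		/* bits came up in time */
		udelay(10);
	}
	return -ETIMEDOUT;			/* caller decides how loudly to warn */
}
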
238void ConfigRxMacRegs(struct et131x_adapter *etdev)
239{
240 struct rxmac_regs __iomem *pRxMac = &etdev->regs->rxmac;
241 u32 sa_lo;
242 u32 sa_hi = 0;
243 u32 pf_ctrl = 0;
244
245 /* Disable the MAC while it is being configured (also disable WOL) */
246 writel(0x8, &pRxMac->ctrl);
247
248 /* Initialize WOL to disabled. */
249 writel(0, &pRxMac->crc0);
250 writel(0, &pRxMac->crc12);
251 writel(0, &pRxMac->crc34);
252
253	/* We need to set the WOL mask0 - mask4 next. We initialize them to
254	 * their default value of 0x00000000 because there are no WOL masks
255	 * as of this time.
256 */
257 writel(0, &pRxMac->mask0_word0);
258 writel(0, &pRxMac->mask0_word1);
259 writel(0, &pRxMac->mask0_word2);
260 writel(0, &pRxMac->mask0_word3);
261
262 writel(0, &pRxMac->mask1_word0);
263 writel(0, &pRxMac->mask1_word1);
264 writel(0, &pRxMac->mask1_word2);
265 writel(0, &pRxMac->mask1_word3);
266
267 writel(0, &pRxMac->mask2_word0);
268 writel(0, &pRxMac->mask2_word1);
269 writel(0, &pRxMac->mask2_word2);
270 writel(0, &pRxMac->mask2_word3);
271
272 writel(0, &pRxMac->mask3_word0);
273 writel(0, &pRxMac->mask3_word1);
274 writel(0, &pRxMac->mask3_word2);
275 writel(0, &pRxMac->mask3_word3);
276
277 writel(0, &pRxMac->mask4_word0);
278 writel(0, &pRxMac->mask4_word1);
279 writel(0, &pRxMac->mask4_word2);
280 writel(0, &pRxMac->mask4_word3);
281
282	/* Let's set up the WOL Source Address */
283 sa_lo = (etdev->addr[2] << ET_WOL_LO_SA3_SHIFT) |
284 (etdev->addr[3] << ET_WOL_LO_SA4_SHIFT) |
285 (etdev->addr[4] << ET_WOL_LO_SA5_SHIFT) |
286 etdev->addr[5];
287 writel(sa_lo, &pRxMac->sa_lo);
288
289 sa_hi = (u32) (etdev->addr[0] << ET_WOL_HI_SA1_SHIFT) |
290 etdev->addr[1];
291 writel(sa_hi, &pRxMac->sa_hi);
292
293 /* Disable all Packet Filtering */
294 writel(0, &pRxMac->pf_ctrl);
295
296 /* Let's initialize the Unicast Packet filtering address */
297 if (etdev->PacketFilter & ET131X_PACKET_TYPE_DIRECTED) {
298 SetupDeviceForUnicast(etdev);
299 pf_ctrl |= 4; /* Unicast filter */
300 } else {
301 writel(0, &pRxMac->uni_pf_addr1);
302 writel(0, &pRxMac->uni_pf_addr2);
303 writel(0, &pRxMac->uni_pf_addr3);
304 }
305
306 /* Let's initialize the Multicast hash */
307 if (!(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
308 pf_ctrl |= 2; /* Multicast filter */
309 SetupDeviceForMulticast(etdev);
310 }
311
312 /* Runt packet filtering. Didn't work in version A silicon. */
313 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
314 pf_ctrl |= 8; /* Fragment filter */
315
316 if (etdev->RegistryJumboPacket > 8192)
317 /* In order to transmit jumbo packets greater than 8k, the
318 * FIFO between RxMAC and RxDMA needs to be reduced in size
319 * to (16k - Jumbo packet size). In order to implement this,
320 * we must use "cut through" mode in the RxMAC, which chops
321 * packets down into segments which are (max_size * 16). In
322 * this case we selected 256 bytes, since this is the size of
323		 * the PCI-Express TLPs that the 1310 uses.
324 *
325 * seg_en on, fc_en off, size 0x10
326 */
327 writel(0x41, &pRxMac->mcif_ctrl_max_seg);
328 else
329 writel(0, &pRxMac->mcif_ctrl_max_seg);
330
331 /* Initialize the MCIF water marks */
332 writel(0, &pRxMac->mcif_water_mark);
333
334 /* Initialize the MIF control */
335 writel(0, &pRxMac->mif_ctrl);
336
337 /* Initialize the Space Available Register */
338 writel(0, &pRxMac->space_avail);
339
340	/* Initialize the mif_ctrl register
341 * bit 3: Receive code error. One or more nibbles were signaled as
342 * errors during the reception of the packet. Clear this
343 * bit in Gigabit, set it in 100Mbit. This was derived
344 * experimentally at UNH.
345 * bit 4: Receive CRC error. The packet's CRC did not match the
346 * internally generated CRC.
347 * bit 5: Receive length check error. Indicates that frame length
348 * field value in the packet does not match the actual data
349 * byte length and is not a type field.
350 * bit 16: Receive frame truncated.
351 * bit 17: Drop packet enable
352 */
353 if (etdev->linkspeed == TRUEPHY_SPEED_100MBPS)
354 writel(0x30038, &pRxMac->mif_ctrl);
355 else
356 writel(0x30030, &pRxMac->mif_ctrl);
357
358 /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
359 * filter is always enabled since it is where the runt packets are
360 * supposed to be dropped. For version A silicon, runt packet
361 * dropping doesn't work, so it is disabled in the pf_ctrl register,
362 * but we still leave the packet filter on.
363 */
364 writel(pf_ctrl, &pRxMac->pf_ctrl);
365 writel(0x9, &pRxMac->ctrl);
366}
367
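
/*
 * Aside (not part of the driver): ConfigRxMacRegs assembles pf_ctrl from
 * three enable bits (bit 1 multicast hash, bit 2 unicast, bit 3
 * fragment/runt) plus a minimum-frame-size field in the upper half.  The
 * standalone sketch below reproduces that assembly; NIC_MIN_PACKET_SIZE is
 * defined elsewhere in the driver, so the value 60 used here is only an
 * assumption for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define NIC_MIN_PACKET_SIZE 60		/* assumed value, illustrative only */

int main(void)
{
	int unicast = 1, all_multicast = 0;
	uint32_t pf_ctrl = 0;

	if (unicast)
		pf_ctrl |= 4;				/* unicast filter */
	if (!all_multicast)
		pf_ctrl |= 2;				/* multicast hash filter */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;	/* runt threshold incl. CRC */
	pf_ctrl |= 8;					/* fragment filter */

	printf("pf_ctrl = 0x%08X\n", (unsigned)pf_ctrl);
	return 0;
}
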
368void ConfigTxMacRegs(struct et131x_adapter *etdev)
369{
370	struct txmac_regs __iomem *txmac = &etdev->regs->txmac;
371
372 /* We need to update the Control Frame Parameters
373 * cfpt - control frame pause timer set to 64 (0x40)
374 * cfep - control frame extended pause timer set to 0x0
375 */
376 if (etdev->flowcontrol == FLOW_NONE)
377 writel(0, &txmac->cf_param);
378 else
379 writel(0x40, &txmac->cf_param);
380}
381
382void ConfigMacStatRegs(struct et131x_adapter *etdev)
383{
384 struct macstat_regs __iomem *macstat =
385 &etdev->regs->macstat;
386
387 /* Next we need to initialize all the macstat registers to zero on
388 * the device.
389 */
390 writel(0, &macstat->txrx_0_64_byte_frames);
391 writel(0, &macstat->txrx_65_127_byte_frames);
392 writel(0, &macstat->txrx_128_255_byte_frames);
393 writel(0, &macstat->txrx_256_511_byte_frames);
394 writel(0, &macstat->txrx_512_1023_byte_frames);
395 writel(0, &macstat->txrx_1024_1518_byte_frames);
396 writel(0, &macstat->txrx_1519_1522_gvln_frames);
397
398 writel(0, &macstat->rx_bytes);
399 writel(0, &macstat->rx_packets);
400 writel(0, &macstat->rx_fcs_errs);
401 writel(0, &macstat->rx_multicast_packets);
402 writel(0, &macstat->rx_broadcast_packets);
403 writel(0, &macstat->rx_control_frames);
404 writel(0, &macstat->rx_pause_frames);
405 writel(0, &macstat->rx_unknown_opcodes);
406 writel(0, &macstat->rx_align_errs);
407 writel(0, &macstat->rx_frame_len_errs);
408 writel(0, &macstat->rx_code_errs);
409 writel(0, &macstat->rx_carrier_sense_errs);
410 writel(0, &macstat->rx_undersize_packets);
411 writel(0, &macstat->rx_oversize_packets);
412 writel(0, &macstat->rx_fragment_packets);
413 writel(0, &macstat->rx_jabbers);
414 writel(0, &macstat->rx_drops);
415
416 writel(0, &macstat->tx_bytes);
417 writel(0, &macstat->tx_packets);
418 writel(0, &macstat->tx_multicast_packets);
419 writel(0, &macstat->tx_broadcast_packets);
420 writel(0, &macstat->tx_pause_frames);
421 writel(0, &macstat->tx_deferred);
422 writel(0, &macstat->tx_excessive_deferred);
423 writel(0, &macstat->tx_single_collisions);
424 writel(0, &macstat->tx_multiple_collisions);
425 writel(0, &macstat->tx_late_collisions);
426 writel(0, &macstat->tx_excessive_collisions);
427 writel(0, &macstat->tx_total_collisions);
428 writel(0, &macstat->tx_pause_honored_frames);
429 writel(0, &macstat->tx_drops);
430 writel(0, &macstat->tx_jabbers);
431 writel(0, &macstat->tx_fcs_errs);
432 writel(0, &macstat->tx_control_frames);
433 writel(0, &macstat->tx_oversize_frames);
434 writel(0, &macstat->tx_undersize_frames);
435 writel(0, &macstat->tx_fragments);
436 writel(0, &macstat->carry_reg1);
437 writel(0, &macstat->carry_reg2);
438
439 /* Unmask any counters that we want to track the overflow of.
440 * Initially this will be all counters. It may become clear later
441 * that we do not need to track all counters.
442 */
443 writel(0xFFFFBE32, &macstat->carry_reg1_mask);
444 writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
445}
446
447void ConfigFlowControl(struct et131x_adapter *etdev)
448{
449 if (etdev->duplex_mode == 0) {
450 etdev->flowcontrol = FLOW_NONE;
451 } else {
452 char remote_pause, remote_async_pause;
453
454 ET1310_PhyAccessMiBit(etdev,
455 TRUEPHY_BIT_READ, 5, 10, &remote_pause);
456 ET1310_PhyAccessMiBit(etdev,
457 TRUEPHY_BIT_READ, 5, 11,
458 &remote_async_pause);
459
460 if ((remote_pause == TRUEPHY_BIT_SET) &&
461 (remote_async_pause == TRUEPHY_BIT_SET)) {
462 etdev->flowcontrol = etdev->wanted_flow;
463 } else if ((remote_pause == TRUEPHY_BIT_SET) &&
464 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
465 if (etdev->wanted_flow == FLOW_BOTH)
466 etdev->flowcontrol = FLOW_BOTH;
467 else
468 etdev->flowcontrol = FLOW_NONE;
469 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
470 (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
471 etdev->flowcontrol = FLOW_NONE;
472		} else {/* if (remote_pause == TRUEPHY_BIT_CLEAR &&
473			   remote_async_pause == TRUEPHY_BIT_SET) */
474 if (etdev->wanted_flow == FLOW_BOTH)
475 etdev->flowcontrol = FLOW_RXONLY;
476 else
477 etdev->flowcontrol = FLOW_NONE;
478 }
479 }
480}
481
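
/*
 * Aside (not part of the driver): ConfigFlowControl resolves our wanted
 * flow-control mode against the link partner's pause/asym-pause bits in the
 * usual 802.3 fashion.  The decision table restated as a standalone function
 * (the FLOW_* values here are local stand-ins, not the driver's definitions):
 */
#include <stdio.h>

enum { FLOW_NONE, FLOW_RXONLY, FLOW_TXONLY, FLOW_BOTH };

static int resolve_flow(int wanted, int remote_pause, int remote_asym_pause)
{
	if (remote_pause && remote_asym_pause)
		return wanted;
	if (remote_pause && !remote_asym_pause)
		return wanted == FLOW_BOTH ? FLOW_BOTH : FLOW_NONE;
	if (!remote_pause && !remote_asym_pause)
		return FLOW_NONE;
	/* !remote_pause && remote_asym_pause */
	return wanted == FLOW_BOTH ? FLOW_RXONLY : FLOW_NONE;
}

int main(void)
{
	printf("wanted BOTH, partner advertises pause only -> %d\n",
	       resolve_flow(FLOW_BOTH, 1, 0));
	return 0;
}
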
482/**
483 * UpdateMacStatHostCounters - Update the local copy of the statistics
484 * @etdev: pointer to the adapter structure
485 */
486void UpdateMacStatHostCounters(struct et131x_adapter *etdev)
487{
488 struct ce_stats *stats = &etdev->stats;
489 struct macstat_regs __iomem *macstat =
490 &etdev->regs->macstat;
491
492 stats->collisions += readl(&macstat->tx_total_collisions);
493 stats->first_collision += readl(&macstat->tx_single_collisions);
494 stats->tx_deferred += readl(&macstat->tx_deferred);
495 stats->excessive_collisions += readl(&macstat->tx_multiple_collisions);
496 stats->late_collisions += readl(&macstat->tx_late_collisions);
497 stats->tx_uflo += readl(&macstat->tx_undersize_frames);
498 stats->max_pkt_error += readl(&macstat->tx_oversize_frames);
499
500 stats->alignment_err += readl(&macstat->rx_align_errs);
501 stats->crc_err += readl(&macstat->rx_code_errs);
502 stats->norcvbuf += readl(&macstat->rx_drops);
503 stats->rx_ov_flow += readl(&macstat->rx_oversize_packets);
504 stats->code_violations += readl(&macstat->rx_fcs_errs);
505 stats->length_err += readl(&macstat->rx_frame_len_errs);
506
507 stats->other_errors += readl(&macstat->rx_fragment_packets);
508}
509
510/**
511 * HandleMacStatInterrupt
512 * @etdev: pointer to the adapter structure
513 *
514 * One of the MACSTAT counters has wrapped. Update the local copy of
515 * the statistics held in the adapter structure, checking the "wrap"
516 * bit for each counter.
517 */
518void HandleMacStatInterrupt(struct et131x_adapter *etdev)
519{
520 u32 carry_reg1;
521 u32 carry_reg2;
522
523 /* Read the interrupt bits from the register(s). These are Clear On
524 * Write.
525 */
526 carry_reg1 = readl(&etdev->regs->macstat.carry_reg1);
527 carry_reg2 = readl(&etdev->regs->macstat.carry_reg2);
528
529	writel(carry_reg1, &etdev->regs->macstat.carry_reg1);
530 writel(carry_reg2, &etdev->regs->macstat.carry_reg2);
531
532	/* We need to update the host copy of all the MAC_STAT counters.
533	 * For each counter, check its overflow bit. If the overflow bit is
534 * set, then increment the host version of the count by one complete
535 * revolution of the counter. This routine is called when the counter
536 * block indicates that one of the counters has wrapped.
537 */
538 if (carry_reg1 & (1 << 14))
539 etdev->stats.code_violations += COUNTER_WRAP_16_BIT;
540 if (carry_reg1 & (1 << 8))
541 etdev->stats.alignment_err += COUNTER_WRAP_12_BIT;
542 if (carry_reg1 & (1 << 7))
543 etdev->stats.length_err += COUNTER_WRAP_16_BIT;
544 if (carry_reg1 & (1 << 2))
545 etdev->stats.other_errors += COUNTER_WRAP_16_BIT;
546 if (carry_reg1 & (1 << 6))
547 etdev->stats.crc_err += COUNTER_WRAP_16_BIT;
548 if (carry_reg1 & (1 << 3))
549 etdev->stats.rx_ov_flow += COUNTER_WRAP_16_BIT;
550 if (carry_reg1 & (1 << 0))
551 etdev->stats.norcvbuf += COUNTER_WRAP_16_BIT;
552 if (carry_reg2 & (1 << 16))
553 etdev->stats.max_pkt_error += COUNTER_WRAP_12_BIT;
554 if (carry_reg2 & (1 << 15))
555 etdev->stats.tx_uflo += COUNTER_WRAP_12_BIT;
556 if (carry_reg2 & (1 << 6))
557 etdev->stats.first_collision += COUNTER_WRAP_12_BIT;
558 if (carry_reg2 & (1 << 8))
559 etdev->stats.tx_deferred += COUNTER_WRAP_12_BIT;
560 if (carry_reg2 & (1 << 5))
561 etdev->stats.excessive_collisions += COUNTER_WRAP_12_BIT;
562 if (carry_reg2 & (1 << 4))
563 etdev->stats.late_collisions += COUNTER_WRAP_12_BIT;
564 if (carry_reg2 & (1 << 2))
565 etdev->stats.collisions += COUNTER_WRAP_12_BIT;
566}
567
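
/*
 * Aside (not part of the driver): the handler above recovers full counts
 * from the narrow hardware counters -- each carry bit adds one full
 * revolution of the counter to the host total, and the true count is that
 * total plus the masked value still sitting in the register.  A standalone
 * sketch of the arithmetic using the 16-bit wrap/mask constants defined
 * near the top of this file:
 */
#include <stdio.h>
#include <stdint.h>

#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_MASK_16_BIT (COUNTER_WRAP_16_BIT - 1)

int main(void)
{
	uint64_t host_total = 0;	/* software copy of the counter */
	uint32_t hw_counter = 0xFFFE;	/* what the 16-bit register reads now */
	int carry;

	/* Two more events arrive: the hardware counter wraps, raising carry */
	hw_counter = (hw_counter + 2) & COUNTER_MASK_16_BIT;	/* now 0x0000 */
	carry = 1;

	if (carry)
		host_total += COUNTER_WRAP_16_BIT;	/* one full revolution */

	printf("effective count = %llu\n",
	       (unsigned long long)(host_total + hw_counter));
	return 0;
}
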
568void SetupDeviceForMulticast(struct et131x_adapter *etdev)
569{
570 struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
571 uint32_t nIndex;
572 uint32_t result;
573 uint32_t hash1 = 0;
574 uint32_t hash2 = 0;
575 uint32_t hash3 = 0;
576 uint32_t hash4 = 0;
577 u32 pm_csr;
578
579 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
580	 * the multi-cast LIST. If it is NOT specified (and "ALL" is not
581	 * specified), then we should pass NO multi-cast addresses to the
582 * driver.
583 */
584 if (etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) {
585 /* Loop through our multicast array and set up the device */
586 for (nIndex = 0; nIndex < etdev->MCAddressCount; nIndex++) {
587 result = ether_crc(6, etdev->MCList[nIndex]);
588
589 result = (result & 0x3F800000) >> 23;
590
591 if (result < 32) {
592 hash1 |= (1 << result);
593 } else if ((31 < result) && (result < 64)) {
594 result -= 32;
595 hash2 |= (1 << result);
596 } else if ((63 < result) && (result < 96)) {
597 result -= 64;
598 hash3 |= (1 << result);
599 } else {
600 result -= 96;
601 hash4 |= (1 << result);
602 }
603 }
604 }
605
606 /* Write out the new hash to the device */
607 pm_csr = readl(&etdev->regs->global.pm_csr);
608 if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
609 writel(hash1, &rxmac->multi_hash1);
610 writel(hash2, &rxmac->multi_hash2);
611 writel(hash3, &rxmac->multi_hash3);
612 writel(hash4, &rxmac->multi_hash4);
613 }
614}
615
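
/*
 * Aside (not part of the driver): SetupDeviceForMulticast hashes each
 * address with ether_crc() and uses seven bits of the result (bits 29:23)
 * to pick one of 128 bucket bits spread across multi_hash1..multi_hash4.
 * The standalone sketch below reproduces the bucket selection; the CRC
 * routine is a local stand-in written to the classic ether_crc() behaviour
 * (octet bits fed LSB-first, polynomial 0x04C11DB7, non-reflected
 * register), not the kernel helper itself.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_stand_in(int length, const unsigned char *data)
{
	uint32_t crc = 0xFFFFFFFF;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ (octet & 1)) != 0) ? 0x04C11DB7 : 0);
	}
	return crc;
}

int main(void)
{
	/* IPv4 all-hosts multicast MAC, just as a sample input */
	const unsigned char mc[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	uint32_t result = (ether_crc_stand_in(6, mc) & 0x3F800000) >> 23;

	/* Same split as the driver: result / 32 picks the register (1-4),
	 * result % 32 picks the bit within it */
	printf("bucket %u -> multi_hash%u bit %u\n",
	       (unsigned)result, (unsigned)(result / 32 + 1),
	       (unsigned)(result % 32));
	return 0;
}
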
616void SetupDeviceForUnicast(struct et131x_adapter *etdev)
617{
618 struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
619 u32 uni_pf1;
620 u32 uni_pf2;
621 u32 uni_pf3;
622 u32 pm_csr;
623
624 /* Set up unicast packet filter reg 3 to be the first two octets of
625	 * the MAC address for both addresses
626 *
627 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
628 * MAC address for second address
629 *
630	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
631 * MAC address for first address
632 */
633 uni_pf3 = (etdev->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
634 (etdev->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
635 (etdev->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
636 etdev->addr[1];
637
638 uni_pf2 = (etdev->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
639 (etdev->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
640 (etdev->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
641 etdev->addr[5];
642
643 uni_pf1 = (etdev->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
644 (etdev->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
645 (etdev->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
646 etdev->addr[5];
647
648 pm_csr = readl(&etdev->regs->global.pm_csr);
649 if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
650 writel(uni_pf1, &rxmac->uni_pf_addr1);
651 writel(uni_pf2, &rxmac->uni_pf_addr2);
652 writel(uni_pf3, &rxmac->uni_pf_addr3);
653 }
654}
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
new file mode 100644
index 00000000000..0bcb7fb6e2c
--- /dev/null
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -0,0 +1,979 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_phy.c - Routines for configuring and accessing the PHY
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/ctype.h>
70#include <linux/string.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/in.h>
74#include <linux/delay.h>
75#include <linux/io.h>
76#include <linux/bitops.h>
77#include <asm/system.h>
78
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/if_arp.h>
83#include <linux/ioport.h>
84#include <linux/random.h>
85
86#include "et1310_phy.h"
87
88#include "et131x_adapter.h"
89
90#include "et1310_address_map.h"
91#include "et1310_tx.h"
92#include "et1310_rx.h"
93
94#include "et131x.h"
95
96/* Prototypes for functions with local scope */
97static void et131x_xcvr_init(struct et131x_adapter *etdev);
98
99/**
100 * PhyMiRead - Read from the PHY through the MII Interface on the MAC
101 * @etdev: pointer to our private adapter structure
102 * @xcvrAddr: the address of the transceiver
103 * @xcvrReg: the register to read
104 * @value: pointer to a 16-bit value in which the value will be stored
105 *
106 * Returns 0 on success, errno on failure (as defined in errno.h)
107 */
108int PhyMiRead(struct et131x_adapter *etdev, u8 xcvrAddr,
109 u8 xcvrReg, u16 *value)
110{
111 struct mac_regs __iomem *mac = &etdev->regs->mac;
112 int status = 0;
113 u32 delay;
114 u32 miiAddr;
115 u32 miiCmd;
116 u32 miiIndicator;
117
118 /* Save a local copy of the registers we are dealing with so we can
119 * set them back
120 */
121 miiAddr = readl(&mac->mii_mgmt_addr);
122 miiCmd = readl(&mac->mii_mgmt_cmd);
123
124 /* Stop the current operation */
125 writel(0, &mac->mii_mgmt_cmd);
126
127 /* Set up the register we need to read from on the correct PHY */
128 writel(MII_ADDR(xcvrAddr, xcvrReg), &mac->mii_mgmt_addr);
129
130 /* Kick the read cycle off */
131 delay = 0;
132
133 writel(0x1, &mac->mii_mgmt_cmd);
134
135 do {
136 udelay(50);
137 delay++;
138 miiIndicator = readl(&mac->mii_mgmt_indicator);
139 } while ((miiIndicator & MGMT_WAIT) && delay < 50);
140
141 /* If we hit the max delay, we could not read the register */
142 if (delay == 50) {
143 dev_warn(&etdev->pdev->dev,
144 "xcvrReg 0x%08x could not be read\n", xcvrReg);
145 dev_warn(&etdev->pdev->dev, "status is 0x%08x\n",
146 miiIndicator);
147
148 status = -EIO;
149 }
150
151 /* If we hit here we were able to read the register and we need to
152 * return the value to the caller */
153 *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
154
155 /* Stop the read operation */
156 writel(0, &mac->mii_mgmt_cmd);
157
158 /* set the registers we touched back to the state at which we entered
159 * this function
160 */
161 writel(miiAddr, &mac->mii_mgmt_addr);
162 writel(miiCmd, &mac->mii_mgmt_cmd);
163
164 return status;
165}
166
167/**
168 * MiWrite - Write to a PHY register through the MII interface of the MAC
169 * @etdev: pointer to our private adapter structure
170 * @xcvrReg: the register to write
171 * @value: 16-bit value to write
172 *
173 * FIXME: one caller in netdev still
174 *
175 * Return 0 on success, errno on failure (as defined in errno.h)
176 */
177int MiWrite(struct et131x_adapter *etdev, u8 xcvrReg, u16 value)
178{
179 struct mac_regs __iomem *mac = &etdev->regs->mac;
180 int status = 0;
181 u8 xcvrAddr = etdev->stats.xcvr_addr;
182 u32 delay;
183 u32 miiAddr;
184 u32 miiCmd;
185 u32 miiIndicator;
186
187 /* Save a local copy of the registers we are dealing with so we can
188 * set them back
189 */
190 miiAddr = readl(&mac->mii_mgmt_addr);
191 miiCmd = readl(&mac->mii_mgmt_cmd);
192
193 /* Stop the current operation */
194 writel(0, &mac->mii_mgmt_cmd);
195
196 /* Set up the register we need to write to on the correct PHY */
197 writel(MII_ADDR(xcvrAddr, xcvrReg), &mac->mii_mgmt_addr);
198
199	/* Put the value to be written into the MAC's MII mgmt control reg */
200 writel(value, &mac->mii_mgmt_ctrl);
201 delay = 0;
202
203 do {
204 udelay(50);
205 delay++;
206 miiIndicator = readl(&mac->mii_mgmt_indicator);
207 } while ((miiIndicator & MGMT_BUSY) && delay < 100);
208
209 /* If we hit the max delay, we could not write the register */
210 if (delay == 100) {
211 u16 TempValue;
212
213 dev_warn(&etdev->pdev->dev,
214 "xcvrReg 0x%08x could not be written", xcvrReg);
215 dev_warn(&etdev->pdev->dev, "status is 0x%08x\n",
216 miiIndicator);
217 dev_warn(&etdev->pdev->dev, "command is 0x%08x\n",
218 readl(&mac->mii_mgmt_cmd));
219
220 MiRead(etdev, xcvrReg, &TempValue);
221
222 status = -EIO;
223 }
224 /* Stop the write operation */
225 writel(0, &mac->mii_mgmt_cmd);
226
227 /* set the registers we touched back to the state at which we entered
228 * this function
229 */
230 writel(miiAddr, &mac->mii_mgmt_addr);
231 writel(miiCmd, &mac->mii_mgmt_cmd);
232
233 return status;
234}
235
236/**
237 * et131x_xcvr_find - Find the PHY ID
238 * @etdev: pointer to our private adapter structure
239 *
240 * Returns 0 on success, errno on failure (as defined in errno.h)
241 */
242int et131x_xcvr_find(struct et131x_adapter *etdev)
243{
244 u8 xcvr_addr;
245 u16 idr1;
246 u16 idr2;
247 u32 xcvr_id;
248
249	/* We need the xcvr id and address; we just take the first one found */
250 for (xcvr_addr = 0; xcvr_addr < 32; xcvr_addr++) {
251 /* Read the ID from the PHY */
252 PhyMiRead(etdev, xcvr_addr,
253 (u8) offsetof(struct mi_regs, idr1),
254 &idr1);
255 PhyMiRead(etdev, xcvr_addr,
256 (u8) offsetof(struct mi_regs, idr2),
257 &idr2);
258
259 xcvr_id = (u32) ((idr1 << 16) | idr2);
260
261 if (idr1 != 0 && idr1 != 0xffff) {
262 etdev->stats.xcvr_id = xcvr_id;
263 etdev->stats.xcvr_addr = xcvr_addr;
264 return 0;
265 }
266 }
267 return -ENODEV;
268}
269
270void ET1310_PhyReset(struct et131x_adapter *etdev)
271{
272 MiWrite(etdev, PHY_CONTROL, 0x8000);
273}
274
275/**
276 * ET1310_PhyPowerDown - PHY power control
277 * @etdev: device to control
278 * @down: true for off/false for back on
279 *
280 * one hundred, ten, one thousand megs
281 * How would you like to have your LAN accessed
282 * Can't you see that this code processed
283 * Phy power, phy power..
284 */
285
286void ET1310_PhyPowerDown(struct et131x_adapter *etdev, bool down)
287{
288 u16 data;
289
290 MiRead(etdev, PHY_CONTROL, &data);
291 data &= ~0x0800; /* Power UP */
292 if (down) /* Power DOWN */
293 data |= 0x0800;
294 MiWrite(etdev, PHY_CONTROL, data);
295}
296
297/**
298 * ET1310_PhyAutoNeg - autonegotiate control
299 * @etdev: device to control
300 * @enable: autoneg on/off
301 *
302 * Set up the autonegotiation state according to whether we will be
303 * negotiating the state or forcing a speed.
304 */
305
306static void ET1310_PhyAutoNeg(struct et131x_adapter *etdev, bool enable)
307{
308 u16 data;
309
310 MiRead(etdev, PHY_CONTROL, &data);
311 data &= ~0x1000; /* Autonegotiation OFF */
312 if (enable)
313 data |= 0x1000; /* Autonegotiation ON */
314 MiWrite(etdev, PHY_CONTROL, data);
315}
316
317/**
318 * ET1310_PhyDuplexMode - duplex control
319 * @etdev: device to control
320 * @duplex: duplex on/off
321 *
322 * Set up the duplex state on the PHY
323 */
324
325static void ET1310_PhyDuplexMode(struct et131x_adapter *etdev, u16 duplex)
326{
327 u16 data;
328
329 MiRead(etdev, PHY_CONTROL, &data);
330 data &= ~0x100; /* Set Half Duplex */
331 if (duplex == TRUEPHY_DUPLEX_FULL)
332 data |= 0x100; /* Set Full Duplex */
333 MiWrite(etdev, PHY_CONTROL, data);
334}
335
336/**
337 * ET1310_PhySpeedSelect - speed control
338 * @etdev: device to control
339 * @speed: speed to select (TRUEPHY_SPEED_10MBPS/100MBPS/1000MBPS)
340 *
341 * Set the speed of our PHY.
342 */
343
344static void ET1310_PhySpeedSelect(struct et131x_adapter *etdev, u16 speed)
345{
346 u16 data;
347 static const u16 bits[3] = {0x0000, 0x2000, 0x0040};
348
349 /* Read the PHY control register */
350 MiRead(etdev, PHY_CONTROL, &data);
351 /* Clear all Speed settings (Bits 6, 13) */
352 data &= ~0x2040;
353 /* Write back the new speed */
354 MiWrite(etdev, PHY_CONTROL, data | bits[speed]);
355}
356
357/**
358 * ET1310_PhyLinkStatus - read link state
359 * @etdev: device to read
360 * @link_status: reported link state
361 * @autoneg: reported autonegotiation state (complete/incomplete/disabled)
362 * @linkspeed: returned link speed in use
363 * @duplex_mode: reported half/full duplex state
364 * @mdi_mdix: not yet working
365 * @masterslave: report whether we are master or slave
366 * @polarity: link polarity
367 *
368 * I can read your lan like a magazine
369 * I see if you're up
370 * I know your link speed
371 * I see all the settings that you'd rather keep
372 */
373
374static void ET1310_PhyLinkStatus(struct et131x_adapter *etdev,
375 u8 *link_status,
376 u32 *autoneg,
377 u32 *linkspeed,
378 u32 *duplex_mode,
379 u32 *mdi_mdix,
380 u32 *masterslave, u32 *polarity)
381{
382 u16 mistatus = 0;
383 u16 is1000BaseT = 0;
384 u16 vmi_phystatus = 0;
385 u16 control = 0;
386
387 MiRead(etdev, PHY_STATUS, &mistatus);
388 MiRead(etdev, PHY_1000_STATUS, &is1000BaseT);
389 MiRead(etdev, PHY_PHY_STATUS, &vmi_phystatus);
390 MiRead(etdev, PHY_CONTROL, &control);
391
392 *link_status = (vmi_phystatus & 0x0040) ? 1 : 0;
393 *autoneg = (control & 0x1000) ? ((vmi_phystatus & 0x0020) ?
394 TRUEPHY_ANEG_COMPLETE :
395 TRUEPHY_ANEG_NOT_COMPLETE) :
396 TRUEPHY_ANEG_DISABLED;
397 *linkspeed = (vmi_phystatus & 0x0300) >> 8;
398 *duplex_mode = (vmi_phystatus & 0x0080) >> 7;
399 /* NOTE: Need to complete this */
400 *mdi_mdix = 0;
401
402 *masterslave = (is1000BaseT & 0x4000) ?
403 TRUEPHY_CFG_MASTER : TRUEPHY_CFG_SLAVE;
404 *polarity = (vmi_phystatus & 0x0400) ?
405 TRUEPHY_POLARITY_INVERTED : TRUEPHY_POLARITY_NORMAL;
406}
407
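
/*
 * Aside (not part of the driver): everything ET1310_PhyLinkStatus reports
 * comes from bit fields of the PHY status word (register 0x1A): link in
 * bit 6, autoneg-complete in bit 5, speed code in bits 9:8, duplex in bit 7
 * and polarity in bit 10.  A standalone decode of a sample value, using the
 * same masks as the function above:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Sample status word: link up, autoneg done, speed code 2 (1000 Mb/s),
	 * full duplex, normal polarity */
	uint16_t phystatus = 0x02E0;

	int link     = (phystatus & 0x0040) ? 1 : 0;	/* bit 6 */
	int aneg_ok  = (phystatus & 0x0020) ? 1 : 0;	/* bit 5 */
	int speed    = (phystatus & 0x0300) >> 8;	/* 0=10, 1=100, 2=1000 */
	int duplex   = (phystatus & 0x0080) >> 7;	/* 1 = full duplex */
	int inverted = (phystatus & 0x0400) ? 1 : 0;	/* bit 10 */

	printf("link=%d aneg=%d speed_code=%d duplex=%d inverted=%d\n",
	       link, aneg_ok, speed, duplex, inverted);
	return 0;
}
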
408static void ET1310_PhyAndOrReg(struct et131x_adapter *etdev,
409 u16 regnum, u16 andMask, u16 orMask)
410{
411 u16 reg;
412
413 MiRead(etdev, regnum, &reg);
414 reg &= andMask;
415 reg |= orMask;
416 MiWrite(etdev, regnum, reg);
417}
418
419/* Still used from _mac for BIT_READ */
420void ET1310_PhyAccessMiBit(struct et131x_adapter *etdev, u16 action,
421 u16 regnum, u16 bitnum, u8 *value)
422{
423 u16 reg;
424 u16 mask = 0x0001 << bitnum;
425
426 /* Read the requested register */
427 MiRead(etdev, regnum, &reg);
428
429 switch (action) {
430 case TRUEPHY_BIT_READ:
431 *value = (reg & mask) >> bitnum;
432 break;
433
434 case TRUEPHY_BIT_SET:
435 MiWrite(etdev, regnum, reg | mask);
436 break;
437
438 case TRUEPHY_BIT_CLEAR:
439 MiWrite(etdev, regnum, reg & ~mask);
440 break;
441
442 default:
443 break;
444 }
445}
446
447void ET1310_PhyAdvertise1000BaseT(struct et131x_adapter *etdev,
448 u16 duplex)
449{
450 u16 data;
451
452 /* Read the PHY 1000 Base-T Control Register */
453 MiRead(etdev, PHY_1000_CONTROL, &data);
454
455 /* Clear Bits 8,9 */
456 data &= ~0x0300;
457
458 switch (duplex) {
459 case TRUEPHY_ADV_DUPLEX_NONE:
460 /* Duplex already cleared, do nothing */
461 break;
462
463 case TRUEPHY_ADV_DUPLEX_FULL:
464 /* Set Bit 9 */
465 data |= 0x0200;
466 break;
467
468 case TRUEPHY_ADV_DUPLEX_HALF:
469 /* Set Bit 8 */
470 data |= 0x0100;
471 break;
472
473 case TRUEPHY_ADV_DUPLEX_BOTH:
474 default:
475 data |= 0x0300;
476 break;
477 }
478
479 /* Write back advertisement */
480 MiWrite(etdev, PHY_1000_CONTROL, data);
481}
482
483static void ET1310_PhyAdvertise100BaseT(struct et131x_adapter *etdev,
484 u16 duplex)
485{
486 u16 data;
487
488 /* Read the Autonegotiation Register (10/100) */
489 MiRead(etdev, PHY_AUTO_ADVERTISEMENT, &data);
490
491 /* Clear bits 7,8 */
492 data &= ~0x0180;
493
494 switch (duplex) {
495 case TRUEPHY_ADV_DUPLEX_NONE:
496 /* Duplex already cleared, do nothing */
497 break;
498
499 case TRUEPHY_ADV_DUPLEX_FULL:
500 /* Set Bit 8 */
501 data |= 0x0100;
502 break;
503
504 case TRUEPHY_ADV_DUPLEX_HALF:
505 /* Set Bit 7 */
506 data |= 0x0080;
507 break;
508
509 case TRUEPHY_ADV_DUPLEX_BOTH:
510 default:
511 /* Set Bits 7,8 */
512 data |= 0x0180;
513 break;
514 }
515
516 /* Write back advertisement */
517 MiWrite(etdev, PHY_AUTO_ADVERTISEMENT, data);
518}
519
520static void ET1310_PhyAdvertise10BaseT(struct et131x_adapter *etdev,
521 u16 duplex)
522{
523 u16 data;
524
525 /* Read the Autonegotiation Register (10/100) */
526 MiRead(etdev, PHY_AUTO_ADVERTISEMENT, &data);
527
528 /* Clear bits 5,6 */
529 data &= ~0x0060;
530
531 switch (duplex) {
532 case TRUEPHY_ADV_DUPLEX_NONE:
533 /* Duplex already cleared, do nothing */
534 break;
535
536 case TRUEPHY_ADV_DUPLEX_FULL:
537 /* Set Bit 6 */
538 data |= 0x0040;
539 break;
540
541 case TRUEPHY_ADV_DUPLEX_HALF:
542 /* Set Bit 5 */
543 data |= 0x0020;
544 break;
545
546 case TRUEPHY_ADV_DUPLEX_BOTH:
547 default:
548 /* Set Bits 5,6 */
549 data |= 0x0060;
550 break;
551 }
552
553 /* Write back advertisement */
554 MiWrite(etdev, PHY_AUTO_ADVERTISEMENT, data);
555}
556
557/**
558 * et131x_setphy_normal - Set PHY for normal operation.
559 * @etdev: pointer to our private adapter structure
560 *
561 * Used by Power Management to force the PHY into 10 Base T half-duplex mode,
562 * when going to D3 in WOL mode. Also used during initialization to set the
563 * PHY for normal operation.
564 */
565void et131x_setphy_normal(struct et131x_adapter *etdev)
566{
567 /* Make sure the PHY is powered up */
568 ET1310_PhyPowerDown(etdev, 0);
569 et131x_xcvr_init(etdev);
570}
571
572
573/**
574 * et131x_xcvr_init - Init the phy if we are setting it into force mode
575 * @etdev: pointer to our private adapter structure
576 *
577 */
578static void et131x_xcvr_init(struct et131x_adapter *etdev)
579{
580 u16 imr;
581 u16 isr;
582 u16 lcr2;
583
584 /* Zero out the adapter structure variable representing BMSR */
585 etdev->bmsr = 0;
586
587 MiRead(etdev, (u8) offsetof(struct mi_regs, isr), &isr);
588 MiRead(etdev, (u8) offsetof(struct mi_regs, imr), &imr);
589
590	/* Set the link status interrupt only. Enabling both the link status
591	 * and autoneg interrupts leads to a nested interrupt problem
592 */
593 imr |= 0x0105;
594
595 MiWrite(etdev, (u8) offsetof(struct mi_regs, imr), imr);
596
597 /* Set the LED behavior such that LED 1 indicates speed (off =
598 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
599 * link and activity (on for link, blink off for activity).
600 *
601 * NOTE: Some customizations have been added here for specific
602 * vendors; The LED behavior is now determined by vendor data in the
603 * EEPROM. However, the above description is the default.
604 */
605 if ((etdev->eeprom_data[1] & 0x4) == 0) {
606 MiRead(etdev, (u8) offsetof(struct mi_regs, lcr2),
607 &lcr2);
608
609 lcr2 &= 0x00FF;
610 lcr2 |= 0xA000; /* led link */
611
612 if ((etdev->eeprom_data[1] & 0x8) == 0)
613 lcr2 |= 0x0300;
614 else
615 lcr2 |= 0x0400;
616
617 MiWrite(etdev, (u8) offsetof(struct mi_regs, lcr2),
618 lcr2);
619 }
620
621 /* Determine if we need to go into a force mode and set it */
622 if (etdev->AiForceSpeed == 0 && etdev->AiForceDpx == 0) {
623 if (etdev->wanted_flow == FLOW_TXONLY ||
624 etdev->wanted_flow == FLOW_BOTH)
625 ET1310_PhyAccessMiBit(etdev,
626 TRUEPHY_BIT_SET, 4, 11, NULL);
627 else
628 ET1310_PhyAccessMiBit(etdev,
629 TRUEPHY_BIT_CLEAR, 4, 11, NULL);
630
631 if (etdev->wanted_flow == FLOW_BOTH)
632 ET1310_PhyAccessMiBit(etdev,
633 TRUEPHY_BIT_SET, 4, 10, NULL);
634 else
635 ET1310_PhyAccessMiBit(etdev,
636 TRUEPHY_BIT_CLEAR, 4, 10, NULL);
637
638 /* Set the phy to autonegotiation */
639 ET1310_PhyAutoNeg(etdev, true);
640
641 /* NOTE - Do we need this? */
642 ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_SET, 0, 9, NULL);
643 return;
644 }
645
646 ET1310_PhyAutoNeg(etdev, false);
647
648 /* Set to the correct force mode. */
649 if (etdev->AiForceDpx != 1) {
650 if (etdev->wanted_flow == FLOW_TXONLY ||
651 etdev->wanted_flow == FLOW_BOTH)
652 ET1310_PhyAccessMiBit(etdev,
653 TRUEPHY_BIT_SET, 4, 11, NULL);
654 else
655 ET1310_PhyAccessMiBit(etdev,
656 TRUEPHY_BIT_CLEAR, 4, 11, NULL);
657
658 if (etdev->wanted_flow == FLOW_BOTH)
659 ET1310_PhyAccessMiBit(etdev,
660 TRUEPHY_BIT_SET, 4, 10, NULL);
661 else
662 ET1310_PhyAccessMiBit(etdev,
663 TRUEPHY_BIT_CLEAR, 4, 10, NULL);
664 } else {
665 ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_CLEAR, 4, 10, NULL);
666 ET1310_PhyAccessMiBit(etdev, TRUEPHY_BIT_CLEAR, 4, 11, NULL);
667 }
668 ET1310_PhyPowerDown(etdev, 1);
669 switch (etdev->AiForceSpeed) {
670 case 10:
671 /* First we need to turn off all other advertisement */
672 ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
673 ET1310_PhyAdvertise100BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
674 if (etdev->AiForceDpx == 1) {
675 /* Set our advertise values accordingly */
676 ET1310_PhyAdvertise10BaseT(etdev,
677 TRUEPHY_ADV_DUPLEX_HALF);
678 } else if (etdev->AiForceDpx == 2) {
679 /* Set our advertise values accordingly */
680 ET1310_PhyAdvertise10BaseT(etdev,
681 TRUEPHY_ADV_DUPLEX_FULL);
682 } else {
683 /* Disable autoneg */
684 ET1310_PhyAutoNeg(etdev, false);
685 /* Disable rest of the advertisements */
686 ET1310_PhyAdvertise10BaseT(etdev,
687 TRUEPHY_ADV_DUPLEX_NONE);
688 /* Force 10 Mbps */
689 ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_10MBPS);
690 /* Force Full duplex */
691 ET1310_PhyDuplexMode(etdev, TRUEPHY_DUPLEX_FULL);
692 }
693 break;
694 case 100:
695 /* first we need to turn off all other advertisement */
696 ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
697 ET1310_PhyAdvertise10BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
698 if (etdev->AiForceDpx == 1) {
699 /* Set our advertise values accordingly */
700 ET1310_PhyAdvertise100BaseT(etdev,
701 TRUEPHY_ADV_DUPLEX_HALF);
702 /* Set speed */
703 ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_100MBPS);
704 } else if (etdev->AiForceDpx == 2) {
705 /* Set our advertise values accordingly */
706 ET1310_PhyAdvertise100BaseT(etdev,
707 TRUEPHY_ADV_DUPLEX_FULL);
708 } else {
709 /* Disable autoneg */
710 ET1310_PhyAutoNeg(etdev, false);
711 /* Disable other advertisement */
712 ET1310_PhyAdvertise100BaseT(etdev,
713 TRUEPHY_ADV_DUPLEX_NONE);
714 /* Force 100 Mbps */
715 ET1310_PhySpeedSelect(etdev, TRUEPHY_SPEED_100MBPS);
716 /* Force Full duplex */
717 ET1310_PhyDuplexMode(etdev, TRUEPHY_DUPLEX_FULL);
718 }
719 break;
720 case 1000:
721 /* first we need to turn off all other advertisement */
722 ET1310_PhyAdvertise100BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
723 ET1310_PhyAdvertise10BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
724 /* set our advertise values accordingly */
725 ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_FULL);
726 break;
727 }
728 ET1310_PhyPowerDown(etdev, 0);
729}
730
731void et131x_Mii_check(struct et131x_adapter *etdev,
732 u16 bmsr, u16 bmsr_ints)
733{
734 u8 link_status;
735 u32 autoneg_status;
736 u32 speed;
737 u32 duplex;
738 u32 mdi_mdix;
739 u32 masterslave;
740 u32 polarity;
741 unsigned long flags;
742
743 if (bmsr_ints & MI_BMSR_LINK_STATUS) {
744 if (bmsr & MI_BMSR_LINK_STATUS) {
745 etdev->boot_coma = 20;
746
747 /* Update our state variables and indicate the
748 * connected state
749 */
750 spin_lock_irqsave(&etdev->Lock, flags);
751
752 etdev->MediaState = NETIF_STATUS_MEDIA_CONNECT;
753
754 spin_unlock_irqrestore(&etdev->Lock, flags);
755
756 netif_carrier_on(etdev->netdev);
757 } else {
758 dev_warn(&etdev->pdev->dev,
759 "Link down - cable problem ?\n");
760
761 if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
762 /* NOTE - Is there a way to query this without
763 * TruePHY?
764 * && TRU_QueryCoreType(etdev->hTruePhy, 0) ==
765 * EMI_TRUEPHY_A13O) {
766 */
767 u16 Register18;
768
769 MiRead(etdev, 0x12, &Register18);
770 MiWrite(etdev, 0x12, Register18 | 0x4);
771 MiWrite(etdev, 0x10, Register18 | 0x8402);
772 MiWrite(etdev, 0x11, Register18 | 511);
773 MiWrite(etdev, 0x12, Register18);
774 }
775
776 /* For the first N seconds of life, we are in "link
777			 * detection". When we are in this state, we should
778 * only report "connected". When the LinkDetection
779 * Timer expires, we can report disconnected (handled
780 * in the LinkDetectionDPC).
781 */
782 if ((etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
783 spin_lock_irqsave(&etdev->Lock, flags);
784 etdev->MediaState =
785 NETIF_STATUS_MEDIA_DISCONNECT;
786 spin_unlock_irqrestore(&etdev->Lock,
787 flags);
788
789 netif_carrier_off(etdev->netdev);
790 }
791
792 etdev->linkspeed = 0;
793 etdev->duplex_mode = 0;
794
795 /* Free the packets being actively sent & stopped */
796 et131x_free_busy_send_packets(etdev);
797
798 /* Re-initialize the send structures */
799 et131x_init_send(etdev);
800
801 /* Reset the RFD list and re-start RU */
802 et131x_reset_recv(etdev);
803
804 /*
805 * Bring the device back to the state it was during
806 * init prior to autonegotiation being complete. This
807 * way, when we get the auto-neg complete interrupt,
808 * we can complete init by calling ConfigMacREGS2.
809 */
810 et131x_soft_reset(etdev);
811
812 /* Setup ET1310 as per the documentation */
813 et131x_adapter_setup(etdev);
814
815 /* Setup the PHY into coma mode until the cable is
816 * plugged back in
817 */
818 if (etdev->RegistryPhyComa == 1)
819 EnablePhyComa(etdev);
820 }
821 }
822
823 if ((bmsr_ints & MI_BMSR_AUTO_NEG_COMPLETE) ||
824 (etdev->AiForceDpx == 3 && (bmsr_ints & MI_BMSR_LINK_STATUS))) {
825 if ((bmsr & MI_BMSR_AUTO_NEG_COMPLETE) ||
826 etdev->AiForceDpx == 3) {
827 ET1310_PhyLinkStatus(etdev,
828 &link_status, &autoneg_status,
829 &speed, &duplex, &mdi_mdix,
830 &masterslave, &polarity);
831
832 etdev->linkspeed = speed;
833 etdev->duplex_mode = duplex;
834
835 etdev->boot_coma = 20;
836
837 if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
838 /*
839 * NOTE - Is there a way to query this without
840 * TruePHY?
841 * && TRU_QueryCoreType(etdev->hTruePhy, 0)==
842 * EMI_TRUEPHY_A13O) {
843 */
844 u16 Register18;
845
846 MiRead(etdev, 0x12, &Register18);
847 MiWrite(etdev, 0x12, Register18 | 0x4);
848 MiWrite(etdev, 0x10, Register18 | 0x8402);
849 MiWrite(etdev, 0x11, Register18 | 511);
850 MiWrite(etdev, 0x12, Register18);
851 }
852
853 ConfigFlowControl(etdev);
854
855 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS &&
856 etdev->RegistryJumboPacket > 2048)
857 ET1310_PhyAndOrReg(etdev, 0x16, 0xcfff,
858 0x2000);
859
860 SetRxDmaTimer(etdev);
861 ConfigMACRegs2(etdev);
862 }
863 }
864}
865
866/*
867 * The routines which follow provide low-level access to the PHY, and are used
868 * primarily by the routines above (although there are a few places elsewhere
869 * in the driver where this level of access is required).
870 */
871
872static const u16 ConfigPhy[25][2] = {
873 /* Reg Value Register */
874 /* Addr */
875 {0x880B, 0x0926}, /* AfeIfCreg4B1000Msbs */
876 {0x880C, 0x0926}, /* AfeIfCreg4B100Msbs */
877 {0x880D, 0x0926}, /* AfeIfCreg4B10Msbs */
878
879 {0x880E, 0xB4D3}, /* AfeIfCreg4B1000Lsbs */
880 {0x880F, 0xB4D3}, /* AfeIfCreg4B100Lsbs */
881 {0x8810, 0xB4D3}, /* AfeIfCreg4B10Lsbs */
882
883 {0x8805, 0xB03E}, /* AfeIfCreg3B1000Msbs */
884 {0x8806, 0xB03E}, /* AfeIfCreg3B100Msbs */
885 {0x8807, 0xFF00}, /* AfeIfCreg3B10Msbs */
886
887 {0x8808, 0xE090}, /* AfeIfCreg3B1000Lsbs */
888 {0x8809, 0xE110}, /* AfeIfCreg3B100Lsbs */
889 {0x880A, 0x0000}, /* AfeIfCreg3B10Lsbs */
890
891 {0x300D, 1}, /* DisableNorm */
892
893 {0x280C, 0x0180}, /* LinkHoldEnd */
894
895 {0x1C21, 0x0002}, /* AlphaM */
896
897 {0x3821, 6}, /* FfeLkgTx0 */
898 {0x381D, 1}, /* FfeLkg1g4 */
899 {0x381E, 1}, /* FfeLkg1g5 */
900 {0x381F, 1}, /* FfeLkg1g6 */
901 {0x3820, 1}, /* FfeLkg1g7 */
902
903 {0x8402, 0x01F0}, /* Btinact */
904 {0x800E, 20}, /* LftrainTime */
905 {0x800F, 24}, /* DvguardTime */
906 {0x8010, 46}, /* IdlguardTime */
907
908 {0, 0}
909
910};
911
912/* condensed version of the phy initialization routine */
913void ET1310_PhyInit(struct et131x_adapter *etdev)
914{
915 u16 data, index;
916
917 if (etdev == NULL)
918 return;
919
920 /* get the identity (again ?) */
921 MiRead(etdev, PHY_ID_1, &data);
922 MiRead(etdev, PHY_ID_2, &data);
923
924 /* what does this do/achieve ? */
925 MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
926 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0006);
927
928 /* read modem register 0402, should I do something with the return
929 data ? */
930 MiWrite(etdev, PHY_INDEX_REG, 0x0402);
931 MiRead(etdev, PHY_DATA_REG, &data);
932
933 /* what does this do/achieve ? */
934 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
935
936 /* get the identity (again ?) */
937 MiRead(etdev, PHY_ID_1, &data);
938 MiRead(etdev, PHY_ID_2, &data);
939
940 /* what does this achieve ? */
941 MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
942 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0006);
943
944 /* read modem register 0402, should I do something with
945 the return data? */
946 MiWrite(etdev, PHY_INDEX_REG, 0x0402);
947 MiRead(etdev, PHY_DATA_REG, &data);
948
949 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
950
951 /* what does this achieve (should return 0x1040) */
952 MiRead(etdev, PHY_CONTROL, &data);
953 MiRead(etdev, PHY_MPHY_CONTROL_REG, &data); /* should read 0002 */
954 MiWrite(etdev, PHY_CONTROL, 0x1840);
955
956 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0007);
957
958 /* here the writing of the array starts.... */
959 index = 0;
960 while (ConfigPhy[index][0] != 0x0000) {
961 /* write value */
962 MiWrite(etdev, PHY_INDEX_REG, ConfigPhy[index][0]);
963 MiWrite(etdev, PHY_DATA_REG, ConfigPhy[index][1]);
964
965 /* read it back */
966 MiWrite(etdev, PHY_INDEX_REG, ConfigPhy[index][0]);
967 MiRead(etdev, PHY_DATA_REG, &data);
968
969 /* do a check on the value read back ? */
970 index++;
971 }
972 /* here the writing of the array ends... */
973
974 MiRead(etdev, PHY_CONTROL, &data); /* 0x1840 */
975 MiRead(etdev, PHY_MPHY_CONTROL_REG, &data);/* should read 0007 */
976 MiWrite(etdev, PHY_CONTROL, 0x1040);
977 MiWrite(etdev, PHY_MPHY_CONTROL_REG, 0x0002);
978}
979
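
/*
 * Aside (not part of the driver): ET1310_PhyInit programs the "reserved"
 * modem registers indirectly -- the target address goes into PHY_INDEX_REG
 * (0x10), the payload into PHY_DATA_REG (0x11), and the loop re-selects the
 * index and reads the data back.  A sketch of that sequence wrapped as a
 * helper, reusing the MiWrite()/MiRead() accessors defined in this file
 * (the helper itself is illustrative only, assuming the usual kernel -EIO):
 */
static int phy_indirect_write(struct et131x_adapter *etdev, u16 index, u16 value)
{
	u16 readback;

	MiWrite(etdev, PHY_INDEX_REG, index);	/* select the modem register */
	MiWrite(etdev, PHY_DATA_REG, value);	/* write the payload */

	MiWrite(etdev, PHY_INDEX_REG, index);	/* re-select for readback */
	MiRead(etdev, PHY_DATA_REG, &readback);

	return readback == value ? 0 : -EIO;	/* the driver skips this check */
}
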
diff --git a/drivers/staging/et131x/et1310_phy.h b/drivers/staging/et131x/et1310_phy.h
new file mode 100644
index 00000000000..6b38a3e0cab
--- /dev/null
+++ b/drivers/staging/et131x/et1310_phy.h
@@ -0,0 +1,458 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_phy.h - Defines, structs, enums, prototypes, etc. pertaining to the
12 * PHY.
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#ifndef _ET1310_PHY_H_
60#define _ET1310_PHY_H_
61
62#include "et1310_address_map.h"
63
64/* MI Register Addresses */
65#define MI_CONTROL_REG 0
66#define MI_STATUS_REG 1
67#define MI_PHY_IDENTIFIER_1_REG 2
68#define MI_PHY_IDENTIFIER_2_REG 3
69#define MI_AUTONEG_ADVERTISEMENT_REG 4
70#define MI_AUTONEG_LINK_PARTNER_ABILITY_REG 5
71#define MI_AUTONEG_EXPANSION_REG 6
72#define MI_AUTONEG_NEXT_PAGE_TRANSMIT_REG 7
73#define MI_LINK_PARTNER_NEXT_PAGE_REG 8
74#define MI_1000BASET_CONTROL_REG 9
75#define MI_1000BASET_STATUS_REG 10
76#define MI_RESERVED11_REG 11
77#define MI_RESERVED12_REG 12
78#define MI_RESERVED13_REG 13
79#define MI_RESERVED14_REG 14
80#define MI_EXTENDED_STATUS_REG 15
81
82/* VMI Register Addresses */
83#define VMI_RESERVED16_REG 16
84#define VMI_RESERVED17_REG 17
85#define VMI_RESERVED18_REG 18
86#define VMI_LOOPBACK_CONTROL_REG 19
87#define VMI_RESERVED20_REG 20
88#define VMI_MI_CONTROL_REG 21
89#define VMI_PHY_CONFIGURATION_REG 22
90#define VMI_PHY_CONTROL_REG 23
91#define VMI_INTERRUPT_MASK_REG 24
92#define VMI_INTERRUPT_STATUS_REG 25
93#define VMI_PHY_STATUS_REG 26
94#define VMI_LED_CONTROL_1_REG 27
95#define VMI_LED_CONTROL_2_REG 28
96#define VMI_RESERVED29_REG 29
97#define VMI_RESERVED30_REG 30
98#define VMI_RESERVED31_REG 31
99
100/* PHY Register Mapping(MI) Management Interface Regs */
101struct mi_regs {
102 u8 bmcr; /* Basic mode control reg(Reg 0x00) */
103 u8 bmsr; /* Basic mode status reg(Reg 0x01) */
104 u8 idr1; /* Phy identifier reg 1(Reg 0x02) */
105 u8 idr2; /* Phy identifier reg 2(Reg 0x03) */
106 u8 anar; /* Auto-Negotiation advertisement(Reg 0x04) */
107 u8 anlpar; /* Auto-Negotiation link Partner Ability(Reg 0x05) */
108 u8 aner; /* Auto-Negotiation expansion reg(Reg 0x06) */
109 u8 annptr; /* Auto-Negotiation next page transmit reg(Reg 0x07) */
110 u8 lpnpr; /* link partner next page reg(Reg 0x08) */
111 u8 gcr; /* Gigabit basic mode control reg(Reg 0x09) */
112 u8 gsr; /* Gigabit basic mode status reg(Reg 0x0A) */
113 u8 mi_res1[4]; /* Future use by MI working group(Reg 0x0B - 0x0E) */
114 u8 esr; /* Extended status reg(Reg 0x0F) */
115 u8 mi_res2[3]; /* Future use by MI working group(Reg 0x10 - 0x12) */
116 u8 loop_ctl; /* Loopback Control Reg(Reg 0x13) */
117 u8 mi_res3; /* Future use by MI working group(Reg 0x14) */
118 u8 mcr; /* MI Control Reg(Reg 0x15) */
119 u8 pcr; /* Configuration Reg(Reg 0x16) */
120 u8 phy_ctl; /* PHY Control Reg(Reg 0x17) */
121 u8 imr; /* Interrupt Mask Reg(Reg 0x18) */
122 u8 isr; /* Interrupt Status Reg(Reg 0x19) */
123 u8 psr; /* PHY Status Reg(Reg 0x1A) */
124 u8 lcr1; /* LED Control 1 Reg(Reg 0x1B) */
125 u8 lcr2; /* LED Control 2 Reg(Reg 0x1C) */
126 u8 mi_res4[3]; /* Future use by MI working group(Reg 0x1D - 0x1F) */
127};
128
129/*
130 * MI Register 0: Basic mode control register
131 * 15: reset
132 * 14: loopback
133 * 13: speed_sel
134 * 12: enable_autoneg
135 * 11: power_down
136 * 10: isolate
137 * 9: restart_autoneg
138 * 8: duplex_mode
139 * 7: col_test
140 * 6: speed_1000_sel
141 * 5-0: res1
142 */
143
144/*
145 * MI Register 1: Basic mode status register
146 * 15: link_100T4
147 * 14: link_100fdx
148 * 13: link_100hdx
149 * 12: link_10fdx
150 * 11: link_10hdx
151 * 10: link_100T2fdx
152 * 9: link_100T2hdx
153 * 8: extend_status
154 * 7: res1
155 * 6: preamble_supress
156 * 5: auto_neg_complete
157 * 4: remote_fault
158 * 3: auto_neg_able
159 * 2: link_status
160 * 1: jabber_detect
161 * 0: ext_cap
162 */
163
164#define MI_BMSR_LINK_STATUS 0x04
165#define MI_BMSR_AUTO_NEG_COMPLETE 0x20
166
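
/*
 * Aside (not part of the driver): the two masks above select bit 2
 * (link_status) and bit 5 (auto_neg_complete) of the basic mode status
 * register, which is how et131x_Mii_check() in et1310_phy.c decides what
 * changed.  A trivial standalone decode of a sample BMSR value:
 */
#include <stdio.h>
#include <stdint.h>

#define BMSR_LINK_STATUS	0x04	/* mirrors MI_BMSR_LINK_STATUS */
#define BMSR_ANEG_COMPLETE	0x20	/* mirrors MI_BMSR_AUTO_NEG_COMPLETE */

int main(void)
{
	uint16_t bmsr = 0x0024;		/* sample: link up, autoneg complete */

	printf("link up: %s, autoneg complete: %s\n",
	       (bmsr & BMSR_LINK_STATUS) ? "yes" : "no",
	       (bmsr & BMSR_ANEG_COMPLETE) ? "yes" : "no");
	return 0;
}
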
167/*
168 * MI Register 4: Auto-negotiation advertisement register
169 *
170 * 15: np_indication
171 * 14: res2
172 * 13: remote_fault
173 * 12: res1
174 * 11: cap_asmpause
175 * 10: cap_pause
176 * 9: cap_100T4
177 * 8: cap_100fdx
178 * 7: cap_100hdx
179 * 6: cap_10fdx
180 * 5: cap_10hdx
181 * 4-0: selector
182 */
183
184/* MI Register 5: Auto-negotiation link partner advertisement register
185 * 15: np_indication
186 * 14: acknowledge
187 * 13: remote_fault
188 * 12: res1
189 * 11: cap_asmpause
190 * 10: cap_pause
191 * 9: cap_100T4
192 * 8: cap_100fdx
193 * 7: cap_100hdx
194 * 6: cap_10fdx
195 * 5: cap_10hdx
196 * 4-0: selector
197 */
198
199/* MI Register 6: Auto-negotiation expansion register
200 * 15-5: reserved
201 * 4: pdf
202 * 3: lp_np_able
203 * 2: np_able
204 * 1: page_rx
205 * 0: lp_an_able
206 */
207
208/* MI Register 7: Auto-negotiation next page transmit reg(0x07)
209 * 15: np
210 * 14: reserved
211 * 13: msg_page
212 * 12: ack2
213 * 11: toggle
214 * 10-0 msg
215 */
216
217/* MI Register 8: Link Partner Next Page Reg(0x08)
218 * 15: np
219 * 14: ack
220 * 13: msg_page
221 * 12: ack2
222 * 11: toggle
223 * 10-0: msg
224 */
225
226/* MI Register 9: 1000BaseT Control Reg(0x09)
227 * 15-13: test_mode
228 * 12: ms_config_en
229 * 11: ms_value
230 * 10: port_type
231 * 9: link_1000fdx
232 * 8: link_1000hdx
233 * 7-0: reserved
234 */
235
236/* MI Register 10: 1000BaseT Status Reg(0x0A)
237 * 15: ms_config_fault
238 * 14: ms_resolve
239 * 13: local_rx_status
240 * 12: remote_rx_status
241 * 11: link_1000fdx
242 * 10: link_1000hdx
243 * 9-8: reserved
244 * 7-0: idle_err_cnt
245 */
246
247/* MI Register 11 - 14: Reserved Regs(0x0B - 0x0E) */
248
249/* MI Register 15: Extended status Reg(0x0F)
250 * 15: link_1000Xfdx
251 * 14: link_1000Xhdx
252 * 13: link_1000fdx
253 * 12: link_1000hdx
254 * 11-0: reserved
255 */
256
257/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */
258
259/* MI Register 19: Loopback Control Reg(0x13)
260 * 15: mii_en
261 * 14: pcs_en
262 * 13: pmd_en
263 * 12: all_digital_en
264 * 11: replica_en
265 * 10: line_driver_en
266 * 9-0: reserved
267 */
268
269/* MI Register 20: Reserved Reg(0x14) */
270
271/* MI Register 21: Management Interface Control Reg(0x15)
272 * 15-11: reserved
273 * 10-4: mi_error_count
274 * 3: reserved
275 * 2: ignore_10g_fr
276 * 1: reserved
277 * 0: preamble_supress_en
278 */
279
280/* MI Register 22: PHY Configuration Reg(0x16)
281 * 15: crs_tx_en
282 * 14: reserved
283 * 13-12: tx_fifo_depth
284 * 11-10: speed_downshift
285 * 9: pbi_detect
286 * 8: tbi_rate
287 * 7: alternate_np
288 * 6: group_mdio_en
289 * 5: tx_clock_en
290 * 4: sys_clock_en
291 * 3: reserved
292 * 2-0: mac_if_mode
293 */
294
295/* MI Register 23: PHY CONTROL Reg(0x17)
296 * 15: reserved
297 * 14: tdr_en
298 * 13: reserved
299 * 12-11: downshift_attempts
300 * 10-6: reserved
301 * 5: jabber_10baseT
302 * 4: sqe_10baseT
303 * 3: tp_loopback_10baseT
304 * 2: preamble_gen_en
305 * 1: reserved
306 * 0: force_int
307 */
308
309/* MI Register 24: Interrupt Mask Reg(0x18)
310 * 15-10: reserved
311 * 9: mdio_sync_lost
312 * 8: autoneg_status
313 * 7: hi_bit_err
314 * 6: np_rx
315 * 5: err_counter_full
316 * 4: fifo_over_underflow
317 * 3: rx_status
318 * 2: link_status
319 * 1: automatic_speed
320 * 0: int_en
321 */
322
323
324/* MI Register 25: Interrupt Status Reg(0x19)
325 * 15-10: reserved
326 * 9: mdio_sync_lost
327 * 8: autoneg_status
328 * 7: hi_bit_err
329 * 6: np_rx
330 * 5: err_counter_full
331 * 4: fifo_over_underflow
332 * 3: rx_status
333 * 2: link_status
334 * 1: automatic_speed
335 * 0: int_en
336 */
337
338/* MI Register 26: PHY Status Reg(0x1A)
339 * 15: reserved
340 * 14-13: autoneg_fault
341 * 12: autoneg_status
342 * 11: mdi_x_status
343 * 10: polarity_status
344 * 9-8: speed_status
345 * 7: duplex_status
346 * 6: link_status
347 * 5: tx_status
348 * 4: rx_status
349 * 3: collision_status
350 * 2: autoneg_en
351 * 1: pause_en
352 * 0: asymmetric_dir
353 */
354
355/* MI Register 27: LED Control Reg 1(0x1B)
356 * 15-14: reserved
357 * 13-12: led_dup_indicate
358 * 11-10: led_10baseT
359 * 9-8: led_collision
360 * 7-4: reserved
361 * 3-2: pulse_dur
362 * 1: pulse_stretch1
363 * 0: pulse_stretch0
364 */
365
366/* MI Register 28: LED Control Reg 2(0x1C)
367 * 15-12: led_link
368 * 11-8: led_tx_rx
369 * 7-4: led_100BaseTX
370 * 3-0: led_1000BaseT
371 */
372
373/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1F) */
374
375
376/* Prototypes for ET1310_phy.c */
377/* Defines for PHY access routines */
378
379/* Define bit operation flags */
380#define TRUEPHY_BIT_CLEAR 0
381#define TRUEPHY_BIT_SET 1
382#define TRUEPHY_BIT_READ 2
383
384/* Define read/write operation flags */
385#ifndef TRUEPHY_READ
386#define TRUEPHY_READ 0
387#define TRUEPHY_WRITE 1
388#define TRUEPHY_MASK 2
389#endif
390
391/* Define speeds */
392#define TRUEPHY_SPEED_10MBPS 0
393#define TRUEPHY_SPEED_100MBPS 1
394#define TRUEPHY_SPEED_1000MBPS 2
395
396/* Define duplex modes */
397#define TRUEPHY_DUPLEX_HALF 0
398#define TRUEPHY_DUPLEX_FULL 1
399
400/* Define master/slave configuration values */
401#define TRUEPHY_CFG_SLAVE 0
402#define TRUEPHY_CFG_MASTER 1
403
404/* Define MDI/MDI-X settings */
405#define TRUEPHY_MDI 0
406#define TRUEPHY_MDIX 1
407#define TRUEPHY_AUTO_MDI_MDIX 2
408
409/* Define 10Base-T link polarities */
410#define TRUEPHY_POLARITY_NORMAL 0
411#define TRUEPHY_POLARITY_INVERTED 1
412
413/* Define auto-negotiation results */
414#define TRUEPHY_ANEG_NOT_COMPLETE 0
415#define TRUEPHY_ANEG_COMPLETE 1
416#define TRUEPHY_ANEG_DISABLED 2
417
418/* Define duplex advertisement flags */
419#define TRUEPHY_ADV_DUPLEX_NONE 0x00
420#define TRUEPHY_ADV_DUPLEX_FULL 0x01
421#define TRUEPHY_ADV_DUPLEX_HALF 0x02
422#define TRUEPHY_ADV_DUPLEX_BOTH \
423 (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
424
425#define PHY_CONTROL 0x00 /* #define TRU_MI_CONTROL_REGISTER 0 */
426#define PHY_STATUS 0x01 /* #define TRU_MI_STATUS_REGISTER 1 */
427#define PHY_ID_1 0x02 /* #define TRU_MI_PHY_IDENTIFIER_1_REGISTER 2 */
428#define PHY_ID_2 0x03 /* #define TRU_MI_PHY_IDENTIFIER_2_REGISTER 3 */
429#define PHY_AUTO_ADVERTISEMENT 0x04 /* #define TRU_MI_ADVERTISEMENT_REGISTER 4 */
430#define PHY_AUTO_LINK_PARTNER 0x05 /* #define TRU_MI_LINK_PARTNER_ABILITY_REGISTER 5 */
431#define PHY_AUTO_EXPANSION 0x06 /* #define TRU_MI_EXPANSION_REGISTER 6 */
432#define PHY_AUTO_NEXT_PAGE_TX 0x07 /* #define TRU_MI_NEXT_PAGE_TRANSMIT_REGISTER 7 */
433#define PHY_LINK_PARTNER_NEXT_PAGE 0x08 /* #define TRU_MI_LINK_PARTNER_NEXT_PAGE_REGISTER 8 */
434#define PHY_1000_CONTROL 0x09 /* #define TRU_MI_1000BASET_CONTROL_REGISTER 9 */
435#define PHY_1000_STATUS 0x0A /* #define TRU_MI_1000BASET_STATUS_REGISTER 10 */
436
437#define PHY_EXTENDED_STATUS 0x0F /* #define TRU_MI_EXTENDED_STATUS_REGISTER 15 */
438
439/* some defines for modem registers that seem to be 'reserved' */
440#define PHY_INDEX_REG 0x10
441#define PHY_DATA_REG 0x11
442
443#define PHY_MPHY_CONTROL_REG 0x12 /* #define TRU_VMI_MPHY_CONTROL_REGISTER 18 */
444
445#define PHY_LOOPBACK_CONTROL 0x13 /* #define TRU_VMI_LOOPBACK_CONTROL_1_REGISTER 19 */
446 /* #define TRU_VMI_LOOPBACK_CONTROL_2_REGISTER 20 */
447#define PHY_REGISTER_MGMT_CONTROL 0x15 /* #define TRU_VMI_MI_SEQ_CONTROL_REGISTER 21 */
448#define PHY_CONFIG 0x16 /* #define TRU_VMI_CONFIGURATION_REGISTER 22 */
449#define PHY_PHY_CONTROL 0x17 /* #define TRU_VMI_PHY_CONTROL_REGISTER 23 */
450#define PHY_INTERRUPT_MASK 0x18 /* #define TRU_VMI_INTERRUPT_MASK_REGISTER 24 */
451#define PHY_INTERRUPT_STATUS 0x19 /* #define TRU_VMI_INTERRUPT_STATUS_REGISTER 25 */
452#define PHY_PHY_STATUS 0x1A /* #define TRU_VMI_PHY_STATUS_REGISTER 26 */
453#define PHY_LED_1 0x1B /* #define TRU_VMI_LED_CONTROL_1_REGISTER 27 */
454#define PHY_LED_2 0x1C /* #define TRU_VMI_LED_CONTROL_2_REGISTER 28 */
455 /* #define TRU_VMI_LINK_CONTROL_REGISTER 29 */
456 /* #define TRU_VMI_TIMING_CONTROL_REGISTER */
457
458#endif /* _ET1310_PHY_H_ */
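As a quick illustration of how the MI register maps above translate into code, here is a minimal sketch (not part of the patch) that decodes a raw value read from PHY_1000_STATUS using the MI Register 10 layout documented earlier. Only the bit positions come from the register map; the helper itself is hypothetical.

/* Hypothetical helper: interpret a raw PHY_1000_STATUS (MI Register 10)
 * value. Bit positions follow the layout comment above.
 */
static inline int example_1000t_link_ok(u16 val)
{
	int local_rx_ok  = (val >> 13) & 1;	/* bit 13: local_rx_status */
	int remote_rx_ok = (val >> 12) & 1;	/* bit 12: remote_rx_status */
	int lp_1000fdx   = (val >> 11) & 1;	/* bit 11: link_1000fdx */

	return local_rx_ok && remote_rx_ok && lp_1000fdx;
}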
diff --git a/drivers/staging/et131x/et1310_pm.c b/drivers/staging/et131x/et1310_pm.c
new file mode 100644
index 00000000000..29d4d66d345
--- /dev/null
+++ b/drivers/staging/et131x/et1310_pm.c
@@ -0,0 +1,180 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_pm.c - All power management related code (not completely implemented)
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/init.h>
62#include <linux/module.h>
63#include <linux/types.h>
64#include <linux/kernel.h>
65
66#include <linux/sched.h>
67#include <linux/ptrace.h>
68#include <linux/ctype.h>
69#include <linux/string.h>
70#include <linux/timer.h>
71#include <linux/interrupt.h>
72#include <linux/in.h>
73#include <linux/delay.h>
74#include <linux/io.h>
75#include <linux/bitops.h>
76#include <asm/system.h>
77
78#include <linux/netdevice.h>
79#include <linux/etherdevice.h>
80#include <linux/skbuff.h>
81#include <linux/if_arp.h>
82#include <linux/ioport.h>
83
84#include "et1310_phy.h"
85#include "et1310_rx.h"
86#include "et131x_adapter.h"
87#include "et131x.h"
88
89/**
90 * EnablePhyComa - called when network cable is unplugged
91 * @etdev: pointer to our adapter structure
92 *
93 * The driver receives a phy status change interrupt while in D0 and checks
94 * that phy_status is down.
95 *
96 * -- gate off JAGCore;
97 * -- set gigE PHY in Coma mode
98 * -- wake on phy_interrupt; perform a software reset of the JAGCore,
99 * re-initialize the JAGCore and gigE PHY
100 *
101 * Add D0-ASPM-PhyLinkDown Support:
102 * -- while in D0, when there is a phy_interrupt indicating phy link
103 * down status, call the MPSetPhyComa routine to enter this active
104 * state power saving mode
105 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
106 * indicating linkup status, call the MPDisablePhyComa routine to
107 * restore JAGCore and gigE PHY
108 */
109void EnablePhyComa(struct et131x_adapter *etdev)
110{
111 unsigned long flags;
112 u32 pmcsr;
113
114 pmcsr = readl(&etdev->regs->global.pm_csr);
115
116 /* Save the GbE PHY speed and duplex modes. Need to restore this
117 * when cable is plugged back in
118 */
119 etdev->pdown_speed = etdev->AiForceSpeed;
120 etdev->pdown_duplex = etdev->AiForceDpx;
121
122 /* Stop sending packets. */
123 spin_lock_irqsave(&etdev->send_hw_lock, flags);
124 etdev->flags |= fMP_ADAPTER_LOWER_POWER;
125 spin_unlock_irqrestore(&etdev->send_hw_lock, flags);
126
127 /* Wait for outstanding Receive packets */
128
129 /* Gate off JAGCore 3 clock domains */
130 pmcsr &= ~ET_PMCSR_INIT;
131 writel(pmcsr, &etdev->regs->global.pm_csr);
132
133 /* Program gigE PHY in to Coma mode */
134 pmcsr |= ET_PM_PHY_SW_COMA;
135 writel(pmcsr, &etdev->regs->global.pm_csr);
136}
137
138/**
139 * DisablePhyComa - Disable the Phy Coma Mode
140 * @etdev: pointer to our adapter structure
141 */
142void DisablePhyComa(struct et131x_adapter *etdev)
143{
144 u32 pmcsr;
145
146 pmcsr = readl(&etdev->regs->global.pm_csr);
147
148 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
149 pmcsr |= ET_PMCSR_INIT;
150 pmcsr &= ~ET_PM_PHY_SW_COMA;
151 writel(pmcsr, &etdev->regs->global.pm_csr);
152
153 /* Restore the GbE PHY speed and duplex modes;
154 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
155 */
156 etdev->AiForceSpeed = etdev->pdown_speed;
157 etdev->AiForceDpx = etdev->pdown_duplex;
158
159 /* Re-initialize the send structures */
160 et131x_init_send(etdev);
161
162 /* Reset the RFD list and re-start RU */
163 et131x_reset_recv(etdev);
164
165	/* Bring the device back to the state it was in during init prior to
166 * autonegotiation being complete. This way, when we get the auto-neg
167 * complete interrupt, we can complete init by calling ConfigMacREGS2.
168 */
169 et131x_soft_reset(etdev);
170
171 /* setup et1310 as per the documentation ?? */
172 et131x_adapter_setup(etdev);
173
174 /* Allow Tx to restart */
175 etdev->flags &= ~fMP_ADAPTER_LOWER_POWER;
176
177 /* Need to re-enable Rx. */
178 et131x_rx_dma_enable(etdev);
179}
180
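For reference, a minimal sketch (not part of this file) of how a caller could test whether the gigE PHY is currently in coma mode, using the same ET_PM_PHY_SW_COMA bit that EnablePhyComa sets and DisablePhyComa clears above:

/* Sketch: report whether the gigE PHY has been placed in coma mode. */
static inline int et131x_phy_in_coma(struct et131x_adapter *etdev)
{
	u32 pmcsr = readl(&etdev->regs->global.pm_csr);

	return (pmcsr & ET_PM_PHY_SW_COMA) != 0;
}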
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
new file mode 100644
index 00000000000..7e386e07ff9
--- /dev/null
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -0,0 +1,1152 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_rx.c - Routines used to perform data reception
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/slab.h>
70#include <linux/ctype.h>
71#include <linux/string.h>
72#include <linux/timer.h>
73#include <linux/interrupt.h>
74#include <linux/in.h>
75#include <linux/delay.h>
76#include <linux/io.h>
77#include <linux/bitops.h>
78#include <asm/system.h>
79
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et131x_adapter.h"
88#include "et1310_rx.h"
89#include "et131x.h"
90
91static inline u32 bump_fbr(u32 *fbr, u32 limit)
92{
93 u32 v = *fbr;
94 v++;
95	/* This works for all cases where limit < 1024. The 1023 case
96	   works because incrementing 1023 gives 1024, which means the if
97	   condition is not taken, but the carry into the wrap bit toggles
98	   the wrap value correctly */
99 if ((v & ET_DMA10_MASK) > limit) {
100 v &= ~ET_DMA10_MASK;
101 v ^= ET_DMA10_WRAP;
102 }
103 /* For the 1023 case */
104 v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
105 *fbr = v;
106 return v;
107}
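A short usage sketch of the wrap behaviour described in the comment above, assuming the usual encoding of a 10-bit ring index in ET_DMA10_MASK (0x3FF) with the wrap flag in ET_DMA10_WRAP (0x400):

/* Illustration only: how bump_fbr() advances a 10-bit ring index and
 * toggles the wrap bit when the end of the ring is reached.
 */
static void bump_fbr_example(void)
{
	u32 fbr;

	fbr = 0x1FF;		/* index 511, wrap clear, 512-entry ring */
	bump_fbr(&fbr, 511);	/* limit exceeded: fbr == 0x400
				 * (index 0, wrap bit toggled) */

	fbr = 0x3FF;		/* index 1023, wrap clear, 1024-entry ring */
	bump_fbr(&fbr, 1023);	/* the if() is not taken, but the carry
				 * into bit 10 still yields 0x400 */
}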
108
109/**
110 * et131x_rx_dma_memory_alloc
111 * @adapter: pointer to our private adapter structure
112 *
113 * Returns 0 on success and errno on failure (as defined in errno.h)
114 *
115 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
116 * and the Packet Status Ring.
117 */
118int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
119{
120 u32 i, j;
121 u32 bufsize;
122 u32 pktStatRingSize, FBRChunkSize;
123 struct rx_ring *rx_ring;
124
125 /* Setup some convenience pointers */
126 rx_ring = &adapter->rx_ring;
127
128 /* Alloc memory for the lookup table */
129#ifdef USE_FBR0
130 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
131#endif
132 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
133
134 /* The first thing we will do is configure the sizes of the buffer
135 * rings. These will change based on jumbo packet support. Larger
136	 * jumbo packets increase the size of each entry in FBR0, and the
137 * number of entries in FBR0, while at the same time decreasing the
138 * number of entries in FBR1.
139 *
140 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
141 * entries are huge in order to accommodate a "jumbo" frame, then it
142	 * will have fewer entries. Conversely, FBR0 will now be relied upon
143	 * to carry more "normal" frames, thus its entry size also increases
144	 * and the number of entries goes up too (since it now carries
145	 * "small" + "regular" packets).
146 *
147 * In this scheme, we try to maintain 512 entries between the two
148	 * rings. Also, FBR1 remains a constant size - when its size doubles
149 * the number of entries halves. FBR0 increases in size, however.
150 */
151
152 if (adapter->RegistryJumboPacket < 2048) {
153#ifdef USE_FBR0
154 rx_ring->Fbr0BufferSize = 256;
155 rx_ring->Fbr0NumEntries = 512;
156#endif
157 rx_ring->Fbr1BufferSize = 2048;
158 rx_ring->Fbr1NumEntries = 512;
159 } else if (adapter->RegistryJumboPacket < 4096) {
160#ifdef USE_FBR0
161 rx_ring->Fbr0BufferSize = 512;
162 rx_ring->Fbr0NumEntries = 1024;
163#endif
164 rx_ring->Fbr1BufferSize = 4096;
165 rx_ring->Fbr1NumEntries = 512;
166 } else {
167#ifdef USE_FBR0
168 rx_ring->Fbr0BufferSize = 1024;
169 rx_ring->Fbr0NumEntries = 768;
170#endif
171 rx_ring->Fbr1BufferSize = 16384;
172 rx_ring->Fbr1NumEntries = 128;
173 }
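To make the sizing above easier to scan, the resulting ring geometry for each branch is summarised below (derived directly from the assignments above):

	/* Ring geometry per jumbo setting (buffer size x number of entries):
	 *
	 *	RegistryJumboPacket	FBR0		FBR1
	 *	< 2048			 256 x 512	 2048 x 512
	 *	< 4096			 512 x 1024	 4096 x 512
	 *	otherwise		1024 x 768	16384 x 128
	 */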
174
175#ifdef USE_FBR0
176 adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
177 adapter->rx_ring.Fbr1NumEntries;
178#else
179 adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
180#endif
181
182 /* Allocate an area of memory for Free Buffer Ring 1 */
183 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
184 rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
185 bufsize,
186 &rx_ring->pFbr1RingPa);
187 if (!rx_ring->pFbr1RingVa) {
188 dev_err(&adapter->pdev->dev,
189 "Cannot alloc memory for Free Buffer Ring 1\n");
190 return -ENOMEM;
191 }
192
193 /* Save physical address
194 *
195 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
196 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
197 * are ever returned, make sure the high part is retrieved here
198 * before storing the adjusted address.
199 */
200 rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
201
202 /* Align Free Buffer Ring 1 on a 4K boundary */
203 et131x_align_allocated_memory(adapter,
204 &rx_ring->Fbr1Realpa,
205 &rx_ring->Fbr1offset, 0x0FFF);
206
207 rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
208 rx_ring->Fbr1offset);
209
210#ifdef USE_FBR0
211 /* Allocate an area of memory for Free Buffer Ring 0 */
212 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
213 rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
214 bufsize,
215 &rx_ring->pFbr0RingPa);
216 if (!rx_ring->pFbr0RingVa) {
217 dev_err(&adapter->pdev->dev,
218 "Cannot alloc memory for Free Buffer Ring 0\n");
219 return -ENOMEM;
220 }
221
222 /* Save physical address
223 *
224 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
225 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
226 * are ever returned, make sure the high part is retrieved here before
227 * storing the adjusted address.
228 */
229 rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
230
231 /* Align Free Buffer Ring 0 on a 4K boundary */
232 et131x_align_allocated_memory(adapter,
233 &rx_ring->Fbr0Realpa,
234 &rx_ring->Fbr0offset, 0x0FFF);
235
236 rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
237 rx_ring->Fbr0offset);
238#endif
239
240 for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
241 i++) {
242 u64 Fbr1Offset;
243 u64 Fbr1TempPa;
244 u32 Fbr1Align;
245
246 /* This code allocates an area of memory big enough for N
247 * free buffers + (buffer_size - 1) so that the buffers can
248 * be aligned on 4k boundaries. If each buffer were aligned
249 * to a buffer_size boundary, the effect would be to double
250 * the size of FBR0. By allocating N buffers at once, we
251 * reduce this overhead.
252 */
253 if (rx_ring->Fbr1BufferSize > 4096)
254 Fbr1Align = 4096;
255 else
256 Fbr1Align = rx_ring->Fbr1BufferSize;
257
258 FBRChunkSize =
259 (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
260 rx_ring->Fbr1MemVa[i] =
261 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
262 &rx_ring->Fbr1MemPa[i]);
263
264 if (!rx_ring->Fbr1MemVa[i]) {
265 dev_err(&adapter->pdev->dev,
266 "Could not alloc memory\n");
267 return -ENOMEM;
268 }
269
270 /* See NOTE in "Save Physical Address" comment above */
271 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
272
273 et131x_align_allocated_memory(adapter,
274 &Fbr1TempPa,
275 &Fbr1Offset, (Fbr1Align - 1));
276
277 for (j = 0; j < FBR_CHUNKS; j++) {
278 u32 index = (i * FBR_CHUNKS) + j;
279
280 /* Save the Virtual address of this index for quick
281 * access later
282 */
283 rx_ring->fbr[1]->virt[index] =
284 (u8 *) rx_ring->Fbr1MemVa[i] +
285 (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
286
287 /* now store the physical address in the descriptor
288 * so the device can access it
289 */
290 rx_ring->fbr[1]->bus_high[index] =
291 (u32) (Fbr1TempPa >> 32);
292 rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
293
294 Fbr1TempPa += rx_ring->Fbr1BufferSize;
295
296 rx_ring->fbr[1]->buffer1[index] =
297 rx_ring->fbr[1]->virt[index];
298 rx_ring->fbr[1]->buffer2[index] =
299 rx_ring->fbr[1]->virt[index] - 4;
300 }
301 }
302
303#ifdef USE_FBR0
304 /* Same for FBR0 (if in use) */
305 for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
306 i++) {
307 u64 Fbr0Offset;
308 u64 Fbr0TempPa;
309
310 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
311 rx_ring->Fbr0MemVa[i] =
312 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
313 &rx_ring->Fbr0MemPa[i]);
314
315 if (!rx_ring->Fbr0MemVa[i]) {
316 dev_err(&adapter->pdev->dev,
317 "Could not alloc memory\n");
318 return -ENOMEM;
319 }
320
321 /* See NOTE in "Save Physical Address" comment above */
322 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
323
324 et131x_align_allocated_memory(adapter,
325 &Fbr0TempPa,
326 &Fbr0Offset,
327 rx_ring->Fbr0BufferSize - 1);
328
329 for (j = 0; j < FBR_CHUNKS; j++) {
330 u32 index = (i * FBR_CHUNKS) + j;
331
332 rx_ring->fbr[0]->virt[index] =
333 (u8 *) rx_ring->Fbr0MemVa[i] +
334 (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
335
336 rx_ring->fbr[0]->bus_high[index] =
337 (u32) (Fbr0TempPa >> 32);
338 rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
339
340 Fbr0TempPa += rx_ring->Fbr0BufferSize;
341
342 rx_ring->fbr[0]->buffer1[index] =
343 rx_ring->fbr[0]->virt[index];
344 rx_ring->fbr[0]->buffer2[index] =
345 rx_ring->fbr[0]->virt[index] - 4;
346 }
347 }
348#endif
349
350 /* Allocate an area of memory for FIFO of Packet Status ring entries */
351 pktStatRingSize =
352 sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
353
354 rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
355 pktStatRingSize,
356 &rx_ring->pPSRingPa);
357
358 if (!rx_ring->pPSRingVa) {
359 dev_err(&adapter->pdev->dev,
360 "Cannot alloc memory for Packet Status Ring\n");
361 return -ENOMEM;
362 }
363 printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
364
365 /*
366 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
367 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
368 * are ever returned, make sure the high part is retrieved here before
369 * storing the adjusted address.
370 */
371
372 /* Allocate an area of memory for writeback of status information */
373 rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
374 sizeof(struct rx_status_block),
375 &rx_ring->rx_status_bus);
376 if (!rx_ring->rx_status_block) {
377 dev_err(&adapter->pdev->dev,
378 "Cannot alloc memory for Status Block\n");
379 return -ENOMEM;
380 }
381 rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
382 printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
383
384 /* Recv
385	 * kmem_cache_create initializes a lookaside list. After successful
386 * creation, nonpaged fixed-size blocks can be allocated from and
387 * freed to the lookaside list.
388 * RFDs will be allocated from this pool.
389 */
390 rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
391 sizeof(struct rfd),
392 0,
393 SLAB_CACHE_DMA |
394 SLAB_HWCACHE_ALIGN,
395 NULL);
396
397 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
398
399 /* The RFDs are going to be put on lists later on, so initialize the
400 * lists now.
401 */
402 INIT_LIST_HEAD(&rx_ring->RecvList);
403 return 0;
404}
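A worked example of the chunked-allocation comment inside the FBR1 loop above, using the default non-jumbo sizes (Fbr1BufferSize = 2048, FBR_CHUNKS = 32, Fbr1Align = 2048):

/* Chunk allocation arithmetic for the default case:
 *
 *	one chunk = 32 * 2048 + (2048 - 1) = 67583 bytes
 *
 * versus padding every buffer individually for alignment:
 *
 *	32 * (2048 + 2047) = 131040 bytes
 *
 * Allocating FBR_CHUNKS buffers at a time therefore roughly halves the
 * alignment overhead, which is what the in-loop comment refers to.
 */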
405
406/**
407 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
408 * @adapter: pointer to our private adapter structure
409 */
410void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
411{
412 u32 index;
413 u32 bufsize;
414 u32 pktStatRingSize;
415 struct rfd *rfd;
416 struct rx_ring *rx_ring;
417
418 /* Setup some convenience pointers */
419 rx_ring = &adapter->rx_ring;
420
421 /* Free RFDs and associated packet descriptors */
422 WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
423
424 while (!list_empty(&rx_ring->RecvList)) {
425 rfd = (struct rfd *) list_entry(rx_ring->RecvList.next,
426 struct rfd, list_node);
427
428 list_del(&rfd->list_node);
429 rfd->skb = NULL;
430 kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
431 }
432
433 /* Free Free Buffer Ring 1 */
434 if (rx_ring->pFbr1RingVa) {
435 /* First the packet memory */
436 for (index = 0; index <
437 (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
438 if (rx_ring->Fbr1MemVa[index]) {
439 u32 Fbr1Align;
440
441 if (rx_ring->Fbr1BufferSize > 4096)
442 Fbr1Align = 4096;
443 else
444 Fbr1Align = rx_ring->Fbr1BufferSize;
445
446 bufsize =
447 (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
448 Fbr1Align - 1;
449
450 pci_free_consistent(adapter->pdev,
451 bufsize,
452 rx_ring->Fbr1MemVa[index],
453 rx_ring->Fbr1MemPa[index]);
454
455 rx_ring->Fbr1MemVa[index] = NULL;
456 }
457 }
458
459 /* Now the FIFO itself */
460 rx_ring->pFbr1RingVa = (void *)((u8 *)
461 rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
462
463 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
464 + 0xfff;
465
466 pci_free_consistent(adapter->pdev, bufsize,
467 rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
468
469 rx_ring->pFbr1RingVa = NULL;
470 }
471
472#ifdef USE_FBR0
473 /* Now the same for Free Buffer Ring 0 */
474 if (rx_ring->pFbr0RingVa) {
475 /* First the packet memory */
476 for (index = 0; index <
477 (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
478 if (rx_ring->Fbr0MemVa[index]) {
479 bufsize =
480 (rx_ring->Fbr0BufferSize *
481 (FBR_CHUNKS + 1)) - 1;
482
483 pci_free_consistent(adapter->pdev,
484 bufsize,
485 rx_ring->Fbr0MemVa[index],
486 rx_ring->Fbr0MemPa[index]);
487
488 rx_ring->Fbr0MemVa[index] = NULL;
489 }
490 }
491
492 /* Now the FIFO itself */
493 rx_ring->pFbr0RingVa = (void *)((u8 *)
494 rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
495
496 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
497 + 0xfff;
498
499 pci_free_consistent(adapter->pdev,
500 bufsize,
501 rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);
502
503 rx_ring->pFbr0RingVa = NULL;
504 }
505#endif
506
507 /* Free Packet Status Ring */
508 if (rx_ring->pPSRingVa) {
509 pktStatRingSize =
510 sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
511
512 pci_free_consistent(adapter->pdev, pktStatRingSize,
513 rx_ring->pPSRingVa, rx_ring->pPSRingPa);
514
515 rx_ring->pPSRingVa = NULL;
516 }
517
518 /* Free area of memory for the writeback of status information */
519 if (rx_ring->rx_status_block) {
520 pci_free_consistent(adapter->pdev,
521 sizeof(struct rx_status_block),
522 rx_ring->rx_status_block, rx_ring->rx_status_bus);
523 rx_ring->rx_status_block = NULL;
524 }
525
526 /* Free receive buffer pool */
527
528 /* Free receive packet pool */
529
530 /* Destroy the lookaside (RFD) pool */
531 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
532 kmem_cache_destroy(rx_ring->RecvLookaside);
533 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
534 }
535
536 /* Free the FBR Lookup Table */
537#ifdef USE_FBR0
538 kfree(rx_ring->fbr[0]);
539#endif
540
541 kfree(rx_ring->fbr[1]);
542
543 /* Reset Counters */
544 rx_ring->nReadyRecv = 0;
545}
546
547/**
548 * et131x_init_recv - Initialize receive data structures.
549 * @adapter: pointer to our private adapter structure
550 *
551 * Returns 0 on success and errno on failure (as defined in errno.h)
552 */
553int et131x_init_recv(struct et131x_adapter *adapter)
554{
555 int status = -ENOMEM;
556 struct rfd *rfd = NULL;
557 u32 rfdct;
558 u32 numrfd = 0;
559 struct rx_ring *rx_ring;
560
561 /* Setup some convenience pointers */
562 rx_ring = &adapter->rx_ring;
563
564 /* Setup each RFD */
565 for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
566 rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
567 GFP_ATOMIC | GFP_DMA);
568
569 if (!rfd) {
570 dev_err(&adapter->pdev->dev,
571 "Couldn't alloc RFD out of kmem_cache\n");
572 status = -ENOMEM;
573 continue;
574 }
575
576 rfd->skb = NULL;
577
578 /* Add this RFD to the RecvList */
579 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
580
581 /* Increment both the available RFD's, and the total RFD's. */
582 rx_ring->nReadyRecv++;
583 numrfd++;
584 }
585
586 if (numrfd > NIC_MIN_NUM_RFD)
587 status = 0;
588
589 rx_ring->NumRfd = numrfd;
590
591 if (status != 0) {
592 kmem_cache_free(rx_ring->RecvLookaside, rfd);
593 dev_err(&adapter->pdev->dev,
594 "Allocation problems in et131x_init_recv\n");
595 }
596 return status;
597}
598
599/**
600 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
601 * @etdev: pointer to our adapter structure
602 */
603void ConfigRxDmaRegs(struct et131x_adapter *etdev)
604{
605 struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
606 struct rx_ring *rx_local = &etdev->rx_ring;
607 struct fbr_desc *fbr_entry;
608 u32 entry;
609 u32 psr_num_des;
610 unsigned long flags;
611
612 /* Halt RXDMA to perform the reconfigure. */
613 et131x_rx_dma_disable(etdev);
614
615 /* Load the completion writeback physical address
616 *
617 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
618 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
619 * are ever returned, make sure the high part is retrieved here
620 * before storing the adjusted address.
621 */
622 writel((u32) ((u64)rx_local->rx_status_bus >> 32),
623 &rx_dma->dma_wb_base_hi);
624 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
625
626 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
627
628 /* Set the address and parameters of the packet status ring into the
629 * 1310's registers
630 */
631 writel((u32) ((u64)rx_local->pPSRingPa >> 32),
632 &rx_dma->psr_base_hi);
633 writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
634 writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
635 writel(0, &rx_dma->psr_full_offset);
636
637 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
638 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
639 &rx_dma->psr_min_des);
640
641 spin_lock_irqsave(&etdev->rcv_lock, flags);
642
643 /* These local variables track the PSR in the adapter structure */
644 rx_local->local_psr_full = 0;
645
646 /* Now's the best time to initialize FBR1 contents */
647 fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
648 for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
649 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
650 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
651 fbr_entry->word2 = entry;
652 fbr_entry++;
653 }
654
655 /* Set the address and parameters of Free buffer ring 1 (and 0 if
656 * required) into the 1310's registers
657 */
658 writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
659 writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
660 writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
661 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
662
663 /* This variable tracks the free buffer ring 1 full position, so it
664 * has to match the above.
665 */
666 rx_local->local_Fbr1_full = ET_DMA10_WRAP;
667 writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
668 &rx_dma->fbr1_min_des);
669
670#ifdef USE_FBR0
671 /* Now's the best time to initialize FBR0 contents */
672 fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
673 for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
674 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
675 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
676 fbr_entry->word2 = entry;
677 fbr_entry++;
678 }
679
680 writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
681 writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
682 writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
683 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
684
685 /* This variable tracks the free buffer ring 0 full position, so it
686 * has to match the above.
687 */
688 rx_local->local_Fbr0_full = ET_DMA10_WRAP;
689 writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
690 &rx_dma->fbr0_min_des);
691#endif
692
693 /* Program the number of packets we will receive before generating an
694 * interrupt.
695 * For version B silicon, this value gets updated once autoneg is
696	 * complete.
697 */
698 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
699
700 /* The "time_done" is not working correctly to coalesce interrupts
701 * after a given time period, but rather is giving us an interrupt
702 * regardless of whether we have received packets.
703 * This value gets updated once autoneg is complete.
704 */
705 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
706
707 spin_unlock_irqrestore(&etdev->rcv_lock, flags);
708}
709
710/**
711 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
712 * @etdev: pointer to our adapter structure
713 */
714void SetRxDmaTimer(struct et131x_adapter *etdev)
715{
716 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
717	 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
718 */
719 if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
720 (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
721 writel(0, &etdev->regs->rxdma.max_pkt_time);
722 writel(1, &etdev->regs->rxdma.num_pkt_done);
723 }
724}
725
726/**
727 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
728 * @etdev: pointer to our adapter
729 * @rfd: pointer to the RFD
730 */
731void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
732{
733 struct rx_ring *rx_local = &etdev->rx_ring;
734 struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
735 u16 bi = rfd->bufferindex;
736 u8 ri = rfd->ringindex;
737 unsigned long flags;
738
739 /* We don't use any of the OOB data besides status. Otherwise, we
740 * need to clean up OOB data
741 */
742 if (
743#ifdef USE_FBR0
744 (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
745#endif
746 (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
747 spin_lock_irqsave(&etdev->FbrLock, flags);
748
749 if (ri == 1) {
750 struct fbr_desc *next =
751 (struct fbr_desc *) (rx_local->pFbr1RingVa) +
752 INDEX10(rx_local->local_Fbr1_full);
753
754 /* Handle the Free Buffer Ring advancement here. Write
755 * the PA / Buffer Index for the returned buffer into
757			 * the oldest (next to be freed) FBR entry
757 */
758 next->addr_hi = rx_local->fbr[1]->bus_high[bi];
759 next->addr_lo = rx_local->fbr[1]->bus_low[bi];
760 next->word2 = bi;
761
762 writel(bump_fbr(&rx_local->local_Fbr1_full,
763 rx_local->Fbr1NumEntries - 1),
764 &rx_dma->fbr1_full_offset);
765 }
766#ifdef USE_FBR0
767 else {
768 struct fbr_desc *next = (struct fbr_desc *)
769 rx_local->pFbr0RingVa +
770 INDEX10(rx_local->local_Fbr0_full);
771
772 /* Handle the Free Buffer Ring advancement here. Write
773 * the PA / Buffer Index for the returned buffer into
774 * the oldest (next to be freed) FBR entry
775 */
776 next->addr_hi = rx_local->fbr[0]->bus_high[bi];
777 next->addr_lo = rx_local->fbr[0]->bus_low[bi];
778 next->word2 = bi;
779
780 writel(bump_fbr(&rx_local->local_Fbr0_full,
781 rx_local->Fbr0NumEntries - 1),
782 &rx_dma->fbr0_full_offset);
783 }
784#endif
785 spin_unlock_irqrestore(&etdev->FbrLock, flags);
786 } else {
787 dev_err(&etdev->pdev->dev,
788 "NICReturnRFD illegal Buffer Index returned\n");
789 }
790
791 /* The processing on this RFD is done, so put it back on the tail of
792 * our list
793 */
794 spin_lock_irqsave(&etdev->rcv_lock, flags);
795 list_add_tail(&rfd->list_node, &rx_local->RecvList);
796 rx_local->nReadyRecv++;
797 spin_unlock_irqrestore(&etdev->rcv_lock, flags);
798
799 WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
800}
801
802/**
803 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
804 * @etdev: pointer to our adapter structure
805 */
806void et131x_rx_dma_disable(struct et131x_adapter *etdev)
807{
808 u32 csr;
809 /* Setup the receive dma configuration register */
810 writel(0x00002001, &etdev->regs->rxdma.csr);
811 csr = readl(&etdev->regs->rxdma.csr);
812 if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
813 udelay(5);
814 csr = readl(&etdev->regs->rxdma.csr);
815 if ((csr & 0x00020000) == 0)
816 dev_err(&etdev->pdev->dev,
817 "RX Dma failed to enter halt state. CSR 0x%08x\n",
818 csr);
819 }
820}
821
822/**
823 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
824 * @etdev: pointer to our adapter structure
825 */
826void et131x_rx_dma_enable(struct et131x_adapter *etdev)
827{
828 /* Setup the receive dma configuration register for normal operation */
829 u32 csr = 0x2000; /* FBR1 enable */
830
831 if (etdev->rx_ring.Fbr1BufferSize == 4096)
832 csr |= 0x0800;
833 else if (etdev->rx_ring.Fbr1BufferSize == 8192)
834 csr |= 0x1000;
835 else if (etdev->rx_ring.Fbr1BufferSize == 16384)
836 csr |= 0x1800;
837#ifdef USE_FBR0
838 csr |= 0x0400; /* FBR0 enable */
839 if (etdev->rx_ring.Fbr0BufferSize == 256)
840 csr |= 0x0100;
841 else if (etdev->rx_ring.Fbr0BufferSize == 512)
842 csr |= 0x0200;
843 else if (etdev->rx_ring.Fbr0BufferSize == 1024)
844 csr |= 0x0300;
845#endif
846 writel(csr, &etdev->regs->rxdma.csr);
847
848 csr = readl(&etdev->regs->rxdma.csr);
849 if ((csr & 0x00020000) != 0) {
850 udelay(5);
851 csr = readl(&etdev->regs->rxdma.csr);
852 if ((csr & 0x00020000) != 0) {
853 dev_err(&etdev->pdev->dev,
854 "RX Dma failed to exit halt state. CSR 0x%08x\n",
855 csr);
856 }
857 }
858}
859
860/**
861 * nic_rx_pkts - Checks the hardware for available packets
862 * @etdev: pointer to our adapter
863 *
864 * Returns rfd, a pointer to our MPRFD.
865 *
866 * Checks the hardware for available packets, using the completion ring.
867 * If packets are available, it gets an RFD from the RecvList, attaches
868 * the packet to it, puts the RFD in the RecvPendList, and also returns
869 * the pointer to the RFD.
870 */
871struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
872{
873 struct rx_ring *rx_local = &etdev->rx_ring;
874 struct rx_status_block *status;
875 struct pkt_stat_desc *psr;
876 struct rfd *rfd;
877 u32 i;
878 u8 *buf;
879 unsigned long flags;
880 struct list_head *element;
881 u8 rindex;
882 u16 bindex;
883 u32 len;
884 u32 word0;
885 u32 word1;
886
887 /* RX Status block is written by the DMA engine prior to every
888 * interrupt. It contains the next to be used entry in the Packet
889 * Status Ring, and also the two Free Buffer rings.
890 */
891 status = rx_local->rx_status_block;
892 word1 = status->Word1 >> 16; /* Get the useful bits */
893
894	/* Check whether the PSR offset and wrap bits match our local copy */
895 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
896 /* Looks like this ring is not updated yet */
897 return NULL;
898
899 /* The packet status ring indicates that data is available. */
900 psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
901 (rx_local->local_psr_full & 0xFFF);
902
903 /* Grab any information that is required once the PSR is
904 * advanced, since we can no longer rely on the memory being
905 * accurate
906 */
907 len = psr->word1 & 0xFFFF;
908 rindex = (psr->word1 >> 26) & 0x03;
909 bindex = (psr->word1 >> 16) & 0x3FF;
910 word0 = psr->word0;
911
912 /* Indicate that we have used this PSR entry. */
913 /* FIXME wrap 12 */
914 add_12bit(&rx_local->local_psr_full, 1);
915 if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
916 /* Clear psr full and toggle the wrap bit */
917 rx_local->local_psr_full &= ~0xFFF;
918 rx_local->local_psr_full ^= 0x1000;
919 }
920
921 writel(rx_local->local_psr_full,
922 &etdev->regs->rxdma.psr_full_offset);
923
924#ifndef USE_FBR0
925 if (rindex != 1)
926 return NULL;
927#endif
928
929#ifdef USE_FBR0
930 if (rindex > 1 ||
931 (rindex == 0 &&
932 bindex > rx_local->Fbr0NumEntries - 1) ||
933 (rindex == 1 &&
934 bindex > rx_local->Fbr1NumEntries - 1))
935#else
936 if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
937#endif
938 {
939 /* Illegal buffer or ring index cannot be used by S/W*/
940 dev_err(&etdev->pdev->dev,
941 "NICRxPkts PSR Entry %d indicates "
942 "length of %d and/or bad bi(%d)\n",
943 rx_local->local_psr_full & 0xFFF,
944 len, bindex);
945 return NULL;
946 }
947
948 /* Get and fill the RFD. */
949 spin_lock_irqsave(&etdev->rcv_lock, flags);
950
951 rfd = NULL;
952 element = rx_local->RecvList.next;
953 rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
954
955 if (rfd == NULL) {
956 spin_unlock_irqrestore(&etdev->rcv_lock, flags);
957 return NULL;
958 }
959
960 list_del(&rfd->list_node);
961 rx_local->nReadyRecv--;
962
963 spin_unlock_irqrestore(&etdev->rcv_lock, flags);
964
965 rfd->bufferindex = bindex;
966 rfd->ringindex = rindex;
967
968 /* In V1 silicon, there is a bug which screws up filtering of
969 * runt packets. Therefore runt packet filtering is disabled
970 * in the MAC and the packets are dropped here. They are
971 * also counted here.
972 */
973 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
974 etdev->stats.other_errors++;
975 len = 0;
976 }
977
978 if (len) {
979 if (etdev->ReplicaPhyLoopbk == 1) {
980 buf = rx_local->fbr[rindex]->virt[bindex];
981
982 if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
983 if (memcmp(&buf[42], "Replica packet",
984 ETH_HLEN)) {
985 etdev->ReplicaPhyLoopbkPF = 1;
986 }
987 }
988 }
989
990 /* Determine if this is a multicast packet coming in */
991 if ((word0 & ALCATEL_MULTICAST_PKT) &&
992 !(word0 & ALCATEL_BROADCAST_PKT)) {
993 /* Promiscuous mode and Multicast mode are
994 * not mutually exclusive as was first
995 * thought. I guess Promiscuous is just
996 * considered a super-set of the other
997 * filters. Generally filter is 0x2b when in
998 * promiscuous mode.
999 */
1000 if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
1001 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
1002 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1003 buf = rx_local->fbr[rindex]->
1004 virt[bindex];
1005
1006 /* Loop through our list to see if the
1007 * destination address of this packet
1008 * matches one in our list.
1009 */
1010 for (i = 0;
1011 i < etdev->MCAddressCount;
1012 i++) {
1013 if (buf[0] ==
1014 etdev->MCList[i][0]
1015 && buf[1] ==
1016 etdev->MCList[i][1]
1017 && buf[2] ==
1018 etdev->MCList[i][2]
1019 && buf[3] ==
1020 etdev->MCList[i][3]
1021 && buf[4] ==
1022 etdev->MCList[i][4]
1023 && buf[5] ==
1024 etdev->MCList[i][5]) {
1025 break;
1026 }
1027 }
1028
1029 /* If our index is equal to the number
1030 * of Multicast address we have, then
1031 * this means we did not find this
1032 * packet's matching address in our
1033 * list. Set the len to zero,
1034 * so we free our RFD when we return
1035 * from this function.
1036 */
1037 if (i == etdev->MCAddressCount)
1038 len = 0;
1039 }
1040
1041 if (len > 0)
1042 etdev->stats.multircv++;
1043 } else if (word0 & ALCATEL_BROADCAST_PKT)
1044 etdev->stats.brdcstrcv++;
1045 else
1046 /* Not sure what this counter measures in
1047 * promiscuous mode. Perhaps we should check
1048 * the MAC address to see if it is directed
1049 * to us in promiscuous mode.
1050 */
1051 etdev->stats.unircv++;
1052 }
1053
1054 if (len > 0) {
1055 struct sk_buff *skb = NULL;
1056
1057 /*rfd->len = len - 4; */
1058 rfd->len = len;
1059
1060 skb = dev_alloc_skb(rfd->len + 2);
1061 if (!skb) {
1062 dev_err(&etdev->pdev->dev,
1063 "Couldn't alloc an SKB for Rx\n");
1064 return NULL;
1065 }
1066
1067 etdev->net_stats.rx_bytes += rfd->len;
1068
1069 memcpy(skb_put(skb, rfd->len),
1070 rx_local->fbr[rindex]->virt[bindex],
1071 rfd->len);
1072
1073 skb->dev = etdev->netdev;
1074 skb->protocol = eth_type_trans(skb, etdev->netdev);
1075 skb->ip_summed = CHECKSUM_NONE;
1076
1077 netif_rx(skb);
1078 } else {
1079 rfd->len = 0;
1080 }
1081
1082 nic_return_rfd(etdev, rfd);
1083 return rfd;
1084}
1085
1086/**
1087 * et131x_reset_recv - Reset the receive list
1088 * @etdev: pointer to our adapter
1089 *
1090 * Assumption, Rcv spinlock has been acquired.
1091 */
1092void et131x_reset_recv(struct et131x_adapter *etdev)
1093{
1094 WARN_ON(list_empty(&etdev->rx_ring.RecvList));
1095
1096}
1097
1098/**
1099 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1100 * @etdev: pointer to our adapter
1101 *
1102 * Assumption, Rcv spinlock has been acquired.
1103 */
1104void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1105{
1106 struct rfd *rfd = NULL;
1107 u32 count = 0;
1108 bool done = true;
1109
1110 /* Process up to available RFD's */
1111 while (count < NUM_PACKETS_HANDLED) {
1112 if (list_empty(&etdev->rx_ring.RecvList)) {
1113 WARN_ON(etdev->rx_ring.nReadyRecv != 0);
1114 done = false;
1115 break;
1116 }
1117
1118 rfd = nic_rx_pkts(etdev);
1119
1120 if (rfd == NULL)
1121 break;
1122
1123 /* Do not receive any packets until a filter has been set.
1124 * Do not receive any packets until we have link.
1125 * If length is zero, return the RFD in order to advance the
1126 * Free buffer ring.
1127 */
1128 if (!etdev->PacketFilter ||
1129 !netif_carrier_ok(etdev->netdev) ||
1130 rfd->len == 0)
1131 continue;
1132
1133 /* Increment the number of packets we received */
1134 etdev->net_stats.rx_packets++;
1135
1136 /* Set the status on the packet, either resources or success */
1137 if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
1138 dev_warn(&etdev->pdev->dev,
1139 "RFD's are running out\n");
1140 }
1141 count++;
1142 }
1143
1144 if (count == NUM_PACKETS_HANDLED || !done) {
1145 etdev->rx_ring.UnfinishedReceives = true;
1146 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1147 &etdev->regs->global.watchdog_timer);
1148 } else
1149 /* Watchdog timer will disable itself if appropriate. */
1150 etdev->rx_ring.UnfinishedReceives = false;
1151}
1152
diff --git a/drivers/staging/et131x/et1310_rx.h b/drivers/staging/et131x/et1310_rx.h
new file mode 100644
index 00000000000..e8c653d37a7
--- /dev/null
+++ b/drivers/staging/et131x/et1310_rx.h
@@ -0,0 +1,243 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_rx.h - Defines, structs, enums, prototypes, etc. pertaining to data
12 * reception.
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#ifndef __ET1310_RX_H__
60#define __ET1310_RX_H__
61
62#include "et1310_address_map.h"
63
64#define USE_FBR0 true
65
66#ifdef USE_FBR0
67/* #define FBR0_BUFFER_SIZE 256 */
68#endif
69
70/* #define FBR1_BUFFER_SIZE 2048 */
71
72#define FBR_CHUNKS 32
73
74#define MAX_DESC_PER_RING_RX 1024
75
76/* number of RFDs - default and min */
77#ifdef USE_FBR0
78#define RFD_LOW_WATER_MARK 40
79#define NIC_MIN_NUM_RFD 64
80#define NIC_DEFAULT_NUM_RFD 1024
81#else
82#define RFD_LOW_WATER_MARK 20
83#define NIC_MIN_NUM_RFD 64
84#define NIC_DEFAULT_NUM_RFD 256
85#endif
86
87#define NUM_PACKETS_HANDLED 256
88
89#define ALCATEL_BAD_STATUS 0xe47f0000
90#define ALCATEL_MULTICAST_PKT 0x01000000
91#define ALCATEL_BROADCAST_PKT 0x02000000
92
93/* typedefs for Free Buffer Descriptors */
94struct fbr_desc {
95 u32 addr_lo;
96 u32 addr_hi;
97 u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
98};
99
100/* Packet Status Ring Descriptors
101 *
102 * Word 0:
103 *
104 * top 16 bits are from the Alcatel Status Word as enumerated in
105 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
106 *
107 * 0: hp hash pass
108 * 1: ipa IP checksum assist
109 * 2: ipp IP checksum pass
110 * 3: tcpa TCP checksum assist
111 * 4: tcpp TCP checksum pass
112 * 5: wol WOL Event
113 * 6: rxmac_error RXMAC Error Indicator
114 * 7: drop Drop packet
115 * 8: ft Frame Truncated
116 * 9: jp Jumbo Packet
117 * 10: vp VLAN Packet
118 * 11-15: unused
119 * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
120 * 17: asw_RX_DV_event short receive event detected
121 * 18: asw_false_carrier_event bad carrier since last good packet
122 * 19: asw_code_err one or more nibbles signalled as errors
123 * 20: asw_CRC_err CRC error
124 * 21: asw_len_chk_err frame length field incorrect
125 * 22: asw_too_long frame length > 1518 bytes
126 * 23: asw_OK valid CRC + no code error
127 * 24: asw_multicast has a multicast address
128 * 25: asw_broadcast has a broadcast address
129 * 26: asw_dribble_nibble spurious bits after EOP
130 * 27: asw_control_frame is a control frame
131 * 28: asw_pause_frame is a pause frame
132 * 29: asw_unsupported_op unsupported OP code
133 * 30: asw_VLAN_tag VLAN tag detected
134 * 31: asw_long_evt Rx long event
135 *
136 * Word 1:
137 * 0-15: length length in bytes
138 * 16-25: bi Buffer Index
139 * 26-27: ri Ring Index
140 * 28-31: reserved
141 */
142
143struct pkt_stat_desc {
144 u32 word0;
145 u32 word1;
146};
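A minimal sketch (not part of this header) of how the Word 1 layout documented above is unpacked; nic_rx_pkts() in et1310_rx.c performs the same shifts inline:

/* Illustrative accessors for the packet status ring Word 1 fields. */
static inline u32 psr_length(const struct pkt_stat_desc *psr)
{
	return psr->word1 & 0xFFFF;		/* bits 0-15: length */
}

static inline u32 psr_buffer_index(const struct pkt_stat_desc *psr)
{
	return (psr->word1 >> 16) & 0x3FF;	/* bits 16-25: bi */
}

static inline u32 psr_ring_index(const struct pkt_stat_desc *psr)
{
	return (psr->word1 >> 26) & 0x03;	/* bits 26-27: ri */
}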
147
148/* Typedefs for the RX DMA status word */
149
150/*
151 * rx status word 0 holds part of the status bits of the Rx DMA engine
152 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
153 * which contains the Free Buffer ring 0 and 1 available offset.
154 *
155 * bit 0-9 FBR1 offset
156 * bit 10 Wrap flag for FBR1
157 * bit 16-25 FBR0 offset
158 * bit 26 Wrap flag for FBR0
159 */
160
161/*
162 * rx status word 1 holds part of the status bits of the Rx DMA engine
163 * that get copied out to memory by the ET-1310. Word 1 is a 32 bit word
164 * which contains the Packet Status Ring available offset.
165 *
166 * bit 0-15 reserved
167 * bit 16-27 PSRoffset
168 * bit 28 PSRwrap
169 * bit 29-31 unused
170 */
171
172/*
173 * struct rx_status_block is a structure representing the status of the Rx
174 * DMA engine; it sits in host memory and is pointed to by 0x101c / 0x1020
175 */
176struct rx_status_block {
177 u32 Word0;
178 u32 Word1;
179};
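Likewise, a small sketch of how the status block words documented above can be unpacked; the receive path compares the Word1 PSR offset against its local copy in exactly this form:

/* Illustrative accessors for the RX DMA status block. */
static inline u32 rx_status_fbr1_offset(const struct rx_status_block *sb)
{
	return sb->Word0 & 0x7FF;		/* bits 0-9: offset, bit 10: wrap */
}

static inline u32 rx_status_psr_offset(const struct rx_status_block *sb)
{
	return (sb->Word1 >> 16) & 0x1FFF;	/* bits 16-27: offset, bit 28: wrap */
}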
180
181/*
182 * Structure for look-up table holding free buffer ring pointers
183 */
184struct fbr_lookup {
185 void *virt[MAX_DESC_PER_RING_RX];
186 void *buffer1[MAX_DESC_PER_RING_RX];
187 void *buffer2[MAX_DESC_PER_RING_RX];
188 u32 bus_high[MAX_DESC_PER_RING_RX];
189 u32 bus_low[MAX_DESC_PER_RING_RX];
190};
191
192/*
193 * struct rx_ring is the structure representing the adapter's local
194 * reference(s) to the rings
195 */
196struct rx_ring {
197#ifdef USE_FBR0
198 void *pFbr0RingVa;
199 dma_addr_t pFbr0RingPa;
200 void *Fbr0MemVa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
201 dma_addr_t Fbr0MemPa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
202 uint64_t Fbr0Realpa;
203 uint64_t Fbr0offset;
204 u32 local_Fbr0_full;
205 u32 Fbr0NumEntries;
206 u32 Fbr0BufferSize;
207#endif
208 void *pFbr1RingVa;
209 dma_addr_t pFbr1RingPa;
210 void *Fbr1MemVa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
211 dma_addr_t Fbr1MemPa[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
212 uint64_t Fbr1Realpa;
213 uint64_t Fbr1offset;
214 struct fbr_lookup *fbr[2]; /* One per ring */
215 u32 local_Fbr1_full;
216 u32 Fbr1NumEntries;
217 u32 Fbr1BufferSize;
218
219 void *pPSRingVa;
220 dma_addr_t pPSRingPa;
221 u32 local_psr_full;
222 u32 PsrNumEntries;
223
224 struct rx_status_block *rx_status_block;
225 dma_addr_t rx_status_bus;
226
227 struct list_head RecvBufferPool;
228
229 /* RECV */
230 struct list_head RecvList;
231 u32 nReadyRecv;
232
233 u32 NumRfd;
234
235 bool UnfinishedReceives;
236
237 struct list_head RecvPacketPool;
238
239 /* lookaside lists */
240 struct kmem_cache *RecvLookaside;
241};
242
243#endif /* __ET1310_RX_H__ */
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
new file mode 100644
index 00000000000..8fb3051fe28
--- /dev/null
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -0,0 +1,797 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_tx.c - Routines used to perform data transmission.
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/slab.h>
70#include <linux/ctype.h>
71#include <linux/string.h>
72#include <linux/timer.h>
73#include <linux/interrupt.h>
74#include <linux/in.h>
75#include <linux/delay.h>
76#include <linux/io.h>
77#include <linux/bitops.h>
78#include <asm/system.h>
79
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et131x_adapter.h"
88#include "et1310_tx.h"
89#include "et131x.h"
90
91static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
92 struct tcb *tcb);
93static int et131x_send_packet(struct sk_buff *skb,
94 struct et131x_adapter *etdev);
95static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
96
97/**
98 * et131x_tx_dma_memory_alloc
99 * @adapter: pointer to our private adapter structure
100 *
101 * Returns 0 on success and errno on failure (as defined in errno.h).
102 *
103 * Allocates memory that will be visible both to the device and to the CPU.
104 * The OS will pass us packets, pointers to which we will insert in the Tx
105 * Descriptor queue. The device will read this queue to find the packets in
106 * memory. The device will update the "status" in memory each time it xmits a
107 * packet.
108 */
109int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
110{
111 int desc_size = 0;
112 struct tx_ring *tx_ring = &adapter->tx_ring;
113
114	/* Allocate memory for the TCBs (Transmit Control Blocks) */
115 adapter->tx_ring.tcb_ring =
116 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
117 if (!adapter->tx_ring.tcb_ring) {
118 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
119 return -ENOMEM;
120 }
121
122 /* Allocate enough memory for the Tx descriptor ring, and allocate
123 * some extra so that the ring can be aligned on a 4k boundary.
124 */
125 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
126 tx_ring->tx_desc_ring =
127 (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
128 &tx_ring->tx_desc_ring_pa);
129 if (!adapter->tx_ring.tx_desc_ring) {
130 dev_err(&adapter->pdev->dev,
131 "Cannot alloc memory for Tx Ring\n");
132 return -ENOMEM;
133 }
134
135 /* Save physical address
136 *
137 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
138 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
139 * are ever returned, make sure the high part is retrieved here before
140 * storing the adjusted address.
141 */
142 /* Allocate memory for the Tx status block */
143 tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
144 sizeof(u32),
145 &tx_ring->tx_status_pa);
146	if (!adapter->tx_ring.tx_status) {
147 dev_err(&adapter->pdev->dev,
148 "Cannot alloc memory for Tx status block\n");
149 return -ENOMEM;
150 }
151 return 0;
152}
153
154/**
155 * et131x_tx_dma_memory_free - Free all memory allocated within this module
156 * @adapter: pointer to our private adapter structure
157 *
158 * Frees the Tx descriptor ring, the Tx status block and the TCB array.
159 */
160void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
161{
162 int desc_size = 0;
163
164 if (adapter->tx_ring.tx_desc_ring) {
165 /* Free memory relating to Tx rings here */
166 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
167 + 4096 - 1;
168 pci_free_consistent(adapter->pdev,
169 desc_size,
170 adapter->tx_ring.tx_desc_ring,
171 adapter->tx_ring.tx_desc_ring_pa);
172 adapter->tx_ring.tx_desc_ring = NULL;
173 }
174
175 /* Free memory for the Tx status block */
176 if (adapter->tx_ring.tx_status) {
177 pci_free_consistent(adapter->pdev,
178 sizeof(u32),
179 adapter->tx_ring.tx_status,
180 adapter->tx_ring.tx_status_pa);
181
182 adapter->tx_ring.tx_status = NULL;
183 }
184 /* Free the memory for the tcb structures */
185 kfree(adapter->tx_ring.tcb_ring);
186}
187
188/**
189 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
190 * @etdev: pointer to our private adapter structure
191 *
192 * Configure the transmit engine with the ring buffers we have created
193 * and prepare it for use.
194 */
195void ConfigTxDmaRegs(struct et131x_adapter *etdev)
196{
197 struct txdma_regs __iomem *txdma = &etdev->regs->txdma;
198
199 /* Load the hardware with the start of the transmit descriptor ring. */
200 writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
201 &txdma->pr_base_hi);
202 writel((u32) etdev->tx_ring.tx_desc_ring_pa,
203 &txdma->pr_base_lo);
204
205 /* Initialise the transmit DMA engine */
206 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
207
208 /* Load the completion writeback physical address */
209 writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
210 &txdma->dma_wb_base_hi);
211 writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
212
213 *etdev->tx_ring.tx_status = 0;
214
215 writel(0, &txdma->service_request);
216 etdev->tx_ring.send_idx = 0;
217}
218
219/**
220 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
221 * @etdev: pointer to our adapter structure
222 */
223void et131x_tx_dma_disable(struct et131x_adapter *etdev)
224{
225	/* Set up the transmit DMA configuration register */
226 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
227 &etdev->regs->txdma.csr);
228}
229
230/**
231 * et131x_tx_dma_enable - restart Tx DMA on the ET1310
232 * @etdev: pointer to our adapter structure
233 *
234 * Mainly used after a return to the D0 (full-power) state from a lower state.
235 */
236void et131x_tx_dma_enable(struct et131x_adapter *etdev)
237{
238 /* Setup the transmit dma configuration register for normal
239 * operation
240 */
241 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
242 &etdev->regs->txdma.csr);
243}
244
245/**
246 * et131x_init_send - Initialize send data structures
247 * @adapter: pointer to our private adapter structure
248 */
249void et131x_init_send(struct et131x_adapter *adapter)
250{
251 struct tcb *tcb;
252 u32 ct;
253 struct tx_ring *tx_ring;
254
255 /* Setup some convenience pointers */
256 tx_ring = &adapter->tx_ring;
257 tcb = adapter->tx_ring.tcb_ring;
258
259 tx_ring->tcb_qhead = tcb;
260
261 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
262
263 /* Go through and set up each TCB */
264 for (ct = 0; ct++ < NUM_TCB; tcb++)
265 /* Set the link pointer in HW TCB to the next TCB in the
266 * chain
267 */
268 tcb->next = tcb + 1;
269
270 /* Set the tail pointer */
271 tcb--;
272 tx_ring->tcb_qtail = tcb;
273 tcb->next = NULL;
274 /* Curr send queue should now be empty */
275 tx_ring->send_head = NULL;
276 tx_ring->send_tail = NULL;
277}
278
279/**
280 * et131x_send_packets - Called by the OS to send a packet
281 * @skb: the packet to send
282 * @netdev: device on which to transmit the packet
283 *
284 * Return 0 in almost all cases; non-zero value in extreme hard failure only
285 */
286int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
287{
288 int status = 0;
289 struct et131x_adapter *etdev = NULL;
290
291 etdev = netdev_priv(netdev);
292
293 /* Send these packets
294 *
295	 * NOTE: The Linux Tx entry point is only given one packet at a time
296	 * to transmit, so a packet count and its associated array make no sense here
297 */
298
299 /* TCB is not available */
300 if (etdev->tx_ring.used >= NUM_TCB) {
301 /* NOTE: If there's an error on send, no need to queue the
302 * packet under Linux; if we just send an error up to the
303 * netif layer, it will resend the skb to us.
304 */
305 status = -ENOMEM;
306 } else {
307 /* We need to see if the link is up; if it's not, make the
308 * netif layer think we're good and drop the packet
309 */
310 if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
311 !netif_carrier_ok(netdev)) {
312 dev_kfree_skb_any(skb);
313 skb = NULL;
314
315 etdev->net_stats.tx_dropped++;
316 } else {
317 status = et131x_send_packet(skb, etdev);
318 if (status != 0 && status != -ENOMEM) {
319 /* On any other error, make netif think we're
320 * OK and drop the packet
321 */
322 dev_kfree_skb_any(skb);
323 skb = NULL;
324 etdev->net_stats.tx_dropped++;
325 }
326 }
327 }
328 return status;
329}
330
331/**
332 * et131x_send_packet - Do the work to send a packet
333 * @skb: the packet(s) to send
334 * @etdev: a pointer to the device's private adapter structure
335 *
336 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
337 *
338 * Assumption: Send spinlock has been acquired
339 */
340static int et131x_send_packet(struct sk_buff *skb,
341 struct et131x_adapter *etdev)
342{
343 int status;
344 struct tcb *tcb = NULL;
345 u16 *shbufva;
346 unsigned long flags;
347
348 /* All packets must have at least a MAC address and a protocol type */
349 if (skb->len < ETH_HLEN)
350 return -EIO;
351
352 /* Get a TCB for this packet */
353 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
354
355 tcb = etdev->tx_ring.tcb_qhead;
356
357 if (tcb == NULL) {
358 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
359 return -ENOMEM;
360 }
361
362 etdev->tx_ring.tcb_qhead = tcb->next;
363
364 if (etdev->tx_ring.tcb_qhead == NULL)
365 etdev->tx_ring.tcb_qtail = NULL;
366
367 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
368
369 tcb->skb = skb;
370
371 if (skb->data != NULL && skb->len - skb->data_len >= 6) {
372 shbufva = (u16 *) skb->data;
373
374 if ((shbufva[0] == 0xffff) &&
375 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
376 tcb->flags |= fMP_DEST_BROAD;
377 } else if ((shbufva[0] & 0x3) == 0x0001) {
378 tcb->flags |= fMP_DEST_MULTI;
379 }
380 }
381
382 tcb->next = NULL;
383
384 /* Call the NIC specific send handler. */
385 status = nic_send_packet(etdev, tcb);
386
387 if (status != 0) {
388 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
389
390 if (etdev->tx_ring.tcb_qtail)
391 etdev->tx_ring.tcb_qtail->next = tcb;
392 else
393 /* Apparently ready Q is empty. */
394 etdev->tx_ring.tcb_qhead = tcb;
395
396 etdev->tx_ring.tcb_qtail = tcb;
397 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
398 return status;
399 }
400 WARN_ON(etdev->tx_ring.used > NUM_TCB);
401 return 0;
402}
403
404/**
405 * nic_send_packet - NIC specific send handler for version B silicon.
406 * @etdev: pointer to our adapter
407 * @tcb: pointer to struct tcb
408 *
409 * Returns 0 or errno.
410 */
411static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
412{
413 u32 i;
414 struct tx_desc desc[24]; /* 24 x 16 byte */
415 u32 frag = 0;
416 u32 thiscopy, remainder;
417 struct sk_buff *skb = tcb->skb;
418 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
419 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
420 unsigned long flags;
421
422	/* The optimizations of this send routine restrict us to
423	 * sending 24 fragments per pass. In practice we should never see
424	 * more than 5 fragments.
425 *
426 * NOTE: The older version of this function (below) can handle any
427 * number of fragments. If needed, we can call this function,
428 * although it is less efficient.
429 */
430 if (nr_frags > 23)
431 return -EIO;
432
433 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
434
435 for (i = 0; i < nr_frags; i++) {
436 /* If there is something in this element, lets get a
437 * descriptor from the ring and get the necessary data
438 */
439 if (i == 0) {
440 /* If the fragments are smaller than a standard MTU,
441 * then map them to a single descriptor in the Tx
442 * Desc ring. However, if they're larger, as is
443 * possible with support for jumbo packets, then
444 * split them each across 2 descriptors.
445 *
446 * This will work until we determine why the hardware
447 * doesn't seem to like large fragments.
448 */
449 if ((skb->len - skb->data_len) <= 1514) {
450 desc[frag].addr_hi = 0;
451				/* Low 16 bits are length; the high 16 are
452				   VLAN info, currently unused, so zero */
453 desc[frag].len_vlan =
454 skb->len - skb->data_len;
455
456 /* NOTE: Here, the dma_addr_t returned from
457 * pci_map_single() is implicitly cast as a
458 * u32. Although dma_addr_t can be
459 * 64-bit, the address returned by
460 * pci_map_single() is always 32-bit
461 * addressable (as defined by the pci/dma
462 * subsystem)
463 */
464 desc[frag++].addr_lo =
465 pci_map_single(etdev->pdev,
466 skb->data,
467 skb->len -
468 skb->data_len,
469 PCI_DMA_TODEVICE);
470 } else {
471 desc[frag].addr_hi = 0;
472 desc[frag].len_vlan =
473 (skb->len - skb->data_len) / 2;
474
475 /* NOTE: Here, the dma_addr_t returned from
476 * pci_map_single() is implicitly cast as a
477 * u32. Although dma_addr_t can be
478 * 64-bit, the address returned by
479 * pci_map_single() is always 32-bit
480 * addressable (as defined by the pci/dma
481 * subsystem)
482 */
483 desc[frag++].addr_lo =
484 pci_map_single(etdev->pdev,
485 skb->data,
486 ((skb->len -
487 skb->data_len) / 2),
488 PCI_DMA_TODEVICE);
489 desc[frag].addr_hi = 0;
490
491 desc[frag].len_vlan =
492 (skb->len - skb->data_len) / 2;
493
494 /* NOTE: Here, the dma_addr_t returned from
495 * pci_map_single() is implicitly cast as a
496 * u32. Although dma_addr_t can be
497 * 64-bit, the address returned by
498 * pci_map_single() is always 32-bit
499 * addressable (as defined by the pci/dma
500 * subsystem)
501 */
502 desc[frag++].addr_lo =
503 pci_map_single(etdev->pdev,
504 skb->data +
505 ((skb->len -
506 skb->data_len) / 2),
507 ((skb->len -
508 skb->data_len) / 2),
509 PCI_DMA_TODEVICE);
510 }
511 } else {
512 desc[frag].addr_hi = 0;
513 desc[frag].len_vlan =
514 frags[i - 1].size;
515
516 /* NOTE: Here, the dma_addr_t returned from
517 * pci_map_page() is implicitly cast as a u32.
518 * Although dma_addr_t can be 64-bit, the address
519 * returned by pci_map_page() is always 32-bit
520 * addressable (as defined by the pci/dma subsystem)
521 */
522 desc[frag++].addr_lo =
523 pci_map_page(etdev->pdev,
524 frags[i - 1].page,
525 frags[i - 1].page_offset,
526 frags[i - 1].size,
527 PCI_DMA_TODEVICE);
528 }
529 }
530
531 if (frag == 0)
532 return -EIO;
533
534 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
535 if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
536 /* Last element & Interrupt flag */
537 desc[frag - 1].flags = 0x5;
538 etdev->tx_ring.since_irq = 0;
539 } else { /* Last element */
540 desc[frag - 1].flags = 0x1;
541 }
542 } else
543 desc[frag - 1].flags = 0x5;
544
545 desc[0].flags |= 2; /* First element flag */
546
547 tcb->index_start = etdev->tx_ring.send_idx;
548 tcb->stale = 0;
549
550 spin_lock_irqsave(&etdev->send_hw_lock, flags);
551
552 thiscopy = NUM_DESC_PER_RING_TX -
553 INDEX10(etdev->tx_ring.send_idx);
554
555 if (thiscopy >= frag) {
556 remainder = 0;
557 thiscopy = frag;
558 } else {
559 remainder = frag - thiscopy;
560 }
561
562 memcpy(etdev->tx_ring.tx_desc_ring +
563 INDEX10(etdev->tx_ring.send_idx), desc,
564 sizeof(struct tx_desc) * thiscopy);
565
566 add_10bit(&etdev->tx_ring.send_idx, thiscopy);
567
568 if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
569 INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
570 etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
571 etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
572 }
573
574 if (remainder) {
575 memcpy(etdev->tx_ring.tx_desc_ring,
576 desc + thiscopy,
577 sizeof(struct tx_desc) * remainder);
578
579 add_10bit(&etdev->tx_ring.send_idx, remainder);
580 }
581
582 if (INDEX10(etdev->tx_ring.send_idx) == 0) {
583 if (etdev->tx_ring.send_idx)
584 tcb->index = NUM_DESC_PER_RING_TX - 1;
585 else
586 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
587 } else
588 tcb->index = etdev->tx_ring.send_idx - 1;
589
590 spin_lock(&etdev->TCBSendQLock);
591
592 if (etdev->tx_ring.send_tail)
593 etdev->tx_ring.send_tail->next = tcb;
594 else
595 etdev->tx_ring.send_head = tcb;
596
597 etdev->tx_ring.send_tail = tcb;
598
599 WARN_ON(tcb->next != NULL);
600
601 etdev->tx_ring.used++;
602
603 spin_unlock(&etdev->TCBSendQLock);
604
605 /* Write the new write pointer back to the device. */
606 writel(etdev->tx_ring.send_idx,
607 &etdev->regs->txdma.service_request);
608
609 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
610 * timer to wake us up if this packet isn't followed by N more.
611 */
612 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
613 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
614 &etdev->regs->global.watchdog_timer);
615 }
616 spin_unlock_irqrestore(&etdev->send_hw_lock, flags);
617
618 return 0;
619}
620
621
622/**
623 * et131x_free_send_packet - Recycle a struct tcb
624 * @etdev: pointer to our adapter
625 * @tcb: pointer to struct tcb
626 *
627 * Complete the packet if necessary
628 * Assumption - Send spinlock has been acquired
629 */
630inline void et131x_free_send_packet(struct et131x_adapter *etdev,
631 struct tcb *tcb)
632{
633 unsigned long flags;
634 struct tx_desc *desc = NULL;
635 struct net_device_stats *stats = &etdev->net_stats;
636
637 if (tcb->flags & fMP_DEST_BROAD)
638 atomic_inc(&etdev->stats.brdcstxmt);
639 else if (tcb->flags & fMP_DEST_MULTI)
640 atomic_inc(&etdev->stats.multixmt);
641 else
642 atomic_inc(&etdev->stats.unixmt);
643
644 if (tcb->skb) {
645 stats->tx_bytes += tcb->skb->len;
646
647 /* Iterate through the TX descriptors on the ring
648		 * corresponding to this packet and unmap the fragments
649 * they point to
650 */
651 do {
652 desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
653 INDEX10(tcb->index_start));
654
655 pci_unmap_single(etdev->pdev,
656 desc->addr_lo,
657 desc->len_vlan, PCI_DMA_TODEVICE);
658
659 add_10bit(&tcb->index_start, 1);
660 if (INDEX10(tcb->index_start) >=
661 NUM_DESC_PER_RING_TX) {
662 tcb->index_start &= ~ET_DMA10_MASK;
663 tcb->index_start ^= ET_DMA10_WRAP;
664 }
665 } while (desc != (etdev->tx_ring.tx_desc_ring +
666 INDEX10(tcb->index)));
667
668 dev_kfree_skb_any(tcb->skb);
669 }
670
671 memset(tcb, 0, sizeof(struct tcb));
672
673 /* Add the TCB to the Ready Q */
674 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
675
676 etdev->net_stats.tx_packets++;
677
678 if (etdev->tx_ring.tcb_qtail)
679 etdev->tx_ring.tcb_qtail->next = tcb;
680 else
681 /* Apparently ready Q is empty. */
682 etdev->tx_ring.tcb_qhead = tcb;
683
684 etdev->tx_ring.tcb_qtail = tcb;
685
686 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
687 WARN_ON(etdev->tx_ring.used < 0);
688}
689
690/**
691 * et131x_free_busy_send_packets - Free and complete the stopped active sends
692 * @etdev: pointer to our adapter
693 *
694 * Assumption - Send spinlock has been acquired
695 */
696void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
697{
698 struct tcb *tcb;
699 unsigned long flags;
700 u32 freed = 0;
701
702 /* Any packets being sent? Check the first TCB on the send list */
703 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
704
705 tcb = etdev->tx_ring.send_head;
706
707 while (tcb != NULL && freed < NUM_TCB) {
708 struct tcb *next = tcb->next;
709
710 etdev->tx_ring.send_head = next;
711
712 if (next == NULL)
713 etdev->tx_ring.send_tail = NULL;
714
715 etdev->tx_ring.used--;
716
717 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
718
719 freed++;
720 et131x_free_send_packet(etdev, tcb);
721
722 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
723
724 tcb = etdev->tx_ring.send_head;
725 }
726
727 WARN_ON(freed == NUM_TCB);
728
729 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
730
731 etdev->tx_ring.used = 0;
732}
733
734/**
735 * et131x_handle_send_interrupt - Interrupt handler for sending processing
736 * @etdev: pointer to our adapter
737 *
738 * Re-claim the send resources, complete sends and get more to send from
739 * the send wait queue.
740 *
741 * Assumption - Send spinlock has been acquired
742 */
743void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
744{
745 unsigned long flags;
746 u32 serviced;
747 struct tcb *tcb;
748 u32 index;
749
750 serviced = readl(&etdev->regs->txdma.new_service_complete);
751 index = INDEX10(serviced);
752
753 /* Has the ring wrapped? Process any descriptors that do not have
754 * the same "wrap" indicator as the current completion indicator
755 */
756 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
757
758 tcb = etdev->tx_ring.send_head;
759
760 while (tcb &&
761 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
762 index < INDEX10(tcb->index)) {
763 etdev->tx_ring.used--;
764 etdev->tx_ring.send_head = tcb->next;
765 if (tcb->next == NULL)
766 etdev->tx_ring.send_tail = NULL;
767
768 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
769 et131x_free_send_packet(etdev, tcb);
770 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
771
772		/* Go to the next packet */
773 tcb = etdev->tx_ring.send_head;
774 }
775 while (tcb &&
776 !((serviced ^ tcb->index) & ET_DMA10_WRAP)
777 && index > (tcb->index & ET_DMA10_MASK)) {
778 etdev->tx_ring.used--;
779 etdev->tx_ring.send_head = tcb->next;
780 if (tcb->next == NULL)
781 etdev->tx_ring.send_tail = NULL;
782
783 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
784 et131x_free_send_packet(etdev, tcb);
785 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
786
787		/* Go to the next packet */
788 tcb = etdev->tx_ring.send_head;
789 }
790
791 /* Wake up the queue when we hit a low-water mark */
792 if (etdev->tx_ring.used <= NUM_TCB / 3)
793 netif_wake_queue(etdev->netdev);
794
795 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
796}
797
diff --git a/drivers/staging/et131x/et1310_tx.h b/drivers/staging/et131x/et1310_tx.h
new file mode 100644
index 00000000000..82d06e9870d
--- /dev/null
+++ b/drivers/staging/et131x/et1310_tx.h
@@ -0,0 +1,150 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et1310_tx.h - Defines, structs, enums, prototypes, etc. pertaining to data
12 * transmission.
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#ifndef __ET1310_TX_H__
60#define __ET1310_TX_H__
61
62
63/* Typedefs for Tx Descriptor Ring */
64
65/*
66 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
67 *
68 * 0-15: length of packet
69 * 16-27: VLAN tag
70 * 28: VLAN CFI
71 * 29-31: VLAN priority
72 *
73 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
74 *
75 * 0: last packet in the sequence
76 * 1: first packet in the sequence
77 * 2: interrupt the processor when this pkt sent
78 * 3: Control word - no packet data
79 * 4: Issue half-duplex backpressure : XON/XOFF
80 * 5: send pause frame
81 * 6: Tx frame has error
82 * 7: append CRC
83 * 8: MAC override
84 * 9: pad packet
85 * 10: Packet is a Huge packet
86 * 11: append VLAN tag
87 * 12: IP checksum assist
88 * 13: TCP checksum assist
89 * 14: UDP checksum assist
90 */
91
92/* struct tx_desc represents each descriptor on the ring */
93struct tx_desc {
94 u32 addr_hi;
95 u32 addr_lo;
96	u32 len_vlan;	/* control words describing how to */
97	u32 flags;	/* transmit the data (detailed above) */
98};
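
/*
 * Illustrative sketch only (not part of the driver): it shows how the word-2 /
 * word-3 layout documented above maps onto a descriptor. The helper name is
 * hypothetical; the flag values (0x1 last, 0x2 first, 0x4 interrupt) follow
 * the comment block and the usage in et1310_tx.c (nic_send_packet).
 */
static inline void example_fill_tx_desc(struct tx_desc *desc, dma_addr_t pa,
					u16 len, int first, int last, int irq)
{
	desc->addr_hi = (u32)((u64)pa >> 32);
	desc->addr_lo = (u32)pa;
	desc->len_vlan = len;			/* bits 0-15: packet length */
	desc->flags = (last ? 0x1 : 0) |	/* bit 0: last in sequence */
		      (first ? 0x2 : 0) |	/* bit 1: first in sequence */
		      (irq ? 0x4 : 0);		/* bit 2: interrupt when sent */
}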
99
100/*
101 * The status of the Tx DMA engine sits in free (host) memory, and is
102 * pointed to by registers 0x101c / 0x1020. It is a DMA10 type value
103 */
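
/*
 * Illustrative sketch only: the real INDEX10()/add_10bit()/ET_DMA10_WRAP
 * helpers used by et1310_tx.c live elsewhere in the driver. The snippet below
 * shows the DMA10 scheme this code appears to assume -- a 10-bit ring index in
 * bits 0-9 plus a wrap (generation) bit in bit 10. Names prefixed with
 * example_/EXAMPLE_ are hypothetical and are not driver API.
 */
#define EXAMPLE_DMA10_MASK	0x03FF	/* index bits 0-9 */
#define EXAMPLE_DMA10_WRAP	0x0400	/* wrap bit 10 */

static inline u32 example_index10(u32 v)
{
	return v & EXAMPLE_DMA10_MASK;		/* strip the wrap bit */
}

static inline void example_add_10bit(u32 *v, int n)
{
	/* Advance the index while preserving the current wrap bit; the
	 * caller (see nic_send_packet) toggles the wrap bit explicitly
	 * when the index passes the end of the ring.
	 */
	*v = example_index10(*v + n) | (*v & EXAMPLE_DMA10_WRAP);
}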
104
105/* TCB (Transmit Control Block: Host Side) */
106struct tcb {
107 struct tcb *next; /* Next entry in ring */
108 u32 flags; /* Our flags for the packet */
109 u32 count; /* Used to spot stuck/lost packets */
110 u32 stale; /* Used to spot stuck/lost packets */
111 struct sk_buff *skb; /* Network skb we are tied to */
112 u32 index; /* Ring indexes */
113 u32 index_start;
114};
115
116/* Structure representing our local reference(s) to the ring */
117struct tx_ring {
118 /* TCB (Transmit Control Block) memory and lists */
119 struct tcb *tcb_ring;
120
121 /* List of TCBs that are ready to be used */
122 struct tcb *tcb_qhead;
123 struct tcb *tcb_qtail;
124
125	/* List of TCBs that are currently being sent. NOTE that access to all
126	 * three of these (including used) is controlled via the
127	 * TCBSendQLock. This lock should be secured prior to incrementing /
128	 * decrementing used, or any queue manipulation on send_head /
129 * tail
130 */
131 struct tcb *send_head;
132 struct tcb *send_tail;
133 int used;
134
135 /* The actual descriptor ring */
136 struct tx_desc *tx_desc_ring;
137 dma_addr_t tx_desc_ring_pa;
138
139 /* send_idx indicates where we last wrote to in the descriptor ring. */
140 u32 send_idx;
141
142 /* The location of the write-back status block */
143 u32 *tx_status;
144 dma_addr_t tx_status_pa;
145
146 /* Packets since the last IRQ: used for interrupt coalescing */
147 int since_irq;
148};
149
150#endif /* __ET1310_TX_H__ */
diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
new file mode 100644
index 00000000000..408c50ba4f2
--- /dev/null
+++ b/drivers/staging/et131x/et131x_adapter.h
@@ -0,0 +1,243 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_adapter.h - Header which includes the private adapter structure, along
12 * with related support structures, macros, definitions, etc.
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#ifndef __ET131X_ADAPTER_H__
60#define __ET131X_ADAPTER_H__
61
62#include "et1310_address_map.h"
63#include "et1310_tx.h"
64#include "et1310_rx.h"
65
66/*
67 * Do not change these values: if changed, then change also in respective
68 * TXdma and Rxdma engines
69 */
70#define NUM_DESC_PER_RING_TX	512	/* TX descriptors per ring; do not change */
71#define NUM_TCB 64
72
73/*
74 * These values are all superseded by registry entries to facilitate tuning.
75 * Once the desired performance has been achieved, the optimal registry values
76 * should be re-populated to these #defines:
77 */
78#define NUM_TRAFFIC_CLASSES 1
79
80#define TX_ERROR_PERIOD 1000
81
82#define LO_MARK_PERCENT_FOR_PSR 15
83#define LO_MARK_PERCENT_FOR_RX 15
84
85/* RFD (Receive Frame Descriptor) */
86struct rfd {
87 struct list_head list_node;
88 struct sk_buff *skb;
89 u32 len; /* total size of receive frame */
90 u16 bufferindex;
91 u8 ringindex;
92};
93
94/* Flow Control */
95#define FLOW_BOTH 0
96#define FLOW_TXONLY 1
97#define FLOW_RXONLY 2
98#define FLOW_NONE 3
99
100/* Struct to define some device statistics */
101struct ce_stats {
102 /* MIB II variables
103 *
104 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
105 * MUST have 32, then we'll need another way to perform atomic
106 * operations
107 */
108	u32 unircv;	/* # unicast packets received */
109	atomic_t unixmt;	/* # unicast packets for Tx */
110 u32 multircv; /* # multicast packets received */
111 atomic_t multixmt; /* # multicast packets for Tx */
112 u32 brdcstrcv; /* # broadcast packets received */
113 atomic_t brdcstxmt; /* # broadcast packets for Tx */
114 u32 norcvbuf; /* # Rx packets discarded */
115 u32 noxmtbuf; /* # Tx packets discarded */
116
117	/* Transceiver state information. */
118 u8 xcvr_addr;
119 u32 xcvr_id;
120
121 /* Tx Statistics. */
122 u32 tx_uflo; /* Tx Underruns */
123
124 u32 collisions;
125 u32 excessive_collisions;
126 u32 first_collision;
127 u32 late_collisions;
128 u32 max_pkt_error;
129 u32 tx_deferred;
130
131 /* Rx Statistics. */
132 u32 rx_ov_flow; /* Rx Overflow */
133
134 u32 length_err;
135 u32 alignment_err;
136 u32 crc_err;
137 u32 code_violations;
138 u32 other_errors;
139
140 u32 SynchrounousIterations;
141 u32 InterruptStatus;
142};
143
144
145/* The private adapter structure */
146struct et131x_adapter {
147 struct net_device *netdev;
148 struct pci_dev *pdev;
149
150 struct work_struct task;
151
152 /* Flags that indicate current state of the adapter */
153 u32 flags;
154 u32 HwErrCount;
155
156 /* Configuration */
157 u8 rom_addr[ETH_ALEN];
158 u8 addr[ETH_ALEN];
159 bool has_eeprom;
160 u8 eeprom_data[2];
161
162 /* Spinlocks */
163 spinlock_t Lock;
164
165 spinlock_t TCBSendQLock;
166 spinlock_t TCBReadyQLock;
167 spinlock_t send_hw_lock;
168
169 spinlock_t rcv_lock;
170 spinlock_t RcvPendLock;
171 spinlock_t FbrLock;
172
173 spinlock_t PHYLock;
174
175 /* Packet Filter and look ahead size */
176 u32 PacketFilter;
177 u32 linkspeed;
178 u32 duplex_mode;
179
180 /* multicast list */
181 u32 MCAddressCount;
182 u8 MCList[NIC_MAX_MCAST_LIST][ETH_ALEN];
183
184 /* Pointer to the device's PCI register space */
185 struct address_map __iomem *regs;
186
187 /* Registry parameters */
188 u8 SpeedDuplex; /* speed/duplex */
189 u8 wanted_flow; /* Flow we want for 802.3x flow control */
190 u8 RegistryPhyComa; /* Phy Coma mode enable/disable */
191
192 u32 RegistryRxMemEnd; /* Size of internal rx memory */
193 u32 RegistryJumboPacket; /* Max supported ethernet packet size */
194
195
196 /* Derived from the registry: */
197 u8 AiForceDpx; /* duplex setting */
198 u16 AiForceSpeed; /* 'Speed', user over-ride of line speed */
199 u8 flowcontrol; /* flow control validated by the far-end */
200 enum {
201 NETIF_STATUS_INVALID = 0,
202 NETIF_STATUS_MEDIA_CONNECT,
203 NETIF_STATUS_MEDIA_DISCONNECT,
204 NETIF_STATUS_MAX
205 } MediaState;
206
207 /* Minimize init-time */
208 struct timer_list ErrorTimer;
209
210	/* Used to put the PHY into coma mode when booting up with no cable
211	 * plugged in after 5 seconds
212 */
213 u8 boot_coma;
214
215 /* Next two used to save power information at power down. This
216 * information will be used during power up to set up parts of Power
217 * Management in JAGCore
218 */
219 u16 pdown_speed;
220 u8 pdown_duplex;
221
222 u32 CachedMaskValue;
223
224 /* Xcvr status at last poll */
225 u16 bmsr;
226
227 /* Tx Memory Variables */
228 struct tx_ring tx_ring;
229
230 /* Rx Memory Variables */
231 struct rx_ring rx_ring;
232
233 /* Loopback specifics */
234 u8 ReplicaPhyLoopbk; /* Replica Enable */
235 u8 ReplicaPhyLoopbkPF; /* Replica Enable Pass/Fail */
236
237 /* Stats */
238 struct ce_stats stats;
239
240 struct net_device_stats net_stats;
241};
242
243#endif /* __ET131X_ADAPTER_H__ */
diff --git a/drivers/staging/et131x/et131x_defs.h b/drivers/staging/et131x/et131x_defs.h
new file mode 100644
index 00000000000..3d5193fdb00
--- /dev/null
+++ b/drivers/staging/et131x/et131x_defs.h
@@ -0,0 +1,126 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_defs.h - Defines, structs, enums, prototypes, etc. to assist with OS
12 * compatibility
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#ifndef __ET131X_DEFS_H__
60#define __ET131X_DEFS_H__
61
62/* Packet and header sizes */
63#define NIC_MIN_PACKET_SIZE 60
64
65/* Multicast list size */
66#define NIC_MAX_MCAST_LIST 128
67
68/* Supported Filters */
69#define ET131X_PACKET_TYPE_DIRECTED 0x0001
70#define ET131X_PACKET_TYPE_MULTICAST 0x0002
71#define ET131X_PACKET_TYPE_BROADCAST 0x0004
72#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
73#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
74
75/* Tx Timeout */
76#define ET131X_TX_TIMEOUT (1 * HZ)
77#define NIC_SEND_HANG_THRESHOLD 0
78
79/* MP_TCB flags */
80#define fMP_DEST_MULTI 0x00000001
81#define fMP_DEST_BROAD 0x00000002
82
83/* MP_ADAPTER flags */
84#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
85#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
86#define fMP_ADAPTER_SECONDARY 0x00000010
87
88/* MP_SHARED flags */
89#define fMP_ADAPTER_SHUTDOWN 0x00100000
90#define fMP_ADAPTER_LOWER_POWER 0x00200000
91
92#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
93#define fMP_ADAPTER_RESET_IN_PROGRESS 0x01000000
94#define fMP_ADAPTER_NO_CABLE 0x02000000
95#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
96#define fMP_ADAPTER_REMOVE_IN_PROGRESS 0x08000000
97#define fMP_ADAPTER_HALT_IN_PROGRESS 0x10000000
98
99#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
100#define fMP_ADAPTER_NOT_READY_MASK 0x3ff00000
101
102/* Some offsets in PCI config space that are actually used. */
103#define ET1310_PCI_MAX_PYLD 0x4C
104#define ET1310_PCI_MAC_ADDRESS 0xA4
105#define ET1310_PCI_EEPROM_STATUS 0xB2
106#define ET1310_PCI_ACK_NACK 0xC0
107#define ET1310_PCI_REPLAY 0xC2
108#define ET1310_PCI_L0L1LATENCY 0xCF
109
110/* PCI Vendor/Product IDs */
111#define ET131X_PCI_VENDOR_ID 0x11C1 /* Agere Systems */
112#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T */
113#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
114
115/* Define order of magnitude converter */
116#define NANO_IN_A_MICRO 1000
117
118#define PARM_RX_NUM_BUFS_DEF 4
119#define PARM_RX_TIME_INT_DEF 10
120#define PARM_RX_MEM_END_DEF 0x2bc
121#define PARM_TX_TIME_INT_DEF 40
122#define PARM_TX_NUM_BUFS_DEF 4
123#define PARM_DMA_CACHE_DEF 0
124
125
126#endif /* __ET131X_DEFS_H__ */
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
new file mode 100644
index 00000000000..8c8d6b87a25
--- /dev/null
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -0,0 +1,848 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_initpci.c - Routines and data used to register the driver with the
12 * PCI (and PCI Express) subsystem, as well as basic driver
13 * init and startup.
14 *
15 *------------------------------------------------------------------------------
16 *
17 * SOFTWARE LICENSE
18 *
19 * This software is provided subject to the following terms and conditions,
20 * which you should read carefully before using the software. Using this
21 * software indicates your acceptance of these terms and conditions. If you do
22 * not agree with these terms and conditions, do not use the software.
23 *
24 * Copyright © 2005 Agere Systems Inc.
25 * All rights reserved.
26 *
27 * Redistribution and use in source or binary forms, with or without
28 * modifications, are permitted provided that the following conditions are met:
29 *
30 * . Redistributions of source code must retain the above copyright notice, this
31 * list of conditions and the following Disclaimer as comments in the code as
32 * well as in the documentation and/or other materials provided with the
33 * distribution.
34 *
35 * . Redistributions in binary form must reproduce the above copyright notice,
36 * this list of conditions and the following Disclaimer in the documentation
37 * and/or other materials provided with the distribution.
38 *
39 * . Neither the name of Agere Systems Inc. nor the names of the contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * Disclaimer
44 *
45 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
46 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
47 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
48 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
49 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
50 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
51 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
52 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
53 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
55 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
56 * DAMAGE.
57 *
58 */
59
60#include "et131x_version.h"
61#include "et131x_defs.h"
62
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/module.h>
66#include <linux/types.h>
67#include <linux/kernel.h>
68
69#include <linux/sched.h>
70#include <linux/ptrace.h>
71#include <linux/ctype.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/interrupt.h>
75#include <linux/in.h>
76#include <linux/delay.h>
77#include <linux/io.h>
78#include <linux/bitops.h>
79#include <asm/system.h>
80
81#include <linux/netdevice.h>
82#include <linux/etherdevice.h>
83#include <linux/skbuff.h>
84#include <linux/if_arp.h>
85#include <linux/ioport.h>
86#include <linux/random.h>
87
88#include "et1310_phy.h"
89
90#include "et131x_adapter.h"
91
92#include "et1310_address_map.h"
93#include "et1310_tx.h"
94#include "et1310_rx.h"
95#include "et131x.h"
96
97#define INTERNAL_MEM_SIZE	0x400	/* 1024 words of internal memory */
98#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
99
100/* Defines for Parameter Default/Min/Max values */
101#define PARM_SPEED_DUPLEX_MIN 0
102#define PARM_SPEED_DUPLEX_MAX 5
103
104/* Module parameter for manual speed setting
105 * Set link speed and duplex manually (0-5) [0]
106 * 1 : 10Mb Half-Duplex
107 * 2 : 10Mb Full-Duplex
108 * 3 : 100Mb Half-Duplex
109 * 4 : 100Mb Full-Duplex
110 * 5 : 1000Mb Full-Duplex
111 * 0 : Auto Speed Auto Duplex // default
112 */
113static u32 et131x_speed_set;
114module_param(et131x_speed_set, uint, 0);
115MODULE_PARM_DESC(et131x_speed_set,
116 "Set Link speed and dublex manually (0-5) [0]\n"
117 "1 : 10Mb Half-Duplex\n"
118 "2 : 10Mb Full-Duplex\n"
119 "3 : 100Mb Half-Duplex\n"
120 "4 : 100Mb Full-Duplex\n"
121 "5 : 1000Mb Full-Duplex\n"
122 "0 : Auto Speed Auto Dublex");
123
124/**
125 * et131x_hwaddr_init - set up the MAC Address on the ET1310
126 * @adapter: pointer to our private adapter structure
127 */
128void et131x_hwaddr_init(struct et131x_adapter *adapter)
129{
130	/* If we have our default MAC from init and no MAC address from
131 * EEPROM then we need to generate the last octet and set it on the
132 * device
133 */
134 if (adapter->rom_addr[0] == 0x00 &&
135 adapter->rom_addr[1] == 0x00 &&
136 adapter->rom_addr[2] == 0x00 &&
137 adapter->rom_addr[3] == 0x00 &&
138 adapter->rom_addr[4] == 0x00 &&
139 adapter->rom_addr[5] == 0x00) {
140 /*
141 * We need to randomly generate the last octet so we
142 * decrease our chances of setting the mac address to
143 * same as another one of our cards in the system
144 */
145 get_random_bytes(&adapter->addr[5], 1);
146 /*
147 * We have the default value in the register we are
148 * working with so we need to copy the current
149 * address into the permanent address
150 */
151 memcpy(adapter->rom_addr,
152 adapter->addr, ETH_ALEN);
153 } else {
154 /* We do not have an override address, so set the
155 * current address to the permanent address and add
156 * it to the device
157 */
158 memcpy(adapter->addr,
159 adapter->rom_addr, ETH_ALEN);
160 }
161}
162
163
164/**
165 * et131x_pci_init - initial PCI setup
166 * @adapter: pointer to our private adapter structure
167 * @pdev: our PCI device
168 *
169 * Perform the initial setup of PCI registers and if possible initialise
170 * the MAC address. At this point the I/O registers have yet to be mapped
171 */
172
173static int et131x_pci_init(struct et131x_adapter *adapter,
174 struct pci_dev *pdev)
175{
176 int i;
177 u8 max_payload;
178 u8 read_size_reg;
179
180 if (et131x_init_eeprom(adapter) < 0)
181 return -EIO;
182
183 /* Let's set up the PORT LOGIC Register. First we need to know what
184 * the max_payload_size is
185 */
186 if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
187 dev_err(&pdev->dev,
188 "Could not read PCI config space for Max Payload Size\n");
189 return -EIO;
190 }
191
192 /* Program the Ack/Nak latency and replay timers */
193 max_payload &= 0x07; /* Only the lower 3 bits are valid */
194
195 if (max_payload < 2) {
196 static const u16 acknak[2] = { 0x76, 0xD0 };
197 static const u16 replay[2] = { 0x1E0, 0x2ED };
198
199 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
200 acknak[max_payload])) {
201 dev_err(&pdev->dev,
202 "Could not write PCI config space for ACK/NAK\n");
203 return -EIO;
204 }
205 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
206 replay[max_payload])) {
207 dev_err(&pdev->dev,
208 "Could not write PCI config space for Replay Timer\n");
209 return -EIO;
210 }
211 }
212
213 /* l0s and l1 latency timers. We are using default values.
214 * Representing 001 for L0s and 010 for L1
215 */
216 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
217 dev_err(&pdev->dev,
218 "Could not write PCI config space for Latency Timers\n");
219 return -EIO;
220 }
221
222 /* Change the max read size to 2k */
223 if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
224 dev_err(&pdev->dev,
225 "Could not read PCI config space for Max read size\n");
226 return -EIO;
227 }
228
229 read_size_reg &= 0x8f;
230 read_size_reg |= 0x40;
231
232 if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
233 dev_err(&pdev->dev,
234 "Could not write PCI config space for Max read size\n");
235 return -EIO;
236 }
237
238 /* Get MAC address from config space if an eeprom exists, otherwise
239 * the MAC address there will not be valid
240 */
241 if (!adapter->has_eeprom) {
242 et131x_hwaddr_init(adapter);
243 return 0;
244 }
245
246 for (i = 0; i < ETH_ALEN; i++) {
247 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
248 adapter->rom_addr + i)) {
249 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
250 return -EIO;
251 }
252 }
253 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
254 return 0;
255}
256
257/**
258 * et131x_error_timer_handler
259 * @data: timer-specific variable; here a pointer to our adapter structure
260 *
261 * The routine called when the error timer expires, to track the number of
262 * recurring errors.
263 */
264void et131x_error_timer_handler(unsigned long data)
265{
266 struct et131x_adapter *etdev = (struct et131x_adapter *) data;
267 u32 pm_csr;
268
269 pm_csr = readl(&etdev->regs->global.pm_csr);
270
271 if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
272 UpdateMacStatHostCounters(etdev);
273 else
274 dev_err(&etdev->pdev->dev,
275 "No interrupts, in PHY coma, pm_csr = 0x%x\n", pm_csr);
276
277 if (!(etdev->bmsr & MI_BMSR_LINK_STATUS) &&
278 etdev->RegistryPhyComa &&
279 etdev->boot_coma < 11) {
280 etdev->boot_coma++;
281 }
282
283 if (etdev->boot_coma == 10) {
284 if (!(etdev->bmsr & MI_BMSR_LINK_STATUS)
285 && etdev->RegistryPhyComa) {
286 if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
287 /* NOTE - This was originally a 'sync with
288 * interrupt'. How to do that under Linux?
289 */
290 et131x_enable_interrupts(etdev);
291 EnablePhyComa(etdev);
292 }
293 }
294 }
295
296 /* This is a periodic timer, so reschedule */
297 mod_timer(&etdev->ErrorTimer, jiffies +
298 TX_ERROR_PERIOD * HZ / 1000);
299}
300
301/**
302 * et131x_link_detection_handler
303 * @data: timer-specific variable; here a pointer to our adapter structure
304 * Timer function for link up at driver load time
305 */
306void et131x_link_detection_handler(unsigned long data)
307{
308 struct et131x_adapter *etdev = (struct et131x_adapter *) data;
309 unsigned long flags;
310
311 if (etdev->MediaState == 0) {
312 spin_lock_irqsave(&etdev->Lock, flags);
313
314 etdev->MediaState = NETIF_STATUS_MEDIA_DISCONNECT;
315
316 spin_unlock_irqrestore(&etdev->Lock, flags);
317
318 netif_carrier_off(etdev->netdev);
319 }
320}
321
322/**
323 * ConfigGlobalRegs - configure JAGCore global registers
324 * @etdev: pointer to our adapter structure
325 *
326 * Used to configure the global registers on the JAGCore
327 */
328void ConfigGlobalRegs(struct et131x_adapter *etdev)
329{
330 struct global_regs __iomem *regs = &etdev->regs->global;
331
332 writel(0, &regs->rxq_start_addr);
333 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
334
335 if (etdev->RegistryJumboPacket < 2048) {
336 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
337 * block of RAM that the driver can split between Tx
338 * and Rx as it desires. Our default is to split it
339 * 50/50:
340 */
341 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
342 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
343 } else if (etdev->RegistryJumboPacket < 8192) {
344 /* For jumbo packets > 2k but < 8k, split 50-50. */
345 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
346 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
347 } else {
348 /* 9216 is the only packet size greater than 8k that
349 * is available. The Tx buffer has to be big enough
350 * for one whole packet on the Tx side. We'll make
351 * the Tx 9408, and give the rest to Rx
352 */
353 writel(0x01b3, &regs->rxq_end_addr);
354 writel(0x01b4, &regs->txq_start_addr);
355 }
356
357 /* Initialize the loopback register. Disable all loopbacks. */
358 writel(0, &regs->loopback);
359
360 /* MSI Register */
361 writel(0, &regs->msi_config);
362
363 /* By default, disable the watchdog timer. It will be enabled when
364 * a packet is queued.
365 */
366 writel(0, &regs->watchdog_timer);
367}
368
369
370/**
371 * et131x_adapter_setup - Set the adapter up
372 * @etdev: pointer to our private adapter structure
373 *
374 * Returns 0 on success, errno on failure (as defined in errno.h)
375 */
376int et131x_adapter_setup(struct et131x_adapter *etdev)
377{
378 int status = 0;
379
380 /* Configure the JAGCore */
381 ConfigGlobalRegs(etdev);
382
383 ConfigMACRegs1(etdev);
384
385 /* Configure the MMC registers */
386 /* All we need to do is initialize the Memory Control Register */
387 writel(ET_MMC_ENABLE, &etdev->regs->mmc.mmc_ctrl);
388
389 ConfigRxMacRegs(etdev);
390 ConfigTxMacRegs(etdev);
391
392 ConfigRxDmaRegs(etdev);
393 ConfigTxDmaRegs(etdev);
394
395 ConfigMacStatRegs(etdev);
396
397 /* Move the following code to Timer function?? */
398 status = et131x_xcvr_find(etdev);
399
400 if (status != 0)
401 dev_warn(&etdev->pdev->dev, "Could not find the xcvr\n");
402
403 /* Prepare the TRUEPHY library. */
404 ET1310_PhyInit(etdev);
405
406 /* Reset the phy now so changes take place */
407 ET1310_PhyReset(etdev);
408
409 /* Power down PHY */
410 ET1310_PhyPowerDown(etdev, 1);
411
412 /*
413	 * We need to turn off 1000Base-T half duplex; the MAC does not
414 * support it. For the 10/100 part, turn off all gig advertisement
415 */
416 if (etdev->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
417 ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_FULL);
418 else
419 ET1310_PhyAdvertise1000BaseT(etdev, TRUEPHY_ADV_DUPLEX_NONE);
420
421 /* Power up PHY */
422 ET1310_PhyPowerDown(etdev, 0);
423
424 et131x_setphy_normal(etdev);
425	return status;
426}
427
428/**
429 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
430 * @adapter: pointer to our private adapter structure
431 */
432void et131x_soft_reset(struct et131x_adapter *adapter)
433{
434 /* Disable MAC Core */
435 writel(0xc00f0000, &adapter->regs->mac.cfg1);
436
437 /* Set everything to a reset value */
438 writel(0x7F, &adapter->regs->global.sw_reset);
439 writel(0x000f0000, &adapter->regs->mac.cfg1);
440 writel(0x00000000, &adapter->regs->mac.cfg1);
441}
442
443/**
444 * et131x_align_allocated_memory - Align allocated memory on a given boundary
445 * @adapter: pointer to our adapter structure
446 * @phys_addr: pointer to Physical address
447 * @offset: pointer to the offset variable
448 * @mask: correct mask
449 */
450void et131x_align_allocated_memory(struct et131x_adapter *adapter,
451 uint64_t *phys_addr,
452 uint64_t *offset, uint64_t mask)
453{
454 uint64_t new_addr;
455
456 *offset = 0;
457
458 new_addr = *phys_addr & ~mask;
459
460 if (new_addr != *phys_addr) {
461 /* Move to next aligned block */
462 new_addr += mask + 1;
463 /* Return offset for adjusting virt addr */
464 *offset = new_addr - *phys_addr;
465 /* Return new physical address */
466 *phys_addr = new_addr;
467 }
468}
469
470/**
471 * et131x_adapter_memory_alloc
472 * @adapter: pointer to our private adapter structure
473 *
474 * Returns 0 on success, errno on failure (as defined in errno.h).
475 *
476 * Allocate all the memory blocks for send, receive and others.
477 */
478int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
479{
480 int status;
481
482 /* Allocate memory for the Tx Ring */
483 status = et131x_tx_dma_memory_alloc(adapter);
484 if (status != 0) {
485 dev_err(&adapter->pdev->dev,
486 "et131x_tx_dma_memory_alloc FAILED\n");
487 return status;
488 }
489 /* Receive buffer memory allocation */
490 status = et131x_rx_dma_memory_alloc(adapter);
491 if (status != 0) {
492 dev_err(&adapter->pdev->dev,
493 "et131x_rx_dma_memory_alloc FAILED\n");
494 et131x_tx_dma_memory_free(adapter);
495 return status;
496 }
497
498 /* Init receive data structures */
499 status = et131x_init_recv(adapter);
500 if (status != 0) {
501 dev_err(&adapter->pdev->dev,
502 "et131x_init_recv FAILED\n");
503 et131x_tx_dma_memory_free(adapter);
504 et131x_rx_dma_memory_free(adapter);
505 }
506 return status;
507}
508
509/**
510 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
511 * @adapter: pointer to our private adapter structure
512 */
513void et131x_adapter_memory_free(struct et131x_adapter *adapter)
514{
515 /* Free DMA memory */
516 et131x_tx_dma_memory_free(adapter);
517 et131x_rx_dma_memory_free(adapter);
518}
519
520
521
522/**
523 * et131x_adapter_init
524 * @netdev: pointer to the net_device structure
525 * @pdev: pointer to the PCI device
526 *
527 * Initialize the data structures for the et131x_adapter object and link
528 * them together with the platform provided device structures.
529 */
530
531
532static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
533 struct pci_dev *pdev)
534{
535 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
536 static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 };
537 static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 };
538
539 struct et131x_adapter *etdev;
540
541 /* Setup the fundamental net_device and private adapter structure
542 * elements */
543 SET_NETDEV_DEV(netdev, &pdev->dev);
544
545 /* Allocate private adapter struct and copy in relevant information */
546 etdev = netdev_priv(netdev);
547 etdev->pdev = pci_dev_get(pdev);
548 etdev->netdev = netdev;
549
550 /* Do the same for the netdev struct */
551 netdev->irq = pdev->irq;
552 netdev->base_addr = pci_resource_start(pdev, 0);
553
554 /* Initialize spinlocks here */
555 spin_lock_init(&etdev->Lock);
556 spin_lock_init(&etdev->TCBSendQLock);
557 spin_lock_init(&etdev->TCBReadyQLock);
558 spin_lock_init(&etdev->send_hw_lock);
559 spin_lock_init(&etdev->rcv_lock);
560 spin_lock_init(&etdev->RcvPendLock);
561 spin_lock_init(&etdev->FbrLock);
562 spin_lock_init(&etdev->PHYLock);
563
564 /* Parse configuration parameters into the private adapter struct */
565 if (et131x_speed_set)
566 dev_info(&etdev->pdev->dev,
567 "Speed set manually to : %d\n", et131x_speed_set);
568
569 etdev->SpeedDuplex = et131x_speed_set;
570 etdev->RegistryJumboPacket = 1514; /* 1514-9216 */
571
572 /* Set the MAC address to a default */
573 memcpy(etdev->addr, default_mac, ETH_ALEN);
574
575 /* Decode SpeedDuplex
576 *
577 * Set up as if we are auto negotiating always and then change if we
578 * go into force mode
579 *
580 * If we are the 10/100 device, and gigabit is somehow requested then
581 * knock it down to 100 full.
582 */
583 if (etdev->pdev->device == ET131X_PCI_DEVICE_ID_FAST &&
584 etdev->SpeedDuplex == 5)
585 etdev->SpeedDuplex = 4;
586
587 etdev->AiForceSpeed = speed[etdev->SpeedDuplex];
588 etdev->AiForceDpx = duplex[etdev->SpeedDuplex]; /* Auto FDX */
589
590 return etdev;
591}
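
For reference, the speed[] and duplex[] tables above decode the et131x_speed_set parameter as: 0 = auto-negotiate, 1 = 10 half, 2 = 10 full, 3 = 100 half, 4 = 100 full, 5 = 1000 full (duplex code 1 = half, 2 = full). A small user-space sketch, using copies of the same tables, that prints this mapping:

	#include <stdio.h>

	/* Mirror of the speed[]/duplex[] lookup tables used in
	 * et131x_adapter_init(); duplex code 0 = autoneg, 1 = half, 2 = full.
	 */
	int main(void)
	{
		static const unsigned short speed[]  = { 0, 10, 10, 100, 100, 1000 };
		static const unsigned char  duplex[] = { 0, 1, 2, 1, 2, 2 };
		unsigned int i;

		for (i = 0; i < 6; i++)
			printf("et131x_speed_set=%u -> %u Mbit/s, duplex code %u\n",
			       i, (unsigned)speed[i], (unsigned)duplex[i]);
		return 0;
	}
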
592
593/**
594 * et131x_pci_setup - Perform device initialization
595 * @pdev: a pointer to the device's pci_dev structure
596 * @ent: this device's entry in the pci_device_id table
597 *
598 * Returns 0 on success, errno on failure (as defined in errno.h)
599 *
600 * Registered in the pci_driver structure, this function is called when the
601 * PCI subsystem finds a new PCI device which matches the information
602 * contained in the pci_device_id table. This routine is the equivalent to
603 * a device insertion routine.
604 */
605
606static int __devinit et131x_pci_setup(struct pci_dev *pdev,
607 const struct pci_device_id *ent)
608{
609 int result = -EBUSY;
610 int pm_cap;
611 bool pci_using_dac;
612 struct net_device *netdev;
613 struct et131x_adapter *adapter;
614
615 /* Enable the device via the PCI subsystem */
616 if (pci_enable_device(pdev) != 0) {
617 dev_err(&pdev->dev,
618 "pci_enable_device() failed\n");
619 return -EIO;
620 }
621
622 /* Perform some basic PCI checks */
623 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
624 dev_err(&pdev->dev,
625 "Can't find PCI device's base address\n");
626 goto err_disable;
627 }
628
629 if (pci_request_regions(pdev, DRIVER_NAME)) {
630 dev_err(&pdev->dev,
631 "Can't get PCI resources\n");
632 goto err_disable;
633 }
634
635 /* Enable PCI bus mastering */
636 pci_set_master(pdev);
637
638 /* Query PCI for Power Mgmt Capabilities
639 *
640 * NOTE: Now reading PowerMgmt in another location; is this still
641 * needed?
642 */
643 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
644 if (pm_cap == 0) {
645 dev_err(&pdev->dev,
646 "Cannot find Power Management capabilities\n");
647 result = -EIO;
648 goto err_release_res;
649 }
650
651 /* Check the DMA addressing support of this device */
652 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
653 pci_using_dac = true;
654
655 result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
656 if (result != 0) {
657 dev_err(&pdev->dev,
658 "Unable to obtain 64 bit DMA for consistent allocations\n");
659 goto err_release_res;
660 }
661 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
662 pci_using_dac = false;
663 } else {
664 dev_err(&pdev->dev,
665 "No usable DMA addressing method\n");
666 result = -EIO;
667 goto err_release_res;
668 }
669
670 /* Allocate netdev and private adapter structs */
671 netdev = et131x_device_alloc();
672 if (netdev == NULL) {
673 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
674 result = -ENOMEM;
675 goto err_release_res;
676 }
677 adapter = et131x_adapter_init(netdev, pdev);
678 /* Initialise the PCI setup for the device */
679 et131x_pci_init(adapter, pdev);
680
681 /* Map the bus-relative registers to system virtual memory */
682 adapter->regs = pci_ioremap_bar(pdev, 0);
683 if (adapter->regs == NULL) {
684 dev_err(&pdev->dev, "Cannot map device registers\n");
685 result = -ENOMEM;
686 goto err_free_dev;
687 }
688
689 /* If Phy COMA mode was enabled when we went down, disable it here. */
690 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
691
692 /* Issue a global reset to the et1310 */
693 et131x_soft_reset(adapter);
694
695 /* Disable all interrupts (paranoid) */
696 et131x_disable_interrupts(adapter);
697
698 /* Allocate DMA memory */
699 result = et131x_adapter_memory_alloc(adapter);
700 if (result != 0) {
701		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
702 goto err_iounmap;
703 }
704
705 /* Init send data structures */
706 et131x_init_send(adapter);
707
708 /*
709 * Set up the task structure for the ISR's deferred handler
710 */
711 INIT_WORK(&adapter->task, et131x_isr_handler);
712
713 /* Copy address into the net_device struct */
714 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
715
716 /* Setup et1310 as per the documentation */
717 et131x_adapter_setup(adapter);
718
719 /* Create a timer to count errors received by the NIC */
720 init_timer(&adapter->ErrorTimer);
721
722 adapter->ErrorTimer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
723 adapter->ErrorTimer.function = et131x_error_timer_handler;
724 adapter->ErrorTimer.data = (unsigned long)adapter;
725
726 /* Initialize link state */
727 et131x_link_detection_handler((unsigned long)adapter);
728
729	/* Initialize the counter used to track how long we have been
730	   without link */
731 adapter->boot_coma = 0;
732
733 /* We can enable interrupts now
734 *
735 * NOTE - Because registration of interrupt handler is done in the
736 * device's open(), defer enabling device interrupts to that
737 * point
738 */
739
740 /* Register the net_device struct with the Linux network layer */
741 result = register_netdev(netdev);
742 if (result != 0) {
743 dev_err(&pdev->dev, "register_netdev() failed\n");
744 goto err_mem_free;
745 }
746
747 /* Register the net_device struct with the PCI subsystem. Save a copy
748 * of the PCI config space for this device now that the device has
749 * been initialized, just in case it needs to be quickly restored.
750 */
751 pci_set_drvdata(pdev, netdev);
752 pci_save_state(adapter->pdev);
753 return result;
754
755err_mem_free:
756 et131x_adapter_memory_free(adapter);
757err_iounmap:
758 iounmap(adapter->regs);
759err_free_dev:
760 pci_dev_put(pdev);
761 free_netdev(netdev);
762err_release_res:
763 pci_release_regions(pdev);
764err_disable:
765 pci_disable_device(pdev);
766 return result;
767}
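
The 64-bit/32-bit fallback in the probe above uses the older pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. As a sketch only (not part of this driver), the same negotiation written with the consolidated dma_set_mask_and_coherent() helper that later kernels provide; the function name example_set_dma_mask() and the using_dac flag are illustrative, and the sketch assumes <linux/pci.h> and <linux/dma-mapping.h>:

	static int example_set_dma_mask(struct pci_dev *pdev, bool *using_dac)
	{
		/* Try 64-bit DMA first; dma_set_mask_and_coherent() sets both
		 * the streaming and the coherent mask, returning 0 on success.
		 */
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
			*using_dac = true;
			return 0;
		}
		/* Fall back to 32-bit addressing */
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
			*using_dac = false;
			return 0;
		}
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		return -EIO;
	}
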
768
769/**
770 * et131x_pci_remove
771 * @pdev: a pointer to the device's pci_dev structure
772 *
773 * Registered in the pci_driver structure, this function is called when the
774 * PCI subsystem detects that a PCI device which matches the information
775 * contained in the pci_device_id table has been removed.
776 */
777
778static void __devexit et131x_pci_remove(struct pci_dev *pdev)
779{
780 struct net_device *netdev;
781 struct et131x_adapter *adapter;
782
783 /* Retrieve the net_device pointer from the pci_dev struct, as well
784 * as the private adapter struct
785 */
786 netdev = pci_get_drvdata(pdev);
787 adapter = netdev_priv(netdev);
788
789 /* Perform device cleanup */
790 unregister_netdev(netdev);
791 et131x_adapter_memory_free(adapter);
792 iounmap(adapter->regs);
793 pci_dev_put(adapter->pdev);
794 free_netdev(netdev);
795 pci_release_regions(pdev);
796 pci_disable_device(pdev);
797}
798
799static struct pci_device_id et131x_pci_table[] __devinitdata = {
800 {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_GIG, PCI_ANY_ID,
801 PCI_ANY_ID, 0, 0, 0UL},
802 {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_FAST, PCI_ANY_ID,
803 PCI_ANY_ID, 0, 0, 0UL},
804 {0,}
805};
806
807MODULE_DEVICE_TABLE(pci, et131x_pci_table);
808
809static struct pci_driver et131x_driver = {
810 .name = DRIVER_NAME,
811 .id_table = et131x_pci_table,
812 .probe = et131x_pci_setup,
813 .remove = __devexit_p(et131x_pci_remove),
814 .suspend = NULL, /* et131x_pci_suspend */
815 .resume = NULL, /* et131x_pci_resume */
816};
817
818
819/**
820 * et131x_init_module - The "main" entry point called on driver initialization
821 *
822 * Returns 0 on success, errno on failure (as defined in errno.h)
823 */
824static int __init et131x_init_module(void)
825{
826 if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN ||
827 et131x_speed_set > PARM_SPEED_DUPLEX_MAX) {
828 printk(KERN_WARNING "et131x: invalid speed setting ignored.\n");
829 et131x_speed_set = 0;
830 }
831 return pci_register_driver(&et131x_driver);
832}
833
834/**
835 * et131x_cleanup_module - The entry point called on driver cleanup
836 */
837static void __exit et131x_cleanup_module(void)
838{
839 pci_unregister_driver(&et131x_driver);
840}
841
842module_init(et131x_init_module);
843module_exit(et131x_cleanup_module);
844
845/* Modinfo parameters (filled out using defines from et131x_version.h) */
846MODULE_AUTHOR(DRIVER_AUTHOR);
847MODULE_DESCRIPTION(DRIVER_INFO);
848MODULE_LICENSE(DRIVER_LICENSE);
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c
new file mode 100644
index 00000000000..9c33209c840
--- /dev/null
+++ b/drivers/staging/et131x/et131x_isr.c
@@ -0,0 +1,480 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_isr.c - File which contains the ISR, ISR handler, and related routines
12 * for processing interrupts from the device.
13 *
14 *------------------------------------------------------------------------------
15 *
16 * SOFTWARE LICENSE
17 *
18 * This software is provided subject to the following terms and conditions,
19 * which you should read carefully before using the software. Using this
20 * software indicates your acceptance of these terms and conditions. If you do
21 * not agree with these terms and conditions, do not use the software.
22 *
23 * Copyright © 2005 Agere Systems Inc.
24 * All rights reserved.
25 *
26 * Redistribution and use in source or binary forms, with or without
27 * modifications, are permitted provided that the following conditions are met:
28 *
29 * . Redistributions of source code must retain the above copyright notice, this
30 * list of conditions and the following Disclaimer as comments in the code as
31 * well as in the documentation and/or other materials provided with the
32 * distribution.
33 *
34 * . Redistributions in binary form must reproduce the above copyright notice,
35 * this list of conditions and the following Disclaimer in the documentation
36 * and/or other materials provided with the distribution.
37 *
38 * . Neither the name of Agere Systems Inc. nor the names of the contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * Disclaimer
43 *
44 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
45 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
47 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
48 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
49 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
50 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
51 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
52 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
54 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * DAMAGE.
56 *
57 */
58
59#include "et131x_version.h"
60#include "et131x_defs.h"
61
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/ctype.h>
70#include <linux/string.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/in.h>
74#include <linux/delay.h>
75#include <linux/io.h>
76#include <linux/bitops.h>
77#include <linux/pci.h>
78#include <asm/system.h>
79
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et131x_adapter.h"
88#include "et131x.h"
89
90/*
91 * For interrupts, normal running is:
92 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
93 * watchdog_interrupt & txdma_xfer_done
94 *
95 * When flow control is enabled for Tx or for both directions, we
96 * additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
97 * free buffer rings are running low.
98 */
99#define INT_MASK_DISABLE 0xffffffff
100
101/* NOTE: Masking out MAC_STAT Interrupt for now...
102 * #define INT_MASK_ENABLE 0xfff6bf17
103 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
104 */
105#define INT_MASK_ENABLE 0xfffebf17
106#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
107
108
109/**
110 * et131x_enable_interrupts - enable interrupt
111 * @adapter: et131x device
112 *
113 * Enable the appropriate interrupts on the ET131x according to our
114 * configuration
115 */
116
117void et131x_enable_interrupts(struct et131x_adapter *adapter)
118{
119 u32 mask;
120
121 /* Enable all global interrupts */
122 if (adapter->flowcontrol == FLOW_TXONLY || adapter->flowcontrol == FLOW_BOTH)
123 mask = INT_MASK_ENABLE;
124 else
125 mask = INT_MASK_ENABLE_NO_FLOW;
126
127 adapter->CachedMaskValue = mask;
128 writel(mask, &adapter->regs->global.int_mask);
129}
130
131/**
132 * et131x_disable_interrupts - interrupt disable
133 * @adapter: et131x device
134 *
135 * Block all interrupts from the et131x device at the device itself
136 */
137
138void et131x_disable_interrupts(struct et131x_adapter *adapter)
139{
140 /* Disable all global interrupts */
141 adapter->CachedMaskValue = INT_MASK_DISABLE;
142 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
143}
144
145
146/**
147 * et131x_isr - The Interrupt Service Routine for the driver.
148 * @irq: the IRQ on which the interrupt was received.
149 * @dev_id: device-specific info (here a pointer to a net_device struct)
150 *
151 * Returns a value indicating if the interrupt was handled.
152 */
153
154irqreturn_t et131x_isr(int irq, void *dev_id)
155{
156 bool handled = true;
157 struct net_device *netdev = (struct net_device *)dev_id;
158 struct et131x_adapter *adapter = NULL;
159 u32 status;
160
161 if (!netif_device_present(netdev)) {
162 handled = false;
163 goto out;
164 }
165
166 adapter = netdev_priv(netdev);
167
168 /* If the adapter is in low power state, then it should not
169 * recognize any interrupt
170 */
171
172 /* Disable Device Interrupts */
173 et131x_disable_interrupts(adapter);
174
175 /* Get a copy of the value in the interrupt status register
176 * so we can process the interrupting section
177 */
178 status = readl(&adapter->regs->global.int_status);
179
180 if (adapter->flowcontrol == FLOW_TXONLY ||
181 adapter->flowcontrol == FLOW_BOTH) {
182 status &= ~INT_MASK_ENABLE;
183 } else {
184 status &= ~INT_MASK_ENABLE_NO_FLOW;
185 }
186
187 /* Make sure this is our interrupt */
188 if (!status) {
189 handled = false;
190 et131x_enable_interrupts(adapter);
191 goto out;
192 }
193
194 /* This is our interrupt, so process accordingly */
195
196 if (status & ET_INTR_WATCHDOG) {
197 struct tcb *tcb = adapter->tx_ring.send_head;
198
199 if (tcb)
200 if (++tcb->stale > 1)
201 status |= ET_INTR_TXDMA_ISR;
202
203 if (adapter->rx_ring.UnfinishedReceives)
204 status |= ET_INTR_RXDMA_XFR_DONE;
205 else if (tcb == NULL)
206 writel(0, &adapter->regs->global.watchdog_timer);
207
208 status &= ~ET_INTR_WATCHDOG;
209 }
210
211 if (status == 0) {
212 /* This interrupt has in some way been "handled" by
213 * the ISR. Either it was a spurious Rx interrupt, or
214 * it was a Tx interrupt that has been filtered by
215 * the ISR.
216 */
217 et131x_enable_interrupts(adapter);
218 goto out;
219 }
220
221 /* We need to save the interrupt status value for use in our
222 * DPC. We will clear the software copy of that in that
223 * routine.
224 */
225 adapter->stats.InterruptStatus = status;
226
227 /* Schedule the ISR handler as a bottom-half task in the
228 * kernel's tq_immediate queue, and mark the queue for
229 * execution
230 */
231 schedule_work(&adapter->task);
232out:
233 return IRQ_RETVAL(handled);
234}
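
Stripped of the spurious-interrupt check and the watchdog filtering, the hard IRQ path above reduces to the classic mask / latch / defer pattern. A condensed sketch, illustrative only, reusing the same adapter fields the real handler uses:

	static irqreturn_t example_hard_isr(int irq, void *dev_id)
	{
		struct net_device *netdev = dev_id;
		struct et131x_adapter *adapter = netdev_priv(netdev);

		/* Mask further device interrupts until the worker has run */
		et131x_disable_interrupts(adapter);

		/* Latch the cause for the deferred handler */
		adapter->stats.InterruptStatus =
			readl(&adapter->regs->global.int_status);

		/* Heavy lifting happens in et131x_isr_handler(), which
		 * re-enables device interrupts when it is done.
		 */
		schedule_work(&adapter->task);
		return IRQ_HANDLED;
	}
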
235
236/**
237 * et131x_isr_handler - The ISR handler
238 * @work: pointer to the work_struct embedded in our private adapter structure
239 *
240 * Scheduled to run in a deferred context by the ISR. This is where the
241 * ISR's work actually gets done.
242 */
243void et131x_isr_handler(struct work_struct *work)
244{
245 struct et131x_adapter *etdev =
246 container_of(work, struct et131x_adapter, task);
247 u32 status = etdev->stats.InterruptStatus;
248 struct address_map __iomem *iomem = etdev->regs;
249
250 /*
251 * These first two are by far the most common. Once handled, we clear
252 * their two bits in the status word. If the word is now zero, we
253 * exit.
254 */
255 /* Handle all the completed Transmit interrupts */
256 if (status & ET_INTR_TXDMA_ISR)
257 et131x_handle_send_interrupt(etdev);
258
259 /* Handle all the completed Receives interrupts */
260 if (status & ET_INTR_RXDMA_XFR_DONE)
261 et131x_handle_recv_interrupt(etdev);
262
263 status &= 0xffffffd7;
264
265 if (status) {
266 /* Handle the TXDMA Error interrupt */
267 if (status & ET_INTR_TXDMA_ERR) {
268 u32 txdma_err;
269
270 /* Following read also clears the register (COR) */
271 txdma_err = readl(&iomem->txdma.tx_dma_error);
272
273 dev_warn(&etdev->pdev->dev,
274 "TXDMA_ERR interrupt, error = %d\n",
275 txdma_err);
276 }
277
278 /* Handle Free Buffer Ring 0 and 1 Low interrupt */
279 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
280 /*
281 * This indicates the number of unused buffers in
282 * RXDMA free buffer ring 0 is <= the limit you
283 * programmed. Free buffer resources need to be
284 * returned. Free buffers are consumed as packets
285 * are passed from the network to the host. The host
286 * becomes aware of the packets from the contents of
287 * the packet status ring. This ring is queried when
288 * the packet done interrupt occurs. Packets are then
289 * passed to the OS. When the OS is done with the
290 * packets the resources can be returned to the
291 * ET1310 for re-use. This interrupt is one method of
292 * returning resources.
293 */
294
295 /* If the user has flow control on, then we will
296 * send a pause packet, otherwise just exit
297 */
298 if (etdev->flowcontrol == FLOW_TXONLY ||
299 etdev->flowcontrol == FLOW_BOTH) {
300 u32 pm_csr;
301
302 /* Tell the device to send a pause packet via
303 * the back pressure register (bp req and
304 * bp xon/xoff)
305 */
306 pm_csr = readl(&iomem->global.pm_csr);
307 if ((pm_csr & ET_PM_PHY_SW_COMA) == 0)
308 writel(3, &iomem->txmac.bp_ctrl);
309 }
310 }
311
312 /* Handle Packet Status Ring Low Interrupt */
313 if (status & ET_INTR_RXDMA_STAT_LOW) {
314
315 /*
316 * Same idea as with the two Free Buffer Rings.
317 * Packets going from the network to the host each
318 * consume a free buffer resource and a packet status
319			 * resource. These resources are passed to the OS.
320 * When the OS is done with the resources, they need
321 * to be returned to the ET1310. This is one method
322 * of returning the resources.
323 */
324 }
325
326 /* Handle RXDMA Error Interrupt */
327 if (status & ET_INTR_RXDMA_ERR) {
328 /*
329 * The rxdma_error interrupt is sent when a time-out
330 * on a request issued by the JAGCore has occurred or
331 * a completion is returned with an un-successful
332 * status. In both cases the request is considered
333 * complete. The JAGCore will automatically re-try the
334 * request in question. Normally information on events
335 * like these are sent to the host using the "Advanced
336 * Error Reporting" capability. This interrupt is
337 * another way of getting similar information. The
338 * only thing required is to clear the interrupt by
339 * reading the ISR in the global resources. The
340 * JAGCore will do a re-try on the request. Normally
341 * you should never see this interrupt. If you start
342 * to see this interrupt occurring frequently then
343 * something bad has occurred. A reset might be the
344 * thing to do.
345 */
346 /* TRAP();*/
347
348 dev_warn(&etdev->pdev->dev,
349 "RxDMA_ERR interrupt, error %x\n",
350 readl(&iomem->txmac.tx_test));
351 }
352
353 /* Handle the Wake on LAN Event */
354 if (status & ET_INTR_WOL) {
355 /*
356 * This is a secondary interrupt for wake on LAN.
357			 * The driver should never see this; if it does,
358 * something serious is wrong. We will TRAP the
359 * message when we are in DBG mode, otherwise we
360 * will ignore it.
361 */
362 dev_err(&etdev->pdev->dev, "WAKE_ON_LAN interrupt\n");
363 }
364
365 /* Handle the PHY interrupt */
366 if (status & ET_INTR_PHY) {
367 u32 pm_csr;
368 u16 bmsr_ints;
369 u16 bmsr_data;
370 u16 myisr;
371
372 /* If we are in coma mode when we get this interrupt,
373 * we need to disable it.
374 */
375 pm_csr = readl(&iomem->global.pm_csr);
376 if (pm_csr & ET_PM_PHY_SW_COMA) {
377 /*
378 * Check to see if we are in coma mode and if
379 * so, disable it because we will not be able
380 * to read PHY values until we are out.
381 */
382 DisablePhyComa(etdev);
383 }
384
385 /* Read the PHY ISR to clear the reason for the
386 * interrupt.
387 */
388 MiRead(etdev, (uint8_t) offsetof(struct mi_regs, isr),
389 &myisr);
390
391 if (!etdev->ReplicaPhyLoopbk) {
392 MiRead(etdev,
393 (uint8_t) offsetof(struct mi_regs, bmsr),
394 &bmsr_data);
395
396 bmsr_ints = etdev->bmsr ^ bmsr_data;
397 etdev->bmsr = bmsr_data;
398
399 /* Do all the cable in / cable out stuff */
400 et131x_Mii_check(etdev, bmsr_data, bmsr_ints);
401 }
402 }
403
404 /* Let's move on to the TxMac */
405 if (status & ET_INTR_TXMAC) {
406 u32 err = readl(&iomem->txmac.err);
407
408 /*
409 * When any of the errors occur and TXMAC generates
410 * an interrupt to report these errors, it usually
411 * means that TXMAC has detected an error in the data
412 * stream retrieved from the on-chip Tx Q. All of
413 * these errors are catastrophic and TXMAC won't be
414 * able to recover data when these errors occur. In
415 * a nutshell, the whole Tx path will have to be reset
416 * and re-configured afterwards.
417 */
418 dev_warn(&etdev->pdev->dev,
419 "TXMAC interrupt, error 0x%08x\n",
420 err);
421
422 /* If we are debugging, we want to see this error,
423 * otherwise we just want the device to be reset and
424 * continue
425 */
426 }
427
428 /* Handle RXMAC Interrupt */
429 if (status & ET_INTR_RXMAC) {
430 /*
431 * These interrupts are catastrophic to the device,
432 * what we need to do is disable the interrupts and
433 * set the flag to cause us to reset so we can solve
434 * this issue.
435 */
436 /* MP_SET_FLAG( etdev,
437 fMP_ADAPTER_HARDWARE_ERROR); */
438
439 dev_warn(&etdev->pdev->dev,
440 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
441 readl(&iomem->rxmac.err_reg));
442
443 dev_warn(&etdev->pdev->dev,
444 "Enable 0x%08x, Diag 0x%08x\n",
445 readl(&iomem->rxmac.ctrl),
446 readl(&iomem->rxmac.rxq_diag));
447
448 /*
449 * If we are debugging, we want to see this error,
450 * otherwise we just want the device to be reset and
451 * continue
452 */
453 }
454
455 /* Handle MAC_STAT Interrupt */
456 if (status & ET_INTR_MAC_STAT) {
457 /*
458 * This means at least one of the un-masked counters
459 * in the MAC_STAT block has rolled over. Use this
460 * to maintain the top, software managed bits of the
461 * counter(s).
462 */
463 HandleMacStatInterrupt(etdev);
464 }
465
466 /* Handle SLV Timeout Interrupt */
467 if (status & ET_INTR_SLV_TIMEOUT) {
468 /*
469 * This means a timeout has occurred on a read or
470 * write request to one of the JAGCore registers. The
471 * Global Resources block has terminated the request
472 * and on a read request, returned a "fake" value.
473 * The most likely reasons are: Bad Address or the
474 * addressed module is in a power-down state and
475 * can't respond.
476 */
477 }
478 }
479 et131x_enable_interrupts(etdev);
480}
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
new file mode 100644
index 00000000000..5f25bbad36b
--- /dev/null
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -0,0 +1,686 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_netdev.c - Routines and data required by all Linux network devices.
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/init.h>
62#include <linux/module.h>
63#include <linux/types.h>
64#include <linux/kernel.h>
65
66#include <linux/sched.h>
67#include <linux/ptrace.h>
68#include <linux/ctype.h>
69#include <linux/string.h>
70#include <linux/timer.h>
71#include <linux/interrupt.h>
72#include <linux/in.h>
73#include <linux/delay.h>
74#include <linux/io.h>
75#include <linux/bitops.h>
76#include <linux/pci.h>
77#include <asm/system.h>
78
79#include <linux/mii.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et1310_tx.h"
88#include "et131x_adapter.h"
89#include "et131x.h"
90
91/**
92 * et131x_stats - Return the current device statistics.
93 * @netdev: device whose stats are being queried
94 *
95 * Returns a pointer to the device's updated net_device_stats structure
96 */
97static struct net_device_stats *et131x_stats(struct net_device *netdev)
98{
99 struct et131x_adapter *adapter = netdev_priv(netdev);
100 struct net_device_stats *stats = &adapter->net_stats;
101 struct ce_stats *devstat = &adapter->stats;
102
103 stats->rx_errors = devstat->length_err + devstat->alignment_err +
104 devstat->crc_err + devstat->code_violations + devstat->other_errors;
105 stats->tx_errors = devstat->max_pkt_error;
106 stats->multicast = devstat->multircv;
107 stats->collisions = devstat->collisions;
108
109 stats->rx_length_errors = devstat->length_err;
110 stats->rx_over_errors = devstat->rx_ov_flow;
111 stats->rx_crc_errors = devstat->crc_err;
112
113 /* NOTE: These stats don't have corresponding values in CE_STATS,
114 * so we're going to have to update these directly from within the
115 * TX/RX code
116 */
117 /* stats->rx_bytes = 20; devstat->; */
118 /* stats->tx_bytes = 20; devstat->; */
119 /* stats->rx_dropped = devstat->; */
120 /* stats->tx_dropped = devstat->; */
121
122 /* NOTE: Not used, can't find analogous statistics */
123 /* stats->rx_frame_errors = devstat->; */
124 /* stats->rx_fifo_errors = devstat->; */
125 /* stats->rx_missed_errors = devstat->; */
126
127 /* stats->tx_aborted_errors = devstat->; */
128 /* stats->tx_carrier_errors = devstat->; */
129 /* stats->tx_fifo_errors = devstat->; */
130 /* stats->tx_heartbeat_errors = devstat->; */
131 /* stats->tx_window_errors = devstat->; */
132 return stats;
133}
134
135/**
136 * et131x_open - Open the device for use.
137 * @netdev: device to be opened
138 *
139 * Returns 0 on success, errno on failure (as defined in errno.h)
140 */
141int et131x_open(struct net_device *netdev)
142{
143 int result = 0;
144 struct et131x_adapter *adapter = netdev_priv(netdev);
145
146 /* Start the timer to track NIC errors */
147 add_timer(&adapter->ErrorTimer);
148
149 /* Register our IRQ */
150 result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
151 netdev->name, netdev);
152 if (result) {
153		dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
154 netdev->irq);
155 return result;
156 }
157
158 /* Enable the Tx and Rx DMA engines (if not already enabled) */
159 et131x_rx_dma_enable(adapter);
160 et131x_tx_dma_enable(adapter);
161
162 /* Enable device interrupts */
163 et131x_enable_interrupts(adapter);
164
165 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
166
167 /* We're ready to move some data, so start the queue */
168 netif_start_queue(netdev);
169 return result;
170}
171
172/**
173 * et131x_close - Close the device
174 * @netdev: device to be closed
175 *
176 * Returns 0 on success, errno on failure (as defined in errno.h)
177 */
178int et131x_close(struct net_device *netdev)
179{
180 struct et131x_adapter *adapter = netdev_priv(netdev);
181
182 /* First thing is to stop the queue */
183 netif_stop_queue(netdev);
184
185 /* Stop the Tx and Rx DMA engines */
186 et131x_rx_dma_disable(adapter);
187 et131x_tx_dma_disable(adapter);
188
189 /* Disable device interrupts */
190 et131x_disable_interrupts(adapter);
191
192 /* Deregistering ISR */
193 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
194 free_irq(netdev->irq, netdev);
195
196 /* Stop the error timer */
197 del_timer_sync(&adapter->ErrorTimer);
198 return 0;
199}
200
201/**
202 * et131x_ioctl_mii - The function which handles MII IOCTLs
203 * @netdev: device on which the query is being made
204 * @reqbuf: the request-specific data buffer
205 * @cmd: the command request code
206 *
207 * Returns 0 on success, errno on failure (as defined in errno.h)
208 */
209int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
210{
211 int status = 0;
212 struct et131x_adapter *etdev = netdev_priv(netdev);
213 struct mii_ioctl_data *data = if_mii(reqbuf);
214
215 switch (cmd) {
216 case SIOCGMIIPHY:
217 data->phy_id = etdev->stats.xcvr_addr;
218 break;
219
220 case SIOCGMIIREG:
221 if (!capable(CAP_NET_ADMIN))
222 status = -EPERM;
223 else
224 status = MiRead(etdev,
225 data->reg_num, &data->val_out);
226 break;
227
228 case SIOCSMIIREG:
229 if (!capable(CAP_NET_ADMIN))
230 status = -EPERM;
231 else
232 status = MiWrite(etdev, data->reg_num,
233 data->val_in);
234 break;
235
236 default:
237 status = -EOPNOTSUPP;
238 }
239 return status;
240}
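
From user space these handlers are reached through the standard MII ioctls on an AF_INET socket. A hypothetical caller (the interface name "eth0" is an assumption) that asks the driver for the PHY address and then reads the BMSR register:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii =
			(struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		/* SIOCGMIIPHY fills in mii->phy_id; reuse it for SIOCGMIIREG */
		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("PHY %u BMSR = 0x%04x\n",
				       mii->phy_id, mii->val_out);
		}
		close(fd);
		return 0;
	}
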
241
242/**
243 * et131x_ioctl - The I/O Control handler for the driver
244 * @netdev: device on which the control request is being made
245 * @reqbuf: a pointer to the IOCTL request buffer
246 * @cmd: the IOCTL command code
247 *
248 * Returns 0 on success, errno on failure (as defined in errno.h)
249 */
250int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
251{
252 int status = 0;
253
254 switch (cmd) {
255 case SIOCGMIIPHY:
256 case SIOCGMIIREG:
257 case SIOCSMIIREG:
258 status = et131x_ioctl_mii(netdev, reqbuf, cmd);
259 break;
260
261 default:
262 status = -EOPNOTSUPP;
263 }
264 return status;
265}
266
267/**
268 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
269 * @adapter: pointer to our private adapter structure
270 *
271 * FIXME: lot of dups with MAC code
272 *
273 * Returns 0 on success, errno on failure
274 */
275int et131x_set_packet_filter(struct et131x_adapter *adapter)
276{
277 int status = 0;
278 uint32_t filter = adapter->PacketFilter;
279 u32 ctrl;
280 u32 pf_ctrl;
281
282 ctrl = readl(&adapter->regs->rxmac.ctrl);
283 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
284
285 /* Default to disabled packet filtering. Enable it in the individual
286 * case statements that require the device to filter something
287 */
288 ctrl |= 0x04;
289
290 /* Set us to be in promiscuous mode so we receive everything, this
291 * is also true when we get a packet filter of 0
292 */
293 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
294 pf_ctrl &= ~7; /* Clear filter bits */
295 else {
296 /*
297 * Set us up with Multicast packet filtering. Three cases are
298 * possible - (1) we have a multi-cast list, (2) we receive ALL
299 * multicast entries or (3) we receive none.
300 */
301 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
302 pf_ctrl &= ~2; /* Multicast filter bit */
303 else {
304 SetupDeviceForMulticast(adapter);
305 pf_ctrl |= 2;
306 ctrl &= ~0x04;
307 }
308
309 /* Set us up with Unicast packet filtering */
310 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
311 SetupDeviceForUnicast(adapter);
312 pf_ctrl |= 4;
313 ctrl &= ~0x04;
314 }
315
316 /* Set us up with Broadcast packet filtering */
317 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
318 pf_ctrl |= 1; /* Broadcast filter bit */
319 ctrl &= ~0x04;
320 } else
321 pf_ctrl &= ~1;
322
323 /* Setup the receive mac configuration registers - Packet
324 * Filter control + the enable / disable for packet filter
325 * in the control reg.
326 */
327 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
328 writel(ctrl, &adapter->regs->rxmac.ctrl);
329 }
330 return status;
331}
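
The raw values written above are easier to follow with named bits. The defines below are purely illustrative labels inferred from the comments in this function; they are not taken from the et131x register headers:

	/* Illustrative labels only, inferred from the comments above */
	#define EX_RXMAC_PF_CTRL_BROADCAST	 0x00000001	/* pf_ctrl bit 0 */
	#define EX_RXMAC_PF_CTRL_MULTICAST	 0x00000002	/* pf_ctrl bit 1 */
	#define EX_RXMAC_PF_CTRL_UNICAST	 0x00000004	/* pf_ctrl bit 2 */
	#define EX_RXMAC_CTRL_PKT_FILTER_DISABLE 0x00000004	/* ctrl bit 2 */

	/* e.g. promiscuous mode clears all three pf_ctrl filter bits:
	 *	pf_ctrl &= ~(EX_RXMAC_PF_CTRL_BROADCAST |
	 *		     EX_RXMAC_PF_CTRL_MULTICAST |
	 *		     EX_RXMAC_PF_CTRL_UNICAST);
	 */
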
332
333/**
334 * et131x_multicast - The handler to configure multicasting on the interface
335 * @netdev: a pointer to a net_device struct representing the device
336 */
337void et131x_multicast(struct net_device *netdev)
338{
339 struct et131x_adapter *adapter = netdev_priv(netdev);
340 uint32_t PacketFilter = 0;
341 unsigned long flags;
342 struct netdev_hw_addr *ha;
343 int i;
344
345 spin_lock_irqsave(&adapter->Lock, flags);
346
347 /* Before we modify the platform-independent filter flags, store them
348 * locally. This allows us to determine if anything's changed and if
349 * we even need to bother the hardware
350 */
351 PacketFilter = adapter->PacketFilter;
352
353 /* Clear the 'multicast' flag locally; because we only have a single
354 * flag to check multicast, and multiple multicast addresses can be
355 * set, this is the easiest way to determine if more than one
356 * multicast address is being set.
357 */
358 PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
359
360 /* Check the net_device flags and set the device independent flags
361 * accordingly
362 */
363
364 if (netdev->flags & IFF_PROMISC)
365 adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS;
366 else
367 adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
368
369 if (netdev->flags & IFF_ALLMULTI)
370 adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
371
372 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
373 adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
374
375 if (netdev_mc_count(netdev) < 1) {
376 adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
377 adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
378 } else
379 adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST;
380
381 /* Set values in the private adapter struct */
382 i = 0;
383 netdev_for_each_mc_addr(ha, netdev) {
384 if (i == NIC_MAX_MCAST_LIST)
385 break;
386 memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
387 }
388 adapter->MCAddressCount = i;
389
390 /* Are the new flags different from the previous ones? If not, then no
391 * action is required
392 *
393 * NOTE - This block will always update the MCList with the hardware,
394 * even if the addresses aren't the same.
395 */
396 if (PacketFilter != adapter->PacketFilter) {
397 /* Call the device's filter function */
398 et131x_set_packet_filter(adapter);
399 }
400 spin_unlock_irqrestore(&adapter->Lock, flags);
401}
402
403/**
404 * et131x_tx - The handler to tx a packet on the device
405 * @skb: data to be Tx'd
406 * @netdev: device on which data is to be Tx'd
407 *
408 * Returns 0 on success, errno on failure (as defined in errno.h)
409 */
410int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
411{
412 int status = 0;
413
414 /* Save the timestamp for the TX timeout watchdog */
415 netdev->trans_start = jiffies;
416
417 /* Call the device-specific data Tx routine */
418 status = et131x_send_packets(skb, netdev);
419
420 /* Check status and manage the netif queue if necessary */
421 if (status != 0) {
422 if (status == -ENOMEM) {
423 /* Put the queue to sleep until resources are
424 * available
425 */
426 netif_stop_queue(netdev);
427 status = NETDEV_TX_BUSY;
428 } else {
429 status = NETDEV_TX_OK;
430 }
431 }
432 return status;
433}
434
435/**
436 * et131x_tx_timeout - Timeout handler
437 * @netdev: a pointer to a net_device struct representing the device
438 *
439 * The handler called when a Tx request times out. The timeout period is
440 * specified by the 'watchdog_timeo' element in the net_device structure (see
441 * et131x_device_alloc() to see how this value is set).
442 */
443void et131x_tx_timeout(struct net_device *netdev)
444{
445 struct et131x_adapter *etdev = netdev_priv(netdev);
446 struct tcb *tcb;
447 unsigned long flags;
448
449 /* Any nonrecoverable hardware error?
450 * Checks adapter->flags for any failure in phy reading
451 */
452 if (etdev->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
453 return;
454
455 /* Hardware failure? */
456 if (etdev->flags & fMP_ADAPTER_HARDWARE_ERROR) {
457 dev_err(&etdev->pdev->dev, "hardware error - reset\n");
458 return;
459 }
460
461 /* Is send stuck? */
462 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
463
464 tcb = etdev->tx_ring.send_head;
465
466 if (tcb != NULL) {
467 tcb->count++;
468
469 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
470 spin_unlock_irqrestore(&etdev->TCBSendQLock,
471 flags);
472
473 dev_warn(&etdev->pdev->dev,
474 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
475 tcb->index,
476 tcb->flags);
477
478 et131x_close(netdev);
479 et131x_open(netdev);
480
481 return;
482 }
483 }
484
485 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
486}
487
488/**
489 * et131x_change_mtu - The handler called to change the MTU for the device
490 * @netdev: device whose MTU is to be changed
491 * @new_mtu: the desired MTU
492 *
493 * Returns 0 on success, errno on failure (as defined in errno.h)
494 */
495int et131x_change_mtu(struct net_device *netdev, int new_mtu)
496{
497 int result = 0;
498 struct et131x_adapter *adapter = netdev_priv(netdev);
499
500 /* Make sure the requested MTU is valid */
501 if (new_mtu < 64 || new_mtu > 9216)
502 return -EINVAL;
503
504 /* Stop the netif queue */
505 netif_stop_queue(netdev);
506
507 /* Stop the Tx and Rx DMA engines */
508 et131x_rx_dma_disable(adapter);
509 et131x_tx_dma_disable(adapter);
510
511 /* Disable device interrupts */
512 et131x_disable_interrupts(adapter);
513 et131x_handle_send_interrupt(adapter);
514 et131x_handle_recv_interrupt(adapter);
515
516 /* Set the new MTU */
517 netdev->mtu = new_mtu;
518
519 /* Free Rx DMA memory */
520 et131x_adapter_memory_free(adapter);
521
522 /* Set the config parameter for Jumbo Packet support */
523 adapter->RegistryJumboPacket = new_mtu + 14;
524 et131x_soft_reset(adapter);
525
526 /* Alloc and init Rx DMA memory */
527 result = et131x_adapter_memory_alloc(adapter);
528 if (result != 0) {
529 dev_warn(&adapter->pdev->dev,
530 "Change MTU failed; couldn't re-alloc DMA memory\n");
531 return result;
532 }
533
534 et131x_init_send(adapter);
535
536 et131x_hwaddr_init(adapter);
537 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
538
539 /* Init the device with the new settings */
540 et131x_adapter_setup(adapter);
541
542 /* Enable interrupts */
543 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
544 et131x_enable_interrupts(adapter);
545
546 /* Restart the Tx and Rx DMA engines */
547 et131x_rx_dma_enable(adapter);
548 et131x_tx_dma_enable(adapter);
549
550 /* Restart the netif queue */
551 netif_wake_queue(netdev);
552 return result;
553}
554
555/**
556 * et131x_set_mac_addr - handler to change the MAC address for the device
557 * @netdev: device whose MAC is to be changed
558 * @new_mac: the desired MAC address
559 *
560 * Returns 0 on success, errno on failure (as defined in errno.h)
561 *
562 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
563 */
564int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
565{
566 int result = 0;
567 struct et131x_adapter *adapter = netdev_priv(netdev);
568 struct sockaddr *address = new_mac;
569
570 /* begin blux */
571
572 if (adapter == NULL)
573 return -ENODEV;
574
575 /* Make sure the requested MAC is valid */
576 if (!is_valid_ether_addr(address->sa_data))
577 return -EINVAL;
578
579 /* Stop the netif queue */
580 netif_stop_queue(netdev);
581
582 /* Stop the Tx and Rx DMA engines */
583 et131x_rx_dma_disable(adapter);
584 et131x_tx_dma_disable(adapter);
585
586 /* Disable device interrupts */
587 et131x_disable_interrupts(adapter);
588 et131x_handle_send_interrupt(adapter);
589 et131x_handle_recv_interrupt(adapter);
590
591 /* Set the new MAC */
592 /* netdev->set_mac_address = &new_mac; */
593 /* netdev->mtu = new_mtu; */
594
595 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
596
597 printk(KERN_INFO "%s: Setting MAC address to %pM\n",
598 netdev->name, netdev->dev_addr);
599
600 /* Free Rx DMA memory */
601 et131x_adapter_memory_free(adapter);
602
603 /* Set the config parameter for Jumbo Packet support */
604 /* adapter->RegistryJumboPacket = new_mtu + 14; */
605	/* blux: not needed here, we'll change the MAC */
606
607 et131x_soft_reset(adapter);
608
609 /* Alloc and init Rx DMA memory */
610 result = et131x_adapter_memory_alloc(adapter);
611 if (result != 0) {
612 dev_err(&adapter->pdev->dev,
613 "Change MAC failed; couldn't re-alloc DMA memory\n");
614 return result;
615 }
616
617 et131x_init_send(adapter);
618
619 et131x_hwaddr_init(adapter);
620
621 /* Init the device with the new settings */
622 et131x_adapter_setup(adapter);
623
624 /* Enable interrupts */
625 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
626 et131x_enable_interrupts(adapter);
627
628 /* Restart the Tx and Rx DMA engines */
629 et131x_rx_dma_enable(adapter);
630 et131x_tx_dma_enable(adapter);
631
632 /* Restart the netif queue */
633 netif_wake_queue(netdev);
634 return result;
635}
636
637static const struct net_device_ops et131x_netdev_ops = {
638 .ndo_open = et131x_open,
639 .ndo_stop = et131x_close,
640 .ndo_start_xmit = et131x_tx,
641 .ndo_set_multicast_list = et131x_multicast,
642 .ndo_tx_timeout = et131x_tx_timeout,
643 .ndo_change_mtu = et131x_change_mtu,
644 .ndo_set_mac_address = et131x_set_mac_addr,
645 .ndo_validate_addr = eth_validate_addr,
646 .ndo_get_stats = et131x_stats,
647 .ndo_do_ioctl = et131x_ioctl,
648};
649
650/**
651 * et131x_device_alloc
652 *
653 * Returns pointer to the allocated and initialized net_device struct for
654 * this device.
655 *
656 * Create instances of net_device and et131x_adapter for the new adapter and
657 * register the device's entry points in the net_device structure.
658 */
659struct net_device *et131x_device_alloc(void)
660{
661 struct net_device *netdev;
662
663 /* Alloc net_device and adapter structs */
664 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
665
666 if (netdev == NULL) {
667 printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
668 return NULL;
669 }
670
671 /* Setup the function registration table (and other data) for a
672 * net_device
673 */
674 /* netdev->init = &et131x_init; */
675 /* netdev->set_config = &et131x_config; */
676 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
677 netdev->netdev_ops = &et131x_netdev_ops;
678
679 /* netdev->ethtool_ops = &et131x_ethtool_ops; */
680
681 /* Poll? */
682 /* netdev->poll = &et131x_poll; */
683 /* netdev->poll_controller = &et131x_poll_controller; */
684 return netdev;
685}
686
diff --git a/drivers/staging/et131x/et131x_version.h b/drivers/staging/et131x/et131x_version.h
new file mode 100644
index 00000000000..2aa9bda44ac
--- /dev/null
+++ b/drivers/staging/et131x/et131x_version.h
@@ -0,0 +1,74 @@
1/*
2 * Agere Systems Inc.
3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 *
5 * Copyright © 2005 Agere Systems Inc.
6 * All rights reserved.
7 * http://www.agere.com
8 *
9 *------------------------------------------------------------------------------
10 *
11 * et131x_version.h - This file provides system and device version information.
12 *
13 *------------------------------------------------------------------------------
14 *
15 * SOFTWARE LICENSE
16 *
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
21 *
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
24 *
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
27 *
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
31 * distribution.
32 *
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
36 *
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
40 *
41 * Disclaimer
42 *
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 *
56 */
57
58#ifndef __ET131X_VERSION_H__
59#define __ET131X_VERSION_H__
60
61#define DRIVER_AUTHOR "Victor Soriano (vjsoriano@agere.com)"
62#define DRIVER_LICENSE "Dual BSD/GPL"
63#define DRIVER_DEVICE_STRING "ET1310"
64#define DRIVER_NAME "et131x"
65#define DRIVER_VERSION_STRING "1.2.3-lk"
66#define DRIVER_VENDOR "Agere Systems, http://www.agere.com"
67#define DRIVER_DESC "10/100/1000 Base-T Ethernet Driver"
68
69#define DRIVER_INFO DRIVER_DESC " for the "\
70 DRIVER_DEVICE_STRING ", v" \
71 DRIVER_VERSION_STRING " by " \
72 DRIVER_VENDOR
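
Because adjacent string literals are concatenated, DRIVER_INFO resolves to a single string. A quick user-space check (illustrative only):

	#include <stdio.h>
	#include "et131x_version.h"

	int main(void)
	{
		/* Prints, on one line:
		 * 10/100/1000 Base-T Ethernet Driver for the ET1310, v1.2.3-lk
		 * by Agere Systems, http://www.agere.com
		 */
		puts(DRIVER_INFO);
		return 0;
	}
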
73
74#endif /* __ET131X_VERSION_H__ */