path: root/drivers/net/ucc_geth.c
author	Li Yang <leoli@freescale.com>	2006-08-15 02:00:11 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-08-19 17:44:29 -0400
commit	ce973b141dfac4a0f160c7435d65e3ea47753ce8 (patch)
tree	2dfda8aa29023fca37e1f5127d2e5e6fae1c6180 /drivers/net/ucc_geth.c
parent	e4c780b1ffc7d7bc27b7dc57fcf17ebb8d3006bc (diff)
[PATCH] Freescale QE UCC gigabit ethernet driver
QE (QUICC Engine) is a new-generation communication coprocessor found on some of the latest Freescale PowerQUICC CPUs (e.g. the MPC8360). The UCC (Unified Communications Controller) module of the QE can work as a gigabit Ethernet device. This patch provides a driver for that device.

Signed-off-by: Shlomi Gridish <gridish@freescale.com>
Signed-off-by: Li Yang <leoli@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ucc_geth.c')
-rw-r--r--	drivers/net/ucc_geth.c	4278
1 files changed, 4278 insertions, 0 deletions
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 000000000000..47f49ef72bdc
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4278 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * QE UCC Gigabit Ethernet Driver
8 *
9 * Changelog:
10 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/dma-mapping.h>
32#include <linux/fsl_devices.h>
33#include <linux/ethtool.h>
34#include <linux/platform_device.h>
35#include <linux/mii.h>
36
37#include <asm/uaccess.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/immap_qe.h>
41#include <asm/qe.h>
42#include <asm/ucc.h>
43#include <asm/ucc_fast.h>
44
45#include "ucc_geth.h"
46#include "ucc_geth_phy.h"
47
48#undef DEBUG
49
50#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006"
51#define DRV_NAME "ucc_geth"
52
53#define ugeth_printk(level, format, arg...) \
54 printk(level format "\n", ## arg)
55
56#define ugeth_dbg(format, arg...) \
57 ugeth_printk(KERN_DEBUG , format , ## arg)
58#define ugeth_err(format, arg...) \
59 ugeth_printk(KERN_ERR , format , ## arg)
60#define ugeth_info(format, arg...) \
61 ugeth_printk(KERN_INFO , format , ## arg)
62#define ugeth_warn(format, arg...) \
63 ugeth_printk(KERN_WARNING , format , ## arg)
64
65#ifdef UGETH_VERBOSE_DEBUG
66#define ugeth_vdbg ugeth_dbg
67#else
68#define ugeth_vdbg(fmt, args...) do { } while (0)
69#endif /* UGETH_VERBOSE_DEBUG */
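/* For illustration: with the wrappers above, a call such as
 *	ugeth_err("%s: no memory", __FUNCTION__);
 * expands to
 *	printk(KERN_ERR "%s: no memory" "\n", __FUNCTION__);
 * i.e. every driver message carries an explicit log level and has a
 * trailing newline appended automatically.
 */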
70
71static DEFINE_SPINLOCK(ugeth_lock);
72
73static ucc_geth_info_t ugeth_primary_info = {
74 .uf_info = {
75 .bd_mem_part = MEM_PART_SYSTEM,
76 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
77 .max_rx_buf_length = 1536,
78/* FIXME: should be changed at run time for 1G and 100M */
79#ifdef CONFIG_UGETH_HAS_GIGA
80 .urfs = UCC_GETH_URFS_GIGA_INIT,
81 .urfet = UCC_GETH_URFET_GIGA_INIT,
82 .urfset = UCC_GETH_URFSET_GIGA_INIT,
83 .utfs = UCC_GETH_UTFS_GIGA_INIT,
84 .utfet = UCC_GETH_UTFET_GIGA_INIT,
85 .utftt = UCC_GETH_UTFTT_GIGA_INIT,
86#else
87 .urfs = UCC_GETH_URFS_INIT,
88 .urfet = UCC_GETH_URFET_INIT,
89 .urfset = UCC_GETH_URFSET_INIT,
90 .utfs = UCC_GETH_UTFS_INIT,
91 .utfet = UCC_GETH_UTFET_INIT,
92 .utftt = UCC_GETH_UTFTT_INIT,
93#endif
94 .ufpt = 256,
95 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
96 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
97 .tenc = UCC_FAST_TX_ENCODING_NRZ,
98 .renc = UCC_FAST_RX_ENCODING_NRZ,
99 .tcrc = UCC_FAST_16_BIT_CRC,
100 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
101 },
102 .numQueuesTx = 1,
103 .numQueuesRx = 1,
104 .extendedFilteringChainPointer = ((uint32_t) NULL),
105 .typeorlen = 3072 /*1536 */ ,
106 .nonBackToBackIfgPart1 = 0x40,
107 .nonBackToBackIfgPart2 = 0x60,
108 .miminumInterFrameGapEnforcement = 0x50,
109 .backToBackInterFrameGap = 0x60,
110 .mblinterval = 128,
111 .nortsrbytetime = 5,
112 .fracsiz = 1,
113 .strictpriorityq = 0xff,
114 .altBebTruncation = 0xa,
115 .excessDefer = 1,
116 .maxRetransmission = 0xf,
117 .collisionWindow = 0x37,
118 .receiveFlowControl = 1,
119 .maxGroupAddrInHash = 4,
120 .maxIndAddrInHash = 4,
121 .prel = 7,
122 .maxFrameLength = 1518,
123 .minFrameLength = 64,
124 .maxD1Length = 1520,
125 .maxD2Length = 1520,
126 .vlantype = 0x8100,
127 .ecamptr = ((uint32_t) NULL),
128 .eventRegMask = UCCE_OTHER,
129 .pausePeriod = 0xf000,
130 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
131 .bdRingLenTx = {
132 TX_BD_RING_LEN,
133 TX_BD_RING_LEN,
134 TX_BD_RING_LEN,
135 TX_BD_RING_LEN,
136 TX_BD_RING_LEN,
137 TX_BD_RING_LEN,
138 TX_BD_RING_LEN,
139 TX_BD_RING_LEN},
140
141 .bdRingLenRx = {
142 RX_BD_RING_LEN,
143 RX_BD_RING_LEN,
144 RX_BD_RING_LEN,
145 RX_BD_RING_LEN,
146 RX_BD_RING_LEN,
147 RX_BD_RING_LEN,
148 RX_BD_RING_LEN,
149 RX_BD_RING_LEN},
150
151 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
152 .largestexternallookupkeysize =
153 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
154 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
155 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
156 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
157 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
158 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
159 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
160 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
161 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
162 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
163 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164};
165
166static ucc_geth_info_t ugeth_info[8];
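/* Note: ugeth_primary_info above serves as a per-device template;
 * each of the eight ugeth_info[] slots presumably receives a copy of
 * it at module init, so individual UCC instances can then be tuned
 * independently.
 */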
167
168#ifdef DEBUG
169static void mem_disp(u8 *addr, int size)
170{
171 u8 *i;
172 int size16Align = (size >> 4) << 4;
173 int size4Align = (size >> 2) << 2;
174 int notAlign = 0;
175 if (size % 16)
176 notAlign = 1;
177
178 for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
179 printk("0x%08x: %08x %08x %08x %08x\r\n",
180 (u32) i,
181 *((u32 *) (i)),
182 *((u32 *) (i + 4)),
183 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
184 if (notAlign == 1)
185 printk("0x%08x: ", (u32) i);
186 for (; (u32) i < (u32) addr + size4Align; i += 4)
187 printk("%08x ", *((u32 *) (i)));
188 for (; (u32) i < (u32) addr + size; i++)
189 printk("%02x", *((u8 *) (i)));
190 if (notAlign == 1)
191 printk("\r\n");
192}
193#endif /* DEBUG */
194
195#ifdef CONFIG_UGETH_FILTERING
196static void enqueue(struct list_head *node, struct list_head *lh)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&ugeth_lock, flags);
201 list_add_tail(node, lh);
202 spin_unlock_irqrestore(&ugeth_lock, flags);
203}
204#endif /* CONFIG_UGETH_FILTERING */
205
206static struct list_head *dequeue(struct list_head *lh)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(&ugeth_lock, flags);
211 if (!list_empty(lh)) {
212 struct list_head *node = lh->next;
213 list_del(node);
214 spin_unlock_irqrestore(&ugeth_lock, flags);
215 return node;
216 } else {
217 spin_unlock_irqrestore(&ugeth_lock, flags);
218 return NULL;
219 }
220}
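/* A minimal usage sketch for the two helpers above (hypothetical
 * list name; the real callers pass driver-private lists):
 *
 *	enqueue(&cont->node, &some_hash_q);
 *	...
 *	node = dequeue(&some_hash_q);
 *	if (node)
 *		... container_of(node, ...) ...
 *
 * Both helpers take ugeth_lock internally, so callers need no extra
 * locking around the list operations themselves.
 */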
221
222static int get_interface_details(enet_interface_e enet_interface,
223 enet_speed_e *speed,
224 int *r10m,
225 int *rmm,
226 int *rpm,
227 int *tbi, int *limited_to_full_duplex)
228{
229 /* Analyze enet_interface according to Interface Mode
230 Configuration table */
231 switch (enet_interface) {
232 case ENET_10_MII:
233 *speed = ENET_SPEED_10BT;
234 break;
235 case ENET_10_RMII:
236 *speed = ENET_SPEED_10BT;
237 *r10m = 1;
238 *rmm = 1;
239 break;
240 case ENET_10_RGMII:
241 *speed = ENET_SPEED_10BT;
242 *rpm = 1;
243 *r10m = 1;
244 *limited_to_full_duplex = 1;
245 break;
246 case ENET_100_MII:
247 *speed = ENET_SPEED_100BT;
248 break;
249 case ENET_100_RMII:
250 *speed = ENET_SPEED_100BT;
251 *rmm = 1;
252 break;
253 case ENET_100_RGMII:
254 *speed = ENET_SPEED_100BT;
255 *rpm = 1;
256 *limited_to_full_duplex = 1;
257 break;
258 case ENET_1000_GMII:
259 *speed = ENET_SPEED_1000BT;
260 *limited_to_full_duplex = 1;
261 break;
262 case ENET_1000_RGMII:
263 *speed = ENET_SPEED_1000BT;
264 *rpm = 1;
265 *limited_to_full_duplex = 1;
266 break;
267 case ENET_1000_TBI:
268 *speed = ENET_SPEED_1000BT;
269 *tbi = 1;
270 *limited_to_full_duplex = 1;
271 break;
272 case ENET_1000_RTBI:
273 *speed = ENET_SPEED_1000BT;
274 *rpm = 1;
275 *tbi = 1;
276 *limited_to_full_duplex = 1;
277 break;
278 default:
279 return -EINVAL;
280 break;
281 }
282
283 return 0;
284}
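/* The out-flags returned here feed the UPSMR setup in
 * adjust_enet_interface() below: rpm -> UPSMR_RPM, r10m -> UPSMR_R10M,
 * tbi -> UPSMR_TBIM and rmm -> UPSMR_RMM; speed and
 * limited_to_full_duplex drive MACCFG2 instead.
 */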
285
286static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
287{
288 struct sk_buff *skb = NULL;
289
290 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
291 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
292
293 if (skb == NULL)
294 return NULL;
295
296 /* We need the data buffer to be aligned properly. We will reserve
297 * as many bytes as needed to align the data properly
298 */
299 skb_reserve(skb,
300 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
301 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
302 1)));
303
304 skb->dev = ugeth->dev;
305
306 BD_BUFFER_SET(bd,
307 dma_map_single(NULL,
308 skb->data,
309 ugeth->ug_info->uf_info.max_rx_buf_length +
310 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
311 DMA_FROM_DEVICE));
312
313 BD_STATUS_AND_LENGTH_SET(bd,
314 (R_E | R_I |
315 (BD_STATUS_AND_LENGTH(bd) & R_W)));
316
317 return skb;
318}
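/* Worked example of the skb_reserve() math above, assuming (for
 * illustration only) UCC_GETH_RX_DATA_BUF_ALIGNMENT were 64: for
 * skb->data == 0x1010, 0x1010 & 63 == 16, so 64 - 16 == 48 bytes are
 * reserved and data moves to 0x1040, a 64-byte boundary.  An already
 * aligned buffer still gets a full 64 bytes reserved, which is why
 * the allocation adds ALIGNMENT extra bytes in every case.
 */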
319
320static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
321{
322 u8 *bd;
323 u32 bd_status;
324 struct sk_buff *skb;
325 int i;
326
327 bd = ugeth->p_rx_bd_ring[rxQ];
328 i = 0;
329
330 do {
331 bd_status = BD_STATUS_AND_LENGTH(bd);
332 skb = get_new_skb(ugeth, bd);
333
334 if (!skb) /* If we cannot allocate a data buffer,
335 abort; cleanup happens elsewhere */
336 return -ENOMEM;
337
338 ugeth->rx_skbuff[rxQ][i] = skb;
339
340 /* advance the BD pointer */
341 bd += UCC_GETH_SIZE_OF_BD;
342 i++;
343 } while (!(bd_status & R_W));
344
345 return 0;
346}
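/* The loop above walks the whole ring by testing the R_W (wrap) bit:
 * bd_status is sampled before get_new_skb() rewrites the BD, and
 * get_new_skb() preserves R_W while setting R_E | R_I, so iteration
 * stops right after the final, wrap-marked descriptor is filled.
 */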
347
348static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
349 volatile u32 *p_start,
350 u8 num_entries,
351 u32 thread_size,
352 u32 thread_alignment,
353 qe_risc_allocation_e risc,
354 int skip_page_for_first_entry)
355{
356 u32 init_enet_offset;
357 u8 i;
358 int snum;
359
360 for (i = 0; i < num_entries; i++) {
361 if ((snum = qe_get_snum()) < 0) {
362 ugeth_err("fill_init_enet_entries: Cannot get SNUM.");
363 return snum;
364 }
365 if ((i == 0) && skip_page_for_first_entry)
366 /* First entry of Rx does not have page */
367 init_enet_offset = 0;
368 else {
369 init_enet_offset =
370 qe_muram_alloc(thread_size, thread_alignment);
371 if (IS_MURAM_ERR(init_enet_offset)) {
372 ugeth_err
373 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
374 qe_put_snum((u8) snum);
375 return -ENOMEM;
376 }
377 }
378 *(p_start++) =
379 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
380 | risc;
381 }
382
383 return 0;
384}
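/* Each 32-bit init-enet entry written above packs three fields:
 * the SNUM in the bits at ENET_INIT_PARAM_SNUM_SHIFT, the MURAM
 * offset of the thread parameter page, and the RISC allocation bits;
 * return_init_enet_entries() below decodes exactly this layout when
 * handing the resources back.
 */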
385
386static int return_init_enet_entries(ucc_geth_private_t *ugeth,
387 volatile u32 *p_start,
388 u8 num_entries,
389 qe_risc_allocation_e risc,
390 int skip_page_for_first_entry)
391{
392 u32 init_enet_offset;
393 u8 i;
394 int snum;
395
396 for (i = 0; i < num_entries; i++) {
397 /* Check that this entry was actually valid --
398 needed in case allocations failed */
399 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
400 snum =
401 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
402 ENET_INIT_PARAM_SNUM_SHIFT;
403 qe_put_snum((u8) snum);
404 if (!((i == 0) && skip_page_for_first_entry)) {
405 /* First entry of Rx does not have page */
406 init_enet_offset =
407 (in_be32(p_start) &
408 ENET_INIT_PARAM_PTR_MASK);
409 qe_muram_free(init_enet_offset);
410 }
411 *(p_start++) = 0; /* Just for cosmetics */
412 }
413 }
414
415 return 0;
416}
417
418#ifdef DEBUG
419static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
420 volatile u32 *p_start,
421 u8 num_entries,
422 u32 thread_size,
423 qe_risc_allocation_e risc,
424 int skip_page_for_first_entry)
425{
426 u32 init_enet_offset;
427 u8 i;
428 int snum;
429
430 for (i = 0; i < num_entries; i++) {
431 /* Check that this entry was actually valid --
432 needed in case allocations failed */
433 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
434 snum =
435 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
436 ENET_INIT_PARAM_SNUM_SHIFT;
437 qe_put_snum((u8) snum);
438 if (!((i == 0) && skip_page_for_first_entry)) {
439 /* First entry of Rx does not have page */
440 init_enet_offset =
441 (in_be32(p_start) &
442 ENET_INIT_PARAM_PTR_MASK);
443 ugeth_info("Init enet entry %d:", i);
444 ugeth_info("Base address: 0x%08x",
445 (u32)
446 qe_muram_addr(init_enet_offset));
447 mem_disp(qe_muram_addr(init_enet_offset),
448 thread_size);
449 }
450 p_start++;
451 }
452 }
453
454 return 0;
455}
456#endif
457
458#ifdef CONFIG_UGETH_FILTERING
459static enet_addr_container_t *get_enet_addr_container(void)
460{
461 enet_addr_container_t *enet_addr_cont;
462
463 /* allocate memory */
464 enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
465 if (!enet_addr_cont) {
466 ugeth_err("%s: No memory for enet_addr_container_t object.",
467 __FUNCTION__);
468 return NULL;
469 }
470
471 return enet_addr_cont;
472}
473#endif /* CONFIG_UGETH_FILTERING */
474
475static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
476{
477 kfree(enet_addr_cont);
478}
479
480#ifdef CONFIG_UGETH_FILTERING
481static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
482 enet_addr_t *p_enet_addr, u8 paddr_num)
483{
484 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
485
486 if (!(paddr_num < NUM_OF_PADDRS)) {
487 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
488 return -EINVAL;
489 }
490
491 p_82xx_addr_filt =
492 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
493 addressfiltering;
494
495 /* Ethernet frames are defined in Little Endian mode, */
496 /* therefore to insert the address we reverse the bytes. */
497 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
498 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
499 (u16) (*p_enet_addr)[4]));
500 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
501 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
502 (u16) (*p_enet_addr)[2]));
503 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
504 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
505 (u16) (*p_enet_addr)[0]));
506
507 return 0;
508}
509#endif /* CONFIG_UGETH_FILTERING */
510
511static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
512{
513 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
514
515 if (!(paddr_num < NUM_OF_PADDRS)) {
516 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
517 return -EINVAL;
518 }
519
520 p_82xx_addr_filt =
521 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
522 addressfiltering;
523
524 /* Writing address ff.ff.ff.ff.ff.ff disables address
525 recognition for this register */
526 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
527 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
528 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
529
530 return 0;
531}
532
533static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
534 enet_addr_t *p_enet_addr)
535{
536 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
537 u32 cecr_subblock;
538
539 p_82xx_addr_filt =
540 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
541 addressfiltering;
542
543 cecr_subblock =
544 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
545
546 /* Ethernet frames are defined in Little Endian mode; */
547 /* therefore, to insert the address into the hash */
548 /* (Big Endian mode), we reverse the bytes. */
549 out_be16(&p_82xx_addr_filt->taddr.h,
550 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
551 (u16) (*p_enet_addr)[4]));
552 out_be16(&p_82xx_addr_filt->taddr.m,
553 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
554 (u16) (*p_enet_addr)[2]));
555 out_be16(&p_82xx_addr_filt->taddr.l,
556 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
557 (u16) (*p_enet_addr)[0]));
558
559 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
560 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
561}
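/* Byte-reversal example for the stores above: for a station address
 * with octets addr[0..5] = 00:04:9f:01:02:03, taddr.h = 0x0302,
 * taddr.m = 0x019f and taddr.l = 0x0400.
 */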
562
563#ifdef CONFIG_UGETH_MAGIC_PACKET
564static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
565{
566 ucc_fast_private_t *uccf;
567 ucc_geth_t *ug_regs;
568 u32 maccfg2, uccm;
569
570 uccf = ugeth->uccf;
571 ug_regs = ugeth->ug_regs;
572
573 /* Enable interrupts for magic packet detection */
574 uccm = in_be32(uccf->p_uccm);
575 uccm |= UCCE_MPD;
576 out_be32(uccf->p_uccm, uccm);
577
578 /* Enable magic packet detection */
579 maccfg2 = in_be32(&ug_regs->maccfg2);
580 maccfg2 |= MACCFG2_MPE;
581 out_be32(&ug_regs->maccfg2, maccfg2);
582}
583
584static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
585{
586 ucc_fast_private_t *uccf;
587 ucc_geth_t *ug_regs;
588 u32 maccfg2, uccm;
589
590 uccf = ugeth->uccf;
591 ug_regs = ugeth->ug_regs;
592
593 /* Disable interrupts for magic packet detection */
594 uccm = in_be32(uccf->p_uccm);
595 uccm &= ~UCCE_MPD;
596 out_be32(uccf->p_uccm, uccm);
597
598 /* Disable magic packet detection */
599 maccfg2 = in_be32(&ug_regs->maccfg2);
600 maccfg2 &= ~MACCFG2_MPE;
601 out_be32(&ug_regs->maccfg2, maccfg2);
602}
603#endif /* CONFIG_UGETH_MAGIC_PACKET */
604
605static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
606{
607 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
608}
609
610#ifdef DEBUG
611static void get_statistics(ucc_geth_private_t *ugeth,
612 ucc_geth_tx_firmware_statistics_t *
613 tx_firmware_statistics,
614 ucc_geth_rx_firmware_statistics_t *
615 rx_firmware_statistics,
616 ucc_geth_hardware_statistics_t *hardware_statistics)
617{
618 ucc_fast_t *uf_regs;
619 ucc_geth_t *ug_regs;
620 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
621 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
622
623 ug_regs = ugeth->ug_regs;
624 uf_regs = (ucc_fast_t *) ug_regs;
625 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
626 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
627
628 /* Tx firmware only if user handed pointer and driver actually
629 gathers Tx firmware statistics */
630 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
631 tx_firmware_statistics->sicoltx =
632 in_be32(&p_tx_fw_statistics_pram->sicoltx);
633 tx_firmware_statistics->mulcoltx =
634 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
635 tx_firmware_statistics->latecoltxfr =
636 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
637 tx_firmware_statistics->frabortduecol =
638 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
639 tx_firmware_statistics->frlostinmactxer =
640 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
641 tx_firmware_statistics->carriersenseertx =
642 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
643 tx_firmware_statistics->frtxok =
644 in_be32(&p_tx_fw_statistics_pram->frtxok);
645 tx_firmware_statistics->txfrexcessivedefer =
646 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
647 tx_firmware_statistics->txpkts256 =
648 in_be32(&p_tx_fw_statistics_pram->txpkts256);
649 tx_firmware_statistics->txpkts512 =
650 in_be32(&p_tx_fw_statistics_pram->txpkts512);
651 tx_firmware_statistics->txpkts1024 =
652 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
653 tx_firmware_statistics->txpktsjumbo =
654 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
655 }
656
657 /* Rx firmware only if user handed pointer and driver actually
658 * gathers Rx firmware statistics */
659 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
660 int i;
661 rx_firmware_statistics->frrxfcser =
662 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
663 rx_firmware_statistics->fraligner =
664 in_be32(&p_rx_fw_statistics_pram->fraligner);
665 rx_firmware_statistics->inrangelenrxer =
666 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
667 rx_firmware_statistics->outrangelenrxer =
668 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
669 rx_firmware_statistics->frtoolong =
670 in_be32(&p_rx_fw_statistics_pram->frtoolong);
671 rx_firmware_statistics->runt =
672 in_be32(&p_rx_fw_statistics_pram->runt);
673 rx_firmware_statistics->verylongevent =
674 in_be32(&p_rx_fw_statistics_pram->verylongevent);
675 rx_firmware_statistics->symbolerror =
676 in_be32(&p_rx_fw_statistics_pram->symbolerror);
677 rx_firmware_statistics->dropbsy =
678 in_be32(&p_rx_fw_statistics_pram->dropbsy);
679 for (i = 0; i < 0x8; i++)
680 rx_firmware_statistics->res0[i] =
681 p_rx_fw_statistics_pram->res0[i];
682 rx_firmware_statistics->mismatchdrop =
683 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
684 rx_firmware_statistics->underpkts =
685 in_be32(&p_rx_fw_statistics_pram->underpkts);
686 rx_firmware_statistics->pkts256 =
687 in_be32(&p_rx_fw_statistics_pram->pkts256);
688 rx_firmware_statistics->pkts512 =
689 in_be32(&p_rx_fw_statistics_pram->pkts512);
690 rx_firmware_statistics->pkts1024 =
691 in_be32(&p_rx_fw_statistics_pram->pkts1024);
692 rx_firmware_statistics->pktsjumbo =
693 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
694 rx_firmware_statistics->frlossinmacer =
695 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
696 rx_firmware_statistics->pausefr =
697 in_be32(&p_rx_fw_statistics_pram->pausefr);
698 for (i = 0; i < 0x4; i++)
699 rx_firmware_statistics->res1[i] =
700 p_rx_fw_statistics_pram->res1[i];
701 rx_firmware_statistics->removevlan =
702 in_be32(&p_rx_fw_statistics_pram->removevlan);
703 rx_firmware_statistics->replacevlan =
704 in_be32(&p_rx_fw_statistics_pram->replacevlan);
705 rx_firmware_statistics->insertvlan =
706 in_be32(&p_rx_fw_statistics_pram->insertvlan);
707 }
708
709 /* Hardware only if user handed pointer and driver actually
710 gathers hardware statistics */
711 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
712 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
713 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
714 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
715 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
716 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
717 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
718 hardware_statistics->txok = in_be32(&ug_regs->txok);
719 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
720 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
721 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
722 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
723 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
724 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
725 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
726 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
727 }
728}
729
730static void dump_bds(ucc_geth_private_t *ugeth)
731{
732 int i;
733 int length;
734
735 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
736 if (ugeth->p_tx_bd_ring[i]) {
737 length =
738 (ugeth->ug_info->bdRingLenTx[i] *
739 UCC_GETH_SIZE_OF_BD);
740 ugeth_info("TX BDs[%d]", i);
741 mem_disp(ugeth->p_tx_bd_ring[i], length);
742 }
743 }
744 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
745 if (ugeth->p_rx_bd_ring[i]) {
746 length =
747 (ugeth->ug_info->bdRingLenRx[i] *
748 UCC_GETH_SIZE_OF_BD);
749 ugeth_info("RX BDs[%d]", i);
750 mem_disp(ugeth->p_rx_bd_ring[i], length);
751 }
752 }
753}
754
755static void dump_regs(ucc_geth_private_t *ugeth)
756{
757 int i;
758
759 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
761
762 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
763 (u32) & ugeth->ug_regs->maccfg1,
764 in_be32(&ugeth->ug_regs->maccfg1));
765 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
766 (u32) & ugeth->ug_regs->maccfg2,
767 in_be32(&ugeth->ug_regs->maccfg2));
768 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
769 (u32) & ugeth->ug_regs->ipgifg,
770 in_be32(&ugeth->ug_regs->ipgifg));
771 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
772 (u32) & ugeth->ug_regs->hafdup,
773 in_be32(&ugeth->ug_regs->hafdup));
774 ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
775 (u32) & ugeth->ug_regs->miimng.miimcfg,
776 in_be32(&ugeth->ug_regs->miimng.miimcfg));
777 ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
778 (u32) & ugeth->ug_regs->miimng.miimcom,
779 in_be32(&ugeth->ug_regs->miimng.miimcom));
780 ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
781 (u32) & ugeth->ug_regs->miimng.miimadd,
782 in_be32(&ugeth->ug_regs->miimng.miimadd));
783 ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
784 (u32) & ugeth->ug_regs->miimng.miimcon,
785 in_be32(&ugeth->ug_regs->miimng.miimcon));
786 ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
787 (u32) & ugeth->ug_regs->miimng.miimstat,
788 in_be32(&ugeth->ug_regs->miimng.miimstat));
789 ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
790 (u32) & ugeth->ug_regs->miimng.miimind,
791 in_be32(&ugeth->ug_regs->miimng.miimind));
792 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
793 (u32) & ugeth->ug_regs->ifctl,
794 in_be32(&ugeth->ug_regs->ifctl));
795 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
796 (u32) & ugeth->ug_regs->ifstat,
797 in_be32(&ugeth->ug_regs->ifstat));
798 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
799 (u32) & ugeth->ug_regs->macstnaddr1,
800 in_be32(&ugeth->ug_regs->macstnaddr1));
801 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
802 (u32) & ugeth->ug_regs->macstnaddr2,
803 in_be32(&ugeth->ug_regs->macstnaddr2));
804 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
805 (u32) & ugeth->ug_regs->uempr,
806 in_be32(&ugeth->ug_regs->uempr));
807 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
808 (u32) & ugeth->ug_regs->utbipar,
809 in_be32(&ugeth->ug_regs->utbipar));
810 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
811 (u32) & ugeth->ug_regs->uescr,
812 in_be16(&ugeth->ug_regs->uescr));
813 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
814 (u32) & ugeth->ug_regs->tx64,
815 in_be32(&ugeth->ug_regs->tx64));
816 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
817 (u32) & ugeth->ug_regs->tx127,
818 in_be32(&ugeth->ug_regs->tx127));
819 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
820 (u32) & ugeth->ug_regs->tx255,
821 in_be32(&ugeth->ug_regs->tx255));
822 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
823 (u32) & ugeth->ug_regs->rx64,
824 in_be32(&ugeth->ug_regs->rx64));
825 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
826 (u32) & ugeth->ug_regs->rx127,
827 in_be32(&ugeth->ug_regs->rx127));
828 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
829 (u32) & ugeth->ug_regs->rx255,
830 in_be32(&ugeth->ug_regs->rx255));
831 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
832 (u32) & ugeth->ug_regs->txok,
833 in_be32(&ugeth->ug_regs->txok));
834 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
835 (u32) & ugeth->ug_regs->txcf,
836 in_be16(&ugeth->ug_regs->txcf));
837 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
838 (u32) & ugeth->ug_regs->tmca,
839 in_be32(&ugeth->ug_regs->tmca));
840 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
841 (u32) & ugeth->ug_regs->tbca,
842 in_be32(&ugeth->ug_regs->tbca));
843 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
844 (u32) & ugeth->ug_regs->rxfok,
845 in_be32(&ugeth->ug_regs->rxfok));
846 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
847 (u32) & ugeth->ug_regs->rxbok,
848 in_be32(&ugeth->ug_regs->rxbok));
849 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
850 (u32) & ugeth->ug_regs->rbyt,
851 in_be32(&ugeth->ug_regs->rbyt));
852 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
853 (u32) & ugeth->ug_regs->rmca,
854 in_be32(&ugeth->ug_regs->rmca));
855 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
856 (u32) & ugeth->ug_regs->rbca,
857 in_be32(&ugeth->ug_regs->rbca));
858 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
859 (u32) & ugeth->ug_regs->scar,
860 in_be32(&ugeth->ug_regs->scar));
861 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
862 (u32) & ugeth->ug_regs->scam,
863 in_be32(&ugeth->ug_regs->scam));
864
865 if (ugeth->p_thread_data_tx) {
866 int numThreadsTxNumerical;
867 switch (ugeth->ug_info->numThreadsTx) {
868 case UCC_GETH_NUM_OF_THREADS_1:
869 numThreadsTxNumerical = 1;
870 break;
871 case UCC_GETH_NUM_OF_THREADS_2:
872 numThreadsTxNumerical = 2;
873 break;
874 case UCC_GETH_NUM_OF_THREADS_4:
875 numThreadsTxNumerical = 4;
876 break;
877 case UCC_GETH_NUM_OF_THREADS_6:
878 numThreadsTxNumerical = 6;
879 break;
880 case UCC_GETH_NUM_OF_THREADS_8:
881 numThreadsTxNumerical = 8;
882 break;
883 default:
884 numThreadsTxNumerical = 0;
885 break;
886 }
887
888 ugeth_info("Thread data TXs:");
889 ugeth_info("Base address: 0x%08x",
890 (u32) ugeth->p_thread_data_tx);
891 for (i = 0; i < numThreadsTxNumerical; i++) {
892 ugeth_info("Thread data TX[%d]:", i);
893 ugeth_info("Base address: 0x%08x",
894 (u32) & ugeth->p_thread_data_tx[i]);
895 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
896 sizeof(ucc_geth_thread_data_tx_t));
897 }
898 }
899 if (ugeth->p_thread_data_rx) {
900 int numThreadsRxNumerical;
901 switch (ugeth->ug_info->numThreadsRx) {
902 case UCC_GETH_NUM_OF_THREADS_1:
903 numThreadsRxNumerical = 1;
904 break;
905 case UCC_GETH_NUM_OF_THREADS_2:
906 numThreadsRxNumerical = 2;
907 break;
908 case UCC_GETH_NUM_OF_THREADS_4:
909 numThreadsRxNumerical = 4;
910 break;
911 case UCC_GETH_NUM_OF_THREADS_6:
912 numThreadsRxNumerical = 6;
913 break;
914 case UCC_GETH_NUM_OF_THREADS_8:
915 numThreadsRxNumerical = 8;
916 break;
917 default:
918 numThreadsRxNumerical = 0;
919 break;
920 }
921
922 ugeth_info("Thread data RX:");
923 ugeth_info("Base address: 0x%08x",
924 (u32) ugeth->p_thread_data_rx);
925 for (i = 0; i < numThreadsRxNumerical; i++) {
926 ugeth_info("Thread data RX[%d]:", i);
927 ugeth_info("Base address: 0x%08x",
928 (u32) & ugeth->p_thread_data_rx[i]);
929 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
930 sizeof(ucc_geth_thread_data_rx_t));
931 }
932 }
933 if (ugeth->p_exf_glbl_param) {
934 ugeth_info("EXF global param:");
935 ugeth_info("Base address: 0x%08x",
936 (u32) ugeth->p_exf_glbl_param);
937 mem_disp((u8 *) ugeth->p_exf_glbl_param,
938 sizeof(*ugeth->p_exf_glbl_param));
939 }
940 if (ugeth->p_tx_glbl_pram) {
941 ugeth_info("TX global param:");
942 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
943 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
944 (u32) & ugeth->p_tx_glbl_pram->temoder,
945 in_be16(&ugeth->p_tx_glbl_pram->temoder));
946 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
947 (u32) & ugeth->p_tx_glbl_pram->sqptr,
948 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
949 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
950 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
951 in_be32(&ugeth->p_tx_glbl_pram->
952 schedulerbasepointer));
953 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
954 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
955 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
956 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
957 (u32) & ugeth->p_tx_glbl_pram->tstate,
958 in_be32(&ugeth->p_tx_glbl_pram->tstate));
959 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
960 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
961 ugeth->p_tx_glbl_pram->iphoffset[0]);
962 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
963 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
964 ugeth->p_tx_glbl_pram->iphoffset[1]);
965 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
966 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
967 ugeth->p_tx_glbl_pram->iphoffset[2]);
968 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
969 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
970 ugeth->p_tx_glbl_pram->iphoffset[3]);
971 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
972 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
973 ugeth->p_tx_glbl_pram->iphoffset[4]);
974 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
975 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
976 ugeth->p_tx_glbl_pram->iphoffset[5]);
977 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
978 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
979 ugeth->p_tx_glbl_pram->iphoffset[6]);
980 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
981 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
982 ugeth->p_tx_glbl_pram->iphoffset[7]);
983 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
984 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
985 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
986 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
987 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
988 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
989 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
990 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
991 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
992 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
993 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
994 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
995 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
996 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
997 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
998 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
999 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
1000 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
1001 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
1002 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
1003 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
1004 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
1005 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
1006 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
1007 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
1008 (u32) & ugeth->p_tx_glbl_pram->tqptr,
1009 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
1010 }
1011 if (ugeth->p_rx_glbl_pram) {
1012 ugeth_info("RX global param:");
1013 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
1014 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
1015 (u32) & ugeth->p_rx_glbl_pram->remoder,
1016 in_be32(&ugeth->p_rx_glbl_pram->remoder));
1017 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
1018 (u32) & ugeth->p_rx_glbl_pram->rqptr,
1019 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
1020 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
1021 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
1022 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
1023 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
1024 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
1025 ugeth->p_rx_glbl_pram->rxgstpack);
1026 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
1027 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
1028 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
1029 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
1030 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
1031 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
1032 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
1033 (u32) & ugeth->p_rx_glbl_pram->rstate,
1034 ugeth->p_rx_glbl_pram->rstate);
1035 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
1036 (u32) & ugeth->p_rx_glbl_pram->mrblr,
1037 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
1038 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
1039 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
1040 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
1041 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
1042 (u32) & ugeth->p_rx_glbl_pram->mflr,
1043 in_be16(&ugeth->p_rx_glbl_pram->mflr));
1044 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
1045 (u32) & ugeth->p_rx_glbl_pram->minflr,
1046 in_be16(&ugeth->p_rx_glbl_pram->minflr));
1047 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
1048 (u32) & ugeth->p_rx_glbl_pram->maxd1,
1049 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
1050 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
1051 (u32) & ugeth->p_rx_glbl_pram->maxd2,
1052 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
1053 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
1054 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
1055 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
1056 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
1057 (u32) & ugeth->p_rx_glbl_pram->l2qt,
1058 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
1059 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
1060 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
1061 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
1062 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
1063 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
1064 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
1065 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
1066 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
1067 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
1068 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
1069 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
1070 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
1071 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
1072 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
1073 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
1074 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
1075 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
1076 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
1077 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
1078 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
1079 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
1080 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
1081 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
1082 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
1083 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
1084 (u32) & ugeth->p_rx_glbl_pram->vlantype,
1085 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
1086 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
1087 (u32) & ugeth->p_rx_glbl_pram->vlantci,
1088 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
1089 for (i = 0; i < 64; i++)
1090 ugeth_info
1091 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
1092 i,
1093 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
1094 ugeth->p_rx_glbl_pram->addressfiltering[i]);
1095 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
1096 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
1097 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
1098 }
1099 if (ugeth->p_send_q_mem_reg) {
1100 ugeth_info("Send Q memory registers:");
1101 ugeth_info("Base address: 0x%08x",
1102 (u32) ugeth->p_send_q_mem_reg);
1103 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1104 ugeth_info("SQQD[%d]:", i);
1105 ugeth_info("Base address: 0x%08x",
1106 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1107 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1108 sizeof(ucc_geth_send_queue_qd_t));
1109 }
1110 }
1111 if (ugeth->p_scheduler) {
1112 ugeth_info("Scheduler:");
1113 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1114 mem_disp((u8 *) ugeth->p_scheduler,
1115 sizeof(*ugeth->p_scheduler));
1116 }
1117 if (ugeth->p_tx_fw_statistics_pram) {
1118 ugeth_info("TX FW statistics pram:");
1119 ugeth_info("Base address: 0x%08x",
1120 (u32) ugeth->p_tx_fw_statistics_pram);
1121 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1122 sizeof(*ugeth->p_tx_fw_statistics_pram));
1123 }
1124 if (ugeth->p_rx_fw_statistics_pram) {
1125 ugeth_info("RX FW statistics pram:");
1126 ugeth_info("Base address: 0x%08x",
1127 (u32) ugeth->p_rx_fw_statistics_pram);
1128 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1129 sizeof(*ugeth->p_rx_fw_statistics_pram));
1130 }
1131 if (ugeth->p_rx_irq_coalescing_tbl) {
1132 ugeth_info("RX IRQ coalescing tables:");
1133 ugeth_info("Base address: 0x%08x",
1134 (u32) ugeth->p_rx_irq_coalescing_tbl);
1135 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1136 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1137 ugeth_info("Base address: 0x%08x",
1138 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1139 coalescingentry[i]);
1140 ugeth_info
1141 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1142 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1143 coalescingentry[i].interruptcoalescingmaxvalue,
1144 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1145 coalescingentry[i].
1146 interruptcoalescingmaxvalue));
1147 ugeth_info
1148 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1149 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1150 coalescingentry[i].interruptcoalescingcounter,
1151 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1152 coalescingentry[i].
1153 interruptcoalescingcounter));
1154 }
1155 }
1156 if (ugeth->p_rx_bd_qs_tbl) {
1157 ugeth_info("RX BD QS tables:");
1158 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1159 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1160 ugeth_info("RX BD QS table[%d]:", i);
1161 ugeth_info("Base address: 0x%08x",
1162 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1163 ugeth_info
1164 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1165 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1166 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1167 ugeth_info
1168 ("bdptr : addr - 0x%08x, val - 0x%08x",
1169 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1170 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1171 ugeth_info
1172 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1173 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1174 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1175 externalbdbaseptr));
1176 ugeth_info
1177 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1178 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1179 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1180 ugeth_info("ucode RX Prefetched BDs:");
1181 ugeth_info("Base address: 0x%08x",
1182 (u32)
1183 qe_muram_addr(in_be32
1184 (&ugeth->p_rx_bd_qs_tbl[i].
1185 bdbaseptr)));
1186 mem_disp((u8 *)
1187 qe_muram_addr(in_be32
1188 (&ugeth->p_rx_bd_qs_tbl[i].
1189 bdbaseptr)),
1190 sizeof(ucc_geth_rx_prefetched_bds_t));
1191 }
1192 }
1193 if (ugeth->p_init_enet_param_shadow) {
1194 int size;
1195 ugeth_info("Init enet param shadow:");
1196 ugeth_info("Base address: 0x%08x",
1197 (u32) ugeth->p_init_enet_param_shadow);
1198 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1199 sizeof(*ugeth->p_init_enet_param_shadow));
1200
1201 size = sizeof(ucc_geth_thread_rx_pram_t);
1202 if (ugeth->ug_info->rxExtendedFiltering) {
1203 size +=
1204 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1205 if (ugeth->ug_info->largestexternallookupkeysize ==
1206 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1207 size +=
1208 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1209 if (ugeth->ug_info->largestexternallookupkeysize ==
1210 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1211 size +=
1212 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1213 }
1214
1215 dump_init_enet_entries(ugeth,
1216 &(ugeth->p_init_enet_param_shadow->
1217 txthread[0]),
1218 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1219 sizeof(ucc_geth_thread_tx_pram_t),
1220 ugeth->ug_info->riscTx, 0);
1221 dump_init_enet_entries(ugeth,
1222 &(ugeth->p_init_enet_param_shadow->
1223 rxthread[0]),
1224 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1225 ugeth->ug_info->riscRx, 1);
1226 }
1227}
1228#endif /* DEBUG */
1229
1230static void init_default_reg_vals(volatile u32 *upsmr_register,
1231 volatile u32 *maccfg1_register,
1232 volatile u32 *maccfg2_register)
1233{
1234 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1235 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1236 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1237}
1238
1239static int init_half_duplex_params(int alt_beb,
1240 int back_pressure_no_backoff,
1241 int no_backoff,
1242 int excess_defer,
1243 u8 alt_beb_truncation,
1244 u8 max_retransmissions,
1245 u8 collision_window,
1246 volatile u32 *hafdup_register)
1247{
1248 u32 value = 0;
1249
1250 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1251 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1252 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1253 return -EINVAL;
1254
1255 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1256
1257 if (alt_beb)
1258 value |= HALFDUP_ALT_BEB;
1259 if (back_pressure_no_backoff)
1260 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1261 if (no_backoff)
1262 value |= HALFDUP_NO_BACKOFF;
1263 if (excess_defer)
1264 value |= HALFDUP_EXCESSIVE_DEFER;
1265
1266 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1267
1268 value |= collision_window;
1269
1270 out_be32(hafdup_register, value);
1271 return 0;
1272}
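/* A sketch of how this might be called with the ugeth_primary_info
 * defaults above (not the actual call site):
 *
 *	init_half_duplex_params(0, 0, 0, 1, 0xa, 0xf, 0x37,
 *				&ug_regs->hafdup);
 *
 * which packs 0xa at HALFDUP_ALT_BEB_TRUNCATION_SHIFT, sets
 * HALFDUP_EXCESSIVE_DEFER, then ORs in 0xf at
 * HALFDUP_MAX_RETRANSMISSION_SHIFT and the collision window 0x37.
 */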
1273
1274static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1275 u8 non_btb_ipg,
1276 u8 min_ifg,
1277 u8 btb_ipg,
1278 volatile u32 *ipgifg_register)
1279{
1280 u32 value = 0;
1281
1282 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1283 IPG part 2 */
1284 if (non_btb_cs_ipg > non_btb_ipg)
1285 return -EINVAL;
1286
1287 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1288 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1289 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1290 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1291 return -EINVAL;
1292
1293 value |=
1294 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1295 IPGIFG_NBTB_CS_IPG_MASK);
1296 value |=
1297 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1298 IPGIFG_NBTB_IPG_MASK);
1299 value |=
1300 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1301 IPGIFG_MIN_IFG_MASK);
1302 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1303
1304 out_be32(ipgifg_register, value);
1305 return 0;
1306}
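/* With the ugeth_primary_info defaults (part1 0x40, part2 0x60,
 * min IFG 0x50, back-to-back 0x60), the part1 <= part2 precondition
 * holds, and each value is shifted and masked into its own field of
 * IPGIFG.
 */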
1307
1308static int init_flow_control_params(u32 automatic_flow_control_mode,
1309 int rx_flow_control_enable,
1310 int tx_flow_control_enable,
1311 u16 pause_period,
1312 u16 extension_field,
1313 volatile u32 *upsmr_register,
1314 volatile u32 *uempr_register,
1315 volatile u32 *maccfg1_register)
1316{
1317 u32 value = 0;
1318
1319 /* Set UEMPR register */
1320 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1321 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1322 out_be32(uempr_register, value);
1323
1324 /* Set UPSMR register */
1325 value = in_be32(upsmr_register);
1326 value |= automatic_flow_control_mode;
1327 out_be32(upsmr_register, value);
1328
1329 value = in_be32(maccfg1_register);
1330 if (rx_flow_control_enable)
1331 value |= MACCFG1_FLOW_RX;
1332 if (tx_flow_control_enable)
1333 value |= MACCFG1_FLOW_TX;
1334 out_be32(maccfg1_register, value);
1335
1336 return 0;
1337}
1338
1339static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1340 int auto_zero_hardware_statistics,
1341 volatile u32 *upsmr_register,
1342 volatile u16 *uescr_register)
1343{
1344 u32 upsmr_value = 0;
1345 u16 uescr_value = 0;
1346 /* Enable hardware statistics gathering if requested */
1347 if (enable_hardware_statistics) {
1348 upsmr_value = in_be32(upsmr_register);
1349 upsmr_value |= UPSMR_HSE;
1350 out_be32(upsmr_register, upsmr_value);
1351 }
1352
1353 /* Clear hardware statistics counters */
1354 uescr_value = in_be16(uescr_register);
1355 uescr_value |= UESCR_CLRCNT;
1356 /* Automatically zero hardware statistics counters on read,
1357 if requested */
1358 if (auto_zero_hardware_statistics)
1359 uescr_value |= UESCR_AUTOZ;
1360 out_be16(uescr_register, uescr_value);
1361
1362 return 0;
1363}
1364
1365static int init_firmware_statistics_gathering_mode(int
1366 enable_tx_firmware_statistics,
1367 int enable_rx_firmware_statistics,
1368 volatile u32 *tx_rmon_base_ptr,
1369 u32 tx_firmware_statistics_structure_address,
1370 volatile u32 *rx_rmon_base_ptr,
1371 u32 rx_firmware_statistics_structure_address,
1372 volatile u16 *temoder_register,
1373 volatile u32 *remoder_register)
1374{
1375 /* Note: this function does not check if */
1376 /* the parameters it receives are NULL */
1377 u16 temoder_value;
1378 u32 remoder_value;
1379
1380 if (enable_tx_firmware_statistics) {
1381 out_be32(tx_rmon_base_ptr,
1382 tx_firmware_statistics_structure_address);
1383 temoder_value = in_be16(temoder_register);
1384 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1385 out_be16(temoder_register, temoder_value);
1386 }
1387
1388 if (enable_rx_firmware_statistics) {
1389 out_be32(rx_rmon_base_ptr,
1390 rx_firmware_statistics_structure_address);
1391 remoder_value = in_be32(remoder_register);
1392 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1393 out_be32(remoder_register, remoder_value);
1394 }
1395
1396 return 0;
1397}
1398
1399static int init_mac_station_addr_regs(u8 address_byte_0,
1400 u8 address_byte_1,
1401 u8 address_byte_2,
1402 u8 address_byte_3,
1403 u8 address_byte_4,
1404 u8 address_byte_5,
1405 volatile u32 *macstnaddr1_register,
1406 volatile u32 *macstnaddr2_register)
1407{
1408 u32 value = 0;
1409
1410 /* Example: for a station address of 0x12345678ABCD, */
1411 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1412
1413 /* MACSTNADDR1 Register: */
1414
1415 /* 0 7 8 15 */
1416 /* station address byte 5 station address byte 4 */
1417 /* 16 23 24 31 */
1418 /* station address byte 3 station address byte 2 */
1419 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1420 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1421 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1422 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1423
1424 out_be32(macstnaddr1_register, value);
1425
1426 /* MACSTNADDR2 Register: */
1427
1428 /* 0 7 8 15 */
1429 /* station address byte 1 station address byte 0 */
1430 /* 16 23 24 31 */
1431 /* reserved reserved */
1432 value = 0;
1433 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1434 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1435
1436 out_be32(macstnaddr2_register, value);
1437
1438 return 0;
1439}
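/* Following the layout comments above, the example station address
 * 0x12345678ABCD (byte 0 = 0x12 ... byte 5 = 0xCD) yields
 * MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000.
 */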
1440
1441static int init_mac_duplex_mode(int full_duplex,
1442 int limited_to_full_duplex,
1443 volatile u32 *maccfg2_register)
1444{
1445 u32 value = 0;
1446
1447 /* some interfaces must work in full duplex mode */
1448 if ((full_duplex == 0) && (limited_to_full_duplex == 1))
1449 return -EINVAL;
1450
1451 value = in_be32(maccfg2_register);
1452
1453 if (full_duplex)
1454 value |= MACCFG2_FDX;
1455 else
1456 value &= ~MACCFG2_FDX;
1457
1458 out_be32(maccfg2_register, value);
1459 return 0;
1460}
1461
1462static int init_check_frame_length_mode(int length_check,
1463 volatile u32 *maccfg2_register)
1464{
1465 u32 value = 0;
1466
1467 value = in_be32(maccfg2_register);
1468
1469 if (length_check)
1470 value |= MACCFG2_LC;
1471 else
1472 value &= ~MACCFG2_LC;
1473
1474 out_be32(maccfg2_register, value);
1475 return 0;
1476}
1477
1478static int init_preamble_length(u8 preamble_length,
1479 volatile u32 *maccfg2_register)
1480{
1481 u32 value = 0;
1482
1483 if ((preamble_length < 3) || (preamble_length > 7))
1484 return -EINVAL;
1485
1486 value = in_be32(maccfg2_register);
1487 value &= ~MACCFG2_PREL_MASK;
1488 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1489 out_be32(maccfg2_register, value);
1490 return 0;
1491}
1492
1493static int init_mii_management_configuration(int reset_mgmt,
1494 int preamble_suppress,
1495 volatile u32 *miimcfg_register,
1496 volatile u32 *miimind_register)
1497{
1498 unsigned int timeout = PHY_INIT_TIMEOUT;
1499 u32 value = 0;
1500
1501 value = in_be32(miimcfg_register);
1502 if (reset_mgmt) {
1503 value |= MIIMCFG_RESET_MANAGEMENT;
1504 out_be32(miimcfg_register, value);
1505 }
1506
1507 value = 0;
1508
1509 if (preamble_suppress)
1510 value |= MIIMCFG_NO_PREAMBLE;
1511
1512 value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
1513 out_be32(miimcfg_register, value);
1514
1515 /* Wait until the bus is free */
1516 while ((in_be32(miimind_register) & MIIMIND_BUSY) && --timeout)
1517 cpu_relax();
1518
1519 if (timeout == 0) { /* timeout is unsigned, so test for exactly zero */
1520 ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
1521 return -ETIMEDOUT;
1522 }
1523
1524 return 0;
1525}
1526
1527static int init_rx_parameters(int reject_broadcast,
1528 int receive_short_frames,
1529 int promiscuous, volatile u32 *upsmr_register)
1530{
1531 u32 value = 0;
1532
1533 value = in_be32(upsmr_register);
1534
1535 if (reject_broadcast)
1536 value |= UPSMR_BRO;
1537 else
1538 value &= ~UPSMR_BRO;
1539
1540 if (receive_short_frames)
1541 value |= UPSMR_RSH;
1542 else
1543 value &= ~UPSMR_RSH;
1544
1545 if (promiscuous)
1546 value |= UPSMR_PRO;
1547 else
1548 value &= ~UPSMR_PRO;
1549
1550 out_be32(upsmr_register, value);
1551
1552 return 0;
1553}
1554
1555static int init_max_rx_buff_len(u16 max_rx_buf_len,
1556 volatile u16 *mrblr_register)
1557{
1558 /* max_rx_buf_len value must be a multiple of 128 */
1559 if ((max_rx_buf_len == 0)
1560 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1561 return -EINVAL;
1562
1563 out_be16(mrblr_register, max_rx_buf_len);
1564 return 0;
1565}
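/* The ugeth_primary_info default of max_rx_buf_length = 1536 passes
 * this check, being 12 * 128.
 */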
1566
1567static int init_min_frame_len(u16 min_frame_length,
1568 volatile u16 *minflr_register,
1569 volatile u16 *mrblr_register)
1570{
1571 u16 mrblr_value = 0;
1572
1573 mrblr_value = in_be16(mrblr_register);
1574 if (min_frame_length >= (mrblr_value - 4))
1575 return -EINVAL;
1576
1577 out_be16(minflr_register, min_frame_length);
1578 return 0;
1579}
1580
1581static int adjust_enet_interface(ucc_geth_private_t *ugeth)
1582{
1583 ucc_geth_info_t *ug_info;
1584 ucc_geth_t *ug_regs;
1585 ucc_fast_t *uf_regs;
1586 enet_speed_e speed;
1587 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
1588 0, limited_to_full_duplex = 0;
1589 u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
1590 u16 value;
1591
1592 ugeth_vdbg("%s: IN", __FUNCTION__);
1593
1594 ug_info = ugeth->ug_info;
1595 ug_regs = ugeth->ug_regs;
1596 uf_regs = ugeth->uccf->uf_regs;
1597
1598 /* Analyze enet_interface according to Interface Mode Configuration
1599 table */
1600 ret_val =
1601 get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
1602 &rpm, &tbi, &limited_to_full_duplex);
1603 if (ret_val != 0) {
1604 ugeth_err
1605 ("%s: half duplex not supported in requested configuration.",
1606 __FUNCTION__);
1607 return ret_val;
1608 }
1609
1610 /* Set MACCFG2 */
1611 maccfg2 = in_be32(&ug_regs->maccfg2);
1612 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1613 if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
1614 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1615 else if (speed == ENET_SPEED_1000BT)
1616 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1617 maccfg2 |= ug_info->padAndCrc;
1618 out_be32(&ug_regs->maccfg2, maccfg2);
1619
1620 /* Set UPSMR */
1621 upsmr = in_be32(&uf_regs->upsmr);
1622 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1623 if (rpm)
1624 upsmr |= UPSMR_RPM;
1625 if (r10m)
1626 upsmr |= UPSMR_R10M;
1627 if (tbi)
1628 upsmr |= UPSMR_TBIM;
1629 if (rmm)
1630 upsmr |= UPSMR_RMM;
1631 out_be32(&uf_regs->upsmr, upsmr);
1632
1633 /* Set UTBIPAR */
1634 utbipar = in_be32(&ug_regs->utbipar);
1635 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1636 if (tbi)
1637 utbipar |=
1638 (ug_info->phy_address +
1639 ugeth->ug_info->uf_info.
1640 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1641 else
1642 utbipar |=
1643 (0x10 +
1644 ugeth->ug_info->uf_info.
1645 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1646 out_be32(&ug_regs->utbipar, utbipar);
1647
1648 /* Disable autonegotiation in tbi mode, because by default it
1649 comes up in autonegotiation mode. */
1650 /* Note that this depends on proper setting in utbipar register. */
1651 if (tbi) {
1652 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1653 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1654 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1655 value =
1656 ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
1657 ENET_TBI_MII_CR);
1658 value &= ~0x1000; /* Turn off autonegotiation */
1659 ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
1660 ENET_TBI_MII_CR, value);
1661 }
1662
1663 ret_val = init_mac_duplex_mode(1,
1664 limited_to_full_duplex,
1665 &ug_regs->maccfg2);
1666 if (ret_val != 0) {
1667 ugeth_err
1668 ("%s: half duplex not supported in requested configuration.",
1669 __FUNCTION__);
1670 return ret_val;
1671 }
1672
1673 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1674
1675 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1676 if (ret_val != 0) {
1677 ugeth_err
1678 ("%s: Preamble length must be between 3 and 7 inclusive.",
1679 __FUNCTION__);
1680 return ret_val;
1681 }
1682
1683 return 0;
1684}
1685
1686/* Called every time the controller might need to be made
1687 * aware of new link state. The PHY code conveys this
1688 * information through variables in the ugeth structure, and this
1689 * function converts those variables into the appropriate
1690 * register values, and can bring down the device if needed.
1691 */
1692static void adjust_link(struct net_device *dev)
1693{
1694 ucc_geth_private_t *ugeth = netdev_priv(dev);
1695 ucc_geth_t *ug_regs;
1696 u32 tempval;
1697 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1698
1699 ug_regs = ugeth->ug_regs;
1700
1701 if (mii_info->link) {
1702 /* Now we make sure that we can be in full duplex mode.
1703 * If not, we operate in half-duplex mode. */
1704 if (mii_info->duplex != ugeth->oldduplex) {
1705 if (!(mii_info->duplex)) {
1706 tempval = in_be32(&ug_regs->maccfg2);
1707 tempval &= ~(MACCFG2_FDX);
1708 out_be32(&ug_regs->maccfg2, tempval);
1709
1710 ugeth_info("%s: Half Duplex", dev->name);
1711 } else {
1712 tempval = in_be32(&ug_regs->maccfg2);
1713 tempval |= MACCFG2_FDX;
1714 out_be32(&ug_regs->maccfg2, tempval);
1715
1716 ugeth_info("%s: Full Duplex", dev->name);
1717 }
1718
1719 ugeth->oldduplex = mii_info->duplex;
1720 }
1721
1722 if (mii_info->speed != ugeth->oldspeed) {
1723 switch (mii_info->speed) {
1724 case 1000:
1725#ifdef CONFIG_MPC836x
 1726/* FIXME: This code works around a 100Mbps bug;
 1727remove it once the bug is fixed!!! */
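/* Register 0x1b is assumed to be the extended status/control (HWCFG
   mode) register of the Marvell PHY fitted on MPC836x boards: 0xf
   selects GMII-to-copper, 0xb RGMII-to-copper.  The subsequent BMCR
   reset makes the new mode take effect. */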
1728 if (ugeth->ug_info->enet_interface ==
1729 ENET_1000_GMII)
1730 /* Run the commands which initialize the PHY */
1731 {
1732 tempval =
1733 (u32) mii_info->mdio_read(ugeth->
1734 dev, mii_info->mii_id, 0x1b);
1735 tempval |= 0x000f;
1736 mii_info->mdio_write(ugeth->dev,
1737 mii_info->mii_id, 0x1b,
1738 (u16) tempval);
1739 tempval =
1740 (u32) mii_info->mdio_read(ugeth->
1741 dev, mii_info->mii_id,
1742 MII_BMCR);
1743 mii_info->mdio_write(ugeth->dev,
1744 mii_info->mii_id, MII_BMCR,
1745 (u16) (tempval | BMCR_RESET));
1746 } else if (ugeth->ug_info->enet_interface ==
1747 ENET_1000_RGMII)
1748 /* Run the commands which initialize the PHY */
1749 {
1750 tempval =
1751 (u32) mii_info->mdio_read(ugeth->
1752 dev, mii_info->mii_id, 0x1b);
1753 tempval = (tempval & ~0x000f) | 0x000b;
1754 mii_info->mdio_write(ugeth->dev,
1755 mii_info->mii_id, 0x1b,
1756 (u16) tempval);
1757 tempval =
1758 (u32) mii_info->mdio_read(ugeth->
1759 dev, mii_info->mii_id,
1760 MII_BMCR);
1761 mii_info->mdio_write(ugeth->dev,
1762 mii_info->mii_id, MII_BMCR,
1763 (u16) (tempval | BMCR_RESET));
1764 }
1765 msleep(4000);
 1766#endif /* CONFIG_MPC836x */
1767 adjust_enet_interface(ugeth);
1768 break;
1769 case 100:
1770 case 10:
1771#ifdef CONFIG_MPC836x
 1772/* FIXME: This code works around a 100Mbps bug;
 1773remove these lines once the bug is fixed!!! */
1774 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1775 tempval =
1776 (u32) mii_info->mdio_read(ugeth->dev,
1777 mii_info->mii_id,
1778 0x1b);
1779 tempval = (tempval & ~0x000f) | 0x000b;
1780 mii_info->mdio_write(ugeth->dev,
1781 mii_info->mii_id, 0x1b,
1782 (u16) tempval);
1783 tempval =
1784 (u32) mii_info->mdio_read(ugeth->dev,
1785 mii_info->mii_id,
1786 MII_BMCR);
1787 mii_info->mdio_write(ugeth->dev,
1788 mii_info->mii_id, MII_BMCR,
1789 (u16) (tempval |
1790 BMCR_RESET));
1791 msleep(4000);
 1792#endif /* CONFIG_MPC836x */
1793 adjust_enet_interface(ugeth);
1794 break;
1795 default:
1796 ugeth_warn
1797 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1798 dev->name, mii_info->speed);
1799 break;
1800 }
1801
1802 ugeth_info("%s: Speed %dBT", dev->name,
1803 mii_info->speed);
1804
1805 ugeth->oldspeed = mii_info->speed;
1806 }
1807
1808 if (!ugeth->oldlink) {
1809 ugeth_info("%s: Link is up", dev->name);
1810 ugeth->oldlink = 1;
1811 netif_carrier_on(dev);
1812 netif_schedule(dev);
1813 }
1814 } else {
1815 if (ugeth->oldlink) {
1816 ugeth_info("%s: Link is down", dev->name);
1817 ugeth->oldlink = 0;
1818 ugeth->oldspeed = 0;
1819 ugeth->oldduplex = -1;
1820 netif_carrier_off(dev);
1821 }
1822 }
1823}
1824
1825/* Configure the PHY for dev.
 1826 * Returns 0 on success, -1 on failure.
1827 */
1828static int init_phy(struct net_device *dev)
1829{
1830 ucc_geth_private_t *ugeth = netdev_priv(dev);
1831 struct phy_info *curphy;
1832 ucc_mii_mng_t *mii_regs;
1833 struct ugeth_mii_info *mii_info;
1834 int err;
1835
1836 mii_regs = &ugeth->ug_regs->miimng;
1837
1838 ugeth->oldlink = 0;
1839 ugeth->oldspeed = 0;
1840 ugeth->oldduplex = -1;
1841
1842 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1843
1844 if (NULL == mii_info) {
1845 ugeth_err("%s: Could not allocate mii_info", dev->name);
1846 return -ENOMEM;
1847 }
1848
1849 mii_info->mii_regs = mii_regs;
1850 mii_info->speed = SPEED_1000;
1851 mii_info->duplex = DUPLEX_FULL;
1852 mii_info->pause = 0;
1853 mii_info->link = 0;
1854
1855 mii_info->advertising = (ADVERTISED_10baseT_Half |
1856 ADVERTISED_10baseT_Full |
1857 ADVERTISED_100baseT_Half |
1858 ADVERTISED_100baseT_Full |
1859 ADVERTISED_1000baseT_Full);
1860 mii_info->autoneg = 1;
1861
1862 mii_info->mii_id = ugeth->ug_info->phy_address;
1863
1864 mii_info->dev = dev;
1865
1866 mii_info->mdio_read = &read_phy_reg;
1867 mii_info->mdio_write = &write_phy_reg;
1868
1869 ugeth->mii_info = mii_info;
1870
1871 spin_lock_irq(&ugeth->lock);
1872
 1873 /* Set this UCC to be the master of the MII management */
1874 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1875
1876 if (init_mii_management_configuration(1,
1877 ugeth->ug_info->
1878 miiPreambleSupress,
1879 &mii_regs->miimcfg,
1880 &mii_regs->miimind)) {
1881 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1882 err = -1;
1883 goto bus_fail;
1884 }
1885
1886 spin_unlock_irq(&ugeth->lock);
1887
1888 /* get info for this PHY */
1889 curphy = get_phy_info(ugeth->mii_info);
1890
1891 if (curphy == NULL) {
1892 ugeth_err("%s: No PHY found", dev->name);
1893 err = -1;
1894 goto no_phy;
1895 }
1896
1897 mii_info->phyinfo = curphy;
1898
1899 /* Run the commands which initialize the PHY */
1900 if (curphy->init) {
1901 err = curphy->init(ugeth->mii_info);
1902 if (err)
1903 goto phy_init_fail;
1904 }
1905
1906 return 0;
1907
1908 phy_init_fail:
1909 no_phy:
1910 bus_fail:
1911 kfree(mii_info);
1912
1913 return err;
1914}
1915
1916#ifdef CONFIG_UGETH_TX_ON_DEMOND
1917static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
1918{
1919 ucc_fast_transmit_on_demand(ugeth->uccf);
1920
1921 return 0;
1922}
1923#endif
1924
1925static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
1926{
1927 ucc_fast_private_t *uccf;
1928 u32 cecr_subblock;
1929 u32 temp;
1930
1931 uccf = ugeth->uccf;
1932
1933 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1934 temp = in_be32(uccf->p_uccm);
1935 temp &= ~UCCE_GRA;
1936 out_be32(uccf->p_uccm, temp);
1937 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1938
1939 /* Issue host command */
1940 cecr_subblock =
1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1943 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1944
1945 /* Wait for command to complete */
1946 do {
1947 temp = in_be32(uccf->p_ucce);
1948 } while (!(temp & UCCE_GRA));
1949
1950 uccf->stopped_tx = 1;
1951
1952 return 0;
1953}
1954
1955static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
1956{
1957 ucc_fast_private_t *uccf;
1958 u32 cecr_subblock;
1959 u8 temp;
1960
1961 uccf = ugeth->uccf;
1962
1963 /* Clear acknowledge bit */
1964 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1965 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1966 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1967
1968 /* Keep issuing command and checking acknowledge bit until
1969 it is asserted, according to spec */
1970 do {
1971 /* Issue host command */
1972 cecr_subblock =
1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1974 ucc_num);
1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1976 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1977
1978 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1980
1981 uccf->stopped_rx = 1;
1982
1983 return 0;
1984}
1985
1986static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
1987{
1988 ucc_fast_private_t *uccf;
1989 u32 cecr_subblock;
1990
1991 uccf = ugeth->uccf;
1992
1993 cecr_subblock =
1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1995 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
1996 0);
1997 uccf->stopped_tx = 0;
1998
1999 return 0;
2000}
2001
2002static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
2003{
2004 ucc_fast_private_t *uccf;
2005 u32 cecr_subblock;
2006
2007 uccf = ugeth->uccf;
2008
2009 cecr_subblock =
2010 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
2011 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
2012 0);
2013 uccf->stopped_rx = 0;
2014
2015 return 0;
2016}
2017
2018static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2019{
2020 ucc_fast_private_t *uccf;
2021 int enabled_tx, enabled_rx;
2022
2023 uccf = ugeth->uccf;
2024
2025 /* check if the UCC number is in range. */
2026 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2027 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2028 return -EINVAL;
2029 }
2030
2031 enabled_tx = uccf->enabled_tx;
2032 enabled_rx = uccf->enabled_rx;
2033
2034 /* Get Tx and Rx going again, in case this channel was actively
2035 disabled. */
2036 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
2037 ugeth_restart_tx(ugeth);
2038 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
2039 ugeth_restart_rx(ugeth);
2040
2041 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
2042
2043 return 0;
2044
2045}
2046
2047static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
2048{
2049 ucc_fast_private_t *uccf;
2050
2051 uccf = ugeth->uccf;
2052
2053 /* check if the UCC number is in range. */
2054 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2055 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2056 return -EINVAL;
2057 }
2058
2059 /* Stop any transmissions */
2060 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
2061 ugeth_graceful_stop_tx(ugeth);
2062
2063 /* Stop any receptions */
2064 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
2065 ugeth_graceful_stop_rx(ugeth);
2066
2067 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
2068
2069 return 0;
2070}
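
/* Minimal usage sketch: the disable/enable pair brackets any
 * reconfiguration that needs the controller quiescent, e.g.:
 *
 *	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 *	... reprogram registers or parameter RAM ...
 *	ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
 *
 * Both calls are safe regardless of the current state, as the checks
 * above only stop or restart what is actually running. */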
2071
2072static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
2073{
2074#ifdef DEBUG
2075 ucc_fast_dump_regs(ugeth->uccf);
2076 dump_regs(ugeth);
2077 dump_bds(ugeth);
2078#endif
2079}
2080
2081#ifdef CONFIG_UGETH_FILTERING
2082static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
2083 p_UccGethTadParams,
2084 qe_fltr_tad_t *qe_fltr_tad)
2085{
2086 u16 temp;
2087
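	/* Serialized TAD layout, as built below:
	 *	byte 0: V/EF/REJ flags plus the high bits of the VTAG opcode
	 *	byte 1: low bits of the VTAG opcode, non-VTAG opcode flag, RQOS
	 *	byte 2: VLAN priority and the upper 4 bits of the VID
	 *	byte 3: lower 8 bits of the VID
	 */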
2088 /* Zero serialized TAD */
2089 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2090
2091 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2092 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2093 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2094 || (p_UccGethTadParams->vnontag_op !=
2095 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2096 )
2097 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2098 if (p_UccGethTadParams->reject_frame)
2099 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2100 temp =
2101 (u16) (((u16) p_UccGethTadParams->
2102 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2103 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2104
2105 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2106 if (p_UccGethTadParams->vnontag_op ==
2107 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2108 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2109 qe_fltr_tad->serialized[1] |=
2110 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2111
2112 qe_fltr_tad->serialized[2] |=
2113 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2114 /* upper bits */
2115 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
2116 /* lower bits */
2117 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
2118
2119 return 0;
2120}
2121
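/* Search a hash CQ for a matching address.  Each entry is dequeued and,
 * if it does not match, enqueued again at the tail; a matching entry is
 * returned while still removed from the CQ, so the caller must either
 * re-queue it or free it. */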
2122static enet_addr_container_t
2123 *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
2124 enet_addr_t *p_enet_addr)
2125{
2126 enet_addr_container_t *enet_addr_cont;
2127 struct list_head *p_lh;
2128 u16 i, num;
2129 int32_t j;
2130 u8 *p_counter;
2131
2132 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2133 p_lh = &ugeth->group_hash_q;
2134 p_counter = &(ugeth->numGroupAddrInHash);
2135 } else {
2136 p_lh = &ugeth->ind_hash_q;
2137 p_counter = &(ugeth->numIndAddrInHash);
2138 }
2139
2140 if (!p_lh)
2141 return NULL;
2142
2143 num = *p_counter;
2144
2145 for (i = 0; i < num; i++) {
2146 enet_addr_cont =
2147 (enet_addr_container_t *)
2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2151 break;
2152 if (j == 0)
2153 return enet_addr_cont; /* Found */
2154 }
2155 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2156 }
2157 return NULL;
2158}
2159
2160static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
2161 enet_addr_t *p_enet_addr)
2162{
2163 ucc_geth_enet_address_recognition_location_e location;
2164 enet_addr_container_t *enet_addr_cont;
2165 struct list_head *p_lh;
2166 u8 i;
2167 u32 limit;
2168 u8 *p_counter;
2169
2170 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2171 p_lh = &ugeth->group_hash_q;
2172 limit = ugeth->ug_info->maxGroupAddrInHash;
2173 location =
2174 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2175 p_counter = &(ugeth->numGroupAddrInHash);
2176 } else {
2177 p_lh = &ugeth->ind_hash_q;
2178 limit = ugeth->ug_info->maxIndAddrInHash;
2179 location =
2180 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2181 p_counter = &(ugeth->numIndAddrInHash);
2182 }
2183
2184 if ((enet_addr_cont =
2185 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
 2186 list_add(&enet_addr_cont->node, p_lh); /* Put it back */
2187 return 0;
2188 }
2189 if ((!p_lh) || (!(*p_counter < limit)))
2190 return -EBUSY;
2191 if (!(enet_addr_cont = get_enet_addr_container()))
2192 return -ENOMEM;
2193 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2194 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2195 enet_addr_cont->location = location;
 2196 enqueue(p_lh, &enet_addr_cont->node); /* Queue the new container */
2197 ++(*p_counter);
2198
2199 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2200
2201 return 0;
2202}
2203
2204static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
2205 enet_addr_t *p_enet_addr)
2206{
2207 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2208 enet_addr_container_t *enet_addr_cont;
2209 ucc_fast_private_t *uccf;
2210 comm_dir_e comm_dir;
2211 u16 i, num;
2212 struct list_head *p_lh;
2213 u32 *addr_h, *addr_l;
2214 u8 *p_counter;
2215
2216 uccf = ugeth->uccf;
2217
2218 p_82xx_addr_filt =
2219 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2220 addressfiltering;
2221
 2222 enet_addr_cont =
 2223     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
 2224 if (!enet_addr_cont)
 2225 return -ENOENT;
2226
2227 /* It's been found and removed from the CQ. */
2228 /* Now destroy its container */
2229 put_enet_addr_container(enet_addr_cont);
2230
2231 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2232 addr_h = &(p_82xx_addr_filt->gaddr_h);
2233 addr_l = &(p_82xx_addr_filt->gaddr_l);
2234 p_lh = &ugeth->group_hash_q;
2235 p_counter = &(ugeth->numGroupAddrInHash);
2236 } else {
2237 addr_h = &(p_82xx_addr_filt->iaddr_h);
2238 addr_l = &(p_82xx_addr_filt->iaddr_l);
2239 p_lh = &ugeth->ind_hash_q;
2240 p_counter = &(ugeth->numIndAddrInHash);
2241 }
2242
2243 comm_dir = 0;
2244 if (uccf->enabled_tx)
2245 comm_dir |= COMM_DIR_TX;
2246 if (uccf->enabled_rx)
2247 comm_dir |= COMM_DIR_RX;
2248 if (comm_dir)
2249 ugeth_disable(ugeth, comm_dir);
2250
2251 /* Clear the hash table. */
2252 out_be32(addr_h, 0x00000000);
2253 out_be32(addr_l, 0x00000000);
2254
2255 /* Add all remaining CQ elements back into hash */
2256 num = --(*p_counter);
2257 for (i = 0; i < num; i++) {
2258 enet_addr_cont =
2259 (enet_addr_container_t *)
2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2261 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2263 }
2264
2265 if (comm_dir)
2266 ugeth_enable(ugeth, comm_dir);
2267
2268 return 0;
2269}
2270#endif /* CONFIG_UGETH_FILTERING */
2271
2272static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
2273 ugeth,
2274 enet_addr_type_e
2275 enet_addr_type)
2276{
2277 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2278 ucc_fast_private_t *uccf;
2279 comm_dir_e comm_dir;
2280 struct list_head *p_lh;
2281 u16 i, num;
2282 u32 *addr_h, *addr_l;
2283 u8 *p_counter;
2284
2285 uccf = ugeth->uccf;
2286
2287 p_82xx_addr_filt =
2288 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2289 addressfiltering;
2290
2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2292 addr_h = &(p_82xx_addr_filt->gaddr_h);
2293 addr_l = &(p_82xx_addr_filt->gaddr_l);
2294 p_lh = &ugeth->group_hash_q;
2295 p_counter = &(ugeth->numGroupAddrInHash);
2296 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2297 addr_h = &(p_82xx_addr_filt->iaddr_h);
2298 addr_l = &(p_82xx_addr_filt->iaddr_l);
2299 p_lh = &ugeth->ind_hash_q;
2300 p_counter = &(ugeth->numIndAddrInHash);
2301 } else
2302 return -EINVAL;
2303
2304 comm_dir = 0;
2305 if (uccf->enabled_tx)
2306 comm_dir |= COMM_DIR_TX;
2307 if (uccf->enabled_rx)
2308 comm_dir |= COMM_DIR_RX;
2309 if (comm_dir)
2310 ugeth_disable(ugeth, comm_dir);
2311
2312 /* Clear the hash table. */
2313 out_be32(addr_h, 0x00000000);
2314 out_be32(addr_l, 0x00000000);
2315
2316 if (!p_lh)
2317 return 0;
2318
2319 num = *p_counter;
2320
2321 /* Delete all remaining CQ elements */
2322 for (i = 0; i < num; i++)
2323 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2324
2325 *p_counter = 0;
2326
2327 if (comm_dir)
2328 ugeth_enable(ugeth, comm_dir);
2329
2330 return 0;
2331}
2332
2333#ifdef CONFIG_UGETH_FILTERING
2334static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
2335 enet_addr_t *p_enet_addr,
2336 u8 paddr_num)
2337{
2338 int i;
2339
2340 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2341 ugeth_warn
2342 ("%s: multicast address added to paddr will have no "
2343 "effect - is this what you wanted?",
2344 __FUNCTION__);
2345
2346 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2347 /* store address in our database */
2348 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2349 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2350 /* put in hardware */
2351 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2352}
2353#endif /* CONFIG_UGETH_FILTERING */
2354
2355static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
2356 u8 paddr_num)
2357{
2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2360}
2361
2362static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
2363{
2364 u16 i, j;
2365 u8 *bd;
2366
2367 if (!ugeth)
2368 return;
2369
2370 if (ugeth->uccf)
2371 ucc_fast_free(ugeth->uccf);
2372
2373 if (ugeth->p_thread_data_tx) {
2374 qe_muram_free(ugeth->thread_dat_tx_offset);
2375 ugeth->p_thread_data_tx = NULL;
2376 }
2377 if (ugeth->p_thread_data_rx) {
2378 qe_muram_free(ugeth->thread_dat_rx_offset);
2379 ugeth->p_thread_data_rx = NULL;
2380 }
2381 if (ugeth->p_exf_glbl_param) {
2382 qe_muram_free(ugeth->exf_glbl_param_offset);
2383 ugeth->p_exf_glbl_param = NULL;
2384 }
2385 if (ugeth->p_rx_glbl_pram) {
2386 qe_muram_free(ugeth->rx_glbl_pram_offset);
2387 ugeth->p_rx_glbl_pram = NULL;
2388 }
2389 if (ugeth->p_tx_glbl_pram) {
2390 qe_muram_free(ugeth->tx_glbl_pram_offset);
2391 ugeth->p_tx_glbl_pram = NULL;
2392 }
2393 if (ugeth->p_send_q_mem_reg) {
2394 qe_muram_free(ugeth->send_q_mem_reg_offset);
2395 ugeth->p_send_q_mem_reg = NULL;
2396 }
2397 if (ugeth->p_scheduler) {
2398 qe_muram_free(ugeth->scheduler_offset);
2399 ugeth->p_scheduler = NULL;
2400 }
2401 if (ugeth->p_tx_fw_statistics_pram) {
2402 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2403 ugeth->p_tx_fw_statistics_pram = NULL;
2404 }
2405 if (ugeth->p_rx_fw_statistics_pram) {
2406 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2407 ugeth->p_rx_fw_statistics_pram = NULL;
2408 }
2409 if (ugeth->p_rx_irq_coalescing_tbl) {
2410 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2411 ugeth->p_rx_irq_coalescing_tbl = NULL;
2412 }
2413 if (ugeth->p_rx_bd_qs_tbl) {
2414 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2415 ugeth->p_rx_bd_qs_tbl = NULL;
2416 }
2417 if (ugeth->p_init_enet_param_shadow) {
2418 return_init_enet_entries(ugeth,
2419 &(ugeth->p_init_enet_param_shadow->
2420 rxthread[0]),
2421 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2422 ugeth->ug_info->riscRx, 1);
2423 return_init_enet_entries(ugeth,
2424 &(ugeth->p_init_enet_param_shadow->
2425 txthread[0]),
2426 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2427 ugeth->ug_info->riscTx, 0);
2428 kfree(ugeth->p_init_enet_param_shadow);
2429 ugeth->p_init_enet_param_shadow = NULL;
2430 }
2431 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2432 bd = ugeth->p_tx_bd_ring[i];
2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2434 if (ugeth->tx_skbuff[i][j]) {
2435 dma_unmap_single(NULL,
2436 BD_BUFFER_ARG(bd),
2437 (BD_STATUS_AND_LENGTH(bd) &
2438 BD_LENGTH_MASK),
2439 DMA_TO_DEVICE);
2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2441 ugeth->tx_skbuff[i][j] = NULL;
 2442 }
 bd += UCC_GETH_SIZE_OF_BD; /* advance to the next BD */
 2443 }
2444
2445 kfree(ugeth->tx_skbuff[i]);
2446
2447 if (ugeth->p_tx_bd_ring[i]) {
2448 if (ugeth->ug_info->uf_info.bd_mem_part ==
2449 MEM_PART_SYSTEM)
2450 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2451 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2452 MEM_PART_MURAM)
2453 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2454 ugeth->p_tx_bd_ring[i] = NULL;
2455 }
2456 }
2457 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2458 if (ugeth->p_rx_bd_ring[i]) {
2459 /* Return existing data buffers in ring */
2460 bd = ugeth->p_rx_bd_ring[i];
2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2462 if (ugeth->rx_skbuff[i][j]) {
2463 dma_unmap_single(NULL, BD_BUFFER(bd),
2464 ugeth->ug_info->
2465 uf_info.
2466 max_rx_buf_length +
2467 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2468 DMA_FROM_DEVICE);
2469
2470 dev_kfree_skb_any(ugeth->
2471 rx_skbuff[i][j]);
2472 ugeth->rx_skbuff[i][j] = NULL;
2473 }
2474 bd += UCC_GETH_SIZE_OF_BD;
2475 }
2476
2477 kfree(ugeth->rx_skbuff[i]);
2478
2479 if (ugeth->ug_info->uf_info.bd_mem_part ==
2480 MEM_PART_SYSTEM)
2481 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2482 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2483 MEM_PART_MURAM)
2484 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2485 ugeth->p_rx_bd_ring[i] = NULL;
2486 }
2487 }
2488 while (!list_empty(&ugeth->group_hash_q))
2489 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2490 (dequeue(&ugeth->group_hash_q)));
2491 while (!list_empty(&ugeth->ind_hash_q))
2492 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2493 (dequeue(&ugeth->ind_hash_q)));
2494
2495}
2496
2497static void ucc_geth_set_multi(struct net_device *dev)
2498{
2499 ucc_geth_private_t *ugeth;
2500 struct dev_mc_list *dmi;
2501 ucc_fast_t *uf_regs;
2502 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2503 enet_addr_t tempaddr;
2504 u8 *mcptr, *tdptr;
2505 int i, j;
2506
2507 ugeth = netdev_priv(dev);
2508
2509 uf_regs = ugeth->uccf->uf_regs;
2510
2511 if (dev->flags & IFF_PROMISC) {
2512
2513 /* Log any net taps. */
 2514 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
 2515 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
2516
2517 } else {
2518
 2519 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) & ~UPSMR_PRO);
2520
2521 p_82xx_addr_filt =
2522 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
2523 p_rx_glbl_pram->addressfiltering;
2524
2525 if (dev->flags & IFF_ALLMULTI) {
2526 /* Catch all multicast addresses, so set the
2527 * filter to all 1's.
2528 */
2529 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2530 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2531 } else {
2532 /* Clear filter and add the addresses in the list.
2533 */
2534 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2535 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2536
2537 dmi = dev->mc_list;
2538
2539 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2540
2541 /* Only support group multicast for now.
2542 */
2543 if (!(dmi->dmi_addr[0] & 1))
2544 continue;
2545
2546 /* The address in dmi_addr is LSB first,
 2547 * and tempaddr is MSB first. We have to
2548 * copy bytes MSB first from dmi_addr.
2549 */
2550 mcptr = (u8 *) dmi->dmi_addr + 5;
 2551 tdptr = (u8 *)&tempaddr;
2552 for (j = 0; j < 6; j++)
2553 *tdptr++ = *mcptr--;
2554
 2555 /* Ask the QE to run the CRC and set the bit in
 2556 * the filter mask.
2557 */
2558 hw_add_addr_in_hash(ugeth, &tempaddr);
2559
2560 }
2561 }
2562 }
2563}
2564
2565static void ucc_geth_stop(ucc_geth_private_t *ugeth)
2566{
2567 ucc_geth_t *ug_regs = ugeth->ug_regs;
2568 u32 tempval;
2569
2570 ugeth_vdbg("%s: IN", __FUNCTION__);
2571
2572 /* Disable the controller */
2573 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2574
2575 /* Tell the kernel the link is down */
2576 ugeth->mii_info->link = 0;
2577 adjust_link(ugeth->dev);
2578
2579 /* Mask all interrupts */
2580 out_be32(ugeth->uccf->p_ucce, 0x00000000);
2581
2582 /* Clear all interrupts */
2583 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2584
2585 /* Disable Rx and Tx */
2586 tempval = in_be32(&ug_regs->maccfg1);
2587 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2588 out_be32(&ug_regs->maccfg1, tempval);
2589
2590 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2591 /* Clear any pending interrupts */
2592 mii_clear_phy_interrupt(ugeth->mii_info);
2593
2594 /* Disable PHY Interrupts */
2595 mii_configure_phy_interrupt(ugeth->mii_info,
2596 MII_INTERRUPT_DISABLED);
2597 }
2598
2599 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2600
2601 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2602 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2603 } else {
2604 del_timer_sync(&ugeth->phy_info_timer);
2605 }
2606
2607 ucc_geth_memclean(ugeth);
2608}
2609
2610static int ucc_geth_startup(ucc_geth_private_t *ugeth)
2611{
2612 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2613 ucc_geth_init_pram_t *p_init_enet_pram;
2614 ucc_fast_private_t *uccf;
2615 ucc_geth_info_t *ug_info;
2616 ucc_fast_info_t *uf_info;
2617 ucc_fast_t *uf_regs;
2618 ucc_geth_t *ug_regs;
2619 int ret_val = -EINVAL;
2620 u32 remoder = UCC_GETH_REMODER_INIT;
2621 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2622 u32 ifstat, i, j, size, l2qt, l3qt, length;
2623 u16 temoder = UCC_GETH_TEMODER_INIT;
2624 u16 test;
2625 u8 function_code = 0;
2626 u8 *bd, *endOfRing;
2627 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2628
2629 ugeth_vdbg("%s: IN", __FUNCTION__);
2630
2631 ug_info = ugeth->ug_info;
2632 uf_info = &ug_info->uf_info;
2633
2634 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2635 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2636 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2637 return -EINVAL;
2638 }
2639
2640 /* Rx BD lengths */
2641 for (i = 0; i < ug_info->numQueuesRx; i++) {
2642 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2643 (ug_info->bdRingLenRx[i] %
2644 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2645 ugeth_err
2646 ("%s: Rx BD ring length must be multiple of 4,"
2647 " no smaller than 8.", __FUNCTION__);
2648 return -EINVAL;
2649 }
2650 }
2651
2652 /* Tx BD lengths */
2653 for (i = 0; i < ug_info->numQueuesTx; i++) {
2654 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2655 ugeth_err
2656 ("%s: Tx BD ring length must be no smaller than 2.",
2657 __FUNCTION__);
2658 return -EINVAL;
2659 }
2660 }
2661
2662 /* mrblr */
2663 if ((uf_info->max_rx_buf_length == 0) ||
2664 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2665 ugeth_err
2666 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2667 __FUNCTION__);
2668 return -EINVAL;
2669 }
2670
2671 /* num Tx queues */
2672 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2673 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2674 return -EINVAL;
2675 }
2676
2677 /* num Rx queues */
2678 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2679 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2680 return -EINVAL;
2681 }
2682
2683 /* l2qt */
2684 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2685 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2686 ugeth_err
2687 ("%s: VLAN priority table entry must not be"
2688 " larger than number of Rx queues.",
2689 __FUNCTION__);
2690 return -EINVAL;
2691 }
2692 }
2693
2694 /* l3qt */
2695 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2696 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2697 ugeth_err
2698 ("%s: IP priority table entry must not be"
2699 " larger than number of Rx queues.",
2700 __FUNCTION__);
2701 return -EINVAL;
2702 }
2703 }
2704
2705 if (ug_info->cam && !ug_info->ecamptr) {
2706 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2707 __FUNCTION__);
2708 return -EINVAL;
2709 }
2710
2711 if ((ug_info->numStationAddresses !=
2712 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2713 && ug_info->rxExtendedFiltering) {
2714 ugeth_err("%s: Number of station addresses greater than 1 "
2715 "not allowed in extended parsing mode.",
2716 __FUNCTION__);
2717 return -EINVAL;
2718 }
2719
2720 /* Generate uccm_mask for receive */
2721 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2722 for (i = 0; i < ug_info->numQueuesRx; i++)
2723 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2724
2725 for (i = 0; i < ug_info->numQueuesTx; i++)
2726 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2727 /* Initialize the general fast UCC block. */
2728 if (ucc_fast_init(uf_info, &uccf)) {
2729 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2730 ucc_geth_memclean(ugeth);
2731 return -ENOMEM;
2732 }
2733 ugeth->uccf = uccf;
2734
2735 switch (ug_info->numThreadsRx) {
2736 case UCC_GETH_NUM_OF_THREADS_1:
2737 numThreadsRxNumerical = 1;
2738 break;
2739 case UCC_GETH_NUM_OF_THREADS_2:
2740 numThreadsRxNumerical = 2;
2741 break;
2742 case UCC_GETH_NUM_OF_THREADS_4:
2743 numThreadsRxNumerical = 4;
2744 break;
2745 case UCC_GETH_NUM_OF_THREADS_6:
2746 numThreadsRxNumerical = 6;
2747 break;
2748 case UCC_GETH_NUM_OF_THREADS_8:
2749 numThreadsRxNumerical = 8;
2750 break;
2751 default:
2752 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2753 ucc_geth_memclean(ugeth);
 2754 return -EINVAL;
2756 }
2757
2758 switch (ug_info->numThreadsTx) {
2759 case UCC_GETH_NUM_OF_THREADS_1:
2760 numThreadsTxNumerical = 1;
2761 break;
2762 case UCC_GETH_NUM_OF_THREADS_2:
2763 numThreadsTxNumerical = 2;
2764 break;
2765 case UCC_GETH_NUM_OF_THREADS_4:
2766 numThreadsTxNumerical = 4;
2767 break;
2768 case UCC_GETH_NUM_OF_THREADS_6:
2769 numThreadsTxNumerical = 6;
2770 break;
2771 case UCC_GETH_NUM_OF_THREADS_8:
2772 numThreadsTxNumerical = 8;
2773 break;
2774 default:
2775 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2776 ucc_geth_memclean(ugeth);
 2777 return -EINVAL;
2779 }
2780
2781 /* Calculate rx_extended_features */
2782 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2783 ug_info->ipAddressAlignment ||
2784 (ug_info->numStationAddresses !=
2785 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2786
2787 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2788 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2789 || (ug_info->vlanOperationNonTagged !=
2790 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2791
2792 uf_regs = uccf->uf_regs;
2793 ug_regs = (ucc_geth_t *) (uccf->uf_regs);
2794 ugeth->ug_regs = ug_regs;
2795
2796 init_default_reg_vals(&uf_regs->upsmr,
2797 &ug_regs->maccfg1, &ug_regs->maccfg2);
2798
2799 /* Set UPSMR */
2800 /* For more details see the hardware spec. */
2801 init_rx_parameters(ug_info->bro,
2802 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2803
2804 /* We're going to ignore other registers for now, */
2805 /* except as needed to get up and running */
2806
2807 /* Set MACCFG1 */
2808 /* For more details see the hardware spec. */
2809 init_flow_control_params(ug_info->aufc,
2810 ug_info->receiveFlowControl,
2811 1,
2812 ug_info->pausePeriod,
2813 ug_info->extensionField,
2814 &uf_regs->upsmr,
2815 &ug_regs->uempr, &ug_regs->maccfg1);
2816
2817 maccfg1 = in_be32(&ug_regs->maccfg1);
2818 maccfg1 |= MACCFG1_ENABLE_RX;
2819 maccfg1 |= MACCFG1_ENABLE_TX;
2820 out_be32(&ug_regs->maccfg1, maccfg1);
2821
2822 /* Set IPGIFG */
2823 /* For more details see the hardware spec. */
2824 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2825 ug_info->nonBackToBackIfgPart2,
2826 ug_info->
2827 miminumInterFrameGapEnforcement,
2828 ug_info->backToBackInterFrameGap,
2829 &ug_regs->ipgifg);
2830 if (ret_val != 0) {
2831 ugeth_err("%s: IPGIFG initialization parameter too large.",
2832 __FUNCTION__);
2833 ucc_geth_memclean(ugeth);
2834 return ret_val;
2835 }
2836
2837 /* Set HAFDUP */
2838 /* For more details see the hardware spec. */
2839 ret_val = init_half_duplex_params(ug_info->altBeb,
2840 ug_info->backPressureNoBackoff,
2841 ug_info->noBackoff,
2842 ug_info->excessDefer,
2843 ug_info->altBebTruncation,
2844 ug_info->maxRetransmission,
2845 ug_info->collisionWindow,
2846 &ug_regs->hafdup);
2847 if (ret_val != 0) {
2848 ugeth_err("%s: Half Duplex initialization parameter too large.",
2849 __FUNCTION__);
2850 ucc_geth_memclean(ugeth);
2851 return ret_val;
2852 }
2853
2854 /* Set IFSTAT */
2855 /* For more details see the hardware spec. */
2856 /* Read only - resets upon read */
2857 ifstat = in_be32(&ug_regs->ifstat);
2858
2859 /* Clear UEMPR */
2860 /* For more details see the hardware spec. */
2861 out_be32(&ug_regs->uempr, 0);
2862
2863 /* Set UESCR */
2864 /* For more details see the hardware spec. */
2865 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2866 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2867 0, &uf_regs->upsmr, &ug_regs->uescr);
2868
2869 /* Allocate Tx bds */
2870 for (j = 0; j < ug_info->numQueuesTx; j++) {
2871 /* Allocate in multiple of
2872 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2873 according to spec */
2874 length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
2875 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2876 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2877 if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
2878 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2879 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2880 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
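			/* kmalloc() may not honour the BD ring alignment,
			 * so over-allocate and round the address up; the
			 * raw pointer stays in tx_bd_ring_offset[] so it
			 * can be passed to kfree() later.  (The Rx rings
			 * below use the same trick.) */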
2881 u32 align = 4;
2882 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2883 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2884 ugeth->tx_bd_ring_offset[j] =
2885 (u32) (kmalloc((u32) (length + align),
2886 GFP_KERNEL));
2887 if (ugeth->tx_bd_ring_offset[j] != 0)
2888 ugeth->p_tx_bd_ring[j] =
2889 (void*)((ugeth->tx_bd_ring_offset[j] +
2890 align) & ~(align - 1));
2891 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2892 ugeth->tx_bd_ring_offset[j] =
2893 qe_muram_alloc(length,
2894 UCC_GETH_TX_BD_RING_ALIGNMENT);
2895 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2896 ugeth->p_tx_bd_ring[j] =
2897 (u8 *) qe_muram_addr(ugeth->
2898 tx_bd_ring_offset[j]);
2899 }
2900 if (!ugeth->p_tx_bd_ring[j]) {
2901 ugeth_err
2902 ("%s: Can not allocate memory for Tx bd rings.",
2903 __FUNCTION__);
2904 ucc_geth_memclean(ugeth);
2905 return -ENOMEM;
2906 }
2907 /* Zero unused end of bd ring, according to spec */
2908 memset(ugeth->p_tx_bd_ring[j] +
2909 ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
2910 length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
2911 }
2912
2913 /* Allocate Rx bds */
2914 for (j = 0; j < ug_info->numQueuesRx; j++) {
2915 length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
2916 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2917 u32 align = 4;
2918 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2919 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2920 ugeth->rx_bd_ring_offset[j] =
2921 (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
2922 if (ugeth->rx_bd_ring_offset[j] != 0)
2923 ugeth->p_rx_bd_ring[j] =
2924 (void*)((ugeth->rx_bd_ring_offset[j] +
2925 align) & ~(align - 1));
2926 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2927 ugeth->rx_bd_ring_offset[j] =
2928 qe_muram_alloc(length,
2929 UCC_GETH_RX_BD_RING_ALIGNMENT);
2930 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2931 ugeth->p_rx_bd_ring[j] =
2932 (u8 *) qe_muram_addr(ugeth->
2933 rx_bd_ring_offset[j]);
2934 }
2935 if (!ugeth->p_rx_bd_ring[j]) {
2936 ugeth_err
2937 ("%s: Can not allocate memory for Rx bd rings.",
2938 __FUNCTION__);
2939 ucc_geth_memclean(ugeth);
2940 return -ENOMEM;
2941 }
2942 }
2943
2944 /* Init Tx bds */
2945 for (j = 0; j < ug_info->numQueuesTx; j++) {
2946 /* Setup the skbuff rings */
2947 ugeth->tx_skbuff[j] =
2948 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2949 ugeth->ug_info->bdRingLenTx[j],
2950 GFP_KERNEL);
2951
2952 if (ugeth->tx_skbuff[j] == NULL) {
2953 ugeth_err("%s: Could not allocate tx_skbuff",
2954 __FUNCTION__);
2955 ucc_geth_memclean(ugeth);
2956 return -ENOMEM;
2957 }
2958
2959 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2960 ugeth->tx_skbuff[j][i] = NULL;
2961
2962 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2963 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2964 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2965 BD_BUFFER_CLEAR(bd);
2966 BD_STATUS_AND_LENGTH_SET(bd, 0);
2967 bd += UCC_GETH_SIZE_OF_BD;
2968 }
2969 bd -= UCC_GETH_SIZE_OF_BD;
2970 BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
2971 }
2972
2973 /* Init Rx bds */
2974 for (j = 0; j < ug_info->numQueuesRx; j++) {
2975 /* Setup the skbuff rings */
2976 ugeth->rx_skbuff[j] =
2977 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2978 ugeth->ug_info->bdRingLenRx[j],
2979 GFP_KERNEL);
2980
2981 if (ugeth->rx_skbuff[j] == NULL) {
2982 ugeth_err("%s: Could not allocate rx_skbuff",
2983 __FUNCTION__);
2984 ucc_geth_memclean(ugeth);
2985 return -ENOMEM;
2986 }
2987
2988 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2989 ugeth->rx_skbuff[j][i] = NULL;
2990
2991 ugeth->skb_currx[j] = 0;
2992 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2993 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2994 BD_STATUS_AND_LENGTH_SET(bd, R_I);
2995 BD_BUFFER_CLEAR(bd);
2996 bd += UCC_GETH_SIZE_OF_BD;
2997 }
2998 bd -= UCC_GETH_SIZE_OF_BD;
2999 BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
3000 }
3001
3002 /*
3003 * Global PRAM
3004 */
3005 /* Tx global PRAM */
3006 /* Allocate global tx parameter RAM page */
3007 ugeth->tx_glbl_pram_offset =
3008 qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
3009 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
3010 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
3011 ugeth_err
3012 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
3013 __FUNCTION__);
3014 ucc_geth_memclean(ugeth);
3015 return -ENOMEM;
3016 }
3017 ugeth->p_tx_glbl_pram =
3018 (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
3019 tx_glbl_pram_offset);
3020 /* Zero out p_tx_glbl_pram */
3021 memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
3022
3023 /* Fill global PRAM */
3024
3025 /* TQPTR */
3026 /* Size varies with number of Tx threads */
3027 ugeth->thread_dat_tx_offset =
3028 qe_muram_alloc(numThreadsTxNumerical *
3029 sizeof(ucc_geth_thread_data_tx_t) +
3030 32 * (numThreadsTxNumerical == 1),
3031 UCC_GETH_THREAD_DATA_ALIGNMENT);
3032 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
3033 ugeth_err
3034 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
3035 __FUNCTION__);
3036 ucc_geth_memclean(ugeth);
3037 return -ENOMEM;
3038 }
3039
3040 ugeth->p_thread_data_tx =
3041 (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
3042 thread_dat_tx_offset);
3043 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
3044
3045 /* vtagtable */
3046 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
3047 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
3048 ug_info->vtagtable[i]);
3049
3050 /* iphoffset */
3051 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
3052 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
3053
3054 /* SQPTR */
3055 /* Size varies with number of Tx queues */
3056 ugeth->send_q_mem_reg_offset =
3057 qe_muram_alloc(ug_info->numQueuesTx *
3058 sizeof(ucc_geth_send_queue_qd_t),
3059 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
3060 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
3061 ugeth_err
3062 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
3063 __FUNCTION__);
3064 ucc_geth_memclean(ugeth);
3065 return -ENOMEM;
3066 }
3067
3068 ugeth->p_send_q_mem_reg =
3069 (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
3070 send_q_mem_reg_offset);
3071 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
3072
3073 /* Setup the table */
3074 /* Assume BD rings are already established */
3075 for (i = 0; i < ug_info->numQueuesTx; i++) {
3076 endOfRing =
3077 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3078 1) * UCC_GETH_SIZE_OF_BD;
3079 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3080 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3081 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3082 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3083 last_bd_completed_address,
3084 (u32) virt_to_phys(endOfRing));
3085 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3086 MEM_PART_MURAM) {
3087 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3088 (u32) immrbar_virt_to_phys(ugeth->
3089 p_tx_bd_ring[i]));
3090 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3091 last_bd_completed_address,
3092 (u32) immrbar_virt_to_phys(endOfRing));
3093 }
3094 }
3095
3096 /* schedulerbasepointer */
3097
3098 if (ug_info->numQueuesTx > 1) {
3099 /* scheduler exists only if more than 1 tx queue */
3100 ugeth->scheduler_offset =
3101 qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
3102 UCC_GETH_SCHEDULER_ALIGNMENT);
3103 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3104 ugeth_err
3105 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3106 __FUNCTION__);
3107 ucc_geth_memclean(ugeth);
3108 return -ENOMEM;
3109 }
3110
3111 ugeth->p_scheduler =
3112 (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
3113 scheduler_offset);
3114 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3115 ugeth->scheduler_offset);
3116 /* Zero out p_scheduler */
3117 memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
3118
3119 /* Set values in scheduler */
3120 out_be32(&ugeth->p_scheduler->mblinterval,
3121 ug_info->mblinterval);
3122 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3123 ug_info->nortsrbytetime);
3124 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3125 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3126 ugeth->p_scheduler->txasap = ug_info->txasap;
3127 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3128 for (i = 0; i < NUM_TX_QUEUES; i++)
3129 ugeth->p_scheduler->weightfactor[i] =
3130 ug_info->weightfactor[i];
3131
3132 /* Set pointers to cpucount registers in scheduler */
3133 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3134 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3135 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3136 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3137 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3138 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3139 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3140 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3141 }
3142
3143 /* schedulerbasepointer */
3144 /* TxRMON_PTR (statistics) */
3145 if (ug_info->
3146 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3147 ugeth->tx_fw_statistics_pram_offset =
3148 qe_muram_alloc(sizeof
3149 (ucc_geth_tx_firmware_statistics_pram_t),
3150 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3151 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3152 ugeth_err
3153 ("%s: Can not allocate DPRAM memory for"
3154 " p_tx_fw_statistics_pram.", __FUNCTION__);
3155 ucc_geth_memclean(ugeth);
3156 return -ENOMEM;
3157 }
3158 ugeth->p_tx_fw_statistics_pram =
3159 (ucc_geth_tx_firmware_statistics_pram_t *)
3160 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3161 /* Zero out p_tx_fw_statistics_pram */
3162 memset(ugeth->p_tx_fw_statistics_pram,
3163 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
3164 }
3165
3166 /* temoder */
3167 /* Already has speed set */
3168
3169 if (ug_info->numQueuesTx > 1)
3170 temoder |= TEMODER_SCHEDULER_ENABLE;
3171 if (ug_info->ipCheckSumGenerate)
3172 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3173 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3174 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3175
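	/* Read TEMODER back; presumably this ensures the MURAM write has
	 * completed before the QE threads consume it. */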
3176 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
3177
3178 /* Function code register value to be used later */
3179 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3180 /* Required for QE */
3181
3182 /* function code register */
3183 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
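	/* The function code occupies the top byte of TSTATE; RSTATE is
	 * written with the same value directly further down, as it appears
	 * to be a byte-wide field. */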
3184
3185 /* Rx global PRAM */
3186 /* Allocate global rx parameter RAM page */
3187 ugeth->rx_glbl_pram_offset =
3188 qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
3189 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3190 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3191 ugeth_err
3192 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3193 __FUNCTION__);
3194 ucc_geth_memclean(ugeth);
3195 return -ENOMEM;
3196 }
3197 ugeth->p_rx_glbl_pram =
3198 (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
3199 rx_glbl_pram_offset);
3200 /* Zero out p_rx_glbl_pram */
3201 memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
3202
3203 /* Fill global PRAM */
3204
3205 /* RQPTR */
3206 /* Size varies with number of Rx threads */
3207 ugeth->thread_dat_rx_offset =
3208 qe_muram_alloc(numThreadsRxNumerical *
3209 sizeof(ucc_geth_thread_data_rx_t),
3210 UCC_GETH_THREAD_DATA_ALIGNMENT);
3211 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3212 ugeth_err
3213 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3214 __FUNCTION__);
3215 ucc_geth_memclean(ugeth);
3216 return -ENOMEM;
3217 }
3218
3219 ugeth->p_thread_data_rx =
3220 (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
3221 thread_dat_rx_offset);
3222 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3223
3224 /* typeorlen */
3225 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3226
3227 /* rxrmonbaseptr (statistics) */
3228 if (ug_info->
3229 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3230 ugeth->rx_fw_statistics_pram_offset =
3231 qe_muram_alloc(sizeof
3232 (ucc_geth_rx_firmware_statistics_pram_t),
3233 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3234 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3235 ugeth_err
3236 ("%s: Can not allocate DPRAM memory for"
3237 " p_rx_fw_statistics_pram.", __FUNCTION__);
3238 ucc_geth_memclean(ugeth);
3239 return -ENOMEM;
3240 }
3241 ugeth->p_rx_fw_statistics_pram =
3242 (ucc_geth_rx_firmware_statistics_pram_t *)
3243 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3244 /* Zero out p_rx_fw_statistics_pram */
3245 memset(ugeth->p_rx_fw_statistics_pram, 0,
3246 sizeof(ucc_geth_rx_firmware_statistics_pram_t));
3247 }
3248
3249 /* intCoalescingPtr */
3250
3251 /* Size varies with number of Rx queues */
3252 ugeth->rx_irq_coalescing_tbl_offset =
3253 qe_muram_alloc(ug_info->numQueuesRx *
3254 sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
3255 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3256 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3257 ugeth_err
3258 ("%s: Can not allocate DPRAM memory for"
3259 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3260 ucc_geth_memclean(ugeth);
3261 return -ENOMEM;
3262 }
3263
3264 ugeth->p_rx_irq_coalescing_tbl =
3265 (ucc_geth_rx_interrupt_coalescing_table_t *)
3266 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3267 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3268 ugeth->rx_irq_coalescing_tbl_offset);
3269
3270 /* Fill interrupt coalescing table */
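	/* Each entry's counter is seeded with its maximum value; presumably
	 * the hardware counts it down, raises the interrupt at zero, and
	 * reloads from interruptcoalescingmaxvalue. */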
3271 for (i = 0; i < ug_info->numQueuesRx; i++) {
3272 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3273 interruptcoalescingmaxvalue,
3274 ug_info->interruptcoalescingmaxvalue[i]);
3275 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3276 interruptcoalescingcounter,
3277 ug_info->interruptcoalescingmaxvalue[i]);
3278 }
3279
3280 /* MRBLR */
3281 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3282 &ugeth->p_rx_glbl_pram->mrblr);
3283 /* MFLR */
3284 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3285 /* MINFLR */
3286 init_min_frame_len(ug_info->minFrameLength,
3287 &ugeth->p_rx_glbl_pram->minflr,
3288 &ugeth->p_rx_glbl_pram->mrblr);
3289 /* MAXD1 */
3290 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3291 /* MAXD2 */
3292 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3293
3294 /* l2qt */
3295 l2qt = 0;
3296 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3297 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3298 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3299
3300 /* l3qt */
3301 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3302 l3qt = 0;
3303 for (i = 0; i < 8; i++)
3304 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3305 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
3306 }
3307
3308 /* vlantype */
3309 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3310
3311 /* vlantci */
3312 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3313
3314 /* ecamptr */
3315 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3316
3317 /* RBDQPTR */
3318 /* Size varies with number of Rx queues */
3319 ugeth->rx_bd_qs_tbl_offset =
3320 qe_muram_alloc(ug_info->numQueuesRx *
3321 (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3322 sizeof(ucc_geth_rx_prefetched_bds_t)),
3323 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3324 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3325 ugeth_err
3326 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3327 __FUNCTION__);
3328 ucc_geth_memclean(ugeth);
3329 return -ENOMEM;
3330 }
3331
3332 ugeth->p_rx_bd_qs_tbl =
3333 (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
3334 rx_bd_qs_tbl_offset);
3335 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3336 /* Zero out p_rx_bd_qs_tbl */
3337 memset(ugeth->p_rx_bd_qs_tbl,
3338 0,
3339 ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3340 sizeof(ucc_geth_rx_prefetched_bds_t)));
3341
3342 /* Setup the table */
3343 /* Assume BD rings are already established */
3344 for (i = 0; i < ug_info->numQueuesRx; i++) {
3345 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3346 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3347 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3348 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3349 MEM_PART_MURAM) {
3350 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3351 (u32) immrbar_virt_to_phys(ugeth->
3352 p_rx_bd_ring[i]));
3353 }
3354 /* rest of fields handled by QE */
3355 }
3356
3357 /* remoder */
3358 /* Already has speed set */
3359
3360 if (ugeth->rx_extended_features)
3361 remoder |= REMODER_RX_EXTENDED_FEATURES;
3362 if (ug_info->rxExtendedFiltering)
3363 remoder |= REMODER_RX_EXTENDED_FILTERING;
3364 if (ug_info->dynamicMaxFrameLength)
3365 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3366 if (ug_info->dynamicMinFrameLength)
3367 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3368 remoder |=
3369 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3370 remoder |=
3371 ug_info->
3372 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3373 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3374 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3375 if (ug_info->ipCheckSumCheck)
3376 remoder |= REMODER_IP_CHECKSUM_CHECK;
3377 if (ug_info->ipAddressAlignment)
3378 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3379 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3380
3381 /* Note that this function must be called */
3382 /* ONLY AFTER p_tx_fw_statistics_pram */
 3383 /* and p_rx_fw_statistics_pram are allocated ! */
3384 init_firmware_statistics_gathering_mode((ug_info->
3385 statisticsMode &
3386 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3387 (ug_info->statisticsMode &
3388 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3389 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3390 ugeth->tx_fw_statistics_pram_offset,
3391 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3392 ugeth->rx_fw_statistics_pram_offset,
3393 &ugeth->p_tx_glbl_pram->temoder,
3394 &ugeth->p_rx_glbl_pram->remoder);
3395
3396 /* function code register */
3397 ugeth->p_rx_glbl_pram->rstate = function_code;
3398
3399 /* initialize extended filtering */
3400 if (ug_info->rxExtendedFiltering) {
3401 if (!ug_info->extendedFilteringChainPointer) {
3402 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3403 __FUNCTION__);
3404 ucc_geth_memclean(ugeth);
3405 return -EINVAL;
3406 }
3407
3408 /* Allocate memory for extended filtering Mode Global
3409 Parameters */
3410 ugeth->exf_glbl_param_offset =
3411 qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
3412 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3413 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3414 ugeth_err
3415 ("%s: Can not allocate DPRAM memory for"
3416 " p_exf_glbl_param.", __FUNCTION__);
3417 ucc_geth_memclean(ugeth);
3418 return -ENOMEM;
3419 }
3420
3421 ugeth->p_exf_glbl_param =
3422 (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
3423 exf_glbl_param_offset);
3424 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3425 ugeth->exf_glbl_param_offset);
3426 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3427 (u32) ug_info->extendedFilteringChainPointer);
3428
3429 } else { /* initialize 82xx style address filtering */
3430
3431 /* Init individual address recognition registers to disabled */
3432
3433 for (j = 0; j < NUM_OF_PADDRS; j++)
3434 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3435
3436 /* Create CQs for hash tables */
3437 if (ug_info->maxGroupAddrInHash > 0) {
3438 INIT_LIST_HEAD(&ugeth->group_hash_q);
3439 }
3440 if (ug_info->maxIndAddrInHash > 0) {
3441 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3442 }
3443 p_82xx_addr_filt =
3444 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
3445 p_rx_glbl_pram->addressfiltering;
3446
3447 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3448 ENET_ADDR_TYPE_GROUP);
3449 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3450 ENET_ADDR_TYPE_INDIVIDUAL);
3451 }
3452
3453 /*
3454 * Initialize UCC at QE level
3455 */
3456
3457 command = QE_INIT_TX_RX;
3458
3459 /* Allocate shadow InitEnet command parameter structure.
3460 * This is needed because after the InitEnet command is executed,
3461 * the structure in DPRAM is released, because DPRAM is a premium
3462 * resource.
3463 * This shadow structure keeps a copy of what was done so that the
3464 * allocated resources can be released when the channel is freed.
3465 */
3466 if (!(ugeth->p_init_enet_param_shadow =
3467 (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
3468 GFP_KERNEL))) {
3469 ugeth_err
3470 ("%s: Can not allocate memory for"
3471 " p_UccInitEnetParamShadows.", __FUNCTION__);
3472 ucc_geth_memclean(ugeth);
3473 return -ENOMEM;
3474 }
3475 /* Zero out *p_init_enet_param_shadow */
3476 memset((char *)ugeth->p_init_enet_param_shadow,
3477 0, sizeof(ucc_geth_init_pram_t));
3478
3479 /* Fill shadow InitEnet command parameter structure */
3480
3481 ugeth->p_init_enet_param_shadow->resinit1 =
3482 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3483 ugeth->p_init_enet_param_shadow->resinit2 =
3484 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3485 ugeth->p_init_enet_param_shadow->resinit3 =
3486 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3487 ugeth->p_init_enet_param_shadow->resinit4 =
3488 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3489 ugeth->p_init_enet_param_shadow->resinit5 =
3490 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3491 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3492 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3493 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3494 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3495
3496 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3497 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3498 if ((ug_info->largestexternallookupkeysize !=
3499 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3500 && (ug_info->largestexternallookupkeysize !=
3501 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3502 && (ug_info->largestexternallookupkeysize !=
3503 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3504 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3505 __FUNCTION__);
3506 ucc_geth_memclean(ugeth);
3507 return -EINVAL;
3508 }
3509 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3510 ug_info->largestexternallookupkeysize;
3511 size = sizeof(ucc_geth_thread_rx_pram_t);
3512 if (ug_info->rxExtendedFiltering) {
3513 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3514 		if (ug_info->largestexternallookupkeysize ==
3515 		    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3516 			size +=
3517 			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3518 		if (ug_info->largestexternallookupkeysize ==
3519 		    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3520 			size +=
3521 			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3522 }
3523
3524 	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3525 			p_init_enet_param_shadow->rxthread[0]),
3526 			/* Rx needs one extra entry for the terminator */
3527 			(u8) (numThreadsRxNumerical + 1),
3528 			size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3529 			ug_info->riscRx, 1)) != 0) {
3530 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3531 __FUNCTION__);
3532 ucc_geth_memclean(ugeth);
3533 return ret_val;
3534 }
3535
3536 ugeth->p_init_enet_param_shadow->txglobal =
3537 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3538 if ((ret_val =
3539 fill_init_enet_entries(ugeth,
3540 &(ugeth->p_init_enet_param_shadow->
3541 txthread[0]), numThreadsTxNumerical,
3542 sizeof(ucc_geth_thread_tx_pram_t),
3543 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3544 ug_info->riscTx, 0)) != 0) {
3545 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3546 __FUNCTION__);
3547 ucc_geth_memclean(ugeth);
3548 return ret_val;
3549 }
3550
3551 /* Load Rx bds with buffers */
3552 for (i = 0; i < ug_info->numQueuesRx; i++) {
3553 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3554 ugeth_err("%s: Can not fill Rx bds with buffers.",
3555 __FUNCTION__);
3556 ucc_geth_memclean(ugeth);
3557 return ret_val;
3558 }
3559 }
3560
3561 /* Allocate InitEnet command parameter structure */
3562 init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
3563 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3564 ugeth_err
3565 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3566 __FUNCTION__);
3567 ucc_geth_memclean(ugeth);
3568 return -ENOMEM;
3569 }
3570 p_init_enet_pram =
3571 (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
3572
3573 /* Copy shadow InitEnet command parameter structure into PRAM */
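	/* (the PRAM is big-endian device memory, hence the out_be16()/
	 * out_be32() accessors for the multi-byte fields) */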
3574 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3575 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3576 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3577 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3578 out_be16(&p_init_enet_pram->resinit5,
3579 ugeth->p_init_enet_param_shadow->resinit5);
3580 p_init_enet_pram->largestexternallookupkeysize =
3581 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3582 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3583 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3584 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3585 out_be32(&p_init_enet_pram->rxthread[i],
3586 ugeth->p_init_enet_param_shadow->rxthread[i]);
3587 out_be32(&p_init_enet_pram->txglobal,
3588 ugeth->p_init_enet_param_shadow->txglobal);
3589 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3590 out_be32(&p_init_enet_pram->txthread[i],
3591 ugeth->p_init_enet_param_shadow->txthread[i]);
3592
3593 /* Issue QE command */
3594 cecr_subblock =
3595 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3596 qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
3597 init_enet_pram_offset);
3598
3599 /* Free InitEnet command parameter */
3600 qe_muram_free(init_enet_pram_offset);
3601
3602 return 0;
3603}
3604
3605/* returns a net_device_stats structure pointer */
3606static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3607{
3608 ucc_geth_private_t *ugeth = netdev_priv(dev);
3609
3610 return &(ugeth->stats);
3611}
3612
3613/* ucc_geth_timeout gets called when a packet has not been
3614 * transmitted after a set amount of time.
3615 * For now, assume that clearing out all the structures, and
3616 * starting over will fix the problem. */
3617static void ucc_geth_timeout(struct net_device *dev)
3618{
3619 ucc_geth_private_t *ugeth = netdev_priv(dev);
3620
3621 ugeth_vdbg("%s: IN", __FUNCTION__);
3622
3623 ugeth->stats.tx_errors++;
3624
3625 ugeth_dump_regs(ugeth);
3626
3627 if (dev->flags & IFF_UP) {
3628 ucc_geth_stop(ugeth);
3629 ucc_geth_startup(ugeth);
3630 }
3631
3632 netif_schedule(dev);
3633}
3634
3635/* This is called by the kernel when a frame is ready for transmission. */
3636/* It is pointed to by the dev->hard_start_xmit function pointer */
3637static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3638{
3639 ucc_geth_private_t *ugeth = netdev_priv(dev);
3640 u8 *bd; /* BD pointer */
3641 u32 bd_status;
3642 u8 txQ = 0;
3643
3644 ugeth_vdbg("%s: IN", __FUNCTION__);
3645
3646 spin_lock_irq(&ugeth->lock);
3647
3648 ugeth->stats.tx_bytes += skb->len;
3649
3650 /* Start from the next BD that should be filled */
3651 bd = ugeth->txBd[txQ];
3652 bd_status = BD_STATUS_AND_LENGTH(bd);
3653 /* Save the skb pointer so we can free it later */
3654 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3655
3656 /* Update the current skb pointer (wrapping if this was the last) */
3657 ugeth->skb_curtx[txQ] =
3658 (ugeth->skb_curtx[txQ] +
3659 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3660
3661 /* set up the buffer descriptor */
3662 BD_BUFFER_SET(bd,
3663 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3664
3667 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3668
3669 BD_STATUS_AND_LENGTH_SET(bd, bd_status);
3670
3671 dev->trans_start = jiffies;
3672
3673 /* Move to next BD in the ring */
3674 if (!(bd_status & T_W))
3675 ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
3676 else
3677 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
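	/* (the BD whose status carries the wrap (T_W) bit is the last entry
	 * in the ring, hence the reset to the ring base above) */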
3678
3679 /* If the next BD still needs to be cleaned up, then the bds
3680 are full. We need to tell the kernel to stop sending us stuff. */
3681 if (bd == ugeth->confBd[txQ]) {
3682 if (!netif_queue_stopped(dev))
3683 netif_stop_queue(dev);
3684 }
3685
3686 if (ugeth->p_scheduler) {
3687 ugeth->cpucount[txQ]++;
3688 /* Indicate to QE that there are more Tx bds ready for
3689 transmission */
3690 /* This is done by writing a running counter of the bd
3691 count to the scheduler PRAM. */
3692 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3693 }
3694
3695 spin_unlock_irq(&ugeth->lock);
3696
3697 return 0;
3698}
3699
3700static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
3701{
3702 struct sk_buff *skb;
3703 u8 *bd;
3704 u16 length, howmany = 0;
3705 u32 bd_status;
3706 u8 *bdBuffer;
3707
3708 ugeth_vdbg("%s: IN", __FUNCTION__);
3709
3710 spin_lock(&ugeth->lock);
3711 /* collect received buffers */
3712 bd = ugeth->rxBd[rxQ];
3713
3714 bd_status = BD_STATUS_AND_LENGTH(bd);
3715
3716 	/* while there are received buffers (R_E clear) and the work limit allows */
3717 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3718 bdBuffer = (u8 *) BD_BUFFER(bd);
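			/* the reported length includes 4 trailing bytes
			   (the frame check sequence), which are stripped */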
3719 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3720 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3721
3722 /* determine whether buffer is first, last, first and last
3723 (single buffer frame) or middle (not first and not last) */
3724 if (!skb ||
3725 (!(bd_status & (R_F | R_L))) ||
3726 (bd_status & R_ERRORS_FATAL)) {
3727 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3728 __FUNCTION__, __LINE__, (u32) skb);
3729 if (skb)
3730 dev_kfree_skb_any(skb);
3731
3732 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3733 ugeth->stats.rx_dropped++;
3734 } else {
3735 ugeth->stats.rx_packets++;
3736 howmany++;
3737
3738 /* Prep the skb for the packet */
3739 skb_put(skb, length);
3740
3741 /* Tell the skb what kind of packet this is */
3742 skb->protocol = eth_type_trans(skb, ugeth->dev);
3743
3744 ugeth->stats.rx_bytes += length;
3745 /* Send the packet up the stack */
3746#ifdef CONFIG_UGETH_NAPI
3747 netif_receive_skb(skb);
3748#else
3749 netif_rx(skb);
3750#endif /* CONFIG_UGETH_NAPI */
3751 }
3752
3753 ugeth->dev->last_rx = jiffies;
3754
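		/* re-arm this ring slot with a fresh receive buffer */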
3755 skb = get_new_skb(ugeth, bd);
3756 if (!skb) {
3757 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3758 spin_unlock(&ugeth->lock);
3759 ugeth->stats.rx_dropped++;
3760 break;
3761 }
3762
3763 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3764
3765 /* update to point at the next skb */
3766 ugeth->skb_currx[rxQ] =
3767 (ugeth->skb_currx[rxQ] +
3768 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3769
3770 if (bd_status & R_W)
3771 bd = ugeth->p_rx_bd_ring[rxQ];
3772 else
3773 bd += UCC_GETH_SIZE_OF_BD;
3774
3775 bd_status = BD_STATUS_AND_LENGTH(bd);
3776 }
3777
3778 ugeth->rxBd[rxQ] = bd;
3779 spin_unlock(&ugeth->lock);
3780 return howmany;
3781}
3782
3783static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3784{
3785 /* Start from the next BD that should be filled */
3786 ucc_geth_private_t *ugeth = netdev_priv(dev);
3787 u8 *bd; /* BD pointer */
3788 u32 bd_status;
3789
3790 bd = ugeth->confBd[txQ];
3791 bd_status = BD_STATUS_AND_LENGTH(bd);
3792
3793 /* Normal processing. */
3794 while ((bd_status & T_R) == 0) {
3795 /* BD contains already transmitted buffer. */
3796 /* Handle the transmitted buffer and release */
3797 /* the BD to be used with the current frame */
3798
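		/* Stop when we reach the BD the xmit path will fill next,
		   unless the queue is stopped because the ring filled up. */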
3799 		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3800 break;
3801
3802 ugeth->stats.tx_packets++;
3803
3804 /* Free the sk buffer associated with this TxBD */
3805 dev_kfree_skb_irq(ugeth->
3806 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3807 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3808 ugeth->skb_dirtytx[txQ] =
3809 (ugeth->skb_dirtytx[txQ] +
3810 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3811
3812 /* We freed a buffer, so now we can restart transmission */
3813 if (netif_queue_stopped(dev))
3814 netif_wake_queue(dev);
3815
3816 /* Advance the confirmation BD pointer */
3817 if (!(bd_status & T_W))
3818 ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
3819 else
3820 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3821 }
3822 return 0;
3823}
3824
3825#ifdef CONFIG_UGETH_NAPI
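/* Old-style NAPI poll: consume up to min(*budget, dev->quota) received
 * frames on queue 0, charge them against both budgets, and take the
 * device off the poll list once the quota was not exhausted. */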
3826static int ucc_geth_poll(struct net_device *dev, int *budget)
3827{
3828 ucc_geth_private_t *ugeth = netdev_priv(dev);
3829 int howmany;
3830 int rx_work_limit = *budget;
3831 u8 rxQ = 0;
3832
3833 if (rx_work_limit > dev->quota)
3834 rx_work_limit = dev->quota;
3835
3836 howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
3837
3838 dev->quota -= howmany;
3839 rx_work_limit -= howmany;
3840 *budget -= howmany;
3841
3842 if (rx_work_limit >= 0)
3843 netif_rx_complete(dev);
3844
3845 return (rx_work_limit < 0) ? 1 : 0;
3846}
3847#endif /* CONFIG_UGETH_NAPI */
3848
3849static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
3850 struct pt_regs *regs)
3851{
3852 struct net_device *dev = (struct net_device *)info;
3853 ucc_geth_private_t *ugeth = netdev_priv(dev);
3854 ucc_fast_private_t *uccf;
3855 ucc_geth_info_t *ug_info;
3856 register u32 ucce = 0;
3857 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
3858 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
3859 register u8 i;
3860
3861 ugeth_vdbg("%s: IN", __FUNCTION__);
3862
3863 if (!ugeth)
3864 return IRQ_NONE;
3865
3866 uccf = ugeth->uccf;
3867 ug_info = ugeth->ug_info;
3868
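	/* Latch the enabled events (UCCE masked by UCCM), acknowledge them
	 * by writing the same bits back, then dispatch per-queue Rx and Tx
	 * handling by walking one single-bit mask per queue. */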
3869 do {
3870 ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
3871
3872 /* clear event bits for next time */
3873 /* Side effect here is to mask ucce variable
3874 for future processing below. */
3875 out_be32(uccf->p_ucce, ucce); /* Clear with ones,
3876 but only bits in UCCM */
3877
3878 /* We ignore Tx interrupts because Tx confirmation is
3879 done inside Tx routine */
3880
3881 for (i = 0; i < ug_info->numQueuesRx; i++) {
3882 if (ucce & bit_mask)
3883 ucc_geth_rx(ugeth, i,
3884 (int)ugeth->ug_info->
3885 bdRingLenRx[i]);
3886 ucce &= ~bit_mask;
3887 bit_mask <<= 1;
3888 }
3889
3890 for (i = 0; i < ug_info->numQueuesTx; i++) {
3891 if (ucce & tx_mask)
3892 ucc_geth_tx(dev, i);
3893 ucce &= ~tx_mask;
3894 tx_mask <<= 1;
3895 }
3896
3897 /* Exceptions */
3898 if (ucce & UCCE_BSY) {
3899 ugeth_vdbg("Got BUSY irq!!!!");
3900 ugeth->stats.rx_errors++;
3901 ucce &= ~UCCE_BSY;
3902 }
3903 if (ucce & UCCE_OTHER) {
3904 ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
3905 ucce);
3906 ugeth->stats.rx_errors++;
3907 			ucce = 0;
3908 }
3909 	} while (ucce);
3911
3912 return IRQ_HANDLED;
3913}
3914
3915static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3916{
3917 struct net_device *dev = (struct net_device *)dev_id;
3918 ucc_geth_private_t *ugeth = netdev_priv(dev);
3919
3920 ugeth_vdbg("%s: IN", __FUNCTION__);
3921
3922 /* Clear the interrupt */
3923 mii_clear_phy_interrupt(ugeth->mii_info);
3924
3925 /* Disable PHY interrupts */
3926 mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
3927
3928 /* Schedule the phy change */
3929 schedule_work(&ugeth->tq);
3930
3931 return IRQ_HANDLED;
3932}
3933
3934/* Scheduled by the phy_interrupt/timer to handle PHY changes */
3935static void ugeth_phy_change(void *data)
3936{
3937 struct net_device *dev = (struct net_device *)data;
3938 ucc_geth_private_t *ugeth = netdev_priv(dev);
3939 ucc_geth_t *ug_regs;
3940 int result = 0;
3941
3942 ugeth_vdbg("%s: IN", __FUNCTION__);
3943
3944 ug_regs = ugeth->ug_regs;
3945
3946 /* Delay to give the PHY a chance to change the
3947 * register state */
3948 msleep(1);
3949
3950 /* Update the link, speed, duplex */
3951 result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
3952
3953 /* Adjust the known status as long as the link
3954 * isn't still coming up */
3955 if ((0 == result) || (ugeth->mii_info->link == 0))
3956 adjust_link(dev);
3957
3958 /* Reenable interrupts, if needed */
3959 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
3960 mii_configure_phy_interrupt(ugeth->mii_info,
3961 MII_INTERRUPT_ENABLED);
3962}
3963
3964/* Called every so often on systems that don't interrupt
3965 * the core for PHY changes */
3966static void ugeth_phy_timer(unsigned long data)
3967{
3968 struct net_device *dev = (struct net_device *)data;
3969 ucc_geth_private_t *ugeth = netdev_priv(dev);
3970
3971 schedule_work(&ugeth->tq);
3972
3973 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3974}
3975
3976/* Keep trying aneg for some time
3977  * If, after UGETH_AN_TIMEOUT seconds, it has not
3978 * finished, we switch to forced.
3979 * Either way, once the process has completed, we either
3980 * request the interrupt, or switch the timer over to
3981 * using ugeth_phy_timer to check status */
3982static void ugeth_phy_startup_timer(unsigned long data)
3983{
3984 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
3985 ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
3986 static int secondary = UGETH_AN_TIMEOUT;
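	/* Note: 'secondary' is static, so the countdown persists across
	 * timer runs and is shared by every interface; concurrent
	 * autonegotiation on several UCCs would share one timeout budget. */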
3987 int result;
3988
3989 /* Configure the Auto-negotiation */
3990 result = mii_info->phyinfo->config_aneg(mii_info);
3991
3992 /* If autonegotiation failed to start, and
3993 * we haven't timed out, reset the timer, and return */
3994 if (result && secondary--) {
3995 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
3996 return;
3997 } else if (result) {
3998 /* Couldn't start autonegotiation.
3999 * Try switching to forced */
4000 mii_info->autoneg = 0;
4001 result = mii_info->phyinfo->config_aneg(mii_info);
4002
4003 /* Forcing failed! Give up */
4004 if (result) {
4005 ugeth_err("%s: Forcing failed!", mii_info->dev->name);
4006 return;
4007 }
4008 }
4009
4010 /* Kill the timer so it can be restarted */
4011 del_timer_sync(&ugeth->phy_info_timer);
4012
4013 /* Grab the PHY interrupt, if necessary/possible */
4014 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
4015 if (request_irq(ugeth->ug_info->phy_interrupt,
4016 phy_interrupt,
4017 SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
4018 ugeth_err("%s: Can't get IRQ %d (PHY)",
4019 mii_info->dev->name,
4020 ugeth->ug_info->phy_interrupt);
4021 } else {
4022 mii_configure_phy_interrupt(ugeth->mii_info,
4023 MII_INTERRUPT_ENABLED);
4024 return;
4025 }
4026 }
4027
4028 /* Start the timer again, this time in order to
4029 * handle a change in status */
4030 init_timer(&ugeth->phy_info_timer);
4031 ugeth->phy_info_timer.function = &ugeth_phy_timer;
4032 ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
4033 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
4034}
4035
4036/* Called when something needs to use the ethernet device */
4037/* Returns 0 for success. */
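/* Bring-up order: start the controller, program the enet interface and
 * MAC station address registers, attach the PHY, request the IRQ on
 * non-NAPI builds, then enable Rx/Tx and start the Tx queue. */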
4038static int ucc_geth_open(struct net_device *dev)
4039{
4040 ucc_geth_private_t *ugeth = netdev_priv(dev);
4041 int err;
4042
4043 ugeth_vdbg("%s: IN", __FUNCTION__);
4044
4045 /* Test station address */
4046 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
4047 ugeth_err("%s: Multicast address used for station address"
4048 " - is this what you wanted?", __FUNCTION__);
4049 return -EINVAL;
4050 }
4051
4052 err = ucc_geth_startup(ugeth);
4053 if (err) {
4054 ugeth_err("%s: Cannot configure net device, aborting.",
4055 dev->name);
4056 return err;
4057 }
4058
4059 err = adjust_enet_interface(ugeth);
4060 if (err) {
4061 ugeth_err("%s: Cannot configure net device, aborting.",
4062 dev->name);
4063 return err;
4064 }
4065
4066 /* Set MACSTNADDR1, MACSTNADDR2 */
4067 /* For more details see the hardware spec. */
4068 init_mac_station_addr_regs(dev->dev_addr[0],
4069 dev->dev_addr[1],
4070 dev->dev_addr[2],
4071 dev->dev_addr[3],
4072 dev->dev_addr[4],
4073 dev->dev_addr[5],
4074 &ugeth->ug_regs->macstnaddr1,
4075 &ugeth->ug_regs->macstnaddr2);
4076
4077 err = init_phy(dev);
4078 if (err) {
4079 		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
4080 return err;
4081 }
4082#ifndef CONFIG_UGETH_NAPI
4083 err =
4084 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
4085 "UCC Geth", dev);
4086 if (err) {
4087 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
4088 dev->name);
4089 ucc_geth_stop(ugeth);
4090 return err;
4091 }
4092#endif /* CONFIG_UGETH_NAPI */
4093
4094 /* Set up the PHY change work queue */
4095 INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
4096
4097 init_timer(&ugeth->phy_info_timer);
4098 ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
4099 ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
4100 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
4101
4102 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
4103 if (err) {
4104 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
4105 ucc_geth_stop(ugeth);
4106 return err;
4107 }
4108
4109 netif_start_queue(dev);
4110
4111 return err;
4112}
4113
4114/* Stops the kernel queue, and halts the controller */
4115static int ucc_geth_close(struct net_device *dev)
4116{
4117 ucc_geth_private_t *ugeth = netdev_priv(dev);
4118
4119 ugeth_vdbg("%s: IN", __FUNCTION__);
4120
4121 ucc_geth_stop(ugeth);
4122
4123 /* Shutdown the PHY */
4124 if (ugeth->mii_info->phyinfo->close)
4125 ugeth->mii_info->phyinfo->close(ugeth->mii_info);
4126
4127 kfree(ugeth->mii_info);
4128
4129 netif_stop_queue(dev);
4130
4131 return 0;
4132}
4133
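/* All operations are left NULL for now; the ethtool core rejects the
 * corresponding requests on its own. */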
4134 static struct ethtool_ops ucc_geth_ethtool_ops = {
4135 .get_settings = NULL,
4136 .get_drvinfo = NULL,
4137 .get_regs_len = NULL,
4138 .get_regs = NULL,
4139 .get_link = NULL,
4140 .get_coalesce = NULL,
4141 .set_coalesce = NULL,
4142 .get_ringparam = NULL,
4143 .set_ringparam = NULL,
4144 .get_strings = NULL,
4145 .get_stats_count = NULL,
4146 .get_ethtool_stats = NULL,
4147};
4148
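/* Platform bus probe: copy the board-specific ucc_geth_platform_data
 * into this UCC's ugeth_info slot, route the QE MII management signals
 * once for the whole group, then allocate and register the net_device. */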
4149static int ucc_geth_probe(struct device *device)
4150{
4151 struct platform_device *pdev = to_platform_device(device);
4152 struct ucc_geth_platform_data *ugeth_pdata;
4153 struct net_device *dev = NULL;
4154 struct ucc_geth_private *ugeth = NULL;
4155 struct ucc_geth_info *ug_info;
4156 int err;
4157 static int mii_mng_configured = 0;
4158
4159 ugeth_vdbg("%s: IN", __FUNCTION__);
4160
4161 	ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
4162 	if (ugeth_pdata == NULL) {
4163 		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
4164 			  pdev->id);
4165 		return -ENODEV;
4166 	}
4167 
4168 	ug_info = &ugeth_info[pdev->id];
4169 	ug_info->uf_info.ucc_num = pdev->id;
4170 	ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
4171 	ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
4172 	ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
4173 	ug_info->uf_info.irq = platform_get_irq(pdev, 0);
4174 	ug_info->phy_address = ugeth_pdata->phy_id;
4175 	ug_info->enet_interface = ugeth_pdata->phy_interface;
4176 	ug_info->board_flags = ugeth_pdata->board_flags;
4177 	ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
4178 
4179 	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%08x (irq = %d)\n",
4180 	       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
4181 	       ug_info->uf_info.irq);
4183
4184 if (!mii_mng_configured) {
4185 ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
4186 mii_mng_configured = 1;
4187 }
4188
4189 /* Create an ethernet device instance */
4190 dev = alloc_etherdev(sizeof(*ugeth));
4191
4192 if (dev == NULL)
4193 return -ENOMEM;
4194
4195 ugeth = netdev_priv(dev);
4196 spin_lock_init(&ugeth->lock);
4197
4198 dev_set_drvdata(device, dev);
4199
4200 	/* Set the dev->base_addr to the UCC register region */
4201 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
4202
4203 SET_MODULE_OWNER(dev);
4204 SET_NETDEV_DEV(dev, device);
4205
4206 /* Fill in the dev structure */
4207 dev->open = ucc_geth_open;
4208 dev->hard_start_xmit = ucc_geth_start_xmit;
4209 dev->tx_timeout = ucc_geth_timeout;
4210 dev->watchdog_timeo = TX_TIMEOUT;
4211#ifdef CONFIG_UGETH_NAPI
4212 dev->poll = ucc_geth_poll;
4213 dev->weight = UCC_GETH_DEV_WEIGHT;
4214#endif /* CONFIG_UGETH_NAPI */
4215 dev->stop = ucc_geth_close;
4216 dev->get_stats = ucc_geth_get_stats;
4218 dev->mtu = 1500;
4219 dev->set_multicast_list = ucc_geth_set_multi;
4220 dev->ethtool_ops = &ucc_geth_ethtool_ops;
4221
4222 	ugeth->ug_info = ug_info;
4223 	ugeth->dev = dev;
4224 	memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
4225 
4226 	err = register_netdev(dev);
4227 	if (err) {
4228 		ugeth_err("%s: Cannot register net device, aborting.",
4229 			  dev->name);
4230 		free_netdev(dev);
4231 		return err;
4232 	}
4233 
4234 	return 0;
4235}
4236
4237static int ucc_geth_remove(struct device *device)
4238{
4239 struct net_device *dev = dev_get_drvdata(device);
4240 struct ucc_geth_private *ugeth = netdev_priv(dev);
4241
4242 dev_set_drvdata(device, NULL);
4243 ucc_geth_memclean(ugeth);
4244 free_netdev(dev);
4245
4246 return 0;
4247}
4248
4249/* Structure for a device driver */
4250static struct device_driver ucc_geth_driver = {
4251 .name = DRV_NAME,
4252 .bus = &platform_bus_type,
4253 .probe = ucc_geth_probe,
4254 .remove = ucc_geth_remove,
4255};
4256
4257static int __init ucc_geth_init(void)
4258{
4259 int i;
4260 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4261 for (i = 0; i < 8; i++)
4262 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4263 sizeof(ugeth_primary_info));
4264
4265 return driver_register(&ucc_geth_driver);
4266}
4267
4268static void __exit ucc_geth_exit(void)
4269{
4270 driver_unregister(&ucc_geth_driver);
4271}
4272
4273module_init(ucc_geth_init);
4274module_exit(ucc_geth_exit);
4275
4276MODULE_AUTHOR("Freescale Semiconductor, Inc");
4277MODULE_DESCRIPTION(DRV_DESC);
4278MODULE_LICENSE("GPL");