author	Maciej W. Rozycki <macro@linux-mips.org>	2006-12-04 18:04:54 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-12-04 18:36:02 -0500
commit	3b6e8fe7eca12fca2cc7fde46ba2a94a86ab0815 (patch)
tree	bf6e7f0d0ee881f1fe671104c607b568f9d2a707 /drivers/net/declance.c
parent	4e1400796c93df5e7f92d766e4a4332d0c98795f (diff)
[PATCH] declance: Fix PMAX and PMAD support
The shared buffer used by the LANCE on the PMAX only supports halfword
(16-bit) accesses. And the PMAD has the buffer wired differently. This is
a change to fix these issues. Tested with a DECstation 2100 (thanks Flo
for making this possible) and a DECstation 5000/133 (both the PMAD and the
onboard LANCE).

Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
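For readers skimming the diff below: on the PMAX and on the IOASIC LANCE the shared buffer is only reachable as 16-bit halfwords, and each halfword occupies a full 32-bit slot in CPU address space, so the patch reaches every descriptor field by doubling its native byte offset. The C sketch that follows is an editorial illustration of that access pattern only; the *_sketch names, the trimmed structure and the enum values are stand-ins, not the driver's actual definitions.

/* Illustration of the shifted-offset access idea used by the patch.
 * Assumption: each 16-bit buffer word is word-aligned in CPU space on
 * PMAX/IOASIC boards, so byte offsets are doubled before dereferencing.
 * The type constants below are placeholders, not the driver's values.
 */
#include <stddef.h>
#include <stdint.h>

enum lance_type_sketch { PMAD_LANCE_S = 2, PMAX_LANCE_S = 3, ASIC_LANCE_S = 4 };

struct lance_init_block_sketch {
	uint16_t mode;			/* pre-set mode */
	uint16_t phys_addr[3];		/* station address */
	uint16_t filter[4];		/* multicast filter */
	/* ... descriptor rings follow ... */
};

/* Double the offset when every halfword sits in its own 32-bit slot. */
static inline size_t shift_off_sketch(size_t off, int type)
{
	return (type == ASIC_LANCE_S || type == PMAX_LANCE_S) ? off << 1 : off;
}

/* Point at a 16-bit field of the shared init block. */
static inline volatile uint16_t *lib_ptr_sketch(volatile void *ib,
						size_t field_off, int type)
{
	return (volatile uint16_t *)((volatile uint8_t *)ib +
				     shift_off_sketch(field_off, type));
}

/* Example: clear the mode word of a PMAX-style shared buffer. */
static void clear_mode_sketch(volatile void *shared_buf)
{
	*lib_ptr_sketch(shared_buf,
			offsetof(struct lance_init_block_sketch, mode),
			PMAX_LANCE_S) = 0;
}

The PMAD, by contrast, maps its on-board buffer linearly, which is why the patched cp_to_buf()/cp_from_buf() can fall back to a plain memcpy() for that board.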
Diffstat (limited to 'drivers/net/declance.c')
-rw-r--r--	drivers/net/declance.c	398
1 file changed, 207 insertions(+), 191 deletions(-)
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 00e2a8a134d7..1167f8f7c272 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -40,6 +40,10 @@
  *
  * v0.009: Module support fixes, multiple interfaces support, various
  *         bits. macro
+ *
+ * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ *         PMAX requirement to only use halfword accesses to the
+ *         buffer. macro
  */
 
 #include <linux/crc32.h>
@@ -54,6 +58,7 @@
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/types.h>
 
 #include <asm/addrspace.h>
 #include <asm/system.h>
@@ -67,7 +72,7 @@
 #include <asm/dec/tc.h>
 
 static char version[] __devinitdata =
-"declance.c: v0.009 by Linux MIPS DECstation task force\n";
+"declance.c: v0.010 by Linux MIPS DECstation task force\n";
 
 MODULE_AUTHOR("Linux MIPS DECstation task force");
 MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
@@ -110,24 +115,25 @@ MODULE_LICENSE("GPL");
 #define LE_C3_BCON	0x1	/* Byte control */
 
 /* Receive message descriptor 1 */
-#define LE_R1_OWN	0x80	/* Who owns the entry */
-#define LE_R1_ERR	0x40	/* Error: if FRA, OFL, CRC or BUF is set */
-#define LE_R1_FRA	0x20	/* FRA: Frame error */
-#define LE_R1_OFL	0x10	/* OFL: Frame overflow */
-#define LE_R1_CRC	0x08	/* CRC error */
-#define LE_R1_BUF	0x04	/* BUF: Buffer error */
-#define LE_R1_SOP	0x02	/* Start of packet */
-#define LE_R1_EOP	0x01	/* End of packet */
-#define LE_R1_POK	0x03	/* Packet is complete: SOP + EOP */
+#define LE_R1_OWN	0x8000	/* Who owns the entry */
+#define LE_R1_ERR	0x4000	/* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA	0x2000	/* FRA: Frame error */
+#define LE_R1_OFL	0x1000	/* OFL: Frame overflow */
+#define LE_R1_CRC	0x0800	/* CRC error */
+#define LE_R1_BUF	0x0400	/* BUF: Buffer error */
+#define LE_R1_SOP	0x0200	/* Start of packet */
+#define LE_R1_EOP	0x0100	/* End of packet */
+#define LE_R1_POK	0x0300	/* Packet is complete: SOP + EOP */
 
-#define LE_T1_OWN	0x80	/* Lance owns the packet */
-#define LE_T1_ERR	0x40	/* Error summary */
-#define LE_T1_EMORE	0x10	/* Error: more than one retry needed */
-#define LE_T1_EONE	0x08	/* Error: one retry needed */
-#define LE_T1_EDEF	0x04	/* Error: deferred */
-#define LE_T1_SOP	0x02	/* Start of packet */
-#define LE_T1_EOP	0x01	/* End of packet */
-#define LE_T1_POK	0x03	/* Packet is complete: SOP + EOP */
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN	0x8000	/* Lance owns the packet */
+#define LE_T1_ERR	0x4000	/* Error summary */
+#define LE_T1_EMORE	0x1000	/* Error: more than one retry needed */
+#define LE_T1_EONE	0x0800	/* Error: one retry needed */
+#define LE_T1_EDEF	0x0400	/* Error: deferred */
+#define LE_T1_SOP	0x0200	/* Start of packet */
+#define LE_T1_EOP	0x0100	/* End of packet */
+#define LE_T1_POK	0x0300	/* Packet is complete: SOP + EOP */
 
 #define LE_T3_BUF	0x8000	/* Buffer error */
 #define LE_T3_UFL	0x4000	/* Error underflow */
@@ -156,69 +162,57 @@ MODULE_LICENSE("GPL");
 #undef TEST_HITS
 #define ZERO 0
 
-/* The DS2000/3000 have a linear 64 KB buffer.
-
- * The PMAD-AA has 128 kb buffer on-board.
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only. Each halfword of the buffer is word-aligned in the
+ * CPU address space.
+ *
+ * The PMAD-AA has a 128 kB buffer on-board.
  *
- * The IOASIC LANCE devices use a shared memory region. This region as seen
- * from the CPU is (max) 128 KB long and has to be on an 128 KB boundary.
- * The LANCE sees this as a 64 KB long continuous memory region.
+ * The IOASIC LANCE devices use a shared memory region. This region
+ * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB
+ * boundary. The LANCE sees this as a 64 kB long continuous memory
+ * region.
  *
- * The LANCE's DMA address is used as an index in this buffer and DMA takes
- * place in bursts of eight 16-Bit words which are packed into four 32-Bit words
- * by the IOASIC. This leads to a strange padding: 16 bytes of valid data followed
- * by a 16 byte gap :-(.
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC. This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
  */
 
 struct lance_rx_desc {
 	unsigned short rmd0;		/* low address of packet */
-	short gap0;
-	unsigned char rmd1_hadr;	/* high address of packet */
-	unsigned char rmd1_bits;	/* descriptor bits */
-	short gap1;
+	unsigned short rmd1;		/* high address of packet
+					   and descriptor bits */
 	short length;			/* 2s complement (negative!)
 					   of buffer length */
-	short gap2;
 	unsigned short mblength;	/* actual number of bytes received */
-	short gap3;
 };
 
 struct lance_tx_desc {
 	unsigned short tmd0;		/* low address of packet */
-	short gap0;
-	unsigned char tmd1_hadr;	/* high address of packet */
-	unsigned char tmd1_bits;	/* descriptor bits */
-	short gap1;
+	unsigned short tmd1;		/* high address of packet
					   and descriptor bits */
 	short length;			/* 2s complement (negative!)
 					   of buffer length */
-	short gap2;
 	unsigned short misc;
-	short gap3;
 };
 
 
 /* First part of the LANCE initialization block, described in databook. */
 struct lance_init_block {
 	unsigned short mode;		/* pre-set mode (reg. 15) */
-	short gap0;
 
-	unsigned char phys_addr[12];	/* physical ethernet address
-					   only 0, 1, 4, 5, 8, 9 are valid
-					   2, 3, 6, 7, 10, 11 are gaps */
-	unsigned short filter[8];	/* multicast filter
-					   only 0, 2, 4, 6 are valid
-					   1, 3, 5, 7 are gaps */
+	unsigned short phys_addr[3];	/* physical ethernet address */
+	unsigned short filter[4];	/* multicast filter */
 
 	/* Receive and transmit ring base, along with extra bits. */
 	unsigned short rx_ptr;		/* receive descriptor addr */
-	short gap1;
 	unsigned short rx_len;		/* receive len and high addr */
-	short gap2;
 	unsigned short tx_ptr;		/* transmit descriptor addr */
-	short gap3;
 	unsigned short tx_len;		/* transmit len and high addr */
-	short gap4;
-	short gap5[8];
+
+	short gap[4];
 
 	/* The buffer descriptors */
 	struct lance_rx_desc brx_ring[RX_RING_SIZE];
@@ -226,15 +220,28 @@ struct lance_init_block {
 };
 
 #define BUF_OFFSET_CPU	sizeof(struct lance_init_block)
-#define BUF_OFFSET_LNC	(sizeof(struct lance_init_block)>>1)
+#define BUF_OFFSET_LNC	sizeof(struct lance_init_block)
 
-#define libdesc_offset(rt, elem) \
-((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+#define shift_off(off, type) \
+	(type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
 
-/*
- * This works *only* for the ring descriptors
- */
-#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
+#define lib_off(rt, type) \
+	shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type) \
+	((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type) \
+	shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type) \
+	((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type) \
+	shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type) \
+	((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
 
 struct lance_private {
 	struct net_device *next;
@@ -242,7 +249,6 @@ struct lance_private {
 	int slot;
 	int dma_irq;
 	volatile struct lance_regs *ll;
-	volatile struct lance_init_block *init_block;
 
 	spinlock_t lock;
 
@@ -260,8 +266,8 @@ struct lance_private {
 	char *tx_buf_ptr_cpu[TX_RING_SIZE];
 
 	/* Pointers to the ring buffers as seen from the LANCE */
-	char *rx_buf_ptr_lnc[RX_RING_SIZE];
-	char *tx_buf_ptr_lnc[TX_RING_SIZE];
+	uint rx_buf_ptr_lnc[RX_RING_SIZE];
+	uint tx_buf_ptr_lnc[TX_RING_SIZE];
 };
 
 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
@@ -294,7 +300,7 @@ static inline void writereg(volatile unsigned short *regptr, short value)
 static void load_csrs(struct lance_private *lp)
 {
 	volatile struct lance_regs *ll = lp->ll;
-	int leptr;
+	uint leptr;
 
 	/* The address space as seen from the LANCE
 	 * begins at address 0. HK
@@ -316,12 +322,14 @@ static void load_csrs(struct lance_private *lp)
  * Our specialized copy routines
  *
  */
-void cp_to_buf(const int type, void *to, const void *from, int len)
+static void cp_to_buf(const int type, void *to, const void *from, int len)
 {
 	unsigned short *tp, *fp, clen;
 	unsigned char *rtp, *rfp;
 
-	if (type == PMAX_LANCE) {
+	if (type == PMAD_LANCE) {
+		memcpy(to, from, len);
+	} else if (type == PMAX_LANCE) {
 		clen = len >> 1;
 		tp = (unsigned short *) to;
 		fp = (unsigned short *) from;
@@ -370,12 +378,14 @@ void cp_to_buf(const int type, void *to, const void *from, int len)
 	iob();
 }
 
-void cp_from_buf(const int type, void *to, const void *from, int len)
+static void cp_from_buf(const int type, void *to, const void *from, int len)
 {
 	unsigned short *tp, *fp, clen;
 	unsigned char *rtp, *rfp;
 
-	if (type == PMAX_LANCE) {
+	if (type == PMAD_LANCE) {
+		memcpy(to, from, len);
+	} else if (type == PMAX_LANCE) {
 		clen = len >> 1;
 		tp = (unsigned short *) to;
 		fp = (unsigned short *) from;
@@ -431,12 +441,10 @@ void cp_from_buf(const int type, void *to, const void *from, int len)
 static void lance_init_ring(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
-	int leptr;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	uint leptr;
 	int i;
 
-	ib = (struct lance_init_block *) (dev->mem_start);
-
 	/* Lock out other processes while setting up hardware */
 	netif_stop_queue(dev);
 	lp->rx_new = lp->tx_new = 0;
@@ -445,55 +453,64 @@ static void lance_init_ring(struct net_device *dev)
 	/* Copy the ethernet address to the lance init block.
 	 * XXX bit 0 of the physical address registers has to be zero
 	 */
-	ib->phys_addr[0] = dev->dev_addr[0];
-	ib->phys_addr[1] = dev->dev_addr[1];
-	ib->phys_addr[4] = dev->dev_addr[2];
-	ib->phys_addr[5] = dev->dev_addr[3];
-	ib->phys_addr[8] = dev->dev_addr[4];
-	ib->phys_addr[9] = dev->dev_addr[5];
+	*lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+					       dev->dev_addr[0];
+	*lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+					       dev->dev_addr[2];
+	*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+					       dev->dev_addr[4];
 	/* Setup the initialization block */
 
 	/* Setup rx descriptor pointer */
-	leptr = LANCE_ADDR(libdesc_offset(brx_ring, 0));
-	ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
-	ib->rx_ptr = leptr;
+	leptr = offsetof(struct lance_init_block, brx_ring);
+	*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+					 (leptr >> 16);
+	*lib_ptr(ib, rx_ptr, lp->type) = leptr;
 	if (ZERO)
-		printk("RX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(brx_ring, 0));
+		printk("RX ptr: %8.8x(%8.8x)\n",
+		       leptr, lib_off(brx_ring, lp->type));
 
 	/* Setup tx descriptor pointer */
-	leptr = LANCE_ADDR(libdesc_offset(btx_ring, 0));
-	ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
-	ib->tx_ptr = leptr;
+	leptr = offsetof(struct lance_init_block, btx_ring);
+	*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+					 (leptr >> 16);
+	*lib_ptr(ib, tx_ptr, lp->type) = leptr;
 	if (ZERO)
-		printk("TX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(btx_ring, 0));
+		printk("TX ptr: %8.8x(%8.8x)\n",
+		       leptr, lib_off(btx_ring, lp->type));
 
 	if (ZERO)
 		printk("TX rings:\n");
 
 	/* Setup the Tx ring entries */
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		leptr = (int) lp->tx_buf_ptr_lnc[i];
-		ib->btx_ring[i].tmd0 = leptr;
-		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
-		ib->btx_ring[i].tmd1_bits = 0;
-		ib->btx_ring[i].length = 0xf000;	/* The ones required by tmd2 */
-		ib->btx_ring[i].misc = 0;
+		leptr = lp->tx_buf_ptr_lnc[i];
+		*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+		*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+							    0xff;
+		*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+						/* The ones required by tmd2 */
+		*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
 		if (i < 3 && ZERO)
-			printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->tx_buf_ptr_cpu[i]);
+			printk("%d: 0x%8.8x(0x%8.8x)\n",
+			       i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
 	}
 
 	/* Setup the Rx ring entries */
 	if (ZERO)
 		printk("RX rings:\n");
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		leptr = (int) lp->rx_buf_ptr_lnc[i];
-		ib->brx_ring[i].rmd0 = leptr;
-		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
-		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
-		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
-		ib->brx_ring[i].mblength = 0;
+		leptr = lp->rx_buf_ptr_lnc[i];
+		*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+		*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+							    0xff) |
+							   LE_R1_OWN;
+		*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+							     0xf000;
+		*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
 		if (i < 3 && ZERO)
-			printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->rx_buf_ptr_cpu[i]);
+			printk("%d: 0x%8.8x(0x%8.8x)\n",
+			       i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
 	}
 	iob();
 }
@@ -511,11 +528,13 @@ static int init_restart_lance(struct lance_private *lp)
 		udelay(10);
 	}
 	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
-		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+		       i, ll->rdp);
 		return -1;
 	}
 	if ((ll->rdp & LE_C0_ERR)) {
-		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+		       i, ll->rdp);
 		return -1;
 	}
 	writereg(&ll->rdp, LE_C0_IDON);
@@ -528,12 +547,11 @@ static int init_restart_lance(struct lance_private *lp)
 static int lance_rx(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
-	volatile struct lance_rx_desc *rd = 0;
-	unsigned char bits;
-	int len = 0;
-	struct sk_buff *skb = 0;
-	ib = (struct lance_init_block *) (dev->mem_start);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	volatile u16 *rd;
+	unsigned short bits;
+	int entry, len;
+	struct sk_buff *skb;
 
 #ifdef TEST_HITS
 	{
@@ -542,19 +560,22 @@ static int lance_rx(struct net_device *dev)
 		printk("[");
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			if (i == lp->rx_new)
-				printk("%s", ib->brx_ring[i].rmd1_bits &
+				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+						      lp->type) &
 					     LE_R1_OWN ? "_" : "X");
 			else
-				printk("%s", ib->brx_ring[i].rmd1_bits &
+				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+						      lp->type) &
 					     LE_R1_OWN ? "." : "1");
 		}
 		printk("]");
 	}
 #endif
 
-	for (rd = &ib->brx_ring[lp->rx_new];
-	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
-	     rd = &ib->brx_ring[lp->rx_new]) {
+	for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+	     !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+	     rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+		entry = lp->rx_new;
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
@@ -575,16 +596,18 @@ static int lance_rx(struct net_device *dev)
 			if (bits & LE_R1_EOP)
 				lp->stats.rx_errors++;
 		} else {
-			len = (rd->mblength & 0xfff) - 4;
+			len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
 			skb = dev_alloc_skb(len + 2);
 
 			if (skb == 0) {
 				printk("%s: Memory squeeze, deferring packet.\n",
 				       dev->name);
 				lp->stats.rx_dropped++;
-				rd->mblength = 0;
-				rd->rmd1_bits = LE_R1_OWN;
-				lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+				*rds_ptr(rd, mblength, lp->type) = 0;
+				*rds_ptr(rd, rmd1, lp->type) =
+					((lp->rx_buf_ptr_lnc[entry] >> 16) &
+					 0xff) | LE_R1_OWN;
+				lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 				return 0;
 			}
 			lp->stats.rx_bytes += len;
@@ -594,8 +617,7 @@ static int lance_rx(struct net_device *dev)
 			skb_put(skb, len);	/* make room */
 
 			cp_from_buf(lp->type, skb->data,
-				    (char *)lp->rx_buf_ptr_cpu[lp->rx_new],
-				    len);
+				    (char *)lp->rx_buf_ptr_cpu[entry], len);
 
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
@@ -604,10 +626,11 @@ static int lance_rx(struct net_device *dev)
 		}
 
 		/* Return the packet to the pool */
-		rd->mblength = 0;
-		rd->length = -RX_BUFF_SIZE | 0xf000;
-		rd->rmd1_bits = LE_R1_OWN;
-		lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+		*rds_ptr(rd, mblength, lp->type) = 0;
+		*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+		*rds_ptr(rd, rmd1, lp->type) =
+			((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+		lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 	}
 	return 0;
 }
@@ -615,24 +638,24 @@ static int lance_rx(struct net_device *dev)
 static void lance_tx(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	volatile struct lance_regs *ll = lp->ll;
-	volatile struct lance_tx_desc *td;
+	volatile u16 *td;
 	int i, j;
 	int status;
-	ib = (struct lance_init_block *) (dev->mem_start);
+
 	j = lp->tx_old;
 
 	spin_lock(&lp->lock);
 
 	for (i = j; i != lp->tx_new; i = j) {
-		td = &ib->btx_ring[i];
+		td = lib_ptr(ib, btx_ring[i], lp->type);
 		/* If we hit a packet not owned by us, stop */
-		if (td->tmd1_bits & LE_T1_OWN)
+		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
 			break;
 
-		if (td->tmd1_bits & LE_T1_ERR) {
-			status = td->misc;
+		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+			status = *tds_ptr(td, misc, lp->type);
 
 			lp->stats.tx_errors++;
 			if (status & LE_T3_RTY)
@@ -667,18 +690,19 @@ static void lance_tx(struct net_device *dev)
 				init_restart_lance(lp);
 				goto out;
 			}
-		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+		} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+			   LE_T1_POK) {
 			/*
 			 * So we don't count the packet more than once.
 			 */
-			td->tmd1_bits &= ~(LE_T1_POK);
+			*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
 
 			/* One collision before packet was sent. */
-			if (td->tmd1_bits & LE_T1_EONE)
+			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
 				lp->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
-			if (td->tmd1_bits & LE_T1_EMORE)
+			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
 				lp->stats.collisions += 2;
 
 			lp->stats.tx_packets++;
@@ -752,7 +776,7 @@ struct net_device *last_dev = 0;
 
 static int lance_open(struct net_device *dev)
 {
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	struct lance_private *lp = netdev_priv(dev);
 	volatile struct lance_regs *ll = lp->ll;
 	int status = 0;
@@ -769,11 +793,11 @@ static int lance_open(struct net_device *dev)
 	 *
 	 * BTW it is common bug in all lance drivers! --ANK
 	 */
-	ib->mode = 0;
-	ib->filter [0] = 0;
-	ib->filter [2] = 0;
-	ib->filter [4] = 0;
-	ib->filter [6] = 0;
+	*lib_ptr(ib, mode, lp->type) = 0;
+	*lib_ptr(ib, filter[0], lp->type) = 0;
+	*lib_ptr(ib, filter[1], lp->type) = 0;
+	*lib_ptr(ib, filter[2], lp->type) = 0;
+	*lib_ptr(ib, filter[3], lp->type) = 0;
 
 	lance_init_ring(dev);
 	load_csrs(lp);
@@ -874,12 +898,10 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
 	volatile struct lance_regs *ll = lp->ll;
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-	int entry, skblen, len;
-
-	skblen = skb->len;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	int entry, len;
 
-	len = skblen;
+	len = skb->len;
 
 	if (len < ETH_ZLEN) {
 		if (skb_padto(skb, ETH_ZLEN))
@@ -889,23 +911,17 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	lp->stats.tx_bytes += len;
 
-	entry = lp->tx_new & TX_RING_MOD_MASK;
-	ib->btx_ring[entry].length = (-len);
-	ib->btx_ring[entry].misc = 0;
+	entry = lp->tx_new;
+	*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+	*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
 
-	cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data,
-		  skblen);
-
-	/* Clear the slack of the packet, do I need this? */
-	/* For a firewall it's a good idea - AC */
-/*
-	if (len != skblen)
-		memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1);
- */
+	cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
 
 	/* Now, give the packet to the lance */
-	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
-	lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
+	*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+		((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+		(LE_T1_POK | LE_T1_OWN);
+	lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
 
 	if (TX_BUFFS_AVAIL <= 0)
 		netif_stop_queue(dev);
@@ -930,8 +946,8 @@ static struct net_device_stats *lance_get_stats(struct net_device *dev)
 
 static void lance_load_multicast(struct net_device *dev)
 {
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-	volatile u16 *mcast_table = (u16 *) & ib->filter;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	struct dev_mc_list *dmi = dev->mc_list;
 	char *addrs;
 	int i;
@@ -939,17 +955,17 @@ static void lance_load_multicast(struct net_device *dev)
 
 	/* set all multicast bits */
 	if (dev->flags & IFF_ALLMULTI) {
-		ib->filter[0] = 0xffff;
-		ib->filter[2] = 0xffff;
-		ib->filter[4] = 0xffff;
-		ib->filter[6] = 0xffff;
+		*lib_ptr(ib, filter[0], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[1], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[2], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[3], lp->type) = 0xffff;
 		return;
 	}
 	/* clear the multicast filter */
-	ib->filter[0] = 0;
-	ib->filter[2] = 0;
-	ib->filter[4] = 0;
-	ib->filter[6] = 0;
+	*lib_ptr(ib, filter[0], lp->type) = 0;
+	*lib_ptr(ib, filter[1], lp->type) = 0;
+	*lib_ptr(ib, filter[2], lp->type) = 0;
+	*lib_ptr(ib, filter[3], lp->type) = 0;
 
 	/* Add addresses */
 	for (i = 0; i < dev->mc_count; i++) {
@@ -962,7 +978,7 @@ static void lance_load_multicast(struct net_device *dev)
 
 		crc = ether_crc_le(ETH_ALEN, addrs);
 		crc = crc >> 26;
-		mcast_table[2 * (crc >> 4)] |= 1 << (crc & 0xf);
+		*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
 	}
 	return;
 }
@@ -970,11 +986,9 @@ static void lance_load_multicast(struct net_device *dev)
 static void lance_set_multicast(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	volatile struct lance_regs *ll = lp->ll;
 
-	ib = (struct lance_init_block *) (dev->mem_start);
-
 	if (!netif_running(dev))
 		return;
 
@@ -992,9 +1006,9 @@ static void lance_set_multicast(struct net_device *dev)
 	lance_init_ring(dev);
 
 	if (dev->flags & IFF_PROMISC) {
-		ib->mode |= LE_MO_PROM;
+		*lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
 	} else {
-		ib->mode &= ~LE_MO_PROM;
+		*lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
 		lance_load_multicast(dev);
 	}
 	load_csrs(lp);
@@ -1073,20 +1087,20 @@ static int __init dec_lance_init(const int type, const int slot)
 		 */
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			lp->rx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
 					 2 * i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
+				(BUF_OFFSET_LNC +
 					 RX_RING_SIZE * RX_BUFF_SIZE +
 					 i * TX_BUFF_SIZE);
 		}
 
 		/* Setup I/O ASIC LANCE DMA. */
@@ -1100,6 +1114,7 @@ static int __init dec_lance_init(const int type, const int slot)
 		claim_tc_card(slot);
 
 		dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
+		dev->mem_end = dev->mem_start + 0x100000;
 		dev->base_addr = dev->mem_start + 0x100000;
 		dev->irq = get_tc_irq_nr(slot);
 		esar_base = dev->mem_start + 0x1c0002;
@@ -1110,7 +1125,7 @@ static int __init dec_lance_init(const int type, const int slot)
 				(char *)(dev->mem_start + BUF_OFFSET_CPU +
 					 i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
@@ -1118,9 +1133,9 @@ static int __init dec_lance_init(const int type, const int slot)
 					 RX_RING_SIZE * RX_BUFF_SIZE +
 					 i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
+				(BUF_OFFSET_LNC +
 					 RX_RING_SIZE * RX_BUFF_SIZE +
 					 i * TX_BUFF_SIZE);
 		}
 
 		break;
@@ -1130,6 +1145,7 @@ static int __init dec_lance_init(const int type, const int slot)
 		dev->irq = dec_interrupt[DEC_IRQ_LANCE];
 		dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
 		dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+		dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
 		esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
 		lp->dma_irq = -1;
 
@@ -1138,20 +1154,20 @@ static int __init dec_lance_init(const int type, const int slot)
 		 */
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			lp->rx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
 					 2 * i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
+				(BUF_OFFSET_LNC +
 					 RX_RING_SIZE * RX_BUFF_SIZE +
 					 i * TX_BUFF_SIZE);
 		}
 
 		break;