aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJ.R. Mauro <jrm8005@gmail.com>2008-10-20 19:28:58 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2008-10-22 12:56:37 -0400
commitb243c4aaf8d470a99101521c9197ed5b38084793 (patch)
tree3da19a9ea14d31cd46eeb3095a00128a26232ec8
parent4460a860f728983f685cb23140c241c10dca0d32 (diff)
Staging: sxg: clean up C99 comments
Change C99 comments to C89 comments Some nested comments seem to have been missed and some blocks are redundantly commented, but at least most of the //'s are gone Signed-off by: J.R. Mauro <jrm8005@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/staging/sxg/sxg.c1040
-rw-r--r--drivers/staging/sxg/sxghw.h120
2 files changed, 580 insertions, 580 deletions
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index d8772e5bf2d8..3b05acf35d8b 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -223,7 +223,7 @@ static void sxg_dbg_macaddrs(p_adapter_t adapter)
223 return; 223 return;
224} 224}
225 225
226// SXG Globals 226/* SXG Globals */
227static SXG_DRIVER SxgDriver; 227static SXG_DRIVER SxgDriver;
228 228
229#ifdef ATKDBG 229#ifdef ATKDBG
@@ -250,7 +250,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
250 u32 ThisSectionSize; 250 u32 ThisSectionSize;
251 u32 *Instruction = NULL; 251 u32 *Instruction = NULL;
252 u32 BaseAddress, AddressOffset, Address; 252 u32 BaseAddress, AddressOffset, Address;
253// u32 Failure; 253/* u32 Failure; */
254 u32 ValueRead; 254 u32 ValueRead;
255 u32 i; 255 u32 i;
256 u32 numSections = 0; 256 u32 numSections = 0;
@@ -262,7 +262,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
262 DBG_ERROR("sxg: %s ENTER\n", __func__); 262 DBG_ERROR("sxg: %s ENTER\n", __func__);
263 263
264 switch (UcodeSel) { 264 switch (UcodeSel) {
265 case SXG_UCODE_SAHARA: // Sahara operational ucode 265 case SXG_UCODE_SAHARA: /* Sahara operational ucode */
266 numSections = SNumSections; 266 numSections = SNumSections;
267 for (i = 0; i < numSections; i++) { 267 for (i = 0; i < numSections; i++) {
268 sectionSize[i] = SSectionSize[i]; 268 sectionSize[i] = SSectionSize[i];
@@ -276,13 +276,13 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
276 } 276 }
277 277
278 DBG_ERROR("sxg: RESET THE CARD\n"); 278 DBG_ERROR("sxg: RESET THE CARD\n");
279 // First, reset the card 279 /* First, reset the card */
280 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH); 280 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
281 281
282 // Download each section of the microcode as specified in 282 /* Download each section of the microcode as specified in */
283 // its download file. The *download.c file is generated using 283 /* its download file. The *download.c file is generated using */
284 // the saharaobjtoc facility which converts the metastep .obj 284 /* the saharaobjtoc facility which converts the metastep .obj */
285 // file to a .c file which contains a two dimentional array. 285 /* file to a .c file which contains a two dimentional array. */
286 for (Section = 0; Section < numSections; Section++) { 286 for (Section = 0; Section < numSections; Section++) {
287 DBG_ERROR("sxg: SECTION # %d\n", Section); 287 DBG_ERROR("sxg: SECTION # %d\n", Section);
288 switch (UcodeSel) { 288 switch (UcodeSel) {
@@ -294,35 +294,35 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
294 break; 294 break;
295 } 295 }
296 BaseAddress = sectionStart[Section]; 296 BaseAddress = sectionStart[Section];
297 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 297 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
298 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 298 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
299 AddressOffset++) { 299 AddressOffset++) {
300 Address = BaseAddress + AddressOffset; 300 Address = BaseAddress + AddressOffset;
301 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0); 301 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
302 // Write instruction bits 31 - 0 302 /* Write instruction bits 31 - 0 */
303 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH); 303 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
304 // Write instruction bits 63-32 304 /* Write instruction bits 63-32 */
305 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1), 305 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
306 FLUSH); 306 FLUSH);
307 // Write instruction bits 95-64 307 /* Write instruction bits 95-64 */
308 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2), 308 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
309 FLUSH); 309 FLUSH);
310 // Write instruction address with the WRITE bit set 310 /* Write instruction address with the WRITE bit set */
311 WRITE_REG(HwRegs->UcodeAddr, 311 WRITE_REG(HwRegs->UcodeAddr,
312 (Address | MICROCODE_ADDRESS_WRITE), FLUSH); 312 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
313 // Sahara bug in the ucode download logic - the write to DataLow 313 /* Sahara bug in the ucode download logic - the write to DataLow */
314 // for the next instruction could get corrupted. To avoid this, 314 /* for the next instruction could get corrupted. To avoid this, */
315 // write to DataLow again for this instruction (which may get 315 /* write to DataLow again for this instruction (which may get */
316 // corrupted, but it doesn't matter), then increment the address 316 /* corrupted, but it doesn't matter), then increment the address */
317 // and write the data for the next instruction to DataLow. That 317 /* and write the data for the next instruction to DataLow. That */
318 // write should succeed. 318 /* write should succeed. */
319 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE); 319 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
320 // Advance 3 u32S to start of next instruction 320 /* Advance 3 u32S to start of next instruction */
321 Instruction += 3; 321 Instruction += 3;
322 } 322 }
323 } 323 }
324 // Now repeat the entire operation reading the instruction back and 324 /* Now repeat the entire operation reading the instruction back and */
325 // checking for parity errors 325 /* checking for parity errors */
326 for (Section = 0; Section < numSections; Section++) { 326 for (Section = 0; Section < numSections; Section++) {
327 DBG_ERROR("sxg: check SECTION # %d\n", Section); 327 DBG_ERROR("sxg: check SECTION # %d\n", Section);
328 switch (UcodeSel) { 328 switch (UcodeSel) {
@@ -334,51 +334,51 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
334 break; 334 break;
335 } 335 }
336 BaseAddress = sectionStart[Section]; 336 BaseAddress = sectionStart[Section];
337 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 337 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
338 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 338 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
339 AddressOffset++) { 339 AddressOffset++) {
340 Address = BaseAddress + AddressOffset; 340 Address = BaseAddress + AddressOffset;
341 // Write the address with the READ bit set 341 /* Write the address with the READ bit set */
342 WRITE_REG(HwRegs->UcodeAddr, 342 WRITE_REG(HwRegs->UcodeAddr,
343 (Address | MICROCODE_ADDRESS_READ), FLUSH); 343 (Address | MICROCODE_ADDRESS_READ), FLUSH);
344 // Read it back and check parity bit. 344 /* Read it back and check parity bit. */
345 READ_REG(HwRegs->UcodeAddr, ValueRead); 345 READ_REG(HwRegs->UcodeAddr, ValueRead);
346 if (ValueRead & MICROCODE_ADDRESS_PARITY) { 346 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
347 DBG_ERROR("sxg: %s PARITY ERROR\n", 347 DBG_ERROR("sxg: %s PARITY ERROR\n",
348 __func__); 348 __func__);
349 349
350 return (FALSE); // Parity error 350 return (FALSE); /* Parity error */
351 } 351 }
352 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); 352 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
353 // Read the instruction back and compare 353 /* Read the instruction back and compare */
354 READ_REG(HwRegs->UcodeDataLow, ValueRead); 354 READ_REG(HwRegs->UcodeDataLow, ValueRead);
355 if (ValueRead != *Instruction) { 355 if (ValueRead != *Instruction) {
356 DBG_ERROR("sxg: %s MISCOMPARE LOW\n", 356 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
357 __func__); 357 __func__);
358 return (FALSE); // Miscompare 358 return (FALSE); /* Miscompare */
359 } 359 }
360 READ_REG(HwRegs->UcodeDataMiddle, ValueRead); 360 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
361 if (ValueRead != *(Instruction + 1)) { 361 if (ValueRead != *(Instruction + 1)) {
362 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", 362 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
363 __func__); 363 __func__);
364 return (FALSE); // Miscompare 364 return (FALSE); /* Miscompare */
365 } 365 }
366 READ_REG(HwRegs->UcodeDataHigh, ValueRead); 366 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
367 if (ValueRead != *(Instruction + 2)) { 367 if (ValueRead != *(Instruction + 2)) {
368 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", 368 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
369 __func__); 369 __func__);
370 return (FALSE); // Miscompare 370 return (FALSE); /* Miscompare */
371 } 371 }
372 // Advance 3 u32S to start of next instruction 372 /* Advance 3 u32S to start of next instruction */
373 Instruction += 3; 373 Instruction += 3;
374 } 374 }
375 } 375 }
376 376
377 // Everything OK, Go. 377 /* Everything OK, Go. */
378 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH); 378 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
379 379
380 // Poll the CardUp register to wait for microcode to initialize 380 /* Poll the CardUp register to wait for microcode to initialize */
381 // Give up after 10,000 attemps (500ms). 381 /* Give up after 10,000 attemps (500ms). */
382 for (i = 0; i < 10000; i++) { 382 for (i = 0; i < 10000; i++) {
383 udelay(50); 383 udelay(50);
384 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead); 384 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
@@ -390,11 +390,11 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
390 if (i == 10000) { 390 if (i == 10000) {
391 DBG_ERROR("sxg: %s TIMEOUT\n", __func__); 391 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
392 392
393 return (FALSE); // Timeout 393 return (FALSE); /* Timeout */
394 } 394 }
395 // Now write the LoadSync register. This is used to 395 /* Now write the LoadSync register. This is used to */
396 // synchronize with the card so it can scribble on the memory 396 /* synchronize with the card so it can scribble on the memory */
397 // that contained 0xCAFE from the "CardUp" step above 397 /* that contained 0xCAFE from the "CardUp" step above */
398 if (UcodeSel == SXG_UCODE_SAHARA) { 398 if (UcodeSel == SXG_UCODE_SAHARA) {
399 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH); 399 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
400 } 400 }
@@ -420,22 +420,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
420 int status; 420 int status;
421 u32 i; 421 u32 i;
422 u32 RssIds, IsrCount; 422 u32 RssIds, IsrCount;
423// PSXG_XMT_RING XmtRing; 423/* PSXG_XMT_RING XmtRing; */
424// PSXG_RCV_RING RcvRing; 424/* PSXG_RCV_RING RcvRing; */
425 425
426 DBG_ERROR("%s ENTER\n", __func__); 426 DBG_ERROR("%s ENTER\n", __func__);
427 427
428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes", 428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
429 adapter, 0, 0, 0); 429 adapter, 0, 0, 0);
430 430
431 // Windows tells us how many CPUs it plans to use for 431 /* Windows tells us how many CPUs it plans to use for */
432 // RSS 432 /* RSS */
433 RssIds = SXG_RSS_CPU_COUNT(adapter); 433 RssIds = SXG_RSS_CPU_COUNT(adapter);
434 IsrCount = adapter->MsiEnabled ? RssIds : 1; 434 IsrCount = adapter->MsiEnabled ? RssIds : 1;
435 435
436 DBG_ERROR("%s Setup the spinlocks\n", __func__); 436 DBG_ERROR("%s Setup the spinlocks\n", __func__);
437 437
438 // Allocate spinlocks and initialize listheads first. 438 /* Allocate spinlocks and initialize listheads first. */
439 spin_lock_init(&adapter->RcvQLock); 439 spin_lock_init(&adapter->RcvQLock);
440 spin_lock_init(&adapter->SglQLock); 440 spin_lock_init(&adapter->SglQLock);
441 spin_lock_init(&adapter->XmtZeroLock); 441 spin_lock_init(&adapter->XmtZeroLock);
@@ -450,21 +450,21 @@ static int sxg_allocate_resources(p_adapter_t adapter)
450 InitializeListHead(&adapter->FreeSglBuffers); 450 InitializeListHead(&adapter->FreeSglBuffers);
451 InitializeListHead(&adapter->AllSglBuffers); 451 InitializeListHead(&adapter->AllSglBuffers);
452 452
453 // Mark these basic allocations done. This flags essentially 453 /* Mark these basic allocations done. This flags essentially */
454 // tells the SxgFreeResources routine that it can grab spinlocks 454 /* tells the SxgFreeResources routine that it can grab spinlocks */
455 // and reference listheads. 455 /* and reference listheads. */
456 adapter->BasicAllocations = TRUE; 456 adapter->BasicAllocations = TRUE;
457 // Main allocation loop. Start with the maximum supported by 457 /* Main allocation loop. Start with the maximum supported by */
458 // the microcode and back off if memory allocation 458 /* the microcode and back off if memory allocation */
459 // fails. If we hit a minimum, fail. 459 /* fails. If we hit a minimum, fail. */
460 460
461 for (;;) { 461 for (;;) {
462 DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __func__, 462 DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __func__,
463 (sizeof(SXG_XMT_RING) * 1)); 463 (sizeof(SXG_XMT_RING) * 1));
464 464
465 // Start with big items first - receive and transmit rings. At the moment 465 /* Start with big items first - receive and transmit rings. At the moment */
466 // I'm going to keep the ring size fixed and adjust the number of 466 /* I'm going to keep the ring size fixed and adjust the number of */
467 // TCBs if we fail. Later we might consider reducing the ring size as well.. 467 /* TCBs if we fail. Later we might consider reducing the ring size as well.. */
468 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, 468 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
469 sizeof(SXG_XMT_RING) * 469 sizeof(SXG_XMT_RING) *
470 1, 470 1,
@@ -490,7 +490,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
490 break; 490 break;
491 491
492 per_tcb_allocation_failed: 492 per_tcb_allocation_failed:
493 // an allocation failed. Free any successful allocations. 493 /* an allocation failed. Free any successful allocations. */
494 if (adapter->XmtRings) { 494 if (adapter->XmtRings) {
495 pci_free_consistent(adapter->pcidev, 495 pci_free_consistent(adapter->pcidev,
496 sizeof(SXG_XMT_RING) * 4096, 496 sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +505,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
505 adapter->PRcvRings); 505 adapter->PRcvRings);
506 adapter->RcvRings = NULL; 506 adapter->RcvRings = NULL;
507 } 507 }
508 // Loop around and try again.... 508 /* Loop around and try again.... */
509 } 509 }
510 510
511 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__); 511 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
512 // Initialize rcv zero and xmt zero rings 512 /* Initialize rcv zero and xmt zero rings */
513 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE); 513 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
514 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE); 514 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
515 515
516 // Sanity check receive data structure format 516 /* Sanity check receive data structure format */
517 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 517 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
518 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 518 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
519 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) == 519 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
520 SXG_RCV_DESCRIPTOR_BLOCK_SIZE); 520 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
521 521
522 // Allocate receive data buffers. We allocate a block of buffers and 522 /* Allocate receive data buffers. We allocate a block of buffers and */
523 // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK 523 /* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
524 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS; 524 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
525 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { 525 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
526 sxg_allocate_buffer_memory(adapter, 526 sxg_allocate_buffer_memory(adapter,
@@ -528,8 +528,8 @@ static int sxg_allocate_resources(p_adapter_t adapter)
528 ReceiveBufferSize), 528 ReceiveBufferSize),
529 SXG_BUFFER_TYPE_RCV); 529 SXG_BUFFER_TYPE_RCV);
530 } 530 }
531 // NBL resource allocation can fail in the 'AllocateComplete' routine, which 531 /* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
532 // doesn't return status. Make sure we got the number of buffers we requested 532 /* doesn't return status. Make sure we got the number of buffers we requested */
533 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) { 533 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", 534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
535 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES, 535 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -540,14 +540,14 @@ static int sxg_allocate_resources(p_adapter_t adapter)
540 DBG_ERROR("%s Allocate EventRings size[%lx]\n", __func__, 540 DBG_ERROR("%s Allocate EventRings size[%lx]\n", __func__,
541 (sizeof(SXG_EVENT_RING) * RssIds)); 541 (sizeof(SXG_EVENT_RING) * RssIds));
542 542
543 // Allocate event queues. 543 /* Allocate event queues. */
544 adapter->EventRings = pci_alloc_consistent(adapter->pcidev, 544 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
545 sizeof(SXG_EVENT_RING) * 545 sizeof(SXG_EVENT_RING) *
546 RssIds, 546 RssIds,
547 &adapter->PEventRings); 547 &adapter->PEventRings);
548 548
549 if (!adapter->EventRings) { 549 if (!adapter->EventRings) {
550 // Caller will call SxgFreeAdapter to clean up above allocations 550 /* Caller will call SxgFreeAdapter to clean up above allocations */
551 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", 551 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
552 adapter, SXG_MAX_ENTRIES, 0, 0); 552 adapter, SXG_MAX_ENTRIES, 0, 0);
553 status = STATUS_RESOURCES; 553 status = STATUS_RESOURCES;
@@ -556,11 +556,11 @@ static int sxg_allocate_resources(p_adapter_t adapter)
556 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds); 556 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
557 557
558 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount); 558 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
559 // Allocate ISR 559 /* Allocate ISR */
560 adapter->Isr = pci_alloc_consistent(adapter->pcidev, 560 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
561 IsrCount, &adapter->PIsr); 561 IsrCount, &adapter->PIsr);
562 if (!adapter->Isr) { 562 if (!adapter->Isr) {
563 // Caller will call SxgFreeAdapter to clean up above allocations 563 /* Caller will call SxgFreeAdapter to clean up above allocations */
564 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", 564 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
565 adapter, SXG_MAX_ENTRIES, 0, 0); 565 adapter, SXG_MAX_ENTRIES, 0, 0);
566 status = STATUS_RESOURCES; 566 status = STATUS_RESOURCES;
@@ -571,7 +571,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
571 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n", 571 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n",
572 __func__, sizeof(u32)); 572 __func__, sizeof(u32));
573 573
574 // Allocate shared XMT ring zero index location 574 /* Allocate shared XMT ring zero index location */
575 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev, 575 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
576 sizeof(u32), 576 sizeof(u32),
577 &adapter-> 577 &adapter->
@@ -607,13 +607,13 @@ static void sxg_config_pci(struct pci_dev *pcidev)
607 607
608 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); 608 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
609 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command); 609 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
610 // Set the command register 610 /* Set the command register */
611 new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable 611 new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */
612 PCI_COMMAND_MASTER | // Bus master enable 612 PCI_COMMAND_MASTER | /* Bus master enable */
613 PCI_COMMAND_INVALIDATE | // Memory write and invalidate 613 PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */
614 PCI_COMMAND_PARITY | // Parity error response 614 PCI_COMMAND_PARITY | /* Parity error response */
615 PCI_COMMAND_SERR | // System ERR 615 PCI_COMMAND_SERR | /* System ERR */
616 PCI_COMMAND_FAST_BACK); // Fast back-to-back 616 PCI_COMMAND_FAST_BACK); /* Fast back-to-back */
617 if (pci_command != new_command) { 617 if (pci_command != new_command) {
618 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", 618 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
619 __func__, pci_command, new_command); 619 __func__, pci_command, new_command);
@@ -636,7 +636,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
636 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n", 636 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
637 __func__, jiffies, smp_processor_id()); 637 __func__, jiffies, smp_processor_id());
638 638
639 // Initialize trace buffer 639 /* Initialize trace buffer */
640#ifdef ATKDBG 640#ifdef ATKDBG
641 SxgTraceBuffer = &LSxgTraceBuffer; 641 SxgTraceBuffer = &LSxgTraceBuffer;
642 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY); 642 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -738,13 +738,13 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
738 adapter->UcodeRegs = (void *)memmapped_ioaddr; 738 adapter->UcodeRegs = (void *)memmapped_ioaddr;
739 739
740 adapter->State = SXG_STATE_INITIALIZING; 740 adapter->State = SXG_STATE_INITIALIZING;
741 // Maintain a list of all adapters anchored by 741 /* Maintain a list of all adapters anchored by */
742 // the global SxgDriver structure. 742 /* the global SxgDriver structure. */
743 adapter->Next = SxgDriver.Adapters; 743 adapter->Next = SxgDriver.Adapters;
744 SxgDriver.Adapters = adapter; 744 SxgDriver.Adapters = adapter;
745 adapter->AdapterID = ++SxgDriver.AdapterID; 745 adapter->AdapterID = ++SxgDriver.AdapterID;
746 746
747 // Initialize CRC table used to determine multicast hash 747 /* Initialize CRC table used to determine multicast hash */
748 sxg_mcast_init_crc32(); 748 sxg_mcast_init_crc32();
749 749
750 adapter->JumboEnabled = FALSE; 750 adapter->JumboEnabled = FALSE;
@@ -757,10 +757,10 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
757 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; 757 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
758 } 758 }
759 759
760// status = SXG_READ_EEPROM(adapter); 760/* status = SXG_READ_EEPROM(adapter); */
761// if (!status) { 761/* if (!status) { */
762// goto sxg_init_bad; 762/* goto sxg_init_bad; */
763// } 763/* } */
764 764
765 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__); 765 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
766 sxg_config_pci(pcidev); 766 sxg_config_pci(pcidev);
@@ -780,11 +780,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
780 adapter->irq = pcidev->irq; 780 adapter->irq = pcidev->irq;
781 adapter->next_netdevice = head_netdevice; 781 adapter->next_netdevice = head_netdevice;
782 head_netdevice = netdev; 782 head_netdevice = netdev;
783// adapter->chipid = chip_idx; 783/* adapter->chipid = chip_idx; */
784 adapter->port = 0; //adapter->functionnumber; 784 adapter->port = 0; /*adapter->functionnumber; */
785 adapter->cardindex = adapter->port; 785 adapter->cardindex = adapter->port;
786 786
787 // Allocate memory and other resources 787 /* Allocate memory and other resources */
788 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__); 788 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
789 status = sxg_allocate_resources(adapter); 789 status = sxg_allocate_resources(adapter);
790 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n", 790 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
@@ -819,7 +819,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
819#endif 819#endif
820 820
821 strcpy(netdev->name, "eth%d"); 821 strcpy(netdev->name, "eth%d");
822// strcpy(netdev->name, pci_name(pcidev)); 822/* strcpy(netdev->name, pci_name(pcidev)); */
823 if ((err = register_netdev(netdev))) { 823 if ((err = register_netdev(netdev))) {
824 DBG_ERROR("Cannot register net device, aborting. %s\n", 824 DBG_ERROR("Cannot register net device, aborting. %s\n",
825 netdev->name); 825 netdev->name);
@@ -832,9 +832,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
832 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], 832 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
833 netdev->dev_addr[4], netdev->dev_addr[5]); 833 netdev->dev_addr[4], netdev->dev_addr[5]);
834 834
835//sxg_init_bad: 835/*sxg_init_bad: */
836 ASSERT(status == FALSE); 836 ASSERT(status == FALSE);
837// sxg_free_adapter(adapter); 837/* sxg_free_adapter(adapter); */
838 838
839 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__, 839 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
840 status, jiffies, smp_processor_id()); 840 status, jiffies, smp_processor_id());
@@ -874,12 +874,12 @@ static void sxg_disable_interrupt(p_adapter_t adapter)
874{ 874{
875 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr", 875 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
876 adapter, adapter->InterruptsEnabled, 0, 0); 876 adapter, adapter->InterruptsEnabled, 0, 0);
877 // For now, RSS is disabled with line based interrupts 877 /* For now, RSS is disabled with line based interrupts */
878 ASSERT(adapter->RssEnabled == FALSE); 878 ASSERT(adapter->RssEnabled == FALSE);
879 ASSERT(adapter->MsiEnabled == FALSE); 879 ASSERT(adapter->MsiEnabled == FALSE);
880 // 880 /* */
881 // Turn off interrupts by writing to the icr register. 881 /* Turn off interrupts by writing to the icr register. */
882 // 882 /* */
883 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE); 883 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
884 884
885 adapter->InterruptsEnabled = 0; 885 adapter->InterruptsEnabled = 0;
@@ -905,12 +905,12 @@ static void sxg_enable_interrupt(p_adapter_t adapter)
905{ 905{
906 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr", 906 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
907 adapter, adapter->InterruptsEnabled, 0, 0); 907 adapter, adapter->InterruptsEnabled, 0, 0);
908 // For now, RSS is disabled with line based interrupts 908 /* For now, RSS is disabled with line based interrupts */
909 ASSERT(adapter->RssEnabled == FALSE); 909 ASSERT(adapter->RssEnabled == FALSE);
910 ASSERT(adapter->MsiEnabled == FALSE); 910 ASSERT(adapter->MsiEnabled == FALSE);
911 // 911 /* */
912 // Turn on interrupts by writing to the icr register. 912 /* Turn on interrupts by writing to the icr register. */
913 // 913 /* */
914 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE); 914 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
915 915
916 adapter->InterruptsEnabled = 1; 916 adapter->InterruptsEnabled = 1;
@@ -935,29 +935,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
935{ 935{
936 p_net_device dev = (p_net_device) dev_id; 936 p_net_device dev = (p_net_device) dev_id;
937 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 937 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
938// u32 CpuMask = 0, i; 938/* u32 CpuMask = 0, i; */
939 939
940 adapter->Stats.NumInts++; 940 adapter->Stats.NumInts++;
941 if (adapter->Isr[0] == 0) { 941 if (adapter->Isr[0] == 0) {
942 // The SLIC driver used to experience a number of spurious interrupts 942 /* The SLIC driver used to experience a number of spurious interrupts */
943 // due to the delay associated with the masking of the interrupt 943 /* due to the delay associated with the masking of the interrupt */
944 // (we'd bounce back in here). If we see that again with Sahara, 944 /* (we'd bounce back in here). If we see that again with Sahara, */
945 // add a READ_REG of the Icr register after the WRITE_REG below. 945 /* add a READ_REG of the Icr register after the WRITE_REG below. */
946 adapter->Stats.FalseInts++; 946 adapter->Stats.FalseInts++;
947 return IRQ_NONE; 947 return IRQ_NONE;
948 } 948 }
949 // 949 /* */
950 // Move the Isr contents and clear the value in 950 /* Move the Isr contents and clear the value in */
951 // shared memory, and mask interrupts 951 /* shared memory, and mask interrupts */
952 // 952 /* */
953 adapter->IsrCopy[0] = adapter->Isr[0]; 953 adapter->IsrCopy[0] = adapter->Isr[0];
954 adapter->Isr[0] = 0; 954 adapter->Isr[0] = 0;
955 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); 955 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
956// ASSERT(adapter->IsrDpcsPending == 0); 956/* ASSERT(adapter->IsrDpcsPending == 0); */
957#if XXXTODO // RSS Stuff 957#if XXXTODO /* RSS Stuff */
958 // If RSS is enabled and the ISR specifies 958 /* If RSS is enabled and the ISR specifies */
959 // SXG_ISR_EVENT, then schedule DPC's 959 /* SXG_ISR_EVENT, then schedule DPC's */
960 // based on event queues. 960 /* based on event queues. */
961 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) { 961 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
962 for (i = 0; 962 for (i = 0;
963 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; 963 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +973,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
973 } 973 }
974 } 974 }
975 } 975 }
976 // Now, either schedule the CPUs specified by the CpuMask, 976 /* Now, either schedule the CPUs specified by the CpuMask, */
977 // or queue default 977 /* or queue default */
978 if (CpuMask) { 978 if (CpuMask) {
979 *QueueDefault = FALSE; 979 *QueueDefault = FALSE;
980 } else { 980 } else {
@@ -983,9 +983,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
983 } 983 }
984 *TargetCpus = CpuMask; 984 *TargetCpus = CpuMask;
985#endif 985#endif
986 // 986 /* */
987 // There are no DPCs in Linux, so call the handler now 987 /* There are no DPCs in Linux, so call the handler now */
988 // 988 /* */
989 sxg_handle_interrupt(adapter); 989 sxg_handle_interrupt(adapter);
990 990
991 return IRQ_HANDLED; 991 return IRQ_HANDLED;
@@ -993,7 +993,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
993 993
994static void sxg_handle_interrupt(p_adapter_t adapter) 994static void sxg_handle_interrupt(p_adapter_t adapter)
995{ 995{
996// unsigned char RssId = 0; 996/* unsigned char RssId = 0; */
997 u32 NewIsr; 997 u32 NewIsr;
998 998
999 if (adapter->Stats.RcvNoBuffer < 5) { 999 if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1002,32 @@ static void sxg_handle_interrupt(p_adapter_t adapter)
1002 } 1002 }
1003 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr", 1003 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1004 adapter, adapter->IsrCopy[0], 0, 0); 1004 adapter, adapter->IsrCopy[0], 0, 0);
1005 // For now, RSS is disabled with line based interrupts 1005 /* For now, RSS is disabled with line based interrupts */
1006 ASSERT(adapter->RssEnabled == FALSE); 1006 ASSERT(adapter->RssEnabled == FALSE);
1007 ASSERT(adapter->MsiEnabled == FALSE); 1007 ASSERT(adapter->MsiEnabled == FALSE);
1008 ASSERT(adapter->IsrCopy[0]); 1008 ASSERT(adapter->IsrCopy[0]);
1009///////////////////////////// 1009/*/////////////////////////// */
1010 1010
1011 // Always process the event queue. 1011 /* Always process the event queue. */
1012 sxg_process_event_queue(adapter, 1012 sxg_process_event_queue(adapter,
1013 (adapter->RssEnabled ? /*RssId */ 0 : 0)); 1013 (adapter->RssEnabled ? /*RssId */ 0 : 0));
1014 1014
1015#if XXXTODO // RSS stuff 1015#if XXXTODO /* RSS stuff */
1016 if (--adapter->IsrDpcsPending) { 1016 if (--adapter->IsrDpcsPending) {
1017 // We're done. 1017 /* We're done. */
1018 ASSERT(adapter->RssEnabled); 1018 ASSERT(adapter->RssEnabled);
1019 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend", 1019 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1020 adapter, 0, 0, 0); 1020 adapter, 0, 0, 0);
1021 return; 1021 return;
1022 } 1022 }
1023#endif 1023#endif
1024 // 1024 /* */
1025 // Last (or only) DPC processes the ISR and clears the interrupt. 1025 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1026 // 1026 /* */
1027 NewIsr = sxg_process_isr(adapter, 0); 1027 NewIsr = sxg_process_isr(adapter, 0);
1028 // 1028 /* */
1029 // Reenable interrupts 1029 /* Reenable interrupts */
1030 // 1030 /* */
1031 adapter->IsrCopy[0] = 0; 1031 adapter->IsrCopy[0] = 0;
1032 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr", 1032 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1033 adapter, NewIsr, 0, 0); 1033 adapter, NewIsr, 0, 0);
@@ -1063,29 +1063,29 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
1063 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr", 1063 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1064 adapter, Isr, 0, 0); 1064 adapter, Isr, 0, 0);
1065 1065
1066 // Error 1066 /* Error */
1067 if (Isr & SXG_ISR_ERR) { 1067 if (Isr & SXG_ISR_ERR) {
1068 if (Isr & SXG_ISR_PDQF) { 1068 if (Isr & SXG_ISR_PDQF) {
1069 adapter->Stats.PdqFull++; 1069 adapter->Stats.PdqFull++;
1070 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__); 1070 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1071 } 1071 }
1072 // No host buffer 1072 /* No host buffer */
1073 if (Isr & SXG_ISR_RMISS) { 1073 if (Isr & SXG_ISR_RMISS) {
1074 // There is a bunch of code in the SLIC driver which 1074 /* There is a bunch of code in the SLIC driver which */
1075 // attempts to process more receive events per DPC 1075 /* attempts to process more receive events per DPC */
1076 // if we start to fall behind. We'll probably 1076 /* if we start to fall behind. We'll probably */
1077 // need to do something similar here, but hold 1077 /* need to do something similar here, but hold */
1078 // off for now. I don't want to make the code more 1078 /* off for now. I don't want to make the code more */
1079 // complicated than strictly needed. 1079 /* complicated than strictly needed. */
1080 adapter->Stats.RcvNoBuffer++; 1080 adapter->Stats.RcvNoBuffer++;
1081 if (adapter->Stats.RcvNoBuffer < 5) { 1081 if (adapter->Stats.RcvNoBuffer < 5) {
1082 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n", 1082 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1083 __func__); 1083 __func__);
1084 } 1084 }
1085 } 1085 }
1086 // Card crash 1086 /* Card crash */
1087 if (Isr & SXG_ISR_DEAD) { 1087 if (Isr & SXG_ISR_DEAD) {
1088 // Set aside the crash info and set the adapter state to RESET 1088 /* Set aside the crash info and set the adapter state to RESET */
1089 adapter->CrashCpu = 1089 adapter->CrashCpu =
1090 (unsigned char)((Isr & SXG_ISR_CPU) >> 1090 (unsigned char)((Isr & SXG_ISR_CPU) >>
1091 SXG_ISR_CPU_SHIFT); 1091 SXG_ISR_CPU_SHIFT);
@@ -1094,44 +1094,44 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
1094 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__, 1094 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1095 adapter->CrashLocation, adapter->CrashCpu); 1095 adapter->CrashLocation, adapter->CrashCpu);
1096 } 1096 }
1097 // Event ring full 1097 /* Event ring full */
1098 if (Isr & SXG_ISR_ERFULL) { 1098 if (Isr & SXG_ISR_ERFULL) {
1099 // Same issue as RMISS, really. This means the 1099 /* Same issue as RMISS, really. This means the */
1100 // host is falling behind the card. Need to increase 1100 /* host is falling behind the card. Need to increase */
1101 // event ring size, process more events per interrupt, 1101 /* event ring size, process more events per interrupt, */
1102 // and/or reduce/remove interrupt aggregation. 1102 /* and/or reduce/remove interrupt aggregation. */
1103 adapter->Stats.EventRingFull++; 1103 adapter->Stats.EventRingFull++;
1104 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n", 1104 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1105 __func__); 1105 __func__);
1106 } 1106 }
1107 // Transmit drop - no DRAM buffers or XMT error 1107 /* Transmit drop - no DRAM buffers or XMT error */
1108 if (Isr & SXG_ISR_XDROP) { 1108 if (Isr & SXG_ISR_XDROP) {
1109 adapter->Stats.XmtDrops++; 1109 adapter->Stats.XmtDrops++;
1110 adapter->Stats.XmtErrors++; 1110 adapter->Stats.XmtErrors++;
1111 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__); 1111 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1112 } 1112 }
1113 } 1113 }
1114 // Slowpath send completions 1114 /* Slowpath send completions */
1115 if (Isr & SXG_ISR_SPSEND) { 1115 if (Isr & SXG_ISR_SPSEND) {
1116 sxg_complete_slow_send(adapter); 1116 sxg_complete_slow_send(adapter);
1117 } 1117 }
1118 // Dump 1118 /* Dump */
1119 if (Isr & SXG_ISR_UPC) { 1119 if (Isr & SXG_ISR_UPC) {
1120 ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added.. 1120 ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */
1121 adapter->DumpCmdRunning = FALSE; 1121 adapter->DumpCmdRunning = FALSE;
1122 } 1122 }
1123 // Link event 1123 /* Link event */
1124 if (Isr & SXG_ISR_LINK) { 1124 if (Isr & SXG_ISR_LINK) {
1125 sxg_link_event(adapter); 1125 sxg_link_event(adapter);
1126 } 1126 }
1127 // Debug - breakpoint hit 1127 /* Debug - breakpoint hit */
1128 if (Isr & SXG_ISR_BREAK) { 1128 if (Isr & SXG_ISR_BREAK) {
1129 // At the moment AGDB isn't written to support interactive 1129 /* At the moment AGDB isn't written to support interactive */
1130 // debug sessions. When it is, this interrupt will be used 1130 /* debug sessions. When it is, this interrupt will be used */
1131 // to signal AGDB that it has hit a breakpoint. For now, ASSERT. 1131 /* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
1132 ASSERT(0); 1132 ASSERT(0);
1133 } 1133 }
1134 // Heartbeat response 1134 /* Heartbeat response */
1135 if (Isr & SXG_ISR_PING) { 1135 if (Isr & SXG_ISR_PING) {
1136 adapter->PingOutstanding = FALSE; 1136 adapter->PingOutstanding = FALSE;
1137 } 1137 }
@@ -1171,39 +1171,39 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1171 (adapter->State == SXG_STATE_PAUSING) || 1171 (adapter->State == SXG_STATE_PAUSING) ||
1172 (adapter->State == SXG_STATE_PAUSED) || 1172 (adapter->State == SXG_STATE_PAUSED) ||
1173 (adapter->State == SXG_STATE_HALTING)); 1173 (adapter->State == SXG_STATE_HALTING));
1174 // We may still have unprocessed events on the queue if 1174 /* We may still have unprocessed events on the queue if */
1175 // the card crashed. Don't process them. 1175 /* the card crashed. Don't process them. */
1176 if (adapter->Dead) { 1176 if (adapter->Dead) {
1177 return (0); 1177 return (0);
1178 } 1178 }
1179 // In theory there should only be a single processor that 1179 /* In theory there should only be a single processor that */
1180 // accesses this queue, and only at interrupt-DPC time. So 1180 /* accesses this queue, and only at interrupt-DPC time. So */
1181 // we shouldn't need a lock for any of this. 1181 /* we shouldn't need a lock for any of this. */
1182 while (Event->Status & EVENT_STATUS_VALID) { 1182 while (Event->Status & EVENT_STATUS_VALID) {
1183 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event", 1183 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1184 Event, Event->Code, Event->Status, 1184 Event, Event->Code, Event->Status,
1185 adapter->NextEvent); 1185 adapter->NextEvent);
1186 switch (Event->Code) { 1186 switch (Event->Code) {
1187 case EVENT_CODE_BUFFERS: 1187 case EVENT_CODE_BUFFERS:
1188 ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char 1188 ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */
1189 // 1189 /* */
1190 sxg_complete_descriptor_blocks(adapter, 1190 sxg_complete_descriptor_blocks(adapter,
1191 Event->CommandIndex); 1191 Event->CommandIndex);
1192 // 1192 /* */
1193 break; 1193 break;
1194 case EVENT_CODE_SLOWRCV: 1194 case EVENT_CODE_SLOWRCV:
1195 --adapter->RcvBuffersOnCard; 1195 --adapter->RcvBuffersOnCard;
1196 if ((skb = sxg_slow_receive(adapter, Event))) { 1196 if ((skb = sxg_slow_receive(adapter, Event))) {
1197 u32 rx_bytes; 1197 u32 rx_bytes;
1198#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1198#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1199 // Add it to our indication list 1199 /* Add it to our indication list */
1200 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb, 1200 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1201 IndicationList, num_skbs); 1201 IndicationList, num_skbs);
1202 // In Linux, we just pass up each skb to the protocol above at this point, 1202 /* In Linux, we just pass up each skb to the protocol above at this point, */
1203 // there is no capability of an indication list. 1203 /* there is no capability of an indication list. */
1204#else 1204#else
1205// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); 1205/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1206 rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK); 1206 rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1207 skb_put(skb, rx_bytes); 1207 skb_put(skb, rx_bytes);
1208 adapter->stats.rx_packets++; 1208 adapter->stats.rx_packets++;
1209 adapter->stats.rx_bytes += rx_bytes; 1209 adapter->stats.rx_bytes += rx_bytes;
@@ -1219,42 +1219,42 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1219 default: 1219 default:
1220 DBG_ERROR("%s: ERROR Invalid EventCode %d\n", 1220 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1221 __func__, Event->Code); 1221 __func__, Event->Code);
1222// ASSERT(0); 1222/* ASSERT(0); */
1223 } 1223 }
1224 // See if we need to restock card receive buffers. 1224 /* See if we need to restock card receive buffers. */
1225 // There are two things to note here: 1225 /* There are two things to note here: */
1226 // First - This test is not SMP safe. The 1226 /* First - This test is not SMP safe. The */
1227 // adapter->BuffersOnCard field is protected via atomic interlocked calls, but 1227 /* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
1228 // we do not protect it with respect to these tests. The only way to do that 1228 /* we do not protect it with respect to these tests. The only way to do that */
1229 // is with a lock, and I don't want to grab a lock every time we adjust the 1229 /* is with a lock, and I don't want to grab a lock every time we adjust the */
1230 // BuffersOnCard count. Instead, we allow the buffer replenishment to be off 1230 /* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
1231 // once in a while. The worst that can happen is the card is given one 1231 /* once in a while. The worst that can happen is the card is given one */
1232 // more-or-less descriptor block than the arbitrary value we've chosen. 1232 /* more-or-less descriptor block than the arbitrary value we've chosen. */
1233 // No big deal 1233 /* No big deal */
1234 // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. 1234 /* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
1235 // Second - We expect this test to rarely evaluate to true. We attempt to 1235 /* Second - We expect this test to rarely evaluate to true. We attempt to */
1236 // refill descriptor blocks as they are returned to us 1236 /* refill descriptor blocks as they are returned to us */
1237 // (sxg_complete_descriptor_blocks), so The only time this should evaluate 1237 /* (sxg_complete_descriptor_blocks), so The only time this should evaluate */
1238 // to true is when sxg_complete_descriptor_blocks failed to allocate 1238 /* to true is when sxg_complete_descriptor_blocks failed to allocate */
1239 // receive buffers. 1239 /* receive buffers. */
1240 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 1240 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1241 sxg_stock_rcv_buffers(adapter); 1241 sxg_stock_rcv_buffers(adapter);
1242 } 1242 }
1243 // It's more efficient to just set this to zero. 1243 /* It's more efficient to just set this to zero. */
1244 // But clearing the top bit saves potential debug info... 1244 /* But clearing the top bit saves potential debug info... */
1245 Event->Status &= ~EVENT_STATUS_VALID; 1245 Event->Status &= ~EVENT_STATUS_VALID;
1246 // Advanct to the next event 1246 /* Advanct to the next event */
1247 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE); 1247 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1248 Event = &EventRing->Ring[adapter->NextEvent[RssId]]; 1248 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1249 EventsProcessed++; 1249 EventsProcessed++;
1250 if (EventsProcessed == EVENT_RING_BATCH) { 1250 if (EventsProcessed == EVENT_RING_BATCH) {
1251 // Release a batch of events back to the card 1251 /* Release a batch of events back to the card */
1252 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1252 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1253 EVENT_RING_BATCH, FALSE); 1253 EVENT_RING_BATCH, FALSE);
1254 EventsProcessed = 0; 1254 EventsProcessed = 0;
1255 // If we've processed our batch limit, break out of the 1255 /* If we've processed our batch limit, break out of the */
1256 // loop and return SXG_ISR_EVENT to arrange for us to 1256 /* loop and return SXG_ISR_EVENT to arrange for us to */
1257 // be called again 1257 /* be called again */
1258 if (Batches++ == EVENT_BATCH_LIMIT) { 1258 if (Batches++ == EVENT_BATCH_LIMIT) {
1259 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1259 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1260 TRACE_NOISY, "EvtLimit", Batches, 1260 TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1265,14 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1265 } 1265 }
1266 } 1266 }
1267#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1267#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1268 // 1268 /* */
1269 // Indicate any received dumb-nic frames 1269 /* Indicate any received dumb-nic frames */
1270 // 1270 /* */
1271 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs); 1271 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1272#endif 1272#endif
1273 // 1273 /* */
1274 // Release events back to the card. 1274 /* Release events back to the card. */
1275 // 1275 /* */
1276 if (EventsProcessed) { 1276 if (EventsProcessed) {
1277 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1277 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1278 EventsProcessed, FALSE); 1278 EventsProcessed, FALSE);
@@ -1299,43 +1299,43 @@ static void sxg_complete_slow_send(p_adapter_t adapter)
1299 u32 *ContextType; 1299 u32 *ContextType;
1300 PSXG_CMD XmtCmd; 1300 PSXG_CMD XmtCmd;
1301 1301
1302 // NOTE - This lock is dropped and regrabbed in this loop. 1302 /* NOTE - This lock is dropped and regrabbed in this loop. */
1303 // This means two different processors can both be running 1303 /* This means two different processors can both be running */
1304 // through this loop. Be *very* careful. 1304 /* through this loop. Be *very* careful. */
1305 spin_lock(&adapter->XmtZeroLock); 1305 spin_lock(&adapter->XmtZeroLock);
1306 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", 1306 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1307 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1307 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1308 1308
1309 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) { 1309 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
1310 // Locate the current Cmd (ring descriptor entry), and 1310 /* Locate the current Cmd (ring descriptor entry), and */
1311 // associated SGL, and advance the tail 1311 /* associated SGL, and advance the tail */
1312 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType); 1312 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1313 ASSERT(ContextType); 1313 ASSERT(ContextType);
1314 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", 1314 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1315 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0); 1315 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1316 // Clear the SGL field. 1316 /* Clear the SGL field. */
1317 XmtCmd->Sgl = 0; 1317 XmtCmd->Sgl = 0;
1318 1318
1319 switch (*ContextType) { 1319 switch (*ContextType) {
1320 case SXG_SGL_DUMB: 1320 case SXG_SGL_DUMB:
1321 { 1321 {
1322 struct sk_buff *skb; 1322 struct sk_buff *skb;
1323 // Dumb-nic send. Command context is the dumb-nic SGL 1323 /* Dumb-nic send. Command context is the dumb-nic SGL */
1324 skb = (struct sk_buff *)ContextType; 1324 skb = (struct sk_buff *)ContextType;
1325 // Complete the send 1325 /* Complete the send */
1326 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1326 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1327 TRACE_IMPORTANT, "DmSndCmp", skb, 0, 1327 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1328 0, 0); 1328 0, 0);
1329 ASSERT(adapter->Stats.XmtQLen); 1329 ASSERT(adapter->Stats.XmtQLen);
1330 adapter->Stats.XmtQLen--; // within XmtZeroLock 1330 adapter->Stats.XmtQLen--; /* within XmtZeroLock */
1331 adapter->Stats.XmtOk++; 1331 adapter->Stats.XmtOk++;
1332 // Now drop the lock and complete the send back to 1332 /* Now drop the lock and complete the send back to */
1333 // Microsoft. We need to drop the lock because 1333 /* Microsoft. We need to drop the lock because */
1334 // Microsoft can come back with a chimney send, which 1334 /* Microsoft can come back with a chimney send, which */
1335 // results in a double trip in SxgTcpOuput 1335 /* results in a double trip in SxgTcpOuput */
1336 spin_unlock(&adapter->XmtZeroLock); 1336 spin_unlock(&adapter->XmtZeroLock);
1337 SXG_COMPLETE_DUMB_SEND(adapter, skb); 1337 SXG_COMPLETE_DUMB_SEND(adapter, skb);
1338 // and reacquire.. 1338 /* and reacquire.. */
1339 spin_lock(&adapter->XmtZeroLock); 1339 spin_lock(&adapter->XmtZeroLock);
1340 } 1340 }
1341 break; 1341 break;
@@ -1371,7 +1371,7 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1371 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, 1371 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1372 RcvDataBufferHdr, RcvDataBufferHdr->State, 1372 RcvDataBufferHdr, RcvDataBufferHdr->State,
1373 RcvDataBufferHdr->VirtualAddress); 1373 RcvDataBufferHdr->VirtualAddress);
1374 // Drop rcv frames in non-running state 1374 /* Drop rcv frames in non-running state */
1375 switch (adapter->State) { 1375 switch (adapter->State) {
1376 case SXG_STATE_RUNNING: 1376 case SXG_STATE_RUNNING:
1377 break; 1377 break;
@@ -1384,12 +1384,12 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1384 goto drop; 1384 goto drop;
1385 } 1385 }
1386 1386
1387 // Change buffer state to UPSTREAM 1387 /* Change buffer state to UPSTREAM */
1388 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; 1388 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1389 if (Event->Status & EVENT_STATUS_RCVERR) { 1389 if (Event->Status & EVENT_STATUS_RCVERR) {
1390 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError", 1390 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1391 Event, Event->Status, Event->HostHandle, 0); 1391 Event, Event->Status, Event->HostHandle, 0);
1392 // XXXTODO - Remove this print later 1392 /* XXXTODO - Remove this print later */
1393 DBG_ERROR("SXG: Receive error %x\n", *(u32 *) 1393 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
1394 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)); 1394 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1395 sxg_process_rcv_error(adapter, *(u32 *) 1395 sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1397,8 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1397 (RcvDataBufferHdr)); 1397 (RcvDataBufferHdr));
1398 goto drop; 1398 goto drop;
1399 } 1399 }
1400#if XXXTODO // VLAN stuff 1400#if XXXTODO /* VLAN stuff */
1401 // If there's a VLAN tag, extract it and validate it 1401 /* If there's a VLAN tag, extract it and validate it */
1402 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))-> 1402 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
1403 EtherType == ETHERTYPE_VLAN) { 1403 EtherType == ETHERTYPE_VLAN) {
1404 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != 1404 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1411,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1411 } 1411 }
1412 } 1412 }
1413#endif 1413#endif
1414 // 1414 /* */
1415 // Dumb-nic frame. See if it passes our mac filter and update stats 1415 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1416 // 1416 /* */
1417 if (!sxg_mac_filter(adapter, (p_ether_header) 1417 if (!sxg_mac_filter(adapter, (p_ether_header)
1418 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), 1418 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1419 Event->Length)) { 1419 Event->Length)) {
@@ -1427,9 +1427,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1427 1427
1428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", 1428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1429 RcvDataBufferHdr, Packet, Event->Length, 0); 1429 RcvDataBufferHdr, Packet, Event->Length, 0);
1430 // 1430 /* */
1431 // Lastly adjust the receive packet length. 1431 /* Lastly adjust the receive packet length. */
1432 // 1432 /* */
1433 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); 1433 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1434 1434
1435 return (Packet); 1435 return (Packet);
@@ -1541,7 +1541,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1541 1541
1542 if (SXG_MULTICAST_PACKET(EtherHdr)) { 1542 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1543 if (SXG_BROADCAST_PACKET(EtherHdr)) { 1543 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1544 // broadcast 1544 /* broadcast */
1545 if (adapter->MacFilter & MAC_BCAST) { 1545 if (adapter->MacFilter & MAC_BCAST) {
1546 adapter->Stats.DumbRcvBcastPkts++; 1546 adapter->Stats.DumbRcvBcastPkts++;
1547 adapter->Stats.DumbRcvBcastBytes += length; 1547 adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1550,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1550 return (TRUE); 1550 return (TRUE);
1551 } 1551 }
1552 } else { 1552 } else {
1553 // multicast 1553 /* multicast */
1554 if (adapter->MacFilter & MAC_ALLMCAST) { 1554 if (adapter->MacFilter & MAC_ALLMCAST) {
1555 adapter->Stats.DumbRcvMcastPkts++; 1555 adapter->Stats.DumbRcvMcastPkts++;
1556 adapter->Stats.DumbRcvMcastBytes += length; 1556 adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1580,9 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1580 } 1580 }
1581 } 1581 }
1582 } else if (adapter->MacFilter & MAC_DIRECTED) { 1582 } else if (adapter->MacFilter & MAC_DIRECTED) {
1583 // Not broadcast or multicast. Must be directed at us or 1583 /* Not broadcast or multicast. Must be directed at us or */
1584 // the card is in promiscuous mode. Either way, consider it 1584 /* the card is in promiscuous mode. Either way, consider it */
1585 // ours if MAC_DIRECTED is set 1585 /* ours if MAC_DIRECTED is set */
1586 adapter->Stats.DumbRcvUcastPkts++; 1586 adapter->Stats.DumbRcvUcastPkts++;
1587 adapter->Stats.DumbRcvUcastBytes += length; 1587 adapter->Stats.DumbRcvUcastBytes += length;
1588 adapter->Stats.DumbRcvPkts++; 1588 adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1590,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1590 return (TRUE); 1590 return (TRUE);
1591 } 1591 }
1592 if (adapter->MacFilter & MAC_PROMISC) { 1592 if (adapter->MacFilter & MAC_PROMISC) {
1593 // Whatever it is, keep it. 1593 /* Whatever it is, keep it. */
1594 adapter->Stats.DumbRcvPkts++; 1594 adapter->Stats.DumbRcvPkts++;
1595 adapter->Stats.DumbRcvBytes += length; 1595 adapter->Stats.DumbRcvBytes += length;
1596 return (TRUE); 1596 return (TRUE);
@@ -1625,7 +1625,7 @@ static int sxg_register_interrupt(p_adapter_t adapter)
1625 } 1625 }
1626 adapter->intrregistered = 1; 1626 adapter->intrregistered = 1;
1627 adapter->IntRegistered = TRUE; 1627 adapter->IntRegistered = TRUE;
1628 // Disable RSS with line-based interrupts 1628 /* Disable RSS with line-based interrupts */
1629 adapter->MsiEnabled = FALSE; 1629 adapter->MsiEnabled = FALSE;
1630 adapter->RssEnabled = FALSE; 1630 adapter->RssEnabled = FALSE;
1631 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n", 1631 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
@@ -1738,7 +1738,7 @@ static int sxg_entry_open(p_net_device dev)
1738 sxg_global.num_sxg_ports_active++; 1738 sxg_global.num_sxg_ports_active++;
1739 adapter->activated = 1; 1739 adapter->activated = 1;
1740 } 1740 }
1741 // Initialize the adapter 1741 /* Initialize the adapter */
1742 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__); 1742 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
1743 status = sxg_initialize_adapter(adapter); 1743 status = sxg_initialize_adapter(adapter);
1744 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n", 1744 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
@@ -1762,7 +1762,7 @@ static int sxg_entry_open(p_net_device dev)
1762 } 1762 }
1763 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__); 1763 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
1764 1764
1765 // Enable interrupts 1765 /* Enable interrupts */
1766 SXG_ENABLE_ALL_INTERRUPTS(adapter); 1766 SXG_ENABLE_ALL_INTERRUPTS(adapter);
1767 1767
1768 DBG_ERROR("sxg: %s EXIT\n", __func__); 1768 DBG_ERROR("sxg: %s EXIT\n", __func__);
@@ -1825,11 +1825,11 @@ static int sxg_entry_halt(p_net_device dev)
1825static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd) 1825static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1826{ 1826{
1827 ASSERT(rq); 1827 ASSERT(rq);
1828// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); 1828/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
1829 switch (cmd) { 1829 switch (cmd) {
1830 case SIOCSLICSETINTAGG: 1830 case SIOCSLICSETINTAGG:
1831 { 1831 {
1832// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1832/* p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
1833 u32 data[7]; 1833 u32 data[7];
1834 u32 intagg; 1834 u32 intagg;
1835 1835
@@ -1846,7 +1846,7 @@ static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1846 } 1846 }
1847 1847
1848 default: 1848 default:
1849// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); 1849/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
1850 return -EOPNOTSUPP; 1850 return -EOPNOTSUPP;
1851 } 1851 }
1852 return 0; 1852 return 0;
@@ -1872,13 +1872,13 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1872 1872
1873 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__, 1873 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
1874 skb); 1874 skb);
1875 // Check the adapter state 1875 /* Check the adapter state */
1876 switch (adapter->State) { 1876 switch (adapter->State) {
1877 case SXG_STATE_INITIALIZING: 1877 case SXG_STATE_INITIALIZING:
1878 case SXG_STATE_HALTED: 1878 case SXG_STATE_HALTED:
1879 case SXG_STATE_SHUTDOWN: 1879 case SXG_STATE_SHUTDOWN:
1880 ASSERT(0); // unexpected 1880 ASSERT(0); /* unexpected */
1881 // fall through 1881 /* fall through */
1882 case SXG_STATE_RESETTING: 1882 case SXG_STATE_RESETTING:
1883 case SXG_STATE_SLEEP: 1883 case SXG_STATE_SLEEP:
1884 case SXG_STATE_BOOTDIAG: 1884 case SXG_STATE_BOOTDIAG:
@@ -1898,17 +1898,17 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1898 if (status != STATUS_SUCCESS) { 1898 if (status != STATUS_SUCCESS) {
1899 goto xmit_fail; 1899 goto xmit_fail;
1900 } 1900 }
1901 // send a packet 1901 /* send a packet */
1902 status = sxg_transmit_packet(adapter, skb); 1902 status = sxg_transmit_packet(adapter, skb);
1903 if (status == STATUS_SUCCESS) { 1903 if (status == STATUS_SUCCESS) {
1904 goto xmit_done; 1904 goto xmit_done;
1905 } 1905 }
1906 1906
1907 xmit_fail: 1907 xmit_fail:
1908 // reject & complete all the packets if they cant be sent 1908 /* reject & complete all the packets if they cant be sent */
1909 if (status != STATUS_SUCCESS) { 1909 if (status != STATUS_SUCCESS) {
1910#if XXXTODO 1910#if XXXTODO
1911// sxg_send_packets_fail(adapter, skb, status); 1911/* sxg_send_packets_fail(adapter, skb, status); */
1912#else 1912#else
1913 SXG_DROP_DUMB_SEND(adapter, skb); 1913 SXG_DROP_DUMB_SEND(adapter, skb);
1914 adapter->stats.tx_dropped++; 1914 adapter->stats.tx_dropped++;
@@ -1940,12 +1940,12 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1940 void *SglBuffer; 1940 void *SglBuffer;
1941 u32 SglBufferLength; 1941 u32 SglBufferLength;
1942 1942
1943 // The vast majority of work is done in the shared 1943 /* The vast majority of work is done in the shared */
1944 // sxg_dumb_sgl routine. 1944 /* sxg_dumb_sgl routine. */
1945 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend", 1945 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
1946 adapter, skb, 0, 0); 1946 adapter, skb, 0, 0);
1947 1947
1948 // Allocate a SGL buffer 1948 /* Allocate a SGL buffer */
1949 SXG_GET_SGL_BUFFER(adapter, SxgSgl); 1949 SXG_GET_SGL_BUFFER(adapter, SxgSgl);
1950 if (!SxgSgl) { 1950 if (!SxgSgl) {
1951 adapter->Stats.NoSglBuf++; 1951 adapter->Stats.NoSglBuf++;
@@ -1963,9 +1963,9 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1963 SxgSgl->DumbPacket = skb; 1963 SxgSgl->DumbPacket = skb;
1964 pSgl = NULL; 1964 pSgl = NULL;
1965 1965
1966 // Call the common sxg_dumb_sgl routine to complete the send. 1966 /* Call the common sxg_dumb_sgl routine to complete the send. */
1967 sxg_dumb_sgl(pSgl, SxgSgl); 1967 sxg_dumb_sgl(pSgl, SxgSgl);
1968 // Return success sxg_dumb_sgl (or something later) will complete it. 1968 /* Return success sxg_dumb_sgl (or something later) will complete it. */
1969 return (STATUS_SUCCESS); 1969 return (STATUS_SUCCESS);
1970} 1970}
1971 1971
@@ -1983,39 +1983,39 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
1983{ 1983{
1984 p_adapter_t adapter = SxgSgl->adapter; 1984 p_adapter_t adapter = SxgSgl->adapter;
1985 struct sk_buff *skb = SxgSgl->DumbPacket; 1985 struct sk_buff *skb = SxgSgl->DumbPacket;
1986 // For now, all dumb-nic sends go on RSS queue zero 1986 /* For now, all dumb-nic sends go on RSS queue zero */
1987 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0]; 1987 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
1988 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo; 1988 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
1989 PSXG_CMD XmtCmd = NULL; 1989 PSXG_CMD XmtCmd = NULL;
1990// u32 Index = 0; 1990/* u32 Index = 0; */
1991 u32 DataLength = skb->len; 1991 u32 DataLength = skb->len;
1992// unsigned int BufLen; 1992/* unsigned int BufLen; */
1993// u32 SglOffset; 1993/* u32 SglOffset; */
1994 u64 phys_addr; 1994 u64 phys_addr;
1995 1995
1996 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 1996 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
1997 pSgl, SxgSgl, 0, 0); 1997 pSgl, SxgSgl, 0, 0);
1998 1998
1999 // Set aside a pointer to the sgl 1999 /* Set aside a pointer to the sgl */
2000 SxgSgl->pSgl = pSgl; 2000 SxgSgl->pSgl = pSgl;
2001 2001
2002 // Sanity check that our SGL format is as we expect. 2002 /* Sanity check that our SGL format is as we expect. */
2003 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT)); 2003 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
2004 // Shouldn't be a vlan tag on this frame 2004 /* Shouldn't be a vlan tag on this frame */
2005 ASSERT(SxgSgl->VlanTag.VlanTci == 0); 2005 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2006 ASSERT(SxgSgl->VlanTag.VlanTpid == 0); 2006 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2007 2007
2008 // From here below we work with the SGL placed in our 2008 /* From here below we work with the SGL placed in our */
2009 // buffer. 2009 /* buffer. */
2010 2010
2011 SxgSgl->Sgl.NumberOfElements = 1; 2011 SxgSgl->Sgl.NumberOfElements = 1;
2012 2012
2013 // Grab the spinlock and acquire a command 2013 /* Grab the spinlock and acquire a command */
2014 spin_lock(&adapter->XmtZeroLock); 2014 spin_lock(&adapter->XmtZeroLock);
2015 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2015 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2016 if (XmtCmd == NULL) { 2016 if (XmtCmd == NULL) {
2017 // Call sxg_complete_slow_send to see if we can 2017 /* Call sxg_complete_slow_send to see if we can */
2018 // free up any XmtRingZero entries and then try again 2018 /* free up any XmtRingZero entries and then try again */
2019 spin_unlock(&adapter->XmtZeroLock); 2019 spin_unlock(&adapter->XmtZeroLock);
2020 sxg_complete_slow_send(adapter); 2020 sxg_complete_slow_send(adapter);
2021 spin_lock(&adapter->XmtZeroLock); 2021 spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2027,10 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2027 } 2027 }
2028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2029 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2029 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2030 // Update stats 2030 /* Update stats */
2031 adapter->Stats.DumbXmtPkts++; 2031 adapter->Stats.DumbXmtPkts++;
2032 adapter->Stats.DumbXmtBytes += DataLength; 2032 adapter->Stats.DumbXmtBytes += DataLength;
2033#if XXXTODO // Stats stuff 2033#if XXXTODO /* Stats stuff */
2034 if (SXG_MULTICAST_PACKET(EtherHdr)) { 2034 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2035 if (SXG_BROADCAST_PACKET(EtherHdr)) { 2035 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2036 adapter->Stats.DumbXmtBcastPkts++; 2036 adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2044,8 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2044 adapter->Stats.DumbXmtUcastBytes += DataLength; 2044 adapter->Stats.DumbXmtUcastBytes += DataLength;
2045 } 2045 }
2046#endif 2046#endif
2047 // Fill in the command 2047 /* Fill in the command */
2048 // Copy out the first SGE to the command and adjust for offset 2048 /* Copy out the first SGE to the command and adjust for offset */
2049 phys_addr = 2049 phys_addr =
2050 pci_map_single(adapter->pcidev, skb->data, skb->len, 2050 pci_map_single(adapter->pcidev, skb->data, skb->len,
2051 PCI_DMA_TODEVICE); 2051 PCI_DMA_TODEVICE);
@@ -2053,54 +2053,54 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2053 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32; 2053 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
2054 XmtCmd->Buffer.FirstSgeAddress = 2054 XmtCmd->Buffer.FirstSgeAddress =
2055 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr); 2055 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
2056// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; 2056/* XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
2057// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; 2057/* XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
2058 XmtCmd->Buffer.FirstSgeLength = DataLength; 2058 XmtCmd->Buffer.FirstSgeLength = DataLength;
2059 // Set a pointer to the remaining SGL entries 2059 /* Set a pointer to the remaining SGL entries */
2060// XmtCmd->Sgl = SxgSgl->PhysicalAddress; 2060/* XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
2061 // Advance the physical address of the SxgSgl structure to 2061 /* Advance the physical address of the SxgSgl structure to */
2062 // the second SGE 2062 /* the second SGE */
2063// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - 2063/* SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
2064// (u32 *)SxgSgl); 2064/* (u32 *)SxgSgl); */
2065// XmtCmd->Sgl.LowPart += SglOffset; 2065/* XmtCmd->Sgl.LowPart += SglOffset; */
2066 XmtCmd->Buffer.SgeOffset = 0; 2066 XmtCmd->Buffer.SgeOffset = 0;
2067 // Note - TotalLength might be overwritten with MSS below.. 2067 /* Note - TotalLength might be overwritten with MSS below.. */
2068 XmtCmd->Buffer.TotalLength = DataLength; 2068 XmtCmd->Buffer.TotalLength = DataLength;
2069 XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index); 2069 XmtCmd->SgEntries = 1; /*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
2070 XmtCmd->Flags = 0; 2070 XmtCmd->Flags = 0;
2071 // 2071 /* */
2072 // Advance transmit cmd descripter by 1. 2072 /* Advance transmit cmd descripter by 1. */
2073 // NOTE - See comments in SxgTcpOutput where we write 2073 /* NOTE - See comments in SxgTcpOutput where we write */
2074 // to the XmtCmd register regarding CPU ID values and/or 2074 /* to the XmtCmd register regarding CPU ID values and/or */
2075 // multiple commands. 2075 /* multiple commands. */
2076 // 2076 /* */
2077 // 2077 /* */
2078 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE); 2078 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2079 // 2079 /* */
2080 // 2080 /* */
2081 adapter->Stats.XmtQLen++; // Stats within lock 2081 adapter->Stats.XmtQLen++; /* Stats within lock */
2082 spin_unlock(&adapter->XmtZeroLock); 2082 spin_unlock(&adapter->XmtZeroLock);
2083 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2083 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2084 XmtCmd, pSgl, SxgSgl, 0); 2084 XmtCmd, pSgl, SxgSgl, 0);
2085 return; 2085 return;
2086 2086
2087 abortcmd: 2087 abortcmd:
2088 // NOTE - Only jump to this label AFTER grabbing the 2088 /* NOTE - Only jump to this label AFTER grabbing the */
2089 // XmtZeroLock, and DO NOT DROP IT between the 2089 /* XmtZeroLock, and DO NOT DROP IT between the */
2090 // command allocation and the following abort. 2090 /* command allocation and the following abort. */
2091 if (XmtCmd) { 2091 if (XmtCmd) {
2092 SXG_ABORT_CMD(XmtRingInfo); 2092 SXG_ABORT_CMD(XmtRingInfo);
2093 } 2093 }
2094 spin_unlock(&adapter->XmtZeroLock); 2094 spin_unlock(&adapter->XmtZeroLock);
2095 2095
2096// failsgl: 2096/* failsgl: */
2097 // Jump to this label if failure occurs before the 2097 /* Jump to this label if failure occurs before the */
2098 // XmtZeroLock is grabbed 2098 /* XmtZeroLock is grabbed */
2099 adapter->Stats.XmtErrors++; 2099 adapter->Stats.XmtErrors++;
2100 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", 2100 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2101 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); 2101 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2102 2102
2103 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb 2103 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
2104} 2104}
2105 2105
2106/*************************************************************** 2106/***************************************************************
@@ -2127,122 +2127,122 @@ static int sxg_initialize_link(p_adapter_t adapter)
2127 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink", 2127 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2128 adapter, 0, 0, 0); 2128 adapter, 0, 0, 0);
2129 2129
2130 // Reset PHY and XGXS module 2130 /* Reset PHY and XGXS module */
2131 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE); 2131 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2132 2132
2133 // Reset transmit configuration register 2133 /* Reset transmit configuration register */
2134 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE); 2134 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2135 2135
2136 // Reset receive configuration register 2136 /* Reset receive configuration register */
2137 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE); 2137 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2138 2138
2139 // Reset all MAC modules 2139 /* Reset all MAC modules */
2140 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE); 2140 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2141 2141
2142 // Link address 0 2142 /* Link address 0 */
2143 // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) 2143 /* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
2144 // is stored with the first nibble (0a) in the byte 0 2144 /* is stored with the first nibble (0a) in the byte 0 */
2145 // of the Mac address. Possibly reverse? 2145 /* of the Mac address. Possibly reverse? */
2146 Value = *(u32 *) adapter->MacAddr; 2146 Value = *(u32 *) adapter->MacAddr;
2147 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE); 2147 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2148 // also write the MAC address to the MAC. Endian is reversed. 2148 /* also write the MAC address to the MAC. Endian is reversed. */
2149 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE); 2149 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2150 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF); 2150 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
2151 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE); 2151 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2152 // endian swap for the MAC (put high bytes in bits [31:16], swapped) 2152 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2153 Value = ntohl(Value); 2153 Value = ntohl(Value);
2154 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE); 2154 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2155 // Link address 1 2155 /* Link address 1 */
2156 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE); 2156 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2157 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE); 2157 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2158 // Link address 2 2158 /* Link address 2 */
2159 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE); 2159 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2160 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE); 2160 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2161 // Link address 3 2161 /* Link address 3 */
2162 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE); 2162 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2163 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE); 2163 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2164 2164
2165 // Enable MAC modules 2165 /* Enable MAC modules */
2166 WRITE_REG(HwRegs->MacConfig0, 0, TRUE); 2166 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2167 2167
2168 // Configure MAC 2168 /* Configure MAC */
2169 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause 2169 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */
2170 AXGMAC_CFG1_XMT_EN | // Enable XMT 2170 AXGMAC_CFG1_XMT_EN | /* Enable XMT */
2171 AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause 2171 AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */
2172 AXGMAC_CFG1_RCV_EN | // Enable receive 2172 AXGMAC_CFG1_RCV_EN | /* Enable receive */
2173 AXGMAC_CFG1_SHORT_ASSERT | // short frame detection 2173 AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */
2174 AXGMAC_CFG1_CHECK_LEN | // Verify frame length 2174 AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */
2175 AXGMAC_CFG1_GEN_FCS | // Generate FCS 2175 AXGMAC_CFG1_GEN_FCS | /* Generate FCS */
2176 AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes 2176 AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */
2177 TRUE); 2177 TRUE);
2178 2178
2179 // Set AXGMAC max frame length if jumbo. Not needed for standard MTU 2179 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2180 if (adapter->JumboEnabled) { 2180 if (adapter->JumboEnabled) {
2181 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE); 2181 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2182 } 2182 }
2183 // AMIIM Configuration Register - 2183 /* AMIIM Configuration Register - */
2184 // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion 2184 /* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
2185 // (bottom bits) of this register is used to determine the 2185 /* (bottom bits) of this register is used to determine the */
2186 // MDC frequency as specified in the A-XGMAC Design Document. 2186 /* MDC frequency as specified in the A-XGMAC Design Document. */
2187 // This value must not be zero. The following value (62 or 0x3E) 2187 /* This value must not be zero. The following value (62 or 0x3E) */
2188 // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. 2188 /* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
2189 // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), 2189 /* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
2190 // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. 2190 /* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
2191 // This value happens to be the default value for this register, 2191 /* This value happens to be the default value for this register, */
2192 // so we really don't have to do this. 2192 /* so we really don't have to do this. */
2193 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE); 2193 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2194 2194
2195 // Power up and enable PHY and XAUI/XGXS/Serdes logic 2195 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2196 WRITE_REG(HwRegs->LinkStatus, 2196 WRITE_REG(HwRegs->LinkStatus,
2197 (LS_PHY_CLR_RESET | 2197 (LS_PHY_CLR_RESET |
2198 LS_XGXS_ENABLE | 2198 LS_XGXS_ENABLE |
2199 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE); 2199 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2200 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n"); 2200 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2201 2201
2202 // Per information given by Aeluros, wait 100 ms after removing reset. 2202 /* Per information given by Aeluros, wait 100 ms after removing reset. */
2203 // It's not enough to wait for the self-clearing reset bit in reg 0 to clear. 2203 /* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
2204 mdelay(100); 2204 mdelay(100);
2205 2205
2206 // Verify the PHY has come up by checking that the Reset bit has cleared. 2206 /* Verify the PHY has come up by checking that the Reset bit has cleared. */
2207 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2207 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2208 PHY_PMA_CONTROL1, // PMA/PMD control register 2208 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2209 &Value); 2209 &Value);
2210 if (status != STATUS_SUCCESS) 2210 if (status != STATUS_SUCCESS)
2211 return (STATUS_FAILURE); 2211 return (STATUS_FAILURE);
2212 if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0 2212 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2213 return (STATUS_FAILURE); 2213 return (STATUS_FAILURE);
2214 2214
2215 // The SERDES should be initialized by now - confirm 2215 /* The SERDES should be initialized by now - confirm */
2216 READ_REG(HwRegs->LinkStatus, Value); 2216 READ_REG(HwRegs->LinkStatus, Value);
2217 if (Value & LS_SERDES_DOWN) // verify SERDES is initialized 2217 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2218 return (STATUS_FAILURE); 2218 return (STATUS_FAILURE);
2219 2219
2220 // The XAUI link should also be up - confirm 2220 /* The XAUI link should also be up - confirm */
2221 if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up 2221 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2222 return (STATUS_FAILURE); 2222 return (STATUS_FAILURE);
2223 2223
2224 // Initialize the PHY 2224 /* Initialize the PHY */
2225 status = sxg_phy_init(adapter); 2225 status = sxg_phy_init(adapter);
2226 if (status != STATUS_SUCCESS) 2226 if (status != STATUS_SUCCESS)
2227 return (STATUS_FAILURE); 2227 return (STATUS_FAILURE);
2228 2228
2229 // Enable the Link Alarm 2229 /* Enable the Link Alarm */
2230 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2230 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2231 LASI_CONTROL, // LASI control register 2231 LASI_CONTROL, /* LASI control register */
2232 LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit 2232 LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */
2233 if (status != STATUS_SUCCESS) 2233 if (status != STATUS_SUCCESS)
2234 return (STATUS_FAILURE); 2234 return (STATUS_FAILURE);
2235 2235
2236 // XXXTODO - temporary - verify bit is set 2236 /* XXXTODO - temporary - verify bit is set */
2237 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2237 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2238 LASI_CONTROL, // LASI control register 2238 LASI_CONTROL, /* LASI control register */
2239 &Value); 2239 &Value);
2240 if (status != STATUS_SUCCESS) 2240 if (status != STATUS_SUCCESS)
2241 return (STATUS_FAILURE); 2241 return (STATUS_FAILURE);
2242 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { 2242 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2243 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n"); 2243 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2244 } 2244 }
2245 // Enable receive 2245 /* Enable receive */
2246 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME; 2246 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2247 ConfigData = (RCV_CONFIG_ENABLE | 2247 ConfigData = (RCV_CONFIG_ENABLE |
2248 RCV_CONFIG_ENPARSE | 2248 RCV_CONFIG_ENPARSE |
@@ -2256,7 +2256,7 @@ static int sxg_initialize_link(p_adapter_t adapter)
2256 2256
2257 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE); 2257 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2258 2258
2259 // Mark the link as down. We'll get a link event when it comes up. 2259 /* Mark the link as down. We'll get a link event when it comes up. */
2260 sxg_link_state(adapter, SXG_LINK_DOWN); 2260 sxg_link_state(adapter, SXG_LINK_DOWN);
2261 2261
2262 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk", 2262 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2281,27 +2281,27 @@ static int sxg_phy_init(p_adapter_t adapter)
2281 2281
2282 DBG_ERROR("ENTER %s\n", __func__); 2282 DBG_ERROR("ENTER %s\n", __func__);
2283 2283
2284 // Read a register to identify the PHY type 2284 /* Read a register to identify the PHY type */
2285 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2285 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2286 0xC205, // PHY ID register (?) 2286 0xC205, /* PHY ID register (?) */
2287 &Value); // XXXTODO - add def 2287 &Value); /* XXXTODO - add def */
2288 if (status != STATUS_SUCCESS) 2288 if (status != STATUS_SUCCESS)
2289 return (STATUS_FAILURE); 2289 return (STATUS_FAILURE);
2290 2290
2291 if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def 2291 if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2292 DBG_ERROR 2292 DBG_ERROR
2293 ("AEL2005C PHY detected. Downloading PHY microcode.\n"); 2293 ("AEL2005C PHY detected. Downloading PHY microcode.\n");
2294 2294
2295 // Initialize AEL2005C PHY and download PHY microcode 2295 /* Initialize AEL2005C PHY and download PHY microcode */
2296 for (p = PhyUcode; p->Addr != 0xFFFF; p++) { 2296 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2297 if (p->Addr == 0) { 2297 if (p->Addr == 0) {
2298 // if address == 0, data == sleep time in ms 2298 /* if address == 0, data == sleep time in ms */
2299 mdelay(p->Data); 2299 mdelay(p->Data);
2300 } else { 2300 } else {
2301 // write the given data to the specified address 2301 /* write the given data to the specified address */
2302 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2302 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2303 p->Addr, // PHY address 2303 p->Addr, /* PHY address */
2304 p->Data); // PHY data 2304 p->Data); /* PHY data */
2305 if (status != STATUS_SUCCESS) 2305 if (status != STATUS_SUCCESS)
2306 return (STATUS_FAILURE); 2306 return (STATUS_FAILURE);
2307 } 2307 }
@@ -2332,38 +2332,38 @@ static void sxg_link_event(p_adapter_t adapter)
2332 adapter, 0, 0, 0); 2332 adapter, 0, 0, 0);
2333 DBG_ERROR("ENTER %s\n", __func__); 2333 DBG_ERROR("ENTER %s\n", __func__);
2334 2334
2335 // Check the Link Status register. We should have a Link Alarm. 2335 /* Check the Link Status register. We should have a Link Alarm. */
2336 READ_REG(HwRegs->LinkStatus, Value); 2336 READ_REG(HwRegs->LinkStatus, Value);
2337 if (Value & LS_LINK_ALARM) { 2337 if (Value & LS_LINK_ALARM) {
2338 // We got a Link Status alarm. First, pause to let the 2338 /* We got a Link Status alarm. First, pause to let the */
2339 // link state settle (it can bounce a number of times) 2339 /* link state settle (it can bounce a number of times) */
2340 mdelay(10); 2340 mdelay(10);
2341 2341
2342 // Now clear the alarm by reading the LASI status register. 2342 /* Now clear the alarm by reading the LASI status register. */
2343 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2343 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2344 LASI_STATUS, // LASI status register 2344 LASI_STATUS, /* LASI status register */
2345 &Value); 2345 &Value);
2346 if (status != STATUS_SUCCESS) { 2346 if (status != STATUS_SUCCESS) {
2347 DBG_ERROR("Error reading LASI Status MDIO register!\n"); 2347 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2348 sxg_link_state(adapter, SXG_LINK_DOWN); 2348 sxg_link_state(adapter, SXG_LINK_DOWN);
2349// ASSERT(0); 2349/* ASSERT(0); */
2350 } 2350 }
2351 ASSERT(Value & LASI_STATUS_LS_ALARM); 2351 ASSERT(Value & LASI_STATUS_LS_ALARM);
2352 2352
2353 // Now get and set the link state 2353 /* Now get and set the link state */
2354 LinkState = sxg_get_link_state(adapter); 2354 LinkState = sxg_get_link_state(adapter);
2355 sxg_link_state(adapter, LinkState); 2355 sxg_link_state(adapter, LinkState);
2356 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n", 2356 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2357 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN")); 2357 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2358 } else { 2358 } else {
2359 // XXXTODO - Assuming Link Attention is only being generated for the 2359 /* XXXTODO - Assuming Link Attention is only being generated for the */
2360 // Link Alarm pin (and not for a XAUI Link Status change), then it's 2360 /* Link Alarm pin (and not for a XAUI Link Status change), then it's */
2361 // impossible to get here. Yet we've gotten here twice (under extreme 2361 /* impossible to get here. Yet we've gotten here twice (under extreme */
2362 // conditions - bouncing the link up and down many times a second). 2362 /* conditions - bouncing the link up and down many times a second). */
2363 // Needs further investigation. 2363 /* Needs further investigation. */
2364 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); 2364 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2365 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); 2365 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2366// ASSERT(0); 2366/* ASSERT(0); */
2367 } 2367 }
2368 DBG_ERROR("EXIT %s\n", __func__); 2368 DBG_ERROR("EXIT %s\n", __func__);
2369 2369
@@ -2388,45 +2388,45 @@ static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
2388 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink", 2388 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2389 adapter, 0, 0, 0); 2389 adapter, 0, 0, 0);
2390 2390
2391 // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if 2391 /* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
2392 // the following 3 bits (from 3 different MDIO registers) are all true. 2392 /* the following 3 bits (from 3 different MDIO registers) are all true. */
2393 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2393 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2394 PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register 2394 PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
2395 &Value); 2395 &Value);
2396 if (status != STATUS_SUCCESS) 2396 if (status != STATUS_SUCCESS)
2397 goto bad; 2397 goto bad;
2398 2398
2399 // If PMA/PMD receive signal detect is 0, then the link is down 2399 /* If PMA/PMD receive signal detect is 0, then the link is down */
2400 if (!(Value & PMA_RCV_DETECT)) 2400 if (!(Value & PMA_RCV_DETECT))
2401 return (SXG_LINK_DOWN); 2401 return (SXG_LINK_DOWN);
2402 2402
2403 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module 2403 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */
2404 PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register 2404 PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */
2405 &Value); 2405 &Value);
2406 if (status != STATUS_SUCCESS) 2406 if (status != STATUS_SUCCESS)
2407 goto bad; 2407 goto bad;
2408 2408
2409 // If PCS is not locked to receive blocks, then the link is down 2409 /* If PCS is not locked to receive blocks, then the link is down */
2410 if (!(Value & PCS_10B_BLOCK_LOCK)) 2410 if (!(Value & PCS_10B_BLOCK_LOCK))
2411 return (SXG_LINK_DOWN); 2411 return (SXG_LINK_DOWN);
2412 2412
2413 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module 2413 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */
2414 PHY_XS_LANE_STATUS, // XS Lane Status register 2414 PHY_XS_LANE_STATUS, /* XS Lane Status register */
2415 &Value); 2415 &Value);
2416 if (status != STATUS_SUCCESS) 2416 if (status != STATUS_SUCCESS)
2417 goto bad; 2417 goto bad;
2418 2418
2419 // If XS transmit lanes are not aligned, then the link is down 2419 /* If XS transmit lanes are not aligned, then the link is down */
2420 if (!(Value & XS_LANE_ALIGN)) 2420 if (!(Value & XS_LANE_ALIGN))
2421 return (SXG_LINK_DOWN); 2421 return (SXG_LINK_DOWN);
2422 2422
2423 // All 3 bits are true, so the link is up 2423 /* All 3 bits are true, so the link is up */
2424 DBG_ERROR("EXIT %s\n", __func__); 2424 DBG_ERROR("EXIT %s\n", __func__);
2425 2425
2426 return (SXG_LINK_UP); 2426 return (SXG_LINK_UP);
2427 2427
2428 bad: 2428 bad:
2429 // An error occurred reading an MDIO register. This shouldn't happen. 2429 /* An error occurred reading an MDIO register. This shouldn't happen. */
2430 DBG_ERROR("Error reading an MDIO register!\n"); 2430 DBG_ERROR("Error reading an MDIO register!\n");
2431 ASSERT(0); 2431 ASSERT(0);
2432 return (SXG_LINK_DOWN); 2432 return (SXG_LINK_DOWN);
@@ -2466,19 +2466,19 @@ static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
2466 2466
2467 DBG_ERROR("ENTER %s\n", __func__); 2467 DBG_ERROR("ENTER %s\n", __func__);
2468 2468
2469 // Hold the adapter lock during this routine. Maybe move 2469 /* Hold the adapter lock during this routine. Maybe move */
2470 // the lock to the caller. 2470 /* the lock to the caller. */
2471 spin_lock(&adapter->AdapterLock); 2471 spin_lock(&adapter->AdapterLock);
2472 if (LinkState == adapter->LinkState) { 2472 if (LinkState == adapter->LinkState) {
2473 // Nothing changed.. 2473 /* Nothing changed.. */
2474 spin_unlock(&adapter->AdapterLock); 2474 spin_unlock(&adapter->AdapterLock);
2475 DBG_ERROR("EXIT #0 %s\n", __func__); 2475 DBG_ERROR("EXIT #0 %s\n", __func__);
2476 return; 2476 return;
2477 } 2477 }
2478 // Save the adapter state 2478 /* Save the adapter state */
2479 adapter->LinkState = LinkState; 2479 adapter->LinkState = LinkState;
2480 2480
2481 // Drop the lock and indicate link state 2481 /* Drop the lock and indicate link state */
2482 spin_unlock(&adapter->AdapterLock); 2482 spin_unlock(&adapter->AdapterLock);
2483 DBG_ERROR("EXIT #1 %s\n", __func__); 2483 DBG_ERROR("EXIT #1 %s\n", __func__);
2484 2484
@@ -2501,76 +2501,76 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
2501 u32 DevAddr, u32 RegAddr, u32 Value) 2501 u32 DevAddr, u32 RegAddr, u32 Value)
2502{ 2502{
2503 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2503 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2504 u32 AddrOp; // Address operation (written to MIIM field reg) 2504 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2505 u32 WriteOp; // Write operation (written to MIIM field reg) 2505 u32 WriteOp; /* Write operation (written to MIIM field reg) */
2506 u32 Cmd; // Command (written to MIIM command reg) 2506 u32 Cmd; /* Command (written to MIIM command reg) */
2507 u32 ValueRead; 2507 u32 ValueRead;
2508 u32 Timeout; 2508 u32 Timeout;
2509 2509
2510// DBG_ERROR("ENTER %s\n", __func__); 2510/* DBG_ERROR("ENTER %s\n", __func__); */
2511 2511
2512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2513 adapter, 0, 0, 0); 2513 adapter, 0, 0, 0);
2514 2514
2515 // Ensure values don't exceed field width 2515 /* Ensure values don't exceed field width */
2516 DevAddr &= 0x001F; // 5-bit field 2516 DevAddr &= 0x001F; /* 5-bit field */
2517 RegAddr &= 0xFFFF; // 16-bit field 2517 RegAddr &= 0xFFFF; /* 16-bit field */
2518 Value &= 0xFFFF; // 16-bit field 2518 Value &= 0xFFFF; /* 16-bit field */
2519 2519
2520 // Set MIIM field register bits for an MIIM address operation 2520 /* Set MIIM field register bits for an MIIM address operation */
2521 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2521 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2522 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2522 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2523 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2523 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2524 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2524 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2525 2525
2526 // Set MIIM field register bits for an MIIM write operation 2526 /* Set MIIM field register bits for an MIIM write operation */
2527 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2527 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2528 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2528 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2529 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2529 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2530 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value; 2530 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2531 2531
2532 // Set MIIM command register bits to execute an MIIM command 2532 /* Set MIIM command register bits to execute an MIIM command */
2533 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2533 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2534 2534
2535 // Reset the command register command bit (in case it's not 0) 2535 /* Reset the command register command bit (in case it's not 0) */
2536 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2536 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2537 2537
2538 // MIIM write to set the address of the specified MDIO register 2538 /* MIIM write to set the address of the specified MDIO register */
2539 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2539 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2540 2540
2541 // Write to MIIM Command Register to execute to address operation 2541 /* Write to MIIM Command Register to execute to address operation */
2542 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2542 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2543 2543
2544 // Poll AMIIM Indicator register to wait for completion 2544 /* Poll AMIIM Indicator register to wait for completion */
2545 Timeout = SXG_LINK_TIMEOUT; 2545 Timeout = SXG_LINK_TIMEOUT;
2546 do { 2546 do {
2547 udelay(100); // Timeout in 100us units 2547 udelay(100); /* Timeout in 100us units */
2548 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2548 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2549 if (--Timeout == 0) { 2549 if (--Timeout == 0) {
2550 return (STATUS_FAILURE); 2550 return (STATUS_FAILURE);
2551 } 2551 }
2552 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2552 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2553 2553
2554 // Reset the command register command bit 2554 /* Reset the command register command bit */
2555 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2555 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2556 2556
2557 // MIIM write to set up an MDIO write operation 2557 /* MIIM write to set up an MDIO write operation */
2558 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE); 2558 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2559 2559
2560 // Write to MIIM Command Register to execute the write operation 2560 /* Write to MIIM Command Register to execute the write operation */
2561 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2561 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2562 2562
2563 // Poll AMIIM Indicator register to wait for completion 2563 /* Poll AMIIM Indicator register to wait for completion */
2564 Timeout = SXG_LINK_TIMEOUT; 2564 Timeout = SXG_LINK_TIMEOUT;
2565 do { 2565 do {
2566 udelay(100); // Timeout in 100us units 2566 udelay(100); /* Timeout in 100us units */
2567 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2567 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2568 if (--Timeout == 0) { 2568 if (--Timeout == 0) {
2569 return (STATUS_FAILURE); 2569 return (STATUS_FAILURE);
2570 } 2570 }
2571 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2571 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2572 2572
2573// DBG_ERROR("EXIT %s\n", __func__); 2573/* DBG_ERROR("EXIT %s\n", __func__); */
2574 2574
2575 return (STATUS_SUCCESS); 2575 return (STATUS_SUCCESS);
2576} 2576}
@@ -2591,78 +2591,78 @@ static int sxg_read_mdio_reg(p_adapter_t adapter,
2591 u32 DevAddr, u32 RegAddr, u32 *pValue) 2591 u32 DevAddr, u32 RegAddr, u32 *pValue)
2592{ 2592{
2593 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2593 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2594 u32 AddrOp; // Address operation (written to MIIM field reg) 2594 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2595 u32 ReadOp; // Read operation (written to MIIM field reg) 2595 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2596 u32 Cmd; // Command (written to MIIM command reg) 2596 u32 Cmd; /* Command (written to MIIM command reg) */
2597 u32 ValueRead; 2597 u32 ValueRead;
2598 u32 Timeout; 2598 u32 Timeout;
2599 2599
2600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2601 adapter, 0, 0, 0); 2601 adapter, 0, 0, 0);
2602// DBG_ERROR("ENTER %s\n", __func__); 2602/* DBG_ERROR("ENTER %s\n", __func__); */
2603 2603
2604 // Ensure values don't exceed field width 2604 /* Ensure values don't exceed field width */
2605 DevAddr &= 0x001F; // 5-bit field 2605 DevAddr &= 0x001F; /* 5-bit field */
2606 RegAddr &= 0xFFFF; // 16-bit field 2606 RegAddr &= 0xFFFF; /* 16-bit field */
2607 2607
2608 // Set MIIM field register bits for an MIIM address operation 2608 /* Set MIIM field register bits for an MIIM address operation */
2609 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2609 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2610 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2610 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2611 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2611 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2612 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2612 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2613 2613
2614 // Set MIIM field register bits for an MIIM read operation 2614 /* Set MIIM field register bits for an MIIM read operation */
2615 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2615 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2616 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2616 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2617 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2617 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2618 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT); 2618 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2619 2619
2620 // Set MIIM command register bits to execute an MIIM command 2620 /* Set MIIM command register bits to execute an MIIM command */
2621 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2621 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2622 2622
2623 // Reset the command register command bit (in case it's not 0) 2623 /* Reset the command register command bit (in case it's not 0) */
2624 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2624 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2625 2625
2626 // MIIM write to set the address of the specified MDIO register 2626 /* MIIM write to set the address of the specified MDIO register */
2627 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2627 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2628 2628
2629 // Write to MIIM Command Register to execute to address operation 2629 /* Write to MIIM Command Register to execute to address operation */
2630 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2630 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2631 2631
2632 // Poll AMIIM Indicator register to wait for completion 2632 /* Poll AMIIM Indicator register to wait for completion */
2633 Timeout = SXG_LINK_TIMEOUT; 2633 Timeout = SXG_LINK_TIMEOUT;
2634 do { 2634 do {
2635 udelay(100); // Timeout in 100us units 2635 udelay(100); /* Timeout in 100us units */
2636 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2636 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2637 if (--Timeout == 0) { 2637 if (--Timeout == 0) {
2638 return (STATUS_FAILURE); 2638 return (STATUS_FAILURE);
2639 } 2639 }
2640 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2640 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2641 2641
2642 // Reset the command register command bit 2642 /* Reset the command register command bit */
2643 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2643 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2644 2644
2645 // MIIM write to set up an MDIO register read operation 2645 /* MIIM write to set up an MDIO register read operation */
2646 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE); 2646 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2647 2647
2648 // Write to MIIM Command Register to execute the read operation 2648 /* Write to MIIM Command Register to execute the read operation */
2649 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2649 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2650 2650
2651 // Poll AMIIM Indicator register to wait for completion 2651 /* Poll AMIIM Indicator register to wait for completion */
2652 Timeout = SXG_LINK_TIMEOUT; 2652 Timeout = SXG_LINK_TIMEOUT;
2653 do { 2653 do {
2654 udelay(100); // Timeout in 100us units 2654 udelay(100); /* Timeout in 100us units */
2655 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2655 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2656 if (--Timeout == 0) { 2656 if (--Timeout == 0) {
2657 return (STATUS_FAILURE); 2657 return (STATUS_FAILURE);
2658 } 2658 }
2659 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2659 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2660 2660
2661 // Read the MDIO register data back from the field register 2661 /* Read the MDIO register data back from the field register */
2662 READ_REG(HwRegs->MacAmiimField, *pValue); 2662 READ_REG(HwRegs->MacAmiimField, *pValue);
2663 *pValue &= 0xFFFF; // data is in the lower 16 bits 2663 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
2664 2664
2665// DBG_ERROR("EXIT %s\n", __func__); 2665/* DBG_ERROR("EXIT %s\n", __func__); */
2666 2666
2667 return (STATUS_SUCCESS); 2667 return (STATUS_SUCCESS);
2668} 2668}
@@ -2852,10 +2852,10 @@ static void sxg_mcast_set_mask(p_adapter_t adapter)
2852 * mode as well as ALLMCAST mode. It saves the Microcode from having 2852 * mode as well as ALLMCAST mode. It saves the Microcode from having
2853 * to keep state about the MAC configuration. 2853 * to keep state about the MAC configuration.
2854 */ 2854 */
2855// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); 2855/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
2856 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH); 2856 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2857 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH); 2857 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2858// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); 2858/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
2859 2859
2860 } else { 2860 } else {
2861 /* Commit our multicast mast to the SLIC by writing to the multicast 2861 /* Commit our multicast mast to the SLIC by writing to the multicast
@@ -2878,10 +2878,10 @@ static void sxg_mcast_set_mask(p_adapter_t adapter)
2878static void sxg_unmap_mmio_space(p_adapter_t adapter) 2878static void sxg_unmap_mmio_space(p_adapter_t adapter)
2879{ 2879{
2880#if LINUX_FREES_ADAPTER_RESOURCES 2880#if LINUX_FREES_ADAPTER_RESOURCES
2881// if (adapter->Regs) { 2881/* if (adapter->Regs) { */
2882// iounmap(adapter->Regs); 2882/* iounmap(adapter->Regs); */
2883// } 2883/* } */
2884// adapter->slic_regs = NULL; 2884/* adapter->slic_regs = NULL; */
2885#endif 2885#endif
2886} 2886}
2887 2887
@@ -2909,8 +2909,8 @@ void SxgFreeResources(p_adapter_t adapter)
2909 IsrCount = adapter->MsiEnabled ? RssIds : 1; 2909 IsrCount = adapter->MsiEnabled ? RssIds : 1;
2910 2910
2911 if (adapter->BasicAllocations == FALSE) { 2911 if (adapter->BasicAllocations == FALSE) {
2912 // No allocations have been made, including spinlocks, 2912 /* No allocations have been made, including spinlocks, */
2913 // or listhead initializations. Return. 2913 /* or listhead initializations. Return. */
2914 return; 2914 return;
2915 } 2915 }
2916 2916
@@ -2920,7 +2920,7 @@ void SxgFreeResources(p_adapter_t adapter)
2920 if (!(IsListEmpty(&adapter->AllSglBuffers))) { 2920 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
2921 SxgFreeSglBuffers(adapter); 2921 SxgFreeSglBuffers(adapter);
2922 } 2922 }
2923 // Free event queues. 2923 /* Free event queues. */
2924 if (adapter->EventRings) { 2924 if (adapter->EventRings) {
2925 pci_free_consistent(adapter->pcidev, 2925 pci_free_consistent(adapter->pcidev,
2926 sizeof(SXG_EVENT_RING) * RssIds, 2926 sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2947,17 @@ void SxgFreeResources(p_adapter_t adapter)
2947 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle); 2947 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
2948 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle); 2948 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
2949 2949
2950 // Unmap register spaces 2950 /* Unmap register spaces */
2951 SxgUnmapResources(adapter); 2951 SxgUnmapResources(adapter);
2952 2952
2953 // Deregister DMA 2953 /* Deregister DMA */
2954 if (adapter->DmaHandle) { 2954 if (adapter->DmaHandle) {
2955 SXG_DEREGISTER_DMA(adapter->DmaHandle); 2955 SXG_DEREGISTER_DMA(adapter->DmaHandle);
2956 } 2956 }
2957 // Deregister interrupt 2957 /* Deregister interrupt */
2958 SxgDeregisterInterrupt(adapter); 2958 SxgDeregisterInterrupt(adapter);
2959 2959
2960 // Possibly free system info (5.2 only) 2960 /* Possibly free system info (5.2 only) */
2961 SXG_RELEASE_SYSTEM_INFO(adapter); 2961 SXG_RELEASE_SYSTEM_INFO(adapter);
2962 2962
2963 SxgDiagFreeResources(adapter); 2963 SxgDiagFreeResources(adapter);
@@ -3047,23 +3047,23 @@ static int sxg_allocate_buffer_memory(p_adapter_t adapter,
3047 3047
3048 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem", 3048 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3049 adapter, Size, BufferType, 0); 3049 adapter, Size, BufferType, 0);
3050 // Grab the adapter lock and check the state. 3050 /* Grab the adapter lock and check the state. */
3051 // If we're in anything other than INITIALIZING or 3051 /* If we're in anything other than INITIALIZING or */
3052 // RUNNING state, fail. This is to prevent 3052 /* RUNNING state, fail. This is to prevent */
3053 // allocations in an improper driver state 3053 /* allocations in an improper driver state */
3054 spin_lock(&adapter->AdapterLock); 3054 spin_lock(&adapter->AdapterLock);
3055 3055
3056 // Increment the AllocationsPending count while holding 3056 /* Increment the AllocationsPending count while holding */
3057 // the lock. Pause processing relies on this 3057 /* the lock. Pause processing relies on this */
3058 ++adapter->AllocationsPending; 3058 ++adapter->AllocationsPending;
3059 spin_unlock(&adapter->AdapterLock); 3059 spin_unlock(&adapter->AdapterLock);
3060 3060
3061 // At initialization time allocate resources synchronously. 3061 /* At initialization time allocate resources synchronously. */
3062 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); 3062 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3063 if (Buffer == NULL) { 3063 if (Buffer == NULL) {
3064 spin_lock(&adapter->AdapterLock); 3064 spin_lock(&adapter->AdapterLock);
3065 // Decrement the AllocationsPending count while holding 3065 /* Decrement the AllocationsPending count while holding */
3066 // the lock. Pause processing relies on this 3066 /* the lock. Pause processing relies on this */
3067 --adapter->AllocationsPending; 3067 --adapter->AllocationsPending;
3068 spin_unlock(&adapter->AdapterLock); 3068 spin_unlock(&adapter->AdapterLock);
3069 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1", 3069 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3113,10 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3113 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 3113 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3114 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 3114 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3115 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize)); 3115 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
3116 // First, initialize the contained pool of receive data 3116 /* First, initialize the contained pool of receive data */
3117 // buffers. This initialization requires NBL/NB/MDL allocations, 3117 /* buffers. This initialization requires NBL/NB/MDL allocations, */
3118 // If any of them fail, free the block and return without 3118 /* If any of them fail, free the block and return without */
3119 // queueing the shared memory 3119 /* queueing the shared memory */
3120 RcvDataBuffer = RcvBlock; 3120 RcvDataBuffer = RcvBlock;
3121#if 0 3121#if 0
3122 for (i = 0, Paddr = *PhysicalAddress; 3122 for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3126,14 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3126 for (i = 0, Paddr = PhysicalAddress; 3126 for (i = 0, Paddr = PhysicalAddress;
3127 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3127 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3128 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { 3128 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
3129 // 3129 /* */
3130 RcvDataBufferHdr = 3130 RcvDataBufferHdr =
3131 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer + 3131 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3132 SXG_RCV_DATA_BUFFER_HDR_OFFSET 3132 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3133 (BufferSize)); 3133 (BufferSize));
3134 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; 3134 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
3135 RcvDataBufferHdr->PhysicalAddress = Paddr; 3135 RcvDataBufferHdr->PhysicalAddress = Paddr;
3136 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion 3136 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */
3137 RcvDataBufferHdr->Size = 3137 RcvDataBufferHdr->Size =
3138 SXG_RCV_BUFFER_DATA_SIZE(BufferSize); 3138 SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
3139 3139
@@ -3143,8 +3143,8 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3143 3143
3144 } 3144 }
3145 3145
3146 // Place this entire block of memory on the AllRcvBlocks queue so it can be 3146 /* Place this entire block of memory on the AllRcvBlocks queue so it can be */
3147 // free later 3147 /* free later */
3148 RcvBlockHdr = 3148 RcvBlockHdr =
3149 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock + 3149 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
3150 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize)); 3150 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3155,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3155 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); 3155 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3156 spin_unlock(&adapter->RcvQLock); 3156 spin_unlock(&adapter->RcvQLock);
3157 3157
3158 // Now free the contained receive data buffers that we initialized above 3158 /* Now free the contained receive data buffers that we initialized above */
3159 RcvDataBuffer = RcvBlock; 3159 RcvDataBuffer = RcvBlock;
3160 for (i = 0, Paddr = PhysicalAddress; 3160 for (i = 0, Paddr = PhysicalAddress;
3161 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3161 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3168,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3168 spin_unlock(&adapter->RcvQLock); 3168 spin_unlock(&adapter->RcvQLock);
3169 } 3169 }
3170 3170
3171 // Locate the descriptor block and put it on a separate free queue 3171 /* Locate the descriptor block and put it on a separate free queue */
3172 RcvDescriptorBlock = 3172 RcvDescriptorBlock =
3173 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock + 3173 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
3174 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET 3174 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3186,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3186 adapter, RcvBlock, Length, 0); 3186 adapter, RcvBlock, Length, 0);
3187 return; 3187 return;
3188 fail: 3188 fail:
3189 // Free any allocated resources 3189 /* Free any allocated resources */
3190 if (RcvBlock) { 3190 if (RcvBlock) {
3191 RcvDataBuffer = RcvBlock; 3191 RcvDataBuffer = RcvBlock;
3192 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3192 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3230,7 +3230,7 @@ static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
3230 adapter->AllSglBufferCount++; 3230 adapter->AllSglBufferCount++;
3231 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER)); 3231 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
3232 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ 3232 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
3233 SxgSgl->adapter = adapter; // Initialize backpointer once 3233 SxgSgl->adapter = adapter; /* Initialize backpointer once */
3234 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); 3234 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3235 spin_unlock(&adapter->SglQLock); 3235 spin_unlock(&adapter->SglQLock);
3236 SxgSgl->State = SXG_BUFFER_BUSY; 3236 SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3244,14 @@ static unsigned char temp_mac_address[6] =
3244 3244
3245static void sxg_adapter_set_hwaddr(p_adapter_t adapter) 3245static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3246{ 3246{
3247// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, 3247/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
3248// card->config_set, adapter->port, adapter->physport, adapter->functionnumber); 3248/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
3249// 3249/* */
3250// sxg_dbg_macaddrs(adapter); 3250/* sxg_dbg_macaddrs(adapter); */
3251 3251
3252 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC)); 3252 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
3253// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); 3253/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
3254// sxg_dbg_macaddrs(adapter); 3254/* sxg_dbg_macaddrs(adapter); */
3255 if (!(adapter->currmacaddr[0] || 3255 if (!(adapter->currmacaddr[0] ||
3256 adapter->currmacaddr[1] || 3256 adapter->currmacaddr[1] ||
3257 adapter->currmacaddr[2] || 3257 adapter->currmacaddr[2] ||
@@ -3262,7 +3262,7 @@ static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3262 if (adapter->netdev) { 3262 if (adapter->netdev) {
3263 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); 3263 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3264 } 3264 }
3265// DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); 3265/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3266 sxg_dbg_macaddrs(adapter); 3266 sxg_dbg_macaddrs(adapter);
3267 3267
3268} 3268}
@@ -3321,68 +3321,68 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
3321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt", 3321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3322 adapter, 0, 0, 0); 3322 adapter, 0, 0, 0);
3323 3323
3324 RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter); 3324 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
3325 IsrCount = adapter->MsiEnabled ? RssIds : 1; 3325 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3326 3326
3327 // Sanity check SXG_UCODE_REGS structure definition to 3327 /* Sanity check SXG_UCODE_REGS structure definition to */
3328 // make sure the length is correct 3328 /* make sure the length is correct */
3329 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU); 3329 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
3330 3330
3331 // Disable interrupts 3331 /* Disable interrupts */
3332 SXG_DISABLE_ALL_INTERRUPTS(adapter); 3332 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3333 3333
3334 // Set MTU 3334 /* Set MTU */
3335 ASSERT((adapter->FrameSize == ETHERMAXFRAME) || 3335 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3336 (adapter->FrameSize == JUMBOMAXFRAME)); 3336 (adapter->FrameSize == JUMBOMAXFRAME));
3337 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE); 3337 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3338 3338
3339 // Set event ring base address and size 3339 /* Set event ring base address and size */
3340 WRITE_REG64(adapter, 3340 WRITE_REG64(adapter,
3341 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0); 3341 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3342 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE); 3342 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3343 3343
3344 // Per-ISR initialization 3344 /* Per-ISR initialization */
3345 for (i = 0; i < IsrCount; i++) { 3345 for (i = 0; i < IsrCount; i++) {
3346 u64 Addr; 3346 u64 Addr;
3347 // Set interrupt status pointer 3347 /* Set interrupt status pointer */
3348 Addr = adapter->PIsr + (i * sizeof(u32)); 3348 Addr = adapter->PIsr + (i * sizeof(u32));
3349 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i); 3349 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3350 } 3350 }
3351 3351
3352 // XMT ring zero index 3352 /* XMT ring zero index */
3353 WRITE_REG64(adapter, 3353 WRITE_REG64(adapter,
3354 adapter->UcodeRegs[0].SPSendIndex, 3354 adapter->UcodeRegs[0].SPSendIndex,
3355 adapter->PXmtRingZeroIndex, 0); 3355 adapter->PXmtRingZeroIndex, 0);
3356 3356
3357 // Per-RSS initialization 3357 /* Per-RSS initialization */
3358 for (i = 0; i < RssIds; i++) { 3358 for (i = 0; i < RssIds; i++) {
3359 // Release all event ring entries to the Microcode 3359 /* Release all event ring entries to the Microcode */
3360 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE, 3360 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3361 TRUE); 3361 TRUE);
3362 } 3362 }
3363 3363
3364 // Transmit ring base and size 3364 /* Transmit ring base and size */
3365 WRITE_REG64(adapter, 3365 WRITE_REG64(adapter,
3366 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0); 3366 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3367 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE); 3367 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3368 3368
3369 // Receive ring base and size 3369 /* Receive ring base and size */
3370 WRITE_REG64(adapter, 3370 WRITE_REG64(adapter,
3371 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0); 3371 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3372 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE); 3372 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3373 3373
3374 // Populate the card with receive buffers 3374 /* Populate the card with receive buffers */
3375 sxg_stock_rcv_buffers(adapter); 3375 sxg_stock_rcv_buffers(adapter);
3376 3376
3377 // Initialize checksum offload capabilities. At the moment 3377 /* Initialize checksum offload capabilities. At the moment */
3378 // we always enable IP and TCP receive checksums on the card. 3378 /* we always enable IP and TCP receive checksums on the card. */
3379 // Depending on the checksum configuration specified by the 3379 /* Depending on the checksum configuration specified by the */
3380 // user, we can choose to report or ignore the checksum 3380 /* user, we can choose to report or ignore the checksum */
3381 // information provided by the card. 3381 /* information provided by the card. */
3382 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum, 3382 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3383 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE); 3383 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3384 3384
3385 // Initialize the MAC, XAUI 3385 /* Initialize the MAC, XAUI */
3386 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__); 3386 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
3387 status = sxg_initialize_link(adapter); 3387 status = sxg_initialize_link(adapter);
3388 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__, 3388 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
@@ -3390,8 +3390,8 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
3390 if (status != STATUS_SUCCESS) { 3390 if (status != STATUS_SUCCESS) {
3391 return (status); 3391 return (status);
3392 } 3392 }
3393 // Initialize Dead to FALSE. 3393 /* Initialize Dead to FALSE. */
3394 // SlicCheckForHang or SlicDumpThread will take it from here. 3394 /* SlicCheckForHang or SlicDumpThread will take it from here. */
3395 adapter->Dead = FALSE; 3395 adapter->Dead = FALSE;
3396 adapter->PingOutstanding = FALSE; 3396 adapter->PingOutstanding = FALSE;
3397 3397
@@ -3428,14 +3428,14 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3428 3428
3429 ASSERT(RcvDescriptorBlockHdr); 3429 ASSERT(RcvDescriptorBlockHdr);
3430 3430
3431 // If we don't have the resources to fill the descriptor block, 3431 /* If we don't have the resources to fill the descriptor block, */
3432 // return failure 3432 /* return failure */
3433 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) || 3433 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3434 SXG_RING_FULL(RcvRingInfo)) { 3434 SXG_RING_FULL(RcvRingInfo)) {
3435 adapter->Stats.NoMem++; 3435 adapter->Stats.NoMem++;
3436 return (STATUS_FAILURE); 3436 return (STATUS_FAILURE);
3437 } 3437 }
3438 // Get a ring descriptor command 3438 /* Get a ring descriptor command */
3439 SXG_GET_CMD(RingZero, 3439 SXG_GET_CMD(RingZero,
3440 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); 3440 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3441 ASSERT(RingDescriptorCmd); 3441 ASSERT(RingDescriptorCmd);
@@ -3443,7 +3443,7 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3443 RcvDescriptorBlock = 3443 RcvDescriptorBlock =
3444 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress; 3444 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
3445 3445
3446 // Fill in the descriptor block 3446 /* Fill in the descriptor block */
3447 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { 3447 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3448 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); 3448 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3449 ASSERT(RcvDataBufferHdr); 3449 ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3454,13 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3454 RcvDescriptorBlock->Descriptors[i].PhysicalAddress = 3454 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3455 RcvDataBufferHdr->PhysicalAddress; 3455 RcvDataBufferHdr->PhysicalAddress;
3456 } 3456 }
3457 // Add the descriptor block to receive descriptor ring 0 3457 /* Add the descriptor block to receive descriptor ring 0 */
3458 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress; 3458 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3459 3459
3460 // RcvBuffersOnCard is not protected via the receive lock (see 3460 /* RcvBuffersOnCard is not protected via the receive lock (see */
3461 // sxg_process_event_queue) We don't want to grab a lock every time a 3461 /* sxg_process_event_queue) We don't want to grab a lock every time a */
3462 // buffer is returned to us, so we use atomic interlocked functions 3462 /* buffer is returned to us, so we use atomic interlocked functions */
3463 // instead. 3463 /* instead. */
3464 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK; 3464 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3465 3465
3466 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk", 3466 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3490,10 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3490 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf", 3490 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3491 adapter, adapter->RcvBuffersOnCard, 3491 adapter, adapter->RcvBuffersOnCard,
3492 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); 3492 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3493 // First, see if we've got less than our minimum threshold of 3493 /* First, see if we've got less than our minimum threshold of */
3494 // receive buffers, there isn't an allocation in progress, and 3494 /* receive buffers, there isn't an allocation in progress, and */
3495 // we haven't exceeded our maximum... get another block of buffers 3495 /* we haven't exceeded our maximum... get another block of buffers */
3496 // None of this needs to be SMP safe. It's round numbers. 3496 /* None of this needs to be SMP safe. It's round numbers. */
3497 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) && 3497 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3498 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) && 3498 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3499 (adapter->AllocationsPending == 0)) { 3499 (adapter->AllocationsPending == 0)) {
@@ -3502,12 +3502,12 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3502 ReceiveBufferSize), 3502 ReceiveBufferSize),
3503 SXG_BUFFER_TYPE_RCV); 3503 SXG_BUFFER_TYPE_RCV);
3504 } 3504 }
3505 // Now grab the RcvQLock lock and proceed 3505 /* Now grab the RcvQLock lock and proceed */
3506 spin_lock(&adapter->RcvQLock); 3506 spin_lock(&adapter->RcvQLock);
3507 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 3507 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3508 PLIST_ENTRY _ple; 3508 PLIST_ENTRY _ple;
3509 3509
3510 // Get a descriptor block 3510 /* Get a descriptor block */
3511 RcvDescriptorBlockHdr = NULL; 3511 RcvDescriptorBlockHdr = NULL;
3512 if (adapter->FreeRcvBlockCount) { 3512 if (adapter->FreeRcvBlockCount) {
3513 _ple = RemoveHeadList(&adapter->FreeRcvBlocks); 3513 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3519,14 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3519 } 3519 }
3520 3520
3521 if (RcvDescriptorBlockHdr == NULL) { 3521 if (RcvDescriptorBlockHdr == NULL) {
3522 // Bail out.. 3522 /* Bail out.. */
3523 adapter->Stats.NoMem++; 3523 adapter->Stats.NoMem++;
3524 break; 3524 break;
3525 } 3525 }
3526 // Fill in the descriptor block and give it to the card 3526 /* Fill in the descriptor block and give it to the card */
3527 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3527 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3528 STATUS_FAILURE) { 3528 STATUS_FAILURE) {
3529 // Free the descriptor block 3529 /* Free the descriptor block */
3530 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3530 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3531 RcvDescriptorBlockHdr); 3531 RcvDescriptorBlockHdr);
3532 break; 3532 break;
@@ -3560,15 +3560,15 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3560 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks", 3560 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
3561 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); 3561 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3562 3562
3563 // Now grab the RcvQLock lock and proceed 3563 /* Now grab the RcvQLock lock and proceed */
3564 spin_lock(&adapter->RcvQLock); 3564 spin_lock(&adapter->RcvQLock);
3565 ASSERT(Index != RcvRingInfo->Tail); 3565 ASSERT(Index != RcvRingInfo->Tail);
3566 while (RcvRingInfo->Tail != Index) { 3566 while (RcvRingInfo->Tail != Index) {
3567 // 3567 /* */
3568 // Locate the current Cmd (ring descriptor entry), and 3568 /* Locate the current Cmd (ring descriptor entry), and */
3569 // associated receive descriptor block, and advance 3569 /* associated receive descriptor block, and advance */
3570 // the tail 3570 /* the tail */
3571 // 3571 /* */
3572 SXG_RETURN_CMD(RingZero, 3572 SXG_RETURN_CMD(RingZero,
3573 RcvRingInfo, 3573 RcvRingInfo,
3574 RingDescriptorCmd, RcvDescriptorBlockHdr); 3574 RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3576,12 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3576 RcvRingInfo->Head, RcvRingInfo->Tail, 3576 RcvRingInfo->Head, RcvRingInfo->Tail,
3577 RingDescriptorCmd, RcvDescriptorBlockHdr); 3577 RingDescriptorCmd, RcvDescriptorBlockHdr);
3578 3578
3579 // Clear the SGL field 3579 /* Clear the SGL field */
3580 RingDescriptorCmd->Sgl = 0; 3580 RingDescriptorCmd->Sgl = 0;
3581 // Attempt to refill it and hand it right back to the 3581 /* Attempt to refill it and hand it right back to the */
3582 // card. If we fail to refill it, free the descriptor block 3582 /* card. If we fail to refill it, free the descriptor block */
3583 // header. The card will be restocked later via the 3583 /* header. The card will be restocked later via the */
3584 // RcvBuffersOnCard test 3584 /* RcvBuffersOnCard test */
3585 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3585 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3586 STATUS_FAILURE) { 3586 STATUS_FAILURE) {
3587 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3587 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 870eef3f9d50..2222ae91fd97 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
13/******************************************************************************* 13/*******************************************************************************
14 * Configuration space 14 * Configuration space
15 *******************************************************************************/ 15 *******************************************************************************/
16// PCI Vendor ID 16/* PCI Vendor ID */
17#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID 17#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
18 18
19// PCI Device ID 19// PCI Device ID
20#define SXG_DEVICE_ID 0x0009 // Sahara Device ID 20#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
21 21
22// 22//
23// Subsystem IDs. 23// Subsystem IDs.
@@ -623,48 +623,48 @@ typedef struct _RCV_BUF_HDR {
623 * Queue definitions 623 * Queue definitions
624 *****************************************************************************/ 624 *****************************************************************************/
625 625
626// Ingress (read only) queue numbers 626/* Ingress (read only) queue numbers */
627#define PXY_BUF_Q 0 // Proxy Buffer Queue 627#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
628#define HST_EVT_Q 1 // Host Event Queue 628#define HST_EVT_Q 1 /* Host Event Queue */
629#define XMT_BUF_Q 2 // Transmit Buffer Queue 629#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
630#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue 630#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
631#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue 631#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
632#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue 632#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
633#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue 633#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
634#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context 634#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
635// Local (read/write) queue numbers 635/* Local (read/write) queue numbers */
636#define LOCAL_A_Q 8 // Spare local Queue 636#define LOCAL_A_Q 8 /* Spare local Queue */
637#define LOCAL_B_Q 9 // Spare local Queue 637#define LOCAL_B_Q 9 /* Spare local Queue */
638#define LOCAL_C_Q 10 // Spare local Queue 638#define LOCAL_C_Q 10 /* Spare local Queue */
639#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue 639#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
640#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue 640#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
641#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue 641#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
642#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue 642#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
643#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue 643#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
644// Egress (write only) queue numbers 644/* Egress (write only) queue numbers */
645#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue 645#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
646#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue 646#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
647#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue 647#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
648#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue 648#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
649#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue 649#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
650#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue 650#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
651#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue 651#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
652#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue 652#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
653#define PXH_CMD_Q 24 // High Priority Proxy Command Queue 653#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
654#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue 654#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
655#define RCV_BUF_Q 26 // Receive Buffer Queue 655#define RCV_BUF_Q 26 /* Receive Buffer Queue */
656 656
657// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) 657/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
658#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue 658#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
659#define PXY_SIZE_16 0x00000000 // copy 16 bytes 659#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
660#define PXY_SIZE_32 0x00100000 // copy 32 bytes 660#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
661 661
662/***************************************************************************** 662/*****************************************************************************
663 * SXG EEPROM/Flash Configuration Definitions 663 * SXG EEPROM/Flash Configuration Definitions
664 *****************************************************************************/ 664 *****************************************************************************/
665#pragma pack(push, 1) 665#pragma pack(push, 1)
666 666
667// 667/* */
668typedef struct _HW_CFG_DATA { 668typedef struct _HW_CFG_DATA {
669 ushort Addr; 669 ushort Addr;
670 union { 670 union {
@@ -673,22 +673,22 @@ typedef struct _HW_CFG_DATA {
673 }; 673 };
674} HW_CFG_DATA, *PHW_CFG_DATA; 674} HW_CFG_DATA, *PHW_CFG_DATA;
675 675
676// 676/* */
677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4) 677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
678 678
679// MAC address 679/* MAC address */
680typedef struct _SXG_CONFIG_MAC { 680typedef struct _SXG_CONFIG_MAC {
681 unsigned char MacAddr[6]; // MAC Address 681 unsigned char MacAddr[6]; /* MAC Address */
682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC; 682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
683 683
684// 684/* */
685typedef struct _ATK_FRU { 685typedef struct _ATK_FRU {
686 unsigned char PartNum[6]; 686 unsigned char PartNum[6];
687 unsigned char Revision[2]; 687 unsigned char Revision[2];
688 unsigned char Serial[14]; 688 unsigned char Serial[14];
689} ATK_FRU, *PATK_FRU; 689} ATK_FRU, *PATK_FRU;
690 690
691// OEM FRU Format types 691/* OEM FRU Format types */
692#define ATK_FRU_FORMAT 0x0000 692#define ATK_FRU_FORMAT 0x0000
693#define CPQ_FRU_FORMAT 0x0001 693#define CPQ_FRU_FORMAT 0x0001
694#define DELL_FRU_FORMAT 0x0002 694#define DELL_FRU_FORMAT 0x0002
@@ -697,24 +697,24 @@ typedef struct _ATK_FRU {
697#define EMC_FRU_FORMAT 0x0005 697#define EMC_FRU_FORMAT 0x0005
698#define NO_FRU_FORMAT 0xFFFF 698#define NO_FRU_FORMAT 0xFFFF
699 699
700// EEPROM/Flash Format 700/* EEPROM/Flash Format */
701typedef struct _SXG_CONFIG { 701typedef struct _SXG_CONFIG {
702 // 702 /* */
703 // Section 1 (128 bytes) 703 /* Section 1 (128 bytes) */
704 // 704 /* */
705 ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5' 705 ushort MagicWord; /* EEPROM/FLASH Magic code 'A5A5' */
706 ushort SpiClks; // SPI bus clock dividers 706 ushort SpiClks; /* SPI bus clock dividers */
707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES]; 707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
708 // 708 /* */
709 // 709 /* */
710 // 710 /* */
711 ushort Version; // EEPROM format version 711 ushort Version; /* EEPROM format version */
712 SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses 712 SXG_CONFIG_MAC MacAddr[4]; /* space for 4 MAC addresses */
713 ATK_FRU AtkFru; // FRU information 713 ATK_FRU AtkFru; /* FRU information */
714 ushort OemFruFormat; // OEM FRU format type 714 ushort OemFruFormat; /* OEM FRU format type */
715 unsigned char OemFru[76]; // OEM FRU information (optional) 715 unsigned char OemFru[76]; /* OEM FRU information (optional) */
716 ushort Checksum; // Checksum of section 2 716 ushort Checksum; /* Checksum of section 2 */
717 // CS info XXXTODO 717 /* CS info XXXTODO */
718} SXG_CONFIG, *PSXG_CONFIG; 718} SXG_CONFIG, *PSXG_CONFIG;
719#pragma pack(pop) 719#pragma pack(pop)
720 720