author     Jon Mason <jon.mason@exar.com>          2010-12-10 09:02:56 -0500
committer  David S. Miller <davem@davemloft.net>   2010-12-10 19:08:21 -0500
commit     528f727279ae840db8a06c94f5e82cdaeb00da6f (patch)
tree       ab2cd139152c9bc7809298d046b77783472f5c3d /drivers/net/vxge/vxge-traffic.c
parent     deef4b522b814593407cfd56216840c2b75e9f15 (diff)
vxge: code cleanup and reorganization

Move function locations to remove the need for internal declarations and
other misc clean-ups.

Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Arpit Patel <arpit.patel@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/vxge/vxge-traffic.c')
-rw-r--r--  drivers/net/vxge/vxge-traffic.c  773
1 file changed, 380 insertions(+), 393 deletions(-)
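The cleanup pattern behind this commit is worth spelling out: when a static function is defined above its first caller, the file-scope forward declaration becomes redundant and can be deleted. A minimal sketch of the idea, using hypothetical helper()/caller() names rather than code from this driver:

	/* Before: caller() precedes helper(), so a forward declaration
	 * of helper() is needed at the top of the file. */
	static int helper(int x);

	static int caller(void)
	{
		return helper(42);
	}

	static int helper(int x)
	{
		return x * 2;
	}

After the move, the definition itself serves as the declaration:

	/* After: helper() is defined above its only caller, so the
	 * forward declaration can simply be dropped. */
	static int helper(int x)
	{
		return x * 2;
	}

	static int caller(void)
	{
		return helper(42);
	}

This is exactly what the patch does with __vxge_hw_device_handle_error() and __vxge_hw_vpath_alarm_process(): their definitions (along with the link up/down indication handlers they sit beside) move above their first use, and the two static forward declarations near the top of vxge-traffic.c are removed.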
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842..42cc29843ac7 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
 #include "vxge-config.h"
 #include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
-	u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-	u32 skip_alarms);
-
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
 }
 
 /**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+	enum vxge_hw_event type)
+{
+	switch (type) {
+	case VXGE_HW_EVENT_UNKNOWN:
+		break;
+	case VXGE_HW_EVENT_RESET_START:
+	case VXGE_HW_EVENT_RESET_COMPLETE:
+	case VXGE_HW_EVENT_LINK_DOWN:
+	case VXGE_HW_EVENT_LINK_UP:
+		goto out;
+	case VXGE_HW_EVENT_ALARM_CLEARED:
+		goto out;
+	case VXGE_HW_EVENT_ECCERR:
+	case VXGE_HW_EVENT_MRPCIM_ECCERR:
+		goto out;
+	case VXGE_HW_EVENT_FIFO_ERR:
+	case VXGE_HW_EVENT_VPATH_ERR:
+	case VXGE_HW_EVENT_CRITICAL_ERR:
+	case VXGE_HW_EVENT_SERR:
+		break;
+	case VXGE_HW_EVENT_SRPCIM_SERR:
+	case VXGE_HW_EVENT_MRPCIM_SERR:
+		goto out;
+	case VXGE_HW_EVENT_SLOT_FREEZE:
+		break;
+	default:
+		vxge_assert(0);
+		goto out;
+	}
+
+	/* notify driver */
+	if (hldev->uld_callbacks.crit_err)
+		hldev->uld_callbacks.crit_err(
+			(struct __vxge_hw_device *)hldev,
+			type, vp_id);
+out:
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the previous link state is not down, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_DOWN)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_DOWN;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_down)
+		hldev->uld_callbacks.link_down(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the previous link state is not down, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_UP)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_UP;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_up)
+		hldev->uld_callbacks.link_up(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+	u32 skip_alarms)
+{
+	u64 val64;
+	u64 alarm_status;
+	u64 pic_status;
+	struct __vxge_hw_device *hldev = NULL;
+	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+	u64 mask64;
+	struct vxge_hw_vpath_stats_sw_info *sw_stats;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath == NULL) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out2;
+	}
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+			alarm_event);
+		goto out;
+	}
+
+	sw_stats = vpath->sw_stats;
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		sw_stats->error_stats.unknown_alarms++;
+
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			    (!(val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+			    ((val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+			    (!(val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+			    ))) {
+				sw_stats->error_stats.network_sustained_fault++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+				&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_down_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+			}
+
+			if (((val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			    (!(val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+			    ((val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+			    (!(val64 &
+			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+			    ))) {
+
+				sw_stats->error_stats.network_sustained_ok++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+				&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_up_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_UP, alarm_event);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+
+			alarm_event = VXGE_HW_SET_LEVEL(
+				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+			if (skip_alarms)
+				return VXGE_HW_OK;
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+		pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+			val64 = readq(&vp_reg->general_errors_reg);
+			mask64 = readq(&vp_reg->general_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+			    ~mask64) {
+				sw_stats->error_stats.ini_serr_det++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_SERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+			    ~mask64) {
+				sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+			    ~mask64)
+				sw_stats->error_stats.statsb_pif_chain_error++;
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+			    ~mask64)
+				sw_stats->error_stats.statsb_drop_timeout++;
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+			    ~mask64)
+				sw_stats->error_stats.target_illegal_access++;
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->general_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+			val64 = readq(&vp_reg->kdfcctl_errors_reg);
+			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->kdfcctl_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+		val64 = readq(&vp_reg->wrdma_alarm_status);
+
+		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+			val64 = readq(&vp_reg->prc_alarm_reg);
+			mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
+			    ~mask64)
+				sw_stats->error_stats.prc_ring_bumps++;
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+			    ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+			    & ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+			    & ~mask64) {
+				sw_stats->error_stats.prc_quanta_size_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->prc_alarm_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+	}
+out:
+	hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+		return VXGE_HW_OK;
+
+	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+	if (alarm_event == VXGE_HW_EVENT_SERR)
+		return VXGE_HW_ERR_CRITICAL;
+
+	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+		VXGE_HW_ERR_SLOT_FREEZE :
+		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+		VXGE_HW_ERR_VPATH;
+}
+
+/**
  * vxge_hw_device_begin_irq - Begin IRQ processing.
  * @hldev: HW device handle.
  * @skip_alarms: Do not clear the alarms
@@ -513,108 +884,6 @@ exit:
 	return ret;
 }
 
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_UP)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_UP;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_up)
-		hldev->uld_callbacks.link_up(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_DOWN)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_DOWN;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_down)
-		hldev->uld_callbacks.link_down(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(
-	struct __vxge_hw_device *hldev,
-	u32 vp_id,
-	enum vxge_hw_event type)
-{
-	switch (type) {
-	case VXGE_HW_EVENT_UNKNOWN:
-		break;
-	case VXGE_HW_EVENT_RESET_START:
-	case VXGE_HW_EVENT_RESET_COMPLETE:
-	case VXGE_HW_EVENT_LINK_DOWN:
-	case VXGE_HW_EVENT_LINK_UP:
-		goto out;
-	case VXGE_HW_EVENT_ALARM_CLEARED:
-		goto out;
-	case VXGE_HW_EVENT_ECCERR:
-	case VXGE_HW_EVENT_MRPCIM_ECCERR:
-		goto out;
-	case VXGE_HW_EVENT_FIFO_ERR:
-	case VXGE_HW_EVENT_VPATH_ERR:
-	case VXGE_HW_EVENT_CRITICAL_ERR:
-	case VXGE_HW_EVENT_SERR:
-		break;
-	case VXGE_HW_EVENT_SRPCIM_SERR:
-	case VXGE_HW_EVENT_MRPCIM_SERR:
-		goto out;
-	case VXGE_HW_EVENT_SLOT_FREEZE:
-		break;
-	default:
-		vxge_assert(0);
-		goto out;
-	}
-
-	/* notify driver */
-	if (hldev->uld_callbacks.crit_err)
-		hldev->uld_callbacks.crit_err(
-			(struct __vxge_hw_device *)hldev,
-			type, vp_id);
-out:
-
-	return VXGE_HW_OK;
-}
-
 /**
  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  * condition that has caused the Tx and RX interrupt.
@@ -699,8 +968,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
-	void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
 {
 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-	struct __vxge_hw_channel *channel;
-
-	channel = &ring->channel;
-
 	wmb();
 	vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
@@ -1868,284 +2133,6 @@ exit:
 }
 
 /*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-	u32 skip_alarms)
-{
-	u64 val64;
-	u64 alarm_status;
-	u64 pic_status;
-	struct __vxge_hw_device *hldev = NULL;
-	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-	u64 mask64;
-	struct vxge_hw_vpath_stats_sw_info *sw_stats;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath == NULL) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out2;
-	}
-
-	hldev = vpath->hldev;
-	vp_reg = vpath->vp_reg;
-	alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-	if (alarm_status == VXGE_HW_ALL_FOXES) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-			alarm_event);
-		goto out;
-	}
-
-	sw_stats = vpath->sw_stats;
-
-	if (alarm_status & ~(
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-		sw_stats->error_stats.unknown_alarms++;
-
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out;
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-		val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-		if (val64 &
-		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-			if (((val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-			    (!(val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-			    ((val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-			    (!(val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-			    ))) {
-				sw_stats->error_stats.network_sustained_fault++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-				&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_down_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-			}
-
-			if (((val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-			    (!(val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-			    ((val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-			    (!(val64 &
-			    VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-			    ))) {
-
-				sw_stats->error_stats.network_sustained_ok++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-				&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_up_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_UP, alarm_event);
-			}
-
-			writeq(VXGE_HW_INTR_MASK_ALL,
-				&vp_reg->asic_ntwk_vp_err_reg);
-
-			alarm_event = VXGE_HW_SET_LEVEL(
-				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
-			if (skip_alarms)
-				return VXGE_HW_OK;
-		}
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-		pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-			val64 = readq(&vp_reg->general_errors_reg);
-			mask64 = readq(&vp_reg->general_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-			    ~mask64) {
-				sw_stats->error_stats.ini_serr_det++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_SERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-			    ~mask64) {
-				sw_stats->error_stats.dblgen_fifo0_overflow++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-			    ~mask64)
-				sw_stats->error_stats.statsb_pif_chain_error++;
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-			    ~mask64)
-				sw_stats->error_stats.statsb_drop_timeout++;
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-			    ~mask64)
-				sw_stats->error_stats.target_illegal_access++;
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->general_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
-			val64 = readq(&vp_reg->kdfcctl_errors_reg);
-			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->kdfcctl_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
-		val64 = readq(&vp_reg->wrdma_alarm_status);
-
-		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
-			val64 = readq(&vp_reg->prc_alarm_reg);
-			mask64 = readq(&vp_reg->prc_alarm_mask);
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
-			    ~mask64)
-				sw_stats->error_stats.prc_ring_bumps++;
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-			    ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-			    & ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-			    & ~mask64) {
-				sw_stats->error_stats.prc_quanta_size_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->prc_alarm_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-	}
-out:
-	hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
-		return VXGE_HW_OK;
-
-	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
-	if (alarm_event == VXGE_HW_EVENT_SERR)
-		return VXGE_HW_ERR_CRITICAL;
-
-	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-		VXGE_HW_ERR_SLOT_FREEZE :
-		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-		VXGE_HW_ERR_VPATH;
-}
-
-/*
  * vxge_hw_vpath_alarm_process - Process Alarms.
  * @vpath: Virtual Path.
  * @skip_alarms: Do not clear the alarms