author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/vxge/vxge-traffic.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/net/vxge/vxge-traffic.c'):
 drivers/net/vxge/vxge-traffic.c | 931
 1 file changed, 475 insertions(+), 456 deletions(-)
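
Note on the vxge changes below: the new TIM helpers program the vpath
restriction timer by scaling a microsecond value with (rtimer * 1000) / 272,
which implies one hardware timer tick is roughly 272 ns. A minimal standalone
sketch of that conversion (the function name and sample value are illustrative
assumptions, not part of the patch):

	#include <stdint.h>

	/* Sketch only: scale a microsecond moderation interval to Titan TIM
	 * restriction-timer ticks, assuming ~272 ns per tick as implied by
	 * the (rtimer * 1000) / 272 expression in the helpers below. */
	static inline uint64_t usecs_to_tim_rtimer_ticks(uint64_t usecs)
	{
		return (usecs * 1000) / 272;	/* us -> ns -> ~272 ns ticks */
	}

	/* e.g. 4 us -> (4 * 1000) / 272 = 14 ticks (integer division) */
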
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f99cb3..f93517055162 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -12,6 +12,7 @@
  * Copyright(c) 2002-2010 Exar Corp.
  ******************************************************************************/
 #include <linux/etherdevice.h>
+#include <linux/prefetch.h>
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
@@ -218,6 +219,68 @@ exit:
 	return status;
 }
 
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	struct vxge_hw_vp_config *config;
+	u64 val64;
+
+	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+		return;
+
+	vp_reg = fifo->vp_reg;
+	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+		fifo->tim_tti_cfg1_saved = val64;
+		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+	}
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+	u64 val64 = ring->tim_rti_cfg1_saved;
+
+	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+	ring->tim_rti_cfg1_saved = val64;
+	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+	u64 val64 = fifo->tim_tti_cfg3_saved;
+	u64 timer = (fifo->rtimer * 1000) / 272;
+
+	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+	if (timer)
+		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+	/* tti_cfg3_saved is not updated again because it is
+	 * initialized at one place only - init time.
+	 */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+	u64 val64 = ring->tim_rti_cfg3_saved;
+	u64 timer = (ring->rtimer * 1000) / 272;
+
+	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+	if (timer)
+		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+	/* rti_cfg3_saved is not updated again because it is
+	 * initialized at one place only - init time.
+	 */
+}
+
 /**
  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  * @channeh: Channel for rx or tx handle
@@ -254,6 +317,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 }
 
 /**
+ * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ * if configured in MSIX oneshot mode
+ *
+ * Returns: 0
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+	__vxge_hw_pio_mem_write32_upper(
+		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
  * vxge_hw_device_set_intr_type - Updates the configuration
  *		with new interrupt type.
  * @hldev: HW device handle.
@@ -412,6 +492,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
 }
 
 /**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+			      enum vxge_hw_event type)
+{
+	switch (type) {
+	case VXGE_HW_EVENT_UNKNOWN:
+		break;
+	case VXGE_HW_EVENT_RESET_START:
+	case VXGE_HW_EVENT_RESET_COMPLETE:
+	case VXGE_HW_EVENT_LINK_DOWN:
+	case VXGE_HW_EVENT_LINK_UP:
+		goto out;
+	case VXGE_HW_EVENT_ALARM_CLEARED:
+		goto out;
+	case VXGE_HW_EVENT_ECCERR:
+	case VXGE_HW_EVENT_MRPCIM_ECCERR:
+		goto out;
+	case VXGE_HW_EVENT_FIFO_ERR:
+	case VXGE_HW_EVENT_VPATH_ERR:
+	case VXGE_HW_EVENT_CRITICAL_ERR:
+	case VXGE_HW_EVENT_SERR:
+		break;
+	case VXGE_HW_EVENT_SRPCIM_SERR:
+	case VXGE_HW_EVENT_MRPCIM_SERR:
+		goto out;
+	case VXGE_HW_EVENT_SLOT_FREEZE:
+		break;
+	default:
+		vxge_assert(0);
+		goto out;
+	}
+
+	/* notify driver */
+	if (hldev->uld_callbacks.crit_err)
+		hldev->uld_callbacks.crit_err(
+			(struct __vxge_hw_device *)hldev,
+			type, vp_id);
+out:
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the previous link state is not down, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_DOWN)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_DOWN;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_down)
+		hldev->uld_callbacks.link_down(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the previous link state is not down, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_UP)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_UP;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_up)
+		hldev->uld_callbacks.link_up(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+			      u32 skip_alarms)
+{
+	u64 val64;
+	u64 alarm_status;
+	u64 pic_status;
+	struct __vxge_hw_device *hldev = NULL;
+	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+	u64 mask64;
+	struct vxge_hw_vpath_stats_sw_info *sw_stats;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath == NULL) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out2;
+	}
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+			alarm_event);
+		goto out;
+	}
+
+	sw_stats = vpath->sw_stats;
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		sw_stats->error_stats.unknown_alarms++;
+
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+			    ((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+			    ))) {
+				sw_stats->error_stats.network_sustained_fault++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+				&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_down_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+			}
+
+			if (((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+			    ((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+			    ))) {
+
+				sw_stats->error_stats.network_sustained_ok++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+				&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_up_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_UP, alarm_event);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+			       &vp_reg->asic_ntwk_vp_err_reg);
+
+			alarm_event = VXGE_HW_SET_LEVEL(
+				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+			if (skip_alarms)
+				return VXGE_HW_OK;
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+		pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+			val64 = readq(&vp_reg->general_errors_reg);
+			mask64 = readq(&vp_reg->general_errors_mask);
+
+			if ((val64 &
+				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+				~mask64) {
+				sw_stats->error_stats.ini_serr_det++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_SERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+				~mask64) {
+				sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+				~mask64)
+				sw_stats->error_stats.statsb_pif_chain_error++;
+
+			if ((val64 &
+			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+				~mask64)
+				sw_stats->error_stats.statsb_drop_timeout++;
+
+			if ((val64 &
+				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+				~mask64)
+				sw_stats->error_stats.target_illegal_access++;
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+				       &vp_reg->general_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+			val64 = readq(&vp_reg->kdfcctl_errors_reg);
+			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+				       &vp_reg->kdfcctl_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+		val64 = readq(&vp_reg->wrdma_alarm_status);
+
+		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+			val64 = readq(&vp_reg->prc_alarm_reg);
+			mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
+				~mask64)
+				sw_stats->error_stats.prc_ring_bumps++;
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+				~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+				& ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+				& ~mask64) {
+				sw_stats->error_stats.prc_quanta_size_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+				       &vp_reg->prc_alarm_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+	}
+out:
+	hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+		return VXGE_HW_OK;
+
+	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+	if (alarm_event == VXGE_HW_EVENT_SERR)
+		return VXGE_HW_ERR_CRITICAL;
+
+	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+		VXGE_HW_ERR_SLOT_FREEZE :
+		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+		VXGE_HW_ERR_VPATH;
+}
+
+/**
  * vxge_hw_device_begin_irq - Begin IRQ processing.
  * @hldev: HW device handle.
  * @skip_alarms: Do not clear the alarms
@@ -506,108 +964,6 @@ exit:
 	return ret;
 }
 
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_UP)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_UP;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_up)
-		hldev->uld_callbacks.link_up(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_DOWN)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_DOWN;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_down)
-		hldev->uld_callbacks.link_down(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
-		struct __vxge_hw_device *hldev,
-		u32 vp_id,
-		enum vxge_hw_event type)
-{
-	switch (type) {
-	case VXGE_HW_EVENT_UNKNOWN:
-		break;
-	case VXGE_HW_EVENT_RESET_START:
-	case VXGE_HW_EVENT_RESET_COMPLETE:
-	case VXGE_HW_EVENT_LINK_DOWN:
-	case VXGE_HW_EVENT_LINK_UP:
-		goto out;
-	case VXGE_HW_EVENT_ALARM_CLEARED:
-		goto out;
-	case VXGE_HW_EVENT_ECCERR:
-	case VXGE_HW_EVENT_MRPCIM_ECCERR:
-		goto out;
-	case VXGE_HW_EVENT_FIFO_ERR:
-	case VXGE_HW_EVENT_VPATH_ERR:
-	case VXGE_HW_EVENT_CRITICAL_ERR:
-	case VXGE_HW_EVENT_SERR:
-		break;
-	case VXGE_HW_EVENT_SRPCIM_SERR:
-	case VXGE_HW_EVENT_MRPCIM_SERR:
-		goto out;
-	case VXGE_HW_EVENT_SLOT_FREEZE:
-		break;
-	default:
-		vxge_assert(0);
-		goto out;
-	}
-
-	/* notify driver */
-	if (hldev->uld_callbacks.crit_err)
-		hldev->uld_callbacks.crit_err(
-			(struct __vxge_hw_device *)hldev,
-			type, vp_id);
-out:
-
-	return VXGE_HW_OK;
-}
-
 /**
  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  * condition that has caused the Tx and RX interrupt.
@@ -646,7 +1002,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  * it swaps the reserve and free arrays.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
 {
 	void **tmp_arr;
@@ -692,7 +1048,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
 {
 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -755,7 +1112,7 @@ void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
  * vxge_hw_channel_dtr_count
  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
  *
- * Retreive number of DTRs available. This function can not be called
+ * Retrieve number of DTRs available. This function can not be called
  * from data path. ring_initial_replenishi() is the only user.
  */
 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
@@ -903,10 +1260,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-	struct __vxge_hw_channel *channel;
-
-	channel = &ring->channel;
-
 	wmb();
 	vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
@@ -967,7 +1320,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
 	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
 
 	/* check whether it is not the end */
-	if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
+	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
 
 		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
 		0);
@@ -1658,37 +2011,6 @@ exit:
 }
 
 /**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-	u64 data;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	if (vp == NULL) {
-		status = VXGE_HW_ERR_INVALID_HANDLE;
-		goto exit;
-	}
-
-	status = __vxge_hw_vpath_rts_table_get(vp,
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-		0, vid, &data);
-
-	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-	return status;
-}
-
-/**
  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  *               to vlan id table.
  * @vp: Vpath handle.
@@ -1739,7 +2061,7 @@ enum vxge_hw_status vxge_hw_vpath_promisc_enable(
 
 	vpath = vp->vpath;
 
-	/* Enable promiscous mode for function 0 only */
+	/* Enable promiscuous mode for function 0 only */
 	if (!(vpath->hldev->access_rights &
 	    VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
 		return VXGE_HW_OK;
@@ -1891,284 +2213,6 @@ exit:
 }
 
 /*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
-	struct __vxge_hw_virtualpath *vpath,
-	u32 skip_alarms)
-{
-	u64 val64;
-	u64 alarm_status;
-	u64 pic_status;
-	struct __vxge_hw_device *hldev = NULL;
-	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-	u64 mask64;
-	struct vxge_hw_vpath_stats_sw_info *sw_stats;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath == NULL) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out2;
-	}
-
-	hldev = vpath->hldev;
-	vp_reg = vpath->vp_reg;
-	alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-	if (alarm_status == VXGE_HW_ALL_FOXES) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-			alarm_event);
-		goto out;
-	}
-
-	sw_stats = vpath->sw_stats;
-
-	if (alarm_status & ~(
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-		sw_stats->error_stats.unknown_alarms++;
-
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out;
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-		val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-		if (val64 &
-		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-			if (((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-			    ((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-			    ))) {
-				sw_stats->error_stats.network_sustained_fault++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-				&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_down_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-			}
-
-			if (((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-			    ((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-			    ))) {
-
-				sw_stats->error_stats.network_sustained_ok++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-				&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_up_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_UP, alarm_event);
-			}
-
-			writeq(VXGE_HW_INTR_MASK_ALL,
-			       &vp_reg->asic_ntwk_vp_err_reg);
-
-			alarm_event = VXGE_HW_SET_LEVEL(
-				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
-			if (skip_alarms)
-				return VXGE_HW_OK;
-		}
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-		pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-			val64 = readq(&vp_reg->general_errors_reg);
-			mask64 = readq(&vp_reg->general_errors_mask);
-
-			if ((val64 &
-				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-				~mask64) {
-				sw_stats->error_stats.ini_serr_det++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_SERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-				~mask64) {
-				sw_stats->error_stats.dblgen_fifo0_overflow++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-				~mask64)
-				sw_stats->error_stats.statsb_pif_chain_error++;
-
-			if ((val64 &
-			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-				~mask64)
-				sw_stats->error_stats.statsb_drop_timeout++;
-
-			if ((val64 &
-				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-				~mask64)
-				sw_stats->error_stats.target_illegal_access++;
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-				       &vp_reg->general_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
-			val64 = readq(&vp_reg->kdfcctl_errors_reg);
-			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-				       &vp_reg->kdfcctl_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
-		val64 = readq(&vp_reg->wrdma_alarm_status);
-
-		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
-			val64 = readq(&vp_reg->prc_alarm_reg);
-			mask64 = readq(&vp_reg->prc_alarm_mask);
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
-				~mask64)
-				sw_stats->error_stats.prc_ring_bumps++;
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-				~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-				& ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-				& ~mask64) {
-				sw_stats->error_stats.prc_quanta_size_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-				       &vp_reg->prc_alarm_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-	}
-out:
-	hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
-		return VXGE_HW_OK;
-
-	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
-	if (alarm_event == VXGE_HW_EVENT_SERR)
-		return VXGE_HW_ERR_CRITICAL;
-
-	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-		VXGE_HW_ERR_SLOT_FREEZE :
-		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-		VXGE_HW_ERR_VPATH;
-}
-
-/*
  * vxge_hw_vpath_alarm_process - Process Alarms.
  * @vpath: Virtual Path.
  * @skip_alarms: Do not clear the alarms
@@ -2227,19 +2271,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 	if (vpath->hldev->config.intr_mode ==
 			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+			VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+			0, 32), &vp_reg->one_shot_vect0_en);
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 			VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
 			0, 32), &vp_reg->one_shot_vect1_en);
-	}
-
-	if (vpath->hldev->config.intr_mode ==
-			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 			VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
 			0, 32), &vp_reg->one_shot_vect2_en);
-
-		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
-			VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
-			0, 32), &vp_reg->one_shot_vect3_en);
 	}
 }
 
@@ -2276,22 +2315,18 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
  * status.
  * See also:
  */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
-	if (hldev->config.intr_mode ==
-			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
-		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-			&hldev->common_reg->
-				clr_msix_one_shot_vec[msix_id%4]);
-	} else {
-		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-			&hldev->common_reg->
-				clear_msix_mask_vect[msix_id%4]);
-	}
+
+	if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+	else
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
 }
 
 /**
2316} 2351}
2317 2352
2318/** 2353/**
2319 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2320 * @vp: Virtual Path handle.
2321 *
2322 * The function masks all msix interrupt for the given vpath
2323 *
2324 */
2325void
2326vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2327{
2328
2329 __vxge_hw_pio_mem_write32_upper(
2330 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2331 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2332}
2333
2334/**
2335 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. 2354 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2336 * @vp: Virtual Path handle. 2355 * @vp: Virtual Path handle.
2337 * 2356 *