author | Jon Mason <jon.mason@exar.com> | 2010-12-10 09:02:56 -0500
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2010-12-10 19:08:21 -0500
commit | 528f727279ae840db8a06c94f5e82cdaeb00da6f (patch) |
tree | ab2cd139152c9bc7809298d046b77783472f5c3d /drivers/net |
parent | deef4b522b814593407cfd56216840c2b75e9f15 (diff) |
vxge: code cleanup and reorganization
Move function locations to remove the need for internal declarations, and
perform other misc clean-ups.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Arpit Patel <arpit.patel@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
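The pattern behind most of this patch is mechanical: once a static function is defined above its first caller, the file-scope forward declaration becomes redundant and can be deleted. A minimal standalone illustration of the idea (not code from the patch):

```c
/* Before: caller() precedes helper(), so a forward declaration is needed. */
static int helper(int x);	/* this is the kind of line the patch deletes */

static int caller(int x)
{
	return helper(x) + 1;
}

static int helper(int x)	/* moving this above caller() makes the
				 * declaration at the top redundant */
{
	return x * 2;
}

int main(void)
{
	return caller(20) == 41 ? 0 : 1;
}
```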
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/vxge/vxge-config.c | 2481
-rw-r--r-- | drivers/net/vxge/vxge-config.h | 34
-rw-r--r-- | drivers/net/vxge/vxge-main.c | 474
-rw-r--r-- | drivers/net/vxge/vxge-main.h | 8
-rw-r--r-- | drivers/net/vxge/vxge-traffic.c | 773
-rw-r--r-- | drivers/net/vxge/vxge-traffic.h | 21
6 files changed, 1812 insertions, 1979 deletions
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a0241fe72d8b..1169aa387cab 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -21,100 +21,15 @@
21 | #include "vxge-config.h" | 21 | #include "vxge-config.h" |
22 | #include "vxge-main.h" | 22 | #include "vxge-main.h" |
23 | 23 | ||
24 | static enum vxge_hw_status | 24 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ |
25 | __vxge_hw_fifo_delete( | 25 | status = __vxge_hw_vpath_stats_access(vpath, \ |
26 | struct __vxge_hw_vpath_handle *vpath_handle); | 26 | VXGE_HW_STATS_OP_READ, \ |
27 | 27 | offset, \ | |
28 | static struct __vxge_hw_blockpool_entry * | 28 | &val64); \ |
29 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, | 29 | if (status != VXGE_HW_OK) \ |
30 | u32 size); | 30 | return status; \ |
31 | |||
32 | static void | ||
33 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, | ||
34 | struct __vxge_hw_blockpool_entry *entry); | ||
35 | |||
36 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
37 | void *block_addr, | ||
38 | u32 length, | ||
39 | struct pci_dev *dma_h, | ||
40 | struct pci_dev *acc_handle); | ||
41 | |||
42 | static enum vxge_hw_status | ||
43 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
44 | struct __vxge_hw_blockpool *blockpool, | ||
45 | u32 pool_size, | ||
46 | u32 pool_max); | ||
47 | |||
48 | static void | ||
49 | __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); | ||
50 | |||
51 | static void * | ||
52 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, | ||
53 | u32 size, | ||
54 | struct vxge_hw_mempool_dma *dma_object); | ||
55 | |||
56 | static void | ||
57 | __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, | ||
58 | void *memblock, | ||
59 | u32 size, | ||
60 | struct vxge_hw_mempool_dma *dma_object); | ||
61 | |||
62 | static void | ||
63 | __vxge_hw_channel_free( | ||
64 | struct __vxge_hw_channel *channel); | ||
65 | |||
66 | static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp); | ||
67 | |||
68 | static enum vxge_hw_status | ||
69 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); | ||
70 | |||
71 | static enum vxge_hw_status | ||
72 | __vxge_hw_device_register_poll( | ||
73 | void __iomem *reg, | ||
74 | u64 mask, u32 max_millis); | ||
75 | |||
76 | static inline enum vxge_hw_status | ||
77 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, | ||
78 | u64 mask, u32 max_millis) | ||
79 | { | ||
80 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
81 | wmb(); | ||
82 | |||
83 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
84 | wmb(); | ||
85 | |||
86 | return __vxge_hw_device_register_poll(addr, mask, max_millis); | ||
87 | } | 31 | } |
88 | 32 | ||
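The new VXGE_HW_VPATH_STATS_PIO_READ macro above is a statement macro with a hidden control transfer: it expands inside the calling function, reuses the caller's `vpath`, `status`, and `val64` variables, and returns from the caller when a read fails. A standalone sketch of the same construction, with invented names:

```c
#include <stdio.h>

enum status { OK, FAIL };

/* Toy stand-in for a PIO read; fails on odd offsets to exercise the path. */
static enum status dev_read(unsigned offset, unsigned long long *val)
{
	if (offset & 1)
		return FAIL;
	*val = offset * 2ULL;
	return OK;
}

/*
 * Like the driver's macro, this expands to a statement block that writes
 * into the caller's 'status' and 'val' variables and returns from the
 * *calling* function when the read fails.
 */
#define PIO_READ(offset) { \
	status = dev_read(offset, &val); \
	if (status != OK) \
		return status; \
}

static enum status read_two_counters(unsigned long long *a,
				     unsigned long long *b)
{
	enum status status;
	unsigned long long val;

	PIO_READ(0);	/* a failure here returns from read_two_counters() */
	*a = val;
	PIO_READ(2);
	*b = val;
	return OK;
}

int main(void)
{
	unsigned long long a = 0, b = 0;
	enum status s = read_two_counters(&a, &b);

	printf("status=%d a=%llu b=%llu\n", s, a, b);
	return 0;
}
```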
89 | static struct vxge_hw_mempool* | ||
90 | __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size, | ||
91 | u32 item_size, u32 private_size, u32 items_initial, | ||
92 | u32 items_max, struct vxge_hw_mempool_cbs *mp_callback, | ||
93 | void *userdata); | ||
94 | |||
95 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool); | ||
96 | |||
97 | static enum vxge_hw_status | ||
98 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
99 | struct vxge_hw_vpath_stats_hw_info *hw_stats); | ||
100 | |||
101 | static enum vxge_hw_status | ||
102 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle); | ||
103 | |||
104 | static enum vxge_hw_status | ||
105 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); | ||
106 | |||
107 | static void | ||
108 | __vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id); | ||
109 | |||
110 | static enum vxge_hw_status | ||
111 | __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
112 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats); | ||
113 | |||
114 | static enum vxge_hw_status | ||
115 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
116 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); | ||
117 | |||
118 | static void | 33 | static void |
119 | vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) | 34 | vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) |
120 | { | 35 | { |
@@ -124,8 +39,6 @@ vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
124 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | 39 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); |
125 | writeq(val64, &vp_reg->rxmac_vcfg0); | 40 | writeq(val64, &vp_reg->rxmac_vcfg0); |
126 | val64 = readq(&vp_reg->rxmac_vcfg0); | 41 | val64 = readq(&vp_reg->rxmac_vcfg0); |
127 | |||
128 | return; | ||
129 | } | 42 | } |
130 | 43 | ||
131 | /* | 44 | /* |
@@ -197,6 +110,50 @@ void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
197 | } | 110 | } |
198 | } | 111 | } |
199 | 112 | ||
113 | /* | ||
114 | * __vxge_hw_device_register_poll | ||
115 | * Will poll a certain register for a specified amount of time. ||
116 | * Will poll until the masked bit is cleared. ||
117 | */ | ||
118 | static enum vxge_hw_status | ||
119 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
120 | { | ||
121 | u64 val64; | ||
122 | u32 i = 0; | ||
123 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
124 | |||
125 | udelay(10); | ||
126 | |||
127 | do { | ||
128 | val64 = readq(reg); | ||
129 | if (!(val64 & mask)) | ||
130 | return VXGE_HW_OK; | ||
131 | udelay(100); | ||
132 | } while (++i <= 9); | ||
133 | |||
134 | i = 0; | ||
135 | do { | ||
136 | val64 = readq(reg); | ||
137 | if (!(val64 & mask)) | ||
138 | return VXGE_HW_OK; | ||
139 | mdelay(1); | ||
140 | } while (++i <= max_millis); | ||
141 | |||
142 | return ret; | ||
143 | } | ||
144 | |||
145 | static inline enum vxge_hw_status | ||
146 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, | ||
147 | u64 mask, u32 max_millis) | ||
148 | { | ||
149 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
150 | wmb(); | ||
151 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
152 | wmb(); | ||
153 | |||
154 | return __vxge_hw_device_register_poll(addr, mask, max_millis); | ||
155 | } | ||
156 | |||
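The relocated __vxge_hw_device_register_poll uses a two-phase wait: roughly 1 ms of fine udelay() spins to catch fast completions, then mdelay() ticks up to max_millis; __vxge_hw_pio_mem_write64 splits the 64-bit write into two 32-bit halves with wmb() barriers before polling. A userspace model of the polling strategy, with nanosleep standing in for udelay/mdelay (an illustrative sketch, not driver code):

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static void sleep_us(long us)
{
	struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };
	nanosleep(&ts, NULL);
}

/* Poll until the masked bits read back as zero, or give up. */
static int register_poll(volatile uint64_t *reg, uint64_t mask,
			 unsigned max_millis)
{
	unsigned i;

	/* Phase 1: ~1 ms of fine-grained polling for fast completions. */
	for (i = 0; i < 10; i++) {
		if (!(*reg & mask))
			return 0;
		sleep_us(100);
	}

	/* Phase 2: coarse 1 ms steps up to the caller's budget. */
	for (i = 0; i < max_millis; i++) {
		if (!(*reg & mask))
			return 0;
		sleep_us(1000);
	}

	return -1;	/* timed out with the bit still set */
}

int main(void)
{
	volatile uint64_t fake_reg = 0;	/* device "completed" immediately */

	printf("poll result: %d\n",
	       register_poll(&fake_reg, 1ULL << 63, 100));
	return 0;
}
```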
200 | static enum vxge_hw_status | 157 | static enum vxge_hw_status |
201 | vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, | 158 | vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, |
202 | u32 fw_memo, u32 offset, u64 *data0, u64 *data1, | 159 | u32 fw_memo, u32 offset, u64 *data0, u64 *data1, |
@@ -446,77 +403,6 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
446 | } | 403 | } |
447 | 404 | ||
448 | /* | 405 | /* |
449 | * __vxge_hw_channel_allocate - Allocate memory for channel | ||
450 | * This function allocates required memory for the channel and various arrays | ||
451 | * in the channel | ||
452 | */ | ||
453 | static struct __vxge_hw_channel * | ||
454 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
455 | enum __vxge_hw_channel_type type, | ||
456 | u32 length, u32 per_dtr_space, void *userdata) | ||
457 | { | ||
458 | struct __vxge_hw_channel *channel; | ||
459 | struct __vxge_hw_device *hldev; | ||
460 | int size = 0; | ||
461 | u32 vp_id; | ||
462 | |||
463 | hldev = vph->vpath->hldev; | ||
464 | vp_id = vph->vpath->vp_id; | ||
465 | |||
466 | switch (type) { | ||
467 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
468 | size = sizeof(struct __vxge_hw_fifo); | ||
469 | break; | ||
470 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
471 | size = sizeof(struct __vxge_hw_ring); | ||
472 | break; | ||
473 | default: | ||
474 | break; | ||
475 | } | ||
476 | |||
477 | channel = kzalloc(size, GFP_KERNEL); | ||
478 | if (channel == NULL) | ||
479 | goto exit0; | ||
480 | INIT_LIST_HEAD(&channel->item); | ||
481 | |||
482 | channel->common_reg = hldev->common_reg; | ||
483 | channel->first_vp_id = hldev->first_vp_id; | ||
484 | channel->type = type; | ||
485 | channel->devh = hldev; | ||
486 | channel->vph = vph; | ||
487 | channel->userdata = userdata; | ||
488 | channel->per_dtr_space = per_dtr_space; | ||
489 | channel->length = length; | ||
490 | channel->vp_id = vp_id; | ||
491 | |||
492 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
493 | if (channel->work_arr == NULL) | ||
494 | goto exit1; | ||
495 | |||
496 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
497 | if (channel->free_arr == NULL) | ||
498 | goto exit1; | ||
499 | channel->free_ptr = length; | ||
500 | |||
501 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
502 | if (channel->reserve_arr == NULL) | ||
503 | goto exit1; | ||
504 | channel->reserve_ptr = length; | ||
505 | channel->reserve_top = 0; | ||
506 | |||
507 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
508 | if (channel->orig_arr == NULL) | ||
509 | goto exit1; | ||
510 | |||
511 | return channel; | ||
512 | exit1: | ||
513 | __vxge_hw_channel_free(channel); | ||
514 | |||
515 | exit0: | ||
516 | return NULL; | ||
517 | } | ||
518 | |||
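The __vxge_hw_channel_allocate removed here (it reappears later in the file) uses the kernel's goto-ladder unwind: each allocation failure jumps to a label whose cleanup tolerates half-built state, since kfree(NULL) is a no-op. A minimal standalone sketch of the idiom; the driver open-codes sizeof(void *)*length with kzalloc, where kcalloc would be the overflow-checked equivalent:

```c
#include <stdlib.h>

struct channel {
	void **work_arr;
	void **free_arr;
	void **reserve_arr;
};

static void channel_free(struct channel *ch)
{
	/* free(NULL) is a no-op, so partially built channels are safe
	 * here, just as kfree(NULL) is in the kernel version. */
	free(ch->work_arr);
	free(ch->free_arr);
	free(ch->reserve_arr);
	free(ch);
}

static struct channel *channel_allocate(size_t length)
{
	struct channel *ch = calloc(1, sizeof(*ch));

	if (ch == NULL)
		goto exit0;

	ch->work_arr = calloc(length, sizeof(void *));
	if (ch->work_arr == NULL)
		goto exit1;

	ch->free_arr = calloc(length, sizeof(void *));
	if (ch->free_arr == NULL)
		goto exit1;

	ch->reserve_arr = calloc(length, sizeof(void *));
	if (ch->reserve_arr == NULL)
		goto exit1;

	return ch;

exit1:	/* one label undoes every earlier allocation */
	channel_free(ch);
exit0:
	return NULL;
}

int main(void)
{
	struct channel *ch = channel_allocate(128);

	if (ch)
		channel_free(ch);
	return 0;
}
```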
519 | /* | ||
520 | * __vxge_hw_channel_free - Free memory allocated for channel | 406 | * __vxge_hw_channel_free - Free memory allocated for channel |
521 | * This function deallocates memory from the channel and various arrays | 407 | * This function deallocates memory from the channel and various arrays |
522 | * in the channel | 408 | * in the channel |
@@ -609,38 +495,6 @@ static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
609 | pci_save_state(hldev->pdev); | 495 | pci_save_state(hldev->pdev); |
610 | } | 496 | } |
611 | 497 | ||
612 | /* | ||
613 | * __vxge_hw_device_register_poll | ||
614 | * Will poll a certain register for a specified amount of time. |||
615 | * Will poll until the masked bit is cleared. |||
616 | */ | ||
617 | static enum vxge_hw_status | ||
618 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
619 | { | ||
620 | u64 val64; | ||
621 | u32 i = 0; | ||
622 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
623 | |||
624 | udelay(10); | ||
625 | |||
626 | do { | ||
627 | val64 = readq(reg); | ||
628 | if (!(val64 & mask)) | ||
629 | return VXGE_HW_OK; | ||
630 | udelay(100); | ||
631 | } while (++i <= 9); | ||
632 | |||
633 | i = 0; | ||
634 | do { | ||
635 | val64 = readq(reg); | ||
636 | if (!(val64 & mask)) | ||
637 | return VXGE_HW_OK; | ||
638 | mdelay(1); | ||
639 | } while (++i <= max_millis); | ||
640 | |||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset | 498 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset |
645 | * in progress | 499 | * in progress |
646 | * This routine checks the vpath reset in progress register is turned zero | 500 | * This routine checks the vpath reset in progress register is turned zero |
@@ -656,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
656 | } | 510 | } |
657 | 511 | ||
658 | /* | 512 | /* |
513 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. ||
514 | * Set the swapper bits appropriately for the legacy section. ||
515 | */ | ||
516 | static enum vxge_hw_status | ||
517 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
518 | { | ||
519 | u64 val64; | ||
520 | enum vxge_hw_status status = VXGE_HW_OK; | ||
521 | |||
522 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
523 | |||
524 | wmb(); | ||
525 | |||
526 | switch (val64) { | ||
527 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
528 | return status; | ||
529 | |||
530 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
531 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
532 | &legacy_reg->pifm_rd_swap_en); | ||
533 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
534 | &legacy_reg->pifm_rd_flip_en); | ||
535 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
536 | &legacy_reg->pifm_wr_swap_en); | ||
537 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
538 | &legacy_reg->pifm_wr_flip_en); | ||
539 | break; | ||
540 | |||
541 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
542 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
543 | &legacy_reg->pifm_rd_swap_en); | ||
544 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
545 | &legacy_reg->pifm_wr_swap_en); | ||
546 | break; | ||
547 | |||
548 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
549 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
550 | &legacy_reg->pifm_rd_flip_en); | ||
551 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
552 | &legacy_reg->pifm_wr_flip_en); | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | wmb(); | ||
557 | |||
558 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
559 | |||
560 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
561 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
562 | |||
563 | return status; | ||
564 | } | ||
565 | |||
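The swapper setup reads a known 64-bit pattern back from the device and, from whichever distortion arrives (byte-swapped, bit-flipped, or both), decides which read/write swap enables to program before re-checking the pattern. A hedged standalone model of the detection step; the magic constants below are invented, not the Titan values:

```c
#include <stdint.h>
#include <stdio.h>

#define MAGIC          0x0123456789abcdefULL
#define MAGIC_BYTESWAP 0xefcdab8967452301ULL	/* bswap64(MAGIC) */

static uint64_t bswap64(uint64_t v)
{
	v = (v >> 32) | (v << 32);
	v = ((v & 0xffff0000ffff0000ULL) >> 16) |
	    ((v & 0x0000ffff0000ffffULL) << 16);
	v = ((v & 0xff00ff00ff00ff00ULL) >> 8) |
	    ((v & 0x00ff00ff00ff00ffULL) << 8);
	return v;
}

/* Decide what fixup reads need, based on how the magic value arrived. */
static const char *classify(uint64_t seen)
{
	if (seen == MAGIC)
		return "no swap needed";
	if (seen == MAGIC_BYTESWAP)
		return "enable byte swap";
	return "unknown distortion";
}

int main(void)
{
	/* Simulate a device whose bus byte-swaps every 64-bit read. */
	printf("%s\n", classify(bswap64(MAGIC)));
	return 0;
}
```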
566 | /* | ||
659 | * __vxge_hw_device_toc_get | 567 | * __vxge_hw_device_toc_get |
660 | * This routine sets the swapper and reads the toc pointer and returns the | 568 | * This routine sets the swapper and reads the toc pointer and returns the |
661 | * memory mapped address of the toc | 569 | * memory mapped address of the toc |
@@ -1132,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
1132 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | 1040 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); |
1133 | 1041 | ||
1134 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1042 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
1135 | |||
1136 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | 1043 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) |
1137 | continue; | 1044 | continue; |
1138 | 1045 | ||
@@ -1196,6 +1103,218 @@ exit:
1196 | } | 1103 | } |
1197 | 1104 | ||
1198 | /* | 1105 | /* |
1106 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
1107 | */ | ||
1108 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
1109 | { | ||
1110 | struct __vxge_hw_device *hldev; | ||
1111 | struct list_head *p, *n; | ||
1112 | u16 ret; | ||
1113 | |||
1114 | if (blockpool == NULL) { | ||
1115 | ret = 1; | ||
1116 | goto exit; | ||
1117 | } | ||
1118 | |||
1119 | hldev = blockpool->hldev; | ||
1120 | |||
1121 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
1122 | pci_unmap_single(hldev->pdev, | ||
1123 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
1124 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
1125 | PCI_DMA_BIDIRECTIONAL); | ||
1126 | |||
1127 | vxge_os_dma_free(hldev->pdev, | ||
1128 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
1129 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1130 | |||
1131 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1132 | kfree(p); | ||
1133 | blockpool->pool_size--; | ||
1134 | } | ||
1135 | |||
1136 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
1137 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1138 | kfree((void *)p); | ||
1139 | } | ||
1140 | ret = 0; | ||
1141 | exit: | ||
1142 | return; | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * __vxge_hw_blockpool_create - Create block pool | ||
1147 | */ | ||
1148 | static enum vxge_hw_status | ||
1149 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1150 | struct __vxge_hw_blockpool *blockpool, | ||
1151 | u32 pool_size, | ||
1152 | u32 pool_max) | ||
1153 | { | ||
1154 | u32 i; | ||
1155 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
1156 | void *memblock; | ||
1157 | dma_addr_t dma_addr; | ||
1158 | struct pci_dev *dma_handle; | ||
1159 | struct pci_dev *acc_handle; | ||
1160 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1161 | |||
1162 | if (blockpool == NULL) { | ||
1163 | status = VXGE_HW_FAIL; | ||
1164 | goto blockpool_create_exit; | ||
1165 | } | ||
1166 | |||
1167 | blockpool->hldev = hldev; | ||
1168 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
1169 | blockpool->pool_size = 0; | ||
1170 | blockpool->pool_max = pool_max; | ||
1171 | blockpool->req_out = 0; | ||
1172 | |||
1173 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
1174 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
1175 | |||
1176 | for (i = 0; i < pool_size + pool_max; i++) { | ||
1177 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1178 | GFP_KERNEL); | ||
1179 | if (entry == NULL) { | ||
1180 | __vxge_hw_blockpool_destroy(blockpool); | ||
1181 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1182 | goto blockpool_create_exit; | ||
1183 | } | ||
1184 | list_add(&entry->item, &blockpool->free_entry_list); | ||
1185 | } | ||
1186 | |||
1187 | for (i = 0; i < pool_size; i++) { | ||
1188 | memblock = vxge_os_dma_malloc( | ||
1189 | hldev->pdev, | ||
1190 | VXGE_HW_BLOCK_SIZE, | ||
1191 | &dma_handle, | ||
1192 | &acc_handle); | ||
1193 | if (memblock == NULL) { | ||
1194 | __vxge_hw_blockpool_destroy(blockpool); | ||
1195 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1196 | goto blockpool_create_exit; | ||
1197 | } | ||
1198 | |||
1199 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
1200 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
1201 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
1202 | dma_addr))) { | ||
1203 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
1204 | __vxge_hw_blockpool_destroy(blockpool); | ||
1205 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1206 | goto blockpool_create_exit; | ||
1207 | } | ||
1208 | |||
1209 | if (!list_empty(&blockpool->free_entry_list)) | ||
1210 | entry = (struct __vxge_hw_blockpool_entry *) | ||
1211 | list_first_entry(&blockpool->free_entry_list, | ||
1212 | struct __vxge_hw_blockpool_entry, | ||
1213 | item); | ||
1214 | |||
1215 | if (entry == NULL) | ||
1216 | entry = | ||
1217 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1218 | GFP_KERNEL); | ||
1219 | if (entry != NULL) { | ||
1220 | list_del(&entry->item); | ||
1221 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
1222 | entry->memblock = memblock; | ||
1223 | entry->dma_addr = dma_addr; | ||
1224 | entry->acc_handle = acc_handle; | ||
1225 | entry->dma_handle = dma_handle; | ||
1226 | list_add(&entry->item, | ||
1227 | &blockpool->free_block_list); | ||
1228 | blockpool->pool_size++; | ||
1229 | } else { | ||
1230 | __vxge_hw_blockpool_destroy(blockpool); | ||
1231 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1232 | goto blockpool_create_exit; | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | blockpool_create_exit: | ||
1237 | return status; | ||
1238 | } | ||
1239 | |||
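The block pool front-loads its expensive work: pool_size blocks are allocated and DMA-mapped up front, plus pool_max spare entry structs, so later requests on hot paths reduce to list operations. A standalone model of the free-list mechanics, with plain malloc standing in for vxge_os_dma_malloc/pci_map_single (an illustrative sketch, not the driver API):

```c
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 4096

struct entry {
	struct entry *next;
	void *memblock;	/* NULL for a spare entry with no block attached */
};

struct blockpool {
	struct entry *free_blocks;	/* entries carrying a usable block */
	struct entry *free_entries;	/* spare entry structs */
	unsigned pool_size;
};

/* Pre-allocate pool_size backed blocks plus pool_max spare entries. */
static int blockpool_create(struct blockpool *bp, unsigned pool_size,
			    unsigned pool_max)
{
	unsigned i;

	bp->free_blocks = NULL;
	bp->free_entries = NULL;
	bp->pool_size = 0;

	for (i = 0; i < pool_size + pool_max; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		if (e == NULL)
			return -1;	/* real code unwinds the pool */

		if (i < pool_size) {
			e->memblock = malloc(BLOCK_SIZE);
			if (e->memblock == NULL) {
				free(e);
				return -1;
			}
			e->next = bp->free_blocks;
			bp->free_blocks = e;
			bp->pool_size++;
		} else {
			e->next = bp->free_entries;
			bp->free_entries = e;
		}
	}
	return 0;
}

int main(void)
{
	struct blockpool bp;

	if (blockpool_create(&bp, 4, 4) == 0)
		printf("pool ready with %u blocks\n", bp.pool_size);
	return 0;	/* sketch: leaks at exit, unlike the driver's destroy */
}
```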
1240 | /* | ||
1241 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | ||
1242 | * Check the fifo configuration | ||
1243 | */ | ||
1244 | static enum vxge_hw_status | ||
1245 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | ||
1246 | { | ||
1247 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | ||
1248 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | ||
1249 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
1250 | |||
1251 | return VXGE_HW_OK; | ||
1252 | } | ||
1253 | |||
1254 | /* | ||
1255 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | ||
1256 | * Check the vpath configuration | ||
1257 | */ | ||
1258 | static enum vxge_hw_status | ||
1259 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | ||
1260 | { | ||
1261 | enum vxge_hw_status status; | ||
1262 | |||
1263 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | ||
1264 | (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) | ||
1265 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | ||
1266 | |||
1267 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | ||
1268 | if (status != VXGE_HW_OK) | ||
1269 | return status; | ||
1270 | |||
1271 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | ||
1272 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1273 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1274 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1275 | |||
1276 | if ((vp_config->rpa_strip_vlan_tag != | ||
1277 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | ||
1278 | (vp_config->rpa_strip_vlan_tag != | ||
1279 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1280 | (vp_config->rpa_strip_vlan_tag != | ||
1281 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1282 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1283 | |||
1284 | return VXGE_HW_OK; | ||
1285 | } | ||
1286 | |||
1287 | /* | ||
1288 | * __vxge_hw_device_config_check - Check device configuration. | ||
1289 | * Check the device configuration | ||
1290 | */ | ||
1291 | static enum vxge_hw_status | ||
1292 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | ||
1293 | { | ||
1294 | u32 i; | ||
1295 | enum vxge_hw_status status; | ||
1296 | |||
1297 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | ||
1298 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | ||
1299 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | ||
1300 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | ||
1301 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1302 | |||
1303 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | ||
1304 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | ||
1305 | return VXGE_HW_BADCFG_RTS_MAC_EN; | ||
1306 | |||
1307 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1308 | status = __vxge_hw_device_vpath_config_check( | ||
1309 | &new_config->vp_config[i]); | ||
1310 | if (status != VXGE_HW_OK) | ||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | return VXGE_HW_OK; | ||
1315 | } | ||
1316 | |||
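The _config_check helpers validate one layer each and return a specific VXGE_HW_BADCFG_* code, which lets the device-level check stop at the first offending vpath. The same shape in a standalone sketch, with made-up limits rather than the driver's constants:

```c
#include <stdio.h>

enum cfg_status { CFG_OK, BADCFG_MTU, BADCFG_BANDWIDTH };

struct vp_config { unsigned mtu, min_bandwidth; };

static enum cfg_status vpath_config_check(const struct vp_config *vp)
{
	if (vp->min_bandwidth > 100)		/* made-up bound */
		return BADCFG_BANDWIDTH;
	if (vp->mtu < 68 || vp->mtu > 9600)	/* made-up MTU range */
		return BADCFG_MTU;
	return CFG_OK;
}

static enum cfg_status device_config_check(const struct vp_config *vps, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		enum cfg_status s = vpath_config_check(&vps[i]);

		if (s != CFG_OK)
			return s;	/* first bad vpath wins, as in the driver */
	}
	return CFG_OK;
}

int main(void)
{
	struct vp_config vps[2] = { { 1500, 10 }, { 40, 10 } };

	printf("status=%d\n", device_config_check(vps, 2));
	return 0;
}
```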
1317 | /* | ||
1199 | * vxge_hw_device_initialize - Initialize Titan device. | 1318 | * vxge_hw_device_initialize - Initialize Titan device. |
1200 | * Initialize Titan device. Note that all the arguments of this public API | 1319 | * Initialize Titan device. Note that all the arguments of this public API |
1201 | * are 'IN', including @hldev. Driver cooperates with | 1320 | * are 'IN', including @hldev. Driver cooperates with |
@@ -1303,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1303 | } | 1422 | } |
1304 | 1423 | ||
1305 | /* | 1424 | /* |
1425 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
1426 | * and offset and perform an operation | ||
1427 | */ | ||
1428 | static enum vxge_hw_status | ||
1429 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
1430 | u32 operation, u32 offset, u64 *stat) | ||
1431 | { | ||
1432 | u64 val64; | ||
1433 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1434 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1435 | |||
1436 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1437 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1438 | goto vpath_stats_access_exit; | ||
1439 | } | ||
1440 | |||
1441 | vp_reg = vpath->vp_reg; | ||
1442 | |||
1443 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
1444 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
1445 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
1446 | |||
1447 | status = __vxge_hw_pio_mem_write64(val64, | ||
1448 | &vp_reg->xmac_stats_access_cmd, | ||
1449 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
1450 | vpath->hldev->config.device_poll_millis); | ||
1451 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
1452 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
1453 | else | ||
1454 | *stat = 0; | ||
1455 | |||
1456 | vpath_stats_access_exit: | ||
1457 | return status; | ||
1458 | } | ||
1459 | |||
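The stats access is a classic command/strobe handshake: compose a 64-bit command from an operation code, an offset, and a strobe bit; write it; wait for hardware to clear the strobe (that is what the __vxge_hw_pio_mem_write64 poll does); then read the data register. A self-contained model of the handshake, with an invented bit layout and a toy device that completes instantly:

```c
#include <stdint.h>
#include <stdio.h>

#define CMD_STROBE    (1ULL << 63)
#define CMD_OP(op)    ((uint64_t)(op) << 56)
#define CMD_OFFSET(o) ((uint64_t)(o))
#define OP_READ       1

/* Toy device: clears the strobe immediately and returns offset * 10. */
struct dev { uint64_t cmd, data; };

static void dev_write_cmd(struct dev *d, uint64_t cmd)
{
	d->data = (cmd & 0xffff) * 10;	/* "perform" the read */
	d->cmd = cmd & ~CMD_STROBE;	/* hardware clears the strobe */
}

static int stats_read(struct dev *d, unsigned offset, uint64_t *stat)
{
	dev_write_cmd(d, CMD_OP(OP_READ) | CMD_STROBE | CMD_OFFSET(offset));
	if (d->cmd & CMD_STROBE)	/* real code polls with a timeout */
		return -1;
	*stat = d->data;
	return 0;
}

int main(void)
{
	struct dev d = { 0, 0 };
	uint64_t v;

	if (stats_read(&d, 7, &v) == 0)
		printf("stat[7] = %llu\n", (unsigned long long)v);
	return 0;
}
```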
1460 | /* | ||
1461 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
1462 | */ | ||
1463 | static enum vxge_hw_status | ||
1464 | __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1465 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
1466 | { | ||
1467 | u64 *val64; | ||
1468 | int i; | ||
1469 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
1470 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1471 | |||
1472 | val64 = (u64 *)vpath_tx_stats; | ||
1473 | |||
1474 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1475 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1476 | goto exit; | ||
1477 | } | ||
1478 | |||
1479 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
1480 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1481 | VXGE_HW_STATS_OP_READ, | ||
1482 | offset, val64); | ||
1483 | if (status != VXGE_HW_OK) | ||
1484 | goto exit; | ||
1485 | offset++; | ||
1486 | val64++; | ||
1487 | } | ||
1488 | exit: | ||
1489 | return status; | ||
1490 | } | ||
1491 | |||
1492 | /* | ||
1493 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
1494 | */ | ||
1495 | static enum vxge_hw_status | ||
1496 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1497 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
1498 | { | ||
1499 | u64 *val64; | ||
1500 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1501 | int i; | ||
1502 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
1503 | val64 = (u64 *) vpath_rx_stats; | ||
1504 | |||
1505 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1506 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1507 | goto exit; | ||
1508 | } | ||
1509 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
1510 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1511 | VXGE_HW_STATS_OP_READ, | ||
1512 | offset >> 3, val64); | ||
1513 | if (status != VXGE_HW_OK) | ||
1514 | goto exit; | ||
1515 | |||
1516 | offset += 8; | ||
1517 | val64++; | ||
1518 | } | ||
1519 | exit: | ||
1520 | return status; | ||
1521 | } | ||
1522 | |||
1523 | /* | ||
1524 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
1525 | */ | ||
1526 | static enum vxge_hw_status | ||
1527 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1528 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
1529 | { | ||
1530 | u64 val64; | ||
1531 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1532 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1533 | |||
1534 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1535 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1536 | goto exit; | ||
1537 | } | ||
1538 | vp_reg = vpath->vp_reg; | ||
1539 | |||
1540 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
1541 | hw_stats->ini_num_mwr_sent = | ||
1542 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
1543 | |||
1544 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
1545 | hw_stats->ini_num_mrd_sent = | ||
1546 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
1547 | |||
1548 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
1549 | hw_stats->ini_num_cpl_rcvd = | ||
1550 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
1551 | |||
1552 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
1553 | hw_stats->ini_num_mwr_byte_sent = | ||
1554 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
1555 | |||
1556 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
1557 | hw_stats->ini_num_cpl_byte_rcvd = | ||
1558 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
1559 | |||
1560 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
1561 | hw_stats->wrcrdtarb_xoff = | ||
1562 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
1563 | |||
1564 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
1565 | hw_stats->rdcrdtarb_xoff = | ||
1566 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
1567 | |||
1568 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1569 | hw_stats->vpath_genstats_count0 = | ||
1570 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
1571 | val64); | ||
1572 | |||
1573 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1574 | hw_stats->vpath_genstats_count1 = | ||
1575 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
1576 | val64); | ||
1577 | |||
1578 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1579 | hw_stats->vpath_genstats_count2 = | ||
1580 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
1581 | val64); | ||
1582 | |||
1583 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1584 | hw_stats->vpath_genstats_count3 = | ||
1585 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
1586 | val64); | ||
1587 | |||
1588 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
1589 | hw_stats->vpath_genstats_count4 = | ||
1590 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
1591 | val64); | ||
1592 | |||
1593 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
1594 | hw_stats->vpath_genstats_count5 = | ||
1595 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
1596 | val64); | ||
1597 | |||
1598 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
1599 | if (status != VXGE_HW_OK) | ||
1600 | goto exit; | ||
1601 | |||
1602 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
1603 | if (status != VXGE_HW_OK) | ||
1604 | goto exit; | ||
1605 | |||
1606 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1607 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
1608 | |||
1609 | hw_stats->prog_event_vnum0 = | ||
1610 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
1611 | |||
1612 | hw_stats->prog_event_vnum1 = | ||
1613 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
1614 | |||
1615 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1616 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
1617 | |||
1618 | hw_stats->prog_event_vnum2 = | ||
1619 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
1620 | |||
1621 | hw_stats->prog_event_vnum3 = | ||
1622 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
1623 | |||
1624 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
1625 | hw_stats->rx_multi_cast_frame_discard = | ||
1626 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
1627 | |||
1628 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
1629 | hw_stats->rx_frm_transferred = | ||
1630 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
1631 | |||
1632 | val64 = readq(&vp_reg->rxd_returned); | ||
1633 | hw_stats->rxd_returned = | ||
1634 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
1635 | |||
1636 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
1637 | hw_stats->rx_mpa_len_fail_frms = | ||
1638 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
1639 | hw_stats->rx_mpa_mrk_fail_frms = | ||
1640 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
1641 | hw_stats->rx_mpa_crc_fail_frms = | ||
1642 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
1643 | |||
1644 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
1645 | hw_stats->rx_permitted_frms = | ||
1646 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
1647 | hw_stats->rx_vp_reset_discarded_frms = | ||
1648 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
1649 | hw_stats->rx_wol_frms = | ||
1650 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
1651 | |||
1652 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
1653 | hw_stats->tx_vp_reset_discarded_frms = | ||
1654 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
1655 | val64); | ||
1656 | exit: | ||
1657 | return status; | ||
1658 | } | ||
1659 | |||
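Every _GET_ accessor used above is a shift-and-mask extraction of a field packed into a 64-bit register; the genstats registers, for instance, carry two 32-bit counters each. A sketch of how such macros are typically built, with an assumed layout rather than Titan's real one:

```c
#include <stdint.h>
#include <stdio.h>

/* Extract a 'bits'-wide field starting at bit 'pos', where bit 0 is the
 * most significant bit, mirroring vxge_bVALn-style big-endian numbering. */
#define GET_FIELD(val, pos, bits) \
	(((val) >> (64 - (pos) - (bits))) & ((1ULL << (bits)) - 1))

/* Assumed layout: counter0 in the upper 32 bits, counter1 in the lower. */
#define GET_GENSTATS_COUNT0(v) GET_FIELD(v, 0, 32)
#define GET_GENSTATS_COUNT1(v) GET_FIELD(v, 32, 32)

int main(void)
{
	/* Fake vpath_genstats_count01 register value. */
	uint64_t reg = (123ULL << 32) | 456ULL;

	printf("count0=%llu count1=%llu\n",
	       (unsigned long long)GET_GENSTATS_COUNT0(reg),
	       (unsigned long long)GET_GENSTATS_COUNT1(reg));
	return 0;
}
```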
1660 | /* | ||
1306 | * vxge_hw_device_stats_get - Get the device hw statistics. | 1661 | * vxge_hw_device_stats_get - Get the device hw statistics. |
1307 | * Returns the vpath h/w stats for the device. | 1662 | * Returns the vpath h/w stats for the device. |
1308 | */ | 1663 | */ |
@@ -1468,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1468 | 1823 | ||
1469 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, | 1824 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, |
1470 | 0, &xmac_stats->aggr_stats[0]); | 1825 | 0, &xmac_stats->aggr_stats[0]); |
1471 | |||
1472 | if (status != VXGE_HW_OK) | 1826 | if (status != VXGE_HW_OK) |
1473 | goto exit; | 1827 | goto exit; |
1474 | 1828 | ||
@@ -1843,189 +2197,359 @@ exit:
1843 | } | 2197 | } |
1844 | 2198 | ||
1845 | /* | 2199 | /* |
1846 | * __vxge_hw_ring_create - Create a Ring | 2200 | * __vxge_hw_channel_allocate - Allocate memory for channel |
1847 | * This function creates Ring and initializes it. | 2201 | * This function allocates required memory for the channel and various arrays |
2202 | * in the channel | ||
1848 | */ | 2203 | */ |
1849 | static enum vxge_hw_status | 2204 | static struct __vxge_hw_channel * |
1850 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, | 2205 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, |
1851 | struct vxge_hw_ring_attr *attr) | 2206 | enum __vxge_hw_channel_type type, |
2207 | u32 length, u32 per_dtr_space, | ||
2208 | void *userdata) | ||
1852 | { | 2209 | { |
1853 | enum vxge_hw_status status = VXGE_HW_OK; | 2210 | struct __vxge_hw_channel *channel; |
1854 | struct __vxge_hw_ring *ring; | ||
1855 | u32 ring_length; | ||
1856 | struct vxge_hw_ring_config *config; | ||
1857 | struct __vxge_hw_device *hldev; | 2211 | struct __vxge_hw_device *hldev; |
2212 | int size = 0; | ||
1858 | u32 vp_id; | 2213 | u32 vp_id; |
1859 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1860 | 2214 | ||
1861 | if ((vp == NULL) || (attr == NULL)) { | 2215 | hldev = vph->vpath->hldev; |
2216 | vp_id = vph->vpath->vp_id; | ||
2217 | |||
2218 | switch (type) { | ||
2219 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
2220 | size = sizeof(struct __vxge_hw_fifo); | ||
2221 | break; | ||
2222 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
2223 | size = sizeof(struct __vxge_hw_ring); | ||
2224 | break; | ||
2225 | default: | ||
2226 | break; | ||
2227 | } | ||
2228 | |||
2229 | channel = kzalloc(size, GFP_KERNEL); | ||
2230 | if (channel == NULL) | ||
2231 | goto exit0; | ||
2232 | INIT_LIST_HEAD(&channel->item); | ||
2233 | |||
2234 | channel->common_reg = hldev->common_reg; | ||
2235 | channel->first_vp_id = hldev->first_vp_id; | ||
2236 | channel->type = type; | ||
2237 | channel->devh = hldev; | ||
2238 | channel->vph = vph; | ||
2239 | channel->userdata = userdata; | ||
2240 | channel->per_dtr_space = per_dtr_space; | ||
2241 | channel->length = length; | ||
2242 | channel->vp_id = vp_id; | ||
2243 | |||
2244 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2245 | if (channel->work_arr == NULL) | ||
2246 | goto exit1; | ||
2247 | |||
2248 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2249 | if (channel->free_arr == NULL) | ||
2250 | goto exit1; | ||
2251 | channel->free_ptr = length; | ||
2252 | |||
2253 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2254 | if (channel->reserve_arr == NULL) | ||
2255 | goto exit1; | ||
2256 | channel->reserve_ptr = length; | ||
2257 | channel->reserve_top = 0; | ||
2258 | |||
2259 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2260 | if (channel->orig_arr == NULL) | ||
2261 | goto exit1; | ||
2262 | |||
2263 | return channel; | ||
2264 | exit1: | ||
2265 | __vxge_hw_channel_free(channel); | ||
2266 | |||
2267 | exit0: | ||
2268 | return NULL; | ||
2269 | } | ||
2270 | |||
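__vxge_hw_channel_allocate returns a struct __vxge_hw_channel pointer that callers cast to a ring or fifo (see the (struct __vxge_hw_ring *) cast in __vxge_hw_ring_create below); that only works when the channel is the leading member of the larger struct, so the two pointers share an address. A standalone sketch of the embedding-and-cast pattern, with simplified types rather than the driver's:

```c
#include <stdio.h>
#include <stdlib.h>

/* Common state shared by both channel types. */
struct channel {
	int type;
	unsigned length;
};

/* The ring begins with its channel, so a channel pointer to this object
 * can legitimately be cast back to the ring. */
struct ring {
	struct channel channel;	/* must stay the first member */
	unsigned rxd_size;
};

enum { TYPE_FIFO = 1, TYPE_RING = 2 };

static struct channel *channel_allocate(int type, unsigned length)
{
	size_t size = (type == TYPE_RING) ? sizeof(struct ring)
					  : sizeof(struct channel);
	struct channel *ch = calloc(1, size);	/* allocate the full object */

	if (ch == NULL)
		return NULL;
	ch->type = type;
	ch->length = length;
	return ch;
}

int main(void)
{
	struct ring *r = (struct ring *)channel_allocate(TYPE_RING, 128);

	if (r) {
		r->rxd_size = 64;
		printf("ring len=%u rxd=%u\n", r->channel.length, r->rxd_size);
		free(r);
	}
	return 0;
}
```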
2271 | /* | ||
2272 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
2273 | * Adds a block to block pool | ||
2274 | */ | ||
2275 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
2276 | void *block_addr, | ||
2277 | u32 length, | ||
2278 | struct pci_dev *dma_h, | ||
2279 | struct pci_dev *acc_handle) | ||
2280 | { | ||
2281 | struct __vxge_hw_blockpool *blockpool; | ||
2282 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2283 | dma_addr_t dma_addr; | ||
2284 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2285 | u32 req_out; | ||
2286 | |||
2287 | blockpool = &devh->block_pool; | ||
2288 | |||
2289 | if (block_addr == NULL) { | ||
2290 | blockpool->req_out--; | ||
1862 | status = VXGE_HW_FAIL; | 2291 | status = VXGE_HW_FAIL; |
1863 | goto exit; | 2292 | goto exit; |
1864 | } | 2293 | } |
1865 | 2294 | ||
1866 | hldev = vp->vpath->hldev; | 2295 | dma_addr = pci_map_single(devh->pdev, block_addr, length, |
1867 | vp_id = vp->vpath->vp_id; | 2296 | PCI_DMA_BIDIRECTIONAL); |
1868 | 2297 | ||
1869 | config = &hldev->config.vp_config[vp_id].ring; | 2298 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { |
2299 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
2300 | blockpool->req_out--; | ||
2301 | status = VXGE_HW_FAIL; | ||
2302 | goto exit; | ||
2303 | } | ||
1870 | 2304 | ||
1871 | ring_length = config->ring_blocks * | 2305 | if (!list_empty(&blockpool->free_entry_list)) |
1872 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2306 | entry = (struct __vxge_hw_blockpool_entry *) |
2307 | list_first_entry(&blockpool->free_entry_list, | ||
2308 | struct __vxge_hw_blockpool_entry, | ||
2309 | item); | ||
1873 | 2310 | ||
1874 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | 2311 | if (entry == NULL) |
1875 | VXGE_HW_CHANNEL_TYPE_RING, | 2312 | entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); |
1876 | ring_length, | 2313 | else |
1877 | attr->per_rxd_space, | 2314 | list_del(&entry->item); |
1878 | attr->userdata); | ||
1879 | 2315 | ||
1880 | if (ring == NULL) { | 2316 | if (entry != NULL) { |
2317 | entry->length = length; | ||
2318 | entry->memblock = block_addr; | ||
2319 | entry->dma_addr = dma_addr; | ||
2320 | entry->acc_handle = acc_handle; | ||
2321 | entry->dma_handle = dma_h; | ||
2322 | list_add(&entry->item, &blockpool->free_block_list); | ||
2323 | blockpool->pool_size++; | ||
2324 | status = VXGE_HW_OK; | ||
2325 | } else | ||
1881 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2326 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1882 | goto exit; | ||
1883 | } | ||
1884 | 2327 | ||
1885 | vp->vpath->ringh = ring; | 2328 | blockpool->req_out--; |
1886 | ring->vp_id = vp_id; | ||
1887 | ring->vp_reg = vp->vpath->vp_reg; | ||
1888 | ring->common_reg = hldev->common_reg; | ||
1889 | ring->stats = &vp->vpath->sw_stats->ring_stats; | ||
1890 | ring->config = config; | ||
1891 | ring->callback = attr->callback; | ||
1892 | ring->rxd_init = attr->rxd_init; | ||
1893 | ring->rxd_term = attr->rxd_term; | ||
1894 | ring->buffer_mode = config->buffer_mode; | ||
1895 | ring->rxds_limit = config->rxds_limit; | ||
1896 | 2329 | ||
1897 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | 2330 | req_out = blockpool->req_out; |
1898 | ring->rxd_priv_size = | 2331 | exit: |
1899 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | 2332 | return; |
1900 | ring->per_rxd_space = attr->per_rxd_space; | 2333 | } |
1901 | 2334 | ||
1902 | ring->rxd_priv_size = | 2335 | static inline void |
1903 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | 2336 | vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) |
1904 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | 2337 | { |
2338 | gfp_t flags; | ||
2339 | void *vaddr; | ||
1905 | 2340 | ||
1906 | /* how many RxDs can fit into one block. Depends on configured | 2341 | if (in_interrupt()) |
1907 | * buffer_mode. */ | 2342 | flags = GFP_ATOMIC | GFP_DMA; |
1908 | ring->rxds_per_block = | 2343 | else |
1909 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2344 | flags = GFP_KERNEL | GFP_DMA; |
1910 | 2345 | ||
1911 | /* calculate actual RxD block private size */ | 2346 | vaddr = kmalloc((size), flags); |
1912 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
1913 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
1914 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
1915 | VXGE_HW_BLOCK_SIZE, | ||
1916 | VXGE_HW_BLOCK_SIZE, | ||
1917 | ring->rxdblock_priv_size, | ||
1918 | ring->config->ring_blocks, | ||
1919 | ring->config->ring_blocks, | ||
1920 | &ring_mp_callback, | ||
1921 | ring); | ||
1922 | 2347 | ||
1923 | if (ring->mempool == NULL) { | 2348 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); |
1924 | __vxge_hw_ring_delete(vp); | 2349 | } |
1925 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1926 | } | ||
1927 | 2350 | ||
1928 | status = __vxge_hw_channel_initialize(&ring->channel); | 2351 | /* |
1929 | if (status != VXGE_HW_OK) { | 2352 | * __vxge_hw_blockpool_blocks_add - Request additional blocks |
1930 | __vxge_hw_ring_delete(vp); | 2353 | */ |
1931 | goto exit; | 2354 | static |
2355 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
2356 | { | ||
2357 | u32 nreq = 0, i; | ||
2358 | |||
2359 | if ((blockpool->pool_size + blockpool->req_out) < | ||
2360 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
2361 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
2362 | blockpool->req_out += nreq; | ||
1932 | } | 2363 | } |
1933 | 2364 | ||
1934 | /* Note: | 2365 | for (i = 0; i < nreq; i++) |
1935 | * Specifying rxd_init callback means two things: | 2366 | vxge_os_dma_malloc_async( |
1936 | * 1) rxds need to be initialized by driver at channel-open time; | 2367 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1937 | * 2) rxds need to be posted at channel-open time | 2368 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); |
1938 | * (that's what the initial_replenish() below does) | 2369 | } |
1939 | * Currently we don't have a case when the 1) is done without the 2). | 2370 | |
1940 | */ | 2371 | /* |
1941 | if (ring->rxd_init) { | 2372 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool |
1942 | status = vxge_hw_ring_replenish(ring); | 2373 | * Allocates a block of memory of given size, either from block pool |
1943 | if (status != VXGE_HW_OK) { | 2374 | * or by calling vxge_os_dma_malloc() |
1944 | __vxge_hw_ring_delete(vp); | 2375 | */ |
2376 | static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
2377 | struct vxge_hw_mempool_dma *dma_object) | ||
2378 | { | ||
2379 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2380 | struct __vxge_hw_blockpool *blockpool; | ||
2381 | void *memblock = NULL; | ||
2382 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2383 | |||
2384 | blockpool = &devh->block_pool; | ||
2385 | |||
2386 | if (size != blockpool->block_size) { | ||
2387 | |||
2388 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
2389 | &dma_object->handle, | ||
2390 | &dma_object->acc_handle); | ||
2391 | |||
2392 | if (memblock == NULL) { | ||
2393 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1945 | goto exit; | 2394 | goto exit; |
1946 | } | 2395 | } |
1947 | } | ||
1948 | 2396 | ||
1949 | /* initial replenish will increment the counter in its post() routine, | 2397 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, |
1950 | * we have to reset it */ | 2398 | PCI_DMA_BIDIRECTIONAL); |
1951 | ring->stats->common_stats.usage_cnt = 0; | 2399 | |
2400 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
2401 | dma_object->addr))) { | ||
2402 | vxge_os_dma_free(devh->pdev, memblock, | ||
2403 | &dma_object->acc_handle); | ||
2404 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2405 | goto exit; | ||
2406 | } | ||
2407 | |||
2408 | } else { | ||
2409 | |||
2410 | if (!list_empty(&blockpool->free_block_list)) | ||
2411 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2412 | list_first_entry(&blockpool->free_block_list, | ||
2413 | struct __vxge_hw_blockpool_entry, | ||
2414 | item); | ||
2415 | |||
2416 | if (entry != NULL) { | ||
2417 | list_del(&entry->item); | ||
2418 | dma_object->addr = entry->dma_addr; | ||
2419 | dma_object->handle = entry->dma_handle; | ||
2420 | dma_object->acc_handle = entry->acc_handle; | ||
2421 | memblock = entry->memblock; | ||
2422 | |||
2423 | list_add(&entry->item, | ||
2424 | &blockpool->free_entry_list); | ||
2425 | blockpool->pool_size--; | ||
2426 | } | ||
2427 | |||
2428 | if (memblock != NULL) | ||
2429 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
2430 | } | ||
1952 | exit: | 2431 | exit: |
1953 | return status; | 2432 | return memblock; |
1954 | } | 2433 | } |
1955 | 2434 | ||
1956 | /* | 2435 | /* |
1957 | * __vxge_hw_ring_abort - Returns the RxD | 2436 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks |
1958 | * This function terminates the RxDs of ring | ||
1959 | */ | 2437 | */ |
1960 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) | 2438 | static void |
2439 | __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
1961 | { | 2440 | { |
1962 | void *rxdh; | 2441 | struct list_head *p, *n; |
1963 | struct __vxge_hw_channel *channel; | ||
1964 | |||
1965 | channel = &ring->channel; | ||
1966 | 2442 | ||
1967 | for (;;) { | 2443 | list_for_each_safe(p, n, &blockpool->free_block_list) { |
1968 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1969 | 2444 | ||
1970 | if (rxdh == NULL) | 2445 | if (blockpool->pool_size < blockpool->pool_max) |
1971 | break; | 2446 | break; |
1972 | 2447 | ||
1973 | vxge_hw_channel_dtr_complete(channel); | 2448 | pci_unmap_single( |
2449 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2450 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
2451 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
2452 | PCI_DMA_BIDIRECTIONAL); | ||
1974 | 2453 | ||
1975 | if (ring->rxd_term) | 2454 | vxge_os_dma_free( |
1976 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | 2455 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1977 | channel->userdata); | 2456 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, |
2457 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1978 | 2458 | ||
1979 | vxge_hw_channel_dtr_free(channel, rxdh); | 2459 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); |
1980 | } | ||
1981 | 2460 | ||
1982 | return VXGE_HW_OK; | 2461 | list_add(p, &blockpool->free_entry_list); |
2462 | |||
2463 | blockpool->pool_size--; | ||
2464 | |||
2465 | } | ||
1983 | } | 2466 | } |
1984 | 2467 | ||
1985 | /* | 2468 | /* |
1986 | * __vxge_hw_ring_reset - Resets the ring | 2469 | * __vxge_hw_blockpool_free - Frees the memory allocated with |
1987 | * This function resets the ring during vpath reset operation | 2470 | * __vxge_hw_blockpool_malloc |
1988 | */ | 2471 | */ |
1989 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | 2472 | static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, |
2473 | void *memblock, u32 size, | ||
2474 | struct vxge_hw_mempool_dma *dma_object) | ||
1990 | { | 2475 | { |
2476 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2477 | struct __vxge_hw_blockpool *blockpool; | ||
1991 | enum vxge_hw_status status = VXGE_HW_OK; | 2478 | enum vxge_hw_status status = VXGE_HW_OK; |
1992 | struct __vxge_hw_channel *channel; | ||
1993 | 2479 | ||
1994 | channel = &ring->channel; | 2480 | blockpool = &devh->block_pool; |
1995 | 2481 | ||
1996 | __vxge_hw_ring_abort(ring); | 2482 | if (size != blockpool->block_size) { |
2483 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
2484 | PCI_DMA_BIDIRECTIONAL); | ||
2485 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
2486 | } else { | ||
1997 | 2487 | ||
1998 | status = __vxge_hw_channel_reset(channel); | 2488 | if (!list_empty(&blockpool->free_entry_list)) |
2489 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2490 | list_first_entry(&blockpool->free_entry_list, | ||
2491 | struct __vxge_hw_blockpool_entry, | ||
2492 | item); | ||
1999 | 2493 | ||
2000 | if (status != VXGE_HW_OK) | 2494 | if (entry == NULL) |
2001 | goto exit; | 2495 | entry = vmalloc(sizeof( |
2496 | struct __vxge_hw_blockpool_entry)); | ||
2497 | else | ||
2498 | list_del(&entry->item); | ||
2002 | 2499 | ||
2003 | if (ring->rxd_init) { | 2500 | if (entry != NULL) { |
2004 | status = vxge_hw_ring_replenish(ring); | 2501 | entry->length = size; |
2005 | if (status != VXGE_HW_OK) | 2502 | entry->memblock = memblock; |
2006 | goto exit; | 2503 | entry->dma_addr = dma_object->addr; |
2504 | entry->acc_handle = dma_object->acc_handle; | ||
2505 | entry->dma_handle = dma_object->handle; | ||
2506 | list_add(&entry->item, | ||
2507 | &blockpool->free_block_list); | ||
2508 | blockpool->pool_size++; | ||
2509 | status = VXGE_HW_OK; | ||
2510 | } else | ||
2511 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2512 | |||
2513 | if (status == VXGE_HW_OK) | ||
2514 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
2007 | } | 2515 | } |
2008 | exit: | ||
2009 | return status; | ||
2010 | } | 2516 | } |
2011 | 2517 | ||
2012 | /* | 2518 | /* |
2013 | * __vxge_hw_ring_delete - Removes the ring | 2519 | * vxge_hw_mempool_destroy |
2014 | * This function frees up the memory pool and removes the ring ||
2015 | */ | 2520 | */ |
2016 | static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) | 2521 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) |
2017 | { | 2522 | { |
2018 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | 2523 | u32 i, j; |
2524 | struct __vxge_hw_device *devh = mempool->devh; | ||
2019 | 2525 | ||
2020 | __vxge_hw_ring_abort(ring); | 2526 | for (i = 0; i < mempool->memblocks_allocated; i++) { |
2527 | struct vxge_hw_mempool_dma *dma_object; | ||
2021 | 2528 | ||
2022 | if (ring->mempool) | 2529 | vxge_assert(mempool->memblocks_arr[i]); |
2023 | __vxge_hw_mempool_destroy(ring->mempool); | 2530 | vxge_assert(mempool->memblocks_dma_arr + i); |
2024 | 2531 | ||
2025 | vp->vpath->ringh = NULL; | 2532 | dma_object = mempool->memblocks_dma_arr + i; |
2026 | __vxge_hw_channel_free(&ring->channel); | ||
2027 | 2533 | ||
2028 | return VXGE_HW_OK; | 2534 | for (j = 0; j < mempool->items_per_memblock; j++) { |
2535 | u32 index = i * mempool->items_per_memblock + j; | ||
2536 | |||
2537 | /* to skip last partially filled(if any) memblock */ | ||
2538 | if (index >= mempool->items_current) | ||
2539 | break; | ||
2540 | } | ||
2541 | |||
2542 | vfree(mempool->memblocks_priv_arr[i]); | ||
2543 | |||
2544 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | ||
2545 | mempool->memblock_size, dma_object); | ||
2546 | } | ||
2547 | |||
2548 | vfree(mempool->items_arr); | ||
2549 | vfree(mempool->memblocks_dma_arr); | ||
2550 | vfree(mempool->memblocks_priv_arr); | ||
2551 | vfree(mempool->memblocks_arr); | ||
2552 | vfree(mempool); | ||
2029 | } | 2553 | } |
2030 | 2554 | ||
2031 | /* | 2555 | /* |
@@ -2118,16 +2642,15 @@ exit:
2118 | * with size enough to hold %items_initial number of items. Memory is | 2642 | * with size enough to hold %items_initial number of items. Memory is |
2119 | * DMA-able but client must map/unmap before interoperating with the device. | 2643 | * DMA-able but client must map/unmap before interoperating with the device. |
2120 | */ | 2644 | */ |
2121 | static struct vxge_hw_mempool* | 2645 | static struct vxge_hw_mempool * |
2122 | __vxge_hw_mempool_create( | 2646 | __vxge_hw_mempool_create(struct __vxge_hw_device *devh, |
2123 | struct __vxge_hw_device *devh, | 2647 | u32 memblock_size, |
2124 | u32 memblock_size, | 2648 | u32 item_size, |
2125 | u32 item_size, | 2649 | u32 items_priv_size, |
2126 | u32 items_priv_size, | 2650 | u32 items_initial, |
2127 | u32 items_initial, | 2651 | u32 items_max, |
2128 | u32 items_max, | 2652 | struct vxge_hw_mempool_cbs *mp_callback, |
2129 | struct vxge_hw_mempool_cbs *mp_callback, | 2653 | void *userdata) |
2130 | void *userdata) | ||
2131 | { | 2654 | { |
2132 | enum vxge_hw_status status = VXGE_HW_OK; | 2655 | enum vxge_hw_status status = VXGE_HW_OK; |
2133 | u32 memblocks_to_allocate; | 2656 | u32 memblocks_to_allocate; |
@@ -2185,7 +2708,6 @@ __vxge_hw_mempool_create( | |||
2185 | mempool->memblocks_dma_arr = | 2708 | mempool->memblocks_dma_arr = |
2186 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * | 2709 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * |
2187 | mempool->memblocks_max); | 2710 | mempool->memblocks_max); |
2188 | |||
2189 | if (mempool->memblocks_dma_arr == NULL) { | 2711 | if (mempool->memblocks_dma_arr == NULL) { |
2190 | __vxge_hw_mempool_destroy(mempool); | 2712 | __vxge_hw_mempool_destroy(mempool); |
2191 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2713 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
@@ -2222,122 +2744,188 @@ exit:
2222 | } | 2744 | } |
2223 | 2745 | ||
2224 | /* | 2746 | /* |
2225 | * vxge_hw_mempool_destroy | 2747 | * __vxge_hw_ring_abort - Returns the RxD |
2748 | * This function terminates the RxDs of ring | ||
2226 | */ | 2749 | */ |
2227 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) | 2750 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) |
2228 | { | 2751 | { |
2229 | u32 i, j; | 2752 | void *rxdh; |
2230 | struct __vxge_hw_device *devh = mempool->devh; | 2753 | struct __vxge_hw_channel *channel; |
2231 | |||
2232 | for (i = 0; i < mempool->memblocks_allocated; i++) { | ||
2233 | struct vxge_hw_mempool_dma *dma_object; | ||
2234 | 2754 | ||
2235 | vxge_assert(mempool->memblocks_arr[i]); | 2755 | channel = &ring->channel; |
2236 | vxge_assert(mempool->memblocks_dma_arr + i); | ||
2237 | 2756 | ||
2238 | dma_object = mempool->memblocks_dma_arr + i; | 2757 | for (;;) { |
2758 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
2239 | 2759 | ||
2240 | for (j = 0; j < mempool->items_per_memblock; j++) { | 2760 | if (rxdh == NULL) |
2241 | u32 index = i * mempool->items_per_memblock + j; | 2761 | break; |
2242 | 2762 | ||
2243 | /* to skip last partially filled(if any) memblock */ | 2763 | vxge_hw_channel_dtr_complete(channel); |
2244 | if (index >= mempool->items_current) | ||
2245 | break; | ||
2246 | } | ||
2247 | 2764 | ||
2248 | vfree(mempool->memblocks_priv_arr[i]); | 2765 | if (ring->rxd_term) |
2766 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | ||
2767 | channel->userdata); | ||
2249 | 2768 | ||
2250 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | 2769 | vxge_hw_channel_dtr_free(channel, rxdh); |
2251 | mempool->memblock_size, dma_object); | ||
2252 | } | 2770 | } |
2253 | 2771 | ||
2254 | vfree(mempool->items_arr); | 2772 | return VXGE_HW_OK; |
2773 | } | ||
2255 | 2774 | ||
2256 | vfree(mempool->memblocks_dma_arr); | 2775 | /* |
2776 | * __vxge_hw_ring_reset - Resets the ring | ||
2777 | * This function resets the ring during vpath reset operation | ||
2778 | */ | ||
2779 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | ||
2780 | { | ||
2781 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2782 | struct __vxge_hw_channel *channel; | ||
2257 | 2783 | ||
2258 | vfree(mempool->memblocks_priv_arr); | 2784 | channel = &ring->channel; |
2259 | 2785 | ||
2260 | vfree(mempool->memblocks_arr); | 2786 | __vxge_hw_ring_abort(ring); |
2261 | 2787 | ||
2262 | vfree(mempool); | 2788 | status = __vxge_hw_channel_reset(channel); |
2789 | |||
2790 | if (status != VXGE_HW_OK) | ||
2791 | goto exit; | ||
2792 | |||
2793 | if (ring->rxd_init) { | ||
2794 | status = vxge_hw_ring_replenish(ring); | ||
2795 | if (status != VXGE_HW_OK) | ||
2796 | goto exit; | ||
2797 | } | ||
2798 | exit: | ||
2799 | return status; | ||
2263 | } | 2800 | } |
2264 | 2801 | ||
2265 | /* | 2802 | /* |
2266 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | 2803 | * __vxge_hw_ring_delete - Removes the ring |
2267 | * Check the fifo configuration | 2804 | * This function frees up the memory pool and removes the ring |
2268 | */ | 2805 | */ |
2269 | static enum vxge_hw_status | 2806 | static enum vxge_hw_status |
2270 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | 2807 | __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) |
2271 | { | 2808 | { |
2272 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | 2809 | struct __vxge_hw_ring *ring = vp->vpath->ringh; |
2273 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | 2810 | |
2274 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | 2811 | __vxge_hw_ring_abort(ring); |
2812 | |||
2813 | if (ring->mempool) | ||
2814 | __vxge_hw_mempool_destroy(ring->mempool); | ||
2815 | |||
2816 | vp->vpath->ringh = NULL; | ||
2817 | __vxge_hw_channel_free(&ring->channel); | ||
2275 | 2818 | ||
2276 | return VXGE_HW_OK; | 2819 | return VXGE_HW_OK; |
2277 | } | 2820 | } |
2278 | 2821 | ||
2279 | /* | 2822 | /* |
2280 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | 2823 | * __vxge_hw_ring_create - Create a Ring |
2281 | * Check the vpath configuration | 2824 | * This function creates a Ring and initializes it. |
2282 | */ | 2825 | */ |
2283 | static enum vxge_hw_status | 2826 | static enum vxge_hw_status |
2284 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | 2827 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, |
2828 | struct vxge_hw_ring_attr *attr) | ||
2285 | { | 2829 | { |
2286 | enum vxge_hw_status status; | 2830 | enum vxge_hw_status status = VXGE_HW_OK; |
2831 | struct __vxge_hw_ring *ring; | ||
2832 | u32 ring_length; | ||
2833 | struct vxge_hw_ring_config *config; | ||
2834 | struct __vxge_hw_device *hldev; | ||
2835 | u32 vp_id; | ||
2836 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
2287 | 2837 | ||
2288 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | 2838 | if ((vp == NULL) || (attr == NULL)) { |
2289 | (vp_config->min_bandwidth > | 2839 | status = VXGE_HW_FAIL; |
2290 | VXGE_HW_VPATH_BANDWIDTH_MAX)) | 2840 | goto exit; |
2291 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | 2841 | } |
2292 | 2842 | ||
2293 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | 2843 | hldev = vp->vpath->hldev; |
2294 | if (status != VXGE_HW_OK) | 2844 | vp_id = vp->vpath->vp_id; |
2295 | return status; | ||
2296 | 2845 | ||
2297 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | 2846 | config = &hldev->config.vp_config[vp_id].ring; |
2298 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
2299 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
2300 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
2301 | 2847 | ||
2302 | if ((vp_config->rpa_strip_vlan_tag != | 2848 | ring_length = config->ring_blocks * |
2303 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | 2849 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
2304 | (vp_config->rpa_strip_vlan_tag != | ||
2305 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
2306 | (vp_config->rpa_strip_vlan_tag != | ||
2307 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
2308 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
2309 | 2850 | ||
2310 | return VXGE_HW_OK; | 2851 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, |
2311 | } | 2852 | VXGE_HW_CHANNEL_TYPE_RING, |
2853 | ring_length, | ||
2854 | attr->per_rxd_space, | ||
2855 | attr->userdata); | ||
2856 | if (ring == NULL) { | ||
2857 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2858 | goto exit; | ||
2859 | } | ||
2312 | 2860 | ||
2313 | /* | 2861 | vp->vpath->ringh = ring; |
2314 | * __vxge_hw_device_config_check - Check device configuration. | 2862 | ring->vp_id = vp_id; |
2315 | * Check the device configuration | 2863 | ring->vp_reg = vp->vpath->vp_reg; |
2316 | */ | 2864 | ring->common_reg = hldev->common_reg; |
2317 | static enum vxge_hw_status | 2865 | ring->stats = &vp->vpath->sw_stats->ring_stats; |
2318 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | 2866 | ring->config = config; |
2319 | { | 2867 | ring->callback = attr->callback; |
2320 | u32 i; | 2868 | ring->rxd_init = attr->rxd_init; |
2321 | enum vxge_hw_status status; | 2869 | ring->rxd_term = attr->rxd_term; |
2870 | ring->buffer_mode = config->buffer_mode; | ||
2871 | ring->rxds_limit = config->rxds_limit; | ||
2322 | 2872 | ||
2323 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | 2873 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); |
2324 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | 2874 | ring->rxd_priv_size = |
2325 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | 2875 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; |
2326 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | 2876 | ring->per_rxd_space = attr->per_rxd_space; |
2327 | return VXGE_HW_BADCFG_INTR_MODE; | ||
2328 | 2877 | ||
2329 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | 2878 | ring->rxd_priv_size = |
2330 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | 2879 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / |
2331 | return VXGE_HW_BADCFG_RTS_MAC_EN; | 2880 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; |
2332 | 2881 | ||
2333 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2882 | /* how many RxDs can fit into one block. Depends on configured |
2334 | status = __vxge_hw_device_vpath_config_check( | 2883 | * buffer_mode. */ |
2335 | &new_config->vp_config[i]); | 2884 | ring->rxds_per_block = |
2336 | if (status != VXGE_HW_OK) | 2885 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
2337 | return status; | 2886 | |
2887 | /* calculate actual RxD block private size */ | ||
2888 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
2889 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
2890 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
2891 | VXGE_HW_BLOCK_SIZE, | ||
2892 | VXGE_HW_BLOCK_SIZE, | ||
2893 | ring->rxdblock_priv_size, | ||
2894 | ring->config->ring_blocks, | ||
2895 | ring->config->ring_blocks, | ||
2896 | &ring_mp_callback, | ||
2897 | ring); | ||
2898 | if (ring->mempool == NULL) { | ||
2899 | __vxge_hw_ring_delete(vp); | ||
2900 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2338 | } | 2901 | } |
2339 | 2902 | ||
2340 | return VXGE_HW_OK; | 2903 | status = __vxge_hw_channel_initialize(&ring->channel); |
2904 | if (status != VXGE_HW_OK) { | ||
2905 | __vxge_hw_ring_delete(vp); | ||
2906 | goto exit; | ||
2907 | } | ||
2908 | |||
2909 | /* Note: | ||
2910 | * Specifying rxd_init callback means two things: | ||
2911 | * 1) rxds need to be initialized by driver at channel-open time; | ||
2912 | * 2) rxds need to be posted at channel-open time | ||
2913 | * (that's what the initial_replenish() below does) | ||
2914 | * Currently we don't have a case where 1) is done without 2). | ||
2915 | */ | ||
2916 | if (ring->rxd_init) { | ||
2917 | status = vxge_hw_ring_replenish(ring); | ||
2918 | if (status != VXGE_HW_OK) { | ||
2919 | __vxge_hw_ring_delete(vp); | ||
2920 | goto exit; | ||
2921 | } | ||
2922 | } | ||
2923 | |||
2924 | /* initial replenish will increment the counter in its post() routine, | ||
2925 | * so we have to reset it */ | ||
2926 | ring->stats->common_stats.usage_cnt = 0; | ||
2927 | exit: | ||
2928 | return status; | ||
2341 | } | 2929 | } |
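A hedged caller-side sketch of __vxge_hw_ring_create: the attribute block supplies the completion/init/term hooks that the function wires into the ring, and a non-NULL rxd_init is what triggers the initial vxge_hw_ring_replenish() noted above. The my_* hooks, their exact signatures, and struct my_rx_priv are hypothetical driver-side names, not part of this patch:

	static enum vxge_hw_status ring_open_sketch(struct __vxge_hw_vpath_handle *vp)
	{
		struct vxge_hw_ring_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.callback      = my_rx_compl;	/* hypothetical: per completed RxD */
		attr.rxd_init      = my_rxd_init;	/* hypothetical: posts initial buffers */
		attr.rxd_term      = my_rxd_term;	/* hypothetical: unmaps buffers on abort */
		attr.per_rxd_space = sizeof(struct my_rx_priv);	/* hypothetical */
		attr.userdata      = vp;

		/* the initial replenish runs inside create because rxd_init is non-NULL */
		return __vxge_hw_ring_create(vp, &attr);
	}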
2342 | 2930 | ||
2343 | /* | 2931 | /* |
@@ -2359,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
2359 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | 2947 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; |
2360 | 2948 | ||
2361 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2949 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
2362 | |||
2363 | device_config->vp_config[i].vp_id = i; | 2950 | device_config->vp_config[i].vp_id = i; |
2364 | 2951 | ||
2365 | device_config->vp_config[i].min_bandwidth = | 2952 | device_config->vp_config[i].min_bandwidth = |
@@ -2499,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
2499 | } | 3086 | } |
2500 | 3087 | ||
2501 | /* | 3088 | /* |
2502 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. |||
2503 | * Set the swapper bits appropriately for the legacy section. |||
2504 | */ | ||
2505 | static enum vxge_hw_status | ||
2506 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
2507 | { | ||
2508 | u64 val64; | ||
2509 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2510 | |||
2511 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
2512 | |||
2513 | wmb(); | ||
2514 | |||
2515 | switch (val64) { | ||
2516 | |||
2517 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
2518 | return status; | ||
2519 | |||
2520 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
2521 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
2522 | &legacy_reg->pifm_rd_swap_en); | ||
2523 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
2524 | &legacy_reg->pifm_rd_flip_en); | ||
2525 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
2526 | &legacy_reg->pifm_wr_swap_en); | ||
2527 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
2528 | &legacy_reg->pifm_wr_flip_en); | ||
2529 | break; | ||
2530 | |||
2531 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
2532 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
2533 | &legacy_reg->pifm_rd_swap_en); | ||
2534 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
2535 | &legacy_reg->pifm_wr_swap_en); | ||
2536 | break; | ||
2537 | |||
2538 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
2539 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
2540 | &legacy_reg->pifm_rd_flip_en); | ||
2541 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
2542 | &legacy_reg->pifm_wr_flip_en); | ||
2543 | break; | ||
2544 | } | ||
2545 | |||
2546 | wmb(); | ||
2547 | |||
2548 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
2549 | |||
2550 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
2551 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
2552 | |||
2553 | return status; | ||
2554 | } | ||
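The probe above works because a fixed 64-bit pattern read through a byte-swapping or bit-flipping path comes back in one of three predictable corrupted forms, and each form maps to a distinct set of correction bits. A standalone sketch of the byte-swap case; the magic value is assumed to match VXGE_HW_SWAPPER_INITIAL_VALUE and is illustrative here:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t magic = 0x0123456789abcdefULL;	/* assumed probe pattern */
		uint64_t swapped = 0;
		int i;

		/* what a byte-swapping bus would hand back on readq() */
		for (i = 0; i < 8; i++)
			swapped |= ((magic >> (8 * i)) & 0xffULL) << (8 * (7 - i));

		printf("written:  %016llx\n", (unsigned long long)magic);
		printf("readback: %016llx\n", (unsigned long long)swapped);
		return 0;
	}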
2555 | |||
2556 | /* | ||
2557 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | 3089 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. |
2558 | * Set the swapper bits appropriately for the vpath. | 3090 | * Set the swapper bits appropriately for the vpath. |
2559 | */ | 3091 | */ |
@@ -2577,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | |||
2577 | * Set the swapper bits appropriately for the vpath. | 3109 | * Set the swapper bits appropriately for the vpath. |
2578 | */ | 3110 | */ |
2579 | static enum vxge_hw_status | 3111 | static enum vxge_hw_status |
2580 | __vxge_hw_kdfc_swapper_set( | 3112 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, |
2581 | struct vxge_hw_legacy_reg __iomem *legacy_reg, | 3113 | struct vxge_hw_vpath_reg __iomem *vpath_reg) |
2582 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2583 | { | 3114 | { |
2584 | u64 val64; | 3115 | u64 val64; |
2585 | 3116 | ||
@@ -2829,6 +3360,69 @@ exit: | |||
2829 | } | 3360 | } |
2830 | 3361 | ||
2831 | /* | 3362 | /* |
3363 | * __vxge_hw_fifo_abort - Returns the TxD | ||
3364 | * This function terminates the TxDs of the fifo | ||
3365 | */ | ||
3366 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
3367 | { | ||
3368 | void *txdlh; | ||
3369 | |||
3370 | for (;;) { | ||
3371 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
3372 | |||
3373 | if (txdlh == NULL) | ||
3374 | break; | ||
3375 | |||
3376 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
3377 | |||
3378 | if (fifo->txdl_term) { | ||
3379 | fifo->txdl_term(txdlh, | ||
3380 | VXGE_HW_TXDL_STATE_POSTED, | ||
3381 | fifo->channel.userdata); | ||
3382 | } | ||
3383 | |||
3384 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
3385 | } | ||
3386 | |||
3387 | return VXGE_HW_OK; | ||
3388 | } | ||
3389 | |||
3390 | /* | ||
3391 | * __vxge_hw_fifo_reset - Resets the fifo | ||
3392 | * This function resets the fifo during vpath reset operation | ||
3393 | */ | ||
3394 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
3395 | { | ||
3396 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3397 | |||
3398 | __vxge_hw_fifo_abort(fifo); | ||
3399 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
3400 | |||
3401 | return status; | ||
3402 | } | ||
3403 | |||
3404 | /* | ||
3405 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
3406 | * This function frees up the memory pool and removes the FIFO | ||
3407 | */ | ||
3408 | static enum vxge_hw_status | ||
3409 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
3410 | { | ||
3411 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
3412 | |||
3413 | __vxge_hw_fifo_abort(fifo); | ||
3414 | |||
3415 | if (fifo->mempool) | ||
3416 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
3417 | |||
3418 | vp->vpath->fifoh = NULL; | ||
3419 | |||
3420 | __vxge_hw_channel_free(&fifo->channel); | ||
3421 | |||
3422 | return VXGE_HW_OK; | ||
3423 | } | ||
3424 | |||
3425 | /* | ||
2832 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD | 3426 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD |
2833 | * list callback | 3427 | * list callback |
2834 | * This function is a callback passed to __vxge_hw_mempool_create to create memory | 3428 | * This function is a callback passed to __vxge_hw_mempool_create to create memory |
@@ -2993,69 +3587,6 @@ exit: | |||
2993 | } | 3587 | } |
2994 | 3588 | ||
2995 | /* | 3589 | /* |
2996 | * __vxge_hw_fifo_abort - Returns the TxD | ||
2997 | * This function terminates the TxDs of the fifo |||
2998 | */ | ||
2999 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
3000 | { | ||
3001 | void *txdlh; | ||
3002 | |||
3003 | for (;;) { | ||
3004 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
3005 | |||
3006 | if (txdlh == NULL) | ||
3007 | break; | ||
3008 | |||
3009 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
3010 | |||
3011 | if (fifo->txdl_term) { | ||
3012 | fifo->txdl_term(txdlh, | ||
3013 | VXGE_HW_TXDL_STATE_POSTED, | ||
3014 | fifo->channel.userdata); | ||
3015 | } | ||
3016 | |||
3017 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
3018 | } | ||
3019 | |||
3020 | return VXGE_HW_OK; | ||
3021 | } | ||
3022 | |||
3023 | /* | ||
3024 | * __vxge_hw_fifo_reset - Resets the fifo | ||
3025 | * This function resets the fifo during vpath reset operation | ||
3026 | */ | ||
3027 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
3028 | { | ||
3029 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3030 | |||
3031 | __vxge_hw_fifo_abort(fifo); | ||
3032 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
3033 | |||
3034 | return status; | ||
3035 | } | ||
3036 | |||
3037 | /* | ||
3038 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
3039 | * This function frees up the memory pool and removes the FIFO |||
3040 | */ | ||
3041 | static enum vxge_hw_status | ||
3042 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
3043 | { | ||
3044 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
3045 | |||
3046 | __vxge_hw_fifo_abort(fifo); | ||
3047 | |||
3048 | if (fifo->mempool) | ||
3049 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
3050 | |||
3051 | vp->vpath->fifoh = NULL; | ||
3052 | |||
3053 | __vxge_hw_channel_free(&fifo->channel); | ||
3054 | |||
3055 | return VXGE_HW_OK; | ||
3056 | } | ||
3057 | |||
3058 | /* | ||
3059 | * __vxge_hw_vpath_pci_read - Read the content of given address | 3590 | * __vxge_hw_vpath_pci_read - Read the content of given address |
3060 | * in pci config space. | 3591 | * in pci config space. |
3061 | * Read from the vpath pci config space. | 3592 | * Read from the vpath pci config space. |
@@ -3786,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3786 | vp_reg = vpath->vp_reg; | 4317 | vp_reg = vpath->vp_reg; |
3787 | config = vpath->vp_config; | 4318 | config = vpath->vp_config; |
3788 | 4319 | ||
3789 | writeq((u64)0, &vp_reg->tim_dest_addr); | 4320 | writeq(0, &vp_reg->tim_dest_addr); |
3790 | writeq((u64)0, &vp_reg->tim_vpath_map); | 4321 | writeq(0, &vp_reg->tim_vpath_map); |
3791 | writeq((u64)0, &vp_reg->tim_bitmap); | 4322 | writeq(0, &vp_reg->tim_bitmap); |
3792 | writeq((u64)0, &vp_reg->tim_remap); | 4323 | writeq(0, &vp_reg->tim_remap); |
3793 | 4324 | ||
3794 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | 4325 | if (config->ring.enable == VXGE_HW_RING_ENABLE) |
3795 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | 4326 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( |
@@ -4021,8 +4552,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
4021 | return status; | 4552 | return status; |
4022 | } | 4553 | } |
4023 | 4554 | ||
4024 | void | 4555 | void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) |
4025 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4026 | { | 4556 | { |
4027 | struct __vxge_hw_virtualpath *vpath; | 4557 | struct __vxge_hw_virtualpath *vpath; |
4028 | struct vxge_hw_vpath_reg __iomem *vp_reg; | 4558 | struct vxge_hw_vpath_reg __iomem *vp_reg; |
@@ -4033,17 +4563,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) | |||
4033 | vp_reg = vpath->vp_reg; | 4563 | vp_reg = vpath->vp_reg; |
4034 | config = vpath->vp_config; | 4564 | config = vpath->vp_config; |
4035 | 4565 | ||
4036 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | 4566 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE && |
4567 | config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | ||
4568 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | ||
4037 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | 4569 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); |
4038 | 4570 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
4039 | if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | 4571 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); |
4040 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | ||
4041 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
4042 | writeq(val64, | ||
4043 | &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4044 | } | ||
4045 | } | 4572 | } |
4046 | } | 4573 | } |
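The rewritten body above folds the two nested checks into one condition and then performs a plain read-modify-write of the TX interrupt timer register, so the readq() is skipped entirely once continuous interrupts are already enabled. The same RMW idiom in isolation; set_bits64 is a hypothetical helper name, not part of this patch:

	static inline void set_bits64(u64 __iomem *reg, u64 bits)
	{
		u64 val64 = readq(reg);

		val64 |= bits;		/* e.g. VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI */
		writeq(val64, reg);
	}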
4574 | |||
4047 | /* | 4575 | /* |
4048 | * __vxge_hw_vpath_initialize | 4576 | * __vxge_hw_vpath_initialize |
4049 | * This routine is the final phase of init which initializes the | 4577 | * This routine is the final phase of init which initializes the |
@@ -4067,22 +4595,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
4067 | vp_reg = vpath->vp_reg; | 4595 | vp_reg = vpath->vp_reg; |
4068 | 4596 | ||
4069 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | 4597 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); |
4070 | |||
4071 | if (status != VXGE_HW_OK) | 4598 | if (status != VXGE_HW_OK) |
4072 | goto exit; | 4599 | goto exit; |
4073 | 4600 | ||
4074 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | 4601 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); |
4075 | |||
4076 | if (status != VXGE_HW_OK) | 4602 | if (status != VXGE_HW_OK) |
4077 | goto exit; | 4603 | goto exit; |
4078 | 4604 | ||
4079 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | 4605 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); |
4080 | |||
4081 | if (status != VXGE_HW_OK) | 4606 | if (status != VXGE_HW_OK) |
4082 | goto exit; | 4607 | goto exit; |
4083 | 4608 | ||
4084 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | 4609 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); |
4085 | |||
4086 | if (status != VXGE_HW_OK) | 4610 | if (status != VXGE_HW_OK) |
4087 | goto exit; | 4611 | goto exit; |
4088 | 4612 | ||
@@ -4090,7 +4614,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
4090 | 4614 | ||
4091 | /* Get MRRS value from device control */ | 4615 | /* Get MRRS value from device control */ |
4092 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | 4616 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); |
4093 | |||
4094 | if (status == VXGE_HW_OK) { | 4617 | if (status == VXGE_HW_OK) { |
4095 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | 4618 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; |
4096 | val64 &= | 4619 | val64 &= |
@@ -4114,6 +4637,28 @@ exit: | |||
4114 | } | 4637 | } |
4115 | 4638 | ||
4116 | /* | 4639 | /* |
4640 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4641 | * This routine closes all channels it opened and frees up memory | ||
4642 | */ | ||
4643 | static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4644 | { | ||
4645 | struct __vxge_hw_virtualpath *vpath; | ||
4646 | |||
4647 | vpath = &hldev->virtual_paths[vp_id]; | ||
4648 | |||
4649 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4650 | goto exit; | ||
4651 | |||
4652 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4653 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4654 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4655 | |||
4656 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4657 | exit: | ||
4658 | return; | ||
4659 | } | ||
4660 | |||
4661 | /* | ||
4117 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | 4662 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure |
4118 | * This routine is the initial phase of init which resets the vpath and | 4663 | * This routine is the initial phase of init which resets the vpath and |
4119 | * initializes the software support structures. | 4664 | * initializes the software support structures. |
@@ -4169,29 +4714,6 @@ exit: | |||
4169 | } | 4714 | } |
4170 | 4715 | ||
4171 | /* | 4716 | /* |
4172 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4173 | * This routine closes all channels it opened and frees up memory |||
4174 | */ | ||
4175 | static void | ||
4176 | __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4177 | { | ||
4178 | struct __vxge_hw_virtualpath *vpath; | ||
4179 | |||
4180 | vpath = &hldev->virtual_paths[vp_id]; | ||
4181 | |||
4182 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4183 | goto exit; | ||
4184 | |||
4185 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4186 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4187 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4188 | |||
4189 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4190 | exit: | ||
4191 | return; | ||
4192 | } | ||
4193 | |||
4194 | /* | ||
4195 | * vxge_hw_vpath_mtu_set - Set MTU. | 4717 | * vxge_hw_vpath_mtu_set - Set MTU. |
4196 | * Set new MTU value. Example, to use jumbo frames: | 4718 | * Set new MTU value. Example, to use jumbo frames: |
4197 | * vxge_hw_vpath_mtu_set(my_device, 9600); | 4719 | * vxge_hw_vpath_mtu_set(my_device, 9600); |
@@ -4228,6 +4750,64 @@ exit: | |||
4228 | } | 4750 | } |
4229 | 4751 | ||
4230 | /* | 4752 | /* |
4753 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | ||
4754 | * Enable the DMA vpath statistics. The function is to be called to re-enable | ||
4755 | * the adapter to update stats into the host memory | ||
4756 | */ | ||
4757 | static enum vxge_hw_status | ||
4758 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4759 | { | ||
4760 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4761 | struct __vxge_hw_virtualpath *vpath; | ||
4762 | |||
4763 | vpath = vp->vpath; | ||
4764 | |||
4765 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4766 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4767 | goto exit; | ||
4768 | } | ||
4769 | |||
4770 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4771 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4772 | |||
4773 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4774 | exit: | ||
4775 | return status; | ||
4776 | } | ||
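The memcpy above snapshots the current counters into hw_stats_sav before __vxge_hw_vpath_stats_get() refreshes hw_stats, so callers can compute per-interval deltas. A minimal sketch, assuming only the two stats pointers of __vxge_hw_virtualpath; delta_mwr_sent is a hypothetical helper:

	static u32 delta_mwr_sent(struct __vxge_hw_virtualpath *vpath)
	{
		/* writes posted since the previous stats_enable() call */
		return vpath->hw_stats->ini_num_mwr_sent -
		       vpath->hw_stats_sav->ini_num_mwr_sent;
	}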
4777 | |||
4778 | /* | ||
4779 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
4780 | * This function allocates a block from block pool or from the system | ||
4781 | */ | ||
4782 | static struct __vxge_hw_blockpool_entry * | ||
4783 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
4784 | { | ||
4785 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4786 | struct __vxge_hw_blockpool *blockpool; | ||
4787 | |||
4788 | blockpool = &devh->block_pool; | ||
4789 | |||
4790 | if (size == blockpool->block_size) { | ||
4791 | |||
4792 | if (!list_empty(&blockpool->free_block_list)) | ||
4793 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4794 | list_first_entry(&blockpool->free_block_list, | ||
4795 | struct __vxge_hw_blockpool_entry, | ||
4796 | item); | ||
4797 | |||
4798 | if (entry != NULL) { | ||
4799 | list_del(&entry->item); | ||
4800 | blockpool->pool_size--; | ||
4801 | } | ||
4802 | } | ||
4803 | |||
4804 | if (entry != NULL) | ||
4805 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4806 | |||
4807 | return entry; | ||
4808 | } | ||
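A usage sketch mirroring how vxge_hw_vpath_open() (below) obtains its per-vpath stats block: only block-sized requests are served from the pool, and the entry is later returned with __vxge_hw_blockpool_block_free(). The wrapper name is hypothetical and error handling is condensed:

	static enum vxge_hw_status
	stats_block_sketch(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool_entry **out)
	{
		*out = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE);
		if (*out == NULL)
			return VXGE_HW_ERR_OUT_OF_MEMORY;

		/* (*out)->memblock and (*out)->dma_addr are ready for the device */
		return VXGE_HW_OK;
	}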
4809 | |||
4810 | /* | ||
4231 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | 4811 | * vxge_hw_vpath_open - Open a virtual path on a given adapter |
4232 | * This function is used to open access to virtual path of an | 4812 | * This function is used to open access to virtual path of an |
4233 | * adapter for offload, GRO operations. This function returns | 4813 | * adapter for offload, GRO operations. This function returns |
@@ -4251,7 +4831,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4251 | 4831 | ||
4252 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | 4832 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, |
4253 | &hldev->config.vp_config[attr->vp_id]); | 4833 | &hldev->config.vp_config[attr->vp_id]); |
4254 | |||
4255 | if (status != VXGE_HW_OK) | 4834 | if (status != VXGE_HW_OK) |
4256 | goto vpath_open_exit1; | 4835 | goto vpath_open_exit1; |
4257 | 4836 | ||
@@ -4283,7 +4862,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4283 | 4862 | ||
4284 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | 4863 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, |
4285 | VXGE_HW_BLOCK_SIZE); | 4864 | VXGE_HW_BLOCK_SIZE); |
4286 | |||
4287 | if (vpath->stats_block == NULL) { | 4865 | if (vpath->stats_block == NULL) { |
4288 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 4866 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
4289 | goto vpath_open_exit8; | 4867 | goto vpath_open_exit8; |
@@ -4342,8 +4920,7 @@ vpath_open_exit1: | |||
4342 | * This function initializes the Rx doorbell of a virtual path opened | 4920 | * This function initializes the Rx doorbell of a virtual path opened |
4343 | * earlier. | 4921 | * earlier. |
4344 | */ | 4922 | */ |
4345 | void | 4923 | void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) |
4346 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | ||
4347 | { | 4924 | { |
4348 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | 4925 | struct __vxge_hw_virtualpath *vpath = vp->vpath; |
4349 | struct __vxge_hw_ring *ring = vpath->ringh; | 4926 | struct __vxge_hw_ring *ring = vpath->ringh; |
@@ -4379,6 +4956,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | |||
4379 | } | 4956 | } |
4380 | 4957 | ||
4381 | /* | 4958 | /* |
4959 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
4960 | * @devh: Hal device | ||
4961 | * @entry: Entry of block to be freed | ||
4962 | * | ||
4963 | * This function frees a block from block pool | ||
4964 | */ | ||
4965 | static void | ||
4966 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
4967 | struct __vxge_hw_blockpool_entry *entry) | ||
4968 | { | ||
4969 | struct __vxge_hw_blockpool *blockpool; | ||
4970 | |||
4971 | blockpool = &devh->block_pool; | ||
4972 | |||
4973 | if (entry->length == blockpool->block_size) { | ||
4974 | list_add(&entry->item, &blockpool->free_block_list); | ||
4975 | blockpool->pool_size++; | ||
4976 | } | ||
4977 | |||
4978 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
4979 | } | ||
4980 | |||
4981 | /* | ||
4382 | * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open | 4982 | * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open |
4383 | * This function is used to close access to virtual path opened | 4983 | * This function is used to close access to virtual path opened |
4384 | * earlier. | 4984 | * earlier. |
@@ -4529,728 +5129,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | |||
4529 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | 5129 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), |
4530 | &hldev->common_reg->cmn_rsthdlr_cfg1); | 5130 | &hldev->common_reg->cmn_rsthdlr_cfg1); |
4531 | } | 5131 | } |
4532 | |||
4533 | /* | ||
4534 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. |||
4535 | * Enable the DMA vpath statistics. The function is to be called to re-enable | ||
4536 | * the adapter to update stats into the host memory | ||
4537 | */ | ||
4538 | static enum vxge_hw_status | ||
4539 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4540 | { | ||
4541 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4542 | struct __vxge_hw_virtualpath *vpath; | ||
4543 | |||
4544 | vpath = vp->vpath; | ||
4545 | |||
4546 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4547 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4548 | goto exit; | ||
4549 | } | ||
4550 | |||
4551 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4552 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4553 | |||
4554 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4555 | exit: | ||
4556 | return status; | ||
4557 | } | ||
4558 | |||
4559 | /* | ||
4560 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
4561 | * and offset and perform an operation | ||
4562 | */ | ||
4563 | static enum vxge_hw_status | ||
4564 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
4565 | u32 operation, u32 offset, u64 *stat) | ||
4566 | { | ||
4567 | u64 val64; | ||
4568 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4569 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4570 | |||
4571 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4572 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4573 | goto vpath_stats_access_exit; | ||
4574 | } | ||
4575 | |||
4576 | vp_reg = vpath->vp_reg; | ||
4577 | |||
4578 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
4579 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
4580 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
4581 | |||
4582 | status = __vxge_hw_pio_mem_write64(val64, | ||
4583 | &vp_reg->xmac_stats_access_cmd, | ||
4584 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
4585 | vpath->hldev->config.device_poll_millis); | ||
4586 | |||
4587 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
4588 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
4589 | else | ||
4590 | *stat = 0; | ||
4591 | |||
4592 | vpath_stats_access_exit: | ||
4593 | return status; | ||
4594 | } | ||
4595 | |||
4596 | /* | ||
4597 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
4598 | */ | ||
4599 | static enum vxge_hw_status | ||
4600 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
4601 | struct __vxge_hw_virtualpath *vpath, | ||
4602 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
4603 | { | ||
4604 | u64 *val64; | ||
4605 | int i; | ||
4606 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
4607 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4608 | |||
4609 | val64 = (u64 *) vpath_tx_stats; | ||
4610 | |||
4611 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4612 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4613 | goto exit; | ||
4614 | } | ||
4615 | |||
4616 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
4617 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4618 | VXGE_HW_STATS_OP_READ, | ||
4619 | offset, val64); | ||
4620 | if (status != VXGE_HW_OK) | ||
4621 | goto exit; | ||
4622 | offset++; | ||
4623 | val64++; | ||
4624 | } | ||
4625 | exit: | ||
4626 | return status; | ||
4627 | } | ||
4628 | |||
4629 | /* | ||
4630 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
4631 | */ | ||
4632 | static enum vxge_hw_status | ||
4633 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
4634 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
4635 | { | ||
4636 | u64 *val64; | ||
4637 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4638 | int i; | ||
4639 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
4640 | val64 = (u64 *) vpath_rx_stats; | ||
4641 | |||
4642 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4643 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4644 | goto exit; | ||
4645 | } | ||
4646 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
4647 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4648 | VXGE_HW_STATS_OP_READ, | ||
4649 | offset >> 3, val64); | ||
4650 | if (status != VXGE_HW_OK) | ||
4651 | goto exit; | ||
4652 | |||
4653 | offset += 8; | ||
4654 | val64++; | ||
4655 | } | ||
4656 | exit: | ||
4657 | return status; | ||
4658 | } | ||
4659 | |||
4660 | /* | ||
4661 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
4662 | */ | ||
4663 | static enum vxge_hw_status | ||
4664 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
4665 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
4666 | { | ||
4667 | u64 val64; | ||
4668 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4669 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4670 | |||
4671 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4672 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4673 | goto exit; | ||
4674 | } | ||
4675 | vp_reg = vpath->vp_reg; | ||
4676 | |||
4677 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
4678 | hw_stats->ini_num_mwr_sent = | ||
4679 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
4680 | |||
4681 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
4682 | hw_stats->ini_num_mrd_sent = | ||
4683 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
4684 | |||
4685 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
4686 | hw_stats->ini_num_cpl_rcvd = | ||
4687 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
4688 | |||
4689 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
4690 | hw_stats->ini_num_mwr_byte_sent = | ||
4691 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
4692 | |||
4693 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
4694 | hw_stats->ini_num_cpl_byte_rcvd = | ||
4695 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
4696 | |||
4697 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
4698 | hw_stats->wrcrdtarb_xoff = | ||
4699 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
4700 | |||
4701 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
4702 | hw_stats->rdcrdtarb_xoff = | ||
4703 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
4704 | |||
4705 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4706 | hw_stats->vpath_genstats_count0 = | ||
4707 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
4708 | val64); | ||
4709 | |||
4710 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4711 | hw_stats->vpath_genstats_count1 = | ||
4712 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
4713 | val64); | ||
4714 | |||
4715 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4716 | hw_stats->vpath_genstats_count2 = | ||
4717 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
4718 | val64); | ||
4719 | |||
4720 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4721 | hw_stats->vpath_genstats_count3 = | ||
4722 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
4723 | val64); | ||
4724 | |||
4725 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
4726 | hw_stats->vpath_genstats_count4 = | ||
4727 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
4728 | val64); | ||
4729 | |||
4730 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
4731 | hw_stats->vpath_genstats_count5 = | ||
4732 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
4733 | val64); | ||
4734 | |||
4735 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
4736 | if (status != VXGE_HW_OK) | ||
4737 | goto exit; | ||
4738 | |||
4739 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
4740 | if (status != VXGE_HW_OK) | ||
4741 | goto exit; | ||
4742 | |||
4743 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4744 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
4745 | |||
4746 | hw_stats->prog_event_vnum0 = | ||
4747 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
4748 | |||
4749 | hw_stats->prog_event_vnum1 = | ||
4750 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
4751 | |||
4752 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4753 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
4754 | |||
4755 | hw_stats->prog_event_vnum2 = | ||
4756 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
4757 | |||
4758 | hw_stats->prog_event_vnum3 = | ||
4759 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
4760 | |||
4761 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
4762 | hw_stats->rx_multi_cast_frame_discard = | ||
4763 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
4764 | |||
4765 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
4766 | hw_stats->rx_frm_transferred = | ||
4767 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
4768 | |||
4769 | val64 = readq(&vp_reg->rxd_returned); | ||
4770 | hw_stats->rxd_returned = | ||
4771 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
4772 | |||
4773 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
4774 | hw_stats->rx_mpa_len_fail_frms = | ||
4775 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
4776 | hw_stats->rx_mpa_mrk_fail_frms = | ||
4777 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
4778 | hw_stats->rx_mpa_crc_fail_frms = | ||
4779 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
4780 | |||
4781 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
4782 | hw_stats->rx_permitted_frms = | ||
4783 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
4784 | hw_stats->rx_vp_reset_discarded_frms = | ||
4785 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
4786 | hw_stats->rx_wol_frms = | ||
4787 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
4788 | |||
4789 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
4790 | hw_stats->tx_vp_reset_discarded_frms = | ||
4791 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
4792 | val64); | ||
4793 | exit: | ||
4794 | return status; | ||
4795 | } | ||
4796 | |||
4797 | |||
4798 | static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, | ||
4799 | unsigned long size) | ||
4800 | { | ||
4801 | gfp_t flags; | ||
4802 | void *vaddr; | ||
4803 | |||
4804 | if (in_interrupt()) | ||
4805 | flags = GFP_ATOMIC | GFP_DMA; | ||
4806 | else | ||
4807 | flags = GFP_KERNEL | GFP_DMA; | ||
4808 | |||
4809 | vaddr = kmalloc((size), flags); | ||
4810 | |||
4811 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); | ||
4812 | } | ||
4813 | |||
4814 | static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, | ||
4815 | struct pci_dev **p_dma_acch) | ||
4816 | { | ||
4817 | unsigned long misaligned = *(unsigned long *)p_dma_acch; | ||
4818 | u8 *tmp = (u8 *)vaddr; | ||
4819 | tmp -= misaligned; | ||
4820 | kfree((void *)tmp); | ||
4821 | } | ||
4822 | |||
4823 | /* | ||
4824 | * __vxge_hw_blockpool_create - Create block pool | ||
4825 | */ | ||
4826 | |||
4827 | static enum vxge_hw_status | ||
4828 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
4829 | struct __vxge_hw_blockpool *blockpool, | ||
4830 | u32 pool_size, | ||
4831 | u32 pool_max) | ||
4832 | { | ||
4833 | u32 i; | ||
4834 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4835 | void *memblock; | ||
4836 | dma_addr_t dma_addr; | ||
4837 | struct pci_dev *dma_handle; | ||
4838 | struct pci_dev *acc_handle; | ||
4839 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4840 | |||
4841 | if (blockpool == NULL) { | ||
4842 | status = VXGE_HW_FAIL; | ||
4843 | goto blockpool_create_exit; | ||
4844 | } | ||
4845 | |||
4846 | blockpool->hldev = hldev; | ||
4847 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
4848 | blockpool->pool_size = 0; | ||
4849 | blockpool->pool_max = pool_max; | ||
4850 | blockpool->req_out = 0; | ||
4851 | |||
4852 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
4853 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
4854 | |||
4855 | for (i = 0; i < pool_size + pool_max; i++) { | ||
4856 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4857 | GFP_KERNEL); | ||
4858 | if (entry == NULL) { | ||
4859 | __vxge_hw_blockpool_destroy(blockpool); | ||
4860 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4861 | goto blockpool_create_exit; | ||
4862 | } | ||
4863 | list_add(&entry->item, &blockpool->free_entry_list); | ||
4864 | } | ||
4865 | |||
4866 | for (i = 0; i < pool_size; i++) { | ||
4867 | |||
4868 | memblock = vxge_os_dma_malloc( | ||
4869 | hldev->pdev, | ||
4870 | VXGE_HW_BLOCK_SIZE, | ||
4871 | &dma_handle, | ||
4872 | &acc_handle); | ||
4873 | |||
4874 | if (memblock == NULL) { | ||
4875 | __vxge_hw_blockpool_destroy(blockpool); | ||
4876 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4877 | goto blockpool_create_exit; | ||
4878 | } | ||
4879 | |||
4880 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
4881 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
4882 | |||
4883 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
4884 | dma_addr))) { | ||
4885 | |||
4886 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
4887 | __vxge_hw_blockpool_destroy(blockpool); | ||
4888 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4889 | goto blockpool_create_exit; | ||
4890 | } | ||
4891 | |||
4892 | if (!list_empty(&blockpool->free_entry_list)) | ||
4893 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4894 | list_first_entry(&blockpool->free_entry_list, | ||
4895 | struct __vxge_hw_blockpool_entry, | ||
4896 | item); | ||
4897 | |||
4898 | if (entry == NULL) | ||
4899 | entry = | ||
4900 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4901 | GFP_KERNEL); | ||
4902 | if (entry != NULL) { | ||
4903 | list_del(&entry->item); | ||
4904 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
4905 | entry->memblock = memblock; | ||
4906 | entry->dma_addr = dma_addr; | ||
4907 | entry->acc_handle = acc_handle; | ||
4908 | entry->dma_handle = dma_handle; | ||
4909 | list_add(&entry->item, | ||
4910 | &blockpool->free_block_list); | ||
4911 | blockpool->pool_size++; | ||
4912 | } else { | ||
4913 | __vxge_hw_blockpool_destroy(blockpool); | ||
4914 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4915 | goto blockpool_create_exit; | ||
4916 | } | ||
4917 | } | ||
4918 | |||
4919 | blockpool_create_exit: | ||
4920 | return status; | ||
4921 | } | ||
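A hedged lifecycle sketch of the pool: create pre-allocates pool_size DMA-mapped blocks plus pool_size + pool_max entry shells up front, and destroy unmaps and frees whatever is still pooled. The wrapper and its sizing choices are assumptions here:

	static enum vxge_hw_status
	pool_lifecycle_sketch(struct __vxge_hw_device *hldev, u32 pool_size, u32 pool_max)
	{
		enum vxge_hw_status status;

		status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
						    pool_size, pool_max);
		if (status != VXGE_HW_OK)
			return status;

		/* ... serve requests via __vxge_hw_blockpool_malloc()/_free() ... */

		__vxge_hw_blockpool_destroy(&hldev->block_pool);
		return VXGE_HW_OK;
	}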
4922 | |||
4923 | /* | ||
4924 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
4925 | */ | ||
4926 | |||
4927 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
4928 | { | ||
4929 | |||
4930 | struct __vxge_hw_device *hldev; | ||
4931 | struct list_head *p, *n; | ||
4932 | u16 ret; | ||
4933 | |||
4934 | if (blockpool == NULL) { | ||
4935 | ret = 1; | ||
4936 | goto exit; | ||
4937 | } | ||
4938 | |||
4939 | hldev = blockpool->hldev; | ||
4940 | |||
4941 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4942 | |||
4943 | pci_unmap_single(hldev->pdev, | ||
4944 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4945 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4946 | PCI_DMA_BIDIRECTIONAL); | ||
4947 | |||
4948 | vxge_os_dma_free(hldev->pdev, | ||
4949 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4950 | &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); | ||
4951 | |||
4952 | list_del( | ||
4953 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4954 | kfree(p); | ||
4955 | blockpool->pool_size--; | ||
4956 | } | ||
4957 | |||
4958 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
4959 | list_del( | ||
4960 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4961 | kfree((void *)p); | ||
4962 | } | ||
4963 | ret = 0; | ||
4964 | exit: | ||
4965 | return; | ||
4966 | } | ||
4967 | |||
4968 | /* | ||
4969 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | ||
4970 | */ | ||
4971 | static | ||
4972 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
4973 | { | ||
4974 | u32 nreq = 0, i; | ||
4975 | |||
4976 | if ((blockpool->pool_size + blockpool->req_out) < | ||
4977 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
4978 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
4979 | blockpool->req_out += nreq; | ||
4980 | } | ||
4981 | |||
4982 | for (i = 0; i < nreq; i++) | ||
4983 | vxge_os_dma_malloc_async( | ||
4984 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4985 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | ||
4986 | } | ||
4987 | |||
4988 | /* | ||
4989 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | ||
4990 | */ | ||
4991 | static | ||
4992 | void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
4993 | { | ||
4994 | struct list_head *p, *n; | ||
4995 | |||
4996 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4997 | |||
4998 | if (blockpool->pool_size < blockpool->pool_max) | ||
4999 | break; | ||
5000 | |||
5001 | pci_unmap_single( | ||
5002 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
5003 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
5004 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
5005 | PCI_DMA_BIDIRECTIONAL); | ||
5006 | |||
5007 | vxge_os_dma_free( | ||
5008 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
5009 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
5010 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
5011 | |||
5012 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
5013 | |||
5014 | list_add(p, &blockpool->free_entry_list); | ||
5015 | |||
5016 | blockpool->pool_size--; | ||
5017 | |||
5018 | } | ||
5019 | } | ||
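Taken together, the two helpers above implement simple hysteresis: replenish asynchronously once pooled plus in-flight requests fall below a fixed floor, and trim back toward pool_max when the pool overshoots. A condensed restatement of that policy; the wrapper itself is hypothetical:

	static void pool_balance_sketch(struct __vxge_hw_blockpool *blockpool)
	{
		/* floor: keep a minimum of pooled + requested blocks */
		if (blockpool->pool_size + blockpool->req_out <
		    VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE)
			__vxge_hw_blockpool_blocks_add(blockpool);

		/* ceiling: release the excess back to the system */
		if (blockpool->pool_size >= blockpool->pool_max)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}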
5020 | |||
5021 | /* | ||
5022 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
5023 | * Adds a block to block pool | ||
5024 | */ | ||
5025 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
5026 | void *block_addr, | ||
5027 | u32 length, | ||
5028 | struct pci_dev *dma_h, | ||
5029 | struct pci_dev *acc_handle) | ||
5030 | { | ||
5031 | struct __vxge_hw_blockpool *blockpool; | ||
5032 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5033 | dma_addr_t dma_addr; | ||
5034 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5035 | u32 req_out; | ||
5036 | |||
5037 | blockpool = &devh->block_pool; | ||
5038 | |||
5039 | if (block_addr == NULL) { | ||
5040 | blockpool->req_out--; | ||
5041 | status = VXGE_HW_FAIL; | ||
5042 | goto exit; | ||
5043 | } | ||
5044 | |||
5045 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | ||
5046 | PCI_DMA_BIDIRECTIONAL); | ||
5047 | |||
5048 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | ||
5049 | |||
5050 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
5051 | blockpool->req_out--; | ||
5052 | status = VXGE_HW_FAIL; | ||
5053 | goto exit; | ||
5054 | } | ||
5055 | |||
5056 | |||
5057 | if (!list_empty(&blockpool->free_entry_list)) | ||
5058 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5059 | list_first_entry(&blockpool->free_entry_list, | ||
5060 | struct __vxge_hw_blockpool_entry, | ||
5061 | item); | ||
5062 | |||
5063 | if (entry == NULL) | ||
5064 | entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | ||
5065 | else | ||
5066 | list_del(&entry->item); | ||
5067 | |||
5068 | if (entry != NULL) { | ||
5069 | entry->length = length; | ||
5070 | entry->memblock = block_addr; | ||
5071 | entry->dma_addr = dma_addr; | ||
5072 | entry->acc_handle = acc_handle; | ||
5073 | entry->dma_handle = dma_h; | ||
5074 | list_add(&entry->item, &blockpool->free_block_list); | ||
5075 | blockpool->pool_size++; | ||
5076 | status = VXGE_HW_OK; | ||
5077 | } else | ||
5078 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5079 | |||
5080 | blockpool->req_out--; | ||
5081 | |||
5082 | req_out = blockpool->req_out; | ||
5083 | exit: | ||
5084 | return; | ||
5085 | } | ||
5086 | |||
5087 | /* | ||
5088 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | ||
5089 | * Allocates a block of memory of given size, either from block pool | ||
5090 | * or by calling vxge_os_dma_malloc() | ||
5091 | */ | ||
5092 | static void * | ||
5093 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
5094 | struct vxge_hw_mempool_dma *dma_object) | ||
5095 | { | ||
5096 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5097 | struct __vxge_hw_blockpool *blockpool; | ||
5098 | void *memblock = NULL; | ||
5099 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5100 | |||
5101 | blockpool = &devh->block_pool; | ||
5102 | |||
5103 | if (size != blockpool->block_size) { | ||
5104 | |||
5105 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
5106 | &dma_object->handle, | ||
5107 | &dma_object->acc_handle); | ||
5108 | |||
5109 | if (memblock == NULL) { | ||
5110 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5111 | goto exit; | ||
5112 | } | ||
5113 | |||
5114 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | ||
5115 | PCI_DMA_BIDIRECTIONAL); | ||
5116 | |||
5117 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
5118 | dma_object->addr))) { | ||
5119 | vxge_os_dma_free(devh->pdev, memblock, | ||
5120 | &dma_object->acc_handle); | ||
5121 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5122 | goto exit; | ||
5123 | } | ||
5124 | |||
5125 | } else { | ||
5126 | |||
5127 | if (!list_empty(&blockpool->free_block_list)) | ||
5128 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5129 | list_first_entry(&blockpool->free_block_list, | ||
5130 | struct __vxge_hw_blockpool_entry, | ||
5131 | item); | ||
5132 | |||
5133 | if (entry != NULL) { | ||
5134 | list_del(&entry->item); | ||
5135 | dma_object->addr = entry->dma_addr; | ||
5136 | dma_object->handle = entry->dma_handle; | ||
5137 | dma_object->acc_handle = entry->acc_handle; | ||
5138 | memblock = entry->memblock; | ||
5139 | |||
5140 | list_add(&entry->item, | ||
5141 | &blockpool->free_entry_list); | ||
5142 | blockpool->pool_size--; | ||
5143 | } | ||
5144 | |||
5145 | if (memblock != NULL) | ||
5146 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5147 | } | ||
5148 | exit: | ||
5149 | return memblock; | ||
5150 | } | ||
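A round-trip sketch of the malloc/free pair above: block-sized requests are recycled through the pool, any other size falls through to a fresh DMA mapping, and in both cases the same vxge_hw_mempool_dma object carries the mapping back to free. It assumes only the signatures shown in this diff:

	static enum vxge_hw_status
	dma_roundtrip_sketch(struct __vxge_hw_device *devh, u32 size)
	{
		struct vxge_hw_mempool_dma dma;
		void *memblock;

		memblock = __vxge_hw_blockpool_malloc(devh, size, &dma);
		if (memblock == NULL)
			return VXGE_HW_ERR_OUT_OF_MEMORY;

		/* ... the device DMAs to/from dma.addr while the block is held ... */

		__vxge_hw_blockpool_free(devh, memblock, size, &dma);
		return VXGE_HW_OK;
	}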
5151 | |||
5152 | /* | ||
5153 | * __vxge_hw_blockpool_free - Frees the memory allocated with |||
5154 | __vxge_hw_blockpool_malloc | ||
5155 | */ | ||
5156 | static void | ||
5157 | __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, | ||
5158 | void *memblock, u32 size, | ||
5159 | struct vxge_hw_mempool_dma *dma_object) | ||
5160 | { | ||
5161 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5162 | struct __vxge_hw_blockpool *blockpool; | ||
5163 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5164 | |||
5165 | blockpool = &devh->block_pool; | ||
5166 | |||
5167 | if (size != blockpool->block_size) { | ||
5168 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
5169 | PCI_DMA_BIDIRECTIONAL); | ||
5170 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
5171 | } else { | ||
5172 | |||
5173 | if (!list_empty(&blockpool->free_entry_list)) | ||
5174 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5175 | list_first_entry(&blockpool->free_entry_list, | ||
5176 | struct __vxge_hw_blockpool_entry, | ||
5177 | item); | ||
5178 | |||
5179 | if (entry == NULL) | ||
5180 | entry = vmalloc(sizeof( | ||
5181 | struct __vxge_hw_blockpool_entry)); | ||
5182 | else | ||
5183 | list_del(&entry->item); | ||
5184 | |||
5185 | if (entry != NULL) { | ||
5186 | entry->length = size; | ||
5187 | entry->memblock = memblock; | ||
5188 | entry->dma_addr = dma_object->addr; | ||
5189 | entry->acc_handle = dma_object->acc_handle; | ||
5190 | entry->dma_handle = dma_object->handle; | ||
5191 | list_add(&entry->item, | ||
5192 | &blockpool->free_block_list); | ||
5193 | blockpool->pool_size++; | ||
5194 | status = VXGE_HW_OK; | ||
5195 | } else | ||
5196 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5197 | |||
5198 | if (status == VXGE_HW_OK) | ||
5199 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5200 | } | ||
5201 | } | ||
5202 | |||
5203 | /* | ||
5204 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
5205 | * This function allocates a block from block pool or from the system | ||
5206 | */ | ||
5207 | static struct __vxge_hw_blockpool_entry * | ||
5208 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
5209 | { | ||
5210 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5211 | struct __vxge_hw_blockpool *blockpool; | ||
5212 | |||
5213 | blockpool = &devh->block_pool; | ||
5214 | |||
5215 | if (size == blockpool->block_size) { | ||
5216 | |||
5217 | if (!list_empty(&blockpool->free_block_list)) | ||
5218 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5219 | list_first_entry(&blockpool->free_block_list, | ||
5220 | struct __vxge_hw_blockpool_entry, | ||
5221 | item); | ||
5222 | |||
5223 | if (entry != NULL) { | ||
5224 | list_del(&entry->item); | ||
5225 | blockpool->pool_size--; | ||
5226 | } | ||
5227 | } | ||
5228 | |||
5229 | if (entry != NULL) | ||
5230 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5231 | |||
5232 | return entry; | ||
5233 | } | ||
5234 | |||
5235 | /* | ||
5236 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
5237 | * @devh: Hal device | ||
5238 | * @entry: Entry of block to be freed | ||
5239 | * | ||
5240 | * This function frees a block from block pool | ||
5241 | */ | ||
5242 | static void | ||
5243 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
5244 | struct __vxge_hw_blockpool_entry *entry) | ||
5245 | { | ||
5246 | struct __vxge_hw_blockpool *blockpool; | ||
5247 | |||
5248 | blockpool = &devh->block_pool; | ||
5249 | |||
5250 | if (entry->length == blockpool->block_size) { | ||
5251 | list_add(&entry->item, &blockpool->free_block_list); | ||
5252 | blockpool->pool_size++; | ||
5253 | } | ||
5254 | |||
5255 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5256 | } | ||
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index 5b2c8313426d..e249e288d160 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h | |||
@@ -314,9 +314,9 @@ struct vxge_hw_ring_config { | |||
314 | #define VXGE_HW_RING_DEFAULT 1 | 314 | #define VXGE_HW_RING_DEFAULT 1 |
315 | 315 | ||
316 | u32 ring_blocks; | 316 | u32 ring_blocks; |
317 | #define VXGE_HW_MIN_RING_BLOCKS 1 | 317 | #define VXGE_HW_MIN_RING_BLOCKS 1 |
318 | #define VXGE_HW_MAX_RING_BLOCKS 128 | 318 | #define VXGE_HW_MAX_RING_BLOCKS 128 |
319 | #define VXGE_HW_DEF_RING_BLOCKS 2 | 319 | #define VXGE_HW_DEF_RING_BLOCKS 2 |
320 | 320 | ||
321 | u32 buffer_mode; | 321 | u32 buffer_mode; |
322 | #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 | 322 | #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 |
@@ -700,7 +700,7 @@ struct __vxge_hw_virtualpath { | |||
700 | * | 700 | * |
701 | * This structure is used to store the callback information. | 701 | * This structure is used to store the callback information. |
702 | */ | 702 | */ |
703 | struct __vxge_hw_vpath_handle{ | 703 | struct __vxge_hw_vpath_handle { |
704 | struct list_head item; | 704 | struct list_head item; |
705 | struct __vxge_hw_virtualpath *vpath; | 705 | struct __vxge_hw_virtualpath *vpath; |
706 | }; | 706 | }; |
@@ -815,8 +815,8 @@ struct vxge_hw_device_hw_info { | |||
815 | u8 serial_number[VXGE_HW_INFO_LEN]; | 815 | u8 serial_number[VXGE_HW_INFO_LEN]; |
816 | u8 part_number[VXGE_HW_INFO_LEN]; | 816 | u8 part_number[VXGE_HW_INFO_LEN]; |
817 | u8 product_desc[VXGE_HW_INFO_LEN]; | 817 | u8 product_desc[VXGE_HW_INFO_LEN]; |
818 | u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | 818 | u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; |
819 | u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | 819 | u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; |
820 | }; | 820 | }; |
821 | 821 | ||
822 | /** | 822 | /** |
@@ -863,20 +863,10 @@ struct vxge_hw_device_attr { | |||
863 | loc, \ | 863 | loc, \ |
864 | offset, \ | 864 | offset, \ |
865 | &val64); \ | 865 | &val64); \ |
866 | \ | ||
867 | if (status != VXGE_HW_OK) \ | 866 | if (status != VXGE_HW_OK) \ |
868 | return status; \ | 867 | return status; \ |
869 | } | 868 | } |
870 | 869 | ||
871 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
872 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
873 | VXGE_HW_STATS_OP_READ, \ | ||
874 | offset, \ | ||
875 | &val64); \ | ||
876 | if (status != VXGE_HW_OK) \ | ||
877 | return status; \ | ||
878 | } | ||
879 | |||
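The statement macro removed from the header here expands in place at each call site: it relies on status, vpath and val64 already being in scope, and it returns from the enclosing function on failure, so that function must itself return enum vxge_hw_status. A hedged sketch of a call site — the offset constant below is hypothetical:

    enum vxge_hw_status status;
    u64 val64;

    VXGE_HW_VPATH_STATS_PIO_READ(SKETCH_STATS_COUNTER_OFFSET);
    /* reaching this point means the access succeeded and val64
     * holds the counter value read via the vpath */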
880 | /* | 870 | /* |
881 | * struct __vxge_hw_ring - Ring channel. | 871 | * struct __vxge_hw_ring - Ring channel. |
882 | * @channel: Channel "base" of this ring, the common part of all HW | 872 | * @channel: Channel "base" of this ring, the common part of all HW |
@@ -1148,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper { | |||
1148 | * lookup to determine the transmit port. | 1138 | * lookup to determine the transmit port. |
1149 | * 01: Send on physical Port1. | 1139 | * 01: Send on physical Port1. |
1150 | * 10: Send on physical Port0. | 1140 | * 10: Send on physical Port0. |
1151 | * 11: Send on both ports. | 1141 | * 11: Send on both ports. |
1152 | * Bits 18 to 21 - Reserved | 1142 | * Bits 18 to 21 - Reserved |
1153 | * Bits 22 to 23 - Gather_Code. This field is set by the host and | 1143 | * Bits 22 to 23 - Gather_Code. This field is set by the host and |
1154 | * is used to describe how individual buffers comprise a frame. | 1144 | * is used to describe how individual buffers comprise a frame. |
@@ -1927,6 +1917,15 @@ out: | |||
1927 | return vaddr; | 1917 | return vaddr; |
1928 | } | 1918 | } |
1929 | 1919 | ||
1920 | static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, | ||
1921 | struct pci_dev **p_dma_acch) | ||
1922 | { | ||
1923 | unsigned long misaligned = *(unsigned long *)p_dma_acch; | ||
1924 | u8 *tmp = (u8 *)vaddr; | ||
1925 | tmp -= misaligned; | ||
1926 | kfree((void *)tmp); | ||
1927 | } | ||
1928 | |||
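The new vxge_os_dma_free() inline undoes an alignment fix-up: it reads the misalignment offset that the matching allocator stashed through the p_dma_acch slot, steps the pointer back by that many bytes, and kfrees the original allocation. The allocator side is outside this hunk, so the pairing it implies is sketched here as an assumption (hypothetical helper, not the driver's actual code):

    static inline void *sketch_dma_malloc(size_t size, size_t align,
                                          unsigned long *p_misaligned)
    {
        u8 *raw = kmalloc(size + align, GFP_KERNEL);

        if (!raw)
            return NULL;
        /* record how far alignment moved the pointer; the free side
         * subtracts exactly this before calling kfree() */
        *p_misaligned = (u8 *)PTR_ALIGN(raw, align) - raw;
        return raw + *p_misaligned;
    }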
1930 | /* | 1929 | /* |
1931 | * __vxge_hw_mempool_item_priv - will return pointer on per item private space | 1930 | * __vxge_hw_mempool_item_priv - will return pointer on per item private space |
1932 | */ | 1931 | */ |
@@ -1996,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( | |||
1996 | void | 1995 | void |
1997 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); | 1996 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); |
1998 | 1997 | ||
1999 | |||
2000 | #ifndef readq | 1998 | #ifndef readq |
2001 | static inline u64 readq(void __iomem *addr) | 1999 | static inline u64 readq(void __iomem *addr) |
2002 | { | 2000 | { |
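The hunk ends just as the #ifndef readq fallback opens; its body is cut off in this extract. On 32-bit platforms lacking a native readq, the usual fallback composes two 32-bit MMIO reads — a sketch of that common pattern, not necessarily the driver's exact body:

    #ifndef readq
    static inline u64 readq(void __iomem *addr)
    {
        u64 ret;

        /* high word at offset 4, then low word; assumes the device
         * tolerates split 32-bit accesses to 64-bit registers */
        ret = readl(addr + 4);
        ret <<= 32;
        ret |= readl(addr);
        return ret;
    }
    #endif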
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 4877b3b8a29e..70c327910f09 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -84,15 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0); | |||
84 | 84 | ||
85 | static struct vxge_drv_config *driver_config; | 85 | static struct vxge_drv_config *driver_config; |
86 | 86 | ||
87 | static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, | ||
88 | struct macInfo *mac); | ||
89 | static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, | ||
90 | struct macInfo *mac); | ||
91 | static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac); | ||
92 | static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac); | ||
93 | static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath); | ||
94 | static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath); | ||
95 | |||
96 | static inline int is_vxge_card_up(struct vxgedev *vdev) | 87 | static inline int is_vxge_card_up(struct vxgedev *vdev) |
97 | { | 88 | { |
98 | return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); | 89 | return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); |
@@ -149,8 +140,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) | |||
149 | * This function is called during interrupt context to notify link up state | 140 | * This function is called during interrupt context to notify link up state |
150 | * change. | 141 | * change. |
151 | */ | 142 | */ |
152 | static void | 143 | static void vxge_callback_link_up(struct __vxge_hw_device *hldev) |
153 | vxge_callback_link_up(struct __vxge_hw_device *hldev) | ||
154 | { | 144 | { |
155 | struct net_device *dev = hldev->ndev; | 145 | struct net_device *dev = hldev->ndev; |
156 | struct vxgedev *vdev = netdev_priv(dev); | 146 | struct vxgedev *vdev = netdev_priv(dev); |
@@ -173,8 +163,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev) | |||
173 | * This function is called during interrupt context to notify link down state | 163 | * This function is called during interrupt context to notify link down state |
174 | * change. | 164 | * change. |
175 | */ | 165 | */ |
176 | static void | 166 | static void vxge_callback_link_down(struct __vxge_hw_device *hldev) |
177 | vxge_callback_link_down(struct __vxge_hw_device *hldev) | ||
178 | { | 167 | { |
179 | struct net_device *dev = hldev->ndev; | 168 | struct net_device *dev = hldev->ndev; |
180 | struct vxgedev *vdev = netdev_priv(dev); | 169 | struct vxgedev *vdev = netdev_priv(dev); |
@@ -196,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev) | |||
196 | * | 185 | * |
197 | * Allocate SKB. | 186 | * Allocate SKB. |
198 | */ | 187 | */ |
199 | static struct sk_buff* | 188 | static struct sk_buff * |
200 | vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) | 189 | vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) |
201 | { | 190 | { |
202 | struct net_device *dev; | 191 | struct net_device *dev; |
@@ -414,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
414 | 403 | ||
415 | prefetch((char *)skb + L1_CACHE_BYTES); | 404 | prefetch((char *)skb + L1_CACHE_BYTES); |
416 | if (unlikely(t_code)) { | 405 | if (unlikely(t_code)) { |
417 | |||
418 | if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != | 406 | if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != |
419 | VXGE_HW_OK) { | 407 | VXGE_HW_OK) { |
420 | 408 | ||
@@ -437,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
437 | } | 425 | } |
438 | 426 | ||
439 | if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { | 427 | if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { |
440 | |||
441 | if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { | 428 | if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { |
442 | |||
443 | if (!vxge_rx_map(dtr, ring)) { | 429 | if (!vxge_rx_map(dtr, ring)) { |
444 | skb_put(skb, pkt_length); | 430 | skb_put(skb, pkt_length); |
445 | 431 | ||
@@ -678,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list( | |||
678 | return FALSE; | 664 | return FALSE; |
679 | } | 665 | } |
680 | 666 | ||
667 | static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) | ||
668 | { | ||
669 | struct vxge_mac_addrs *new_mac_entry; | ||
670 | u8 *mac_address = NULL; | ||
671 | |||
672 | if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) | ||
673 | return TRUE; | ||
674 | |||
675 | new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); | ||
676 | if (!new_mac_entry) { | ||
677 | vxge_debug_mem(VXGE_ERR, | ||
678 | "%s: memory allocation failed", | ||
679 | VXGE_DRIVER_NAME); | ||
680 | return FALSE; | ||
681 | } | ||
682 | |||
683 | list_add(&new_mac_entry->item, &vpath->mac_addr_list); | ||
684 | |||
685 | /* Copy the new mac address to the list */ | ||
686 | mac_address = (u8 *)&new_mac_entry->macaddr; | ||
687 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
688 | |||
689 | new_mac_entry->state = mac->state; | ||
690 | vpath->mac_addr_cnt++; | ||
691 | |||
692 | /* Is this a multicast address */ | ||
693 | if (0x01 & mac->macaddr[0]) | ||
694 | vpath->mcast_addr_cnt++; | ||
695 | |||
696 | return TRUE; | ||
697 | } | ||
698 | |||
699 | /* Add a mac address to DA table */ | ||
700 | static enum vxge_hw_status | ||
701 | vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
702 | { | ||
703 | enum vxge_hw_status status = VXGE_HW_OK; | ||
704 | struct vxge_vpath *vpath; | ||
705 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; | ||
706 | |||
707 | if (0x01 & mac->macaddr[0]) /* multicast address */ | ||
708 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; | ||
709 | else | ||
710 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; | ||
711 | |||
712 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
713 | status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, | ||
714 | mac->macmask, duplicate_mode); | ||
715 | if (status != VXGE_HW_OK) { | ||
716 | vxge_debug_init(VXGE_ERR, | ||
717 | "DA config add entry failed for vpath:%d", | ||
718 | vpath->device_id); | ||
719 | } else | ||
720 | if (FALSE == vxge_mac_list_add(vpath, mac)) | ||
721 | status = -EPERM; | ||
722 | |||
723 | return status; | ||
724 | } | ||
725 | |||
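vxge_add_mac_addr() programs the hardware DA table first and mirrors the address into the software list only on success, choosing the duplicate-handling mode from the multicast bit of the first address byte. A hedged caller sketch — macInfo field names follow the code above; the mask convention is an assumption, as the alloc-side usage is not shown here:

    /* Sketch: program one unicast address on vpath 0. */
    static enum vxge_hw_status
    sketch_program_unicast(struct vxgedev *vdev, const u8 *addr)
    {
        struct macInfo mac_info;

        memset(&mac_info, 0, sizeof(mac_info));
        memcpy(mac_info.macaddr, addr, ETH_ALEN);
        /* macmask left zeroed; exact mask semantics assumed */
        mac_info.vpath_no = 0;
        mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;

        return vxge_add_mac_addr(vdev, &mac_info);
    }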
681 | static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) | 726 | static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) |
682 | { | 727 | { |
683 | struct macInfo mac_info; | 728 | struct macInfo mac_info; |
@@ -1023,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) | |||
1023 | "%s:%d Exiting...", __func__, __LINE__); | 1068 | "%s:%d Exiting...", __func__, __LINE__); |
1024 | } | 1069 | } |
1025 | 1070 | ||
1071 | static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1072 | { | ||
1073 | struct list_head *entry, *next; | ||
1074 | u64 del_mac = 0; | ||
1075 | u8 *mac_address = (u8 *) (&del_mac); | ||
1076 | |||
1077 | /* Copy the mac address to delete from the list */ | ||
1078 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1079 | |||
1080 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1081 | if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { | ||
1082 | list_del(entry); | ||
1083 | kfree((struct vxge_mac_addrs *)entry); | ||
1084 | vpath->mac_addr_cnt--; | ||
1085 | |||
1086 | /* Is this a multicast address */ | ||
1087 | if (0x01 & mac->macaddr[0]) | ||
1088 | vpath->mcast_addr_cnt--; | ||
1089 | return TRUE; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | return FALSE; | ||
1094 | } | ||
1095 | |||
1096 | /* delete a mac address from DA table */ | ||
1097 | static enum vxge_hw_status | ||
1098 | vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
1099 | { | ||
1100 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1101 | struct vxge_vpath *vpath; | ||
1102 | |||
1103 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1104 | status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, | ||
1105 | mac->macmask); | ||
1106 | if (status != VXGE_HW_OK) { | ||
1107 | vxge_debug_init(VXGE_ERR, | ||
1108 | "DA config delete entry failed for vpath:%d", | ||
1109 | vpath->device_id); | ||
1110 | } else | ||
1111 | vxge_mac_list_del(vpath, mac); | ||
1112 | return status; | ||
1113 | } | ||
1114 | |||
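vxge_mac_list_del() avoids a byte-wise memcmp per entry by packing the six address bytes into a zero-initialized u64 and comparing whole words. This is safe only because vxge_mac_list_add() stored entry->macaddr through the identical memcpy, so both u64s carry the same (host-endian-dependent but consistent) byte layout. A standalone illustration:

    u64 a = 0, b = 0;
    u8 addr[ETH_ALEN] = { 0x00, 0x0c, 0x29, 0xab, 0xcd, 0xef };

    memcpy(&a, addr, ETH_ALEN);   /* top two bytes of a remain zero */
    memcpy(&b, addr, ETH_ALEN);
    /* a == b on any host: both sides were filled by the same
     * byte-wise copy from a zeroed start, so endianness cancels */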
1026 | /** | 1115 | /** |
1027 | * vxge_set_multicast | 1116 | * vxge_set_multicast |
1028 | * @dev: pointer to the device structure | 1117 | * @dev: pointer to the device structure |
@@ -1333,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) | |||
1333 | } | 1422 | } |
1334 | } | 1423 | } |
1335 | 1424 | ||
1425 | /* list all mac addresses from DA table */ | ||
1426 | static enum vxge_hw_status | ||
1427 | vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1428 | { | ||
1429 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1430 | unsigned char macmask[ETH_ALEN]; | ||
1431 | unsigned char macaddr[ETH_ALEN]; | ||
1432 | |||
1433 | status = vxge_hw_vpath_mac_addr_get(vpath->handle, | ||
1434 | macaddr, macmask); | ||
1435 | if (status != VXGE_HW_OK) { | ||
1436 | vxge_debug_init(VXGE_ERR, | ||
1437 | "DA config list entry failed for vpath:%d", | ||
1438 | vpath->device_id); | ||
1439 | return status; | ||
1440 | } | ||
1441 | |||
1442 | while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { | ||
1443 | status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, | ||
1444 | macaddr, macmask); | ||
1445 | if (status != VXGE_HW_OK) | ||
1446 | break; | ||
1447 | } | ||
1448 | |||
1449 | return status; | ||
1450 | } | ||
1451 | |||
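The search helper above shows the DA table's cursor-style API: vxge_hw_vpath_mac_addr_get() positions at the first entry and _get_next() advances until the hardware returns a non-OK status at the end of the table. A hedged sketch that walks and logs every programmed entry:

    unsigned char addr[ETH_ALEN], mask[ETH_ALEN];
    enum vxge_hw_status status;

    status = vxge_hw_vpath_mac_addr_get(vpath->handle, addr, mask);
    while (status == VXGE_HW_OK) {
        pr_info("DA entry: %pM\n", addr);
        status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
                                                 addr, mask);
    }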
1452 | /* Store all mac addresses from the list to the DA table */ | ||
1453 | static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) | ||
1454 | { | ||
1455 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1456 | struct macInfo mac_info; | ||
1457 | u8 *mac_address = NULL; | ||
1458 | struct list_head *entry, *next; | ||
1459 | |||
1460 | memset(&mac_info, 0, sizeof(struct macInfo)); | ||
1461 | |||
1462 | if (vpath->is_open) { | ||
1463 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1464 | mac_address = | ||
1465 | (u8 *)& | ||
1466 | ((struct vxge_mac_addrs *)entry)->macaddr; | ||
1467 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | ||
1468 | ((struct vxge_mac_addrs *)entry)->state = | ||
1469 | VXGE_LL_MAC_ADDR_IN_DA_TABLE; | ||
1470 | /* does this mac address already exist in da table? */ | ||
1471 | status = vxge_search_mac_addr_in_da_table(vpath, | ||
1472 | &mac_info); | ||
1473 | if (status != VXGE_HW_OK) { | ||
1474 | /* Add this mac address to the DA table */ | ||
1475 | status = vxge_hw_vpath_mac_addr_add( | ||
1476 | vpath->handle, mac_info.macaddr, | ||
1477 | mac_info.macmask, | ||
1478 | VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); | ||
1479 | if (status != VXGE_HW_OK) { | ||
1480 | vxge_debug_init(VXGE_ERR, | ||
1481 | "DA add entry failed for vpath:%d", | ||
1482 | vpath->device_id); | ||
1483 | ((struct vxge_mac_addrs *)entry)->state | ||
1484 | = VXGE_LL_MAC_ADDR_IN_LIST; | ||
1485 | } | ||
1486 | } | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | return status; | ||
1491 | } | ||
1492 | |||
1493 | /* Store all vlan ids from the list to the vid table */ | ||
1494 | static enum vxge_hw_status | ||
1495 | vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) | ||
1496 | { | ||
1497 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1498 | struct vxgedev *vdev = vpath->vdev; | ||
1499 | u16 vid; | ||
1500 | |||
1501 | if (vdev->vlgrp && vpath->is_open) { | ||
1502 | |||
1503 | for (vid = 0; vid < VLAN_N_VID; vid++) { | ||
1504 | if (!vlan_group_get_device(vdev->vlgrp, vid)) | ||
1505 | continue; | ||
1506 | /* Add these vlan to the vid table */ | ||
1507 | status = vxge_hw_vpath_vid_add(vpath->handle, vid); | ||
1508 | } | ||
1509 | } | ||
1510 | |||
1511 | return status; | ||
1512 | } | ||
1513 | |||
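One property of vxge_restore_vpath_vid_table() worth noting: status is overwritten on every vxge_hw_vpath_vid_add() call, so an early failure is masked when a later vid succeeds and only the last result is reported. A fail-fast variant would look like the sketch below (a possible alternative, not the driver's behavior):

    for (vid = 0; vid < VLAN_N_VID; vid++) {
        if (!vlan_group_get_device(vdev->vlgrp, vid))
            continue;
        status = vxge_hw_vpath_vid_add(vpath->handle, vid);
        if (status != VXGE_HW_OK)
            break;  /* surface the first failure, not the last */
    }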
1336 | /* | 1514 | /* |
1337 | * vxge_reset_vpath | 1515 | * vxge_reset_vpath |
1338 | * @vdev: pointer to vdev | 1516 | * @vdev: pointer to vdev |
@@ -1745,7 +1923,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |||
1745 | vdev->config.rth_algorithm, | 1923 | vdev->config.rth_algorithm, |
1746 | &hash_types, | 1924 | &hash_types, |
1747 | vdev->config.rth_bkt_sz); | 1925 | vdev->config.rth_bkt_sz); |
1748 | |||
1749 | if (status != VXGE_HW_OK) { | 1926 | if (status != VXGE_HW_OK) { |
1750 | vxge_debug_init(VXGE_ERR, | 1927 | vxge_debug_init(VXGE_ERR, |
1751 | "RTH configuration failed for vpath:%d", | 1928 | "RTH configuration failed for vpath:%d", |
@@ -1757,199 +1934,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |||
1757 | return status; | 1934 | return status; |
1758 | } | 1935 | } |
1759 | 1936 | ||
1760 | static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1761 | { | ||
1762 | struct vxge_mac_addrs *new_mac_entry; | ||
1763 | u8 *mac_address = NULL; | ||
1764 | |||
1765 | if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) | ||
1766 | return TRUE; | ||
1767 | |||
1768 | new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); | ||
1769 | if (!new_mac_entry) { | ||
1770 | vxge_debug_mem(VXGE_ERR, | ||
1771 | "%s: memory allocation failed", | ||
1772 | VXGE_DRIVER_NAME); | ||
1773 | return FALSE; | ||
1774 | } | ||
1775 | |||
1776 | list_add(&new_mac_entry->item, &vpath->mac_addr_list); | ||
1777 | |||
1778 | /* Copy the new mac address to the list */ | ||
1779 | mac_address = (u8 *)&new_mac_entry->macaddr; | ||
1780 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1781 | |||
1782 | new_mac_entry->state = mac->state; | ||
1783 | vpath->mac_addr_cnt++; | ||
1784 | |||
1785 | /* Is this a multicast address */ | ||
1786 | if (0x01 & mac->macaddr[0]) | ||
1787 | vpath->mcast_addr_cnt++; | ||
1788 | |||
1789 | return TRUE; | ||
1790 | } | ||
1791 | |||
1792 | /* Add a mac address to DA table */ | ||
1793 | static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, | ||
1794 | struct macInfo *mac) | ||
1795 | { | ||
1796 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1797 | struct vxge_vpath *vpath; | ||
1798 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; | ||
1799 | |||
1800 | if (0x01 & mac->macaddr[0]) /* multicast address */ | ||
1801 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; | ||
1802 | else | ||
1803 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; | ||
1804 | |||
1805 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1806 | status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, | ||
1807 | mac->macmask, duplicate_mode); | ||
1808 | if (status != VXGE_HW_OK) { | ||
1809 | vxge_debug_init(VXGE_ERR, | ||
1810 | "DA config add entry failed for vpath:%d", | ||
1811 | vpath->device_id); | ||
1812 | } else | ||
1813 | if (FALSE == vxge_mac_list_add(vpath, mac)) | ||
1814 | status = -EPERM; | ||
1815 | |||
1816 | return status; | ||
1817 | } | ||
1818 | |||
1819 | static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1820 | { | ||
1821 | struct list_head *entry, *next; | ||
1822 | u64 del_mac = 0; | ||
1823 | u8 *mac_address = (u8 *)(&del_mac); | ||
1824 | |||
1825 | /* Copy the mac address to delete from the list */ | ||
1826 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1827 | |||
1828 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1829 | if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { | ||
1830 | list_del(entry); | ||
1831 | kfree((struct vxge_mac_addrs *)entry); | ||
1832 | vpath->mac_addr_cnt--; | ||
1833 | |||
1834 | /* Is this a multicast address */ | ||
1835 | if (0x01 & mac->macaddr[0]) | ||
1836 | vpath->mcast_addr_cnt--; | ||
1837 | return TRUE; | ||
1838 | } | ||
1839 | } | ||
1840 | |||
1841 | return FALSE; | ||
1842 | } | ||
1843 | /* delete a mac address from DA table */ | ||
1844 | static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, | ||
1845 | struct macInfo *mac) | ||
1846 | { | ||
1847 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1848 | struct vxge_vpath *vpath; | ||
1849 | |||
1850 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1851 | status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, | ||
1852 | mac->macmask); | ||
1853 | if (status != VXGE_HW_OK) { | ||
1854 | vxge_debug_init(VXGE_ERR, | ||
1855 | "DA config delete entry failed for vpath:%d", | ||
1856 | vpath->device_id); | ||
1857 | } else | ||
1858 | vxge_mac_list_del(vpath, mac); | ||
1859 | return status; | ||
1860 | } | ||
1861 | |||
1862 | /* list all mac addresses from DA table */ | ||
1863 | enum vxge_hw_status | ||
1864 | static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, | ||
1865 | struct macInfo *mac) | ||
1866 | { | ||
1867 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1868 | unsigned char macmask[ETH_ALEN]; | ||
1869 | unsigned char macaddr[ETH_ALEN]; | ||
1870 | |||
1871 | status = vxge_hw_vpath_mac_addr_get(vpath->handle, | ||
1872 | macaddr, macmask); | ||
1873 | if (status != VXGE_HW_OK) { | ||
1874 | vxge_debug_init(VXGE_ERR, | ||
1875 | "DA config list entry failed for vpath:%d", | ||
1876 | vpath->device_id); | ||
1877 | return status; | ||
1878 | } | ||
1879 | |||
1880 | while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { | ||
1881 | |||
1882 | status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, | ||
1883 | macaddr, macmask); | ||
1884 | if (status != VXGE_HW_OK) | ||
1885 | break; | ||
1886 | } | ||
1887 | |||
1888 | return status; | ||
1889 | } | ||
1890 | |||
1891 | /* Store all vlan ids from the list to the vid table */ | ||
1892 | static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) | ||
1893 | { | ||
1894 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1895 | struct vxgedev *vdev = vpath->vdev; | ||
1896 | u16 vid; | ||
1897 | |||
1898 | if (vdev->vlgrp && vpath->is_open) { | ||
1899 | |||
1900 | for (vid = 0; vid < VLAN_N_VID; vid++) { | ||
1901 | if (!vlan_group_get_device(vdev->vlgrp, vid)) | ||
1902 | continue; | ||
1903 | /* Add these vlan to the vid table */ | ||
1904 | status = vxge_hw_vpath_vid_add(vpath->handle, vid); | ||
1905 | } | ||
1906 | } | ||
1907 | |||
1908 | return status; | ||
1909 | } | ||
1910 | |||
1911 | /* Store all mac addresses from the list to the DA table */ | ||
1912 | static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) | ||
1913 | { | ||
1914 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1915 | struct macInfo mac_info; | ||
1916 | u8 *mac_address = NULL; | ||
1917 | struct list_head *entry, *next; | ||
1918 | |||
1919 | memset(&mac_info, 0, sizeof(struct macInfo)); | ||
1920 | |||
1921 | if (vpath->is_open) { | ||
1922 | |||
1923 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1924 | mac_address = | ||
1925 | (u8 *)& | ||
1926 | ((struct vxge_mac_addrs *)entry)->macaddr; | ||
1927 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | ||
1928 | ((struct vxge_mac_addrs *)entry)->state = | ||
1929 | VXGE_LL_MAC_ADDR_IN_DA_TABLE; | ||
1930 | /* does this mac address already exist in da table? */ | ||
1931 | status = vxge_search_mac_addr_in_da_table(vpath, | ||
1932 | &mac_info); | ||
1933 | if (status != VXGE_HW_OK) { | ||
1934 | /* Add this mac address to the DA table */ | ||
1935 | status = vxge_hw_vpath_mac_addr_add( | ||
1936 | vpath->handle, mac_info.macaddr, | ||
1937 | mac_info.macmask, | ||
1938 | VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); | ||
1939 | if (status != VXGE_HW_OK) { | ||
1940 | vxge_debug_init(VXGE_ERR, | ||
1941 | "DA add entry failed for vpath:%d", | ||
1942 | vpath->device_id); | ||
1943 | ((struct vxge_mac_addrs *)entry)->state | ||
1944 | = VXGE_LL_MAC_ADDR_IN_LIST; | ||
1945 | } | ||
1946 | } | ||
1947 | } | ||
1948 | } | ||
1949 | |||
1950 | return status; | ||
1951 | } | ||
1952 | |||
1953 | /* reset vpaths */ | 1937 | /* reset vpaths */ |
1954 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) | 1938 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) |
1955 | { | 1939 | { |
@@ -2042,6 +2026,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) | |||
2042 | 2026 | ||
2043 | vpath->ring.ndev = vdev->ndev; | 2027 | vpath->ring.ndev = vdev->ndev; |
2044 | vpath->ring.pdev = vdev->pdev; | 2028 | vpath->ring.pdev = vdev->pdev; |
2029 | |||
2045 | status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); | 2030 | status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); |
2046 | if (status == VXGE_HW_OK) { | 2031 | if (status == VXGE_HW_OK) { |
2047 | vpath->fifo.handle = | 2032 | vpath->fifo.handle = |
@@ -2070,11 +2055,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev) | |||
2070 | vdev->stats.vpaths_open++; | 2055 | vdev->stats.vpaths_open++; |
2071 | } else { | 2056 | } else { |
2072 | vdev->stats.vpath_open_fail++; | 2057 | vdev->stats.vpath_open_fail++; |
2073 | vxge_debug_init(VXGE_ERR, | 2058 | vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " |
2074 | "%s: vpath: %d failed to open " | 2059 | "open with status: %d", |
2075 | "with status: %d", | 2060 | vdev->ndev->name, vpath->device_id, |
2076 | vdev->ndev->name, vpath->device_id, | 2061 | status); |
2077 | status); | ||
2078 | vxge_close_vpaths(vdev, 0); | 2062 | vxge_close_vpaths(vdev, 0); |
2079 | return -EPERM; | 2063 | return -EPERM; |
2080 | } | 2064 | } |
@@ -2082,6 +2066,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) | |||
2082 | vp_id = vpath->handle->vpath->vp_id; | 2066 | vp_id = vpath->handle->vpath->vp_id; |
2083 | vdev->vpaths_deployed |= vxge_mBIT(vp_id); | 2067 | vdev->vpaths_deployed |= vxge_mBIT(vp_id); |
2084 | } | 2068 | } |
2069 | |||
2085 | return VXGE_HW_OK; | 2070 | return VXGE_HW_OK; |
2086 | } | 2071 | } |
2087 | 2072 | ||
@@ -2114,8 +2099,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) | |||
2114 | if (unlikely(!is_vxge_card_up(vdev))) | 2099 | if (unlikely(!is_vxge_card_up(vdev))) |
2115 | return IRQ_HANDLED; | 2100 | return IRQ_HANDLED; |
2116 | 2101 | ||
2117 | status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, | 2102 | status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); |
2118 | &reason); | ||
2119 | if (status == VXGE_HW_OK) { | 2103 | if (status == VXGE_HW_OK) { |
2120 | vxge_hw_device_mask_all(hldev); | 2104 | vxge_hw_device_mask_all(hldev); |
2121 | 2105 | ||
@@ -2568,8 +2552,7 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2568 | * Return value: '0' on success and an appropriate (-)ve integer as | 2552 | * Return value: '0' on success and an appropriate (-)ve integer as |
2569 | * defined in errno.h file on failure. | 2553 | * defined in errno.h file on failure. |
2570 | */ | 2554 | */ |
2571 | static int | 2555 | static int vxge_open(struct net_device *dev) |
2572 | vxge_open(struct net_device *dev) | ||
2573 | { | 2556 | { |
2574 | enum vxge_hw_status status; | 2557 | enum vxge_hw_status status; |
2575 | struct vxgedev *vdev; | 2558 | struct vxgedev *vdev; |
@@ -2578,6 +2561,7 @@ vxge_open(struct net_device *dev) | |||
2578 | int ret = 0; | 2561 | int ret = 0; |
2579 | int i; | 2562 | int i; |
2580 | u64 val64, function_mode; | 2563 | u64 val64, function_mode; |
2564 | |||
2581 | vxge_debug_entryexit(VXGE_TRACE, | 2565 | vxge_debug_entryexit(VXGE_TRACE, |
2582 | "%s: %s:%d", dev->name, __func__, __LINE__); | 2566 | "%s: %s:%d", dev->name, __func__, __LINE__); |
2583 | 2567 | ||
@@ -2830,7 +2814,6 @@ static int do_vxge_close(struct net_device *dev, int do_io) | |||
2830 | struct vxge_hw_mrpcim_reg, | 2814 | struct vxge_hw_mrpcim_reg, |
2831 | rts_mgr_cbasin_cfg), | 2815 | rts_mgr_cbasin_cfg), |
2832 | &val64); | 2816 | &val64); |
2833 | |||
2834 | if (status == VXGE_HW_OK) { | 2817 | if (status == VXGE_HW_OK) { |
2835 | val64 &= ~vpath_vector; | 2818 | val64 &= ~vpath_vector; |
2836 | status = vxge_hw_mgmt_reg_write(vdev->devh, | 2819 | status = vxge_hw_mgmt_reg_write(vdev->devh, |
@@ -2914,8 +2897,7 @@ static int do_vxge_close(struct net_device *dev, int do_io) | |||
2914 | * Return value: '0' on success and an appropriate (-)ve integer as | 2897 | * Return value: '0' on success and an appropriate (-)ve integer as |
2915 | * defined in errno.h file on failure. | 2898 | * defined in errno.h file on failure. |
2916 | */ | 2899 | */ |
2917 | static int | 2900 | static int vxge_close(struct net_device *dev) |
2918 | vxge_close(struct net_device *dev) | ||
2919 | { | 2901 | { |
2920 | do_vxge_close(dev, 1); | 2902 | do_vxge_close(dev, 1); |
2921 | return 0; | 2903 | return 0; |
@@ -2989,9 +2971,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) | |||
2989 | net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; | 2971 | net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; |
2990 | net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; | 2972 | net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; |
2991 | net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; | 2973 | net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; |
2992 | net_stats->rx_dropped += | 2974 | net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped; |
2993 | vdev->vpaths[k].ring.stats.rx_dropped; | ||
2994 | |||
2995 | net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; | 2975 | net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; |
2996 | net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; | 2976 | net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; |
2997 | net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; | 2977 | net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; |
@@ -3264,15 +3244,12 @@ static const struct net_device_ops vxge_netdev_ops = { | |||
3264 | .ndo_start_xmit = vxge_xmit, | 3244 | .ndo_start_xmit = vxge_xmit, |
3265 | .ndo_validate_addr = eth_validate_addr, | 3245 | .ndo_validate_addr = eth_validate_addr, |
3266 | .ndo_set_multicast_list = vxge_set_multicast, | 3246 | .ndo_set_multicast_list = vxge_set_multicast, |
3267 | |||
3268 | .ndo_do_ioctl = vxge_ioctl, | 3247 | .ndo_do_ioctl = vxge_ioctl, |
3269 | |||
3270 | .ndo_set_mac_address = vxge_set_mac_addr, | 3248 | .ndo_set_mac_address = vxge_set_mac_addr, |
3271 | .ndo_change_mtu = vxge_change_mtu, | 3249 | .ndo_change_mtu = vxge_change_mtu, |
3272 | .ndo_vlan_rx_register = vxge_vlan_rx_register, | 3250 | .ndo_vlan_rx_register = vxge_vlan_rx_register, |
3273 | .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, | 3251 | .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, |
3274 | .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, | 3252 | .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, |
3275 | |||
3276 | .ndo_tx_timeout = vxge_tx_watchdog, | 3253 | .ndo_tx_timeout = vxge_tx_watchdog, |
3277 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3254 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3278 | .ndo_poll_controller = vxge_netpoll, | 3255 | .ndo_poll_controller = vxge_netpoll, |
@@ -3698,9 +3675,9 @@ static int __devinit vxge_config_vpaths( | |||
3698 | device_config->vp_config[i].tti.timer_ac_en = | 3675 | device_config->vp_config[i].tti.timer_ac_en = |
3699 | VXGE_HW_TIM_TIMER_AC_ENABLE; | 3676 | VXGE_HW_TIM_TIMER_AC_ENABLE; |
3700 | 3677 | ||
3701 | /* For msi-x with napi (each vector | 3678 | /* For msi-x with napi (each vector has a handler of its own) - |
3702 | has a handler of its own) - | 3679 | * Set CI to OFF for all vpaths |
3703 | Set CI to OFF for all vpaths */ | 3680 | */ |
3704 | device_config->vp_config[i].tti.timer_ci_en = | 3681 | device_config->vp_config[i].tti.timer_ci_en = |
3705 | VXGE_HW_TIM_TIMER_CI_DISABLE; | 3682 | VXGE_HW_TIM_TIMER_CI_DISABLE; |
3706 | 3683 | ||
@@ -3730,10 +3707,13 @@ static int __devinit vxge_config_vpaths( | |||
3730 | 3707 | ||
3731 | device_config->vp_config[i].ring.ring_blocks = | 3708 | device_config->vp_config[i].ring.ring_blocks = |
3732 | VXGE_HW_DEF_RING_BLOCKS; | 3709 | VXGE_HW_DEF_RING_BLOCKS; |
3710 | |||
3733 | device_config->vp_config[i].ring.buffer_mode = | 3711 | device_config->vp_config[i].ring.buffer_mode = |
3734 | VXGE_HW_RING_RXD_BUFFER_MODE_1; | 3712 | VXGE_HW_RING_RXD_BUFFER_MODE_1; |
3713 | |||
3735 | device_config->vp_config[i].ring.rxds_limit = | 3714 | device_config->vp_config[i].ring.rxds_limit = |
3736 | VXGE_HW_DEF_RING_RXDS_LIMIT; | 3715 | VXGE_HW_DEF_RING_RXDS_LIMIT; |
3716 | |||
3737 | device_config->vp_config[i].ring.scatter_mode = | 3717 | device_config->vp_config[i].ring.scatter_mode = |
3738 | VXGE_HW_RING_SCATTER_MODE_A; | 3718 | VXGE_HW_RING_SCATTER_MODE_A; |
3739 | 3719 | ||
@@ -3813,6 +3793,7 @@ static void __devinit vxge_device_config_init( | |||
3813 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; | 3793 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; |
3814 | break; | 3794 | break; |
3815 | } | 3795 | } |
3796 | |||
3816 | /* Timer period between device poll */ | 3797 | /* Timer period between device poll */ |
3817 | device_config->device_poll_millis = VXGE_TIMER_DELAY; | 3798 | device_config->device_poll_millis = VXGE_TIMER_DELAY; |
3818 | 3799 | ||
@@ -3824,16 +3805,10 @@ static void __devinit vxge_device_config_init( | |||
3824 | 3805 | ||
3825 | vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", | 3806 | vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", |
3826 | __func__); | 3807 | __func__); |
3827 | vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", | ||
3828 | device_config->dma_blockpool_initial); | ||
3829 | vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", | ||
3830 | device_config->dma_blockpool_max); | ||
3831 | vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", | 3808 | vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", |
3832 | device_config->intr_mode); | 3809 | device_config->intr_mode); |
3833 | vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", | 3810 | vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", |
3834 | device_config->device_poll_millis); | 3811 | device_config->device_poll_millis); |
3835 | vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", | ||
3836 | device_config->rts_mac_en); | ||
3837 | vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", | 3812 | vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", |
3838 | device_config->rth_en); | 3813 | device_config->rth_en); |
3839 | vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", | 3814 | vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", |
@@ -4013,7 +3988,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) | |||
4013 | } | 3988 | } |
4014 | 3989 | ||
4015 | pci_set_master(pdev); | 3990 | pci_set_master(pdev); |
4016 | vxge_reset(vdev); | 3991 | do_vxge_reset(vdev, VXGE_LL_FULL_RESET); |
4017 | 3992 | ||
4018 | return PCI_ERS_RESULT_RECOVERED; | 3993 | return PCI_ERS_RESULT_RECOVERED; |
4019 | } | 3994 | } |
@@ -4244,9 +4219,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4244 | attr.pdev = pdev; | 4219 | attr.pdev = pdev; |
4245 | 4220 | ||
4246 | /* In SRIOV-17 mode, functions of the same adapter | 4221 | /* In SRIOV-17 mode, functions of the same adapter |
4247 | * can be deployed on different buses */ | 4222 | * can be deployed on different buses |
4248 | if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || | 4223 | */ |
4249 | (device != PCI_SLOT(pdev->devfn)))) | 4224 | if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && |
4225 | !pdev->is_virtfn) | ||
4250 | new_device = 1; | 4226 | new_device = 1; |
4251 | 4227 | ||
4252 | bus = pdev->bus->number; | 4228 | bus = pdev->bus->number; |
@@ -4264,6 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4264 | driver_config->config_dev_cnt = 0; | 4240 | driver_config->config_dev_cnt = 0; |
4265 | driver_config->total_dev_cnt = 0; | 4241 | driver_config->total_dev_cnt = 0; |
4266 | } | 4242 | } |
4243 | |||
4267 | /* Now making the CPU based no of vpath calculation | 4244 | /* Now making the CPU based no of vpath calculation |
4268 | * applicable for individual functions as well. | 4245 | * applicable for individual functions as well. |
4269 | */ | 4246 | */ |
@@ -4286,11 +4263,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4286 | goto _exit0; | 4263 | goto _exit0; |
4287 | } | 4264 | } |
4288 | 4265 | ||
4289 | 	ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); | 4266 | 	ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4290 | 	if (!ll_config) { | 4267 | 	if (!ll_config) {
4291 | ret = -ENOMEM; | 4268 | ret = -ENOMEM; |
4292 | vxge_debug_init(VXGE_ERR, | 4269 | vxge_debug_init(VXGE_ERR, |
4293 | "ll_config : malloc failed %s %d", | 4270 | "device_config : malloc failed %s %d", |
4294 | __FILE__, __LINE__); | 4271 | __FILE__, __LINE__); |
4295 | goto _exit0; | 4272 | goto _exit0; |
4296 | } | 4273 | } |
@@ -4746,6 +4723,10 @@ vxge_starter(void) | |||
4746 | return -ENOMEM; | 4723 | return -ENOMEM; |
4747 | 4724 | ||
4748 | ret = pci_register_driver(&vxge_driver); | 4725 | ret = pci_register_driver(&vxge_driver); |
4726 | if (ret) { | ||
4727 | kfree(driver_config); | ||
4728 | goto err; | ||
4729 | } | ||
4749 | 4730 | ||
4750 | if (driver_config->config_dev_cnt && | 4731 | if (driver_config->config_dev_cnt && |
4751 | (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) | 4732 | (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) |
@@ -4753,10 +4734,7 @@ vxge_starter(void) | |||
4753 | "%s: Configured %d of %d devices", | 4734 | "%s: Configured %d of %d devices", |
4754 | VXGE_DRIVER_NAME, driver_config->config_dev_cnt, | 4735 | VXGE_DRIVER_NAME, driver_config->config_dev_cnt, |
4755 | driver_config->total_dev_cnt); | 4736 | driver_config->total_dev_cnt); |
4756 | 4737 | err: | |
4757 | if (ret) | ||
4758 | kfree(driver_config); | ||
4759 | |||
4760 | return ret; | 4738 | return ret; |
4761 | } | 4739 | } |
4762 | 4740 | ||
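The reworked vxge_starter() now frees driver_config and bails out the moment pci_register_driver() fails, instead of falling through the success-only logging and testing ret at the end. Condensed, the resulting control flow is the conventional allocate/register/unwind shape:

    /* Sketch of the resulting flow (condensed from the hunk above). */
    static int sketch_starter(void)
    {
        int ret;

        driver_config = kzalloc(sizeof(*driver_config), GFP_KERNEL);
        if (!driver_config)
            return -ENOMEM;

        ret = pci_register_driver(&vxge_driver);
        if (ret) {
            kfree(driver_config);  /* undo the only prior allocation */
            goto err;
        }

        /* success-only reporting happens here */
    err:
        return ret;
    }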
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index 953cb0ded3e1..256d5b406a67 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h | |||
@@ -305,8 +305,8 @@ struct vxge_vpath { | |||
305 | int is_configured; | 305 | int is_configured; |
306 | int is_open; | 306 | int is_open; |
307 | struct vxgedev *vdev; | 307 | struct vxgedev *vdev; |
308 | u8 (macaddr)[ETH_ALEN]; | 308 | u8 macaddr[ETH_ALEN]; |
309 | u8 (macmask)[ETH_ALEN]; | 309 | u8 macmask[ETH_ALEN]; |
310 | 310 | ||
311 | #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 | 311 | #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 |
312 | /* mac addresses currently programmed into NIC */ | 312 | /* mac addresses currently programmed into NIC */ |
@@ -420,10 +420,8 @@ struct vxge_tx_priv { | |||
420 | mod_timer(&timer, (jiffies + exp)); \ | 420 | mod_timer(&timer, (jiffies + exp)); \ |
421 | } while (0); | 421 | } while (0); |
422 | 422 | ||
423 | extern void vxge_initialize_ethtool_ops(struct net_device *ndev); | 423 | void vxge_initialize_ethtool_ops(struct net_device *ndev); |
424 | |||
425 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); | 424 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); |
426 | |||
427 | int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); | 425 | int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); |
428 | 426 | ||
429 | /** | 427 | /** |
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 4bdb611a6842..42cc29843ac7 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c | |||
@@ -17,13 +17,6 @@ | |||
17 | #include "vxge-config.h" | 17 | #include "vxge-config.h" |
18 | #include "vxge-main.h" | 18 | #include "vxge-main.h" |
19 | 19 | ||
20 | static enum vxge_hw_status | ||
21 | __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, | ||
22 | u32 vp_id, enum vxge_hw_event type); | ||
23 | static enum vxge_hw_status | ||
24 | __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, | ||
25 | u32 skip_alarms); | ||
26 | |||
27 | /* | 20 | /* |
28 | * vxge_hw_vpath_intr_enable - Enable vpath interrupts. | 21 | * vxge_hw_vpath_intr_enable - Enable vpath interrupts. |
29 | * @vp: Virtual Path handle. | 22 | * @vp: Virtual Path handle. |
@@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) | |||
419 | } | 412 | } |
420 | 413 | ||
421 | /** | 414 | /** |
415 | * __vxge_hw_device_handle_error - Handle error | ||
416 | * @hldev: HW device | ||
417 | * @vp_id: Vpath Id | ||
418 | * @type: Error type. Please see enum vxge_hw_event{} | ||
419 | * | ||
420 | * Handle error. | ||
421 | */ | ||
422 | static enum vxge_hw_status | ||
423 | __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, | ||
424 | enum vxge_hw_event type) | ||
425 | { | ||
426 | switch (type) { | ||
427 | case VXGE_HW_EVENT_UNKNOWN: | ||
428 | break; | ||
429 | case VXGE_HW_EVENT_RESET_START: | ||
430 | case VXGE_HW_EVENT_RESET_COMPLETE: | ||
431 | case VXGE_HW_EVENT_LINK_DOWN: | ||
432 | case VXGE_HW_EVENT_LINK_UP: | ||
433 | goto out; | ||
434 | case VXGE_HW_EVENT_ALARM_CLEARED: | ||
435 | goto out; | ||
436 | case VXGE_HW_EVENT_ECCERR: | ||
437 | case VXGE_HW_EVENT_MRPCIM_ECCERR: | ||
438 | goto out; | ||
439 | case VXGE_HW_EVENT_FIFO_ERR: | ||
440 | case VXGE_HW_EVENT_VPATH_ERR: | ||
441 | case VXGE_HW_EVENT_CRITICAL_ERR: | ||
442 | case VXGE_HW_EVENT_SERR: | ||
443 | break; | ||
444 | case VXGE_HW_EVENT_SRPCIM_SERR: | ||
445 | case VXGE_HW_EVENT_MRPCIM_SERR: | ||
446 | goto out; | ||
447 | case VXGE_HW_EVENT_SLOT_FREEZE: | ||
448 | break; | ||
449 | default: | ||
450 | vxge_assert(0); | ||
451 | goto out; | ||
452 | } | ||
453 | |||
454 | /* notify driver */ | ||
455 | if (hldev->uld_callbacks.crit_err) | ||
456 | hldev->uld_callbacks.crit_err( | ||
457 | (struct __vxge_hw_device *)hldev, | ||
458 | type, vp_id); | ||
459 | out: | ||
460 | |||
461 | return VXGE_HW_OK; | ||
462 | } | ||
463 | |||
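__vxge_hw_device_handle_error() acts as a severity filter: informational and recoverable events jump straight to out, and only the serious classes (FIFO, vpath, critical, slot-freeze, serr) fall through to the driver's crit_err callback. A minimal handler sketch — parameter types are inferred from the call site above, and how the callback gets installed into uld_callbacks is assumed, not shown in this hunk:

    static void sketch_crit_err(struct __vxge_hw_device *hldev,
                                enum vxge_hw_event type, u32 vp_id)
    {
        pr_err("vxge: critical event %d on vpath %u\n", type, vp_id);
        /* defer recovery (e.g. a reset) to process context; this
         * callback may run from the alarm/interrupt path */
    }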
464 | /* | ||
465 | * __vxge_hw_device_handle_link_down_ind | ||
466 | * @hldev: HW device handle. | ||
467 | * | ||
468 | * Link down indication handler. The function is invoked by HW when | ||
469 | * Titan indicates that the link is down. | ||
470 | */ | ||
471 | static enum vxge_hw_status | ||
472 | __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) | ||
473 | { | ||
474 | /* | ||
475 | 	 * If the link state is already down, return. | ||
476 | */ | ||
477 | if (hldev->link_state == VXGE_HW_LINK_DOWN) | ||
478 | goto exit; | ||
479 | |||
480 | hldev->link_state = VXGE_HW_LINK_DOWN; | ||
481 | |||
482 | /* notify driver */ | ||
483 | if (hldev->uld_callbacks.link_down) | ||
484 | hldev->uld_callbacks.link_down(hldev); | ||
485 | exit: | ||
486 | return VXGE_HW_OK; | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * __vxge_hw_device_handle_link_up_ind | ||
491 | * @hldev: HW device handle. | ||
492 | * | ||
493 | * Link up indication handler. The function is invoked by HW when | ||
494 |  * Titan indicates that the link is up for a programmable amount of time. | ||
495 | */ | ||
496 | static enum vxge_hw_status | ||
497 | __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) | ||
498 | { | ||
499 | /* | ||
500 | 	 * If the link state is already up, return. | ||
501 | */ | ||
502 | if (hldev->link_state == VXGE_HW_LINK_UP) | ||
503 | goto exit; | ||
504 | |||
505 | hldev->link_state = VXGE_HW_LINK_UP; | ||
506 | |||
507 | /* notify driver */ | ||
508 | if (hldev->uld_callbacks.link_up) | ||
509 | hldev->uld_callbacks.link_up(hldev); | ||
510 | exit: | ||
511 | return VXGE_HW_OK; | ||
512 | } | ||
513 | |||
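Both link handlers are edge-triggered latches: each compares against the cached hldev->link_state and invokes the driver callback only on an actual transition, so repeated identical indications from the hardware collapse into a single notification. The shared pattern, condensed into one hedged sketch:

    static enum vxge_hw_status
    sketch_link_edge(struct __vxge_hw_device *hldev, int up)
    {
        if (up) {
            if (hldev->link_state == VXGE_HW_LINK_UP)
                return VXGE_HW_OK;          /* no edge, no callback */
            hldev->link_state = VXGE_HW_LINK_UP;
            if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
        } else {
            if (hldev->link_state == VXGE_HW_LINK_DOWN)
                return VXGE_HW_OK;          /* no edge, no callback */
            hldev->link_state = VXGE_HW_LINK_DOWN;
            if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
        }
        return VXGE_HW_OK;
    }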
514 | /* | ||
515 | * __vxge_hw_vpath_alarm_process - Process Alarms. | ||
516 | * @vpath: Virtual Path. | ||
517 | * @skip_alarms: Do not clear the alarms | ||
518 | * | ||
519 | * Process vpath alarms. | ||
520 | * | ||
521 | */ | ||
522 | static enum vxge_hw_status | ||
523 | __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, | ||
524 | u32 skip_alarms) | ||
525 | { | ||
526 | u64 val64; | ||
527 | u64 alarm_status; | ||
528 | u64 pic_status; | ||
529 | struct __vxge_hw_device *hldev = NULL; | ||
530 | enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; | ||
531 | u64 mask64; | ||
532 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | ||
533 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
534 | |||
535 | if (vpath == NULL) { | ||
536 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
537 | alarm_event); | ||
538 | goto out2; | ||
539 | } | ||
540 | |||
541 | hldev = vpath->hldev; | ||
542 | vp_reg = vpath->vp_reg; | ||
543 | alarm_status = readq(&vp_reg->vpath_general_int_status); | ||
544 | |||
545 | if (alarm_status == VXGE_HW_ALL_FOXES) { | ||
546 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, | ||
547 | alarm_event); | ||
548 | goto out; | ||
549 | } | ||
550 | |||
551 | sw_stats = vpath->sw_stats; | ||
552 | |||
553 | if (alarm_status & ~( | ||
554 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | | ||
555 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | | ||
556 | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | | ||
557 | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { | ||
558 | sw_stats->error_stats.unknown_alarms++; | ||
559 | |||
560 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
561 | alarm_event); | ||
562 | goto out; | ||
563 | } | ||
564 | |||
565 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { | ||
566 | |||
567 | val64 = readq(&vp_reg->xgmac_vp_int_status); | ||
568 | |||
569 | if (val64 & | ||
570 | VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { | ||
571 | |||
572 | val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); | ||
573 | |||
574 | if (((val64 & | ||
575 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && | ||
576 | (!(val64 & | ||
577 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || | ||
578 | ((val64 & | ||
579 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && | ||
580 | (!(val64 & | ||
581 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) | ||
582 | ))) { | ||
583 | sw_stats->error_stats.network_sustained_fault++; | ||
584 | |||
585 | writeq( | ||
586 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, | ||
587 | &vp_reg->asic_ntwk_vp_err_mask); | ||
588 | |||
589 | __vxge_hw_device_handle_link_down_ind(hldev); | ||
590 | alarm_event = VXGE_HW_SET_LEVEL( | ||
591 | VXGE_HW_EVENT_LINK_DOWN, alarm_event); | ||
592 | } | ||
593 | |||
594 | if (((val64 & | ||
595 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && | ||
596 | (!(val64 & | ||
597 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || | ||
598 | ((val64 & | ||
599 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && | ||
600 | (!(val64 & | ||
601 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) | ||
602 | ))) { | ||
603 | |||
604 | sw_stats->error_stats.network_sustained_ok++; | ||
605 | |||
606 | writeq( | ||
607 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, | ||
608 | &vp_reg->asic_ntwk_vp_err_mask); | ||
609 | |||
610 | __vxge_hw_device_handle_link_up_ind(hldev); | ||
611 | alarm_event = VXGE_HW_SET_LEVEL( | ||
612 | VXGE_HW_EVENT_LINK_UP, alarm_event); | ||
613 | } | ||
614 | |||
615 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
616 | &vp_reg->asic_ntwk_vp_err_reg); | ||
617 | |||
618 | alarm_event = VXGE_HW_SET_LEVEL( | ||
619 | VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); | ||
620 | |||
621 | if (skip_alarms) | ||
622 | return VXGE_HW_OK; | ||
623 | } | ||
624 | } | ||
625 | |||
626 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { | ||
627 | |||
628 | pic_status = readq(&vp_reg->vpath_ppif_int_status); | ||
629 | |||
630 | if (pic_status & | ||
631 | VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { | ||
632 | |||
633 | val64 = readq(&vp_reg->general_errors_reg); | ||
634 | mask64 = readq(&vp_reg->general_errors_mask); | ||
635 | |||
636 | if ((val64 & | ||
637 | VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & | ||
638 | ~mask64) { | ||
639 | sw_stats->error_stats.ini_serr_det++; | ||
640 | |||
641 | alarm_event = VXGE_HW_SET_LEVEL( | ||
642 | VXGE_HW_EVENT_SERR, alarm_event); | ||
643 | } | ||
644 | |||
645 | if ((val64 & | ||
646 | VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & | ||
647 | ~mask64) { | ||
648 | sw_stats->error_stats.dblgen_fifo0_overflow++; | ||
649 | |||
650 | alarm_event = VXGE_HW_SET_LEVEL( | ||
651 | VXGE_HW_EVENT_FIFO_ERR, alarm_event); | ||
652 | } | ||
653 | |||
654 | if ((val64 & | ||
655 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & | ||
656 | ~mask64) | ||
657 | sw_stats->error_stats.statsb_pif_chain_error++; | ||
658 | |||
659 | if ((val64 & | ||
660 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & | ||
661 | ~mask64) | ||
662 | sw_stats->error_stats.statsb_drop_timeout++; | ||
663 | |||
664 | if ((val64 & | ||
665 | VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & | ||
666 | ~mask64) | ||
667 | sw_stats->error_stats.target_illegal_access++; | ||
668 | |||
669 | if (!skip_alarms) { | ||
670 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
671 | &vp_reg->general_errors_reg); | ||
672 | alarm_event = VXGE_HW_SET_LEVEL( | ||
673 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
674 | alarm_event); | ||
675 | } | ||
676 | } | ||
677 | |||
678 | if (pic_status & | ||
679 | VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { | ||
680 | |||
681 | val64 = readq(&vp_reg->kdfcctl_errors_reg); | ||
682 | mask64 = readq(&vp_reg->kdfcctl_errors_mask); | ||
683 | |||
684 | if ((val64 & | ||
685 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & | ||
686 | ~mask64) { | ||
687 | sw_stats->error_stats.kdfcctl_fifo0_overwrite++; | ||
688 | |||
689 | alarm_event = VXGE_HW_SET_LEVEL( | ||
690 | VXGE_HW_EVENT_FIFO_ERR, | ||
691 | alarm_event); | ||
692 | } | ||
693 | |||
694 | if ((val64 & | ||
695 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & | ||
696 | ~mask64) { | ||
697 | sw_stats->error_stats.kdfcctl_fifo0_poison++; | ||
698 | |||
699 | alarm_event = VXGE_HW_SET_LEVEL( | ||
700 | VXGE_HW_EVENT_FIFO_ERR, | ||
701 | alarm_event); | ||
702 | } | ||
703 | |||
704 | if ((val64 & | ||
705 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & | ||
706 | ~mask64) { | ||
707 | sw_stats->error_stats.kdfcctl_fifo0_dma_error++; | ||
708 | |||
709 | alarm_event = VXGE_HW_SET_LEVEL( | ||
710 | VXGE_HW_EVENT_FIFO_ERR, | ||
711 | alarm_event); | ||
712 | } | ||
713 | |||
714 | if (!skip_alarms) { | ||
715 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
716 | &vp_reg->kdfcctl_errors_reg); | ||
717 | alarm_event = VXGE_HW_SET_LEVEL( | ||
718 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
719 | alarm_event); | ||
720 | } | ||
721 | } | ||
722 | |||
723 | } | ||
724 | |||
725 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { | ||
726 | |||
727 | val64 = readq(&vp_reg->wrdma_alarm_status); | ||
728 | |||
729 | if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { | ||
730 | |||
731 | val64 = readq(&vp_reg->prc_alarm_reg); | ||
732 | mask64 = readq(&vp_reg->prc_alarm_mask); | ||
733 | |||
734 | 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) & | ||
735 | ~mask64) | ||
736 | sw_stats->error_stats.prc_ring_bumps++; | ||
737 | |||
738 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & | ||
739 | ~mask64) { | ||
740 | sw_stats->error_stats.prc_rxdcm_sc_err++; | ||
741 | |||
742 | alarm_event = VXGE_HW_SET_LEVEL( | ||
743 | VXGE_HW_EVENT_VPATH_ERR, | ||
744 | alarm_event); | ||
745 | } | ||
746 | |||
747 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) | ||
748 | & ~mask64) { | ||
749 | sw_stats->error_stats.prc_rxdcm_sc_abort++; | ||
750 | |||
751 | alarm_event = VXGE_HW_SET_LEVEL( | ||
752 | VXGE_HW_EVENT_VPATH_ERR, | ||
753 | alarm_event); | ||
754 | } | ||
755 | |||
756 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) | ||
757 | & ~mask64) { | ||
758 | sw_stats->error_stats.prc_quanta_size_err++; | ||
759 | |||
760 | alarm_event = VXGE_HW_SET_LEVEL( | ||
761 | VXGE_HW_EVENT_VPATH_ERR, | ||
762 | alarm_event); | ||
763 | } | ||
764 | |||
765 | if (!skip_alarms) { | ||
766 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
767 | &vp_reg->prc_alarm_reg); | ||
768 | alarm_event = VXGE_HW_SET_LEVEL( | ||
769 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
770 | alarm_event); | ||
771 | } | ||
772 | } | ||
773 | } | ||
774 | out: | ||
775 | hldev->stats.sw_dev_err_stats.vpath_alarms++; | ||
776 | out2: | ||
777 | if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || | ||
778 | (alarm_event == VXGE_HW_EVENT_UNKNOWN)) | ||
779 | return VXGE_HW_OK; | ||
780 | |||
781 | __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); | ||
782 | |||
783 | if (alarm_event == VXGE_HW_EVENT_SERR) | ||
784 | return VXGE_HW_ERR_CRITICAL; | ||
785 | |||
786 | return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? | ||
787 | VXGE_HW_ERR_SLOT_FREEZE : | ||
788 | (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : | ||
789 | VXGE_HW_ERR_VPATH; | ||
790 | } | ||
791 | |||
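Throughout __vxge_hw_vpath_alarm_process(), each detected condition is folded into alarm_event via VXGE_HW_SET_LEVEL, and the tail maps the final value to a return code (SERR first, then slot-freeze, then FIFO, then generic vpath error). Assuming VXGE_HW_SET_LEVEL(a, b) is essentially max(a, b) over the event enum's severity ordering — an assumption, since the macro body is not shown in this extract — the accumulation behaves like:

    enum vxge_hw_event worst = VXGE_HW_EVENT_UNKNOWN;

    worst = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_FIFO_ERR, worst);
    worst = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_ALARM_CLEARED, worst);
    /* worst still records the FIFO error: clearing alarms can never
     * mask a more severe event that was already latched */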
792 | /** | ||
422 | * vxge_hw_device_begin_irq - Begin IRQ processing. | 793 | * vxge_hw_device_begin_irq - Begin IRQ processing. |
423 | * @hldev: HW device handle. | 794 | * @hldev: HW device handle. |
424 | * @skip_alarms: Do not clear the alarms | 795 | * @skip_alarms: Do not clear the alarms |
@@ -513,108 +884,6 @@ exit: | |||
513 | return ret; | 884 | return ret; |
514 | } | 885 | } |
515 | 886 | ||
516 | /* | ||
517 | * __vxge_hw_device_handle_link_up_ind | ||
518 | * @hldev: HW device handle. | ||
519 | * | ||
520 | * Link up indication handler. The function is invoked by HW when | ||
521 |  * Titan indicates that the link is up for a programmable amount of time. | ||
522 | */ | ||
523 | static enum vxge_hw_status | ||
524 | __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) | ||
525 | { | ||
526 | /* | ||
527 | 	 * If the link state is already up, return. | ||
528 | */ | ||
529 | if (hldev->link_state == VXGE_HW_LINK_UP) | ||
530 | goto exit; | ||
531 | |||
532 | hldev->link_state = VXGE_HW_LINK_UP; | ||
533 | |||
534 | /* notify driver */ | ||
535 | if (hldev->uld_callbacks.link_up) | ||
536 | hldev->uld_callbacks.link_up(hldev); | ||
537 | exit: | ||
538 | return VXGE_HW_OK; | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * __vxge_hw_device_handle_link_down_ind | ||
543 | * @hldev: HW device handle. | ||
544 | * | ||
545 | * Link down indication handler. The function is invoked by HW when | ||
546 | * Titan indicates that the link is down. | ||
547 | */ | ||
548 | static enum vxge_hw_status | ||
549 | __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) | ||
550 | { | ||
551 | /* | ||
552 | 	 * If the link state is already down, return. | ||
553 | */ | ||
554 | if (hldev->link_state == VXGE_HW_LINK_DOWN) | ||
555 | goto exit; | ||
556 | |||
557 | hldev->link_state = VXGE_HW_LINK_DOWN; | ||
558 | |||
559 | /* notify driver */ | ||
560 | if (hldev->uld_callbacks.link_down) | ||
561 | hldev->uld_callbacks.link_down(hldev); | ||
562 | exit: | ||
563 | return VXGE_HW_OK; | ||
564 | } | ||
565 | |||
566 | /** | ||
567 | * __vxge_hw_device_handle_error - Handle error | ||
568 | * @hldev: HW device | ||
569 | * @vp_id: Vpath Id | ||
570 | * @type: Error type. Please see enum vxge_hw_event{} | ||
571 | * | ||
572 | * Handle error. | ||
573 | */ | ||
574 | static enum vxge_hw_status | ||
575 | __vxge_hw_device_handle_error( | ||
576 | struct __vxge_hw_device *hldev, | ||
577 | u32 vp_id, | ||
578 | enum vxge_hw_event type) | ||
579 | { | ||
580 | switch (type) { | ||
581 | case VXGE_HW_EVENT_UNKNOWN: | ||
582 | break; | ||
583 | case VXGE_HW_EVENT_RESET_START: | ||
584 | case VXGE_HW_EVENT_RESET_COMPLETE: | ||
585 | case VXGE_HW_EVENT_LINK_DOWN: | ||
586 | case VXGE_HW_EVENT_LINK_UP: | ||
587 | goto out; | ||
588 | case VXGE_HW_EVENT_ALARM_CLEARED: | ||
589 | goto out; | ||
590 | case VXGE_HW_EVENT_ECCERR: | ||
591 | case VXGE_HW_EVENT_MRPCIM_ECCERR: | ||
592 | goto out; | ||
593 | case VXGE_HW_EVENT_FIFO_ERR: | ||
594 | case VXGE_HW_EVENT_VPATH_ERR: | ||
595 | case VXGE_HW_EVENT_CRITICAL_ERR: | ||
596 | case VXGE_HW_EVENT_SERR: | ||
597 | break; | ||
598 | case VXGE_HW_EVENT_SRPCIM_SERR: | ||
599 | case VXGE_HW_EVENT_MRPCIM_SERR: | ||
600 | goto out; | ||
601 | case VXGE_HW_EVENT_SLOT_FREEZE: | ||
602 | break; | ||
603 | default: | ||
604 | vxge_assert(0); | ||
605 | goto out; | ||
606 | } | ||
607 | |||
608 | /* notify driver */ | ||
609 | if (hldev->uld_callbacks.crit_err) | ||
610 | hldev->uld_callbacks.crit_err( | ||
611 | (struct __vxge_hw_device *)hldev, | ||
612 | type, vp_id); | ||
613 | out: | ||
614 | |||
615 | return VXGE_HW_OK; | ||
616 | } | ||
617 | |||
618 | /** | 887 | /** |
619 | * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the | 888 | * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the |
620 | * condition that has caused the Tx and RX interrupt. | 889 | * condition that has caused the Tx and RX interrupt. |
@@ -699,8 +968,8 @@ _alloc_after_swap: | |||
699 | * Posts a dtr to work array. | 968 | * Posts a dtr to work array. |
700 | * | 969 | * |
701 | */ | 970 | */ |
702 | static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, | 971 | static void |
703 | void *dtrh) | 972 | vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) |
704 | { | 973 | { |
705 | vxge_assert(channel->work_arr[channel->post_index] == NULL); | 974 | vxge_assert(channel->work_arr[channel->post_index] == NULL); |
706 | 975 | ||
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) | |||
911 | */ | 1180 | */ |
912 | void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) | 1181 | void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) |
913 | { | 1182 | { |
914 | struct __vxge_hw_channel *channel; | ||
915 | |||
916 | channel = &ring->channel; | ||
917 | |||
918 | wmb(); | 1183 | wmb(); |
919 | vxge_hw_ring_rxd_post_post(ring, rxdh); | 1184 | vxge_hw_ring_rxd_post_post(ring, rxdh); |
920 | } | 1185 | } |
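Dropping the unused channel local is safe because the function exists purely for ordering: the wmb() forces every prior CPU store to the descriptor out before vxge_hw_ring_rxd_post_post() hands ownership to the adapter. The general shape of the pattern, with hypothetical field names:

	/* Fill the descriptor fields first ... */
	rxd->buffer0_ptr = dma_addr;		/* hypothetical field */
	/* ... then fence, so the device can never observe the
	 * ownership transfer before the payload fields are stable. */
	wmb();
	rxd->control |= OWNERSHIP_BIT;		/* hypothetical flag */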
@@ -1868,284 +2133,6 @@ exit: | |||
1868 | } | 2133 | } |
1869 | 2134 | ||
1870 | /* | 2135 | /* |
1871 | * __vxge_hw_vpath_alarm_process - Process Alarms. | ||
1872 | * @vpath: Virtual Path. | ||
1873 | * @skip_alarms: Do not clear the alarms | ||
1874 | * | ||
1875 | * Process vpath alarms. | ||
1876 | * | ||
1877 | */ | ||
1878 | static enum vxge_hw_status | ||
1879 | __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, | ||
1880 | u32 skip_alarms) | ||
1881 | { | ||
1882 | u64 val64; | ||
1883 | u64 alarm_status; | ||
1884 | u64 pic_status; | ||
1885 | struct __vxge_hw_device *hldev = NULL; | ||
1886 | enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; | ||
1887 | u64 mask64; | ||
1888 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | ||
1889 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1890 | |||
1891 | if (vpath == NULL) { | ||
1892 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
1893 | alarm_event); | ||
1894 | goto out2; | ||
1895 | } | ||
1896 | |||
1897 | hldev = vpath->hldev; | ||
1898 | vp_reg = vpath->vp_reg; | ||
1899 | alarm_status = readq(&vp_reg->vpath_general_int_status); | ||
1900 | |||
1901 | if (alarm_status == VXGE_HW_ALL_FOXES) { | ||
1902 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, | ||
1903 | alarm_event); | ||
1904 | goto out; | ||
1905 | } | ||
1906 | |||
1907 | sw_stats = vpath->sw_stats; | ||
1908 | |||
1909 | if (alarm_status & ~( | ||
1910 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | | ||
1911 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | | ||
1912 | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | | ||
1913 | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { | ||
1914 | sw_stats->error_stats.unknown_alarms++; | ||
1915 | |||
1916 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
1917 | alarm_event); | ||
1918 | goto out; | ||
1919 | } | ||
1920 | |||
1921 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { | ||
1922 | |||
1923 | val64 = readq(&vp_reg->xgmac_vp_int_status); | ||
1924 | |||
1925 | if (val64 & | ||
1926 | VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { | ||
1927 | |||
1928 | val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); | ||
1929 | |||
1930 | if (((val64 & | ||
1931 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && | ||
1932 | (!(val64 & | ||
1933 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || | ||
1934 | ((val64 & | ||
1935 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && | ||
1936 | (!(val64 & | ||
1937 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) | ||
1938 | ))) { | ||
1939 | sw_stats->error_stats.network_sustained_fault++; | ||
1940 | |||
1941 | writeq( | ||
1942 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, | ||
1943 | &vp_reg->asic_ntwk_vp_err_mask); | ||
1944 | |||
1945 | __vxge_hw_device_handle_link_down_ind(hldev); | ||
1946 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1947 | VXGE_HW_EVENT_LINK_DOWN, alarm_event); | ||
1948 | } | ||
1949 | |||
1950 | if (((val64 & | ||
1951 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && | ||
1952 | (!(val64 & | ||
1953 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || | ||
1954 | ((val64 & | ||
1955 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && | ||
1956 | (!(val64 & | ||
1957 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) | ||
1958 | ))) { | ||
1959 | |||
1960 | sw_stats->error_stats.network_sustained_ok++; | ||
1961 | |||
1962 | writeq( | ||
1963 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, | ||
1964 | &vp_reg->asic_ntwk_vp_err_mask); | ||
1965 | |||
1966 | __vxge_hw_device_handle_link_up_ind(hldev); | ||
1967 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1968 | VXGE_HW_EVENT_LINK_UP, alarm_event); | ||
1969 | } | ||
1970 | |||
1971 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
1972 | &vp_reg->asic_ntwk_vp_err_reg); | ||
1973 | |||
1974 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1975 | VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); | ||
1976 | |||
1977 | if (skip_alarms) | ||
1978 | return VXGE_HW_OK; | ||
1979 | } | ||
1980 | } | ||
1981 | |||
1982 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { | ||
1983 | |||
1984 | pic_status = readq(&vp_reg->vpath_ppif_int_status); | ||
1985 | |||
1986 | if (pic_status & | ||
1987 | VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { | ||
1988 | |||
1989 | val64 = readq(&vp_reg->general_errors_reg); | ||
1990 | mask64 = readq(&vp_reg->general_errors_mask); | ||
1991 | |||
1992 | if ((val64 & | ||
1993 | VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & | ||
1994 | ~mask64) { | ||
1995 | sw_stats->error_stats.ini_serr_det++; | ||
1996 | |||
1997 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1998 | VXGE_HW_EVENT_SERR, alarm_event); | ||
1999 | } | ||
2000 | |||
2001 | if ((val64 & | ||
2002 | VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & | ||
2003 | ~mask64) { | ||
2004 | sw_stats->error_stats.dblgen_fifo0_overflow++; | ||
2005 | |||
2006 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2007 | VXGE_HW_EVENT_FIFO_ERR, alarm_event); | ||
2008 | } | ||
2009 | |||
2010 | if ((val64 & | ||
2011 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & | ||
2012 | ~mask64) | ||
2013 | sw_stats->error_stats.statsb_pif_chain_error++; | ||
2014 | |||
2015 | if ((val64 & | ||
2016 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & | ||
2017 | ~mask64) | ||
2018 | sw_stats->error_stats.statsb_drop_timeout++; | ||
2019 | |||
2020 | if ((val64 & | ||
2021 | VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & | ||
2022 | ~mask64) | ||
2023 | sw_stats->error_stats.target_illegal_access++; | ||
2024 | |||
2025 | if (!skip_alarms) { | ||
2026 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2027 | &vp_reg->general_errors_reg); | ||
2028 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2029 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2030 | alarm_event); | ||
2031 | } | ||
2032 | } | ||
2033 | |||
2034 | if (pic_status & | ||
2035 | VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { | ||
2036 | |||
2037 | val64 = readq(&vp_reg->kdfcctl_errors_reg); | ||
2038 | mask64 = readq(&vp_reg->kdfcctl_errors_mask); | ||
2039 | |||
2040 | if ((val64 & | ||
2041 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & | ||
2042 | ~mask64) { | ||
2043 | sw_stats->error_stats.kdfcctl_fifo0_overwrite++; | ||
2044 | |||
2045 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2046 | VXGE_HW_EVENT_FIFO_ERR, | ||
2047 | alarm_event); | ||
2048 | } | ||
2049 | |||
2050 | if ((val64 & | ||
2051 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & | ||
2052 | ~mask64) { | ||
2053 | sw_stats->error_stats.kdfcctl_fifo0_poison++; | ||
2054 | |||
2055 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2056 | VXGE_HW_EVENT_FIFO_ERR, | ||
2057 | alarm_event); | ||
2058 | } | ||
2059 | |||
2060 | if ((val64 & | ||
2061 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & | ||
2062 | ~mask64) { | ||
2063 | sw_stats->error_stats.kdfcctl_fifo0_dma_error++; | ||
2064 | |||
2065 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2066 | VXGE_HW_EVENT_FIFO_ERR, | ||
2067 | alarm_event); | ||
2068 | } | ||
2069 | |||
2070 | if (!skip_alarms) { | ||
2071 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2072 | &vp_reg->kdfcctl_errors_reg); | ||
2073 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2074 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2075 | alarm_event); | ||
2076 | } | ||
2077 | } | ||
2078 | |||
2079 | } | ||
2080 | |||
2081 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { | ||
2082 | |||
2083 | val64 = readq(&vp_reg->wrdma_alarm_status); | ||
2084 | |||
2085 | if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { | ||
2086 | |||
2087 | val64 = readq(&vp_reg->prc_alarm_reg); | ||
2088 | mask64 = readq(&vp_reg->prc_alarm_mask); | ||
2089 | |||
2090 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) & | ||
2091 | ~mask64) | ||
2092 | sw_stats->error_stats.prc_ring_bumps++; | ||
2093 | |||
2094 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & | ||
2095 | ~mask64) { | ||
2096 | sw_stats->error_stats.prc_rxdcm_sc_err++; | ||
2097 | |||
2098 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2099 | VXGE_HW_EVENT_VPATH_ERR, | ||
2100 | alarm_event); | ||
2101 | } | ||
2102 | |||
2103 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) | ||
2104 | & ~mask64) { | ||
2105 | sw_stats->error_stats.prc_rxdcm_sc_abort++; | ||
2106 | |||
2107 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2108 | VXGE_HW_EVENT_VPATH_ERR, | ||
2109 | alarm_event); | ||
2110 | } | ||
2111 | |||
2112 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) | ||
2113 | & ~mask64) { | ||
2114 | sw_stats->error_stats.prc_quanta_size_err++; | ||
2115 | |||
2116 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2117 | VXGE_HW_EVENT_VPATH_ERR, | ||
2118 | alarm_event); | ||
2119 | } | ||
2120 | |||
2121 | if (!skip_alarms) { | ||
2122 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2123 | &vp_reg->prc_alarm_reg); | ||
2124 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2125 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2126 | alarm_event); | ||
2127 | } | ||
2128 | } | ||
2129 | } | ||
2130 | out: | ||
2131 | hldev->stats.sw_dev_err_stats.vpath_alarms++; | ||
2132 | out2: | ||
2133 | if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || | ||
2134 | (alarm_event == VXGE_HW_EVENT_UNKNOWN)) | ||
2135 | return VXGE_HW_OK; | ||
2136 | |||
2137 | __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); | ||
2138 | |||
2139 | if (alarm_event == VXGE_HW_EVENT_SERR) | ||
2140 | return VXGE_HW_ERR_CRITICAL; | ||
2141 | |||
2142 | return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? | ||
2143 | VXGE_HW_ERR_SLOT_FREEZE : | ||
2144 | (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : | ||
2145 | VXGE_HW_ERR_VPATH; | ||
2146 | } | ||
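Every branch above folds its condition into alarm_event through VXGE_HW_SET_LEVEL and never lowers it, so the epilogue can map the single worst event to a return code, testing the most severe classes first (SERR, then slot-freeze, then FIFO). That only works if the macro is a max over the enum ordering; a plausible definition, assumed here because it is not shown in this diff:

	/* Assumed definition: keep whichever event ranks higher,
	 * i.e. the enum order encodes severity. */
	#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))

This quietly depends on enum vxge_hw_event being declared in increasing order of severity.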
2147 | |||
2148 | /* | ||
2149 | * vxge_hw_vpath_alarm_process - Process Alarms. | 2136 | * vxge_hw_vpath_alarm_process - Process Alarms. |
2150 | * @vpath: Virtual Path. | 2137 | * @vpath: Virtual Path. |
2151 | * @skip_alarms: Do not clear the alarms | 2138 | * @skip_alarms: Do not clear the alarms |
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index 1fceee876228..8c3103fb6442 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h | |||
@@ -2081,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv { | |||
2081 | #endif | 2081 | #endif |
2082 | }; | 2082 | }; |
2083 | 2083 | ||
2084 | /* ========================= FIFO PRIVATE API ============================= */ | ||
2085 | |||
2086 | struct vxge_hw_fifo_attr; | ||
2087 | |||
2088 | struct vxge_hw_mempool_cbs { | 2084 | struct vxge_hw_mempool_cbs { |
2089 | void (*item_func_alloc)( | 2085 | void (*item_func_alloc)( |
2090 | struct vxge_hw_mempool *mempoolh, | 2086 | struct vxge_hw_mempool *mempoolh, |
@@ -2158,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode { | |||
2158 | enum vxge_hw_status | 2154 | enum vxge_hw_status |
2159 | vxge_hw_vpath_mac_addr_add( | 2155 | vxge_hw_vpath_mac_addr_add( |
2160 | struct __vxge_hw_vpath_handle *vpath_handle, | 2156 | struct __vxge_hw_vpath_handle *vpath_handle, |
2161 | u8 (macaddr)[ETH_ALEN], | 2157 | u8 *macaddr, |
2162 | u8 (macaddr_mask)[ETH_ALEN], | 2158 | u8 *macaddr_mask, |
2163 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); | 2159 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); |
2164 | 2160 | ||
2165 | enum vxge_hw_status | 2161 | enum vxge_hw_status |
2166 | vxge_hw_vpath_mac_addr_get( | 2162 | vxge_hw_vpath_mac_addr_get( |
2167 | struct __vxge_hw_vpath_handle *vpath_handle, | 2163 | struct __vxge_hw_vpath_handle *vpath_handle, |
2168 | u8 (macaddr)[ETH_ALEN], | 2164 | u8 *macaddr, |
2169 | u8 (macaddr_mask)[ETH_ALEN]); | 2165 | u8 *macaddr_mask); |
2170 | 2166 | ||
2171 | enum vxge_hw_status | 2167 | enum vxge_hw_status |
2172 | vxge_hw_vpath_mac_addr_get_next( | 2168 | vxge_hw_vpath_mac_addr_get_next( |
2173 | struct __vxge_hw_vpath_handle *vpath_handle, | 2169 | struct __vxge_hw_vpath_handle *vpath_handle, |
2174 | u8 (macaddr)[ETH_ALEN], | 2170 | u8 *macaddr, |
2175 | u8 (macaddr_mask)[ETH_ALEN]); | 2171 | u8 *macaddr_mask); |
2176 | 2172 | ||
2177 | enum vxge_hw_status | 2173 | enum vxge_hw_status |
2178 | vxge_hw_vpath_mac_addr_delete( | 2174 | vxge_hw_vpath_mac_addr_delete( |
2179 | struct __vxge_hw_vpath_handle *vpath_handle, | 2175 | struct __vxge_hw_vpath_handle *vpath_handle, |
2180 | u8 (macaddr)[ETH_ALEN], | 2176 | u8 *macaddr, |
2181 | u8 (macaddr_mask)[ETH_ALEN]); | 2177 | u8 *macaddr_mask); |
2182 | 2178 | ||
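The prototype change from u8 (macaddr)[ETH_ALEN] to u8 *macaddr is ABI-neutral, since an array parameter decays to a pointer anyway; it merely stops implying a compiler-checked length, and callers still pass ETH_ALEN-sized buffers. A hedged usage sketch (vp_handle is assumed to be an open vpath handle, and the duplicate-mode constant is quoted from memory, so verify it against the enum):

	u8 macaddr[ETH_ALEN]      = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 macaddr_mask[ETH_ALEN] = { 0 };	/* mask semantics are device-defined */
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_add(vp_handle, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
	if (status != VXGE_HW_OK)
		return status;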
2183 | enum vxge_hw_status | 2179 | enum vxge_hw_status |
2184 | vxge_hw_vpath_vid_add( | 2180 | vxge_hw_vpath_vid_add( |
@@ -2285,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); | |||
2285 | 2281 | ||
2286 | int | 2282 | int |
2287 | vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); | 2283 | vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); |
2284 | |||
2288 | void | 2285 | void |
2289 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); | 2286 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); |
2290 | 2287 | ||