Diffstat (limited to 'drivers/net/vxge/vxge-config.c')
-rw-r--r--  drivers/net/vxge/vxge-config.c | 2481
1 file changed, 1178 insertions(+), 1303 deletions(-)
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a0241fe72d8b..1169aa387cab 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -21,100 +21,15 @@
21#include "vxge-config.h" 21#include "vxge-config.h"
22#include "vxge-main.h" 22#include "vxge-main.h"
23 23
24static enum vxge_hw_status 24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25__vxge_hw_fifo_delete( 25 status = __vxge_hw_vpath_stats_access(vpath, \
26 struct __vxge_hw_vpath_handle *vpath_handle); 26 VXGE_HW_STATS_OP_READ, \
27 27 offset, \
28static struct __vxge_hw_blockpool_entry * 28 &val64); \
29__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, 29 if (status != VXGE_HW_OK) \
30 u32 size); 30 return status; \
31
32static void
33__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
34 struct __vxge_hw_blockpool_entry *entry);
35
36static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
37 void *block_addr,
38 u32 length,
39 struct pci_dev *dma_h,
40 struct pci_dev *acc_handle);
41
42static enum vxge_hw_status
43__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
44 struct __vxge_hw_blockpool *blockpool,
45 u32 pool_size,
46 u32 pool_max);
47
48static void
49__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
50
51static void *
52__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
53 u32 size,
54 struct vxge_hw_mempool_dma *dma_object);
55
56static void
57__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
58 void *memblock,
59 u32 size,
60 struct vxge_hw_mempool_dma *dma_object);
61
62static void
63__vxge_hw_channel_free(
64 struct __vxge_hw_channel *channel);
65
66static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
67
68static enum vxge_hw_status
69__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
70
71static enum vxge_hw_status
72__vxge_hw_device_register_poll(
73 void __iomem *reg,
74 u64 mask, u32 max_millis);
75
76static inline enum vxge_hw_status
77__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
78 u64 mask, u32 max_millis)
79{
80 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
81 wmb();
82
83 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
84 wmb();
85
86 return __vxge_hw_device_register_poll(addr, mask, max_millis);
87} 31}
88 32
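
Note: the VXGE_HW_VPATH_STATS_PIO_READ() macro introduced above is not self-contained; it expects the calling function to have already declared the locals it touches (vpath, val64 and status). A hypothetical invocation such as VXGE_HW_VPATH_STATS_PIO_READ(some_offset) expands to roughly the following sketch (some_offset is a placeholder, not a real register offset):

	status = __vxge_hw_vpath_stats_access(vpath,
					      VXGE_HW_STATS_OP_READ,
					      some_offset,
					      &val64);
	if (status != VXGE_HW_OK)
		return status;
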
89static struct vxge_hw_mempool*
90__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
91 u32 item_size, u32 private_size, u32 items_initial,
92 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
93 void *userdata);
94
95static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
96
97static enum vxge_hw_status
98__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
99 struct vxge_hw_vpath_stats_hw_info *hw_stats);
100
101static enum vxge_hw_status
102vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
103
104static enum vxge_hw_status
105__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
106
107static void
108__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
109
110static enum vxge_hw_status
111__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
112 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
113
114static enum vxge_hw_status
115__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
116 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
117
118static void 33static void
119vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) 34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
120{ 35{
@@ -124,8 +39,6 @@ vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
124 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); 39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
125 writeq(val64, &vp_reg->rxmac_vcfg0); 40 writeq(val64, &vp_reg->rxmac_vcfg0);
126 val64 = readq(&vp_reg->rxmac_vcfg0); 41 val64 = readq(&vp_reg->rxmac_vcfg0);
127
128 return;
129} 42}
130 43
131/* 44/*
@@ -197,6 +110,50 @@ void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
197 } 110 }
198} 111}
199 112
113/*
114 * __vxge_hw_device_register_poll
115 * Will poll a certain register for a specified amount of time.
116 * Will poll until the masked bit is cleared.
117 */
118static enum vxge_hw_status
119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{
121 u64 val64;
122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124
125 udelay(10);
126
127 do {
128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
133
134 i = 0;
135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
141
142 return ret;
143}
144
145static inline enum vxge_hw_status
146__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
147 u64 mask, u32 max_millis)
148{
149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
150 wmb();
151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
152 wmb();
153
154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
155}
156
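
The relocated __vxge_hw_device_register_poll() polls in two stages: an initial udelay(10), ten passes with udelay(100), then one-millisecond passes up to max_millis, returning VXGE_HW_OK as soon as the masked bits read back as zero. __vxge_hw_pio_mem_write64() writes the 64-bit value as two 32-bit halves (each followed by a write barrier) and then reuses that poll to wait for a strobe bit to clear. A minimal sketch of the typical call pattern, mirroring the xmac_stats_access path added later in this patch:

	/* Write the command word, then wait for the STROBE bit to clear
	 * within the configured poll budget (sketch only). */
	status = __vxge_hw_pio_mem_write64(val64,
			&vp_reg->xmac_stats_access_cmd,
			VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
			vpath->hldev->config.device_poll_millis);
	if (status != VXGE_HW_OK)
		return status;
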
200static enum vxge_hw_status 157static enum vxge_hw_status
201vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, 158vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
202 u32 fw_memo, u32 offset, u64 *data0, u64 *data1, 159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
@@ -446,77 +403,6 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
446} 403}
447 404
448/* 405/*
449 * __vxge_hw_channel_allocate - Allocate memory for channel
450 * This function allocates required memory for the channel and various arrays
451 * in the channel
452 */
453static struct __vxge_hw_channel *
454__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
455 enum __vxge_hw_channel_type type,
456 u32 length, u32 per_dtr_space, void *userdata)
457{
458 struct __vxge_hw_channel *channel;
459 struct __vxge_hw_device *hldev;
460 int size = 0;
461 u32 vp_id;
462
463 hldev = vph->vpath->hldev;
464 vp_id = vph->vpath->vp_id;
465
466 switch (type) {
467 case VXGE_HW_CHANNEL_TYPE_FIFO:
468 size = sizeof(struct __vxge_hw_fifo);
469 break;
470 case VXGE_HW_CHANNEL_TYPE_RING:
471 size = sizeof(struct __vxge_hw_ring);
472 break;
473 default:
474 break;
475 }
476
477 channel = kzalloc(size, GFP_KERNEL);
478 if (channel == NULL)
479 goto exit0;
480 INIT_LIST_HEAD(&channel->item);
481
482 channel->common_reg = hldev->common_reg;
483 channel->first_vp_id = hldev->first_vp_id;
484 channel->type = type;
485 channel->devh = hldev;
486 channel->vph = vph;
487 channel->userdata = userdata;
488 channel->per_dtr_space = per_dtr_space;
489 channel->length = length;
490 channel->vp_id = vp_id;
491
492 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
493 if (channel->work_arr == NULL)
494 goto exit1;
495
496 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
497 if (channel->free_arr == NULL)
498 goto exit1;
499 channel->free_ptr = length;
500
501 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
502 if (channel->reserve_arr == NULL)
503 goto exit1;
504 channel->reserve_ptr = length;
505 channel->reserve_top = 0;
506
507 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
508 if (channel->orig_arr == NULL)
509 goto exit1;
510
511 return channel;
512exit1:
513 __vxge_hw_channel_free(channel);
514
515exit0:
516 return NULL;
517}
518
519/*
520 * __vxge_hw_channel_free - Free memory allocated for channel 406 * __vxge_hw_channel_free - Free memory allocated for channel
521 * This function deallocates memory from the channel and various arrays 407 * This function deallocates memory from the channel and various arrays
522 * in the channel 408 * in the channel
@@ -609,38 +495,6 @@ static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
609 pci_save_state(hldev->pdev); 495 pci_save_state(hldev->pdev);
610} 496}
611 497
612/*
613 * __vxge_hw_device_register_poll
614 * Will poll a certain register for a specified amount of time.
615 * Will poll until the masked bit is cleared.
616 */
617static enum vxge_hw_status
618__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
619{
620 u64 val64;
621 u32 i = 0;
622 enum vxge_hw_status ret = VXGE_HW_FAIL;
623
624 udelay(10);
625
626 do {
627 val64 = readq(reg);
628 if (!(val64 & mask))
629 return VXGE_HW_OK;
630 udelay(100);
631 } while (++i <= 9);
632
633 i = 0;
634 do {
635 val64 = readq(reg);
636 if (!(val64 & mask))
637 return VXGE_HW_OK;
638 mdelay(1);
639 } while (++i <= max_millis);
640
641 return ret;
642}
643
644/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset 498/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
645 * in progress 499 * in progress
646 * This routine checks the vpath reset in progress register is turned zero 500 * This routine checks the vpath reset in progress register is turned zero
@@ -656,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
656} 510}
657 511
658/* 512/*
513 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
514 * Set the swapper bits appropriately for the legacy section.
515 */
516static enum vxge_hw_status
517__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
518{
519 u64 val64;
520 enum vxge_hw_status status = VXGE_HW_OK;
521
522 val64 = readq(&legacy_reg->toc_swapper_fb);
523
524 wmb();
525
526 switch (val64) {
527 case VXGE_HW_SWAPPER_INITIAL_VALUE:
528 return status;
529
530 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
531 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
532 &legacy_reg->pifm_rd_swap_en);
533 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
534 &legacy_reg->pifm_rd_flip_en);
535 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
536 &legacy_reg->pifm_wr_swap_en);
537 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
538 &legacy_reg->pifm_wr_flip_en);
539 break;
540
541 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
542 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
543 &legacy_reg->pifm_rd_swap_en);
544 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
545 &legacy_reg->pifm_wr_swap_en);
546 break;
547
548 case VXGE_HW_SWAPPER_BIT_FLIPPED:
549 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
550 &legacy_reg->pifm_rd_flip_en);
551 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
552 &legacy_reg->pifm_wr_flip_en);
553 break;
554 }
555
556 wmb();
557
558 val64 = readq(&legacy_reg->toc_swapper_fb);
559
560 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
561 status = VXGE_HW_ERR_SWAPPER_CTRL;
562
563 return status;
564}
565
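
The swapper routine above reads a fixed probe pattern from toc_swapper_fb, compares it against the expected value and its byte-swapped and bit-flipped forms, and enables the matching read/write correction in the PIFM registers. Purely as an illustration of the byte-swapped case (the values below are placeholders, not the actual VXGE_HW_SWAPPER_* constants, which live in vxge-reg.h):

	/* Illustrative only: a byte-swapping bus returns swab64() of the probe. */
	u64 pattern      = 0x0123456789abcdefULL;
	u64 byte_swapped = swab64(pattern);	/* 0xefcdab8967452301ULL */
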
566/*
659 * __vxge_hw_device_toc_get 567 * __vxge_hw_device_toc_get
660 * This routine sets the swapper and reads the toc pointer and returns the 568 * This routine sets the swapper and reads the toc pointer and returns the
661 * memory mapped address of the toc 569 * memory mapped address of the toc
@@ -1132,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
1132 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); 1040 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1133 1041
1134 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1042 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1135
1136 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1043 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1137 continue; 1044 continue;
1138 1045
@@ -1196,6 +1103,218 @@ exit:
1196} 1103}
1197 1104
1198/* 1105/*
1106 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1107 */
1108static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1109{
1110 struct __vxge_hw_device *hldev;
1111 struct list_head *p, *n;
1112 u16 ret;
1113
1114 if (blockpool == NULL) {
1115 ret = 1;
1116 goto exit;
1117 }
1118
1119 hldev = blockpool->hldev;
1120
1121 list_for_each_safe(p, n, &blockpool->free_block_list) {
1122 pci_unmap_single(hldev->pdev,
1123 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1124 ((struct __vxge_hw_blockpool_entry *)p)->length,
1125 PCI_DMA_BIDIRECTIONAL);
1126
1127 vxge_os_dma_free(hldev->pdev,
1128 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1129 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1130
1131 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1132 kfree(p);
1133 blockpool->pool_size--;
1134 }
1135
1136 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1137 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1138 kfree((void *)p);
1139 }
1140 ret = 0;
1141exit:
1142 return;
1143}
1144
1145/*
1146 * __vxge_hw_blockpool_create - Create block pool
1147 */
1148static enum vxge_hw_status
1149__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1150 struct __vxge_hw_blockpool *blockpool,
1151 u32 pool_size,
1152 u32 pool_max)
1153{
1154 u32 i;
1155 struct __vxge_hw_blockpool_entry *entry = NULL;
1156 void *memblock;
1157 dma_addr_t dma_addr;
1158 struct pci_dev *dma_handle;
1159 struct pci_dev *acc_handle;
1160 enum vxge_hw_status status = VXGE_HW_OK;
1161
1162 if (blockpool == NULL) {
1163 status = VXGE_HW_FAIL;
1164 goto blockpool_create_exit;
1165 }
1166
1167 blockpool->hldev = hldev;
1168 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1169 blockpool->pool_size = 0;
1170 blockpool->pool_max = pool_max;
1171 blockpool->req_out = 0;
1172
1173 INIT_LIST_HEAD(&blockpool->free_block_list);
1174 INIT_LIST_HEAD(&blockpool->free_entry_list);
1175
1176 for (i = 0; i < pool_size + pool_max; i++) {
1177 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1178 GFP_KERNEL);
1179 if (entry == NULL) {
1180 __vxge_hw_blockpool_destroy(blockpool);
1181 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1182 goto blockpool_create_exit;
1183 }
1184 list_add(&entry->item, &blockpool->free_entry_list);
1185 }
1186
1187 for (i = 0; i < pool_size; i++) {
1188 memblock = vxge_os_dma_malloc(
1189 hldev->pdev,
1190 VXGE_HW_BLOCK_SIZE,
1191 &dma_handle,
1192 &acc_handle);
1193 if (memblock == NULL) {
1194 __vxge_hw_blockpool_destroy(blockpool);
1195 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1196 goto blockpool_create_exit;
1197 }
1198
1199 dma_addr = pci_map_single(hldev->pdev, memblock,
1200 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1201 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1202 dma_addr))) {
1203 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1204 __vxge_hw_blockpool_destroy(blockpool);
1205 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1206 goto blockpool_create_exit;
1207 }
1208
1209 if (!list_empty(&blockpool->free_entry_list))
1210 entry = (struct __vxge_hw_blockpool_entry *)
1211 list_first_entry(&blockpool->free_entry_list,
1212 struct __vxge_hw_blockpool_entry,
1213 item);
1214
1215 if (entry == NULL)
1216 entry =
1217 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1218 GFP_KERNEL);
1219 if (entry != NULL) {
1220 list_del(&entry->item);
1221 entry->length = VXGE_HW_BLOCK_SIZE;
1222 entry->memblock = memblock;
1223 entry->dma_addr = dma_addr;
1224 entry->acc_handle = acc_handle;
1225 entry->dma_handle = dma_handle;
1226 list_add(&entry->item,
1227 &blockpool->free_block_list);
1228 blockpool->pool_size++;
1229 } else {
1230 __vxge_hw_blockpool_destroy(blockpool);
1231 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1232 goto blockpool_create_exit;
1233 }
1234 }
1235
1236blockpool_create_exit:
1237 return status;
1238}
1239
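
Taken together, the relocated block pool helpers form a small lifecycle: __vxge_hw_blockpool_create() pre-allocates pool_size DMA-mapped blocks of VXGE_HW_BLOCK_SIZE plus spare entry structures, __vxge_hw_blockpool_malloc()/__vxge_hw_blockpool_free() (further down in this patch) hand blocks out and back, and __vxge_hw_blockpool_destroy() unmaps and frees whatever is left. A hedged usage sketch (error handling elided; pool_size and pool_max are placeholders):

	struct vxge_hw_mempool_dma dma_object;
	void *block;
	enum vxge_hw_status status;

	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
					    pool_size, pool_max);

	/* Requests of exactly VXGE_HW_BLOCK_SIZE are served from the pool;
	 * any other size falls back to vxge_os_dma_malloc(). */
	block = __vxge_hw_blockpool_malloc(hldev, VXGE_HW_BLOCK_SIZE,
					   &dma_object);
	__vxge_hw_blockpool_free(hldev, block, VXGE_HW_BLOCK_SIZE, &dma_object);

	__vxge_hw_blockpool_destroy(&hldev->block_pool);
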
1240/*
1241 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1242 * Check the fifo configuration
1243 */
1244static enum vxge_hw_status
1245__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1246{
1247 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1248 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1249 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1250
1251 return VXGE_HW_OK;
1252}
1253
1254/*
1255 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1256 * Check the vpath configuration
1257 */
1258static enum vxge_hw_status
1259__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1260{
1261 enum vxge_hw_status status;
1262
1263 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1264 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1265 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1266
1267 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1268 if (status != VXGE_HW_OK)
1269 return status;
1270
1271 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1272 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1273 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1274 return VXGE_HW_BADCFG_VPATH_MTU;
1275
1276 if ((vp_config->rpa_strip_vlan_tag !=
1277 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1278 (vp_config->rpa_strip_vlan_tag !=
1279 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1280 (vp_config->rpa_strip_vlan_tag !=
1281 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1282 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1283
1284 return VXGE_HW_OK;
1285}
1286
1287/*
1288 * __vxge_hw_device_config_check - Check device configuration.
1289 * Check the device configuration
1290 */
1291static enum vxge_hw_status
1292__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1293{
1294 u32 i;
1295 enum vxge_hw_status status;
1296
1297 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1298 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1299 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1300 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1301 return VXGE_HW_BADCFG_INTR_MODE;
1302
1303 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1304 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1305 return VXGE_HW_BADCFG_RTS_MAC_EN;
1306
1307 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1308 status = __vxge_hw_device_vpath_config_check(
1309 &new_config->vp_config[i]);
1310 if (status != VXGE_HW_OK)
1311 return status;
1312 }
1313
1314 return VXGE_HW_OK;
1315}
1316
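
These relocated checks validate the caller-supplied configuration (interrupt mode, RTS MAC steering, per-vpath bandwidth, MTU, VLAN-tag stripping and fifo sizing) before the device is brought up. A hypothetical validation pass might look like the sketch below; vxge_hw_device_config_default_get() is the public helper, seen later in this file, that fills in defaults:

	struct vxge_hw_device_config config;
	enum vxge_hw_status status;

	vxge_hw_device_config_default_get(&config);
	config.intr_mode = VXGE_HW_INTR_MODE_MSIX;	/* one of the accepted modes */

	status = __vxge_hw_device_config_check(&config);
	if (status != VXGE_HW_OK)
		return status;
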
1317/*
1199 * vxge_hw_device_initialize - Initialize Titan device. 1318 * vxge_hw_device_initialize - Initialize Titan device.
1200 * Initialize Titan device. Note that all the arguments of this public API 1319 * Initialize Titan device. Note that all the arguments of this public API
1201 * are 'IN', including @hldev. Driver cooperates with 1320 * are 'IN', including @hldev. Driver cooperates with
@@ -1303,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1303} 1422}
1304 1423
1305/* 1424/*
1425 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1426 * and offset and perform an operation
1427 */
1428static enum vxge_hw_status
1429__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1430 u32 operation, u32 offset, u64 *stat)
1431{
1432 u64 val64;
1433 enum vxge_hw_status status = VXGE_HW_OK;
1434 struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
1436 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1437 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1438 goto vpath_stats_access_exit;
1439 }
1440
1441 vp_reg = vpath->vp_reg;
1442
1443 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1444 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1445 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1446
1447 status = __vxge_hw_pio_mem_write64(val64,
1448 &vp_reg->xmac_stats_access_cmd,
1449 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1450 vpath->hldev->config.device_poll_millis);
1451 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1452 *stat = readq(&vp_reg->xmac_stats_access_data);
1453 else
1454 *stat = 0;
1455
1456vpath_stats_access_exit:
1457 return status;
1458}
1459
1460/*
1461 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1462 */
1463static enum vxge_hw_status
1464__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1465 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1466{
1467 u64 *val64;
1468 int i;
1469 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1470 enum vxge_hw_status status = VXGE_HW_OK;
1471
1472 val64 = (u64 *)vpath_tx_stats;
1473
1474 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1475 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1476 goto exit;
1477 }
1478
1479 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1480 status = __vxge_hw_vpath_stats_access(vpath,
1481 VXGE_HW_STATS_OP_READ,
1482 offset, val64);
1483 if (status != VXGE_HW_OK)
1484 goto exit;
1485 offset++;
1486 val64++;
1487 }
1488exit:
1489 return status;
1490}
1491
1492/*
1493 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1494 */
1495static enum vxge_hw_status
1496__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1497 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1498{
1499 u64 *val64;
1500 enum vxge_hw_status status = VXGE_HW_OK;
1501 int i;
1502 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1503 val64 = (u64 *) vpath_rx_stats;
1504
1505 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1506 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1507 goto exit;
1508 }
1509 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1510 status = __vxge_hw_vpath_stats_access(vpath,
1511 VXGE_HW_STATS_OP_READ,
1512 offset >> 3, val64);
1513 if (status != VXGE_HW_OK)
1514 goto exit;
1515
1516 offset += 8;
1517 val64++;
1518 }
1519exit:
1520 return status;
1521}
1522
1523/*
1524 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1525 */
1526static enum vxge_hw_status
1527__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1528 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1529{
1530 u64 val64;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532 struct vxge_hw_vpath_reg __iomem *vp_reg;
1533
1534 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1535 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1536 goto exit;
1537 }
1538 vp_reg = vpath->vp_reg;
1539
1540 val64 = readq(&vp_reg->vpath_debug_stats0);
1541 hw_stats->ini_num_mwr_sent =
1542 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1543
1544 val64 = readq(&vp_reg->vpath_debug_stats1);
1545 hw_stats->ini_num_mrd_sent =
1546 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1547
1548 val64 = readq(&vp_reg->vpath_debug_stats2);
1549 hw_stats->ini_num_cpl_rcvd =
1550 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1551
1552 val64 = readq(&vp_reg->vpath_debug_stats3);
1553 hw_stats->ini_num_mwr_byte_sent =
1554 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1555
1556 val64 = readq(&vp_reg->vpath_debug_stats4);
1557 hw_stats->ini_num_cpl_byte_rcvd =
1558 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1559
1560 val64 = readq(&vp_reg->vpath_debug_stats5);
1561 hw_stats->wrcrdtarb_xoff =
1562 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1563
1564 val64 = readq(&vp_reg->vpath_debug_stats6);
1565 hw_stats->rdcrdtarb_xoff =
1566 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1567
1568 val64 = readq(&vp_reg->vpath_genstats_count01);
1569 hw_stats->vpath_genstats_count0 =
1570 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1571 val64);
1572
1573 val64 = readq(&vp_reg->vpath_genstats_count01);
1574 hw_stats->vpath_genstats_count1 =
1575 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1576 val64);
1577
1578 val64 = readq(&vp_reg->vpath_genstats_count23);
1579 hw_stats->vpath_genstats_count2 =
1580 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1581 val64);
1582
1583 val64 = readq(&vp_reg->vpath_genstats_count01);
1584 hw_stats->vpath_genstats_count3 =
1585 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1586 val64);
1587
1588 val64 = readq(&vp_reg->vpath_genstats_count4);
1589 hw_stats->vpath_genstats_count4 =
1590 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1591 val64);
1592
1593 val64 = readq(&vp_reg->vpath_genstats_count5);
1594 hw_stats->vpath_genstats_count5 =
1595 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1596 val64);
1597
1598 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1599 if (status != VXGE_HW_OK)
1600 goto exit;
1601
1602 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1603 if (status != VXGE_HW_OK)
1604 goto exit;
1605
1606 VXGE_HW_VPATH_STATS_PIO_READ(
1607 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1608
1609 hw_stats->prog_event_vnum0 =
1610 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1611
1612 hw_stats->prog_event_vnum1 =
1613 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1614
1615 VXGE_HW_VPATH_STATS_PIO_READ(
1616 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1617
1618 hw_stats->prog_event_vnum2 =
1619 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1620
1621 hw_stats->prog_event_vnum3 =
1622 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1623
1624 val64 = readq(&vp_reg->rx_multi_cast_stats);
1625 hw_stats->rx_multi_cast_frame_discard =
1626 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1627
1628 val64 = readq(&vp_reg->rx_frm_transferred);
1629 hw_stats->rx_frm_transferred =
1630 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1631
1632 val64 = readq(&vp_reg->rxd_returned);
1633 hw_stats->rxd_returned =
1634 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1635
1636 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1637 hw_stats->rx_mpa_len_fail_frms =
1638 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1639 hw_stats->rx_mpa_mrk_fail_frms =
1640 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1641 hw_stats->rx_mpa_crc_fail_frms =
1642 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1643
1644 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1645 hw_stats->rx_permitted_frms =
1646 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1647 hw_stats->rx_vp_reset_discarded_frms =
1648 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1649 hw_stats->rx_wol_frms =
1650 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1651
1652 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1653 hw_stats->tx_vp_reset_discarded_frms =
1654 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1655 val64);
1656exit:
1657 return status;
1658}
1659
1660/*
1306 * vxge_hw_device_stats_get - Get the device hw statistics. 1661 * vxge_hw_device_stats_get - Get the device hw statistics.
1307 * Returns the vpath h/w stats for the device. 1662 * Returns the vpath h/w stats for the device.
1308 */ 1663 */
@@ -1468,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1468 1823
1469 status = vxge_hw_device_xmac_aggr_stats_get(hldev, 1824 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1470 0, &xmac_stats->aggr_stats[0]); 1825 0, &xmac_stats->aggr_stats[0]);
1471
1472 if (status != VXGE_HW_OK) 1826 if (status != VXGE_HW_OK)
1473 goto exit; 1827 goto exit;
1474 1828
@@ -1843,189 +2197,359 @@ exit:
1843} 2197}
1844 2198
1845/* 2199/*
1846 * __vxge_hw_ring_create - Create a Ring 2200 * __vxge_hw_channel_allocate - Allocate memory for channel
1847 * This function creates Ring and initializes it. 2201 * This function allocates required memory for the channel and various arrays
2202 * in the channel
1848 */ 2203 */
1849static enum vxge_hw_status 2204static struct __vxge_hw_channel *
1850__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 2205__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
1851 struct vxge_hw_ring_attr *attr) 2206 enum __vxge_hw_channel_type type,
2207 u32 length, u32 per_dtr_space,
2208 void *userdata)
1852{ 2209{
1853 enum vxge_hw_status status = VXGE_HW_OK; 2210 struct __vxge_hw_channel *channel;
1854 struct __vxge_hw_ring *ring;
1855 u32 ring_length;
1856 struct vxge_hw_ring_config *config;
1857 struct __vxge_hw_device *hldev; 2211 struct __vxge_hw_device *hldev;
2212 int size = 0;
1858 u32 vp_id; 2213 u32 vp_id;
1859 struct vxge_hw_mempool_cbs ring_mp_callback;
1860 2214
1861 if ((vp == NULL) || (attr == NULL)) { 2215 hldev = vph->vpath->hldev;
2216 vp_id = vph->vpath->vp_id;
2217
2218 switch (type) {
2219 case VXGE_HW_CHANNEL_TYPE_FIFO:
2220 size = sizeof(struct __vxge_hw_fifo);
2221 break;
2222 case VXGE_HW_CHANNEL_TYPE_RING:
2223 size = sizeof(struct __vxge_hw_ring);
2224 break;
2225 default:
2226 break;
2227 }
2228
2229 channel = kzalloc(size, GFP_KERNEL);
2230 if (channel == NULL)
2231 goto exit0;
2232 INIT_LIST_HEAD(&channel->item);
2233
2234 channel->common_reg = hldev->common_reg;
2235 channel->first_vp_id = hldev->first_vp_id;
2236 channel->type = type;
2237 channel->devh = hldev;
2238 channel->vph = vph;
2239 channel->userdata = userdata;
2240 channel->per_dtr_space = per_dtr_space;
2241 channel->length = length;
2242 channel->vp_id = vp_id;
2243
2244 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2245 if (channel->work_arr == NULL)
2246 goto exit1;
2247
2248 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2249 if (channel->free_arr == NULL)
2250 goto exit1;
2251 channel->free_ptr = length;
2252
2253 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2254 if (channel->reserve_arr == NULL)
2255 goto exit1;
2256 channel->reserve_ptr = length;
2257 channel->reserve_top = 0;
2258
2259 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2260 if (channel->orig_arr == NULL)
2261 goto exit1;
2262
2263 return channel;
2264exit1:
2265 __vxge_hw_channel_free(channel);
2266
2267exit0:
2268 return NULL;
2269}
2270
2271/*
2272 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2273 * Adds a block to block pool
2274 */
2275static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2276 void *block_addr,
2277 u32 length,
2278 struct pci_dev *dma_h,
2279 struct pci_dev *acc_handle)
2280{
2281 struct __vxge_hw_blockpool *blockpool;
2282 struct __vxge_hw_blockpool_entry *entry = NULL;
2283 dma_addr_t dma_addr;
2284 enum vxge_hw_status status = VXGE_HW_OK;
2285 u32 req_out;
2286
2287 blockpool = &devh->block_pool;
2288
2289 if (block_addr == NULL) {
2290 blockpool->req_out--;
1862 status = VXGE_HW_FAIL; 2291 status = VXGE_HW_FAIL;
1863 goto exit; 2292 goto exit;
1864 } 2293 }
1865 2294
1866 hldev = vp->vpath->hldev; 2295 dma_addr = pci_map_single(devh->pdev, block_addr, length,
1867 vp_id = vp->vpath->vp_id; 2296 PCI_DMA_BIDIRECTIONAL);
1868 2297
1869 config = &hldev->config.vp_config[vp_id].ring; 2298 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2299 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2300 blockpool->req_out--;
2301 status = VXGE_HW_FAIL;
2302 goto exit;
2303 }
1870 2304
1871 ring_length = config->ring_blocks * 2305 if (!list_empty(&blockpool->free_entry_list))
1872 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2306 entry = (struct __vxge_hw_blockpool_entry *)
2307 list_first_entry(&blockpool->free_entry_list,
2308 struct __vxge_hw_blockpool_entry,
2309 item);
1873 2310
1874 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, 2311 if (entry == NULL)
1875 VXGE_HW_CHANNEL_TYPE_RING, 2312 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
1876 ring_length, 2313 else
1877 attr->per_rxd_space, 2314 list_del(&entry->item);
1878 attr->userdata);
1879 2315
1880 if (ring == NULL) { 2316 if (entry != NULL) {
2317 entry->length = length;
2318 entry->memblock = block_addr;
2319 entry->dma_addr = dma_addr;
2320 entry->acc_handle = acc_handle;
2321 entry->dma_handle = dma_h;
2322 list_add(&entry->item, &blockpool->free_block_list);
2323 blockpool->pool_size++;
2324 status = VXGE_HW_OK;
2325 } else
1881 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2326 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1882 goto exit;
1883 }
1884 2327
1885 vp->vpath->ringh = ring; 2328 blockpool->req_out--;
1886 ring->vp_id = vp_id;
1887 ring->vp_reg = vp->vpath->vp_reg;
1888 ring->common_reg = hldev->common_reg;
1889 ring->stats = &vp->vpath->sw_stats->ring_stats;
1890 ring->config = config;
1891 ring->callback = attr->callback;
1892 ring->rxd_init = attr->rxd_init;
1893 ring->rxd_term = attr->rxd_term;
1894 ring->buffer_mode = config->buffer_mode;
1895 ring->rxds_limit = config->rxds_limit;
1896 2329
1897 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2330 req_out = blockpool->req_out;
1898 ring->rxd_priv_size = 2331exit:
1899 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; 2332 return;
1900 ring->per_rxd_space = attr->per_rxd_space; 2333}
1901 2334
1902 ring->rxd_priv_size = 2335static inline void
1903 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / 2336vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
1904 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; 2337{
2338 gfp_t flags;
2339 void *vaddr;
1905 2340
1906 /* how many RxDs can fit into one block. Depends on configured 2341 if (in_interrupt())
1907 * buffer_mode. */ 2342 flags = GFP_ATOMIC | GFP_DMA;
1908 ring->rxds_per_block = 2343 else
1909 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2344 flags = GFP_KERNEL | GFP_DMA;
1910 2345
1911 /* calculate actual RxD block private size */ 2346 vaddr = kmalloc((size), flags);
1912 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1913 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1914 ring->mempool = __vxge_hw_mempool_create(hldev,
1915 VXGE_HW_BLOCK_SIZE,
1916 VXGE_HW_BLOCK_SIZE,
1917 ring->rxdblock_priv_size,
1918 ring->config->ring_blocks,
1919 ring->config->ring_blocks,
1920 &ring_mp_callback,
1921 ring);
1922 2347
1923 if (ring->mempool == NULL) { 2348 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1924 __vxge_hw_ring_delete(vp); 2349}
1925 return VXGE_HW_ERR_OUT_OF_MEMORY;
1926 }
1927 2350
1928 status = __vxge_hw_channel_initialize(&ring->channel); 2351/*
1929 if (status != VXGE_HW_OK) { 2352 * __vxge_hw_blockpool_blocks_add - Request additional blocks
1930 __vxge_hw_ring_delete(vp); 2353 */
1931 goto exit; 2354static
2355void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2356{
2357 u32 nreq = 0, i;
2358
2359 if ((blockpool->pool_size + blockpool->req_out) <
2360 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2361 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2362 blockpool->req_out += nreq;
1932 } 2363 }
1933 2364
1934 /* Note: 2365 for (i = 0; i < nreq; i++)
1935 * Specifying rxd_init callback means two things: 2366 vxge_os_dma_malloc_async(
1936 * 1) rxds need to be initialized by driver at channel-open time; 2367 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1937 * 2) rxds need to be posted at channel-open time 2368 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
1938 * (that's what the initial_replenish() below does) 2369}
1939 * Currently we don't have a case when the 1) is done without the 2). 2370
1940 */ 2371/*
1941 if (ring->rxd_init) { 2372 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
1942 status = vxge_hw_ring_replenish(ring); 2373 * Allocates a block of memory of given size, either from block pool
1943 if (status != VXGE_HW_OK) { 2374 * or by calling vxge_os_dma_malloc()
1944 __vxge_hw_ring_delete(vp); 2375 */
2376static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2377 struct vxge_hw_mempool_dma *dma_object)
2378{
2379 struct __vxge_hw_blockpool_entry *entry = NULL;
2380 struct __vxge_hw_blockpool *blockpool;
2381 void *memblock = NULL;
2382 enum vxge_hw_status status = VXGE_HW_OK;
2383
2384 blockpool = &devh->block_pool;
2385
2386 if (size != blockpool->block_size) {
2387
2388 memblock = vxge_os_dma_malloc(devh->pdev, size,
2389 &dma_object->handle,
2390 &dma_object->acc_handle);
2391
2392 if (memblock == NULL) {
2393 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1945 goto exit; 2394 goto exit;
1946 } 2395 }
1947 }
1948 2396
1949 /* initial replenish will increment the counter in its post() routine, 2397 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
1950 * we have to reset it */ 2398 PCI_DMA_BIDIRECTIONAL);
1951 ring->stats->common_stats.usage_cnt = 0; 2399
2400 if (unlikely(pci_dma_mapping_error(devh->pdev,
2401 dma_object->addr))) {
2402 vxge_os_dma_free(devh->pdev, memblock,
2403 &dma_object->acc_handle);
2404 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2405 goto exit;
2406 }
2407
2408 } else {
2409
2410 if (!list_empty(&blockpool->free_block_list))
2411 entry = (struct __vxge_hw_blockpool_entry *)
2412 list_first_entry(&blockpool->free_block_list,
2413 struct __vxge_hw_blockpool_entry,
2414 item);
2415
2416 if (entry != NULL) {
2417 list_del(&entry->item);
2418 dma_object->addr = entry->dma_addr;
2419 dma_object->handle = entry->dma_handle;
2420 dma_object->acc_handle = entry->acc_handle;
2421 memblock = entry->memblock;
2422
2423 list_add(&entry->item,
2424 &blockpool->free_entry_list);
2425 blockpool->pool_size--;
2426 }
2427
2428 if (memblock != NULL)
2429 __vxge_hw_blockpool_blocks_add(blockpool);
2430 }
1952exit: 2431exit:
1953 return status; 2432 return memblock;
1954} 2433}
1955 2434
1956/* 2435/*
1957 * __vxge_hw_ring_abort - Returns the RxD 2436 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
1958 * This function terminates the RxDs of ring
1959 */ 2437 */
1960static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) 2438static void
2439__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
1961{ 2440{
1962 void *rxdh; 2441 struct list_head *p, *n;
1963 struct __vxge_hw_channel *channel;
1964
1965 channel = &ring->channel;
1966 2442
1967 for (;;) { 2443 list_for_each_safe(p, n, &blockpool->free_block_list) {
1968 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1969 2444
1970 if (rxdh == NULL) 2445 if (blockpool->pool_size < blockpool->pool_max)
1971 break; 2446 break;
1972 2447
1973 vxge_hw_channel_dtr_complete(channel); 2448 pci_unmap_single(
2449 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2450 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2451 ((struct __vxge_hw_blockpool_entry *)p)->length,
2452 PCI_DMA_BIDIRECTIONAL);
1974 2453
1975 if (ring->rxd_term) 2454 vxge_os_dma_free(
1976 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, 2455 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1977 channel->userdata); 2456 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2457 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1978 2458
1979 vxge_hw_channel_dtr_free(channel, rxdh); 2459 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1980 }
1981 2460
1982 return VXGE_HW_OK; 2461 list_add(p, &blockpool->free_entry_list);
2462
2463 blockpool->pool_size--;
2464
2465 }
1983} 2466}
1984 2467
1985/* 2468/*
1986 * __vxge_hw_ring_reset - Resets the ring 2469 * __vxge_hw_blockpool_free - Frees the memory allocated with
1987 * This function resets the ring during vpath reset operation 2470 * __vxge_hw_blockpool_malloc
1988 */ 2471 */
1989static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) 2472static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2473 void *memblock, u32 size,
2474 struct vxge_hw_mempool_dma *dma_object)
1990{ 2475{
2476 struct __vxge_hw_blockpool_entry *entry = NULL;
2477 struct __vxge_hw_blockpool *blockpool;
1991 enum vxge_hw_status status = VXGE_HW_OK; 2478 enum vxge_hw_status status = VXGE_HW_OK;
1992 struct __vxge_hw_channel *channel;
1993 2479
1994 channel = &ring->channel; 2480 blockpool = &devh->block_pool;
1995 2481
1996 __vxge_hw_ring_abort(ring); 2482 if (size != blockpool->block_size) {
2483 pci_unmap_single(devh->pdev, dma_object->addr, size,
2484 PCI_DMA_BIDIRECTIONAL);
2485 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2486 } else {
1997 2487
1998 status = __vxge_hw_channel_reset(channel); 2488 if (!list_empty(&blockpool->free_entry_list))
2489 entry = (struct __vxge_hw_blockpool_entry *)
2490 list_first_entry(&blockpool->free_entry_list,
2491 struct __vxge_hw_blockpool_entry,
2492 item);
1999 2493
2000 if (status != VXGE_HW_OK) 2494 if (entry == NULL)
2001 goto exit; 2495 entry = vmalloc(sizeof(
2496 struct __vxge_hw_blockpool_entry));
2497 else
2498 list_del(&entry->item);
2002 2499
2003 if (ring->rxd_init) { 2500 if (entry != NULL) {
2004 status = vxge_hw_ring_replenish(ring); 2501 entry->length = size;
2005 if (status != VXGE_HW_OK) 2502 entry->memblock = memblock;
2006 goto exit; 2503 entry->dma_addr = dma_object->addr;
2504 entry->acc_handle = dma_object->acc_handle;
2505 entry->dma_handle = dma_object->handle;
2506 list_add(&entry->item,
2507 &blockpool->free_block_list);
2508 blockpool->pool_size++;
2509 status = VXGE_HW_OK;
2510 } else
2511 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2512
2513 if (status == VXGE_HW_OK)
2514 __vxge_hw_blockpool_blocks_remove(blockpool);
2007 } 2515 }
2008exit:
2009 return status;
2010} 2516}
2011 2517
2012/* 2518/*
2013 * __vxge_hw_ring_delete - Removes the ring 2519 * vxge_hw_mempool_destroy
2014 * This function frees up the memory pool and removes the ring
2015 */ 2520 */
2016static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) 2521static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2017{ 2522{
2018 struct __vxge_hw_ring *ring = vp->vpath->ringh; 2523 u32 i, j;
2524 struct __vxge_hw_device *devh = mempool->devh;
2019 2525
2020 __vxge_hw_ring_abort(ring); 2526 for (i = 0; i < mempool->memblocks_allocated; i++) {
2527 struct vxge_hw_mempool_dma *dma_object;
2021 2528
2022 if (ring->mempool) 2529 vxge_assert(mempool->memblocks_arr[i]);
2023 __vxge_hw_mempool_destroy(ring->mempool); 2530 vxge_assert(mempool->memblocks_dma_arr + i);
2024 2531
2025 vp->vpath->ringh = NULL; 2532 dma_object = mempool->memblocks_dma_arr + i;
2026 __vxge_hw_channel_free(&ring->channel);
2027 2533
2028 return VXGE_HW_OK; 2534 for (j = 0; j < mempool->items_per_memblock; j++) {
2535 u32 index = i * mempool->items_per_memblock + j;
2536
2537 /* to skip last partially filled(if any) memblock */
2538 if (index >= mempool->items_current)
2539 break;
2540 }
2541
2542 vfree(mempool->memblocks_priv_arr[i]);
2543
2544 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2545 mempool->memblock_size, dma_object);
2546 }
2547
2548 vfree(mempool->items_arr);
2549 vfree(mempool->memblocks_dma_arr);
2550 vfree(mempool->memblocks_priv_arr);
2551 vfree(mempool->memblocks_arr);
2552 vfree(mempool);
2029} 2553}
2030 2554
2031/* 2555/*
@@ -2118,16 +2642,15 @@ exit:
2118 * with size enough to hold %items_initial number of items. Memory is 2642 * with size enough to hold %items_initial number of items. Memory is
2119 * DMA-able but client must map/unmap before interoperating with the device. 2643 * DMA-able but client must map/unmap before interoperating with the device.
2120 */ 2644 */
2121static struct vxge_hw_mempool* 2645static struct vxge_hw_mempool *
2122__vxge_hw_mempool_create( 2646__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
2123 struct __vxge_hw_device *devh, 2647 u32 memblock_size,
2124 u32 memblock_size, 2648 u32 item_size,
2125 u32 item_size, 2649 u32 items_priv_size,
2126 u32 items_priv_size, 2650 u32 items_initial,
2127 u32 items_initial, 2651 u32 items_max,
2128 u32 items_max, 2652 struct vxge_hw_mempool_cbs *mp_callback,
2129 struct vxge_hw_mempool_cbs *mp_callback, 2653 void *userdata)
2130 void *userdata)
2131{ 2654{
2132 enum vxge_hw_status status = VXGE_HW_OK; 2655 enum vxge_hw_status status = VXGE_HW_OK;
2133 u32 memblocks_to_allocate; 2656 u32 memblocks_to_allocate;
@@ -2185,7 +2708,6 @@ __vxge_hw_mempool_create(
2185 mempool->memblocks_dma_arr = 2708 mempool->memblocks_dma_arr =
2186 vzalloc(sizeof(struct vxge_hw_mempool_dma) * 2709 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2187 mempool->memblocks_max); 2710 mempool->memblocks_max);
2188
2189 if (mempool->memblocks_dma_arr == NULL) { 2711 if (mempool->memblocks_dma_arr == NULL) {
2190 __vxge_hw_mempool_destroy(mempool); 2712 __vxge_hw_mempool_destroy(mempool);
2191 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2713 status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2222,122 +2744,188 @@ exit:
2222} 2744}
2223 2745
2224/* 2746/*
2225 * vxge_hw_mempool_destroy 2747 * __vxge_hw_ring_abort - Returns the RxD
2748 * This function terminates the RxDs of ring
2226 */ 2749 */
2227static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) 2750static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
2228{ 2751{
2229 u32 i, j; 2752 void *rxdh;
2230 struct __vxge_hw_device *devh = mempool->devh; 2753 struct __vxge_hw_channel *channel;
2231
2232 for (i = 0; i < mempool->memblocks_allocated; i++) {
2233 struct vxge_hw_mempool_dma *dma_object;
2234 2754
2235 vxge_assert(mempool->memblocks_arr[i]); 2755 channel = &ring->channel;
2236 vxge_assert(mempool->memblocks_dma_arr + i);
2237 2756
2238 dma_object = mempool->memblocks_dma_arr + i; 2757 for (;;) {
2758 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2239 2759
2240 for (j = 0; j < mempool->items_per_memblock; j++) { 2760 if (rxdh == NULL)
2241 u32 index = i * mempool->items_per_memblock + j; 2761 break;
2242 2762
2243 /* to skip last partially filled(if any) memblock */ 2763 vxge_hw_channel_dtr_complete(channel);
2244 if (index >= mempool->items_current)
2245 break;
2246 }
2247 2764
2248 vfree(mempool->memblocks_priv_arr[i]); 2765 if (ring->rxd_term)
2766 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2767 channel->userdata);
2249 2768
2250 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], 2769 vxge_hw_channel_dtr_free(channel, rxdh);
2251 mempool->memblock_size, dma_object);
2252 } 2770 }
2253 2771
2254 vfree(mempool->items_arr); 2772 return VXGE_HW_OK;
2773}
2255 2774
2256 vfree(mempool->memblocks_dma_arr); 2775/*
2776 * __vxge_hw_ring_reset - Resets the ring
2777 * This function resets the ring during vpath reset operation
2778 */
2779static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2780{
2781 enum vxge_hw_status status = VXGE_HW_OK;
2782 struct __vxge_hw_channel *channel;
2257 2783
2258 vfree(mempool->memblocks_priv_arr); 2784 channel = &ring->channel;
2259 2785
2260 vfree(mempool->memblocks_arr); 2786 __vxge_hw_ring_abort(ring);
2261 2787
2262 vfree(mempool); 2788 status = __vxge_hw_channel_reset(channel);
2789
2790 if (status != VXGE_HW_OK)
2791 goto exit;
2792
2793 if (ring->rxd_init) {
2794 status = vxge_hw_ring_replenish(ring);
2795 if (status != VXGE_HW_OK)
2796 goto exit;
2797 }
2798exit:
2799 return status;
2263} 2800}
2264 2801
2265/* 2802/*
2266 * __vxge_hw_device_fifo_config_check - Check fifo configuration. 2803 * __vxge_hw_ring_delete - Removes the ring
2267 * Check the fifo configuration 2804 * This function frees up the memory pool and removes the ring
2268 */ 2805 */
2269static enum vxge_hw_status 2806static enum vxge_hw_status
2270__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) 2807__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
2271{ 2808{
2272 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || 2809 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2273 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) 2810
2274 return VXGE_HW_BADCFG_FIFO_BLOCKS; 2811 __vxge_hw_ring_abort(ring);
2812
2813 if (ring->mempool)
2814 __vxge_hw_mempool_destroy(ring->mempool);
2815
2816 vp->vpath->ringh = NULL;
2817 __vxge_hw_channel_free(&ring->channel);
2275 2818
2276 return VXGE_HW_OK; 2819 return VXGE_HW_OK;
2277} 2820}
2278 2821
2279/* 2822/*
2280 * __vxge_hw_device_vpath_config_check - Check vpath configuration. 2823 * __vxge_hw_ring_create - Create a Ring
2281 * Check the vpath configuration 2824 * This function creates Ring and initializes it.
2282 */ 2825 */
2283static enum vxge_hw_status 2826static enum vxge_hw_status
2284__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) 2827__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2828 struct vxge_hw_ring_attr *attr)
2285{ 2829{
2286 enum vxge_hw_status status; 2830 enum vxge_hw_status status = VXGE_HW_OK;
2831 struct __vxge_hw_ring *ring;
2832 u32 ring_length;
2833 struct vxge_hw_ring_config *config;
2834 struct __vxge_hw_device *hldev;
2835 u32 vp_id;
2836 struct vxge_hw_mempool_cbs ring_mp_callback;
2287 2837
2288 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || 2838 if ((vp == NULL) || (attr == NULL)) {
2289 (vp_config->min_bandwidth > 2839 status = VXGE_HW_FAIL;
2290 VXGE_HW_VPATH_BANDWIDTH_MAX)) 2840 goto exit;
2291 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; 2841 }
2292 2842
2293 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); 2843 hldev = vp->vpath->hldev;
2294 if (status != VXGE_HW_OK) 2844 vp_id = vp->vpath->vp_id;
2295 return status;
2296 2845
2297 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && 2846 config = &hldev->config.vp_config[vp_id].ring;
2298 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
2299 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
2300 return VXGE_HW_BADCFG_VPATH_MTU;
2301 2847
2302 if ((vp_config->rpa_strip_vlan_tag != 2848 ring_length = config->ring_blocks *
2303 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && 2849 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2304 (vp_config->rpa_strip_vlan_tag !=
2305 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
2306 (vp_config->rpa_strip_vlan_tag !=
2307 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
2308 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
2309 2850
2310 return VXGE_HW_OK; 2851 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
2311} 2852 VXGE_HW_CHANNEL_TYPE_RING,
2853 ring_length,
2854 attr->per_rxd_space,
2855 attr->userdata);
2856 if (ring == NULL) {
2857 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2858 goto exit;
2859 }
2312 2860
2313/* 2861 vp->vpath->ringh = ring;
2314 * __vxge_hw_device_config_check - Check device configuration. 2862 ring->vp_id = vp_id;
2315 * Check the device configuration 2863 ring->vp_reg = vp->vpath->vp_reg;
2316 */ 2864 ring->common_reg = hldev->common_reg;
2317static enum vxge_hw_status 2865 ring->stats = &vp->vpath->sw_stats->ring_stats;
2318__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) 2866 ring->config = config;
2319{ 2867 ring->callback = attr->callback;
2320 u32 i; 2868 ring->rxd_init = attr->rxd_init;
2321 enum vxge_hw_status status; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode;
2871 ring->rxds_limit = config->rxds_limit;
2322 2872
2323 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && 2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
2324 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && 2874 ring->rxd_priv_size =
2325 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && 2875 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
2326 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) 2876 ring->per_rxd_space = attr->per_rxd_space;
2327 return VXGE_HW_BADCFG_INTR_MODE;
2328 2877
2329 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && 2878 ring->rxd_priv_size =
2330 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) 2879 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2331 return VXGE_HW_BADCFG_RTS_MAC_EN; 2880 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2332 2881
2333 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2882 /* how many RxDs can fit into one block. Depends on configured
2334 status = __vxge_hw_device_vpath_config_check( 2883 * buffer_mode. */
2335 &new_config->vp_config[i]); 2884 ring->rxds_per_block =
2336 if (status != VXGE_HW_OK) 2885 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2337 return status; 2886
2887 /* calculate actual RxD block private size */
2888 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2889 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2890 ring->mempool = __vxge_hw_mempool_create(hldev,
2891 VXGE_HW_BLOCK_SIZE,
2892 VXGE_HW_BLOCK_SIZE,
2893 ring->rxdblock_priv_size,
2894 ring->config->ring_blocks,
2895 ring->config->ring_blocks,
2896 &ring_mp_callback,
2897 ring);
2898 if (ring->mempool == NULL) {
2899 __vxge_hw_ring_delete(vp);
2900 return VXGE_HW_ERR_OUT_OF_MEMORY;
2338 } 2901 }
2339 2902
2340 return VXGE_HW_OK; 2903 status = __vxge_hw_channel_initialize(&ring->channel);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908
2909 /* Note:
2910 * Specifying rxd_init callback means two things:
2911 * 1) rxds need to be initialized by driver at channel-open time;
2912 * 2) rxds need to be posted at channel-open time
2913 * (that's what the initial_replenish() below does)
2914 * Currently we don't have a case when the 1) is done without the 2).
2915 */
2916 if (ring->rxd_init) {
2917 status = vxge_hw_ring_replenish(ring);
2918 if (status != VXGE_HW_OK) {
2919 __vxge_hw_ring_delete(vp);
2920 goto exit;
2921 }
2922 }
2923
2924 /* initial replenish will increment the counter in its post() routine,
2925 * we have to reset it */
2926 ring->stats->common_stats.usage_cnt = 0;
2927exit:
2928 return status;
2341} 2929}
2342 2930
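
On the new side, __vxge_hw_ring_create() now sits after the helpers it depends on. Everything it consumes from the attribute block is visible above: callback, rxd_init, rxd_term, per_rxd_space and userdata. A hypothetical caller (the my_* names and ring_private are placeholders, not driver symbols) would fill the attributes and create the ring roughly like this:

	struct vxge_hw_ring_attr attr = {
		.callback      = my_rx_callback,
		.rxd_init      = my_rxd_init,
		.rxd_term      = my_rxd_term,
		.per_rxd_space = sizeof(struct my_rxd_priv),
		.userdata      = ring_private,
	};
	enum vxge_hw_status status;

	status = __vxge_hw_ring_create(vp, &attr);
	if (status != VXGE_HW_OK)
		return status;
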
2343/* 2931/*
@@ -2359,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2359 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; 2947 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
2360 2948
2361 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2949 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2362
2363 device_config->vp_config[i].vp_id = i; 2950 device_config->vp_config[i].vp_id = i;
2364 2951
2365 device_config->vp_config[i].min_bandwidth = 2952 device_config->vp_config[i].min_bandwidth =
@@ -2499,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2499} 3086}
2500 3087
2501/* 3088/*
2502 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2503 * Set the swapper bits appropriately for the legacy section.
2504 */
2505static enum vxge_hw_status
2506__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2507{
2508 u64 val64;
2509 enum vxge_hw_status status = VXGE_HW_OK;
2510
2511 val64 = readq(&legacy_reg->toc_swapper_fb);
2512
2513 wmb();
2514
2515 switch (val64) {
2516
2517 case VXGE_HW_SWAPPER_INITIAL_VALUE:
2518 return status;
2519
2520 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2521 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2522 &legacy_reg->pifm_rd_swap_en);
2523 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2524 &legacy_reg->pifm_rd_flip_en);
2525 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2526 &legacy_reg->pifm_wr_swap_en);
2527 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2528 &legacy_reg->pifm_wr_flip_en);
2529 break;
2530
2531 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2532 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2533 &legacy_reg->pifm_rd_swap_en);
2534 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2535 &legacy_reg->pifm_wr_swap_en);
2536 break;
2537
2538 case VXGE_HW_SWAPPER_BIT_FLIPPED:
2539 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2540 &legacy_reg->pifm_rd_flip_en);
2541 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2542 &legacy_reg->pifm_wr_flip_en);
2543 break;
2544 }
2545
2546 wmb();
2547
2548 val64 = readq(&legacy_reg->toc_swapper_fb);
2549
2550 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2551 status = VXGE_HW_ERR_SWAPPER_CTRL;
2552
2553 return status;
2554}
2555
2556/*
2557 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. 3089 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2558 * Set the swapper bits appropriately for the vpath. 3090 * Set the swapper bits appropriately for the vpath.
2559 */ 3091 */
@@ -2577,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2577 * Set the swapper bits appropriately for the vpath. 3109 * Set the swapper bits appropriately for the vpath.
2578 */ 3110 */
2579static enum vxge_hw_status 3111static enum vxge_hw_status
2580__vxge_hw_kdfc_swapper_set( 3112__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2581 struct vxge_hw_legacy_reg __iomem *legacy_reg, 3113 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2582 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2583{ 3114{
2584 u64 val64; 3115 u64 val64;
2585 3116
@@ -2829,6 +3360,69 @@ exit:
2829} 3360}
2830 3361
2831/* 3362/*
3363 * __vxge_hw_fifo_abort - Returns the TxD
3364 * This function terminates the TxDs of fifo
3365 */
3366static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3367{
3368 void *txdlh;
3369
3370 for (;;) {
3371 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3372
3373 if (txdlh == NULL)
3374 break;
3375
3376 vxge_hw_channel_dtr_complete(&fifo->channel);
3377
3378 if (fifo->txdl_term) {
3379 fifo->txdl_term(txdlh,
3380 VXGE_HW_TXDL_STATE_POSTED,
3381 fifo->channel.userdata);
3382 }
3383
3384 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3385 }
3386
3387 return VXGE_HW_OK;
3388}
3389
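For context, the txdl_term hook invoked in the loop above is supplied by the driver when the fifo is created; the HW layer calls it once for every TxD that is still posted so the driver can release its buffers. A minimal sketch of such a handler, assuming only the call signature seen above (the body and the comments are illustrative, not the actual vxge-main.c handler):

static void example_txdl_term(void *txdlh, enum vxge_hw_txdl_state state,
			      void *userdata)
{
	/* Only descriptors that were actually posted to the hardware
	 * still carry buffers that need to be released.
	 */
	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* A real handler would look up the driver-private data kept
	 * alongside txdlh, unmap the DMA buffers and free the skb.
	 */
}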
3390/*
3391 * __vxge_hw_fifo_reset - Resets the fifo
3392 * This function resets the fifo during vpath reset operation
3393 */
3394static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3395{
3396 enum vxge_hw_status status = VXGE_HW_OK;
3397
3398 __vxge_hw_fifo_abort(fifo);
3399 status = __vxge_hw_channel_reset(&fifo->channel);
3400
3401 return status;
3402}
3403
3404/*
3405 * __vxge_hw_fifo_delete - Removes the FIFO
3406 * This function frees up the memory pool and removes the FIFO
3407 */
3408static enum vxge_hw_status
3409__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3410{
3411 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3412
3413 __vxge_hw_fifo_abort(fifo);
3414
3415 if (fifo->mempool)
3416 __vxge_hw_mempool_destroy(fifo->mempool);
3417
3418 vp->vpath->fifoh = NULL;
3419
3420 __vxge_hw_channel_free(&fifo->channel);
3421
3422 return VXGE_HW_OK;
3423}
3424
3425/*
2832 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD 3426 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2833 * list callback 3427 * list callback
2834 * This function is the callback passed to __vxge_hw_mempool_create to create memory 3428 * This function is the callback passed to __vxge_hw_mempool_create to create memory
@@ -2993,69 +3587,6 @@ exit:
2993} 3587}
2994 3588
2995/* 3589/*
2996 * __vxge_hw_fifo_abort - Returns the outstanding TxDs
2997 * This function terminates the TxDs of the fifo
2998 */
2999static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3000{
3001 void *txdlh;
3002
3003 for (;;) {
3004 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3005
3006 if (txdlh == NULL)
3007 break;
3008
3009 vxge_hw_channel_dtr_complete(&fifo->channel);
3010
3011 if (fifo->txdl_term) {
3012 fifo->txdl_term(txdlh,
3013 VXGE_HW_TXDL_STATE_POSTED,
3014 fifo->channel.userdata);
3015 }
3016
3017 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3018 }
3019
3020 return VXGE_HW_OK;
3021}
3022
3023/*
3024 * __vxge_hw_fifo_reset - Resets the fifo
3025 * This function resets the fifo during vpath reset operation
3026 */
3027static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3028{
3029 enum vxge_hw_status status = VXGE_HW_OK;
3030
3031 __vxge_hw_fifo_abort(fifo);
3032 status = __vxge_hw_channel_reset(&fifo->channel);
3033
3034 return status;
3035}
3036
3037/*
3038 * __vxge_hw_fifo_delete - Removes the FIFO
3039 * This function frees up the memory pool and removes the FIFO
3040 */
3041static enum vxge_hw_status
3042__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3043{
3044 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3045
3046 __vxge_hw_fifo_abort(fifo);
3047
3048 if (fifo->mempool)
3049 __vxge_hw_mempool_destroy(fifo->mempool);
3050
3051 vp->vpath->fifoh = NULL;
3052
3053 __vxge_hw_channel_free(&fifo->channel);
3054
3055 return VXGE_HW_OK;
3056}
3057
3058/*
3059 * __vxge_hw_vpath_pci_read - Read the content of given address 3590 * __vxge_hw_vpath_pci_read - Read the content of given address
3060 * in pci config space. 3591 * in pci config space.
3061 * Read from the vpath pci config space. 3592 * Read from the vpath pci config space.
@@ -3786,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3786 vp_reg = vpath->vp_reg; 4317 vp_reg = vpath->vp_reg;
3787 config = vpath->vp_config; 4318 config = vpath->vp_config;
3788 4319
3789 writeq((u64)0, &vp_reg->tim_dest_addr); 4320 writeq(0, &vp_reg->tim_dest_addr);
3790 writeq((u64)0, &vp_reg->tim_vpath_map); 4321 writeq(0, &vp_reg->tim_vpath_map);
3791 writeq((u64)0, &vp_reg->tim_bitmap); 4322 writeq(0, &vp_reg->tim_bitmap);
3792 writeq((u64)0, &vp_reg->tim_remap); 4323 writeq(0, &vp_reg->tim_remap);
3793 4324
3794 if (config->ring.enable == VXGE_HW_RING_ENABLE) 4325 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3795 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( 4326 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -4021,8 +4552,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4021 return status; 4552 return status;
4022} 4553}
4023 4554
4024void 4555void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4025vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4026{ 4556{
4027 struct __vxge_hw_virtualpath *vpath; 4557 struct __vxge_hw_virtualpath *vpath;
4028 struct vxge_hw_vpath_reg __iomem *vp_reg; 4558 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4033,17 +4563,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4033 vp_reg = vpath->vp_reg; 4563 vp_reg = vpath->vp_reg;
4034 config = vpath->vp_config; 4564 config = vpath->vp_config;
4035 4565
4036 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4566 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4567 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4568 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4037 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4569 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4038 4570 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4039 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { 4571 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4040 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4041 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4042 writeq(val64,
4043 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4044 }
4045 } 4572 }
4046} 4573}
4574
4047/* 4575/*
4048 * __vxge_hw_vpath_initialize 4576 * __vxge_hw_vpath_initialize
4049 * This routine is the final phase of init which initializes the 4577 * This routine is the final phase of init which initializes the
@@ -4067,22 +4595,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4067 vp_reg = vpath->vp_reg; 4595 vp_reg = vpath->vp_reg;
4068 4596
4069 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); 4597 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4070
4071 if (status != VXGE_HW_OK) 4598 if (status != VXGE_HW_OK)
4072 goto exit; 4599 goto exit;
4073 4600
4074 status = __vxge_hw_vpath_mac_configure(hldev, vp_id); 4601 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4075
4076 if (status != VXGE_HW_OK) 4602 if (status != VXGE_HW_OK)
4077 goto exit; 4603 goto exit;
4078 4604
4079 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); 4605 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4080
4081 if (status != VXGE_HW_OK) 4606 if (status != VXGE_HW_OK)
4082 goto exit; 4607 goto exit;
4083 4608
4084 status = __vxge_hw_vpath_tim_configure(hldev, vp_id); 4609 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4085
4086 if (status != VXGE_HW_OK) 4610 if (status != VXGE_HW_OK)
4087 goto exit; 4611 goto exit;
4088 4612
@@ -4090,7 +4614,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4090 4614
4091 /* Get MRRS value from device control */ 4615 /* Get MRRS value from device control */
4092 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); 4616 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4093
4094 if (status == VXGE_HW_OK) { 4617 if (status == VXGE_HW_OK) {
4095 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; 4618 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4096 val64 &= 4619 val64 &=
@@ -4114,6 +4637,28 @@ exit:
4114} 4637}
4115 4638
4116/* 4639/*
4640 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4641 * This routine closes all the channels it opened and frees up memory
4642 */
4643static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4644{
4645 struct __vxge_hw_virtualpath *vpath;
4646
4647 vpath = &hldev->virtual_paths[vp_id];
4648
4649 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4650 goto exit;
4651
4652 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4653 vpath->hldev->tim_int_mask1, vpath->vp_id);
4654 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4655
4656 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4657exit:
4658 return;
4659}
4660
4661/*
4117 * __vxge_hw_vp_initialize - Initialize Virtual Path structure 4662 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4118 * This routine is the initial phase of init which resets the vpath and 4663 * This routine is the initial phase of init which resets the vpath and
4119 * initializes the software support structures. 4664 * initializes the software support structures.
@@ -4169,29 +4714,6 @@ exit:
4169} 4714}
4170 4715
4171/* 4716/*
4172 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4173 * This routine closes all the channels it opened and frees up memory
4174 */
4175static void
4176__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4177{
4178 struct __vxge_hw_virtualpath *vpath;
4179
4180 vpath = &hldev->virtual_paths[vp_id];
4181
4182 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4183 goto exit;
4184
4185 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4186 vpath->hldev->tim_int_mask1, vpath->vp_id);
4187 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4188
4189 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4190exit:
4191 return;
4192}
4193
4194/*
4195 * vxge_hw_vpath_mtu_set - Set MTU. 4717 * vxge_hw_vpath_mtu_set - Set MTU.
4196 * Set new MTU value. Example, to use jumbo frames: 4718 * Set new MTU value. Example, to use jumbo frames:
4197 * vxge_hw_vpath_mtu_set(my_device, 9600); 4719 * vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4228,6 +4750,64 @@ exit:
4228} 4750}
4229 4751
4230/* 4752/*
4753 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4754 * Enable the DMA vpath statistics. The function is to be called to re-enable
4755 * the adapter to update stats into the host memory
4756 */
4757static enum vxge_hw_status
4758vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4759{
4760 enum vxge_hw_status status = VXGE_HW_OK;
4761 struct __vxge_hw_virtualpath *vpath;
4762
4763 vpath = vp->vpath;
4764
4765 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4766 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4767 goto exit;
4768 }
4769
4770 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4771 sizeof(struct vxge_hw_vpath_stats_hw_info));
4772
4773 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4774exit:
4775 return status;
4776}
4777
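The save-then-refresh pattern above keeps the previous hardware snapshot in hw_stats_sav while hw_stats is re-read, which is what allows a caller to derive per-interval deltas. A hedged sketch of such a consumer, using only fields that appear in this file (the helper name and its use are illustrative, not part of the driver):

/* Illustrative only: frames received since the previous
 * vxge_hw_vpath_stats_enable() call, from the two snapshots kept
 * in the virtual path structure.
 */
static u32 example_rx_frames_delta(struct __vxge_hw_virtualpath *vpath)
{
	return vpath->hw_stats->rx_frm_transferred -
	       vpath->hw_stats_sav->rx_frm_transferred;
}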
4778/*
4779 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4780 * This function allocates a block from block pool or from the system
4781 */
4782static struct __vxge_hw_blockpool_entry *
4783__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4784{
4785 struct __vxge_hw_blockpool_entry *entry = NULL;
4786 struct __vxge_hw_blockpool *blockpool;
4787
4788 blockpool = &devh->block_pool;
4789
4790 if (size == blockpool->block_size) {
4791
4792 if (!list_empty(&blockpool->free_block_list))
4793 entry = (struct __vxge_hw_blockpool_entry *)
4794 list_first_entry(&blockpool->free_block_list,
4795 struct __vxge_hw_blockpool_entry,
4796 item);
4797
4798 if (entry != NULL) {
4799 list_del(&entry->item);
4800 blockpool->pool_size--;
4801 }
4802 }
4803
4804 if (entry != NULL)
4805 __vxge_hw_blockpool_blocks_add(blockpool);
4806
4807 return entry;
4808}
4809
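Since the allocator above only ever hands out blocks of exactly blockpool->block_size, callers pair it with __vxge_hw_blockpool_block_free() for the same fixed size; vxge_hw_vpath_open() below does this for the per-vpath stats block. A condensed sketch of that pairing, with the helper names and the close-path placement assumed rather than copied from this file:

/* Illustrative pairing, assuming the usual hldev/vpath context of
 * vxge_hw_vpath_open()/vxge_hw_vpath_close().
 */
static enum vxge_hw_status
example_stats_block_setup(struct __vxge_hw_device *hldev,
			  struct __vxge_hw_virtualpath *vpath)
{
	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
							VXGE_HW_BLOCK_SIZE);
	if (vpath->stats_block == NULL)
		return VXGE_HW_ERR_OUT_OF_MEMORY;

	return VXGE_HW_OK;
}

static void example_stats_block_teardown(struct __vxge_hw_device *hldev,
					 struct __vxge_hw_virtualpath *vpath)
{
	/* Block-sized entries go back on the pool's free list. */
	__vxge_hw_blockpool_block_free(hldev, vpath->stats_block);
	vpath->stats_block = NULL;
}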
4810/*
4231 * vxge_hw_vpath_open - Open a virtual path on a given adapter 4811 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4232 * This function is used to open access to a virtual path of an 4812 * This function is used to open access to a virtual path of an
4233 * adapter for offload, GRO operations. This function returns 4813 * adapter for offload, GRO operations. This function returns
@@ -4251,7 +4831,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4251 4831
4252 status = __vxge_hw_vp_initialize(hldev, attr->vp_id, 4832 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4253 &hldev->config.vp_config[attr->vp_id]); 4833 &hldev->config.vp_config[attr->vp_id]);
4254
4255 if (status != VXGE_HW_OK) 4834 if (status != VXGE_HW_OK)
4256 goto vpath_open_exit1; 4835 goto vpath_open_exit1;
4257 4836
@@ -4283,7 +4862,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4283 4862
4284 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, 4863 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4285 VXGE_HW_BLOCK_SIZE); 4864 VXGE_HW_BLOCK_SIZE);
4286
4287 if (vpath->stats_block == NULL) { 4865 if (vpath->stats_block == NULL) {
4288 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4866 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4289 goto vpath_open_exit8; 4867 goto vpath_open_exit8;
@@ -4342,8 +4920,7 @@ vpath_open_exit1:
4342 * This function is used to close access to a virtual path opened 4920 * This function is used to close access to a virtual path opened
4343 * earlier. 4921 * earlier.
4344 */ 4922 */
4345void 4923void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4346vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4347{ 4924{
4348 struct __vxge_hw_virtualpath *vpath = vp->vpath; 4925 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4349 struct __vxge_hw_ring *ring = vpath->ringh; 4926 struct __vxge_hw_ring *ring = vpath->ringh;
@@ -4379,6 +4956,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4379} 4956}
4380 4957
4381/* 4958/*
4959 * __vxge_hw_blockpool_block_free - Frees a block from block pool
4960 * @devh: Hal device
4961 * @entry: Entry of block to be freed
4962 *
4963 * This function frees a block from block pool
4964 */
4965static void
4966__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4967 struct __vxge_hw_blockpool_entry *entry)
4968{
4969 struct __vxge_hw_blockpool *blockpool;
4970
4971 blockpool = &devh->block_pool;
4972
4973 if (entry->length == blockpool->block_size) {
4974 list_add(&entry->item, &blockpool->free_block_list);
4975 blockpool->pool_size++;
4976 }
4977
4978 __vxge_hw_blockpool_blocks_remove(blockpool);
4979}
4980
4981/*
4382 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open 4982 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4383 * This function is used to close access to a virtual path opened 4983 * This function is used to close access to a virtual path opened
4384 * earlier. 4984 * earlier.
@@ -4529,728 +5129,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4529 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 5129 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4530 &hldev->common_reg->cmn_rsthdlr_cfg1); 5130 &hldev->common_reg->cmn_rsthdlr_cfg1);
4531} 5131}
4532
4533/*
4534 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4535 * Enable the DMA vpath statistics. The function is to be called to re-enable
4536 * the adapter to update stats into the host memory
4537 */
4538static enum vxge_hw_status
4539vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4540{
4541 enum vxge_hw_status status = VXGE_HW_OK;
4542 struct __vxge_hw_virtualpath *vpath;
4543
4544 vpath = vp->vpath;
4545
4546 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4547 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4548 goto exit;
4549 }
4550
4551 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4552 sizeof(struct vxge_hw_vpath_stats_hw_info));
4553
4554 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4555exit:
4556 return status;
4557}
4558
4559/*
4560 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4561 * and offset and perform an operation
4562 */
4563static enum vxge_hw_status
4564__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4565 u32 operation, u32 offset, u64 *stat)
4566{
4567 u64 val64;
4568 enum vxge_hw_status status = VXGE_HW_OK;
4569 struct vxge_hw_vpath_reg __iomem *vp_reg;
4570
4571 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4572 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4573 goto vpath_stats_access_exit;
4574 }
4575
4576 vp_reg = vpath->vp_reg;
4577
4578 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4579 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4580 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4581
4582 status = __vxge_hw_pio_mem_write64(val64,
4583 &vp_reg->xmac_stats_access_cmd,
4584 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4585 vpath->hldev->config.device_poll_millis);
4586
4587 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4588 *stat = readq(&vp_reg->xmac_stats_access_data);
4589 else
4590 *stat = 0;
4591
4592vpath_stats_access_exit:
4593 return status;
4594}
4595
4596/*
4597 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4598 */
4599static enum vxge_hw_status
4600__vxge_hw_vpath_xmac_tx_stats_get(
4601 struct __vxge_hw_virtualpath *vpath,
4602 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4603{
4604 u64 *val64;
4605 int i;
4606 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4607 enum vxge_hw_status status = VXGE_HW_OK;
4608
4609 val64 = (u64 *) vpath_tx_stats;
4610
4611 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4612 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4613 goto exit;
4614 }
4615
4616 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4617 status = __vxge_hw_vpath_stats_access(vpath,
4618 VXGE_HW_STATS_OP_READ,
4619 offset, val64);
4620 if (status != VXGE_HW_OK)
4621 goto exit;
4622 offset++;
4623 val64++;
4624 }
4625exit:
4626 return status;
4627}
4628
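The loop above relies on struct vxge_hw_xmac_vpath_tx_stats being laid out as consecutive 64-bit counters, so the whole block can be filled by issuing sizeof(struct)/8 reads at successive offsets. A standalone restatement of that pattern, with a hypothetical read_stat() accessor standing in for __vxge_hw_vpath_stats_access():

#include <stdint.h>
#include <stddef.h>

/* Fill a counters structure that consists of nothing but consecutive
 * u64 fields by reading one 64-bit word per statistics offset.
 */
static void read_stats_block(uint64_t (*read_stat)(uint32_t offset),
			     void *stats, size_t stats_size,
			     uint32_t first_offset)
{
	uint64_t *word = stats;
	size_t i;

	for (i = 0; i < stats_size / 8; i++)
		word[i] = read_stat(first_offset + i);
}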
4629/*
4630 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4631 */
4632static enum vxge_hw_status
4633__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4634 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4635{
4636 u64 *val64;
4637 enum vxge_hw_status status = VXGE_HW_OK;
4638 int i;
4639 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4640 val64 = (u64 *) vpath_rx_stats;
4641
4642 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4643 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4644 goto exit;
4645 }
4646 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4647 status = __vxge_hw_vpath_stats_access(vpath,
4648 VXGE_HW_STATS_OP_READ,
4649 offset >> 3, val64);
4650 if (status != VXGE_HW_OK)
4651 goto exit;
4652
4653 offset += 8;
4654 val64++;
4655 }
4656exit:
4657 return status;
4658}
4659
4660/*
4661 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4662 */
4663static enum vxge_hw_status
4664__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4665 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4666{
4667 u64 val64;
4668 enum vxge_hw_status status = VXGE_HW_OK;
4669 struct vxge_hw_vpath_reg __iomem *vp_reg;
4670
4671 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4672 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4673 goto exit;
4674 }
4675 vp_reg = vpath->vp_reg;
4676
4677 val64 = readq(&vp_reg->vpath_debug_stats0);
4678 hw_stats->ini_num_mwr_sent =
4679 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4680
4681 val64 = readq(&vp_reg->vpath_debug_stats1);
4682 hw_stats->ini_num_mrd_sent =
4683 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4684
4685 val64 = readq(&vp_reg->vpath_debug_stats2);
4686 hw_stats->ini_num_cpl_rcvd =
4687 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4688
4689 val64 = readq(&vp_reg->vpath_debug_stats3);
4690 hw_stats->ini_num_mwr_byte_sent =
4691 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4692
4693 val64 = readq(&vp_reg->vpath_debug_stats4);
4694 hw_stats->ini_num_cpl_byte_rcvd =
4695 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4696
4697 val64 = readq(&vp_reg->vpath_debug_stats5);
4698 hw_stats->wrcrdtarb_xoff =
4699 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4700
4701 val64 = readq(&vp_reg->vpath_debug_stats6);
4702 hw_stats->rdcrdtarb_xoff =
4703 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4704
4705 val64 = readq(&vp_reg->vpath_genstats_count01);
4706 hw_stats->vpath_genstats_count0 =
4707 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4708 val64);
4709
4710 val64 = readq(&vp_reg->vpath_genstats_count01);
4711 hw_stats->vpath_genstats_count1 =
4712 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4713 val64);
4714
4715 val64 = readq(&vp_reg->vpath_genstats_count23);
4716 hw_stats->vpath_genstats_count2 =
4717 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4718 val64);
4719
4720 val64 = readq(&vp_reg->vpath_genstats_count23);
4721 hw_stats->vpath_genstats_count3 =
4722 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4723 val64);
4724
4725 val64 = readq(&vp_reg->vpath_genstats_count4);
4726 hw_stats->vpath_genstats_count4 =
4727 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4728 val64);
4729
4730 val64 = readq(&vp_reg->vpath_genstats_count5);
4731 hw_stats->vpath_genstats_count5 =
4732 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4733 val64);
4734
4735 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4736 if (status != VXGE_HW_OK)
4737 goto exit;
4738
4739 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4740 if (status != VXGE_HW_OK)
4741 goto exit;
4742
4743 VXGE_HW_VPATH_STATS_PIO_READ(
4744 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4745
4746 hw_stats->prog_event_vnum0 =
4747 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4748
4749 hw_stats->prog_event_vnum1 =
4750 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4751
4752 VXGE_HW_VPATH_STATS_PIO_READ(
4753 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4754
4755 hw_stats->prog_event_vnum2 =
4756 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4757
4758 hw_stats->prog_event_vnum3 =
4759 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4760
4761 val64 = readq(&vp_reg->rx_multi_cast_stats);
4762 hw_stats->rx_multi_cast_frame_discard =
4763 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4764
4765 val64 = readq(&vp_reg->rx_frm_transferred);
4766 hw_stats->rx_frm_transferred =
4767 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4768
4769 val64 = readq(&vp_reg->rxd_returned);
4770 hw_stats->rxd_returned =
4771 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4772
4773 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4774 hw_stats->rx_mpa_len_fail_frms =
4775 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4776 hw_stats->rx_mpa_mrk_fail_frms =
4777 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4778 hw_stats->rx_mpa_crc_fail_frms =
4779 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4780
4781 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4782 hw_stats->rx_permitted_frms =
4783 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4784 hw_stats->rx_vp_reset_discarded_frms =
4785 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4786 hw_stats->rx_wol_frms =
4787 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4788
4789 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4790 hw_stats->tx_vp_reset_discarded_frms =
4791 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4792 val64);
4793exit:
4794 return status;
4795}
4796
4797
4798static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4799 unsigned long size)
4800{
4801 gfp_t flags;
4802 void *vaddr;
4803
4804 if (in_interrupt())
4805 flags = GFP_ATOMIC | GFP_DMA;
4806 else
4807 flags = GFP_KERNEL | GFP_DMA;
4808
4809 vaddr = kmalloc((size), flags);
4810
4811 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4812}
4813
4814static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4815 struct pci_dev **p_dma_acch)
4816{
4817 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4818 u8 *tmp = (u8 *)vaddr;
4819 tmp -= misaligned;
4820 kfree((void *)tmp);
4821}
4822
4823/*
4824 * __vxge_hw_blockpool_create - Create block pool
4825 */
4826
4827static enum vxge_hw_status
4828__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4829 struct __vxge_hw_blockpool *blockpool,
4830 u32 pool_size,
4831 u32 pool_max)
4832{
4833 u32 i;
4834 struct __vxge_hw_blockpool_entry *entry = NULL;
4835 void *memblock;
4836 dma_addr_t dma_addr;
4837 struct pci_dev *dma_handle;
4838 struct pci_dev *acc_handle;
4839 enum vxge_hw_status status = VXGE_HW_OK;
4840
4841 if (blockpool == NULL) {
4842 status = VXGE_HW_FAIL;
4843 goto blockpool_create_exit;
4844 }
4845
4846 blockpool->hldev = hldev;
4847 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4848 blockpool->pool_size = 0;
4849 blockpool->pool_max = pool_max;
4850 blockpool->req_out = 0;
4851
4852 INIT_LIST_HEAD(&blockpool->free_block_list);
4853 INIT_LIST_HEAD(&blockpool->free_entry_list);
4854
4855 for (i = 0; i < pool_size + pool_max; i++) {
4856 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4857 GFP_KERNEL);
4858 if (entry == NULL) {
4859 __vxge_hw_blockpool_destroy(blockpool);
4860 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4861 goto blockpool_create_exit;
4862 }
4863 list_add(&entry->item, &blockpool->free_entry_list);
4864 }
4865
4866 for (i = 0; i < pool_size; i++) {
4867
4868 memblock = vxge_os_dma_malloc(
4869 hldev->pdev,
4870 VXGE_HW_BLOCK_SIZE,
4871 &dma_handle,
4872 &acc_handle);
4873
4874 if (memblock == NULL) {
4875 __vxge_hw_blockpool_destroy(blockpool);
4876 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4877 goto blockpool_create_exit;
4878 }
4879
4880 dma_addr = pci_map_single(hldev->pdev, memblock,
4881 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4882
4883 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4884 dma_addr))) {
4885
4886 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4887 __vxge_hw_blockpool_destroy(blockpool);
4888 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4889 goto blockpool_create_exit;
4890 }
4891
4892 if (!list_empty(&blockpool->free_entry_list))
4893 entry = (struct __vxge_hw_blockpool_entry *)
4894 list_first_entry(&blockpool->free_entry_list,
4895 struct __vxge_hw_blockpool_entry,
4896 item);
4897
4898 if (entry == NULL)
4899 entry =
4900 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4901 GFP_KERNEL);
4902 if (entry != NULL) {
4903 list_del(&entry->item);
4904 entry->length = VXGE_HW_BLOCK_SIZE;
4905 entry->memblock = memblock;
4906 entry->dma_addr = dma_addr;
4907 entry->acc_handle = acc_handle;
4908 entry->dma_handle = dma_handle;
4909 list_add(&entry->item,
4910 &blockpool->free_block_list);
4911 blockpool->pool_size++;
4912 } else {
4913 __vxge_hw_blockpool_destroy(blockpool);
4914 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4915 goto blockpool_create_exit;
4916 }
4917 }
4918
4919blockpool_create_exit:
4920 return status;
4921}
4922
4923/*
4924 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4925 */
4926
4927static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4928{
4929
4930 struct __vxge_hw_device *hldev;
4931 struct list_head *p, *n;
4932 u16 ret;
4933
4934 if (blockpool == NULL) {
4935 ret = 1;
4936 goto exit;
4937 }
4938
4939 hldev = blockpool->hldev;
4940
4941 list_for_each_safe(p, n, &blockpool->free_block_list) {
4942
4943 pci_unmap_single(hldev->pdev,
4944 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4945 ((struct __vxge_hw_blockpool_entry *)p)->length,
4946 PCI_DMA_BIDIRECTIONAL);
4947
4948 vxge_os_dma_free(hldev->pdev,
4949 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4950 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4951
4952 list_del(
4953 &((struct __vxge_hw_blockpool_entry *)p)->item);
4954 kfree(p);
4955 blockpool->pool_size--;
4956 }
4957
4958 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4959 list_del(
4960 &((struct __vxge_hw_blockpool_entry *)p)->item);
4961 kfree((void *)p);
4962 }
4963 ret = 0;
4964exit:
4965 return;
4966}
4967
4968/*
4969 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4970 */
4971static
4972void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4973{
4974 u32 nreq = 0, i;
4975
4976 if ((blockpool->pool_size + blockpool->req_out) <
4977 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4978 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4979 blockpool->req_out += nreq;
4980 }
4981
4982 for (i = 0; i < nreq; i++)
4983 vxge_os_dma_malloc_async(
4984 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4985 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4986}
4987
4988/*
4989 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4990 */
4991static
4992void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4993{
4994 struct list_head *p, *n;
4995
4996 list_for_each_safe(p, n, &blockpool->free_block_list) {
4997
4998 if (blockpool->pool_size < blockpool->pool_max)
4999 break;
5000
5001 pci_unmap_single(
5002 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5003 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5004 ((struct __vxge_hw_blockpool_entry *)p)->length,
5005 PCI_DMA_BIDIRECTIONAL);
5006
5007 vxge_os_dma_free(
5008 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5009 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5010 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5011
5012 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5013
5014 list_add(p, &blockpool->free_entry_list);
5015
5016 blockpool->pool_size--;
5017
5018 }
5019}
5020
5021/*
5022 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5023 * Adds a block to block pool
5024 */
5025static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5026 void *block_addr,
5027 u32 length,
5028 struct pci_dev *dma_h,
5029 struct pci_dev *acc_handle)
5030{
5031 struct __vxge_hw_blockpool *blockpool;
5032 struct __vxge_hw_blockpool_entry *entry = NULL;
5033 dma_addr_t dma_addr;
5034 enum vxge_hw_status status = VXGE_HW_OK;
5035 u32 req_out;
5036
5037 blockpool = &devh->block_pool;
5038
5039 if (block_addr == NULL) {
5040 blockpool->req_out--;
5041 status = VXGE_HW_FAIL;
5042 goto exit;
5043 }
5044
5045 dma_addr = pci_map_single(devh->pdev, block_addr, length,
5046 PCI_DMA_BIDIRECTIONAL);
5047
5048 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5049
5050 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5051 blockpool->req_out--;
5052 status = VXGE_HW_FAIL;
5053 goto exit;
5054 }
5055
5056
5057 if (!list_empty(&blockpool->free_entry_list))
5058 entry = (struct __vxge_hw_blockpool_entry *)
5059 list_first_entry(&blockpool->free_entry_list,
5060 struct __vxge_hw_blockpool_entry,
5061 item);
5062
5063 if (entry == NULL)
5064 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5065 else
5066 list_del(&entry->item);
5067
5068 if (entry != NULL) {
5069 entry->length = length;
5070 entry->memblock = block_addr;
5071 entry->dma_addr = dma_addr;
5072 entry->acc_handle = acc_handle;
5073 entry->dma_handle = dma_h;
5074 list_add(&entry->item, &blockpool->free_block_list);
5075 blockpool->pool_size++;
5076 status = VXGE_HW_OK;
5077 } else
5078 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5079
5080 blockpool->req_out--;
5081
5082 req_out = blockpool->req_out;
5083exit:
5084 return;
5085}
5086
5087/*
5088 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5089 * Allocates a block of memory of given size, either from block pool
5090 * or by calling vxge_os_dma_malloc()
5091 */
5092static void *
5093__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5094 struct vxge_hw_mempool_dma *dma_object)
5095{
5096 struct __vxge_hw_blockpool_entry *entry = NULL;
5097 struct __vxge_hw_blockpool *blockpool;
5098 void *memblock = NULL;
5099 enum vxge_hw_status status = VXGE_HW_OK;
5100
5101 blockpool = &devh->block_pool;
5102
5103 if (size != blockpool->block_size) {
5104
5105 memblock = vxge_os_dma_malloc(devh->pdev, size,
5106 &dma_object->handle,
5107 &dma_object->acc_handle);
5108
5109 if (memblock == NULL) {
5110 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5111 goto exit;
5112 }
5113
5114 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5115 PCI_DMA_BIDIRECTIONAL);
5116
5117 if (unlikely(pci_dma_mapping_error(devh->pdev,
5118 dma_object->addr))) {
5119 vxge_os_dma_free(devh->pdev, memblock,
5120 &dma_object->acc_handle);
5121 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5122 goto exit;
5123 }
5124
5125 } else {
5126
5127 if (!list_empty(&blockpool->free_block_list))
5128 entry = (struct __vxge_hw_blockpool_entry *)
5129 list_first_entry(&blockpool->free_block_list,
5130 struct __vxge_hw_blockpool_entry,
5131 item);
5132
5133 if (entry != NULL) {
5134 list_del(&entry->item);
5135 dma_object->addr = entry->dma_addr;
5136 dma_object->handle = entry->dma_handle;
5137 dma_object->acc_handle = entry->acc_handle;
5138 memblock = entry->memblock;
5139
5140 list_add(&entry->item,
5141 &blockpool->free_entry_list);
5142 blockpool->pool_size--;
5143 }
5144
5145 if (memblock != NULL)
5146 __vxge_hw_blockpool_blocks_add(blockpool);
5147 }
5148exit:
5149 return memblock;
5150}
5151
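__vxge_hw_blockpool_malloc() above and __vxge_hw_blockpool_free() below are meant to be used as a pair keyed by the same size and vxge_hw_mempool_dma descriptor: block-sized requests are served from the pool's free list, anything else falls back to vxge_os_dma_malloc() and a fresh PCI mapping. A hedged sketch of a caller (the wrapper function is illustrative; only the two pool calls are taken from this file):

/* Illustrative only: take one pool block and give it back, using the
 * same size and dma_object for both calls so the pool fast path is hit.
 */
static void example_pool_roundtrip(struct __vxge_hw_device *devh,
				   struct vxge_hw_mempool_dma *dma_object)
{
	void *memblock;

	memblock = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
					      dma_object);
	if (memblock == NULL)
		return;

	/* ... memblock and dma_object->addr would be used here ... */

	__vxge_hw_blockpool_free(devh, memblock, VXGE_HW_BLOCK_SIZE,
				 dma_object);
}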
5152/*
5153 * __vxge_hw_blockpool_free - Frees the memory allocated with
5154 * __vxge_hw_blockpool_malloc
5155 */
5156static void
5157__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5158 void *memblock, u32 size,
5159 struct vxge_hw_mempool_dma *dma_object)
5160{
5161 struct __vxge_hw_blockpool_entry *entry = NULL;
5162 struct __vxge_hw_blockpool *blockpool;
5163 enum vxge_hw_status status = VXGE_HW_OK;
5164
5165 blockpool = &devh->block_pool;
5166
5167 if (size != blockpool->block_size) {
5168 pci_unmap_single(devh->pdev, dma_object->addr, size,
5169 PCI_DMA_BIDIRECTIONAL);
5170 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5171 } else {
5172
5173 if (!list_empty(&blockpool->free_entry_list))
5174 entry = (struct __vxge_hw_blockpool_entry *)
5175 list_first_entry(&blockpool->free_entry_list,
5176 struct __vxge_hw_blockpool_entry,
5177 item);
5178
5179 if (entry == NULL)
5180 entry = vmalloc(sizeof(
5181 struct __vxge_hw_blockpool_entry));
5182 else
5183 list_del(&entry->item);
5184
5185 if (entry != NULL) {
5186 entry->length = size;
5187 entry->memblock = memblock;
5188 entry->dma_addr = dma_object->addr;
5189 entry->acc_handle = dma_object->acc_handle;
5190 entry->dma_handle = dma_object->handle;
5191 list_add(&entry->item,
5192 &blockpool->free_block_list);
5193 blockpool->pool_size++;
5194 status = VXGE_HW_OK;
5195 } else
5196 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5197
5198 if (status == VXGE_HW_OK)
5199 __vxge_hw_blockpool_blocks_remove(blockpool);
5200 }
5201}
5202
5203/*
5204 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5205 * This function allocates a block from block pool or from the system
5206 */
5207static struct __vxge_hw_blockpool_entry *
5208__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5209{
5210 struct __vxge_hw_blockpool_entry *entry = NULL;
5211 struct __vxge_hw_blockpool *blockpool;
5212
5213 blockpool = &devh->block_pool;
5214
5215 if (size == blockpool->block_size) {
5216
5217 if (!list_empty(&blockpool->free_block_list))
5218 entry = (struct __vxge_hw_blockpool_entry *)
5219 list_first_entry(&blockpool->free_block_list,
5220 struct __vxge_hw_blockpool_entry,
5221 item);
5222
5223 if (entry != NULL) {
5224 list_del(&entry->item);
5225 blockpool->pool_size--;
5226 }
5227 }
5228
5229 if (entry != NULL)
5230 __vxge_hw_blockpool_blocks_add(blockpool);
5231
5232 return entry;
5233}
5234
5235/*
5236 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5237 * @devh: Hal device
5238 * @entry: Entry of block to be freed
5239 *
5240 * This function frees a block from block pool
5241 */
5242static void
5243__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5244 struct __vxge_hw_blockpool_entry *entry)
5245{
5246 struct __vxge_hw_blockpool *blockpool;
5247
5248 blockpool = &devh->block_pool;
5249
5250 if (entry->length == blockpool->block_size) {
5251 list_add(&entry->item, &blockpool->free_block_list);
5252 blockpool->pool_size++;
5253 }
5254
5255 __vxge_hw_blockpool_blocks_remove(blockpool);
5256}