author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/vxge
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r--  drivers/net/vxge/Makefile           7
-rw-r--r--  drivers/net/vxge/vxge-config.c   5123
-rw-r--r--  drivers/net/vxge/vxge-config.h   2111
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c  1132
-rw-r--r--  drivers/net/vxge/vxge-ethtool.h    67
-rw-r--r--  drivers/net/vxge/vxge-main.c     4854
-rw-r--r--  drivers/net/vxge/vxge-main.h      519
-rw-r--r--  drivers/net/vxge/vxge-reg.h      4636
-rw-r--r--  drivers/net/vxge/vxge-traffic.c  2514
-rw-r--r--  drivers/net/vxge/vxge-traffic.h  2298
-rw-r--r--  drivers/net/vxge/vxge-version.h    49
11 files changed, 23310 insertions, 0 deletions
diff --git a/drivers/net/vxge/Makefile b/drivers/net/vxge/Makefile
new file mode 100644
index 00000000000..b625e2c503f
--- /dev/null
+++ b/drivers/net/vxge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
3# Virtualized Server Adapter linux driver
4
5obj-$(CONFIG_VXGE) += vxge.o
6
7vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
new file mode 100644
index 00000000000..1520c574cb2
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.c
@@ -0,0 +1,5123 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
18#include <linux/slab.h>
19
20#include "vxge-traffic.h"
21#include "vxge-config.h"
22#include "vxge-main.h"
23
24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25 status = __vxge_hw_vpath_stats_access(vpath, \
26 VXGE_HW_STATS_OP_READ, \
27 offset, \
28 &val64); \
29 if (status != VXGE_HW_OK) \
30 return status; \
31}
32
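/*
 * Illustrative sketch, not part of the driver: VXGE_HW_VPATH_STATS_PIO_READ
 * is a statement macro that expands in the caller's scope, so the caller
 * must provide the locals it touches ('vpath', 'status', 'val64') and must
 * itself return enum vxge_hw_status, since the macro can return early.
 */
#if 0	/* example only */
static enum vxge_hw_status
example_read_one_stat(struct __vxge_hw_virtualpath *vpath, u64 *out)
{
	enum vxge_hw_status status;
	u64 val64;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
	*out = val64;
	return VXGE_HW_OK;
}
#endif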
33static void
34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
35{
36 u64 val64;
37
38 val64 = readq(&vp_reg->rxmac_vcfg0);
39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 writeq(val64, &vp_reg->rxmac_vcfg0);
41 val64 = readq(&vp_reg->rxmac_vcfg0);
42}
43
44/*
45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
46 */
47int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48{
49 struct vxge_hw_vpath_reg __iomem *vp_reg;
50 struct __vxge_hw_virtualpath *vpath;
51 u64 val64, rxd_count, rxd_spat;
52 int count = 0, total_count = 0;
53
54 vpath = &hldev->virtual_paths[vp_id];
55 vp_reg = vpath->vp_reg;
56
57 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
58
59 /* Check that the ring controller for this vpath has enough free RxDs
60 * to send frames to the host. This is done by reading the
61 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
62 * RXD_SPAT value for the vpath.
63 */
64 val64 = readq(&vp_reg->prc_cfg6);
65 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
 66	/* Use a factor of 2 when comparing rxd_count against rxd_spat to
 67	 * allow some headroom.
 68	 */
69 rxd_spat *= 2;
70
71 do {
72 mdelay(1);
73
74 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
75
76 /* Check that the ring controller for this vpath does
77 * not have any frame in its pipeline.
78 */
79 val64 = readq(&vp_reg->frm_in_progress_cnt);
80 if ((rxd_count <= rxd_spat) || (val64 > 0))
81 count = 0;
82 else
83 count++;
84 total_count++;
85 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 (total_count < VXGE_HW_MAX_POLLING_COUNT));
87
88 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
 89		printk(KERN_ALERT "%s: still receiving traffic, aborting wait\n",
90 __func__);
91
92 return total_count;
93}
94
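/*
 * Illustrative sketch, not part of the driver: the loop above is a generic
 * "N successive idle samples" debounce. A single idle reading may be a
 * transient, so the counter resets whenever activity is seen, and the wait
 * succeeds only after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT idle samples in a
 * row, bounded by VXGE_HW_MAX_POLLING_COUNT samples overall. The same shape
 * with hypothetical helpers:
 */
#if 0	/* example only */
static bool example_wait_idle(bool (*is_idle)(void), int min_idle, int max_poll)
{
	int count = 0, total = 0;

	do {
		mdelay(1);
		count = is_idle() ? count + 1 : 0;	/* reset on activity */
	} while (count < min_idle && ++total < max_poll);

	return count >= min_idle;
}
#endif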
95/* vxge_hw_device_wait_receive_idle - This function waits until all frames
96 * stored in the frame buffer for each vpath assigned to the given
97 * function (hldev) have been sent to the host.
98 */
99void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100{
101 int i, total_count = 0;
102
103 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 continue;
106
107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
108 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 break;
110 }
111}
112
113/*
114 * __vxge_hw_device_register_poll
115 * Polls the given register for up to the specified amount of time,
116 * until the masked bits are cleared.
117 */
118static enum vxge_hw_status
119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120{
121 u64 val64;
122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124
125 udelay(10);
126
127 do {
128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
133
134 i = 0;
135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
141
142 return ret;
143}
144
145static inline enum vxge_hw_status
146__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
147 u64 mask, u32 max_millis)
148{
149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
150 wmb();
151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
152 wmb();
153
154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
155}
156
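/*
 * Illustrative note, not part of the driver: the strobe registers must be
 * written as two ordered 32-bit PIO accesses -- data half first, then the
 * half carrying the strobe/command bit -- with write barriers in between,
 * so the device never latches a strobe paired with stale data. Assuming the
 * helpers in vxge-config.h map "lower" to writel() at addr + 4 and "upper"
 * to writel() at addr, the open-coded equivalent would be:
 */
#if 0	/* example only */
	writel((u32)(val64 & 0xffffffffULL), (u8 __iomem *)addr + 4);
	wmb();
	writel((u32)(val64 >> 32), addr);	/* upper half holds the strobe */
	wmb();
#endif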
157static enum vxge_hw_status
158vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl)
161{
162 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
163 enum vxge_hw_status status;
164 u64 val64;
165 u32 retry = 0, max_retry = 3;
166
167 spin_lock(&vpath->lock);
168 if (!vpath->vp_open) {
169 spin_unlock(&vpath->lock);
170 max_retry = 100;
171 }
172
173 writeq(*data0, &vp_reg->rts_access_steer_data0);
174 writeq(*data1, &vp_reg->rts_access_steer_data1);
175 wmb();
176
177 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
178 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
179 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
180 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
181 *steer_ctrl;
182
183 status = __vxge_hw_pio_mem_write64(val64,
184 &vp_reg->rts_access_steer_ctrl,
185 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
186 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
187
 188	/* The __vxge_hw_device_register_poll can udelay for a significant
 189	 * amount of time, blocking other processes from the CPU. If it delays
 190	 * for ~5 secs, an NMI error can occur. A way around this is to give
 191	 * up the processor via msleep, but that is not allowed while a lock
 192	 * is held. So, sleep only when the vpath is not open (the lock was
 193	 * dropped above), retrying with a 20 ms sleep between polls until the
 194	 * firmware operation has completed or timed out.
 195	 */
196 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
197 if (!vpath->vp_open)
198 msleep(20);
199 status = __vxge_hw_device_register_poll(
200 &vp_reg->rts_access_steer_ctrl,
201 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
202 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
203 }
204
205 if (status != VXGE_HW_OK)
206 goto out;
207
208 val64 = readq(&vp_reg->rts_access_steer_ctrl);
209 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
210 *data0 = readq(&vp_reg->rts_access_steer_data0);
211 *data1 = readq(&vp_reg->rts_access_steer_data1);
212 *steer_ctrl = val64;
213 } else
214 status = VXGE_HW_FAIL;
215
216out:
217 if (vpath->vp_open)
218 spin_unlock(&vpath->lock);
219 return status;
220}
221
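/*
 * Illustrative sketch, not part of the driver: a minimal caller of
 * vxge_hw_vpath_fw_api(). data0/data1 carry the request on entry and the
 * reply on success; steer_ctrl passes extra control bits in and returns the
 * raw completion word.
 */
#if 0	/* example only */
static enum vxge_hw_status
example_fw_memo_read(struct __vxge_hw_virtualpath *vpath, u64 *reply)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status == VXGE_HW_OK)
		*reply = data0;
	return status;
}
#endif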
222enum vxge_hw_status
223vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
224 u32 *minor, u32 *build)
225{
226 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
227 struct __vxge_hw_virtualpath *vpath;
228 enum vxge_hw_status status;
229
230 vpath = &hldev->virtual_paths[hldev->first_vp_id];
231
232 status = vxge_hw_vpath_fw_api(vpath,
233 VXGE_HW_FW_UPGRADE_ACTION,
234 VXGE_HW_FW_UPGRADE_MEMO,
235 VXGE_HW_FW_UPGRADE_OFFSET_READ,
236 &data0, &data1, &steer_ctrl);
237 if (status != VXGE_HW_OK)
238 return status;
239
240 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
241 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
242 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
243
244 return status;
245}
246
247enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
248{
249 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
250 struct __vxge_hw_virtualpath *vpath;
251 enum vxge_hw_status status;
252 u32 ret;
253
254 vpath = &hldev->virtual_paths[hldev->first_vp_id];
255
256 status = vxge_hw_vpath_fw_api(vpath,
257 VXGE_HW_FW_UPGRADE_ACTION,
258 VXGE_HW_FW_UPGRADE_MEMO,
259 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
260 &data0, &data1, &steer_ctrl);
261 if (status != VXGE_HW_OK) {
262 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
263 goto exit;
264 }
265
266 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
267 if (ret != 1) {
268 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
269 __func__, ret);
270 status = VXGE_HW_FAIL;
271 }
272
273exit:
274 return status;
275}
276
277enum vxge_hw_status
278vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
279{
280 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
281 struct __vxge_hw_virtualpath *vpath;
282 enum vxge_hw_status status;
283 int ret_code, sec_code;
284
285 vpath = &hldev->virtual_paths[hldev->first_vp_id];
286
287 /* send upgrade start command */
288 status = vxge_hw_vpath_fw_api(vpath,
289 VXGE_HW_FW_UPGRADE_ACTION,
290 VXGE_HW_FW_UPGRADE_MEMO,
291 VXGE_HW_FW_UPGRADE_OFFSET_START,
292 &data0, &data1, &steer_ctrl);
293 if (status != VXGE_HW_OK) {
294 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
295 __func__);
296 return status;
297 }
298
299 /* Transfer fw image to adapter 16 bytes at a time */
300 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
301 steer_ctrl = 0;
302
 303		/* The next 128 bits of fwdata to be loaded onto the adapter */
304 data0 = *((u64 *)fwdata);
305 data1 = *((u64 *)fwdata + 1);
306
307 status = vxge_hw_vpath_fw_api(vpath,
308 VXGE_HW_FW_UPGRADE_ACTION,
309 VXGE_HW_FW_UPGRADE_MEMO,
310 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
311 &data0, &data1, &steer_ctrl);
312 if (status != VXGE_HW_OK) {
313 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
314 __func__);
315 goto out;
316 }
317
318 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
319 switch (ret_code) {
320 case VXGE_HW_FW_UPGRADE_OK:
321 /* All OK, send next 16 bytes. */
322 break;
323 case VXGE_FW_UPGRADE_BYTES2SKIP:
324 /* skip bytes in the stream */
325 fwdata += (data0 >> 8) & 0xFFFFFFFF;
326 break;
327 case VXGE_HW_FW_UPGRADE_DONE:
328 goto out;
329 case VXGE_HW_FW_UPGRADE_ERR:
330 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
331 switch (sec_code) {
332 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
333 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
334 printk(KERN_ERR
335 "corrupted data from .ncf file\n");
336 break;
337 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
338 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
339 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
340 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
341 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
342 printk(KERN_ERR "invalid .ncf file\n");
343 break;
344 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
345 printk(KERN_ERR "buffer overflow\n");
346 break;
347 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
348 printk(KERN_ERR "failed to flash the image\n");
349 break;
350 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
351 printk(KERN_ERR
352 "generic error. Unknown error type\n");
353 break;
354 default:
355 printk(KERN_ERR "Unknown error of type %d\n",
356 sec_code);
357 break;
358 }
359 status = VXGE_HW_FAIL;
360 goto out;
361 default:
362 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
363 status = VXGE_HW_FAIL;
364 goto out;
365 }
366 /* point to next 16 bytes */
367 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
368 }
369out:
370 return status;
371}
372
373enum vxge_hw_status
374vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
375 struct eprom_image *img)
376{
377 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
378 struct __vxge_hw_virtualpath *vpath;
379 enum vxge_hw_status status;
380 int i;
381
382 vpath = &hldev->virtual_paths[hldev->first_vp_id];
383
384 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
385 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
386 data1 = steer_ctrl = 0;
387
388 status = vxge_hw_vpath_fw_api(vpath,
389 VXGE_HW_FW_API_GET_EPROM_REV,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 0, &data0, &data1, &steer_ctrl);
392 if (status != VXGE_HW_OK)
393 break;
394
395 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
396 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
397 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
398 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
399 }
400
401 return status;
402}
403
404/*
405 * __vxge_hw_channel_free - Free memory allocated for channel
406 * This function frees the channel structure along with the various
407 * arrays it owns
408 */
409static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
410{
411 kfree(channel->work_arr);
412 kfree(channel->free_arr);
413 kfree(channel->reserve_arr);
414 kfree(channel->orig_arr);
415 kfree(channel);
416}
417
418/*
419 * __vxge_hw_channel_initialize - Initialize a channel
420 * This function initializes a channel by properly setting the
421 * various references
422 */
423static enum vxge_hw_status
424__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
425{
426 u32 i;
427 struct __vxge_hw_virtualpath *vpath;
428
429 vpath = channel->vph->vpath;
430
431 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
432 for (i = 0; i < channel->length; i++)
433 channel->orig_arr[i] = channel->reserve_arr[i];
434 }
435
436 switch (channel->type) {
437 case VXGE_HW_CHANNEL_TYPE_FIFO:
438 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
439 channel->stats = &((struct __vxge_hw_fifo *)
440 channel)->stats->common_stats;
441 break;
442 case VXGE_HW_CHANNEL_TYPE_RING:
443 vpath->ringh = (struct __vxge_hw_ring *)channel;
444 channel->stats = &((struct __vxge_hw_ring *)
445 channel)->stats->common_stats;
446 break;
447 default:
448 break;
449 }
450
451 return VXGE_HW_OK;
452}
453
454/*
455 * __vxge_hw_channel_reset - Resets a channel
456 * This function resets a channel by properly setting the various references
457 */
458static enum vxge_hw_status
459__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
460{
461 u32 i;
462
463 for (i = 0; i < channel->length; i++) {
464 if (channel->reserve_arr != NULL)
465 channel->reserve_arr[i] = channel->orig_arr[i];
466 if (channel->free_arr != NULL)
467 channel->free_arr[i] = NULL;
468 if (channel->work_arr != NULL)
469 channel->work_arr[i] = NULL;
470 }
471 channel->free_ptr = channel->length;
472 channel->reserve_ptr = channel->length;
473 channel->reserve_top = 0;
474 channel->post_index = 0;
475 channel->compl_index = 0;
476
477 return VXGE_HW_OK;
478}
479
480/*
481 * __vxge_hw_device_pci_e_init
482 * Initialize certain PCI/PCI-X configuration registers
483 * with recommended values. Save config space for future hw resets.
484 */
485static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
486{
487 u16 cmd = 0;
488
 489	/* Set the PErr Response bit and SERR in the PCI command register. */
490 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
491 cmd |= 0x140;
492 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
493
494 pci_save_state(hldev->pdev);
495}
496
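/*
 * Illustrative note, not part of the driver: the 0x140 above is the pair of
 * standard PCI command bits from <linux/pci_regs.h> rather than a magic
 * number. Spelled out:
 */
#if 0	/* example only */
	cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;	/* 0x40 | 0x100 == 0x140 */
#endif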
497/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
498 * is in progress
499 * This routine polls until the vpath-reset-in-progress register reads zero
500 */
501static enum vxge_hw_status
502__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
503{
504 enum vxge_hw_status status;
505 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
506 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
507 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
508 return status;
509}
510
511/*
512 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy
513 * section. Set the swapper bits appropriately for the legacy section.
514 */
515static enum vxge_hw_status
516__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
517{
518 u64 val64;
519 enum vxge_hw_status status = VXGE_HW_OK;
520
521 val64 = readq(&legacy_reg->toc_swapper_fb);
522
523 wmb();
524
525 switch (val64) {
526 case VXGE_HW_SWAPPER_INITIAL_VALUE:
527 return status;
528
529 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
530 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
531 &legacy_reg->pifm_rd_swap_en);
532 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
533 &legacy_reg->pifm_rd_flip_en);
534 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
535 &legacy_reg->pifm_wr_swap_en);
536 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
537 &legacy_reg->pifm_wr_flip_en);
538 break;
539
540 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
541 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
542 &legacy_reg->pifm_rd_swap_en);
543 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
544 &legacy_reg->pifm_wr_swap_en);
545 break;
546
547 case VXGE_HW_SWAPPER_BIT_FLIPPED:
548 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
549 &legacy_reg->pifm_rd_flip_en);
550 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
551 &legacy_reg->pifm_wr_flip_en);
552 break;
553 }
554
555 wmb();
556
557 val64 = readq(&legacy_reg->toc_swapper_fb);
558
559 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
560 status = VXGE_HW_ERR_SWAPPER_CTRL;
561
562 return status;
563}
564
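/*
 * Illustrative sketch, not part of the driver: the swapper setup above is a
 * self-identifying-pattern probe. A register with a known true value
 * (VXGE_HW_SWAPPER_INITIAL_VALUE) is read back; the distorted value reveals
 * whether the path byte-swaps, bit-flips, or both, the matching swap-enable
 * registers are programmed, and a second readback confirms the fix. In
 * generic form, with a hypothetical fixup helper:
 */
#if 0	/* example only */
	u64 probe = readq(&legacy_reg->toc_swapper_fb);

	if (probe != VXGE_HW_SWAPPER_INITIAL_VALUE)
		example_program_swapper_for(probe);		/* hypothetical */
	if (readq(&legacy_reg->toc_swapper_fb) != VXGE_HW_SWAPPER_INITIAL_VALUE)
		return VXGE_HW_ERR_SWAPPER_CTRL;	/* still wrong: give up */
#endif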
565/*
566 * __vxge_hw_device_toc_get
567 * This routine sets the swapper and reads the toc pointer and returns the
568 * memory mapped address of the toc
569 */
570static struct vxge_hw_toc_reg __iomem *
571__vxge_hw_device_toc_get(void __iomem *bar0)
572{
573 u64 val64;
574 struct vxge_hw_toc_reg __iomem *toc = NULL;
575 enum vxge_hw_status status;
576
577 struct vxge_hw_legacy_reg __iomem *legacy_reg =
578 (struct vxge_hw_legacy_reg __iomem *)bar0;
579
580 status = __vxge_hw_legacy_swapper_set(legacy_reg);
581 if (status != VXGE_HW_OK)
582 goto exit;
583
584 val64 = readq(&legacy_reg->toc_first_pointer);
585 toc = bar0 + val64;
586exit:
587 return toc;
588}
589
590/*
591 * __vxge_hw_device_reg_addr_get
592 * This routine sets the swapper, reads the toc pointer and initializes the
593 * register location pointers in the device object. It then waits until the
594 * ric has completed initializing the registers.
595 */
596static enum vxge_hw_status
597__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
598{
599 u64 val64;
600 u32 i;
601 enum vxge_hw_status status = VXGE_HW_OK;
602
603 hldev->legacy_reg = hldev->bar0;
604
605 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
606 if (hldev->toc_reg == NULL) {
607 status = VXGE_HW_FAIL;
608 goto exit;
609 }
610
611 val64 = readq(&hldev->toc_reg->toc_common_pointer);
612 hldev->common_reg = hldev->bar0 + val64;
613
614 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
615 hldev->mrpcim_reg = hldev->bar0 + val64;
616
617 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
618 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
619 hldev->srpcim_reg[i] = hldev->bar0 + val64;
620 }
621
622 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
623 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
624 hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
625 }
626
627 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
628 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
629 hldev->vpath_reg[i] = hldev->bar0 + val64;
630 }
631
632 val64 = readq(&hldev->toc_reg->toc_kdfc);
633
634 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
635 case 0:
 636		hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
637 break;
638 default:
639 break;
640 }
641
642 status = __vxge_hw_device_vpath_reset_in_prog_check(
643 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
644exit:
645 return status;
646}
647
648/*
649 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
650 * This routine returns the Access Rights of the driver
651 */
652static u32
653__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
654{
655 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
656
657 switch (host_type) {
658 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
659 if (func_id == 0) {
660 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
661 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
662 }
663 break;
664 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
665 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
666 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
667 break;
668 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
669 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
670 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
671 break;
672 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
673 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
674 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
675 break;
676 case VXGE_HW_SR_VH_FUNCTION0:
677 case VXGE_HW_VH_NORMAL_FUNCTION:
678 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
679 break;
680 }
681
682 return access_rights;
683}
684/*
685 * __vxge_hw_device_is_privilaged
686 * This routine checks whether the device function is privileged or not
687 */
688
689enum vxge_hw_status
690__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
691{
692 if (__vxge_hw_device_access_rights_get(host_type,
693 func_id) &
694 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
695 return VXGE_HW_OK;
696 else
697 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
698}
699
700/*
701 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
702 * Returns the function number of the vpath.
703 */
704static u32
705__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
706{
707 u64 val64;
708
709 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
710
711 return
712 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
713}
714
715/*
716 * __vxge_hw_device_host_info_get
717 * This routine returns the host type assignments
718 */
719static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
720{
721 u64 val64;
722 u32 i;
723
724 val64 = readq(&hldev->common_reg->host_type_assignments);
725
726 hldev->host_type =
727 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
728
729 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
730
731 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
732 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
733 continue;
734
735 hldev->func_id =
736 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
737
738 hldev->access_rights = __vxge_hw_device_access_rights_get(
739 hldev->host_type, hldev->func_id);
740
741 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
742 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
743
744 hldev->first_vp_id = i;
745 break;
746 }
747}
748
749/*
750 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
751 * link width and signalling rate.
752 */
753static enum vxge_hw_status
754__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
755{
756 struct pci_dev *dev = hldev->pdev;
757 u16 lnk;
758
759 /* Get the negotiated link width and speed from PCI config space */
760 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
761
762 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
763 return VXGE_HW_ERR_INVALID_PCI_INFO;
764
765 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
766 case PCIE_LNK_WIDTH_RESRV:
767 case PCIE_LNK_X1:
768 case PCIE_LNK_X2:
769 case PCIE_LNK_X4:
770 case PCIE_LNK_X8:
771 break;
772 default:
773 return VXGE_HW_ERR_INVALID_PCI_INFO;
774 }
775
776 return VXGE_HW_OK;
777}
778
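/*
 * Illustrative note, not part of the driver: PCI_EXP_LNKSTA packs the
 * negotiated link speed code in bits 3:0 (1 == 2.5 GT/s, the only speed
 * this adapter accepts) and the negotiated lane count in bits 9:4, which
 * is what the shift by 4 above extracts. Decoded explicitly:
 */
#if 0	/* example only */
	u16 lnksta;
	unsigned int speed_code, lanes;

	pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnksta);
	speed_code = lnksta & PCI_EXP_LNKSTA_CLS;	/* current link speed */
	lanes = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;	/* negotiated width */
#endif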
779/*
780 * __vxge_hw_device_initialize
781 * Initialize Titan-V hardware.
782 */
783static enum vxge_hw_status
784__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
785{
786 enum vxge_hw_status status = VXGE_HW_OK;
787
788 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
789 hldev->func_id)) {
790 /* Validate the pci-e link width and speed */
791 status = __vxge_hw_verify_pci_e_info(hldev);
792 if (status != VXGE_HW_OK)
793 goto exit;
794 }
795
796exit:
797 return status;
798}
799
800/*
801 * __vxge_hw_vpath_fw_ver_get - Get the fw version
802 * Returns FW Version
803 */
804static enum vxge_hw_status
805__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
806 struct vxge_hw_device_hw_info *hw_info)
807{
808 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
809 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
810 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
811 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
812 u64 data0, data1 = 0, steer_ctrl = 0;
813 enum vxge_hw_status status;
814
815 status = vxge_hw_vpath_fw_api(vpath,
816 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
817 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
818 0, &data0, &data1, &steer_ctrl);
819 if (status != VXGE_HW_OK)
820 goto exit;
821
822 fw_date->day =
823 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
824 fw_date->month =
825 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
826 fw_date->year =
827 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
828
829 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
830 fw_date->month, fw_date->day, fw_date->year);
831
832 fw_version->major =
833 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
834 fw_version->minor =
835 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
836 fw_version->build =
837 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
838
839 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
840 fw_version->major, fw_version->minor, fw_version->build);
841
842 flash_date->day =
843 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
844 flash_date->month =
845 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
846 flash_date->year =
847 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
848
849 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
850 flash_date->month, flash_date->day, flash_date->year);
851
852 flash_version->major =
853 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
854 flash_version->minor =
855 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
856 flash_version->build =
857 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
858
859 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
860 flash_version->major, flash_version->minor,
861 flash_version->build);
862
863exit:
864 return status;
865}
866
867/*
868 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
869 * part number and product description.
870 */
871static enum vxge_hw_status
872__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
873 struct vxge_hw_device_hw_info *hw_info)
874{
875 enum vxge_hw_status status;
876 u64 data0, data1 = 0, steer_ctrl = 0;
877 u8 *serial_number = hw_info->serial_number;
878 u8 *part_number = hw_info->part_number;
879 u8 *product_desc = hw_info->product_desc;
880 u32 i, j = 0;
881
882 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
883
884 status = vxge_hw_vpath_fw_api(vpath,
885 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
886 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
887 0, &data0, &data1, &steer_ctrl);
888 if (status != VXGE_HW_OK)
889 return status;
890
891 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
892 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
893
894 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
895 data1 = steer_ctrl = 0;
896
897 status = vxge_hw_vpath_fw_api(vpath,
898 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
899 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
900 0, &data0, &data1, &steer_ctrl);
901 if (status != VXGE_HW_OK)
902 return status;
903
904 ((u64 *)part_number)[0] = be64_to_cpu(data0);
905 ((u64 *)part_number)[1] = be64_to_cpu(data1);
906
907 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
908 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
909 data0 = i;
910 data1 = steer_ctrl = 0;
911
912 status = vxge_hw_vpath_fw_api(vpath,
913 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
914 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
915 0, &data0, &data1, &steer_ctrl);
916 if (status != VXGE_HW_OK)
917 return status;
918
919 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
920 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
921 }
922
923 return status;
924}
925
926/*
927 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
928 * Returns pci function mode
929 */
930static enum vxge_hw_status
931__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
932 struct vxge_hw_device_hw_info *hw_info)
933{
934 u64 data0, data1 = 0, steer_ctrl = 0;
935 enum vxge_hw_status status;
936
937 data0 = 0;
938
939 status = vxge_hw_vpath_fw_api(vpath,
940 VXGE_HW_FW_API_GET_FUNC_MODE,
941 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
942 0, &data0, &data1, &steer_ctrl);
943 if (status != VXGE_HW_OK)
944 return status;
945
946 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
947 return status;
948}
949
950/*
951 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
952 * from MAC address table.
953 */
954static enum vxge_hw_status
955__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
956 u8 *macaddr, u8 *macaddr_mask)
957{
958 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
959 data0 = 0, data1 = 0, steer_ctrl = 0;
960 enum vxge_hw_status status;
961 int i;
962
963 do {
964 status = vxge_hw_vpath_fw_api(vpath, action,
965 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
966 0, &data0, &data1, &steer_ctrl);
967 if (status != VXGE_HW_OK)
968 goto exit;
969
970 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
971 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
972 data1);
973
974 for (i = ETH_ALEN; i > 0; i--) {
975 macaddr[i - 1] = (u8) (data0 & 0xFF);
976 data0 >>= 8;
977
978 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
979 data1 >>= 8;
980 }
981
982 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
983 data0 = 0, data1 = 0, steer_ctrl = 0;
984
985 } while (!is_valid_ether_addr(macaddr));
986exit:
987 return status;
988}
989
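/*
 * Illustrative sketch, not part of the driver: the loop above unpacks a MAC
 * address that the firmware returns right-justified in a u64, with the
 * first address octet in the most significant occupied byte. For a
 * hypothetical reply:
 */
#if 0	/* example only */
	u64 packed = 0x0000001b21aabbccULL;	/* hypothetical firmware reply */
	u8 mac[ETH_ALEN];
	int i;

	for (i = ETH_ALEN; i > 0; i--) {
		mac[i - 1] = (u8)(packed & 0xFF);
		packed >>= 8;
	}
	/* mac[] now holds 00:1b:21:aa:bb:cc */
#endif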
990/**
991 * vxge_hw_device_hw_info_get - Get the hw information
992 * Returns the vpath mask that has the bits set for each vpath allocated
 993 * for the driver, FW version information, and the first MAC address for
 994 * each vpath
995 */
996enum vxge_hw_status __devinit
997vxge_hw_device_hw_info_get(void __iomem *bar0,
998 struct vxge_hw_device_hw_info *hw_info)
999{
1000 u32 i;
1001 u64 val64;
1002 struct vxge_hw_toc_reg __iomem *toc;
1003 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
1004 struct vxge_hw_common_reg __iomem *common_reg;
1005 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
1006 enum vxge_hw_status status;
1007 struct __vxge_hw_virtualpath vpath;
1008
1009 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1010
1011 toc = __vxge_hw_device_toc_get(bar0);
1012 if (toc == NULL) {
1013 status = VXGE_HW_ERR_CRITICAL;
1014 goto exit;
1015 }
1016
1017 val64 = readq(&toc->toc_common_pointer);
1018 common_reg = bar0 + val64;
1019
1020 status = __vxge_hw_device_vpath_reset_in_prog_check(
1021 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
1022 if (status != VXGE_HW_OK)
1023 goto exit;
1024
1025 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1026
1027 val64 = readq(&common_reg->host_type_assignments);
1028
1029 hw_info->host_type =
1030 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1031
1032 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1033 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1034 continue;
1035
1036 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1037
1038 vpmgmt_reg = bar0 + val64;
1039
1040 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
1041 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1042 hw_info->func_id) &
1043 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1044
1045 val64 = readq(&toc->toc_mrpcim_pointer);
1046
1047 mrpcim_reg = bar0 + val64;
1048
1049 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1050 wmb();
1051 }
1052
1053 val64 = readq(&toc->toc_vpath_pointer[i]);
1054
1055 spin_lock_init(&vpath.lock);
1056 vpath.vp_reg = bar0 + val64;
1057 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1058
1059 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1060 if (status != VXGE_HW_OK)
1061 goto exit;
1062
1063 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1064 if (status != VXGE_HW_OK)
1065 goto exit;
1066
1067 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1068 if (status != VXGE_HW_OK)
1069 goto exit;
1070
1071 break;
1072 }
1073
1074 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1075 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1076 continue;
1077
1078 val64 = readq(&toc->toc_vpath_pointer[i]);
1079 vpath.vp_reg = bar0 + val64;
1080 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1081
1082 status = __vxge_hw_vpath_addr_get(&vpath,
1083 hw_info->mac_addrs[i],
1084 hw_info->mac_addr_masks[i]);
1085 if (status != VXGE_HW_OK)
1086 goto exit;
1087 }
1088exit:
1089 return status;
1090}
1091
1092/*
1093 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1094 */
1095static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1096{
1097 struct __vxge_hw_device *hldev;
1098 struct list_head *p, *n;
1099 u16 ret;
1100
1101 if (blockpool == NULL) {
1102 ret = 1;
1103 goto exit;
1104 }
1105
1106 hldev = blockpool->hldev;
1107
1108 list_for_each_safe(p, n, &blockpool->free_block_list) {
1109 pci_unmap_single(hldev->pdev,
1110 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1111 ((struct __vxge_hw_blockpool_entry *)p)->length,
1112 PCI_DMA_BIDIRECTIONAL);
1113
1114 vxge_os_dma_free(hldev->pdev,
1115 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1116 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1117
1118 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1119 kfree(p);
1120 blockpool->pool_size--;
1121 }
1122
1123 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1124 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1125 kfree((void *)p);
1126 }
1127 ret = 0;
1128exit:
1129 return;
1130}
1131
1132/*
1133 * __vxge_hw_blockpool_create - Create block pool
1134 */
1135static enum vxge_hw_status
1136__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1137 struct __vxge_hw_blockpool *blockpool,
1138 u32 pool_size,
1139 u32 pool_max)
1140{
1141 u32 i;
1142 struct __vxge_hw_blockpool_entry *entry = NULL;
1143 void *memblock;
1144 dma_addr_t dma_addr;
1145 struct pci_dev *dma_handle;
1146 struct pci_dev *acc_handle;
1147 enum vxge_hw_status status = VXGE_HW_OK;
1148
1149 if (blockpool == NULL) {
1150 status = VXGE_HW_FAIL;
1151 goto blockpool_create_exit;
1152 }
1153
1154 blockpool->hldev = hldev;
1155 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1156 blockpool->pool_size = 0;
1157 blockpool->pool_max = pool_max;
1158 blockpool->req_out = 0;
1159
1160 INIT_LIST_HEAD(&blockpool->free_block_list);
1161 INIT_LIST_HEAD(&blockpool->free_entry_list);
1162
1163 for (i = 0; i < pool_size + pool_max; i++) {
1164 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1165 GFP_KERNEL);
1166 if (entry == NULL) {
1167 __vxge_hw_blockpool_destroy(blockpool);
1168 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1169 goto blockpool_create_exit;
1170 }
1171 list_add(&entry->item, &blockpool->free_entry_list);
1172 }
1173
1174 for (i = 0; i < pool_size; i++) {
1175 memblock = vxge_os_dma_malloc(
1176 hldev->pdev,
1177 VXGE_HW_BLOCK_SIZE,
1178 &dma_handle,
1179 &acc_handle);
1180 if (memblock == NULL) {
1181 __vxge_hw_blockpool_destroy(blockpool);
1182 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1183 goto blockpool_create_exit;
1184 }
1185
1186 dma_addr = pci_map_single(hldev->pdev, memblock,
1187 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1188 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1189 dma_addr))) {
1190 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1191 __vxge_hw_blockpool_destroy(blockpool);
1192 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1193 goto blockpool_create_exit;
1194 }
1195
1196 if (!list_empty(&blockpool->free_entry_list))
1197 entry = (struct __vxge_hw_blockpool_entry *)
1198 list_first_entry(&blockpool->free_entry_list,
1199 struct __vxge_hw_blockpool_entry,
1200 item);
1201
1202 if (entry == NULL)
1203 entry =
1204 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1205 GFP_KERNEL);
1206 if (entry != NULL) {
1207 list_del(&entry->item);
1208 entry->length = VXGE_HW_BLOCK_SIZE;
1209 entry->memblock = memblock;
1210 entry->dma_addr = dma_addr;
1211 entry->acc_handle = acc_handle;
1212 entry->dma_handle = dma_handle;
1213 list_add(&entry->item,
1214 &blockpool->free_block_list);
1215 blockpool->pool_size++;
1216 } else {
1217 __vxge_hw_blockpool_destroy(blockpool);
1218 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1219 goto blockpool_create_exit;
1220 }
1221 }
1222
1223blockpool_create_exit:
1224 return status;
1225}
1226
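/*
 * Illustrative sketch, not part of the driver: the pool keeps pre-DMA-mapped
 * blocks on free_block_list and spare bookkeeping entries on
 * free_entry_list, so later consumers can take a mapped block without
 * touching the DMA API. A minimal "take one block" in the same style
 * (hypothetical helper, locking omitted):
 */
#if 0	/* example only */
static struct __vxge_hw_blockpool_entry *
example_blockpool_take(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;

	if (!list_empty(&blockpool->free_block_list)) {
		entry = list_first_entry(&blockpool->free_block_list,
				struct __vxge_hw_blockpool_entry, item);
		list_del(&entry->item);
		blockpool->pool_size--;
	}
	return entry;	/* entry->memblock / entry->dma_addr are ready to use */
}
#endif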
1227/*
1228 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1229 * Check the fifo configuration
1230 */
1231static enum vxge_hw_status
1232__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1233{
1234 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1235 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1236 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1237
1238 return VXGE_HW_OK;
1239}
1240
1241/*
1242 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1243 * Check the vpath configuration
1244 */
1245static enum vxge_hw_status
1246__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1247{
1248 enum vxge_hw_status status;
1249
1250 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1251 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1252 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1253
1254 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1255 if (status != VXGE_HW_OK)
1256 return status;
1257
1258 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1259 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1260 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1261 return VXGE_HW_BADCFG_VPATH_MTU;
1262
1263 if ((vp_config->rpa_strip_vlan_tag !=
1264 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1265 (vp_config->rpa_strip_vlan_tag !=
1266 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1267 (vp_config->rpa_strip_vlan_tag !=
1268 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1269 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1270
1271 return VXGE_HW_OK;
1272}
1273
1274/*
1275 * __vxge_hw_device_config_check - Check device configuration.
1276 * Check the device configuration
1277 */
1278static enum vxge_hw_status
1279__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1280{
1281 u32 i;
1282 enum vxge_hw_status status;
1283
1284 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1285 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1286 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1287 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1288 return VXGE_HW_BADCFG_INTR_MODE;
1289
1290 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1291 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1292 return VXGE_HW_BADCFG_RTS_MAC_EN;
1293
1294 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1295 status = __vxge_hw_device_vpath_config_check(
1296 &new_config->vp_config[i]);
1297 if (status != VXGE_HW_OK)
1298 return status;
1299 }
1300
1301 return VXGE_HW_OK;
1302}
1303
1304/*
1305 * vxge_hw_device_initialize - Initialize Titan device.
1306 * Initialize Titan device. Note that all the arguments of this public API
1307 * are 'IN' except @devh, which returns the allocated device handle. The
1308 * driver cooperates with the OS to find a new Titan device and locate its
1309 * PCI and memory spaces.
1310 *
1311 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for
1312 * HW to enable the latter to perform Titan hardware initialization.
1313 */
1313enum vxge_hw_status __devinit
1314vxge_hw_device_initialize(
1315 struct __vxge_hw_device **devh,
1316 struct vxge_hw_device_attr *attr,
1317 struct vxge_hw_device_config *device_config)
1318{
1319 u32 i;
1320 u32 nblocks = 0;
1321 struct __vxge_hw_device *hldev = NULL;
1322 enum vxge_hw_status status = VXGE_HW_OK;
1323
1324 status = __vxge_hw_device_config_check(device_config);
1325 if (status != VXGE_HW_OK)
1326 goto exit;
1327
1328 hldev = vzalloc(sizeof(struct __vxge_hw_device));
1329 if (hldev == NULL) {
1330 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1331 goto exit;
1332 }
1333
1334 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1335
1336 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1337
1338 /* apply config */
1339 memcpy(&hldev->config, device_config,
1340 sizeof(struct vxge_hw_device_config));
1341
1342 hldev->bar0 = attr->bar0;
1343 hldev->pdev = attr->pdev;
1344
1345 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1346 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1347 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1348
1349 __vxge_hw_device_pci_e_init(hldev);
1350
1351 status = __vxge_hw_device_reg_addr_get(hldev);
1352 if (status != VXGE_HW_OK) {
1353 vfree(hldev);
1354 goto exit;
1355 }
1356
1357 __vxge_hw_device_host_info_get(hldev);
1358
1359 /* Incrementing for stats blocks */
1360 nblocks++;
1361
1362 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1363 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1364 continue;
1365
1366 if (device_config->vp_config[i].ring.enable ==
1367 VXGE_HW_RING_ENABLE)
1368 nblocks += device_config->vp_config[i].ring.ring_blocks;
1369
1370 if (device_config->vp_config[i].fifo.enable ==
1371 VXGE_HW_FIFO_ENABLE)
1372 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1373 nblocks++;
1374 }
1375
1376 if (__vxge_hw_blockpool_create(hldev,
1377 &hldev->block_pool,
1378 device_config->dma_blockpool_initial + nblocks,
1379 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1380
1381 vxge_hw_device_terminate(hldev);
1382 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1383 goto exit;
1384 }
1385
1386 status = __vxge_hw_device_initialize(hldev);
1387 if (status != VXGE_HW_OK) {
1388 vxge_hw_device_terminate(hldev);
1389 goto exit;
1390 }
1391
1392 *devh = hldev;
1393exit:
1394 return status;
1395}
1396
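/*
 * Illustrative sketch, not part of the driver: the expected calling sequence
 * from a PCI probe path, assuming bar0 was already ioremapped, device_config
 * was populated and validated, and the ULD callbacks are omitted:
 */
#if 0	/* example only */
	struct __vxge_hw_device *hldev;
	struct vxge_hw_device_attr attr = {
		.bar0 = bar0,	/* ioremapped BAR0 */
		.pdev = pdev,
	};

	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
	if (status != VXGE_HW_OK)
		return -EIO;
	/* ... run traffic ... */
	vxge_hw_device_terminate(hldev);
#endif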
1397/*
1398 * vxge_hw_device_terminate - Terminate Titan device.
1399 * Terminate HW device.
1400 */
1401void
1402vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1403{
1404 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1405
1406 hldev->magic = VXGE_HW_DEVICE_DEAD;
1407 __vxge_hw_blockpool_destroy(&hldev->block_pool);
1408 vfree(hldev);
1409}
1410
1411/*
1412 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1413 * and offset and perform an operation
1414 */
1415static enum vxge_hw_status
1416__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1417 u32 operation, u32 offset, u64 *stat)
1418{
1419 u64 val64;
1420 enum vxge_hw_status status = VXGE_HW_OK;
1421 struct vxge_hw_vpath_reg __iomem *vp_reg;
1422
1423 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1424 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1425 goto vpath_stats_access_exit;
1426 }
1427
1428 vp_reg = vpath->vp_reg;
1429
1430 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1431 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1432 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1433
1434 status = __vxge_hw_pio_mem_write64(val64,
1435 &vp_reg->xmac_stats_access_cmd,
1436 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1437 vpath->hldev->config.device_poll_millis);
1438 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1439 *stat = readq(&vp_reg->xmac_stats_access_data);
1440 else
1441 *stat = 0;
1442
1443vpath_stats_access_exit:
1444 return status;
1445}
1446
1447/*
1448 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1449 */
1450static enum vxge_hw_status
1451__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1452 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1453{
1454 u64 *val64;
1455 int i;
1456 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1457 enum vxge_hw_status status = VXGE_HW_OK;
1458
1459 val64 = (u64 *)vpath_tx_stats;
1460
1461 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1462 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1463 goto exit;
1464 }
1465
1466 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1467 status = __vxge_hw_vpath_stats_access(vpath,
1468 VXGE_HW_STATS_OP_READ,
1469 offset, val64);
1470 if (status != VXGE_HW_OK)
1471 goto exit;
1472 offset++;
1473 val64++;
1474 }
1475exit:
1476 return status;
1477}
1478
1479/*
1480 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1481 */
1482static enum vxge_hw_status
1483__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1484 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1485{
1486 u64 *val64;
1487 enum vxge_hw_status status = VXGE_HW_OK;
1488 int i;
1489 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1490 val64 = (u64 *) vpath_rx_stats;
1491
1492 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1493 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1494 goto exit;
1495 }
1496 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1497 status = __vxge_hw_vpath_stats_access(vpath,
1498 VXGE_HW_STATS_OP_READ,
1499 offset >> 3, val64);
1500 if (status != VXGE_HW_OK)
1501 goto exit;
1502
1503 offset += 8;
1504 val64++;
1505 }
1506exit:
1507 return status;
1508}
1509
1510/*
1511 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1512 */
1513static enum vxge_hw_status
1514__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1515 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1516{
1517 u64 val64;
1518 enum vxge_hw_status status = VXGE_HW_OK;
1519 struct vxge_hw_vpath_reg __iomem *vp_reg;
1520
1521 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1522 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1523 goto exit;
1524 }
1525 vp_reg = vpath->vp_reg;
1526
1527 val64 = readq(&vp_reg->vpath_debug_stats0);
1528 hw_stats->ini_num_mwr_sent =
1529 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1530
1531 val64 = readq(&vp_reg->vpath_debug_stats1);
1532 hw_stats->ini_num_mrd_sent =
1533 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1534
1535 val64 = readq(&vp_reg->vpath_debug_stats2);
1536 hw_stats->ini_num_cpl_rcvd =
1537 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1538
1539 val64 = readq(&vp_reg->vpath_debug_stats3);
1540 hw_stats->ini_num_mwr_byte_sent =
1541 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1542
1543 val64 = readq(&vp_reg->vpath_debug_stats4);
1544 hw_stats->ini_num_cpl_byte_rcvd =
1545 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1546
1547 val64 = readq(&vp_reg->vpath_debug_stats5);
1548 hw_stats->wrcrdtarb_xoff =
1549 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1550
1551 val64 = readq(&vp_reg->vpath_debug_stats6);
1552 hw_stats->rdcrdtarb_xoff =
1553 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1554
1555 val64 = readq(&vp_reg->vpath_genstats_count01);
1556 hw_stats->vpath_genstats_count0 =
1557 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1558 val64);
1559
1560 val64 = readq(&vp_reg->vpath_genstats_count01);
1561 hw_stats->vpath_genstats_count1 =
1562 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1563 val64);
1564
1565 val64 = readq(&vp_reg->vpath_genstats_count23);
1566 hw_stats->vpath_genstats_count2 =
1567 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1568 val64);
1569
1570 val64 = readq(&vp_reg->vpath_genstats_count01);
1571 hw_stats->vpath_genstats_count3 =
1572 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1573 val64);
1574
1575 val64 = readq(&vp_reg->vpath_genstats_count4);
1576 hw_stats->vpath_genstats_count4 =
1577 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1578 val64);
1579
1580 val64 = readq(&vp_reg->vpath_genstats_count5);
1581 hw_stats->vpath_genstats_count5 =
1582 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1583 val64);
1584
1585 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1586 if (status != VXGE_HW_OK)
1587 goto exit;
1588
1589 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1590 if (status != VXGE_HW_OK)
1591 goto exit;
1592
1593 VXGE_HW_VPATH_STATS_PIO_READ(
1594 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1595
1596 hw_stats->prog_event_vnum0 =
1597 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1598
1599 hw_stats->prog_event_vnum1 =
1600 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1601
1602 VXGE_HW_VPATH_STATS_PIO_READ(
1603 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1604
1605 hw_stats->prog_event_vnum2 =
1606 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1607
1608 hw_stats->prog_event_vnum3 =
1609 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1610
1611 val64 = readq(&vp_reg->rx_multi_cast_stats);
1612 hw_stats->rx_multi_cast_frame_discard =
1613 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1614
1615 val64 = readq(&vp_reg->rx_frm_transferred);
1616 hw_stats->rx_frm_transferred =
1617 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1618
1619 val64 = readq(&vp_reg->rxd_returned);
1620 hw_stats->rxd_returned =
1621 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1622
1623 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1624 hw_stats->rx_mpa_len_fail_frms =
1625 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1626 hw_stats->rx_mpa_mrk_fail_frms =
1627 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1628 hw_stats->rx_mpa_crc_fail_frms =
1629 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1630
1631 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1632 hw_stats->rx_permitted_frms =
1633 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1634 hw_stats->rx_vp_reset_discarded_frms =
1635 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1636 hw_stats->rx_wol_frms =
1637 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1638
1639 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1640 hw_stats->tx_vp_reset_discarded_frms =
1641 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1642 val64);
1643exit:
1644 return status;
1645}
1646
1647/*
1648 * vxge_hw_device_stats_get - Get the device hw statistics.
1649 * Returns the vpath h/w stats for the device.
1650 */
1651enum vxge_hw_status
1652vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1653 struct vxge_hw_device_stats_hw_info *hw_stats)
1654{
1655 u32 i;
1656 enum vxge_hw_status status = VXGE_HW_OK;
1657
1658 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1659 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1660 (hldev->virtual_paths[i].vp_open ==
1661 VXGE_HW_VP_NOT_OPEN))
1662 continue;
1663
1664 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1665 hldev->virtual_paths[i].hw_stats,
1666 sizeof(struct vxge_hw_vpath_stats_hw_info));
1667
1668 status = __vxge_hw_vpath_stats_get(
1669 &hldev->virtual_paths[i],
1670 hldev->virtual_paths[i].hw_stats);
1671 }
1672
1673 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1674 sizeof(struct vxge_hw_device_stats_hw_info));
1675
1676 return status;
1677}
1678
1679/*
1680 * vxge_hw_driver_stats_get - Get the device sw statistics.
1681 * Returns the vpath s/w stats for the device.
1682 */
1683enum vxge_hw_status vxge_hw_driver_stats_get(
1684 struct __vxge_hw_device *hldev,
1685 struct vxge_hw_device_stats_sw_info *sw_stats)
1686{
1687 enum vxge_hw_status status = VXGE_HW_OK;
1688
1689 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1690 sizeof(struct vxge_hw_device_stats_sw_info));
1691
1692 return status;
1693}
1694
1695/*
1696 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1697 * and offset and perform an operation
1698 * Get the statistics from the given location and offset.
1699 */
1700enum vxge_hw_status
1701vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1702 u32 operation, u32 location, u32 offset, u64 *stat)
1703{
1704 u64 val64;
1705 enum vxge_hw_status status = VXGE_HW_OK;
1706
1707 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1708 hldev->func_id);
1709 if (status != VXGE_HW_OK)
1710 goto exit;
1711
1712 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1713 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1714 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1715 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1716
1717 status = __vxge_hw_pio_mem_write64(val64,
1718 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1719 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1720 hldev->config.device_poll_millis);
1721
1722 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1723 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1724 else
1725 *stat = 0;
1726exit:
1727 return status;
1728}
1729
1730/*
1731 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1732 * Get the Statistics on aggregate port
1733 */
1734static enum vxge_hw_status
1735vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1736 struct vxge_hw_xmac_aggr_stats *aggr_stats)
1737{
1738 u64 *val64;
1739 int i;
1740 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1741 enum vxge_hw_status status = VXGE_HW_OK;
1742
1743 val64 = (u64 *)aggr_stats;
1744
1745 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1746 hldev->func_id);
1747 if (status != VXGE_HW_OK)
1748 goto exit;
1749
1750 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1751 status = vxge_hw_mrpcim_stats_access(hldev,
1752 VXGE_HW_STATS_OP_READ,
1753 VXGE_HW_STATS_LOC_AGGR,
1754 ((offset + (104 * port)) >> 3), val64);
1755 if (status != VXGE_HW_OK)
1756 goto exit;
1757
1758 offset += 8;
1759 val64++;
1760 }
1761exit:
1762 return status;
1763}
1764
1765/*
1766 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1767 * Get the Statistics on port
1768 */
1769static enum vxge_hw_status
1770vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1771 struct vxge_hw_xmac_port_stats *port_stats)
1772{
1773 u64 *val64;
1774 enum vxge_hw_status status = VXGE_HW_OK;
1775 int i;
1776 u32 offset = 0x0;
1777 val64 = (u64 *) port_stats;
1778
1779 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1780 hldev->func_id);
1781 if (status != VXGE_HW_OK)
1782 goto exit;
1783
1784 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1785 status = vxge_hw_mrpcim_stats_access(hldev,
1786 VXGE_HW_STATS_OP_READ,
1787 VXGE_HW_STATS_LOC_AGGR,
1788 ((offset + (608 * port)) >> 3), val64);
1789 if (status != VXGE_HW_OK)
1790 goto exit;
1791
1792 offset += 8;
1793 val64++;
1794 }
1795
1796exit:
1797 return status;
1798}
1799
1800/*
1801 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1802 * Get the XMAC Statistics
1803 */
1804enum vxge_hw_status
1805vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1806 struct vxge_hw_xmac_stats *xmac_stats)
1807{
1808 enum vxge_hw_status status = VXGE_HW_OK;
1809 u32 i;
1810
1811 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1812 0, &xmac_stats->aggr_stats[0]);
1813 if (status != VXGE_HW_OK)
1814 goto exit;
1815
1816 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1817 1, &xmac_stats->aggr_stats[1]);
1818 if (status != VXGE_HW_OK)
1819 goto exit;
1820
1821 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1822
1823 status = vxge_hw_device_xmac_port_stats_get(hldev,
1824 i, &xmac_stats->port_stats[i]);
1825 if (status != VXGE_HW_OK)
1826 goto exit;
1827 }
1828
1829 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1830
1831 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1832 continue;
1833
1834 status = __vxge_hw_vpath_xmac_tx_stats_get(
1835 &hldev->virtual_paths[i],
1836 &xmac_stats->vpath_tx_stats[i]);
1837 if (status != VXGE_HW_OK)
1838 goto exit;
1839
1840 status = __vxge_hw_vpath_xmac_rx_stats_get(
1841 &hldev->virtual_paths[i],
1842 &xmac_stats->vpath_rx_stats[i]);
1843 if (status != VXGE_HW_OK)
1844 goto exit;
1845 }
1846exit:
1847 return status;
1848}
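
/*
 * Usage sketch (illustrative only, not part of the driver): a privileged
 * function can snapshot every XMAC counter in one call.  Field names are
 * assumed to follow vxge-traffic.h:
 *
 *	struct vxge_hw_xmac_stats stats;
 *
 *	if (vxge_hw_device_xmac_stats_get(hldev, &stats) == VXGE_HW_OK)
 *		pr_info("port0 rx frames: %llu\n",
 *			(unsigned long long)stats.port_stats[0].rx_ttl_frms);
 */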
1849
1850/*
1851 * vxge_hw_device_debug_set - Set the debug module mask and level
1852 * This routine is used to dynamically change the debug output
1853 */
1854void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1855 enum vxge_debug_level level, u32 mask)
1856{
1857 if (hldev == NULL)
1858 return;
1859
1860#if defined(VXGE_DEBUG_TRACE_MASK) || \
1861 defined(VXGE_DEBUG_ERR_MASK)
1862 hldev->debug_module_mask = mask;
1863 hldev->debug_level = level;
1864#endif
1865
1866#if defined(VXGE_DEBUG_ERR_MASK)
1867 hldev->level_err = level & VXGE_ERR;
1868#endif
1869
1870#if defined(VXGE_DEBUG_TRACE_MASK)
1871 hldev->level_trace = level & VXGE_TRACE;
1872#endif
1873}
1874
1875/*
1876 * vxge_hw_device_error_level_get - Get the error level
1877 * This routine returns the current error level set
1878 */
1879u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1880{
1881#if defined(VXGE_DEBUG_ERR_MASK)
1882 if (hldev == NULL)
1883 return VXGE_ERR;
1884 else
1885 return hldev->level_err;
1886#else
1887 return 0;
1888#endif
1889}
1890
1891/*
1892 * vxge_hw_device_trace_level_get - Get the trace level
1893 * This routine returns the current trace level set
1894 */
1895u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1896{
1897#if defined(VXGE_DEBUG_TRACE_MASK)
1898 if (hldev == NULL)
1899 return VXGE_TRACE;
1900 else
1901 return hldev->level_trace;
1902#else
1903 return 0;
1904#endif
1905}
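
/*
 * Usage sketch (illustrative): the mask and level set here gate the
 * vxge_debug_* macros at run time.  VXGE_COMPONENT_ALL is assumed to
 * select all modules:
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
 *
 * after which vxge_hw_device_error_level_get(hldev) reports VXGE_ERR.
 */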
1906
1907/*
1908 * vxge_hw_device_getpause_data - Pause frame generation and reception.
1909 * Returns the Pause frame generation and reception capability of the NIC.
1910 */
1911enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1912 u32 port, u32 *tx, u32 *rx)
1913{
1914 u64 val64;
1915 enum vxge_hw_status status = VXGE_HW_OK;
1916
1917 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1918 status = VXGE_HW_ERR_INVALID_DEVICE;
1919 goto exit;
1920 }
1921
1922 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1923 status = VXGE_HW_ERR_INVALID_PORT;
1924 goto exit;
1925 }
1926
1927 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1928 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1929 goto exit;
1930 }
1931
1932 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1933 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1934 *tx = 1;
1935 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1936 *rx = 1;
1937exit:
1938 return status;
1939}
1940
1941/*
1942 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1943 * It can be used to set or reset Pause frame generation or reception
1944 * support of the NIC.
1945 */
1946enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1947 u32 port, u32 tx, u32 rx)
1948{
1949 u64 val64;
1950 enum vxge_hw_status status = VXGE_HW_OK;
1951
1952 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1953 status = VXGE_HW_ERR_INVALID_DEVICE;
1954 goto exit;
1955 }
1956
1957 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1958 status = VXGE_HW_ERR_INVALID_PORT;
1959 goto exit;
1960 }
1961
1962 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1963 hldev->func_id);
1964 if (status != VXGE_HW_OK)
1965 goto exit;
1966
1967 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1968 if (tx)
1969 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1970 else
1971 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1972 if (rx)
1973 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1974 else
1975 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1976
1977 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1978exit:
1979 return status;
1980}
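
/*
 * Usage sketch (illustrative): enable pause frame generation and
 * reception on MAC port 0, then read the setting back:
 *
 *	u32 tx = 0, rx = 0;
 *
 *	if (vxge_hw_device_setpause_data(hldev, 0, 1, 1) == VXGE_HW_OK)
 *		vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
 */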
1981
1982u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1983{
1984 struct pci_dev *dev = hldev->pdev;
1985 u16 lnk;
1986
1987 pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
1988 return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1989}
1990
1991/*
1992 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1993 * This function returns the index of the memory block that owns the RxD block
1994 */
1995static inline u32
1996__vxge_hw_ring_block_memblock_idx(u8 *block)
1997{
1998 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1999}
2000
2001/*
2002 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
2003 * This function stores the memblock index in an RxD block
2004 */
2005static inline void
2006__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
2007{
2008 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
2009}
2010
2011/*
2012 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
2013 * in RxD block
2014 * Stores the DMA address of the next RxD block at the link offset
2015 */
2016static inline void
2017__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
2018{
2019 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
2020}
2021
2022/*
2023 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
2024 * first block
2025 * Returns the dma address of the first RxD block
2026 */
2027static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
2028{
2029 struct vxge_hw_mempool_dma *dma_object;
2030
2031 dma_object = ring->mempool->memblocks_dma_arr;
2032 vxge_assert(dma_object != NULL);
2033
2034 return dma_object->addr;
2035}
2036
2037/*
2038 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
2039 * This function returns the dma address of a given item
2040 */
2041static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
2042 void *item)
2043{
2044 u32 memblock_idx;
2045 void *memblock;
2046 struct vxge_hw_mempool_dma *memblock_dma_object;
2047 ptrdiff_t dma_item_offset;
2048
2049 /* get owner memblock index */
2050 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
2051
2052 /* get owner memblock by memblock index */
2053 memblock = mempoolh->memblocks_arr[memblock_idx];
2054
2055 /* get memblock DMA object by memblock index */
2056 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
2057
2058 /* calculate offset in the memblock of this item */
2059 dma_item_offset = (u8 *)item - (u8 *)memblock;
2060
2061 return memblock_dma_object->addr + dma_item_offset;
2062}
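
/*
 * Worked example: for an item that sits 9 KB into memblock 2, the value
 * returned above is memblocks_dma_arr[2].addr + 0x2400, i.e. the
 * memblock's DMA base plus the item's byte offset within that block.
 */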
2063
2064/*
2065 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
2066 * This function links one RxD block to the next via its next-block pointer
2067 */
2068static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
2069 struct __vxge_hw_ring *ring, u32 from,
2070 u32 to)
2071{
2072 u8 *to_item, *from_item;
2073 dma_addr_t to_dma;
2074
2075 /* get "from" RxD block */
2076 from_item = mempoolh->items_arr[from];
2077 vxge_assert(from_item);
2078
2079 /* get "to" RxD block */
2080 to_item = mempoolh->items_arr[to];
2081 vxge_assert(to_item);
2082
2083 /* get the DMA start address of the "to" RxD block */
2084 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
2085
2086 /* set next pointer for this RxD block to point on
2087 * previous item's DMA start address */
2088 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
2089}
2090
2091/*
2092 * __vxge_hw_ring_mempool_item_alloc - Format an RxD block (mempool
2093 * item-alloc callback)
2094 * This function is the callback passed to __vxge_hw_mempool_create to
2095 * format each RxD block of the ring's memory pool
2096 */
2097static void
2098__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
2099 u32 memblock_index,
2100 struct vxge_hw_mempool_dma *dma_object,
2101 u32 index, u32 is_last)
2102{
2103 u32 i;
2104 void *item = mempoolh->items_arr[index];
2105 struct __vxge_hw_ring *ring =
2106 (struct __vxge_hw_ring *)mempoolh->userdata;
2107
2108 /* format rxds array */
2109 for (i = 0; i < ring->rxds_per_block; i++) {
2110 void *rxdblock_priv;
2111 void *uld_priv;
2112 struct vxge_hw_ring_rxd_1 *rxdp;
2113
2114 u32 reserve_index = ring->channel.reserve_ptr -
2115 (index * ring->rxds_per_block + i + 1);
2116 u32 memblock_item_idx;
2117
2118 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
2119 i * ring->rxd_size;
2120
2121 /* Note: memblock_item_idx is index of the item within
2122 * the memblock. For instance, in case of three RxD-blocks
2123 * per memblock this value can be 0, 1 or 2. */
2124 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
2125 memblock_index, item,
2126 &memblock_item_idx);
2127
2128 rxdp = ring->channel.reserve_arr[reserve_index];
2129
2130 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
2131
2132 /* pre-format Host_Control */
2133 rxdp->host_control = (u64)(size_t)uld_priv;
2134 }
2135
2136 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
2137
2138 if (is_last) {
2139 /* link last one with first one */
2140 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
2141 }
2142
2143 if (index > 0) {
2144 /* link this RxD block with previous one */
2145 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
2146 }
2147}
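
/*
 * Worked example of the reserve_index math above: the reserve array is
 * filled from the top while blocks are walked from the bottom, so RxD i
 * of block "index" lands at reserve_ptr - (index * rxds_per_block + i + 1);
 * RxD 0 of block 0 therefore occupies the last slot,
 * reserve_arr[reserve_ptr - 1].
 */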
2148
2149/*
2150 * vxge_hw_ring_replenish - Initial replenish of RxDs
2151 * This function replenishes the RxDs from reserve array to work array
2152 */
2153enum vxge_hw_status
2154vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
2155{
2156 void *rxd;
2157 struct __vxge_hw_channel *channel;
2158 enum vxge_hw_status status = VXGE_HW_OK;
2159
2160 channel = &ring->channel;
2161
2162 while (vxge_hw_channel_dtr_count(channel) > 0) {
2163
2164 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
2165
2166 vxge_assert(status == VXGE_HW_OK);
2167
2168 if (ring->rxd_init) {
2169 status = ring->rxd_init(rxd, channel->userdata);
2170 if (status != VXGE_HW_OK) {
2171 vxge_hw_ring_rxd_free(ring, rxd);
2172 goto exit;
2173 }
2174 }
2175
2176 vxge_hw_ring_rxd_post(ring, rxd);
2177 }
2178 status = VXGE_HW_OK;
2179exit:
2180 return status;
2181}
2182
2183/*
2184 * __vxge_hw_channel_allocate - Allocate memory for channel
2185 * This function allocates required memory for the channel and various arrays
2186 * in the channel
2187 */
2188static struct __vxge_hw_channel *
2189__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2190 enum __vxge_hw_channel_type type,
2191 u32 length, u32 per_dtr_space,
2192 void *userdata)
2193{
2194 struct __vxge_hw_channel *channel;
2195 struct __vxge_hw_device *hldev;
2196 int size = 0;
2197 u32 vp_id;
2198
2199 hldev = vph->vpath->hldev;
2200 vp_id = vph->vpath->vp_id;
2201
2202 switch (type) {
2203 case VXGE_HW_CHANNEL_TYPE_FIFO:
2204 size = sizeof(struct __vxge_hw_fifo);
2205 break;
2206 case VXGE_HW_CHANNEL_TYPE_RING:
2207 size = sizeof(struct __vxge_hw_ring);
2208 break;
2209 default:
2210 break;
2211 }
2212
2213 channel = kzalloc(size, GFP_KERNEL);
2214 if (channel == NULL)
2215 goto exit0;
2216 INIT_LIST_HEAD(&channel->item);
2217
2218 channel->common_reg = hldev->common_reg;
2219 channel->first_vp_id = hldev->first_vp_id;
2220 channel->type = type;
2221 channel->devh = hldev;
2222 channel->vph = vph;
2223 channel->userdata = userdata;
2224 channel->per_dtr_space = per_dtr_space;
2225 channel->length = length;
2226 channel->vp_id = vp_id;
2227
2228 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2229 if (channel->work_arr == NULL)
2230 goto exit1;
2231
2232 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2233 if (channel->free_arr == NULL)
2234 goto exit1;
2235 channel->free_ptr = length;
2236
2237 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2238 if (channel->reserve_arr == NULL)
2239 goto exit1;
2240 channel->reserve_ptr = length;
2241 channel->reserve_top = 0;
2242
2243 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2244 if (channel->orig_arr == NULL)
2245 goto exit1;
2246
2247 return channel;
2248exit1:
2249 __vxge_hw_channel_free(channel);
2250
2251exit0:
2252 return NULL;
2253}
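
/*
 * The four arrays above implement the channel's descriptor lifecycle:
 * descriptors are handed out from reserve_arr, tracked in work_arr while
 * owned by the hardware and staged in free_arr before being recycled;
 * orig_arr is assumed to preserve the original ordering so that
 * __vxge_hw_channel_reset() can restore it.
 */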
2254
2255/*
2256 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2257 * Adds a block to block pool
2258 */
2259static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2260 void *block_addr,
2261 u32 length,
2262 struct pci_dev *dma_h,
2263 struct pci_dev *acc_handle)
2264{
2265 struct __vxge_hw_blockpool *blockpool;
2266 struct __vxge_hw_blockpool_entry *entry = NULL;
2267 dma_addr_t dma_addr;
2268 enum vxge_hw_status status = VXGE_HW_OK;
2270
2271 blockpool = &devh->block_pool;
2272
2273 if (block_addr == NULL) {
2274 blockpool->req_out--;
2275 status = VXGE_HW_FAIL;
2276 goto exit;
2277 }
2278
2279 dma_addr = pci_map_single(devh->pdev, block_addr, length,
2280 PCI_DMA_BIDIRECTIONAL);
2281
2282 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2283 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2284 blockpool->req_out--;
2285 status = VXGE_HW_FAIL;
2286 goto exit;
2287 }
2288
2289 if (!list_empty(&blockpool->free_entry_list))
2290 entry = (struct __vxge_hw_blockpool_entry *)
2291 list_first_entry(&blockpool->free_entry_list,
2292 struct __vxge_hw_blockpool_entry,
2293 item);
2294
2295 if (entry == NULL)
2296 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
2297 else
2298 list_del(&entry->item);
2299
2300 if (entry != NULL) {
2301 entry->length = length;
2302 entry->memblock = block_addr;
2303 entry->dma_addr = dma_addr;
2304 entry->acc_handle = acc_handle;
2305 entry->dma_handle = dma_h;
2306 list_add(&entry->item, &blockpool->free_block_list);
2307 blockpool->pool_size++;
2308 status = VXGE_HW_OK;
2309 } else
2310 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2311
2312 blockpool->req_out--;
2315exit:
2316 return;
2317}
2318
2319static inline void
2320vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
2321{
2322 gfp_t flags;
2323 void *vaddr;
2324
2325 if (in_interrupt())
2326 flags = GFP_ATOMIC | GFP_DMA;
2327 else
2328 flags = GFP_KERNEL | GFP_DMA;
2329
2330 vaddr = kmalloc(size, flags);
2331
2332 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
2333}
2334
2335/*
2336 * __vxge_hw_blockpool_blocks_add - Request additional blocks
2337 */
2338static
2339void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2340{
2341 u32 nreq = 0, i;
2342
2343 if ((blockpool->pool_size + blockpool->req_out) <
2344 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2345 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2346 blockpool->req_out += nreq;
2347 }
2348
2349 for (i = 0; i < nreq; i++)
2350 vxge_os_dma_malloc_async(
2351 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2352 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2353}
2354
2355/*
2356 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
2357 * Allocates a block of memory of given size, either from block pool
2358 * or by calling vxge_os_dma_malloc()
2359 */
2360static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2361 struct vxge_hw_mempool_dma *dma_object)
2362{
2363 struct __vxge_hw_blockpool_entry *entry = NULL;
2364 struct __vxge_hw_blockpool *blockpool;
2365 void *memblock = NULL;
2366 enum vxge_hw_status status = VXGE_HW_OK;
2367
2368 blockpool = &devh->block_pool;
2369
2370 if (size != blockpool->block_size) {
2371
2372 memblock = vxge_os_dma_malloc(devh->pdev, size,
2373 &dma_object->handle,
2374 &dma_object->acc_handle);
2375
2376 if (memblock == NULL) {
2377 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2378 goto exit;
2379 }
2380
2381 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
2382 PCI_DMA_BIDIRECTIONAL);
2383
2384 if (unlikely(pci_dma_mapping_error(devh->pdev,
2385 dma_object->addr))) {
2386 vxge_os_dma_free(devh->pdev, memblock,
2387 &dma_object->acc_handle);
2388 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2389 goto exit;
2390 }
2391
2392 } else {
2393
2394 if (!list_empty(&blockpool->free_block_list))
2395 entry = (struct __vxge_hw_blockpool_entry *)
2396 list_first_entry(&blockpool->free_block_list,
2397 struct __vxge_hw_blockpool_entry,
2398 item);
2399
2400 if (entry != NULL) {
2401 list_del(&entry->item);
2402 dma_object->addr = entry->dma_addr;
2403 dma_object->handle = entry->dma_handle;
2404 dma_object->acc_handle = entry->acc_handle;
2405 memblock = entry->memblock;
2406
2407 list_add(&entry->item,
2408 &blockpool->free_entry_list);
2409 blockpool->pool_size--;
2410 }
2411
2412 if (memblock != NULL)
2413 __vxge_hw_blockpool_blocks_add(blockpool);
2414 }
2415exit:
2416 return memblock;
2417}
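
/*
 * Note: requests of exactly blockpool->block_size bytes are served from
 * the pre-mapped free list (and trigger an asynchronous refill), while
 * any other size falls back to vxge_os_dma_malloc() plus pci_map_single().
 * Callers are expected to release the memory with __vxge_hw_blockpool_free()
 * using the same size so it is returned through the matching path.
 */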
2418
2419/*
2420 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
2421 */
2422static void
2423__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2424{
2425 struct list_head *p, *n;
2426
2427 list_for_each_safe(p, n, &blockpool->free_block_list) {
2428
2429 if (blockpool->pool_size < blockpool->pool_max)
2430 break;
2431
2432 pci_unmap_single(
2433 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2434 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2435 ((struct __vxge_hw_blockpool_entry *)p)->length,
2436 PCI_DMA_BIDIRECTIONAL);
2437
2438 vxge_os_dma_free(
2439 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2440 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2441 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2442
2443 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
2444
2445 list_add(p, &blockpool->free_entry_list);
2446
2447 blockpool->pool_size--;
2448
2449 }
2450}
2451
2452/*
2453 * __vxge_hw_blockpool_free - Frees the memory allocated with
2454 * __vxge_hw_blockpool_malloc
2455 */
2456static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2457 void *memblock, u32 size,
2458 struct vxge_hw_mempool_dma *dma_object)
2459{
2460 struct __vxge_hw_blockpool_entry *entry = NULL;
2461 struct __vxge_hw_blockpool *blockpool;
2462 enum vxge_hw_status status = VXGE_HW_OK;
2463
2464 blockpool = &devh->block_pool;
2465
2466 if (size != blockpool->block_size) {
2467 pci_unmap_single(devh->pdev, dma_object->addr, size,
2468 PCI_DMA_BIDIRECTIONAL);
2469 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2470 } else {
2471
2472 if (!list_empty(&blockpool->free_entry_list))
2473 entry = (struct __vxge_hw_blockpool_entry *)
2474 list_first_entry(&blockpool->free_entry_list,
2475 struct __vxge_hw_blockpool_entry,
2476 item);
2477
2478 if (entry == NULL)
2479 entry = vmalloc(sizeof(
2480 struct __vxge_hw_blockpool_entry));
2481 else
2482 list_del(&entry->item);
2483
2484 if (entry != NULL) {
2485 entry->length = size;
2486 entry->memblock = memblock;
2487 entry->dma_addr = dma_object->addr;
2488 entry->acc_handle = dma_object->acc_handle;
2489 entry->dma_handle = dma_object->handle;
2490 list_add(&entry->item,
2491 &blockpool->free_block_list);
2492 blockpool->pool_size++;
2493 status = VXGE_HW_OK;
2494 } else
2495 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2496
2497 if (status == VXGE_HW_OK)
2498 __vxge_hw_blockpool_blocks_remove(blockpool);
2499 }
2500}
2501
2502/*
2503 * __vxge_hw_mempool_destroy
2504 */
2505static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2506{
2507 u32 i, j;
2508 struct __vxge_hw_device *devh = mempool->devh;
2509
2510 for (i = 0; i < mempool->memblocks_allocated; i++) {
2511 struct vxge_hw_mempool_dma *dma_object;
2512
2513 vxge_assert(mempool->memblocks_arr[i]);
2514 vxge_assert(mempool->memblocks_dma_arr + i);
2515
2516 dma_object = mempool->memblocks_dma_arr + i;
2517
2518 for (j = 0; j < mempool->items_per_memblock; j++) {
2519 u32 index = i * mempool->items_per_memblock + j;
2520
2521 /* to skip the last partially filled (if any) memblock */
2522 if (index >= mempool->items_current)
2523 break;
2524 }
2525
2526 vfree(mempool->memblocks_priv_arr[i]);
2527
2528 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2529 mempool->memblock_size, dma_object);
2530 }
2531
2532 vfree(mempool->items_arr);
2533 vfree(mempool->memblocks_dma_arr);
2534 vfree(mempool->memblocks_priv_arr);
2535 vfree(mempool->memblocks_arr);
2536 vfree(mempool);
2537}
2538
2539/*
2540 * __vxge_hw_mempool_grow
2541 * Grows the mempool by allocating up to %num_allocate more memblocks.
2542 */
2543static enum vxge_hw_status
2544__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2545 u32 *num_allocated)
2546{
2547 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2548 u32 n_items = mempool->items_per_memblock;
2549 u32 start_block_idx = mempool->memblocks_allocated;
2550 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2551 enum vxge_hw_status status = VXGE_HW_OK;
2552
2553 *num_allocated = 0;
2554
2555 if (end_block_idx > mempool->memblocks_max) {
2556 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2557 goto exit;
2558 }
2559
2560 for (i = start_block_idx; i < end_block_idx; i++) {
2561 u32 j;
2562 u32 is_last = ((end_block_idx - 1) == i);
2563 struct vxge_hw_mempool_dma *dma_object =
2564 mempool->memblocks_dma_arr + i;
2565 void *the_memblock;
2566
2567 /* allocate memblock's private part. Each DMA memblock
2568 * has a space allocated for item's private usage upon
2569 * mempool's user request. Each time mempool grows, it will
2570 * allocate new memblock and its private part at once.
2571 * This helps to minimize memory usage a lot. */
2572 mempool->memblocks_priv_arr[i] =
2573 vzalloc(mempool->items_priv_size * n_items);
2574 if (mempool->memblocks_priv_arr[i] == NULL) {
2575 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2576 goto exit;
2577 }
2578
2579 /* allocate DMA-capable memblock */
2580 mempool->memblocks_arr[i] =
2581 __vxge_hw_blockpool_malloc(mempool->devh,
2582 mempool->memblock_size, dma_object);
2583 if (mempool->memblocks_arr[i] == NULL) {
2584 vfree(mempool->memblocks_priv_arr[i]);
2585 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2586 goto exit;
2587 }
2588
2589 (*num_allocated)++;
2590 mempool->memblocks_allocated++;
2591
2592 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2593
2594 the_memblock = mempool->memblocks_arr[i];
2595
2596 /* fill the items hash array */
2597 for (j = 0; j < n_items; j++) {
2598 u32 index = i * n_items + j;
2599
2600 if (first_time && index >= mempool->items_initial)
2601 break;
2602
2603 mempool->items_arr[index] =
2604 ((char *)the_memblock + j*mempool->item_size);
2605
2606 /* let caller to do more job on each item */
2607 if (mempool->item_func_alloc != NULL)
2608 mempool->item_func_alloc(mempool, i,
2609 dma_object, index, is_last);
2610
2611 mempool->items_current = index + 1;
2612 }
2613
2614 if (first_time && mempool->items_current ==
2615 mempool->items_initial)
2616 break;
2617 }
2618exit:
2619 return status;
2620}
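
/*
 * Worked example: with 4 KB memblocks and 4 KB items (the ring case
 * below), items_per_memblock is 1, so growing by num_allocate adds
 * num_allocate RxD blocks, each with its private area allocated alongside.
 */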
2621
2622/*
2623 * __vxge_hw_mempool_create
2624 * This function will create memory pool object. Pool may grow but will
2625 * never shrink. Pool consists of number of dynamically allocated blocks
2626 * with size enough to hold %items_initial number of items. Memory is
2627 * DMA-able but client must map/unmap before interoperating with the device.
2628 */
2629static struct vxge_hw_mempool *
2630__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
2631 u32 memblock_size,
2632 u32 item_size,
2633 u32 items_priv_size,
2634 u32 items_initial,
2635 u32 items_max,
2636 struct vxge_hw_mempool_cbs *mp_callback,
2637 void *userdata)
2638{
2639 enum vxge_hw_status status = VXGE_HW_OK;
2640 u32 memblocks_to_allocate;
2641 struct vxge_hw_mempool *mempool = NULL;
2642 u32 allocated;
2643
2644 if (memblock_size < item_size) {
2645 status = VXGE_HW_FAIL;
2646 goto exit;
2647 }
2648
2649 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
2650 if (mempool == NULL) {
2651 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2652 goto exit;
2653 }
2654
2655 mempool->devh = devh;
2656 mempool->memblock_size = memblock_size;
2657 mempool->items_max = items_max;
2658 mempool->items_initial = items_initial;
2659 mempool->item_size = item_size;
2660 mempool->items_priv_size = items_priv_size;
2661 mempool->item_func_alloc = mp_callback->item_func_alloc;
2662 mempool->userdata = userdata;
2663
2664 mempool->memblocks_allocated = 0;
2665
2666 mempool->items_per_memblock = memblock_size / item_size;
2667
2668 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2669 mempool->items_per_memblock;
2670
2671 /* allocate array of memblocks */
2672 mempool->memblocks_arr =
2673 vzalloc(sizeof(void *) * mempool->memblocks_max);
2674 if (mempool->memblocks_arr == NULL) {
2675 __vxge_hw_mempool_destroy(mempool);
2676 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2677 mempool = NULL;
2678 goto exit;
2679 }
2680
2681 /* allocate array of private parts of items per memblocks */
2682 mempool->memblocks_priv_arr =
2683 vzalloc(sizeof(void *) * mempool->memblocks_max);
2684 if (mempool->memblocks_priv_arr == NULL) {
2685 __vxge_hw_mempool_destroy(mempool);
2686 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2687 mempool = NULL;
2688 goto exit;
2689 }
2690
2691 /* allocate array of memblocks DMA objects */
2692 mempool->memblocks_dma_arr =
2693 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2694 mempool->memblocks_max);
2695 if (mempool->memblocks_dma_arr == NULL) {
2696 __vxge_hw_mempool_destroy(mempool);
2697 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2698 mempool = NULL;
2699 goto exit;
2700 }
2701
2702 /* allocate hash array of items */
2703 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
2704 if (mempool->items_arr == NULL) {
2705 __vxge_hw_mempool_destroy(mempool);
2706 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2707 mempool = NULL;
2708 goto exit;
2709 }
2710
2711 /* calculate initial number of memblocks */
2712 memblocks_to_allocate = (mempool->items_initial +
2713 mempool->items_per_memblock - 1) /
2714 mempool->items_per_memblock;
2715
2716 /* pre-allocate the mempool */
2717 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2718 &allocated);
2719 if (status != VXGE_HW_OK) {
2720 __vxge_hw_mempool_destroy(mempool);
2721 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2722 mempool = NULL;
2723 goto exit;
2724 }
2725
2726exit:
2727 return mempool;
2728}
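
/*
 * Usage sketch (illustrative): the ring code below creates its pool with
 * block-sized items so that every mempool item is one RxD block:
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *
 *	cbs.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
 *	pool = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE,
 *			VXGE_HW_BLOCK_SIZE, priv_size, blocks, blocks,
 *			&cbs, ring);
 */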
2729
2730/*
2731 * __vxge_hw_ring_abort - Abort the outstanding RxDs
2732 * This function completes and frees all posted RxDs of the ring
2733 */
2734static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
2735{
2736 void *rxdh;
2737 struct __vxge_hw_channel *channel;
2738
2739 channel = &ring->channel;
2740
2741 for (;;) {
2742 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2743
2744 if (rxdh == NULL)
2745 break;
2746
2747 vxge_hw_channel_dtr_complete(channel);
2748
2749 if (ring->rxd_term)
2750 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2751 channel->userdata);
2752
2753 vxge_hw_channel_dtr_free(channel, rxdh);
2754 }
2755
2756 return VXGE_HW_OK;
2757}
2758
2759/*
2760 * __vxge_hw_ring_reset - Resets the ring
2761 * This function resets the ring during vpath reset operation
2762 */
2763static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2764{
2765 enum vxge_hw_status status = VXGE_HW_OK;
2766 struct __vxge_hw_channel *channel;
2767
2768 channel = &ring->channel;
2769
2770 __vxge_hw_ring_abort(ring);
2771
2772 status = __vxge_hw_channel_reset(channel);
2773
2774 if (status != VXGE_HW_OK)
2775 goto exit;
2776
2777 if (ring->rxd_init) {
2778 status = vxge_hw_ring_replenish(ring);
2779 if (status != VXGE_HW_OK)
2780 goto exit;
2781 }
2782exit:
2783 return status;
2784}
2785
2786/*
2787 * __vxge_hw_ring_delete - Removes the ring
2788 * This function frees up the memory pool and removes the ring
2789 */
2790static enum vxge_hw_status
2791__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
2792{
2793 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2794
2795 __vxge_hw_ring_abort(ring);
2796
2797 if (ring->mempool)
2798 __vxge_hw_mempool_destroy(ring->mempool);
2799
2800 vp->vpath->ringh = NULL;
2801 __vxge_hw_channel_free(&ring->channel);
2802
2803 return VXGE_HW_OK;
2804}
2805
2806/*
2807 * __vxge_hw_ring_create - Create a Ring
2808 * This function creates Ring and initializes it.
2809 */
2810static enum vxge_hw_status
2811__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2812 struct vxge_hw_ring_attr *attr)
2813{
2814 enum vxge_hw_status status = VXGE_HW_OK;
2815 struct __vxge_hw_ring *ring;
2816 u32 ring_length;
2817 struct vxge_hw_ring_config *config;
2818 struct __vxge_hw_device *hldev;
2819 u32 vp_id;
2820 struct vxge_hw_mempool_cbs ring_mp_callback;
2821
2822 if ((vp == NULL) || (attr == NULL)) {
2823 status = VXGE_HW_FAIL;
2824 goto exit;
2825 }
2826
2827 hldev = vp->vpath->hldev;
2828 vp_id = vp->vpath->vp_id;
2829
2830 config = &hldev->config.vp_config[vp_id].ring;
2831
2832 ring_length = config->ring_blocks *
2833 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2834
2835 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
2836 VXGE_HW_CHANNEL_TYPE_RING,
2837 ring_length,
2838 attr->per_rxd_space,
2839 attr->userdata);
2840 if (ring == NULL) {
2841 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2842 goto exit;
2843 }
2844
2845 vp->vpath->ringh = ring;
2846 ring->vp_id = vp_id;
2847 ring->vp_reg = vp->vpath->vp_reg;
2848 ring->common_reg = hldev->common_reg;
2849 ring->stats = &vp->vpath->sw_stats->ring_stats;
2850 ring->config = config;
2851 ring->callback = attr->callback;
2852 ring->rxd_init = attr->rxd_init;
2853 ring->rxd_term = attr->rxd_term;
2854 ring->buffer_mode = config->buffer_mode;
2855 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2856 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2857 ring->rxds_limit = config->rxds_limit;
2858
2859 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
2860 ring->rxd_priv_size =
2861 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
2862 ring->per_rxd_space = attr->per_rxd_space;
2863
2864 ring->rxd_priv_size =
2865 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2866 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2867
2868 /* how many RxDs can fit into one block. Depends on configured
2869 * buffer_mode. */
2870 ring->rxds_per_block =
2871 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2872
2873 /* calculate actual RxD block private size */
2874 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2875 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2876 ring->mempool = __vxge_hw_mempool_create(hldev,
2877 VXGE_HW_BLOCK_SIZE,
2878 VXGE_HW_BLOCK_SIZE,
2879 ring->rxdblock_priv_size,
2880 ring->config->ring_blocks,
2881 ring->config->ring_blocks,
2882 &ring_mp_callback,
2883 ring);
2884 if (ring->mempool == NULL) {
2885 __vxge_hw_ring_delete(vp);
2886 return VXGE_HW_ERR_OUT_OF_MEMORY;
2887 }
2888
2889 status = __vxge_hw_channel_initialize(&ring->channel);
2890 if (status != VXGE_HW_OK) {
2891 __vxge_hw_ring_delete(vp);
2892 goto exit;
2893 }
2894
2895 /* Note:
2896 * Specifying rxd_init callback means two things:
2897 * 1) rxds need to be initialized by driver at channel-open time;
2898 * 2) rxds need to be posted at channel-open time
2899 * (that's what the initial_replenish() below does)
2900 * Currently we don't have a case when the 1) is done without the 2).
2901 */
2902 if (ring->rxd_init) {
2903 status = vxge_hw_ring_replenish(ring);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908 }
2909
2910 /* initial replenish will increment the counter in its post() routine,
2911 * we have to reset it */
2912 ring->stats->common_stats.usage_cnt = 0;
2913exit:
2914 return status;
2915}
2916
2917/*
2918 * vxge_hw_device_config_default_get - Initialize device config with defaults.
2919 * Initialize Titan device config with default values.
2920 */
2921enum vxge_hw_status __devinit
2922vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2923{
2924 u32 i;
2925
2926 device_config->dma_blockpool_initial =
2927 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2928 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2929 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2930 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2931 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2932 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2933 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
2934
2935 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2936 device_config->vp_config[i].vp_id = i;
2937
2938 device_config->vp_config[i].min_bandwidth =
2939 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2940
2941 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2942
2943 device_config->vp_config[i].ring.ring_blocks =
2944 VXGE_HW_DEF_RING_BLOCKS;
2945
2946 device_config->vp_config[i].ring.buffer_mode =
2947 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2948
2949 device_config->vp_config[i].ring.scatter_mode =
2950 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2951
2952 device_config->vp_config[i].ring.rxds_limit =
2953 VXGE_HW_DEF_RING_RXDS_LIMIT;
2954
2955 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2956
2957 device_config->vp_config[i].fifo.fifo_blocks =
2958 VXGE_HW_MIN_FIFO_BLOCKS;
2959
2960 device_config->vp_config[i].fifo.max_frags =
2961 VXGE_HW_MAX_FIFO_FRAGS;
2962
2963 device_config->vp_config[i].fifo.memblock_size =
2964 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2965
2966 device_config->vp_config[i].fifo.alignment_size =
2967 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2968
2969 device_config->vp_config[i].fifo.intr =
2970 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2971
2972 device_config->vp_config[i].fifo.no_snoop_bits =
2973 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2974 device_config->vp_config[i].tti.intr_enable =
2975 VXGE_HW_TIM_INTR_DEFAULT;
2976
2977 device_config->vp_config[i].tti.btimer_val =
2978 VXGE_HW_USE_FLASH_DEFAULT;
2979
2980 device_config->vp_config[i].tti.timer_ac_en =
2981 VXGE_HW_USE_FLASH_DEFAULT;
2982
2983 device_config->vp_config[i].tti.timer_ci_en =
2984 VXGE_HW_USE_FLASH_DEFAULT;
2985
2986 device_config->vp_config[i].tti.timer_ri_en =
2987 VXGE_HW_USE_FLASH_DEFAULT;
2988
2989 device_config->vp_config[i].tti.rtimer_val =
2990 VXGE_HW_USE_FLASH_DEFAULT;
2991
2992 device_config->vp_config[i].tti.util_sel =
2993 VXGE_HW_USE_FLASH_DEFAULT;
2994
2995 device_config->vp_config[i].tti.ltimer_val =
2996 VXGE_HW_USE_FLASH_DEFAULT;
2997
2998 device_config->vp_config[i].tti.urange_a =
2999 VXGE_HW_USE_FLASH_DEFAULT;
3000
3001 device_config->vp_config[i].tti.uec_a =
3002 VXGE_HW_USE_FLASH_DEFAULT;
3003
3004 device_config->vp_config[i].tti.urange_b =
3005 VXGE_HW_USE_FLASH_DEFAULT;
3006
3007 device_config->vp_config[i].tti.uec_b =
3008 VXGE_HW_USE_FLASH_DEFAULT;
3009
3010 device_config->vp_config[i].tti.urange_c =
3011 VXGE_HW_USE_FLASH_DEFAULT;
3012
3013 device_config->vp_config[i].tti.uec_c =
3014 VXGE_HW_USE_FLASH_DEFAULT;
3015
3016 device_config->vp_config[i].tti.uec_d =
3017 VXGE_HW_USE_FLASH_DEFAULT;
3018
3019 device_config->vp_config[i].rti.intr_enable =
3020 VXGE_HW_TIM_INTR_DEFAULT;
3021
3022 device_config->vp_config[i].rti.btimer_val =
3023 VXGE_HW_USE_FLASH_DEFAULT;
3024
3025 device_config->vp_config[i].rti.timer_ac_en =
3026 VXGE_HW_USE_FLASH_DEFAULT;
3027
3028 device_config->vp_config[i].rti.timer_ci_en =
3029 VXGE_HW_USE_FLASH_DEFAULT;
3030
3031 device_config->vp_config[i].rti.timer_ri_en =
3032 VXGE_HW_USE_FLASH_DEFAULT;
3033
3034 device_config->vp_config[i].rti.rtimer_val =
3035 VXGE_HW_USE_FLASH_DEFAULT;
3036
3037 device_config->vp_config[i].rti.util_sel =
3038 VXGE_HW_USE_FLASH_DEFAULT;
3039
3040 device_config->vp_config[i].rti.ltimer_val =
3041 VXGE_HW_USE_FLASH_DEFAULT;
3042
3043 device_config->vp_config[i].rti.urange_a =
3044 VXGE_HW_USE_FLASH_DEFAULT;
3045
3046 device_config->vp_config[i].rti.uec_a =
3047 VXGE_HW_USE_FLASH_DEFAULT;
3048
3049 device_config->vp_config[i].rti.urange_b =
3050 VXGE_HW_USE_FLASH_DEFAULT;
3051
3052 device_config->vp_config[i].rti.uec_b =
3053 VXGE_HW_USE_FLASH_DEFAULT;
3054
3055 device_config->vp_config[i].rti.urange_c =
3056 VXGE_HW_USE_FLASH_DEFAULT;
3057
3058 device_config->vp_config[i].rti.uec_c =
3059 VXGE_HW_USE_FLASH_DEFAULT;
3060
3061 device_config->vp_config[i].rti.uec_d =
3062 VXGE_HW_USE_FLASH_DEFAULT;
3063
3064 device_config->vp_config[i].mtu =
3065 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
3066
3067 device_config->vp_config[i].rpa_strip_vlan_tag =
3068 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
3069 }
3070
3071 return VXGE_HW_OK;
3072}
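
/*
 * Usage sketch (illustrative): a probe routine typically fetches the
 * defaults and then overrides selected fields before initializing the
 * device; VXGE_HW_INTR_MODE_MSIX is assumed to be defined in
 * vxge-config.h:
 *
 *	vxge_hw_device_config_default_get(device_config);
 *	device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
 */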
3073
3074/*
3075 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
3076 * Set the swapper bits appropriately for the vpath.
3077 */
3078static enum vxge_hw_status
3079__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
3080{
3081#ifndef __BIG_ENDIAN
3082 u64 val64;
3083
3084 val64 = readq(&vpath_reg->vpath_general_cfg1);
3085 wmb();
3086 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
3087 writeq(val64, &vpath_reg->vpath_general_cfg1);
3088 wmb();
3089#endif
3090 return VXGE_HW_OK;
3091}
3092
3093/*
3094 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
3095 * Set the swapper bits appropriately for the vpath.
3096 */
3097static enum vxge_hw_status
3098__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
3099 struct vxge_hw_vpath_reg __iomem *vpath_reg)
3100{
3101 u64 val64;
3102
3103 val64 = readq(&legacy_reg->pifm_wr_swap_en);
3104
3105 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
3106 val64 = readq(&vpath_reg->kdfcctl_cfg0);
3107 wmb();
3108
3109 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
3110 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
3111 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
3112
3113 writeq(val64, &vpath_reg->kdfcctl_cfg0);
3114 wmb();
3115 }
3116
3117 return VXGE_HW_OK;
3118}
3119
3120/*
3121 * vxge_hw_mgmt_reg_read - Read Titan register.
3122 */
3123enum vxge_hw_status
3124vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
3125 enum vxge_hw_mgmt_reg_type type,
3126 u32 index, u32 offset, u64 *value)
3127{
3128 enum vxge_hw_status status = VXGE_HW_OK;
3129
3130 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3131 status = VXGE_HW_ERR_INVALID_DEVICE;
3132 goto exit;
3133 }
3134
3135 switch (type) {
3136 case vxge_hw_mgmt_reg_type_legacy:
3137 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3138 status = VXGE_HW_ERR_INVALID_OFFSET;
3139 break;
3140 }
3141 *value = readq((void __iomem *)hldev->legacy_reg + offset);
3142 break;
3143 case vxge_hw_mgmt_reg_type_toc:
3144 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3145 status = VXGE_HW_ERR_INVALID_OFFSET;
3146 break;
3147 }
3148 *value = readq((void __iomem *)hldev->toc_reg + offset);
3149 break;
3150 case vxge_hw_mgmt_reg_type_common:
3151 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3152 status = VXGE_HW_ERR_INVALID_OFFSET;
3153 break;
3154 }
3155 *value = readq((void __iomem *)hldev->common_reg + offset);
3156 break;
3157 case vxge_hw_mgmt_reg_type_mrpcim:
3158 if (!(hldev->access_rights &
3159 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3160 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3161 break;
3162 }
3163 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3164 status = VXGE_HW_ERR_INVALID_OFFSET;
3165 break;
3166 }
3167 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
3168 break;
3169 case vxge_hw_mgmt_reg_type_srpcim:
3170 if (!(hldev->access_rights &
3171 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3172 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3173 break;
3174 }
3175 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3176 status = VXGE_HW_ERR_INVALID_INDEX;
3177 break;
3178 }
3179 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3180 status = VXGE_HW_ERR_INVALID_OFFSET;
3181 break;
3182 }
3183 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
3184 offset);
3185 break;
3186 case vxge_hw_mgmt_reg_type_vpmgmt:
3187 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3188 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3189 status = VXGE_HW_ERR_INVALID_INDEX;
3190 break;
3191 }
3192 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3193 status = VXGE_HW_ERR_INVALID_OFFSET;
3194 break;
3195 }
3196 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
3197 offset);
3198 break;
3199 case vxge_hw_mgmt_reg_type_vpath:
3200 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
3201 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3202 status = VXGE_HW_ERR_INVALID_INDEX;
3203 break;
3204 }
3209 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3210 status = VXGE_HW_ERR_INVALID_OFFSET;
3211 break;
3212 }
3213 *value = readq((void __iomem *)hldev->vpath_reg[index] +
3214 offset);
3215 break;
3216 default:
3217 status = VXGE_HW_ERR_INVALID_TYPE;
3218 break;
3219 }
3220
3221exit:
3222 return status;
3223}
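
/*
 * Usage sketch (illustrative): read the first quadword of vpath 0's
 * register space, subject to the access-right and range checks above:
 *
 *	u64 val;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *				       0, 0, &val);
 */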
3224
3225/*
3226 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
3227 */
3228enum vxge_hw_status
3229vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3230{
3231 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3232 enum vxge_hw_status status = VXGE_HW_OK;
3233 int i = 0, j = 0;
3234
3235 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3236 if (!((vpath_mask) & vxge_mBIT(i)))
3237 continue;
3238 vpmgmt_reg = hldev->vpmgmt_reg[i];
3239 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
3240 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
3241 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
3242 return VXGE_HW_FAIL;
3243 }
3244 }
3245 return status;
3246}
3247/*
3248 * vxge_hw_mgmt_reg_write - Write Titan register.
3249 */
3250enum vxge_hw_status
3251vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
3252 enum vxge_hw_mgmt_reg_type type,
3253 u32 index, u32 offset, u64 value)
3254{
3255 enum vxge_hw_status status = VXGE_HW_OK;
3256
3257 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3258 status = VXGE_HW_ERR_INVALID_DEVICE;
3259 goto exit;
3260 }
3261
3262 switch (type) {
3263 case vxge_hw_mgmt_reg_type_legacy:
3264 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3265 status = VXGE_HW_ERR_INVALID_OFFSET;
3266 break;
3267 }
3268 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
3269 break;
3270 case vxge_hw_mgmt_reg_type_toc:
3271 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3272 status = VXGE_HW_ERR_INVALID_OFFSET;
3273 break;
3274 }
3275 writeq(value, (void __iomem *)hldev->toc_reg + offset);
3276 break;
3277 case vxge_hw_mgmt_reg_type_common:
3278 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3279 status = VXGE_HW_ERR_INVALID_OFFSET;
3280 break;
3281 }
3282 writeq(value, (void __iomem *)hldev->common_reg + offset);
3283 break;
3284 case vxge_hw_mgmt_reg_type_mrpcim:
3285 if (!(hldev->access_rights &
3286 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3287 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3288 break;
3289 }
3290 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3291 status = VXGE_HW_ERR_INVALID_OFFSET;
3292 break;
3293 }
3294 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
3295 break;
3296 case vxge_hw_mgmt_reg_type_srpcim:
3297 if (!(hldev->access_rights &
3298 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3299 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3300 break;
3301 }
3302 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3303 status = VXGE_HW_ERR_INVALID_INDEX;
3304 break;
3305 }
3306 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3307 status = VXGE_HW_ERR_INVALID_OFFSET;
3308 break;
3309 }
3310 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
3311 offset);
3312
3313 break;
3314 case vxge_hw_mgmt_reg_type_vpmgmt:
3315 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3316 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3317 status = VXGE_HW_ERR_INVALID_INDEX;
3318 break;
3319 }
3320 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3321 status = VXGE_HW_ERR_INVALID_OFFSET;
3322 break;
3323 }
3324 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
3325 offset);
3326 break;
3327 case vxge_hw_mgmt_reg_type_vpath:
3328 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
3329 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3330 status = VXGE_HW_ERR_INVALID_INDEX;
3331 break;
3332 }
3333 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3334 status = VXGE_HW_ERR_INVALID_OFFSET;
3335 break;
3336 }
3337 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
3338 offset);
3339 break;
3340 default:
3341 status = VXGE_HW_ERR_INVALID_TYPE;
3342 break;
3343 }
3344exit:
3345 return status;
3346}
3347
3348/*
3349 * __vxge_hw_fifo_abort - Abort the outstanding TxDs
3350 * This function completes and frees all posted TxDs of the fifo
3351 */
3352static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3353{
3354 void *txdlh;
3355
3356 for (;;) {
3357 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3358
3359 if (txdlh == NULL)
3360 break;
3361
3362 vxge_hw_channel_dtr_complete(&fifo->channel);
3363
3364 if (fifo->txdl_term) {
3365 fifo->txdl_term(txdlh,
3366 VXGE_HW_TXDL_STATE_POSTED,
3367 fifo->channel.userdata);
3368 }
3369
3370 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3371 }
3372
3373 return VXGE_HW_OK;
3374}
3375
3376/*
3377 * __vxge_hw_fifo_reset - Resets the fifo
3378 * This function resets the fifo during vpath reset operation
3379 */
3380static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3381{
3382 enum vxge_hw_status status = VXGE_HW_OK;
3383
3384 __vxge_hw_fifo_abort(fifo);
3385 status = __vxge_hw_channel_reset(&fifo->channel);
3386
3387 return status;
3388}
3389
3390/*
3391 * __vxge_hw_fifo_delete - Removes the FIFO
3392 * This function frees up the memory pool and removes the FIFO
3393 */
3394static enum vxge_hw_status
3395__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3396{
3397 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3398
3399 __vxge_hw_fifo_abort(fifo);
3400
3401 if (fifo->mempool)
3402 __vxge_hw_mempool_destroy(fifo->mempool);
3403
3404 vp->vpath->fifoh = NULL;
3405
3406 __vxge_hw_channel_free(&fifo->channel);
3407
3408 return VXGE_HW_OK;
3409}
3410
3411/*
3412 * __vxge_hw_fifo_mempool_item_alloc - Format a TxD list (mempool
3413 * item-alloc callback)
3414 * This function is the callback passed to __vxge_hw_mempool_create to
3415 * format each TxD list of the fifo's memory pool
3416 */
3417static void
3418__vxge_hw_fifo_mempool_item_alloc(
3419 struct vxge_hw_mempool *mempoolh,
3420 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
3421 u32 index, u32 is_last)
3422{
3423 u32 memblock_item_idx;
3424 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
3425 struct vxge_hw_fifo_txd *txdp =
3426 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
3427 struct __vxge_hw_fifo *fifo =
3428 (struct __vxge_hw_fifo *)mempoolh->userdata;
3429 void *memblock = mempoolh->memblocks_arr[memblock_index];
3430
3431 vxge_assert(txdp);
3432
3433 txdp->host_control = (u64) (size_t)
3434 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
3435 &memblock_item_idx);
3436
3437 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
3438
3439 vxge_assert(txdl_priv);
3440
3441 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
3442
3443 /* pre-format HW's TxDL's private */
3444 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
3445 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
3446 txdl_priv->dma_handle = dma_object->handle;
3447 txdl_priv->memblock = memblock;
3448 txdl_priv->first_txdp = txdp;
3449 txdl_priv->next_txdl_priv = NULL;
3450 txdl_priv->alloc_frags = 0;
3451}
3452
3453/*
3454 * __vxge_hw_fifo_create - Create a FIFO
3455 * This function creates FIFO and initializes it.
3456 */
3457static enum vxge_hw_status
3458__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3459 struct vxge_hw_fifo_attr *attr)
3460{
3461 enum vxge_hw_status status = VXGE_HW_OK;
3462 struct __vxge_hw_fifo *fifo;
3463 struct vxge_hw_fifo_config *config;
3464 u32 txdl_size, txdl_per_memblock;
3465 struct vxge_hw_mempool_cbs fifo_mp_callback;
3466 struct __vxge_hw_virtualpath *vpath;
3467
3468 if ((vp == NULL) || (attr == NULL)) {
3469 status = VXGE_HW_ERR_INVALID_HANDLE;
3470 goto exit;
3471 }
3472 vpath = vp->vpath;
3473 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3474
3475 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
3476
3477 txdl_per_memblock = config->memblock_size / txdl_size;
3478
3479 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
3480 VXGE_HW_CHANNEL_TYPE_FIFO,
3481 config->fifo_blocks * txdl_per_memblock,
3482 attr->per_txdl_space, attr->userdata);
3483
3484 if (fifo == NULL) {
3485 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3486 goto exit;
3487 }
3488
3489 vpath->fifoh = fifo;
3490 fifo->nofl_db = vpath->nofl_db;
3491
3492 fifo->vp_id = vpath->vp_id;
3493 fifo->vp_reg = vpath->vp_reg;
3494 fifo->stats = &vpath->sw_stats->fifo_stats;
3495
3496 fifo->config = config;
3497
3498 /* apply "interrupts per txdl" attribute */
3499 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3500 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3501 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3502
3503 if (fifo->config->intr)
3504 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
3505
3506 fifo->no_snoop_bits = config->no_snoop_bits;
3507
3508 /*
3509 * FIFO memory management strategy:
3510 *
3511 * TxDL split into three independent parts:
3512 * - set of TxD's
3513 * - TxD HW private part
3514 * - driver private part
3515 *
3516 * Adaptive memory allocation is used, i.e. memory is allocated on
3517 * demand in chunks sized to fit into one memory block.
3518 * One memory block may contain more than one TxDL.
3519 *
3520 * During "reserve" operations more memory can be allocated on demand
3521 * for example due to FIFO full condition.
3522 *
3523 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
3524 * routine which will essentially stop the channel and free resources.
3525 */
3526
3527 /* TxDL common private size == TxDL private + driver private */
3528 fifo->priv_size =
3529 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
3530 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
3531 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
3532
3533 fifo->per_txdl_space = attr->per_txdl_space;
3534
3535 /* store the txdl geometry; the cacheline alignment was applied to priv_size */
3536 fifo->txdl_size = txdl_size;
3537 fifo->txdl_per_memblock = txdl_per_memblock;
3538
3539 fifo->txdl_term = attr->txdl_term;
3540 fifo->callback = attr->callback;
3541
3542 if (fifo->txdl_per_memblock == 0) {
3543 __vxge_hw_fifo_delete(vp);
3544 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
3545 goto exit;
3546 }
3547
3548 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
3549
3550 fifo->mempool =
3551 __vxge_hw_mempool_create(vpath->hldev,
3552 fifo->config->memblock_size,
3553 fifo->txdl_size,
3554 fifo->priv_size,
3555 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3556 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3557 &fifo_mp_callback,
3558 fifo);
3559
3560 if (fifo->mempool == NULL) {
3561 __vxge_hw_fifo_delete(vp);
3562 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3563 goto exit;
3564 }
3565
3566 status = __vxge_hw_channel_initialize(&fifo->channel);
3567 if (status != VXGE_HW_OK) {
3568 __vxge_hw_fifo_delete(vp);
3569 goto exit;
3570 }
3571
3572 vxge_assert(fifo->channel.reserve_ptr);
3573exit:
3574 return status;
3575}
3576
3577/*
3578 * __vxge_hw_vpath_pci_read - Read the content of given address
3579 * in pci config space.
3580 * Read from the vpath pci config space.
3581 */
3582static enum vxge_hw_status
3583__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3584 u32 phy_func_0, u32 offset, u32 *val)
3585{
3586 u64 val64;
3587 enum vxge_hw_status status = VXGE_HW_OK;
3588 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3589
3590 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
3591
3592 if (phy_func_0)
3593 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
3594
3595 writeq(val64, &vp_reg->pci_config_access_cfg1);
3596 wmb();
3597 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
3598 &vp_reg->pci_config_access_cfg2);
3599 wmb();
3600
3601 status = __vxge_hw_device_register_poll(
3602 &vp_reg->pci_config_access_cfg2,
3603 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3604
3605 if (status != VXGE_HW_OK)
3606 goto exit;
3607
3608 val64 = readq(&vp_reg->pci_config_access_status);
3609
3610 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3611 status = VXGE_HW_FAIL;
3612 *val = 0;
3613 } else
3614 *val = (u32)vxge_bVALn(val64, 32, 32);
3615exit:
3616 return status;
3617}
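
/*
 * Usage sketch (illustrative): read the 32-bit vendor/device ID word of
 * physical function 0 through the vpath window (config space offset 0):
 *
 *	u32 id;
 *
 *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0, &id);
 */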
3618
3619/**
3620 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3621 * @hldev: HW device.
3622 * @on_off: TRUE if flickering to be on, FALSE to be off
3623 *
3624 * Flicker the link LED.
3625 */
3626enum vxge_hw_status
3627vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3628{
3629 struct __vxge_hw_virtualpath *vpath;
3630 u64 data0, data1 = 0, steer_ctrl = 0;
3631 enum vxge_hw_status status;
3632
3633 if (hldev == NULL) {
3634 status = VXGE_HW_ERR_INVALID_DEVICE;
3635 goto exit;
3636 }
3637
3638 vpath = &hldev->virtual_paths[hldev->first_vp_id];
3639
3640 data0 = on_off;
3641 status = vxge_hw_vpath_fw_api(vpath,
3642 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3643 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3644 0, &data0, &data1, &steer_ctrl);
3645exit:
3646 return status;
3647}
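
/*
 * Usage sketch (illustrative): an ethtool identify handler would bracket
 * a sleep with on/off calls; the on/off encodings are firmware-defined
 * and assumed here to be 1 and 0:
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);
 *	msleep_interruptible(duration * 1000);
 *	vxge_hw_device_flick_link_led(hldev, 0);
 */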
3648
3649/*
3650 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3651 */
3652enum vxge_hw_status
3653__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3654 u32 action, u32 rts_table, u32 offset,
3655 u64 *data0, u64 *data1)
3656{
3657 enum vxge_hw_status status;
3658 u64 steer_ctrl = 0;
3659
3660 if (vp == NULL) {
3661 status = VXGE_HW_ERR_INVALID_HANDLE;
3662 goto exit;
3663 }
3664
3665 if ((rts_table ==
3666 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3667 (rts_table ==
3668 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3669 (rts_table ==
3670 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3671 (rts_table ==
3672 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3673 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3674 }
3675
3676 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3677 data0, data1, &steer_ctrl);
3678 if (status != VXGE_HW_OK)
3679 goto exit;
3680
3681 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3682 (rts_table !=
3683 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3684 *data1 = 0;
3685exit:
3686 return status;
3687}
3688
3689/*
3690 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3691 */
3692enum vxge_hw_status
3693__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3694 u32 rts_table, u32 offset, u64 steer_data0,
3695 u64 steer_data1)
3696{
3697 u64 data0, data1 = 0, steer_ctrl = 0;
3698 enum vxge_hw_status status;
3699
3700 if (vp == NULL) {
3701 status = VXGE_HW_ERR_INVALID_HANDLE;
3702 goto exit;
3703 }
3704
3705 data0 = steer_data0;
3706
3707 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3708 (rts_table ==
3709 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3710 data1 = steer_data1;
3711
3712 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3713 &data0, &data1, &steer_ctrl);
3714exit:
3715 return status;
3716}
3717
3718/*
3719 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3720 */
3721enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3722 struct __vxge_hw_vpath_handle *vp,
3723 enum vxge_hw_rth_algoritms algorithm,
3724 struct vxge_hw_rth_hash_types *hash_type,
3725 u16 bucket_size)
3726{
3727 u64 data0, data1;
3728 enum vxge_hw_status status = VXGE_HW_OK;
3729
3730 if (vp == NULL) {
3731 status = VXGE_HW_ERR_INVALID_HANDLE;
3732 goto exit;
3733 }
3734
3735 status = __vxge_hw_vpath_rts_table_get(vp,
3736 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3737 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3738 0, &data0, &data1);
3739 if (status != VXGE_HW_OK)
3740 goto exit;
3741
3742 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3743 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3744
3745 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3746 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3747 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3748
3749 if (hash_type->hash_type_tcpipv4_en)
3750 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3751
3752 if (hash_type->hash_type_ipv4_en)
3753 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3754
3755 if (hash_type->hash_type_tcpipv6_en)
3756 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3757
3758 if (hash_type->hash_type_ipv6_en)
3759 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3760
3761 if (hash_type->hash_type_tcpipv6ex_en)
3762 data0 |=
3763 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3764
3765 if (hash_type->hash_type_ipv6ex_en)
3766 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3767
3768 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3769 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3770 else
3771 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3772
3773 status = __vxge_hw_vpath_rts_table_set(vp,
3774 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3775 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3776 0, data0, 0);
3777exit:
3778 return status;
3779}
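
/*
 * Example (illustrative sketch, not part of the driver): a caller
 * enabling RTH over TCP/IPv4 and IPv4 flows with Jenkins hashing and
 * 2^8 buckets. RTH_ALG_JENKINS is assumed to be the algorithm value
 * declared in vxge-traffic.h:
 *
 *	struct vxge_hw_rth_hash_types ht = {0};
 *	enum vxge_hw_status status;
 *
 *	ht.hash_type_tcpipv4_en = 1;
 *	ht.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 8);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */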
3780
3781static void
3782vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3783 u16 flag, u8 *itable)
3784{
	/*
	 * Each flag value fills exactly one of the four item slots in the
	 * data0/data1 pair; falling through would OR together bucket
	 * numbers from different calls, so every case must break.
	 */
3785	switch (flag) {
3786	case 1:
3787		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3788			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3789			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3790			itable[j]);
		break;
3791	case 2:
3792		*data0 |=
3793			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3794			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3795			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3796			itable[j]);
		break;
3797	case 3:
3798		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3799			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3800			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3801			itable[j]);
		break;
3802	case 4:
3803		*data1 |=
3804			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3805			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3806			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3807			itable[j]);
		break;
3808	default:
3809		return;
3810	}
3811}
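
/*
 * Illustrative note: when programming the multi-IT table, four
 * indirection-table entries are packed into one firmware call; flags 1
 * and 2 fill the two item slots of data0, flags 3 and 4 the two slots
 * of data1, e.g. for four consecutive matching buckets:
 *
 *	u64 d0 = 0, d1 = 0;
 *
 *	vxge_hw_rts_rth_data0_data1_get(0, &d0, &d1, 1, itable);
 *	vxge_hw_rts_rth_data0_data1_get(1, &d0, &d1, 2, itable);
 *	vxge_hw_rts_rth_data0_data1_get(2, &d0, &d1, 3, itable);
 *	vxge_hw_rts_rth_data0_data1_get(3, &d0, &d1, 4, itable);
 */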
3812/*
3813 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3814 */
3815enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3816 struct __vxge_hw_vpath_handle **vpath_handles,
3817 u32 vpath_count,
3818 u8 *mtable,
3819 u8 *itable,
3820 u32 itable_size)
3821{
3822 u32 i, j, action, rts_table;
3823 u64 data0;
3824 u64 data1;
3825 u32 max_entries;
3826 enum vxge_hw_status status = VXGE_HW_OK;
3827 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3828
3829 if (vp == NULL) {
3830 status = VXGE_HW_ERR_INVALID_HANDLE;
3831 goto exit;
3832 }
3833
3834 max_entries = (((u32)1) << itable_size);
3835
3836 if (vp->vpath->hldev->config.rth_it_type
3837 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3838 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3839 rts_table =
3840 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3841
3842 for (j = 0; j < max_entries; j++) {
3843
3844 data1 = 0;
3845
3846 data0 =
3847 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3848 itable[j]);
3849
3850 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3851 action, rts_table, j, data0, data1);
3852
3853 if (status != VXGE_HW_OK)
3854 goto exit;
3855 }
3856
3857 for (j = 0; j < max_entries; j++) {
3858
3859 data1 = 0;
3860
3861 data0 =
3862 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3863 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3864 itable[j]);
3865
3866 status = __vxge_hw_vpath_rts_table_set(
3867 vpath_handles[mtable[itable[j]]], action,
3868 rts_table, j, data0, data1);
3869
3870 if (status != VXGE_HW_OK)
3871 goto exit;
3872 }
3873 } else {
3874 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3875 rts_table =
3876 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3877 for (i = 0; i < vpath_count; i++) {
3878
3879 for (j = 0; j < max_entries;) {
3880
3881 data0 = 0;
3882 data1 = 0;
3883
3884 while (j < max_entries) {
3885 if (mtable[itable[j]] != i) {
3886 j++;
3887 continue;
3888 }
3889 vxge_hw_rts_rth_data0_data1_get(j,
3890 &data0, &data1, 1, itable);
3891 j++;
3892 break;
3893 }
3894
3895 while (j < max_entries) {
3896 if (mtable[itable[j]] != i) {
3897 j++;
3898 continue;
3899 }
3900 vxge_hw_rts_rth_data0_data1_get(j,
3901 &data0, &data1, 2, itable);
3902 j++;
3903 break;
3904 }
3905
3906 while (j < max_entries) {
3907 if (mtable[itable[j]] != i) {
3908 j++;
3909 continue;
3910 }
3911 vxge_hw_rts_rth_data0_data1_get(j,
3912 &data0, &data1, 3, itable);
3913 j++;
3914 break;
3915 }
3916
3917 while (j < max_entries) {
3918 if (mtable[itable[j]] != i) {
3919 j++;
3920 continue;
3921 }
3922 vxge_hw_rts_rth_data0_data1_get(j,
3923 &data0, &data1, 4, itable);
3924 j++;
3925 break;
3926 }
3927
3928 if (data0 != 0) {
3929 status = __vxge_hw_vpath_rts_table_set(
3930 vpath_handles[i],
3931 action, rts_table,
3932 0, data0, data1);
3933
3934 if (status != VXGE_HW_OK)
3935 goto exit;
3936 }
3937 }
3938 }
3939 }
3940exit:
3941 return status;
3942}
3943
3944/**
3945 * vxge_hw_vpath_check_leak - Check for memory leak
3946 * @ring: Handle to the ring object used for receive
3947 *
3948 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3949 * PRC_CFG6_VPn.RXD_SPAT, then a leak has occurred.
3950 * Returns: VXGE_HW_FAIL if a leak has occurred.
3951 *
3952 */
3953enum vxge_hw_status
3954vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3955{
3956 enum vxge_hw_status status = VXGE_HW_OK;
3957 u64 rxd_new_count, rxd_spat;
3958
3959 if (ring == NULL)
3960 return status;
3961
3962 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3963 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3964 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3965
3966 if (rxd_new_count >= rxd_spat)
3967 status = VXGE_HW_FAIL;
3968
3969 return status;
3970}
3971
3972/*
3973 * __vxge_hw_vpath_mgmt_read
3974 * This routine reads the vpath_mgmt registers
3975 */
3976static enum vxge_hw_status
3977__vxge_hw_vpath_mgmt_read(
3978 struct __vxge_hw_device *hldev,
3979 struct __vxge_hw_virtualpath *vpath)
3980{
3981 u32 i, mtu = 0, max_pyld = 0;
3982 u64 val64;
3983 enum vxge_hw_status status = VXGE_HW_OK;
3984
3985 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3986
3987 val64 = readq(&vpath->vpmgmt_reg->
3988 rxmac_cfg0_port_vpmgmt_clone[i]);
3989 max_pyld =
3990 (u32)
3991 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3992 (val64);
3993 if (mtu < max_pyld)
3994 mtu = max_pyld;
3995 }
3996
3997 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3998
3999 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
4000
4001 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4002 if (val64 & vxge_mBIT(i))
4003 vpath->vsport_number = i;
4004 }
4005
4006 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
4007
4008 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
4009 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
4010 else
4011 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4012
4013 return status;
4014}
4015
4016/*
4017 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
4018 * This routine checks the vpath_rst_in_prog register to see if the
4019 * adapter has completed the reset process for the vpath
4020 */
4021static enum vxge_hw_status
4022__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
4023{
4024 enum vxge_hw_status status;
4025
4026 status = __vxge_hw_device_register_poll(
4027 &vpath->hldev->common_reg->vpath_rst_in_prog,
4028 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
4029 1 << (16 - vpath->vp_id)),
4030 vpath->hldev->config.device_poll_millis);
4031
4032 return status;
4033}
4034
4035/*
4036 * __vxge_hw_vpath_reset
4037 * This routine resets the vpath on the device
4038 */
4039static enum vxge_hw_status
4040__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4041{
4042 u64 val64;
4043 enum vxge_hw_status status = VXGE_HW_OK;
4044
4045 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4046
4047 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4048 &hldev->common_reg->cmn_rsthdlr_cfg0);
4049
4050 return status;
4051}
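
/*
 * Illustrative note: the 17 per-vpath reset bits are encoded MSB-first,
 * so vp_id 0 maps to the top of the field and vp_id 16 to the bottom,
 * hence the "1 << (16 - vp_id)" encoding; only the upper 32 bits of
 * cmn_rsthdlr_cfg0 are written here.
 */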
4052
4053/*
4054 * __vxge_hw_vpath_sw_reset
4055 * This routine resets the vpath structures
4056 */
4057static enum vxge_hw_status
4058__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4059{
4060 enum vxge_hw_status status = VXGE_HW_OK;
4061 struct __vxge_hw_virtualpath *vpath;
4062
4063 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
4064
4065 if (vpath->ringh) {
4066 status = __vxge_hw_ring_reset(vpath->ringh);
4067 if (status != VXGE_HW_OK)
4068 goto exit;
4069 }
4070
4071 if (vpath->fifoh)
4072 status = __vxge_hw_fifo_reset(vpath->fifoh);
4073exit:
4074 return status;
4075}
4076
4077/*
4078 * __vxge_hw_vpath_prc_configure
4079 * This routine configures the prc registers of virtual path using the config
4080 * passed
4081 */
4082static void
4083__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4084{
4085 u64 val64;
4086 struct __vxge_hw_virtualpath *vpath;
4087 struct vxge_hw_vp_config *vp_config;
4088 struct vxge_hw_vpath_reg __iomem *vp_reg;
4089
4090 vpath = &hldev->virtual_paths[vp_id];
4091 vp_reg = vpath->vp_reg;
4092 vp_config = vpath->vp_config;
4093
4094 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
4095 return;
4096
4097 val64 = readq(&vp_reg->prc_cfg1);
4098 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
4099 writeq(val64, &vp_reg->prc_cfg1);
4100
4101 val64 = readq(&vpath->vp_reg->prc_cfg6);
4102 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
4103 writeq(val64, &vpath->vp_reg->prc_cfg6);
4104
4105 val64 = readq(&vp_reg->prc_cfg7);
4106
4107 if (vpath->vp_config->ring.scatter_mode !=
4108 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
4109
4110 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
4111
4112 switch (vpath->vp_config->ring.scatter_mode) {
4113 case VXGE_HW_RING_SCATTER_MODE_A:
4114 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4115 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
4116 break;
4117 case VXGE_HW_RING_SCATTER_MODE_B:
4118 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4119 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
4120 break;
4121 case VXGE_HW_RING_SCATTER_MODE_C:
4122 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4123 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
4124 break;
4125 }
4126 }
4127
4128 writeq(val64, &vp_reg->prc_cfg7);
4129
4130 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
4131 __vxge_hw_ring_first_block_address_get(
4132 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
4133
4134 val64 = readq(&vp_reg->prc_cfg4);
4135 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
4136 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
4137
4138 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
4139 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
4140
4141 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
4142 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
4143 else
4144 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
4145
4146 writeq(val64, &vp_reg->prc_cfg4);
4147}
4148
4149/*
4150 * __vxge_hw_vpath_kdfc_configure
4151 * This routine configures the kdfc registers of virtual path using the
4152 * config passed
4153 */
4154static enum vxge_hw_status
4155__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4156{
4157 u64 val64;
4158 u64 vpath_stride;
4159 enum vxge_hw_status status = VXGE_HW_OK;
4160 struct __vxge_hw_virtualpath *vpath;
4161 struct vxge_hw_vpath_reg __iomem *vp_reg;
4162
4163 vpath = &hldev->virtual_paths[vp_id];
4164 vp_reg = vpath->vp_reg;
4165 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
4166
4167 if (status != VXGE_HW_OK)
4168 goto exit;
4169
4170 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
4171
4172 vpath->max_kdfc_db =
4173 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
4174 val64+1)/2;
4175
4176 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4177
4178 vpath->max_nofl_db = vpath->max_kdfc_db;
4179
4180 if (vpath->max_nofl_db <
4181 ((vpath->vp_config->fifo.memblock_size /
4182 (vpath->vp_config->fifo.max_frags *
4183 sizeof(struct vxge_hw_fifo_txd))) *
4184 vpath->vp_config->fifo.fifo_blocks)) {
4185
4186 return VXGE_HW_BADCFG_FIFO_BLOCKS;
4187 }
4188 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
4189 (vpath->max_nofl_db*2)-1);
4190 }
4191
4192 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
4193
4194 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
4195 &vp_reg->kdfc_fifo_trpl_ctrl);
4196
4197 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
4198
4199 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
4200 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
4201
4202 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
4203 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
4204#ifndef __BIG_ENDIAN
4205 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
4206#endif
4207 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
4208
4209 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
4210 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
4211 wmb();
4212 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
4213
4214 vpath->nofl_db =
4215 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
4216 (hldev->kdfc + (vp_id *
4217 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
4218 vpath_stride)));
4219exit:
4220 return status;
4221}
4222
4223/*
4224 * __vxge_hw_vpath_mac_configure
4225 * This routine configures the mac of virtual path using the config passed
4226 */
4227static enum vxge_hw_status
4228__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4229{
4230 u64 val64;
4231 enum vxge_hw_status status = VXGE_HW_OK;
4232 struct __vxge_hw_virtualpath *vpath;
4233 struct vxge_hw_vp_config *vp_config;
4234 struct vxge_hw_vpath_reg __iomem *vp_reg;
4235
4236 vpath = &hldev->virtual_paths[vp_id];
4237 vp_reg = vpath->vp_reg;
4238 vp_config = vpath->vp_config;
4239
4240 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
4241 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
4242
4243 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4244
4245 val64 = readq(&vp_reg->xmac_rpa_vcfg);
4246
4247 if (vp_config->rpa_strip_vlan_tag !=
4248 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
4249 if (vp_config->rpa_strip_vlan_tag)
4250 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4251 else
4252 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4253 }
4254
4255 writeq(val64, &vp_reg->xmac_rpa_vcfg);
4256 val64 = readq(&vp_reg->rxmac_vcfg0);
4257
4258 if (vp_config->mtu !=
4259 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
4260 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4261 if ((vp_config->mtu +
4262 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
4263 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4264 vp_config->mtu +
4265 VXGE_HW_MAC_HEADER_MAX_SIZE);
4266 else
4267 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4268 vpath->max_mtu);
4269 }
4270
4271 writeq(val64, &vp_reg->rxmac_vcfg0);
4272
4273 val64 = readq(&vp_reg->rxmac_vcfg1);
4274
4275 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
4276 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
4277
4278 if (hldev->config.rth_it_type ==
4279 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
4280 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
4281 0x2) |
4282 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
4283 }
4284
4285 writeq(val64, &vp_reg->rxmac_vcfg1);
4286 }
4287 return status;
4288}
4289
4290/*
4291 * __vxge_hw_vpath_tim_configure
4292 * This routine configures the tim registers of virtual path using the config
4293 * passed
4294 */
4295static enum vxge_hw_status
4296__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4297{
4298 u64 val64;
4299 enum vxge_hw_status status = VXGE_HW_OK;
4300 struct __vxge_hw_virtualpath *vpath;
4301 struct vxge_hw_vpath_reg __iomem *vp_reg;
4302 struct vxge_hw_vp_config *config;
4303
4304 vpath = &hldev->virtual_paths[vp_id];
4305 vp_reg = vpath->vp_reg;
4306 config = vpath->vp_config;
4307
4308 writeq(0, &vp_reg->tim_dest_addr);
4309 writeq(0, &vp_reg->tim_vpath_map);
4310 writeq(0, &vp_reg->tim_bitmap);
4311 writeq(0, &vp_reg->tim_remap);
4312
4313 if (config->ring.enable == VXGE_HW_RING_ENABLE)
4314 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
4315 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4316 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
4317
4318 val64 = readq(&vp_reg->tim_pci_cfg);
4319 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
4320 writeq(val64, &vp_reg->tim_pci_cfg);
4321
4322 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4323
4324 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4325
4326 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4327 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4328 0x3ffffff);
4329 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4330 config->tti.btimer_val);
4331 }
4332
4333 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4334
4335 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4336 if (config->tti.timer_ac_en)
4337 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4338 else
4339 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4340 }
4341
4342 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4343 if (config->tti.timer_ci_en)
4344 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4345 else
4346 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4347 }
4348
4349 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4350 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4351 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4352 config->tti.urange_a);
4353 }
4354
4355 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4356 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4357 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4358 config->tti.urange_b);
4359 }
4360
4361 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4362 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4363 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4364 config->tti.urange_c);
4365 }
4366
4367 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4368 vpath->tim_tti_cfg1_saved = val64;
4369
4370 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4371
4372 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4373 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4374 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4375 config->tti.uec_a);
4376 }
4377
4378 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4379 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4380 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4381 config->tti.uec_b);
4382 }
4383
4384 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4385 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4386 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4387 config->tti.uec_c);
4388 }
4389
4390 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4391 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4392 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4393 config->tti.uec_d);
4394 }
4395
4396 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4397 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4398
4399 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4400 if (config->tti.timer_ri_en)
4401 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4402 else
4403 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4404 }
4405
4406 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4407 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4408 0x3ffffff);
4409 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4410 config->tti.rtimer_val);
4411 }
4412
4413 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4414 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4415 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4416 }
4417
4418 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4419 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4420 0x3ffffff);
4421 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4422 config->tti.ltimer_val);
4423 }
4424
4425 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4426 vpath->tim_tti_cfg3_saved = val64;
4427 }
4428
4429 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
4430
4431 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4432
4433 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4434 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4435 0x3ffffff);
4436 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4437 config->rti.btimer_val);
4438 }
4439
4440 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4441
4442 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4443 if (config->rti.timer_ac_en)
4444 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4445 else
4446 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4447 }
4448
4449 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4450 if (config->rti.timer_ci_en)
4451 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4452 else
4453 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4454 }
4455
4456 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4457 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4458 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4459 config->rti.urange_a);
4460 }
4461
4462 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4463 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4464 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4465 config->rti.urange_b);
4466 }
4467
4468 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4469 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4470 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4471 config->rti.urange_c);
4472 }
4473
4474 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4475 vpath->tim_rti_cfg1_saved = val64;
4476
4477 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4478
4479 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4480 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4481 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4482 config->rti.uec_a);
4483 }
4484
4485 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4486 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4487 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4488 config->rti.uec_b);
4489 }
4490
4491 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4492 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4493 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4494 config->rti.uec_c);
4495 }
4496
4497 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4498 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4499 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4500 config->rti.uec_d);
4501 }
4502
4503 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4504 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4505
4506 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4507 if (config->rti.timer_ri_en)
4508 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4509 else
4510 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4511 }
4512
4513 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4514 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4515 0x3ffffff);
4516 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4517 config->rti.rtimer_val);
4518 }
4519
4520 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4521 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4522 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4523 }
4524
4525 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4526 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4527 0x3ffffff);
4528 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4529 config->rti.ltimer_val);
4530 }
4531
4532 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4533 vpath->tim_rti_cfg3_saved = val64;
4534 }
4535
4536 val64 = 0;
4537 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4538 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4540 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4541 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4542 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4543
4544 val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4545 val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4546 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4547 writeq(val64, &vp_reg->tim_wrkld_clc);
4548
4549 return status;
4550}
4551
4552/*
4553 * __vxge_hw_vpath_initialize
4554 * This routine is the final phase of init which initializes the
4555 * registers of the vpath using the configuration passed.
4556 */
4557static enum vxge_hw_status
4558__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4559{
4560 u64 val64;
4561 u32 val32;
4562 enum vxge_hw_status status = VXGE_HW_OK;
4563 struct __vxge_hw_virtualpath *vpath;
4564 struct vxge_hw_vpath_reg __iomem *vp_reg;
4565
4566 vpath = &hldev->virtual_paths[vp_id];
4567
4568 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4569 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4570 goto exit;
4571 }
4572 vp_reg = vpath->vp_reg;
4573
4574 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4575 if (status != VXGE_HW_OK)
4576 goto exit;
4577
4578 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4579 if (status != VXGE_HW_OK)
4580 goto exit;
4581
4582 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4583 if (status != VXGE_HW_OK)
4584 goto exit;
4585
4586 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4587 if (status != VXGE_HW_OK)
4588 goto exit;
4589
4590 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4591
4592 /* Get MRRS value from device control */
4593 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4594 if (status == VXGE_HW_OK) {
4595 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4596 val64 &=
4597 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4598 val64 |=
4599 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4600
4601 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4602 }
4603
4604 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4605 val64 |=
4606 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4607 VXGE_HW_MAX_PAYLOAD_SIZE_512);
4608
4609 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4610 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4611
4612exit:
4613 return status;
4614}
4615
4616/*
4617 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4618 * This routine closes all channels it opened and frees up memory
4619 */
4620static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4621{
4622 struct __vxge_hw_virtualpath *vpath;
4623
4624 vpath = &hldev->virtual_paths[vp_id];
4625
4626 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4627 goto exit;
4628
4629 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4630 vpath->hldev->tim_int_mask1, vpath->vp_id);
4631 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4632
4633 /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
4634 * work after the interface is brought down.
4635 */
4636 spin_lock(&vpath->lock);
4637 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4638 spin_unlock(&vpath->lock);
4639
4640 vpath->vpmgmt_reg = NULL;
4641 vpath->nofl_db = NULL;
4642 vpath->max_mtu = 0;
4643 vpath->vsport_number = 0;
4644 vpath->max_kdfc_db = 0;
4645 vpath->max_nofl_db = 0;
4646 vpath->ringh = NULL;
4647 vpath->fifoh = NULL;
4648 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4649	vpath->stats_block = NULL;
4650 vpath->hw_stats = NULL;
4651 vpath->hw_stats_sav = NULL;
4652 vpath->sw_stats = NULL;
4653
4654exit:
4655 return;
4656}
4657
4658/*
4659 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4660 * This routine is the initial phase of init which resets the vpath and
4661 * initializes the software support structures.
4662 */
4663static enum vxge_hw_status
4664__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4665 struct vxge_hw_vp_config *config)
4666{
4667 struct __vxge_hw_virtualpath *vpath;
4668 enum vxge_hw_status status = VXGE_HW_OK;
4669
4670 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4671 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4672 goto exit;
4673 }
4674
4675 vpath = &hldev->virtual_paths[vp_id];
4676
4677 spin_lock_init(&vpath->lock);
4678 vpath->vp_id = vp_id;
4679 vpath->vp_open = VXGE_HW_VP_OPEN;
4680 vpath->hldev = hldev;
4681 vpath->vp_config = config;
4682 vpath->vp_reg = hldev->vpath_reg[vp_id];
4683 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4684
4685 __vxge_hw_vpath_reset(hldev, vp_id);
4686
4687 status = __vxge_hw_vpath_reset_check(vpath);
4688 if (status != VXGE_HW_OK) {
4689 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4690 goto exit;
4691 }
4692
4693 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4694 if (status != VXGE_HW_OK) {
4695 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4696 goto exit;
4697 }
4698
4699 INIT_LIST_HEAD(&vpath->vpath_handles);
4700
4701 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4702
4703 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4704 hldev->tim_int_mask1, vp_id);
4705
4706 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4707 if (status != VXGE_HW_OK)
4708 __vxge_hw_vp_terminate(hldev, vp_id);
4709exit:
4710 return status;
4711}
4712
4713/*
4714 * vxge_hw_vpath_mtu_set - Set MTU.
4715 * Set a new MTU value. For example, to enable jumbo frames:
4716 * vxge_hw_vpath_mtu_set(my_vpath_handle, 9600);
4717 */
4718enum vxge_hw_status
4719vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4720{
4721 u64 val64;
4722 enum vxge_hw_status status = VXGE_HW_OK;
4723 struct __vxge_hw_virtualpath *vpath;
4724
4725 if (vp == NULL) {
4726 status = VXGE_HW_ERR_INVALID_HANDLE;
4727 goto exit;
4728 }
4729 vpath = vp->vpath;
4730
4731 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4732
4733	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4734		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		/* do not program an out-of-range frame length */
		goto exit;
	}
4735
4736 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4737
4738 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4739 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4740
4741 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4742
4743 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4744
4745exit:
4746 return status;
4747}
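
/*
 * Example (illustrative): the call validates against the vpath's
 * max_mtu before programming the MAC, so a caller can simply check the
 * return code:
 *
 *	if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
 *		return -EINVAL;
 */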
4748
4749/*
4750 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4751 * Enable the DMA vpath statistics. The function is to be called to
4752 * re-enable the adapter to update stats into the host memory.
4753 */
4754static enum vxge_hw_status
4755vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4756{
4757 enum vxge_hw_status status = VXGE_HW_OK;
4758 struct __vxge_hw_virtualpath *vpath;
4759
4760 vpath = vp->vpath;
4761
4762 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4763 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4764 goto exit;
4765 }
4766
4767 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4768 sizeof(struct vxge_hw_vpath_stats_hw_info));
4769
4770 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4771exit:
4772 return status;
4773}
4774
4775/*
4776 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4777 * This function allocates a block from block pool or from the system
4778 */
4779static struct __vxge_hw_blockpool_entry *
4780__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4781{
4782 struct __vxge_hw_blockpool_entry *entry = NULL;
4783 struct __vxge_hw_blockpool *blockpool;
4784
4785 blockpool = &devh->block_pool;
4786
4787 if (size == blockpool->block_size) {
4788
4789 if (!list_empty(&blockpool->free_block_list))
4790 entry = (struct __vxge_hw_blockpool_entry *)
4791 list_first_entry(&blockpool->free_block_list,
4792 struct __vxge_hw_blockpool_entry,
4793 item);
4794
4795 if (entry != NULL) {
4796 list_del(&entry->item);
4797 blockpool->pool_size--;
4798 }
4799 }
4800
4801 if (entry != NULL)
4802 __vxge_hw_blockpool_blocks_add(blockpool);
4803
4804 return entry;
4805}
4806
4807/*
4808 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4809 * This function is used to open access to a virtual path of an
4810 * adapter for offload and GRO operations. This function returns
4811 * synchronously.
4812 */
4813enum vxge_hw_status
4814vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4815 struct vxge_hw_vpath_attr *attr,
4816 struct __vxge_hw_vpath_handle **vpath_handle)
4817{
4818 struct __vxge_hw_virtualpath *vpath;
4819 struct __vxge_hw_vpath_handle *vp;
4820 enum vxge_hw_status status;
4821
4822 vpath = &hldev->virtual_paths[attr->vp_id];
4823
4824 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4825 status = VXGE_HW_ERR_INVALID_STATE;
4826 goto vpath_open_exit1;
4827 }
4828
4829 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4830 &hldev->config.vp_config[attr->vp_id]);
4831 if (status != VXGE_HW_OK)
4832 goto vpath_open_exit1;
4833
4834 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4835 if (vp == NULL) {
4836 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4837 goto vpath_open_exit2;
4838 }
4839
4840 vp->vpath = vpath;
4841
4842 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4843 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4844 if (status != VXGE_HW_OK)
4845 goto vpath_open_exit6;
4846 }
4847
4848 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4849 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4850 if (status != VXGE_HW_OK)
4851 goto vpath_open_exit7;
4852
4853 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4854 }
4855
4856 vpath->fifoh->tx_intr_num =
4857 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4858 VXGE_HW_VPATH_INTR_TX;
4859
4860 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4861 VXGE_HW_BLOCK_SIZE);
4862 if (vpath->stats_block == NULL) {
4863 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4864 goto vpath_open_exit8;
4865 }
4866
4867 vpath->hw_stats = vpath->stats_block->memblock;
4868 memset(vpath->hw_stats, 0,
4869 sizeof(struct vxge_hw_vpath_stats_hw_info));
4870
4871 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4872 vpath->hw_stats;
4873
4874 vpath->hw_stats_sav =
4875 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4876 memset(vpath->hw_stats_sav, 0,
4877 sizeof(struct vxge_hw_vpath_stats_hw_info));
4878
4879 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4880
4881 status = vxge_hw_vpath_stats_enable(vp);
4882 if (status != VXGE_HW_OK)
4883 goto vpath_open_exit8;
4884
4885 list_add(&vp->item, &vpath->vpath_handles);
4886
4887 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4888
4889 *vpath_handle = vp;
4890
4891 attr->fifo_attr.userdata = vpath->fifoh;
4892 attr->ring_attr.userdata = vpath->ringh;
4893
4894 return VXGE_HW_OK;
4895
4896vpath_open_exit8:
4897 if (vpath->ringh != NULL)
4898 __vxge_hw_ring_delete(vp);
4899vpath_open_exit7:
4900 if (vpath->fifoh != NULL)
4901 __vxge_hw_fifo_delete(vp);
4902vpath_open_exit6:
4903 vfree(vp);
4904vpath_open_exit2:
4905 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4906vpath_open_exit1:
4907
4908 return status;
4909}
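
/*
 * Example (illustrative sketch): typical pairing of vpath open and
 * close. The attr structure is assumed to be pre-filled by the caller
 * with the vp_id and the fifo/ring attributes and callbacks:
 *
 *	struct __vxge_hw_vpath_handle *vp;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	...use vp for traffic operations...
 *	vxge_hw_vpath_close(vp);
 */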
4910
4911/**
4912 * vxge_hw_vpath_rx_doorbell_init - Post the initial receive descriptor
4913 * doorbell count for a vpath
4914 * @vp: Handle got from previous vpath open
4915 *
4916 * This function posts the initial RxD count to the doorbell register
4917 * and derives the ring's rxds_limit from the on-chip RxD memory size.
4918 */
4919void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4920{
4921 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4922 struct __vxge_hw_ring *ring = vpath->ringh;
4923 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4924 u64 new_count, val64, val164;
4925
4926 if (vdev->titan1) {
4927 new_count = readq(&vpath->vp_reg->rxdmem_size);
4928 new_count &= 0x1fff;
4929 } else
4930 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4931
4932 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4933
4934 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4935 &vpath->vp_reg->prc_rxd_doorbell);
4936 readl(&vpath->vp_reg->prc_rxd_doorbell);
4937
4938 val164 /= 2;
4939 val64 = readq(&vpath->vp_reg->prc_cfg6);
4940 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4941 val64 &= 0x1ff;
4942
4943 /*
4944	 * Each RxD occupies 4 qwords
4945 */
4946 new_count -= (val64 + 1);
4947 val64 = min(val164, new_count) / 4;
4948
4949 ring->rxds_limit = min(ring->rxds_limit, val64);
4950 if (ring->rxds_limit < 4)
4951 ring->rxds_limit = 4;
4952}
4953
4954/*
4955 * __vxge_hw_blockpool_block_free - Frees a block from block pool
4956 * @devh: HAL device
4957 * @entry: Entry of block to be freed
4958 *
4959 * This function frees a block from block pool
4960 */
4961static void
4962__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4963 struct __vxge_hw_blockpool_entry *entry)
4964{
4965 struct __vxge_hw_blockpool *blockpool;
4966
4967 blockpool = &devh->block_pool;
4968
4969 if (entry->length == blockpool->block_size) {
4970 list_add(&entry->item, &blockpool->free_block_list);
4971 blockpool->pool_size++;
4972 }
4973
4974 __vxge_hw_blockpool_blocks_remove(blockpool);
4975}
4976
4977/*
4978 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
4979 * This function is used to close access to virtual path opened
4980 * earlier.
4981 */
4982enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4983{
4984 struct __vxge_hw_virtualpath *vpath = NULL;
4985 struct __vxge_hw_device *devh = NULL;
4986 u32 vp_id = vp->vpath->vp_id;
4987 u32 is_empty = TRUE;
4988 enum vxge_hw_status status = VXGE_HW_OK;
4989
4990 vpath = vp->vpath;
4991 devh = vpath->hldev;
4992
4993 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4994 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4995 goto vpath_close_exit;
4996 }
4997
4998 list_del(&vp->item);
4999
5000 if (!list_empty(&vpath->vpath_handles)) {
5001 list_add(&vp->item, &vpath->vpath_handles);
5002 is_empty = FALSE;
5003 }
5004
5005 if (!is_empty) {
5006 status = VXGE_HW_FAIL;
5007 goto vpath_close_exit;
5008 }
5009
5010 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
5011
5012 if (vpath->ringh != NULL)
5013 __vxge_hw_ring_delete(vp);
5014
5015 if (vpath->fifoh != NULL)
5016 __vxge_hw_fifo_delete(vp);
5017
5018 if (vpath->stats_block != NULL)
5019 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
5020
5021 vfree(vp);
5022
5023 __vxge_hw_vp_terminate(devh, vp_id);
5024
5025vpath_close_exit:
5026 return status;
5027}
5028
5029/*
5030 * vxge_hw_vpath_reset - Resets vpath
5031 * This function is used to request a reset of vpath
5032 */
5033enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
5034{
5035 enum vxge_hw_status status;
5036 u32 vp_id;
5037 struct __vxge_hw_virtualpath *vpath = vp->vpath;
5038
5039 vp_id = vpath->vp_id;
5040
5041 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5042 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5043 goto exit;
5044 }
5045
5046 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5047 if (status == VXGE_HW_OK)
5048 vpath->sw_stats->soft_reset_cnt++;
5049exit:
5050 return status;
5051}
5052
5053/*
5054 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
5055 * This function polls for the vpath reset completion and re-initializes
5056 * the vpath.
5057 */
5058enum vxge_hw_status
5059vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
5060{
5061 struct __vxge_hw_virtualpath *vpath = NULL;
5062 enum vxge_hw_status status;
5063 struct __vxge_hw_device *hldev;
5064 u32 vp_id;
5065
5066 vp_id = vp->vpath->vp_id;
5067 vpath = vp->vpath;
5068 hldev = vpath->hldev;
5069
5070 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5071 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5072 goto exit;
5073 }
5074
5075 status = __vxge_hw_vpath_reset_check(vpath);
5076 if (status != VXGE_HW_OK)
5077 goto exit;
5078
5079 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
5080 if (status != VXGE_HW_OK)
5081 goto exit;
5082
5083 status = __vxge_hw_vpath_initialize(hldev, vp_id);
5084 if (status != VXGE_HW_OK)
5085 goto exit;
5086
5087 if (vpath->ringh != NULL)
5088 __vxge_hw_vpath_prc_configure(hldev, vp_id);
5089
5090 memset(vpath->hw_stats, 0,
5091 sizeof(struct vxge_hw_vpath_stats_hw_info));
5092
5093 memset(vpath->hw_stats_sav, 0,
5094 sizeof(struct vxge_hw_vpath_stats_hw_info));
5095
5096 writeq(vpath->stats_block->dma_addr,
5097 &vpath->vp_reg->stats_cfg);
5098
5099 status = vxge_hw_vpath_stats_enable(vp);
5100
5101exit:
5102 return status;
5103}
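
/*
 * Example (illustrative): requesting a vpath reset and bringing the
 * vpath back into service once the adapter reports completion:
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */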
5104
5105/*
5106 * vxge_hw_vpath_enable - Enable vpath.
5107 * This routine clears the vpath reset, thereby enabling a vpath
5108 * to start forwarding frames and generating interrupts.
5109 */
5110void
5111vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
5112{
5113 struct __vxge_hw_device *hldev;
5114 u64 val64;
5115
5116 hldev = vp->vpath->hldev;
5117
5118 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
5119 1 << (16 - vp->vpath->vp_id));
5120
5121 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
5122 &hldev->common_reg->cmn_rsthdlr_cfg1);
5123}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
new file mode 100644
index 00000000000..dd362584f5c
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.h
@@ -0,0 +1,2111 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_CONFIG_H
15#define VXGE_CONFIG_H
16#include <linux/hardirq.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <asm/io.h>
20
21#ifndef VXGE_CACHE_LINE_SIZE
22#define VXGE_CACHE_LINE_SIZE 128
23#endif
24
25#ifndef VXGE_ALIGN
26#define VXGE_ALIGN(adrs, size) \
27 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
28#endif
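
/*
 * VXGE_ALIGN(adrs, size) yields the number of padding bytes needed to
 * bring "adrs" up to the next "size"-byte boundary (0 if already
 * aligned). For example, VXGE_ALIGN(0x1003, 8) == 5 and
 * VXGE_ALIGN(0x1000, 8) == 0.
 */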
29
30#define VXGE_HW_MIN_MTU 68
31#define VXGE_HW_MAX_MTU 9600
32#define VXGE_HW_DEFAULT_MTU 1500
33
34#define VXGE_HW_MAX_ROM_IMAGES 8
35
36struct eprom_image {
37 u8 is_valid:1;
38 u8 index;
39 u8 type;
40 u16 version;
41};
42
43#ifdef VXGE_DEBUG_ASSERT
44/**
45 * vxge_assert
46 * @test: C-condition to check
47 *
48 * This macro implements a traditional assert. By default assertions
49 * are enabled; they can be disabled by leaving the VXGE_DEBUG_ASSERT
50 * macro undefined at compile time.
53 */
54#define vxge_assert(test) BUG_ON(!(test))
55#else
56#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */
58
59/**
60 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors going to be logged out
63 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
64 * to be logged out. Very noisy.
65 *
66 * This enumeration is used to switch between different debug
67 * levels at runtime if the DEBUG macro is defined during
68 * compilation. If the DEBUG macro is not defined, the code is
69 * compiled out.
70 */
71enum vxge_debug_level {
72 VXGE_NONE = 0,
73 VXGE_TRACE = 1,
74 VXGE_ERR = 2
75};
76
77#define NULL_VPID 0xFFFFFFFF
78#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
79#define VXGE_DEBUG_MODULE_MASK 0xffffffff
80#define VXGE_DEBUG_TRACE_MASK 0xffffffff
81#define VXGE_DEBUG_ERR_MASK 0xffffffff
82#define VXGE_DEBUG_MASK 0x000001ff
83#else
84#define VXGE_DEBUG_MODULE_MASK 0x20000000
85#define VXGE_DEBUG_TRACE_MASK 0x20000000
86#define VXGE_DEBUG_ERR_MASK 0x20000000
87#define VXGE_DEBUG_MASK 0x00000001
88#endif
89
90/*
91 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
92 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
93 *
94 * These values are used to distinguish modules or libraries during
95 * compilation and at runtime. The Makefile must declare the
96 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
97 */
98#define VXGE_COMPONENT_LL 0x20000000
99#define VXGE_COMPONENT_ALL 0xffffffff
100
101#define VXGE_HW_BASE_INF 100
102#define VXGE_HW_BASE_ERR 200
103#define VXGE_HW_BASE_BADCFG 300
104
105enum vxge_hw_status {
106 VXGE_HW_OK = 0,
107 VXGE_HW_FAIL = 1,
108 VXGE_HW_PENDING = 2,
109 VXGE_HW_COMPLETIONS_REMAIN = 3,
110
111 VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
112 VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
113
114 VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
115 VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
116 VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
117 VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
118 VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
119 VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
120 VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
121 VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
122 VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
123 VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
124 VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
125 VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
126 VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
127 VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
128 VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
129 VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
130 VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
131 VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
132 VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
133 VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
134 VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
135 VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
136
137 VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
138 VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
139 VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
140 VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
141 VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
142 VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
143 VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
144
145 VXGE_HW_EOF_TRACE_BUF = -1
146};
147
148/**
149 * enum vxge_hw_device_link_state - Link state enumeration.
150 * @VXGE_HW_LINK_NONE: Invalid link state.
151 * @VXGE_HW_LINK_DOWN: Link is down.
152 * @VXGE_HW_LINK_UP: Link is up.
153 *
154 */
155enum vxge_hw_device_link_state {
156 VXGE_HW_LINK_NONE,
157 VXGE_HW_LINK_DOWN,
158 VXGE_HW_LINK_UP
159};
160
161/**
162 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
163 * @VXGE_HW_FW_UPGRADE_OK: all OK, send next 16 bytes
164 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
165 * @VXGE_HW_FW_UPGRADE_ERR: upload error
166 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
167 *
168 */
169enum vxge_hw_fw_upgrade_code {
170 VXGE_HW_FW_UPGRADE_OK = 0,
171 VXGE_HW_FW_UPGRADE_DONE = 1,
172 VXGE_HW_FW_UPGRADE_ERR = 2,
173 VXGE_FW_UPGRADE_BYTES2SKIP = 3
174};
175
176/**
177 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
178 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
179 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
184 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
185 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
186 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
187 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
188 */
189enum vxge_hw_fw_upgrade_err_code {
190 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
191 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
194 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
196 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
197 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
198 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
199 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
200};
201
202/**
203 * struct vxge_hw_device_date - Date Format
204 * @day: Day
205 * @month: Month
206 * @year: Year
207 * @date: Date in string format
208 *
209 * Structure for returning date
210 */
211
212#define VXGE_HW_FW_STRLEN 32
213struct vxge_hw_device_date {
214 u32 day;
215 u32 month;
216 u32 year;
217 char date[VXGE_HW_FW_STRLEN];
218};
219
220struct vxge_hw_device_version {
221 u32 major;
222 u32 minor;
223 u32 build;
224 char version[VXGE_HW_FW_STRLEN];
225};
226
227/**
228 * struct vxge_hw_fifo_config - Configuration of fifo.
229 * @enable: Is this fifo to be commissioned
230 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
231 * blocks per queue.
232 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
233 * transmit operation).
234 * No more than 256 transmit buffers can be specified.
235 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
236 * bytes. Setting @memblock_size to page size ensures
237 * by-page allocation of descriptors. 128K bytes is the
238 * maximum supported block size.
239 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
240 * (e.g., to align on a cache line).
241 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
242 * Use 0 otherwise.
243 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
244 * which generally improves latency of the host bridge operation
245 * (see PCI specification). For valid values please refer
246 * to struct vxge_hw_fifo_config{} in the driver sources.
247 * Configuration of all Titan fifos.
248 * Note: Valid (min, max) range for each attribute is specified in the body of
249 * the struct vxge_hw_fifo_config{} structure.
250 */
251struct vxge_hw_fifo_config {
252 u32 enable;
253#define VXGE_HW_FIFO_ENABLE 1
254#define VXGE_HW_FIFO_DISABLE 0
255
256 u32 fifo_blocks;
257#define VXGE_HW_MIN_FIFO_BLOCKS 2
258#define VXGE_HW_MAX_FIFO_BLOCKS 128
259
260 u32 max_frags;
261#define VXGE_HW_MIN_FIFO_FRAGS 1
262#define VXGE_HW_MAX_FIFO_FRAGS 256
263
264 u32 memblock_size;
265#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
266#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
267#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
268
269 u32 alignment_size;
270#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
271#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
272#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
273
274 u32 intr;
275#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
276#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
277#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
278
279 u32 no_snoop_bits;
280#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
281#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
282#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
283#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
284#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
285
286};
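
/*
 * Example (illustrative): a minimal fifo configuration built from the
 * defaults defined above:
 *
 *	struct vxge_hw_fifo_config fifo_cfg = {
 *		.enable         = VXGE_HW_FIFO_ENABLE,
 *		.fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS,
 *		.max_frags      = VXGE_HW_MAX_FIFO_FRAGS,
 *		.memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
 *		.alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
 *		.intr           = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
 *		.no_snoop_bits  = VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
 *	};
 */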
287/**
288 * struct vxge_hw_ring_config - Ring configurations.
289 * @enable: Is this ring to be commissioned
290 * @ring_blocks: Numbers of RxD blocks in the ring
291 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
292 * to Titan User Guide.
293 * @scatter_mode: Titan supports two receive scatter modes: A and B.
294 * For details please refer to Titan User Guide.
295 * @rx_timer_val: The number of 32ns periods that would be counted between two
296 * timer interrupts.
297 * @greedy_return: If set, forces the device to return absolutely all RxDs
298 * that are consumed and still on board when a timer interrupt
299 * triggers. If clear, and the device has already returned RxDs
300 * between the previous and the current timer interrupt, then it is
301 * not forced to return the rest of the consumed RxDs it has on
302 * board, which account for a byte count less than the one
303 * programmed into the PRC_CFG6.RXD_CRXDT field.
305 * @rx_timer_ci: TBD
306 * @backoff_interval_us: Time (in microseconds), after which Titan
307 * tries to download RxDs posted by the host.
308 * Note that the "backoff" does not happen if host posts receive
309 * descriptors in the timely fashion.
310 * Ring configuration.
311 */
312struct vxge_hw_ring_config {
313 u32 enable;
314#define VXGE_HW_RING_ENABLE 1
315#define VXGE_HW_RING_DISABLE 0
316#define VXGE_HW_RING_DEFAULT 1
317
318 u32 ring_blocks;
319#define VXGE_HW_MIN_RING_BLOCKS 1
320#define VXGE_HW_MAX_RING_BLOCKS 128
321#define VXGE_HW_DEF_RING_BLOCKS 2
322
323 u32 buffer_mode;
324#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
325#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
326#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
327#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
328
329 u32 scatter_mode;
330#define VXGE_HW_RING_SCATTER_MODE_A 0
331#define VXGE_HW_RING_SCATTER_MODE_B 1
332#define VXGE_HW_RING_SCATTER_MODE_C 2
333#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
334
335 u64 rxds_limit;
336#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
337};
338
339/**
340 * struct vxge_hw_vp_config - Configuration of virtual path
341 * @vp_id: Virtual Path Id
342 * @min_bandwidth: Minimum Guaranteed bandwidth
343 * @ring: See struct vxge_hw_ring_config{}.
344 * @fifo: See struct vxge_hw_fifo_config{}.
345 * @tti: Configuration of interrupt associated with Transmit.
346 * see struct vxge_hw_tim_intr_config();
347 * @rti: Configuration of interrupt associated with Receive.
348 * see struct vxge_hw_tim_intr_config();
349 * @mtu: mtu size used on this port.
350 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
351 * remove the VLAN tag from all received tagged frames that are not
352 * replicated at the internal L2 switch.
353 * 0 - Do not strip the VLAN tag.
354 * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
355 * always placed into the RxDMA descriptor.
356 *
357 * This structure is used by the driver to pass the configuration parameters to
358 * configure Virtual Path.
359 */
360struct vxge_hw_vp_config {
361 u32 vp_id;
362
363#define VXGE_HW_VPATH_PRIORITY_MIN 0
364#define VXGE_HW_VPATH_PRIORITY_MAX 16
365#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
366
367 u32 min_bandwidth;
368#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
369#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
370#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
371
372 struct vxge_hw_ring_config ring;
373 struct vxge_hw_fifo_config fifo;
374 struct vxge_hw_tim_intr_config tti;
375 struct vxge_hw_tim_intr_config rti;
376
377 u32 mtu;
378#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
379#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
380#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
381
382 u32 rpa_strip_vlan_tag;
383#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
384#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
385#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
386
387};
388/**
389 * struct vxge_hw_device_config - Device configuration.
390 * @dma_blockpool_initial: Initial size of DMA Pool
391 * @dma_blockpool_max: Maximum blocks in DMA pool
392 * @intr_mode: Line, or MSI-X interrupt.
393 *
394 * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
395 * @rth_it_type: RTH IT table programming type
396 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
397 * @vp_config: Configuration for virtual paths
398 * @device_poll_millis: Specify the interval (in milliseconds)
399 * to wait for register reads
400 *
401 * Titan configuration.
402 * Contains per-device configuration parameters, including:
403 * - stats sampling interval, etc.
404 *
405 * In addition, struct vxge_hw_device_config{} includes "subordinate"
406 * configurations, including:
407 * - fifos and rings;
408 * - MAC (done at firmware level).
409 *
410 * See Titan User Guide for more details.
411 * Note: Valid (min, max) range for each attribute is specified in the body of
412 * the struct vxge_hw_device_config{} structure. Please refer to the
413 * corresponding include file.
414 * See also: struct vxge_hw_tim_intr_config{}.
415 */
416struct vxge_hw_device_config {
417 u32 device_poll_millis;
418#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
419#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
420#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
421
422 u32 dma_blockpool_initial;
423 u32 dma_blockpool_max;
424#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
425#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
426#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
427#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
428
429#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
430
431 u32 intr_mode:2,
432#define VXGE_HW_INTR_MODE_IRQLINE 0
433#define VXGE_HW_INTR_MODE_MSIX 1
434#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
435
436#define VXGE_HW_INTR_MODE_DEF 0
437
438 rth_en:1,
439#define VXGE_HW_RTH_DISABLE 0
440#define VXGE_HW_RTH_ENABLE 1
441#define VXGE_HW_RTH_DEFAULT 0
442
443 rth_it_type:1,
444#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
445#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
446#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
447
448 rts_mac_en:1,
449#define VXGE_HW_RTS_MAC_DISABLE 0
450#define VXGE_HW_RTS_MAC_ENABLE 1
451#define VXGE_HW_RTS_MAC_DEFAULT 0
452
453 hwts_en:1;
454#define VXGE_HW_HWTS_DISABLE 0
455#define VXGE_HW_HWTS_ENABLE 1
456#define VXGE_HW_HWTS_DEFAULT 1
457
458 struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
459};
460
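/*
 * Sketch of typical usage: start from the firmware defaults (via
 * vxge_hw_device_config_default_get(), declared later in this file)
 * and override selected fields. example_device_config is hypothetical.
 */
static inline enum vxge_hw_status
example_device_config(struct vxge_hw_device_config *cfg)
{
	enum vxge_hw_status status;

	status = vxge_hw_device_config_default_get(cfg);
	if (status != VXGE_HW_OK)
		return status;

	cfg->intr_mode = VXGE_HW_INTR_MODE_MSIX;
	cfg->rth_en = VXGE_HW_RTH_ENABLE;
	cfg->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	return VXGE_HW_OK;
}
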
461/**
462 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
463 * @devh: HW device handle.
464 * Link-up notification callback provided by the driver.
465 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
466 *
467 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
468 * vxge_hw_driver_initialize().
469 */
470
471/**
472 * function vxge_uld_link_down_f - Link-Down callback provided by
473 * driver.
474 * @devh: HW device handle.
475 *
476 * Link-Down notification callback provided by the driver.
477 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
478 *
479 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
480 * vxge_hw_driver_initialize().
481 */
482
483/**
484 * function vxge_uld_crit_err_f - Critical Error notification callback.
485 * @devh: HW device handle.
486 * (typically at HW device initialization time).
487 * @type: Enumerated hw error, e.g.: double ECC.
488 * @serr_data: Titan status.
489 * @ext_data: Extended data. The contents depends on the @type.
490 *
491 * Critical-error notification callback provided by the driver.
492 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
493 *
494 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
495 * vxge_hw_driver_initialize().
496 */
497
498/**
499 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
500 * @link_up: See vxge_uld_link_up_f{}.
501 * @link_down: See vxge_uld_link_down_f{}.
502 * @crit_err: See vxge_uld_crit_err_f{}.
503 *
504 * Driver slow-path (per-driver) callbacks.
505 * Implemented by driver and provided to HW via
506 * vxge_hw_driver_initialize().
507 * Note that these callbacks are not mandatory: HW will not invoke
508 * a callback if NULL is specified.
509 *
510 * See also: vxge_hw_driver_initialize().
511 */
512struct vxge_hw_uld_cbs {
513 void (*link_up)(struct __vxge_hw_device *devh);
514 void (*link_down)(struct __vxge_hw_device *devh);
515 void (*crit_err)(struct __vxge_hw_device *devh,
516 enum vxge_hw_event type, u64 ext_data);
517};
518
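/*
 * Sketch: providing the optional slow-path callbacks. The handler
 * below is a hypothetical example; NULL members are simply never
 * invoked by HW.
 */
static void example_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier up on the associated net_device */
}

static const struct vxge_hw_uld_cbs example_uld_cbs = {
	.link_up   = example_link_up,
	.link_down = NULL,
	.crit_err  = NULL,
};
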
519/*
520 * struct __vxge_hw_blockpool_entry - Block private data structure
521 * @item: List header used to link.
522 * @length: Length of the block
523 * @memblock: Virtual address block
524 * @dma_addr: DMA Address of the block.
525 * @dma_handle: DMA handle of the block.
526 * @acc_handle: DMA acc handle
527 *
528 * Each block is allocated with a header used to link it into the free list.
529 *
530 */
531struct __vxge_hw_blockpool_entry {
532 struct list_head item;
533 u32 length;
534 void *memblock;
535 dma_addr_t dma_addr;
536 struct pci_dev *dma_handle;
537 struct pci_dev *acc_handle;
538};
539
540/*
541 * struct __vxge_hw_blockpool - Block Pool
542 * @hldev: HW device
543 * @block_size: size of each block.
544 * @pool_size: Number of blocks in the pool
545 * @pool_max: Maximum number of blocks above which to free additional blocks
546 * @req_out: Number of block requests outstanding with the OS
547 * @free_block_list: List of free blocks
548 *
549 * The block pool contains the preallocated DMA blocks.
550 *
551 */
552struct __vxge_hw_blockpool {
553 struct __vxge_hw_device *hldev;
554 u32 block_size;
555 u32 pool_size;
556 u32 pool_max;
557 u32 req_out;
558 struct list_head free_block_list;
559 struct list_head free_entry_list;
560};
561
562/*
563 * enum __vxge_hw_channel_type - Enumerated channel types.
564 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
565 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
566 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
567 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
568 * (and recognized) channel types. Currently: 2.
569 *
570 * Enumerated channel types. Currently there are only two link-layer
571 * channels - Titan fifo and Titan ring. In the future the list will grow.
572 */
573enum __vxge_hw_channel_type {
574 VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
575 VXGE_HW_CHANNEL_TYPE_FIFO = 1,
576 VXGE_HW_CHANNEL_TYPE_RING = 2,
577 VXGE_HW_CHANNEL_TYPE_MAX = 3
578};
579
580/*
581 * struct __vxge_hw_channel
582 * @item: List item; used to maintain a list of open channels.
583 * @type: Channel type. See enum vxge_hw_channel_type{}.
584 * @devh: Device handle. HW device object that contains _this_ channel.
585 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
586 * @length: Channel length. Currently allocated number of descriptors.
587 * The channel length "grows" when more descriptors get allocated.
588 * See _hw_mempool_grow.
589 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
590 * by driver for the subsequent send or receive operation.
591 * See vxge_hw_fifo_txdl_reserve(),
592 * vxge_hw_ring_rxd_reserve().
593 * @reserve_ptr: Current pointer in the reserve array
594 * @reserve_top: Maximum number of descriptors available in the
595 * reserve array.
596 * @work_arr: Work array. Contains descriptors posted to the channel.
597 * Note that at any point in time @work_arr contains 3 types of
598 * descriptors:
599 * 1) posted but not yet consumed by Titan device;
600 * 2) consumed but not yet completed;
601 * 3) completed but not yet freed
602 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
603 * @post_index: Post index. At any point in time points to the
604 * position in the channel that will contain the next
605 * to-be-posted descriptor.
606 * @compl_index: Completion index. At any point in time points to the
607 * position in the channel that will contain the next
608 * to-be-completed descriptor.
609 * @free_arr: Free array. Contains completed descriptors that were freed
610 * (i.e., handed over back to HW) by driver.
611 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
612 * @free_ptr: current pointer in free array
613 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
614 * to store per-operation control information.
615 * @stats: Pointer to common statistics
616 * @userdata: Per-channel opaque (void*) user-defined context, which may be
617 * driver object, ULP connection, etc.
618 * Once channel is open, @userdata is passed back to user via
619 * vxge_hw_channel_callback_f.
620 *
621 * HW channel object.
622 *
623 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
624 */
625struct __vxge_hw_channel {
626 struct list_head item;
627 enum __vxge_hw_channel_type type;
628 struct __vxge_hw_device *devh;
629 struct __vxge_hw_vpath_handle *vph;
630 u32 length;
631 u32 vp_id;
632 void **reserve_arr;
633 u32 reserve_ptr;
634 u32 reserve_top;
635 void **work_arr;
636 u32 post_index ____cacheline_aligned;
637 u32 compl_index ____cacheline_aligned;
638 void **free_arr;
639 u32 free_ptr;
640 void **orig_arr;
641 u32 per_dtr_space;
642 void *userdata;
643 struct vxge_hw_common_reg __iomem *common_reg;
644 u32 first_vp_id;
645 struct vxge_hw_vpath_stats_sw_common_info *stats;
646
647} ____cacheline_aligned;
648
649/*
650 * struct __vxge_hw_virtualpath - Virtual Path
651 *
652 * @vp_id: Virtual path id
653 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
654 * @hldev: Hal device
655 * @vp_config: Virtual Path Config
656 * @vp_reg: VPATH Register map address in BAR0
657 * @vpmgmt_reg: VPATH_MGMT register map address
658 * @max_mtu: Max mtu that can be supported
659 * @vsport_number: vsport attached to this vpath
660 * @max_kdfc_db: Maximum kernel mode doorbells
661 * @max_nofl_db: Maximum non offload doorbells
662 * @tx_intr_num: Interrupt Number associated with the TX
663 *
664 * @ringh: Ring Queue
665 * @fifoh: FIFO Queue
666 * @vpath_handles: Virtual Path handles list
667 * @stats_block: Memory for DMAing stats
668 * @stats: Vpath statistics
669 *
670 * Virtual path structure to encapsulate the data related to a virtual path.
671 * Virtual paths are allocated by the HW upon getting configuration from the
672 * driver and inserted into the list of virtual paths.
673 */
674struct __vxge_hw_virtualpath {
675 u32 vp_id;
676
677 u32 vp_open;
678#define VXGE_HW_VP_NOT_OPEN 0
679#define VXGE_HW_VP_OPEN 1
680
681 struct __vxge_hw_device *hldev;
682 struct vxge_hw_vp_config *vp_config;
683 struct vxge_hw_vpath_reg __iomem *vp_reg;
684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
685 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
686
687 u32 max_mtu;
688 u32 vsport_number;
689 u32 max_kdfc_db;
690 u32 max_nofl_db;
691 u64 tim_tti_cfg1_saved;
692 u64 tim_tti_cfg3_saved;
693 u64 tim_rti_cfg1_saved;
694 u64 tim_rti_cfg3_saved;
695
696 struct __vxge_hw_ring *____cacheline_aligned ringh;
697 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
698 struct list_head vpath_handles;
699 struct __vxge_hw_blockpool_entry *stats_block;
700 struct vxge_hw_vpath_stats_hw_info *hw_stats;
701 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
702 struct vxge_hw_vpath_stats_sw_info *sw_stats;
703 spinlock_t lock;
704};
705
706/*
707 * struct __vxge_hw_vpath_handle - List item to store callback information
708 * @item: List head to keep the item in linked list
709 * @vpath: Virtual path to which this item belongs
710 *
711 * This structure is used to store the callback information.
712 */
713struct __vxge_hw_vpath_handle {
714 struct list_head item;
715 struct __vxge_hw_virtualpath *vpath;
716};
717
718/*
719 * struct __vxge_hw_device
720 *
721 * HW device object.
722 */
723/**
724 * struct __vxge_hw_device - Hal device object
725 * @magic: Magic Number
726 * @bar0: BAR0 virtual address.
727 * @pdev: Physical device handle
728 * @config: Configuration passed by the LL driver at initialization
729 * @link_state: Link state
730 *
731 * HW device object. Represents Titan adapter
732 */
733struct __vxge_hw_device {
734 u32 magic;
735#define VXGE_HW_DEVICE_MAGIC 0x12345678
736#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
737 void __iomem *bar0;
738 struct pci_dev *pdev;
739 struct net_device *ndev;
740 struct vxge_hw_device_config config;
741 enum vxge_hw_device_link_state link_state;
742
743 struct vxge_hw_uld_cbs uld_callbacks;
744
745 u32 host_type;
746 u32 func_id;
747 u32 access_rights;
748#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
749#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
750#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
751 struct vxge_hw_legacy_reg __iomem *legacy_reg;
752 struct vxge_hw_toc_reg __iomem *toc_reg;
753 struct vxge_hw_common_reg __iomem *common_reg;
754 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
755 struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
756 [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
757 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
758 [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
759 struct vxge_hw_vpath_reg __iomem *vpath_reg \
760 [VXGE_HW_TITAN_VPATH_REG_SPACES];
761 u8 __iomem *kdfc;
762 u8 __iomem *usdc;
763 struct __vxge_hw_virtualpath virtual_paths \
764 [VXGE_HW_MAX_VIRTUAL_PATHS];
765 u64 vpath_assignments;
766 u64 vpaths_deployed;
767 u32 first_vp_id;
768 u64 tim_int_mask0[4];
769 u32 tim_int_mask1[4];
770
771 struct __vxge_hw_blockpool block_pool;
772 struct vxge_hw_device_stats stats;
773 u32 debug_module_mask;
774 u32 debug_level;
775 u32 level_err;
776 u32 level_trace;
777 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
778};
779
780#define VXGE_HW_INFO_LEN 64
781/**
782 * struct vxge_hw_device_hw_info - Device information
783 * @host_type: Host Type
784 * @func_id: Function Id
785 * @vpath_mask: vpath bit mask
786 * @fw_version: Firmware version
787 * @fw_date: Firmware Date
788 * @flash_version: Flash version
789 * @flash_date: Flash date
790 * @mac_addrs: Mac addresses for each vpath
791 * @mac_addr_masks: Mac address masks for each vpath
792 *
793 * Returns the vpath mask that has the bits set for each vpath allocated
794 * for the driver and the first mac address for each vpath
795 */
796struct vxge_hw_device_hw_info {
797 u32 host_type;
798#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
799#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
800#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
801#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
802#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
803#define VXGE_HW_SR_VH_FUNCTION0 5
804#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
805#define VXGE_HW_VH_NORMAL_FUNCTION 7
806 u64 function_mode;
807#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
808#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
809#define VXGE_HW_FUNCTION_MODE_SRIOV 2
810#define VXGE_HW_FUNCTION_MODE_MRIOV 3
811#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
812#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
813#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
814#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
815#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
816#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
817#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
818
819 u32 func_id;
820 u64 vpath_mask;
821 struct vxge_hw_device_version fw_version;
822 struct vxge_hw_device_date fw_date;
823 struct vxge_hw_device_version flash_version;
824 struct vxge_hw_device_date flash_date;
825 u8 serial_number[VXGE_HW_INFO_LEN];
826 u8 part_number[VXGE_HW_INFO_LEN];
827 u8 product_desc[VXGE_HW_INFO_LEN];
828 u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
829 u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
830};
831
832/**
833 * struct vxge_hw_device_attr - Device memory spaces.
834 * @bar0: BAR0 virtual address.
835 * @pdev: PCI device object.
836 *
837 * Device memory spaces. Includes configuration, BAR0 etc. per device
838 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
839 */
840struct vxge_hw_device_attr {
841 void __iomem *bar0;
842 struct pci_dev *pdev;
843 struct vxge_hw_uld_cbs uld_callbacks;
844};
845
846#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
847
848#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
849 if (i < 16) { \
850 m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
851 m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
852 } \
853 else { \
854 m1[0] = 0x80000000; \
855 m1[1] = 0x40000000; \
856 } \
857}
858
859#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
860 if (i < 16) { \
861 m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
862 m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
863 } \
864 else { \
865 m1[0] = 0; \
866 m1[1] = 0; \
867 } \
868}
869
870#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
871 status = vxge_hw_mrpcim_stats_access(hldev, \
872 VXGE_HW_STATS_OP_READ, \
873 loc, \
874 offset, \
875 &val64); \
876 if (status != VXGE_HW_OK) \
877 return status; \
878}
879
880/*
881 * struct __vxge_hw_ring - Ring channel.
882 * @channel: Channel "base" of this ring, the common part of all HW
883 * channels.
884 * @mempool: Memory pool, the pool from which descriptors get allocated.
885 * (See vxge_hw_mm.h).
886 * @config: Ring configuration, part of device configuration
887 * (see struct vxge_hw_device_config{}).
888 * @ring_length: Length of the ring
889 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
890 * as per Titan User Guide.
891 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
892 * 1-buffer mode descriptor is 32 byte long, etc.
893 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
894 * per-descriptor data (e.g., DMA handle for Solaris)
895 * @per_rxd_space: Per rxd space requested by driver
896 * @rxds_per_block: Number of descriptors per hardware-defined RxD
897 * block. Depends on the (1-, 3-, 5-) buffer mode.
898 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
899 * usage. Not to be confused with @rxd_priv_size.
900 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
901 * @callback: Channel completion callback. HW invokes the callback when there
902 * are new completions on that channel. In many implementations
903 * the @callback executes in the hw interrupt context.
904 * @rxd_init: Channel's descriptor-initialize callback.
905 * See vxge_hw_ring_rxd_init_f{}.
906 * If not NULL, HW invokes the callback when opening
907 * the ring.
908 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
909 * HW invokes the callback when closing the corresponding channel.
910 * See also vxge_hw_channel_rxd_term_f{}.
911 * @stats: Statistics for ring
912 * Ring channel.
913 *
914 * Note: The structure is cache line aligned for better
915 * CPU cache utilization.
916 */
917struct __vxge_hw_ring {
918 struct __vxge_hw_channel channel;
919 struct vxge_hw_mempool *mempool;
920 struct vxge_hw_vpath_reg __iomem *vp_reg;
921 struct vxge_hw_common_reg __iomem *common_reg;
922 u32 ring_length;
923 u32 buffer_mode;
924 u32 rxd_size;
925 u32 rxd_priv_size;
926 u32 per_rxd_space;
927 u32 rxds_per_block;
928 u32 rxdblock_priv_size;
929 u32 cmpl_cnt;
930 u32 vp_id;
931 u32 doorbell_cnt;
932 u32 total_db_cnt;
933 u64 rxds_limit;
934 u32 rtimer;
935 u64 tim_rti_cfg1_saved;
936 u64 tim_rti_cfg3_saved;
937
938 enum vxge_hw_status (*callback)(
939 struct __vxge_hw_ring *ringh,
940 void *rxdh,
941 u8 t_code,
942 void *userdata);
943
944 enum vxge_hw_status (*rxd_init)(
945 void *rxdh,
946 void *userdata);
947
948 void (*rxd_term)(
949 void *rxdh,
950 enum vxge_hw_rxd_state state,
951 void *userdata);
952
953 struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
954 struct vxge_hw_ring_config *config;
955} ____cacheline_aligned;
956
957/**
958 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
959 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
960 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
961 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
962 * device.
963 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
964 * filling-in and posting later.
965 *
966 * Titan/HW descriptor states.
967 *
968 */
969enum vxge_hw_txdl_state {
970 VXGE_HW_TXDL_STATE_NONE = 0,
971 VXGE_HW_TXDL_STATE_AVAIL = 1,
972 VXGE_HW_TXDL_STATE_POSTED = 2,
973 VXGE_HW_TXDL_STATE_FREED = 3
974};
975/*
976 * struct __vxge_hw_fifo - Fifo.
977 * @channel: Channel "base" of this fifo, the common part of all HW
978 * channels.
979 * @mempool: Memory pool, from which descriptors get allocated.
980 * @config: Fifo configuration, part of device configuration
981 * (see struct vxge_hw_device_config{}).
982 * @interrupt_type: Interrupt type to be used
983 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
984 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
985 * For details on TxDLs please refer to the Titan UG.
986 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
987 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
988 * @priv_size: Per-Tx descriptor space reserved for driver
989 * usage.
990 * @per_txdl_space: Per txdl private space for the driver
991 * @callback: Fifo completion callback. HW invokes the callback when there
992 * are new completions on that fifo. In many implementations
993 * the @callback executes in the hw interrupt context.
994 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
995 * HW invokes the callback when closing the corresponding fifo.
996 * See also vxge_hw_fifo_txdl_term_f{}.
997 * @stats: Statistics of this fifo
998 *
999 * Fifo channel.
1000 * Note: The structure is cache line aligned.
1001 */
1002struct __vxge_hw_fifo {
1003 struct __vxge_hw_channel channel;
1004 struct vxge_hw_mempool *mempool;
1005 struct vxge_hw_fifo_config *config;
1006 struct vxge_hw_vpath_reg __iomem *vp_reg;
1007 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
1008 u64 interrupt_type;
1009 u32 no_snoop_bits;
1010 u32 txdl_per_memblock;
1011 u32 txdl_size;
1012 u32 priv_size;
1013 u32 per_txdl_space;
1014 u32 vp_id;
1015 u32 tx_intr_num;
1016 u32 rtimer;
1017 u64 tim_tti_cfg1_saved;
1018 u64 tim_tti_cfg3_saved;
1019
1020 enum vxge_hw_status (*callback)(
1021 struct __vxge_hw_fifo *fifo_handle,
1022 void *txdlh,
1023 enum vxge_hw_fifo_tcode t_code,
1024 void *userdata,
1025 struct sk_buff ***skb_ptr,
1026 int nr_skb,
1027 int *more);
1028
1029 void (*txdl_term)(
1030 void *txdlh,
1031 enum vxge_hw_txdl_state state,
1032 void *userdata);
1033
1034 struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
1035} ____cacheline_aligned;
1036
1037/*
1038 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
1039 * @dma_addr: DMA (mapped) address of _this_ descriptor.
1040 * @dma_handle: DMA handle used to map the descriptor onto device.
1041 * @dma_offset: Descriptor's offset in the memory block. HW allocates
1042 * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
1043 * Each memblock is a contiguous block of DMA-able memory.
1044 * @frags: Total number of fragments (that is, contiguous data buffers)
1045 * carried by this TxDL.
1046 * @align_vaddr_start: Aligned virtual address start
1047 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
1048 * alignment. Used to place one or more mis-aligned fragments
1049 * @align_dma_addr: DMA address translated from the @align_vaddr.
1050 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
1051 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
1052 * @align_dma_offset: The current offset into the @align_vaddr area.
1053 * Grows while filling the descriptor, gets reset.
1054 * @align_used_frags: Number of fragments used.
1055 * @alloc_frags: Total number of fragments allocated.
1056 * @unused: TODO
1057 * @next_txdl_priv: (TODO).
1058 * @first_txdp: (TODO).
1059 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
1060 * TxDL list.
1061 * @txdlh: Corresponding txdlh to this TxDL.
1062 * @memblock: Pointer to the TxDL memory block or memory page.
1064 * @dma_object: DMA address and handle of the memory block that contains
1065 * the descriptor. This member is used only in the "checked"
1066 * version of the HW (to enforce certain assertions);
1067 * otherwise it gets compiled out.
1068 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
1069 *
1070 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
1071 * information associated with the descriptor. Note that driver can ask HW
1072 * to allocate additional per-descriptor space for its own (driver-specific)
1073 * purposes.
1074 *
1075 * See also: struct vxge_hw_ring_rxd_priv{}.
1076 */
1077struct __vxge_hw_fifo_txdl_priv {
1078 dma_addr_t dma_addr;
1079 struct pci_dev *dma_handle;
1080 ptrdiff_t dma_offset;
1081 u32 frags;
1082 u8 *align_vaddr_start;
1083 u8 *align_vaddr;
1084 dma_addr_t align_dma_addr;
1085 struct pci_dev *align_dma_handle;
1086 struct pci_dev *align_dma_acch;
1087 ptrdiff_t align_dma_offset;
1088 u32 align_used_frags;
1089 u32 alloc_frags;
1090 u32 unused;
1091 struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
1092 struct vxge_hw_fifo_txd *first_txdp;
1093 void *memblock;
1094};
1095
1096/*
1097 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
1098 * @control_0: Bits 0 to 7 - Doorbell type.
1099 * Bits 8 to 31 - Reserved.
1100 * Bits 32 to 39 - The highest TxD in this TxDL.
1101 * Bits 40 to 47 - Reserved.
1102 * Bits 48 to 55 - Reserved.
1103 * Bits 56 to 63 - No snoop flags.
1104 * @txdl_ptr: The starting location of the TxDL in host memory.
1105 *
1106 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
1107 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
1108 * part of a doorbell write. Consumed by the adapter but is not written by the
1109 * adapter.
1110 */
1111struct __vxge_hw_non_offload_db_wrapper {
1112 u64 control_0;
1113#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
1114#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
1115#define VXGE_HW_NODBW_TYPE_NODBW 0
1116
1117#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
1118#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
1119
1120#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
1121#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
1122#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
1123#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
1124
1125 u64 txdl_ptr;
1126};
1127
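/*
 * Illustrative composition of the wrapper's control_0 word from the
 * macros above; txd_idx (the highest TxD number in the TxDL) is a
 * hypothetical variable.
 *
 *	u64 ctrl0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *		    VXGE_HW_NODBW_LAST_TXD_NUMBER(txd_idx) |
 *		    VXGE_HW_NODBW_LIST_NO_SNOOP(
 *			VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
 *			VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
 */
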
1128/*
1129 * TX Descriptor
1130 */
1131
1132/**
1133 * struct vxge_hw_fifo_txd - Transmit Descriptor
1134 * @control_0: Bits 0 to 6 - Reserved.
1135 * Bit 7 - List Ownership. This field should be initialized
1136 * to '1' by the driver before the transmit list pointer is
1137 * written to the adapter. This field will be set to '0' by the
1138 * adapter once it has completed transmitting the frame or frames in
1139 * the list. Note - This field is only valid in TxD0. Additionally,
1140 * for multi-list sequences, the driver should not release any
1141 * buffers until the ownership of the last list in the multi-list
1142 * sequence has been returned to the host.
1143 * Bits 8 to 11 - Reserved
1144 * Bits 12 to 15 - Transfer_Code. This field is only valid in
1145 * TxD0. It is used to describe the status of the transmit data
1146 * buffer transfer. This field is always overwritten by the
1147 * adapter, so this field may be initialized to any value.
1148 * Bits 16 to 17 - Host steering. This field allows the host to
1149 * override the selection of the physical transmit port.
1150 * Attention:
1151 * "Normal" sounds as if learned from the switch rather than
1152 * from the aggregation algorithms.
1153 * 00: Normal. Use Destination/MAC Address
1154 * lookup to determine the transmit port.
1155 * 01: Send on physical Port1.
1156 * 10: Send on physical Port0.
1157 * 11: Send on both ports.
1158 * Bits 18 to 21 - Reserved
1159 * Bits 22 to 23 - Gather_Code. This field is set by the host and
1160 * is used to describe how individual buffers comprise a frame.
1161 * 10: First descriptor of a frame.
1162 * 00: Middle of a multi-descriptor frame.
1163 * 01: Last descriptor of a frame.
1164 * 11: First and last descriptor of a frame (the entire frame
1165 * resides in a single buffer).
1166 * For multi-descriptor frames, the only valid gather code sequence
1167 * is {10, [00], 01}. In other words, the descriptors must be placed
1168 * in the list in the correct order.
1169 * Bits 24 to 27 - Reserved
1170 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
1171 * definition. Only valid in TxD0. This field allows the host to
1172 * indicate the Ethernet encapsulation of an outbound LSO packet.
1173 * 00 - classic mode (best guess)
1174 * 01 - LLC
1175 * 10 - SNAP
1176 * 11 - DIX
1177 * If "classic mode" is selected, the adapter will attempt to
1178 * decode the frame's Ethernet encapsulation by examining the L/T
1179 * field as follows:
1180 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
1181 * if packet is IPv4 or IPv6.
1182 * 0x8870 Jumbo-SNAP encoding.
1183 * 0x0800 IPv4 DIX encoding
1184 * 0x86DD IPv6 DIX encoding
1185 * others illegal encapsulation
1186 * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
1187 * Set to 1 to perform segmentation offload for TCP/UDP.
1188 * This field is valid only in TxD0.
1189 * Bits 31 to 33 - Reserved.
1190 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
1191 * This field is meaningful only when LSO_Control is non-zero.
1192 * When LSO_Control is set to TCP_LSO, the single (possibly large)
1193 * TCP segment described by this TxDL will be sent as a series of
1194 * TCP segments each of which contains no more than LSO_MSS
1195 * payload bytes.
1196 * When LSO_Control is set to UDP_LSO, the single (possibly large)
1197 * UDP datagram described by this TxDL will be sent as a series of
1198 * UDP datagrams each of which contains no more than LSO_MSS
1199 * payload bytes.
1200 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
1201 * or TCP payload, with the exception of the last, which will have
1202 * <= LSO_MSS bytes of payload.
1203 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
1204 * buffer to be read by the adapter. This field is written by the
1205 * host. A value of 0 is illegal.
1206 * Bits 32 to 63 - This value is written by the adapter upon
1207 * completion of a UDP or TCP LSO operation and indicates the number
1208 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
1209 * returned for any non-LSO operation.
1210 * @control_1: Bits 0 to 4 - Reserved.
1211 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
1212 * offload. This field is only valid in the first TxD of a frame.
1213 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
1214 * This field is only valid in the first TxD of a frame (the TxD's
1215 * gather code must be 10 or 11). The driver should only set this
1216 * bit if it can guarantee that TCP is present.
1217 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
1218 * This field is only valid in the first TxD of a frame (the TxD's
1219 * gather code must be 10 or 11). The driver should only set this
1220 * bit if it can guarantee that UDP is present.
1221 * Bits 8 to 14 - Reserved.
1222 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
1223 * instruct the adapter to insert the VLAN tag specified by the
1224 * Tx_VLAN_Tag field. This field is only valid in the first TxD of
1225 * a frame.
1226 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
1227 * to be inserted into the frame by the adapter (the first two bytes
1228 * of a VLAN tag are always 0x8100). This field is only valid if the
1229 * Tx_VLAN_Enable field is set to '1'.
1230 * Bits 32 to 33 - Reserved.
1231 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
1232 * number the frame is associated with. This field is written by the
1233 * host. It is only valid in the first TxD of a frame.
1234 * Bits 40 to 42 - Reserved.
1235 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
1236 * functions. This field is valid only in the first TxD
1237 * of a frame.
1238 * Bits 44 to 45 - Reserved.
1239 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
1240 * generate an interrupt as soon as all of the frames in the list
1241 * have been transmitted. In order to have per-frame interrupts,
1242 * the driver should place a maximum of one frame per list. This
1243 * field is only valid in the first TxD of a frame.
1244 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
1245 * to count the frame toward the utilization interrupt specified in
1246 * the Tx_Int_Number field. This field is only valid in the first
1247 * TxD of a frame.
1248 * Bits 48 to 63 - Reserved.
1249 * @buffer_pointer: Buffer start address.
1250 * @host_control: Host_Control. Opaque 64-bit data stored by driver inside the
1251 * Titan descriptor prior to posting the latter on the fifo
1252 * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
1253 * to the driver with each completed descriptor.
1254 *
1255 * Transmit descriptor (TxD). A fifo descriptor contains a configured number
1256 * (list) of TxDs. For more details please refer to Titan User Guide,
1257 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
1258 */
1259struct vxge_hw_fifo_txd {
1260 u64 control_0;
1261#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1262
1263#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1264#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1265#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
1266
1267
1268#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
1269#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
1270#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
1271
1272
1273#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
1274
1275#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
1276
1277#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
1278
1279 u64 control_1;
1280#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
1281#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
1282#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
1283#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
1284
1285#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
1286
1287#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
1288
1289#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
1290#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
1291
1292 u64 buffer_pointer;
1293
1294 u64 host_control;
1295};
1296
1297/**
1298 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
1299 * @host_control: This field is exclusively for host use and is "readonly"
1300 * from the adapter's perspective.
1301 * @control_0: Bits 0 to 6 - RTH_Bucket
1302 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
1303 * by the host, and is set to 0 by the adapter.
1304 * 0 - Host owns RxD and buffer.
1305 * 1 - The adapter owns RxD and buffer.
1306 * Bit 8 - Fast_Path_Eligible When set, indicates that the
1307 * received frame meets all of the criteria for fast path processing.
1308 * The required criteria are as follows:
1309 * !SYN &
1310 * (Transfer_Code == "Transfer OK") &
1311 * (!Is_IP_Fragment) &
1312 * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
1313 * (Is_IPv6)) &
1314 * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
1315 * (Is_UDP & (computed_L4_checksum == 0xFFFF |
1316 * computed _L4_checksum == 0x0000)))
1317 * (same meaning for all RxD buffer modes)
1318 * Bit 9 - L3 Checksum Correct
1319 * Bit 10 - L4 Checksum Correct
1320 * Bit 11 - Reserved
1321 * Bit 12 to 15 - This field is written by the adapter. It is
1322 * used to report the status of the frame transfer to the host.
1323 * 0x0 - Transfer OK
1324 * 0x4 - RDA Failure During Transfer
1325 * 0x5 - Unparseable Packet, such as unknown IPv6 header.
1326 * 0x6 - Frame integrity error (FCS or ECC).
1327 * 0x7 - Buffer Size Error. The provided buffer(s) were not
1328 * appropriately sized and data loss occurred.
1329 * 0x8 - Internal ECC Error. RxD corrupted.
1330 * 0x9 - IPv4 Checksum error
1331 * 0xA - TCP/UDP Checksum error
1332 * 0xF - Unknown Error or Multiple Error. Indicates an
1333 * unknown problem or that more than one of transfer codes is set.
1334 * Bit 16 - SYN The adapter sets this field to indicate that
1335 * the incoming frame contained a TCP segment with its SYN bit
1336 * set and its ACK bit NOT set. (same meaning for all RxD buffer
1337 * modes)
1338 * Bit 17 - Is ICMP
1339 * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
1340 * Socket Pair Direct Match Table and the frame was steered based
1341 * on SPDM.
1342 * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
1343 * Indirection Table and the frame was steered based on hash
1344 * indirection.
1345 * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
1346 * type) that was used to calculate the hash.
1347 * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
1348 * tagged.
1349 * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
1350 * of the received frame.
1351 * 0x0 - Ethernet DIX
1352 * 0x1 - LLC
1353 * 0x2 - SNAP (includes Jumbo-SNAP)
1354 * 0x3 - IPX
1355 * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
1356 * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
1357 * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
1358 * IP packet.
1359 * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
1360 * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
1361 * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
1362 * arrived with the frame. If the resulting computed IPv4 header
1363 * checksum for the frame did not produce the expected 0xFFFF value,
1364 * then the transfer code would be set to 0x9.
1365 * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
1366 * arrived with the frame. If the resulting computed TCP/UDP checksum
1367 * for the frame did not produce the expected 0xFFFF value, then the
1368 * transfer code would be set to 0xA.
1369 * @control_1: Bits 0 to 1 - Reserved
1370 * Bits 2 to 15 - Buffer0_Size.This field is set by the host and
1371 * eventually overwritten by the adapter. The host writes the
1372 * available buffer size in bytes when it passes the descriptor to
1373 * the adapter. When a frame is delivered to the host, the adapter
1374 * populates this field with the number of bytes written into the
1375 * buffer. The largest supported buffer is 16,383 bytes.
1376 * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
1377 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
1378 * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
1379 * of the VLAN tag, if one was detected by the adapter. This field is
1380 * populated even if VLAN-tag stripping is enabled.
1381 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
1382 *
1383 * One buffer mode RxD for ring structure
1384 */
1385struct vxge_hw_ring_rxd_1 {
1386 u64 host_control;
1387 u64 control_0;
1388#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
1389
1390#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1391
1392#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
1393
1394#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
1395
1396#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
1397
1398#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1399#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1400
1401#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
1402
1403#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
1404
1405#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
1406
1407#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
1408
1409#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
1410
1411#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
1412
1413#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
1414
1415#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
1416
1417#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
1418
1419#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
1420
1421#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
1422
1423 u64 control_1;
1424
1425#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
1426#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
1427#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
1428
1429#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
1430
1431#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
1432
1433 u64 buffer0_ptr;
1434};
1435
1436enum vxge_hw_rth_algoritms {
1437 RTH_ALG_JENKINS = 0,
1438 RTH_ALG_MS_RSS = 1,
1439 RTH_ALG_CRC32C = 2
1440};
1441
1442/**
1443 * struct vxge_hw_rth_hash_types - RTH hash types.
1444 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
1445 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
1446 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
1447 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
1448 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
1449 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
1450 *
1451 * Used to pass RTH hash types to rts_rts_set.
1452 *
1453 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1454 */
1455struct vxge_hw_rth_hash_types {
1456 u8 hash_type_tcpipv4_en:1,
1457 hash_type_ipv4_en:1,
1458 hash_type_tcpipv6_en:1,
1459 hash_type_ipv6_en:1,
1460 hash_type_tcpipv6ex_en:1,
1461 hash_type_ipv6ex_en:1;
1462};
1463
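/*
 * Sketch: enabling RTH for IPv4 and TCP-over-IPv4 flows. The
 * four-argument signature of vxge_hw_vpath_rts_rth_set() (handle,
 * algorithm, hash types, bucket size) is assumed here; the bucket
 * size of 8 is illustrative.
 */
static inline enum vxge_hw_status
example_rth_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types ht = {
		.hash_type_tcpipv4_en = 1,
		.hash_type_ipv4_en = 1,
	};

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 8);
}
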
1464void vxge_hw_device_debug_set(
1465 struct __vxge_hw_device *devh,
1466 enum vxge_debug_level level,
1467 u32 mask);
1468
1469u32
1470vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
1471
1472u32
1473vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
1474
1475/**
1476 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
1477 * @buf_mode: Buffer mode (1, 3 or 5)
1478 *
1479 * This function returns the size of an RxD for the given buffer mode
1480 */
1481static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
1482{
1483 return sizeof(struct vxge_hw_ring_rxd_1);
1484}
1485
1486/**
1487 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
1488 * @buf_mode: Buffer mode (1 buffer mode only)
1489 *
1490 * This function returns the number of RxDs per RxD block for the given
 * buffer mode
1491 */
1492static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
1493{
1494 return (u32)((VXGE_HW_BLOCK_SIZE-16) /
1495 sizeof(struct vxge_hw_ring_rxd_1));
1496}
1497
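/*
 * Worked example (VXGE_HW_BLOCK_SIZE assumed to be 4096): with a
 * 32-byte one-buffer RxD, (4096 - 16) / 32 = 127 RxDs fit per block;
 * the 16 bytes subtracted are reserved at the end of each RxD block
 * for internal HW usage (see @rxdblock_priv_size above).
 */
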
1498/**
1499 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
1500 * @rxdh: Descriptor handle.
1501 * @dma_pointer: DMA address of a single receive buffer this descriptor
1502 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
1503 * the receive buffer should be already mapped to the device
1504 * @size: Size of the receive @dma_pointer buffer.
1505 *
1506 * Prepare 1-buffer-mode Rx descriptor for posting
1507 * (via vxge_hw_ring_rxd_post()).
1508 *
1509 * This inline helper function returns no value and always succeeds.
1511 *
1512 */
1513static inline
1514void vxge_hw_ring_rxd_1b_set(
1515 void *rxdh,
1516 dma_addr_t dma_pointer,
1517 u32 size)
1518{
1519 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1520 rxdp->buffer0_ptr = dma_pointer;
1521 rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
1522 rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
1523}
1524
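/*
 * Typical preparation sequence (sketch): map the receive buffer for
 * DMA, then point the descriptor at it. Error handling for
 * pci_map_single() is omitted for brevity; all names other than
 * vxge_hw_ring_rxd_1b_set() are illustrative.
 */
static inline void example_rxd_prepare(void *rxdh, struct pci_dev *pdev,
				       void *buf, u32 size)
{
	dma_addr_t dma = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(rxdh, dma, size);
}
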
1525/**
1526 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
1527 * descriptor.
1528 * @ring_handle: Ring handle.
1529 * @rxdh: Descriptor handle.
1530 * @pkt_length: Length (in bytes) of the data in the buffer carried by
1531 * the completed descriptor. Returned by HW.
1532 *
1534 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
1535 * This inline helper-function uses completed descriptor to populate receive
1536 * buffer pointer and other "out" parameters. The function always succeeds.
1537 *
1538 */
1539static inline
1540void vxge_hw_ring_rxd_1b_get(
1541 struct __vxge_hw_ring *ring_handle,
1542 void *rxdh,
1543 u32 *pkt_length)
1544{
1545 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1546
1547 *pkt_length =
1548 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
1549}
1550
1551/**
1552 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
1553 * a completed receive descriptor for 1b mode.
1554 * @ring_handle: Ring handle.
1555 * @rxdh: Descriptor handle.
1556 * @rxd_info: Descriptor information
1557 *
1558 * Retrieve extended information associated with a completed receive descriptor.
1559 *
1560 */
1561static inline
1562void vxge_hw_ring_rxd_1b_info_get(
1563 struct __vxge_hw_ring *ring_handle,
1564 void *rxdh,
1565 struct vxge_hw_ring_rxd_info *rxd_info)
1566{
1567
1568 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1569 rxd_info->syn_flag =
1570 (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
1571 rxd_info->is_icmp =
1572 (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
1573 rxd_info->fast_path_eligible =
1574 (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
1575 rxd_info->l3_cksum_valid =
1576 (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
1577 rxd_info->l3_cksum =
1578 (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
1579 rxd_info->l4_cksum_valid =
1580 (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
1581 rxd_info->l4_cksum =
1582 (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
1583 rxd_info->frame =
1584 (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
1585 rxd_info->proto =
1586 (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
1587 rxd_info->is_vlan =
1588 (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
1589 rxd_info->vlan =
1590 (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
1591 rxd_info->rth_bucket =
1592 (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
1593 rxd_info->rth_it_hit =
1594 (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
1595 rxd_info->rth_spdm_hit =
1596 (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
1597 rxd_info->rth_hash_type =
1598 (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
1599 rxd_info->rth_value =
1600 (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
1601}
1602
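/*
 * Sketch of a ring completion callback built from the two helpers
 * above; a real callback would also hand the data up the stack and
 * re-post descriptors. example_ring_callback is hypothetical.
 */
static inline enum vxge_hw_status
example_ring_callback(struct __vxge_hw_ring *ringh, void *rxdh,
		      u8 t_code, void *userdata)
{
	struct vxge_hw_ring_rxd_info info;
	u32 pkt_length;

	vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
	vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &info);

	if (info.l3_cksum_valid && info.l4_cksum_valid)
		; /* checksums verified by HW, e.g. CHECKSUM_UNNECESSARY */

	return VXGE_HW_OK;
}
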
1603/**
1604 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
1605 * of 1b mode and 3b mode rings.
1606 * @rxdh: Descriptor handle.
1607 *
1608 * Returns: private driver info associated with the descriptor.
1609 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
1610 *
1611 */
1612static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
1613{
1614 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1615 return (void *)(size_t)rxdp->host_control;
1616}
1617
1618/**
1619 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
1620 * @txdlh: Descriptor handle.
1621 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1622 * and/or TCP and/or UDP.
1623 *
1624 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
1625 * descriptor.
1626 * This API is part of the preparation of the transmit descriptor for posting
1627 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1628 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1629 * and vxge_hw_fifo_txdl_buffer_set().
1630 * All these APIs fill in the fields of the fifo descriptor,
1631 * in accordance with the Titan specification.
1632 *
1633 */
1634static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
1635{
1636 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1637 txdp->control_1 |= cksum_bits;
1638}
1639
1640/**
1641 * vxge_hw_fifo_txdl_mss_set - Set MSS.
1642 * @txdlh: Descriptor handle.
1643 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1644 * driver, which in turn inserts the MSS into the @txdlh.
1645 *
1646 * This API is part of the preparation of the transmit descriptor for posting
1647 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1648 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1649 * and vxge_hw_fifo_txdl_cksum_set_bits().
1650 * All these APIs fill in the fields of the fifo descriptor,
1651 * in accordance with the Titan specification.
1652 *
1653 */
1654static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
1655{
1656 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1657
1658 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
1659 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
1660}
1661
1662/**
1663 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
1664 * @txdlh: Descriptor handle.
1665 * @vlan_tag: 16bit VLAN tag.
1666 *
1667 * Insert VLAN tag into specified transmit descriptor.
1668 * The actual insertion of the tag into outgoing frame is done by the hardware.
1669 */
1670static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
1671{
1672 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1673
1674 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
1675 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
1676}
1677
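/*
 * Sketch of per-frame TxDL preparation using the helpers above. The
 * descriptor handle would come from vxge_hw_fifo_txdl_reserve();
 * mss/vlan_tag values are caller-supplied.
 */
static inline void example_txdl_prepare(void *txdlh, int mss, u16 vlan_tag)
{
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
					 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
	if (mss)
		vxge_hw_fifo_txdl_mss_set(txdlh, mss);
	if (vlan_tag)
		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
}
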
1678/**
1679 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
1680 * @txdlh: Descriptor handle.
1681 *
1682 * Retrieve per-descriptor private data.
1683 * Note that driver requests per-descriptor space via
1684 * struct vxge_hw_fifo_attr passed to
1685 * vxge_hw_vpath_open().
1686 *
1687 * Returns: private driver data associated with the descriptor.
1688 */
1689static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
1690{
1691 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1692
1693 return (void *)(size_t)txdp->host_control;
1694}
1695
1696/**
1697 * struct vxge_hw_ring_attr - Ring open "template".
1698 * @callback: Ring completion callback. HW invokes the callback when there
1699 * are new completions on that ring. In many implementations
1700 * the @callback executes in the hw interrupt context.
1701 * @rxd_init: Ring's descriptor-initialize callback.
1702 * See vxge_hw_ring_rxd_init_f{}.
1703 * If not NULL, HW invokes the callback when opening
1704 * the ring.
1705 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
1706 * HW invokes the callback when closing the corresponding ring.
1707 * See also vxge_hw_ring_rxd_term_f{}.
1708 * @userdata: User-defined "context" of _that_ ring. Passed back to the
1709 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
1710 * @per_rxd_space: If specified (i.e., greater than zero): extra space
1711 * reserved by HW per each receive descriptor.
1712 * Can be used to store, and retrieve on completion,
1713 * information specific to the driver.
1715 *
1716 * Ring open "template". User fills the structure with ring
1717 * attributes and passes it to vxge_hw_vpath_open().
1718 */
1719struct vxge_hw_ring_attr {
1720 enum vxge_hw_status (*callback)(
1721 struct __vxge_hw_ring *ringh,
1722 void *rxdh,
1723 u8 t_code,
1724 void *userdata);
1725
1726 enum vxge_hw_status (*rxd_init)(
1727 void *rxdh,
1728 void *userdata);
1729
1730 void (*rxd_term)(
1731 void *rxdh,
1732 enum vxge_hw_rxd_state state,
1733 void *userdata);
1734
1735 void *userdata;
1736 u32 per_rxd_space;
1737};
1738
1739/**
1740 * function vxge_hw_fifo_callback_f - FIFO callback.
1741 * @vpath_handle: Virtual path whose fifo contains one or more completed
1742 * descriptors.
1743 * @txdlh: First completed descriptor.
1744 * @txdl_priv: Pointer to per txdl space allocated
1745 * @t_code: Transfer code, as per Titan User Guide.
1746 * Returned by HW.
1747 * @host_control: Opaque 64bit data stored by driver inside the Titan
1748 * descriptor prior to posting the latter on the fifo
1749 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
1750 * as is to the driver with each completed descriptor.
1751 * @userdata: Opaque per-fifo data specified at fifo open
1752 * time, via vxge_hw_vpath_open().
1753 *
1754 * Fifo completion callback (type declaration). A single per-fifo
1755 * callback is specified at fifo open time, via
1756 * vxge_hw_vpath_open(). Typically gets called as part of the processing
1757 * of the Interrupt Service Routine.
1758 *
1759 * Fifo callback gets called by HW if, and only if, there is at least
1760 * one new completion on a given fifo. Upon processing the first @txdlh the
1761 * driver is _supposed_ to continue consuming completions using:
1762 * - vxge_hw_fifo_txdl_next_completed()
1763 *
1764 * Note that failure to process new completions in a timely fashion
1765 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
1766 *
1767 * Non-zero @t_code means failure to process transmit descriptor.
1768 *
1769 * In the "transmit" case the failure could happen, for instance, when the
1770 * link is down, in which case Titan completes the descriptor because it
1771 * is not able to send the data out.
1772 *
1773 * For details please refer to Titan User Guide.
1774 *
1775 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
1776 */
1777/**
1778 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
1779 * @txdlh: First completed descriptor.
1780 * @txdl_priv: Pointer to per txdl space allocated
1781 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
1782 * @userdata: Per-fifo user data (a.k.a. context) specified at
1783 * fifo open time, via vxge_hw_vpath_open().
1784 *
1785 * Terminate descriptor callback. Unless NULL is specified in the
1786 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
1787 * HW invokes the callback as part of closing fifo, prior to
1788 * de-allocating the ring and associated data structures
1789 * (including descriptors).
1790 * The driver should use the callback to (for instance) unmap
1791 * and free DMA data buffers associated with the posted (state =
1792 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
1793 * as well as other relevant cleanup functions.
1794 *
1795 * See also: struct vxge_hw_fifo_attr{}
1796 */
1797/**
1798 * struct vxge_hw_fifo_attr - Fifo open "template".
1799 * @callback: Fifo completion callback. HW invokes the callback when there
1800 * are new completions on that fifo. In many implementations
1801 * the @callback executes in the hw interrupt context.
1802 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
1803 * HW invokes the callback when closing the corresponding fifo.
1804 * See also vxge_hw_fifo_txdl_term_f{}.
1805 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
1806 * user as one of the @callback, and @txdl_term arguments.
1807 * @per_txdl_space: If specified (i.e., greater than zero): extra space
1808 * reserved by HW per each transmit descriptor. Can be used to
1809 * store, and retrieve on completion, information specific
1810 * to the driver.
1811 *
1812 * Fifo open "template". User fills the structure with fifo
1813 * attributes and passes it to vxge_hw_vpath_open().
1814 */
1815struct vxge_hw_fifo_attr {
1816
1817 enum vxge_hw_status (*callback)(
1818 struct __vxge_hw_fifo *fifo_handle,
1819 void *txdlh,
1820 enum vxge_hw_fifo_tcode t_code,
1821 void *userdata,
1822 struct sk_buff ***skb_ptr,
1823 int nr_skb, int *more);
1824
1825 void (*txdl_term)(
1826 void *txdlh,
1827 enum vxge_hw_txdl_state state,
1828 void *userdata);
1829
1830 void *userdata;
1831 u32 per_txdl_space;
1832};
1833
1834/**
1835 * struct vxge_hw_vpath_attr - Attributes of virtual path
1836 * @vp_id: Identifier of Virtual Path
1837 * @ring_attr: Attributes of ring for non-offload receive
1838 * @fifo_attr: Attributes of fifo for non-offload transmit
1839 *
1840 * Attributes of virtual path. This structure is passed as parameter
1841 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
1842 */
1843struct vxge_hw_vpath_attr {
1844 u32 vp_id;
1845 struct vxge_hw_ring_attr ring_attr;
1846 struct vxge_hw_fifo_attr fifo_attr;
1847};
1848
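/*
 * Sketch of opening a virtual path: fill the attribute template and
 * hand it to vxge_hw_vpath_open() (declared later in this file).
 * example_ring_callback is the sketch shown earlier; the fifo
 * callbacks are left NULL here for brevity.
 */
static inline enum vxge_hw_status
example_vpath_open(struct __vxge_hw_device *devh, u32 vp_id,
		   struct __vxge_hw_vpath_handle **vph)
{
	struct vxge_hw_vpath_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.vp_id = vp_id;
	attr.ring_attr.callback = example_ring_callback;
	attr.ring_attr.userdata = devh;
	attr.fifo_attr.userdata = devh;

	return vxge_hw_vpath_open(devh, &attr, vph);
}
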
1849enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1850 void __iomem *bar0,
1851 struct vxge_hw_device_hw_info *hw_info);
1852
1853enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1854 struct vxge_hw_device_config *device_config);
1855
1856/**
1857 * vxge_hw_device_link_state_get - Get link state.
1858 * @devh: HW device handle.
1859 *
1860 * Get link state.
1861 * Returns: link state.
1862 */
1863static inline
1864enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
1865 struct __vxge_hw_device *devh)
1866{
1867 return devh->link_state;
1868}
1869
1870void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
1871
1872const u8 *
1873vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
1874
1875u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1876
1877const u8 *
1878vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1879
1880enum vxge_hw_status __devinit vxge_hw_device_initialize(
1881 struct __vxge_hw_device **devh,
1882 struct vxge_hw_device_attr *attr,
1883 struct vxge_hw_device_config *device_config);
1884
1885enum vxge_hw_status vxge_hw_device_getpause_data(
1886 struct __vxge_hw_device *devh,
1887 u32 port,
1888 u32 *tx,
1889 u32 *rx);
1890
1891enum vxge_hw_status vxge_hw_device_setpause_data(
1892 struct __vxge_hw_device *devh,
1893 u32 port,
1894 u32 tx,
1895 u32 rx);
1896
1897static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1898 unsigned long size,
1899 struct pci_dev **p_dmah,
1900 struct pci_dev **p_dma_acch)
1901{
1902 gfp_t flags;
1903 void *vaddr;
1904 unsigned long misaligned = 0;
1905 int realloc_flag = 0;
1906 *p_dma_acch = *p_dmah = NULL;
1907
1908 if (in_interrupt())
1909 flags = GFP_ATOMIC | GFP_DMA;
1910 else
1911 flags = GFP_KERNEL | GFP_DMA;
1912realloc:
1913 vaddr = kmalloc((size), flags);
1914 if (vaddr == NULL)
1915 return vaddr;
1916 misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
1917 VXGE_CACHE_LINE_SIZE);
1918 if (realloc_flag)
1919 goto out;
1920
1921 if (misaligned) {
1922 /* misaligned, free current one and try allocating
1923 * size + VXGE_CACHE_LINE_SIZE memory
1924 */
1925 kfree((void *) vaddr);
1926 size += VXGE_CACHE_LINE_SIZE;
1927 realloc_flag = 1;
1928 goto realloc;
1929 }
1930out:
1931 *(unsigned long *)p_dma_acch = misaligned;
1932 vaddr = (void *)((u8 *)vaddr + misaligned);
1933 return vaddr;
1934}
1935
1936static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1937 struct pci_dev **p_dma_acch)
1938{
1939 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1940 u8 *tmp = (u8 *)vaddr;
1941 tmp -= misaligned;
1942 kfree((void *)tmp);
1943}
1944
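/*
 * Usage sketch (illustration only): the "access handle" argument carries
 * the alignment offset from the allocation to the matching free, so the
 * same variable must be passed to both helpers.
 *
 *	struct pci_dev *dmah, *acch;
 *	void *blk;
 *
 *	blk = vxge_os_dma_malloc(pdev, size, &dmah, &acch);
 *	if (blk != NULL) {
 *		... blk is aligned to VXGE_CACHE_LINE_SIZE here ...
 *		vxge_os_dma_free(pdev, blk, &acch);
 *	}
 */
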
1945/*
1946 * __vxge_hw_mempool_item_priv - return a pointer to an item's private space
1947 */
1948static inline void*
1949__vxge_hw_mempool_item_priv(
1950 struct vxge_hw_mempool *mempool,
1951 u32 memblock_idx,
1952 void *item,
1953 u32 *memblock_item_idx)
1954{
1955 ptrdiff_t offset;
1956 void *memblock = mempool->memblocks_arr[memblock_idx];
1957
1958
1959 offset = (u32)((u8 *)item - (u8 *)memblock);
1960 vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
1961
1962 (*memblock_item_idx) = (u32) offset / mempool->item_size;
1963 vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
1964
1965 return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
1966 (*memblock_item_idx) * mempool->items_priv_size;
1967}
1968
1969/*
1970 * __vxge_hw_fifo_txdl_priv - Return the per-TxDL private structure
1971 * associated with a transmit descriptor.
1972 * @fifo: Fifo
1973 * @txdp: Pointer to a TxD
1974 */
1975static inline struct __vxge_hw_fifo_txdl_priv *
1976__vxge_hw_fifo_txdl_priv(
1977 struct __vxge_hw_fifo *fifo,
1978 struct vxge_hw_fifo_txd *txdp)
1979{
1980 return (struct __vxge_hw_fifo_txdl_priv *)
1981 (((char *)((ulong)txdp->host_control)) +
1982 fifo->per_txdl_space);
1983}
1984
1985enum vxge_hw_status vxge_hw_vpath_open(
1986 struct __vxge_hw_device *devh,
1987 struct vxge_hw_vpath_attr *attr,
1988 struct __vxge_hw_vpath_handle **vpath_handle);
1989
1990enum vxge_hw_status vxge_hw_vpath_close(
1991 struct __vxge_hw_vpath_handle *vpath_handle);
1992
1993enum vxge_hw_status
1994vxge_hw_vpath_reset(
1995 struct __vxge_hw_vpath_handle *vpath_handle);
1996
1997enum vxge_hw_status
1998vxge_hw_vpath_recover_from_reset(
1999 struct __vxge_hw_vpath_handle *vpath_handle);
2000
2001void
2002vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
2003
2004enum vxge_hw_status
2005vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
2006
2007enum vxge_hw_status vxge_hw_vpath_mtu_set(
2008 struct __vxge_hw_vpath_handle *vpath_handle,
2009 u32 new_mtu);
2010
2011void
2012vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2013
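/*
 * Fallbacks for platforms whose asm headers lack native 64-bit MMIO
 * accessors: emulate readq/writeq with two 32-bit accesses, the low
 * dword at addr and the high dword at addr + 4. The two halves are
 * not performed atomically.
 */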
2014#ifndef readq
2015static inline u64 readq(void __iomem *addr)
2016{
2017 u64 ret = 0;
2018 ret = readl(addr + 4);
2019 ret <<= 32;
2020 ret |= readl(addr);
2021
2022 return ret;
2023}
2024#endif
2025
2026#ifndef writeq
2027static inline void writeq(u64 val, void __iomem *addr)
2028{
2029 writel((u32) (val), addr);
2030 writel((u32) (val >> 32), (addr + 4));
2031}
2032#endif
2033
2034static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
2035{
2036 writel(val, addr + 4);
2037}
2038
2039static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2040{
2041 writel(val, addr);
2042}
2043
2044enum vxge_hw_status
2045vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2046
2047enum vxge_hw_status
2048vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2049
2050/**
2051 * vxge_debug_ll
2052 * @level: level of debug verbosity.
2053 * @mask: mask for the debug
2054 * @buf: Circular buffer for tracing
2055 * @fmt: printf like format string
2056 *
 2057 * Provides logging facilities. Can be customized on a per-module
 2058 * basis and/or with debug levels. Input parameters, except
 2059 * module and level, are the same as POSIX printf. This function
 2060 * is compiled out when VXGE_COMPONENT_LL is excluded from VXGE_DEBUG_MODULE_MASK.
2061 * See also: enum vxge_debug_level{}.
2062 */
2063#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2064#define vxge_debug_ll(level, mask, fmt, ...) do { \
2065 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2066 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2067 if ((mask & VXGE_DEBUG_MASK) == mask) \
2068 printk(fmt "\n", __VA_ARGS__); \
2069} while (0)
2070#else
2071#define vxge_debug_ll(level, mask, fmt, ...)
2072#endif
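/*
 * A hypothetical call site, for illustration only (the message text is
 * made up; the trailing newline is appended by the macro itself):
 *
 *	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_TRACE_MASK,
 *		"%s: vpath %d opened", __func__, vp_id);
 */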
2073
2074enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
2075 struct __vxge_hw_vpath_handle **vpath_handles,
2076 u32 vpath_count,
2077 u8 *mtable,
2078 u8 *itable,
2079 u32 itable_size);
2080
2081enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2082 struct __vxge_hw_vpath_handle *vpath_handle,
2083 enum vxge_hw_rth_algoritms algorithm,
2084 struct vxge_hw_rth_hash_types *hash_type,
2085 u16 bucket_size);
2086
2087enum vxge_hw_status
2088__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2089
2090#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2091#define VXGE_HW_MAX_POLLING_COUNT 100
2092
2093void
2094vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2095
2096enum vxge_hw_status
2097vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2098 u32 *minor, u32 *build);
2099
2100enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2101
2102enum vxge_hw_status
2103vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2104 int size);
2105
2106enum vxge_hw_status
2107vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2108 struct eprom_image *eprom_image_data);
2109
2110int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2111#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
new file mode 100644
index 00000000000..92dd72d3f9d
--- /dev/null
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -0,0 +1,1132 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/ethtool.h>
15#include <linux/slab.h>
16#include <linux/pci.h>
17#include <linux/etherdevice.h>
18
19#include "vxge-ethtool.h"
20
21/**
22 * vxge_ethtool_sset - Sets different link parameters.
23 * @dev: device pointer.
24 * @info: pointer to the structure with parameters given by ethtool to set
25 * link information.
26 *
 27 * The adapter supports only 10Gb full duplex with autonegotiation
 28 * disabled, so the function merely validates the requested settings.
29 * Return value:
30 * 0 on success.
31 */
32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
33{
34 /* We currently only support 10Gb/FULL */
35 if ((info->autoneg == AUTONEG_ENABLE) ||
36 (ethtool_cmd_speed(info) != SPEED_10000) ||
37 (info->duplex != DUPLEX_FULL))
38 return -EINVAL;
39
40 return 0;
41}
42
43/**
44 * vxge_ethtool_gset - Return link specific information.
45 * @dev: device pointer.
46 * @info: pointer to the structure with parameters given by ethtool
47 * to return link information.
48 *
 49 * Returns link specific information such as speed, duplex, etc. to ethtool.
 50 * Return value:
 51 * 0 on success.
52 */
53static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
54{
55 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
56 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
57 info->port = PORT_FIBRE;
58
59 info->transceiver = XCVR_EXTERNAL;
60
61 if (netif_carrier_ok(dev)) {
62 ethtool_cmd_speed_set(info, SPEED_10000);
63 info->duplex = DUPLEX_FULL;
64 } else {
65 ethtool_cmd_speed_set(info, -1);
66 info->duplex = -1;
67 }
68
69 info->autoneg = AUTONEG_DISABLE;
70 return 0;
71}
72
73/**
74 * vxge_ethtool_gdrvinfo - Returns driver specific information.
75 * @dev: device pointer.
76 * @info: pointer to the structure with parameters given by ethtool to
77 * return driver information.
78 *
 79 * Returns driver specific information such as name, version, etc. to ethtool.
80 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info)
83{
84 struct vxgedev *vdev = netdev_priv(dev);
85 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
86 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
87 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
88 strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
89 info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
90 * vdev->no_of_vpath;
91
92 info->n_stats = STAT_LEN;
93}
94
95/**
 96 * vxge_ethtool_gregs - dumps the vpath register space of Titan into the buffer.
 97 * @dev: device pointer.
 98 * @regs: pointer to the structure with parameters given by ethtool for
 99 * dumping the registers.
 100 * @space: buffer into which all the register values are dumped.
101 *
102 * Dumps the vpath register space of Titan NIC into the user given
103 * buffer area.
104 */
105static void vxge_ethtool_gregs(struct net_device *dev,
106 struct ethtool_regs *regs, void *space)
107{
108 int index, offset;
109 enum vxge_hw_status status;
110 u64 reg;
111 u64 *reg_space = (u64 *)space;
112 struct vxgedev *vdev = netdev_priv(dev);
113 struct __vxge_hw_device *hldev = vdev->devh;
114
115 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
116 regs->version = vdev->pdev->subsystem_device;
117 for (index = 0; index < vdev->no_of_vpath; index++) {
118 for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
119 offset += 8) {
120 status = vxge_hw_mgmt_reg_read(hldev,
121 vxge_hw_mgmt_reg_type_vpath,
122 vdev->vpaths[index].device_id,
123 offset, &reg);
124 if (status != VXGE_HW_OK) {
125 vxge_debug_init(VXGE_ERR,
126 "%s:%d Getting reg dump Failed",
127 __func__, __LINE__);
128 return;
129 }
130 *reg_space++ = reg;
131 }
132 }
133}
134
135/**
 136 * vxge_ethtool_idnic - Physically identify the NIC on the system.
 137 * @dev : device pointer.
 138 * @state : requested LED state
 139 *
 140 * Blinks the adapter LED so that the NIC can be physically located.
 141 * Returns 0 on success.
142 */
143static int vxge_ethtool_idnic(struct net_device *dev,
144 enum ethtool_phys_id_state state)
145{
146 struct vxgedev *vdev = netdev_priv(dev);
147 struct __vxge_hw_device *hldev = vdev->devh;
148
149 switch (state) {
150 case ETHTOOL_ID_ACTIVE:
151 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
152 break;
153
154 case ETHTOOL_ID_INACTIVE:
155 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
156 break;
157
158 default:
159 return -EINVAL;
160 }
161
162 return 0;
163}
164
165/**
 166 * vxge_ethtool_getpause_data - Pause frame generation and reception.
167 * @dev : device pointer.
168 * @ep : pointer to the structure with pause parameters given by ethtool.
169 * Description:
170 * Returns the Pause frame generation and reception capability of the NIC.
171 * Return value:
172 * void
173 */
174static void vxge_ethtool_getpause_data(struct net_device *dev,
175 struct ethtool_pauseparam *ep)
176{
177 struct vxgedev *vdev = netdev_priv(dev);
178 struct __vxge_hw_device *hldev = vdev->devh;
179
180 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
181}
182
183/**
184 * vxge_ethtool_setpause_data - set/reset pause frame generation.
185 * @dev : device pointer.
186 * @ep : pointer to the structure with pause parameters given by ethtool.
187 * Description:
188 * It can be used to set or reset Pause frame generation or reception
189 * support of the NIC.
190 * Return value:
191 * int, returns 0 on Success
192 */
193static int vxge_ethtool_setpause_data(struct net_device *dev,
194 struct ethtool_pauseparam *ep)
195{
196 struct vxgedev *vdev = netdev_priv(dev);
197 struct __vxge_hw_device *hldev = vdev->devh;
198
199 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
200
201 vdev->config.tx_pause_enable = ep->tx_pause;
202 vdev->config.rx_pause_enable = ep->rx_pause;
203
204 return 0;
205}
206
207static void vxge_get_ethtool_stats(struct net_device *dev,
208 struct ethtool_stats *estats, u64 *tmp_stats)
209{
210 int j, k;
211 enum vxge_hw_status status;
212 enum vxge_hw_status swstatus;
213 struct vxge_vpath *vpath = NULL;
214 struct vxgedev *vdev = netdev_priv(dev);
215 struct __vxge_hw_device *hldev = vdev->devh;
216 struct vxge_hw_xmac_stats *xmac_stats;
217 struct vxge_hw_device_stats_sw_info *sw_stats;
218 struct vxge_hw_device_stats_hw_info *hw_stats;
219
220 u64 *ptr = tmp_stats;
221
222 memset(tmp_stats, 0,
223 vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
224
225 xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
226 if (xmac_stats == NULL) {
227 vxge_debug_init(VXGE_ERR,
228 "%s : %d Memory Allocation failed for xmac_stats",
229 __func__, __LINE__);
230 return;
231 }
232
233 sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
234 GFP_KERNEL);
235 if (sw_stats == NULL) {
236 kfree(xmac_stats);
237 vxge_debug_init(VXGE_ERR,
238 "%s : %d Memory Allocation failed for sw_stats",
239 __func__, __LINE__);
240 return;
241 }
242
243 hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
244 GFP_KERNEL);
245 if (hw_stats == NULL) {
246 kfree(xmac_stats);
247 kfree(sw_stats);
248 vxge_debug_init(VXGE_ERR,
249 "%s : %d Memory Allocation failed for hw_stats",
250 __func__, __LINE__);
251 return;
252 }
253
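	/* Each bare zero written at a section boundary below fills the slot
	 * that lines up with a section-title string in the ETH_SS_STATS
	 * string table, keeping values and names in step. */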
254 *ptr++ = 0;
255 status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
256 if (status != VXGE_HW_OK) {
257 if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
258 vxge_debug_init(VXGE_ERR,
259 "%s : %d Failure in getting xmac stats",
260 __func__, __LINE__);
261 }
262 }
263 swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
264 if (swstatus != VXGE_HW_OK) {
265 vxge_debug_init(VXGE_ERR,
266 "%s : %d Failure in getting sw stats",
267 __func__, __LINE__);
268 }
269
270 status = vxge_hw_device_stats_get(hldev, hw_stats);
271 if (status != VXGE_HW_OK) {
272 vxge_debug_init(VXGE_ERR,
273 "%s : %d hw_stats_get error", __func__, __LINE__);
274 }
275
276 for (k = 0; k < vdev->no_of_vpath; k++) {
277 struct vxge_hw_vpath_stats_hw_info *vpath_info;
278
279 vpath = &vdev->vpaths[k];
280 j = vpath->device_id;
281 vpath_info = hw_stats->vpath_info[j];
282 if (!vpath_info) {
283 memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
284 VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
285 ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
286 VXGE_HW_VPATH_RX_STATS_LEN);
287 continue;
288 }
289
290 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
291 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
292 *ptr++ = vpath_info->tx_stats.tx_data_octets;
293 *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
294 *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
295 *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
296 *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
297 *ptr++ = vpath_info->tx_stats.tx_vld_ip;
298 *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
299 *ptr++ = vpath_info->tx_stats.tx_icmp;
300 *ptr++ = vpath_info->tx_stats.tx_tcp;
301 *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
302 *ptr++ = vpath_info->tx_stats.tx_udp;
303 *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
304 *ptr++ = vpath_info->tx_stats.tx_lost_ip;
305 *ptr++ = vpath_info->tx_stats.tx_parse_error;
306 *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
307 *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
308 *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
309 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
310 *ptr++ = vpath_info->rx_stats.rx_vld_frms;
311 *ptr++ = vpath_info->rx_stats.rx_offload_frms;
312 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
313 *ptr++ = vpath_info->rx_stats.rx_data_octets;
314 *ptr++ = vpath_info->rx_stats.rx_offload_octets;
315 *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
316 *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
317 *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
318 *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
319 *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
320 *ptr++ = vpath_info->rx_stats.rx_long_frms;
321 *ptr++ = vpath_info->rx_stats.rx_usized_frms;
322 *ptr++ = vpath_info->rx_stats.rx_osized_frms;
323 *ptr++ = vpath_info->rx_stats.rx_frag_frms;
324 *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
325 *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
326 *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
327 *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
328 *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
329 *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
330 *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
331 *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
332 *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
333 *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
334 *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
335 *ptr++ = vpath_info->rx_stats.rx_ip;
336 *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
337 *ptr++ = vpath_info->rx_stats.rx_ip_octets;
338 *ptr++ = vpath_info->rx_stats.rx_err_ip;
339 *ptr++ = vpath_info->rx_stats.rx_icmp;
340 *ptr++ = vpath_info->rx_stats.rx_tcp;
341 *ptr++ = vpath_info->rx_stats.rx_udp;
342 *ptr++ = vpath_info->rx_stats.rx_err_tcp;
343 *ptr++ = vpath_info->rx_stats.rx_lost_frms;
344 *ptr++ = vpath_info->rx_stats.rx_lost_ip;
345 *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
346 *ptr++ = vpath_info->rx_stats.rx_various_discard;
347 *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
348 *ptr++ = vpath_info->rx_stats.rx_red_discard;
349 *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
350 *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
351 }
352 *ptr++ = 0;
353 for (k = 0; k < vdev->max_config_port; k++) {
354 *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
355 *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
356 *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
357 *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
358 *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
359 *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
360 *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
361 *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
362 *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
363 *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
364 *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
365 *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
366 *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
367 }
368 *ptr++ = 0;
369 for (k = 0; k < vdev->max_config_port; k++) {
370 *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
371 *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
372 *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
373 *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
374 *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
375 *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
376 *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
377 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
378 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
379 *ptr++ = xmac_stats->port_stats[k].tx_icmp;
380 *ptr++ = xmac_stats->port_stats[k].tx_tcp;
381 *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
382 *ptr++ = xmac_stats->port_stats[k].tx_udp;
383 *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
384 *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
385 *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
386 *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
387 *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
388 *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
389 *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
390 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
391 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
392 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
393 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
394 *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
395 *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
396 *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
397 *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
398 *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
399 *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
400 *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
401 *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
402 *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
403 *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
404 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
405 *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
406 *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
407 *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
408 *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
409 *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
410 *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
411 *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
412 *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
413 *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
414 *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
415 *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
416 *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
417 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
418 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
419 *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
420 *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
421 *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
422 *ptr++ = xmac_stats->port_stats[k].rx_ip;
423 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
424 *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
425 *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
426 *ptr++ = xmac_stats->port_stats[k].rx_icmp;
427 *ptr++ = xmac_stats->port_stats[k].rx_tcp;
428 *ptr++ = xmac_stats->port_stats[k].rx_udp;
429 *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
430 *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
431 *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
432 *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
433 *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
434 *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
435 *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
436 *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
437 *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
438 *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
439 *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
440 *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
441 *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
442 *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
443 *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
444 *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
445 *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
446 *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
447 *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
448 *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
449 *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
450 *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
451 *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
452 *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
453 *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
454 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
455 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
456 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
457 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
458 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
459 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
460 *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
461 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
462 *ptr++ = xmac_stats->port_stats[k].rx_jettison;
463 *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
464 }
465
466 *ptr++ = 0;
467 for (k = 0; k < vdev->no_of_vpath; k++) {
468 struct vxge_hw_vpath_stats_sw_info *vpath_info;
469
470 vpath = &vdev->vpaths[k];
471 j = vpath->device_id;
472 vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
473 &sw_stats->vpath_info[j];
474 *ptr++ = vpath_info->soft_reset_cnt;
475 *ptr++ = vpath_info->error_stats.unknown_alarms;
476 *ptr++ = vpath_info->error_stats.network_sustained_fault;
477 *ptr++ = vpath_info->error_stats.network_sustained_ok;
478 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
479 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
480 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
481 *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
482 *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
483 *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
484 *ptr++ = vpath_info->error_stats.target_illegal_access;
485 *ptr++ = vpath_info->error_stats.ini_serr_det;
486 *ptr++ = vpath_info->error_stats.prc_ring_bumps;
487 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
488 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
489 *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
490 *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
491 *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
492 *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
493 *ptr++ = vpath_info->ring_stats.common_stats.
494 reserve_free_swaps_cnt;
495 *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
496 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
497 *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
498 *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
499 *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
500 *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
501 *ptr++ = vpath_info->fifo_stats.common_stats.
502 reserve_free_swaps_cnt;
503 *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
504 *ptr++ = vpath_info->fifo_stats.total_posts;
505 *ptr++ = vpath_info->fifo_stats.total_buffers;
506 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
507 *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
508 }
509
510 *ptr++ = 0;
511 for (k = 0; k < vdev->no_of_vpath; k++) {
512 struct vxge_hw_vpath_stats_hw_info *vpath_info;
513 vpath = &vdev->vpaths[k];
514 j = vpath->device_id;
515 vpath_info = hw_stats->vpath_info[j];
516 if (!vpath_info) {
517 memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
518 ptr += VXGE_HW_VPATH_STATS_LEN;
519 continue;
520 }
521 *ptr++ = vpath_info->ini_num_mwr_sent;
522 *ptr++ = vpath_info->ini_num_mrd_sent;
523 *ptr++ = vpath_info->ini_num_cpl_rcvd;
524 *ptr++ = vpath_info->ini_num_mwr_byte_sent;
525 *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
526 *ptr++ = vpath_info->wrcrdtarb_xoff;
527 *ptr++ = vpath_info->rdcrdtarb_xoff;
528 *ptr++ = vpath_info->vpath_genstats_count0;
529 *ptr++ = vpath_info->vpath_genstats_count1;
530 *ptr++ = vpath_info->vpath_genstats_count2;
531 *ptr++ = vpath_info->vpath_genstats_count3;
532 *ptr++ = vpath_info->vpath_genstats_count4;
533 *ptr++ = vpath_info->vpath_genstats_count5;
534 *ptr++ = vpath_info->prog_event_vnum0;
535 *ptr++ = vpath_info->prog_event_vnum1;
536 *ptr++ = vpath_info->prog_event_vnum2;
537 *ptr++ = vpath_info->prog_event_vnum3;
538 *ptr++ = vpath_info->rx_multi_cast_frame_discard;
539 *ptr++ = vpath_info->rx_frm_transferred;
540 *ptr++ = vpath_info->rxd_returned;
541 *ptr++ = vpath_info->rx_mpa_len_fail_frms;
542 *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
543 *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
544 *ptr++ = vpath_info->rx_permitted_frms;
545 *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
546 *ptr++ = vpath_info->rx_wol_frms;
547 *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
548 }
549
550 *ptr++ = 0;
551 *ptr++ = vdev->stats.vpaths_open;
552 *ptr++ = vdev->stats.vpath_open_fail;
553 *ptr++ = vdev->stats.link_up;
554 *ptr++ = vdev->stats.link_down;
555
556 for (k = 0; k < vdev->no_of_vpath; k++) {
557 *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
558 *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
559 *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
560 *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
561 *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
562 *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
563 *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
564 *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
565 *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
566 *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
567 vdev->vpaths[k].ring.stats.pci_map_fail;
568 *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
569 }
570
571 ptr += 12;
572
573 kfree(xmac_stats);
574 kfree(sw_stats);
575 kfree(hw_stats);
576}
577
578static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
579 u8 *data)
580{
581 int stat_size = 0;
582 int i, j;
583 struct vxgedev *vdev = netdev_priv(dev);
584 switch (stringset) {
585 case ETH_SS_STATS:
586 vxge_add_string("VPATH STATISTICS%s\t\t\t",
587 &stat_size, data, "");
588 for (i = 0; i < vdev->no_of_vpath; i++) {
589 vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
590 &stat_size, data, i);
591 vxge_add_string("tx_ttl_eth_octects_%d\t\t",
592 &stat_size, data, i);
593 vxge_add_string("tx_data_octects_%d\t\t\t",
594 &stat_size, data, i);
595 vxge_add_string("tx_mcast_frms_%d\t\t\t",
596 &stat_size, data, i);
597 vxge_add_string("tx_bcast_frms_%d\t\t\t",
598 &stat_size, data, i);
599 vxge_add_string("tx_ucast_frms_%d\t\t\t",
600 &stat_size, data, i);
601 vxge_add_string("tx_tagged_frms_%d\t\t\t",
602 &stat_size, data, i);
603 vxge_add_string("tx_vld_ip_%d\t\t\t",
604 &stat_size, data, i);
605 vxge_add_string("tx_vld_ip_octects_%d\t\t",
606 &stat_size, data, i);
607 vxge_add_string("tx_icmp_%d\t\t\t\t",
608 &stat_size, data, i);
609 vxge_add_string("tx_tcp_%d\t\t\t\t",
610 &stat_size, data, i);
611 vxge_add_string("tx_rst_tcp_%d\t\t\t",
612 &stat_size, data, i);
613 vxge_add_string("tx_udp_%d\t\t\t\t",
614 &stat_size, data, i);
615 vxge_add_string("tx_unknown_proto_%d\t\t\t",
616 &stat_size, data, i);
617 vxge_add_string("tx_lost_ip_%d\t\t\t",
618 &stat_size, data, i);
619 vxge_add_string("tx_parse_error_%d\t\t\t",
620 &stat_size, data, i);
621 vxge_add_string("tx_tcp_offload_%d\t\t\t",
622 &stat_size, data, i);
623 vxge_add_string("tx_retx_tcp_offload_%d\t\t",
624 &stat_size, data, i);
625 vxge_add_string("tx_lost_ip_offload_%d\t\t",
626 &stat_size, data, i);
627 vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
628 &stat_size, data, i);
629 vxge_add_string("rx_vld_frms_%d\t\t\t",
630 &stat_size, data, i);
631 vxge_add_string("rx_offload_frms_%d\t\t\t",
632 &stat_size, data, i);
633 vxge_add_string("rx_ttl_eth_octects_%d\t\t",
634 &stat_size, data, i);
635 vxge_add_string("rx_data_octects_%d\t\t\t",
636 &stat_size, data, i);
637 vxge_add_string("rx_offload_octects_%d\t\t",
638 &stat_size, data, i);
639 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
640 &stat_size, data, i);
641 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
642 &stat_size, data, i);
643 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
644 &stat_size, data, i);
645 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
646 &stat_size, data, i);
647 vxge_add_string("rx_tagged_frms_%d\t\t\t",
648 &stat_size, data, i);
649 vxge_add_string("rx_long_frms_%d\t\t\t",
650 &stat_size, data, i);
651 vxge_add_string("rx_usized_frms_%d\t\t\t",
652 &stat_size, data, i);
653 vxge_add_string("rx_osized_frms_%d\t\t\t",
654 &stat_size, data, i);
655 vxge_add_string("rx_frag_frms_%d\t\t\t",
656 &stat_size, data, i);
657 vxge_add_string("rx_jabber_frms_%d\t\t\t",
658 &stat_size, data, i);
659 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
660 &stat_size, data, i);
661 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
662 &stat_size, data, i);
663 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
664 &stat_size, data, i);
665 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
666 &stat_size, data, i);
667 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
668 &stat_size, data, i);
669 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
670 &stat_size, data, i);
671 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
672 &stat_size, data, i);
673 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
674 &stat_size, data, i);
675 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
676 &stat_size, data, i);
677 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
678 &stat_size, data, i);
679 vxge_add_string("rx_ip%d\t\t\t\t",
680 &stat_size, data, i);
681 vxge_add_string("rx_accepted_ip_%d\t\t\t",
682 &stat_size, data, i);
683 vxge_add_string("rx_ip_octects_%d\t\t\t",
684 &stat_size, data, i);
685 vxge_add_string("rx_err_ip_%d\t\t\t",
686 &stat_size, data, i);
687 vxge_add_string("rx_icmp_%d\t\t\t\t",
688 &stat_size, data, i);
689 vxge_add_string("rx_tcp_%d\t\t\t\t",
690 &stat_size, data, i);
691 vxge_add_string("rx_udp_%d\t\t\t\t",
692 &stat_size, data, i);
693 vxge_add_string("rx_err_tcp_%d\t\t\t",
694 &stat_size, data, i);
695 vxge_add_string("rx_lost_frms_%d\t\t\t",
696 &stat_size, data, i);
697 vxge_add_string("rx_lost_ip_%d\t\t\t",
698 &stat_size, data, i);
699 vxge_add_string("rx_lost_ip_offload_%d\t\t",
700 &stat_size, data, i);
701 vxge_add_string("rx_various_discard_%d\t\t",
702 &stat_size, data, i);
703 vxge_add_string("rx_sleep_discard_%d\t\t\t",
704 &stat_size, data, i);
705 vxge_add_string("rx_red_discard_%d\t\t\t",
706 &stat_size, data, i);
707 vxge_add_string("rx_queue_full_discard_%d\t\t",
708 &stat_size, data, i);
709 vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
710 &stat_size, data, i);
711 }
712
713 vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
714 &stat_size, data, "");
715 for (i = 0; i < vdev->max_config_port; i++) {
716 vxge_add_string("tx_frms_%d\t\t\t\t",
717 &stat_size, data, i);
718 vxge_add_string("tx_data_octects_%d\t\t\t",
719 &stat_size, data, i);
720 vxge_add_string("tx_mcast_frms_%d\t\t\t",
721 &stat_size, data, i);
722 vxge_add_string("tx_bcast_frms_%d\t\t\t",
723 &stat_size, data, i);
724 vxge_add_string("tx_discarded_frms_%d\t\t",
725 &stat_size, data, i);
726 vxge_add_string("tx_errored_frms_%d\t\t\t",
727 &stat_size, data, i);
728 vxge_add_string("rx_frms_%d\t\t\t\t",
729 &stat_size, data, i);
730 vxge_add_string("rx_data_octects_%d\t\t\t",
731 &stat_size, data, i);
732 vxge_add_string("rx_mcast_frms_%d\t\t\t",
733 &stat_size, data, i);
734 vxge_add_string("rx_bcast_frms_%d\t\t\t",
735 &stat_size, data, i);
736 vxge_add_string("rx_discarded_frms_%d\t\t",
737 &stat_size, data, i);
738 vxge_add_string("rx_errored_frms_%d\t\t\t",
739 &stat_size, data, i);
740 vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
741 &stat_size, data, i);
742 }
743
744 vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
745 &stat_size, data, "");
746 for (i = 0; i < vdev->max_config_port; i++) {
747 vxge_add_string("tx_ttl_frms_%d\t\t\t",
748 &stat_size, data, i);
749 vxge_add_string("tx_ttl_octects_%d\t\t\t",
750 &stat_size, data, i);
751 vxge_add_string("tx_data_octects_%d\t\t\t",
752 &stat_size, data, i);
753 vxge_add_string("tx_mcast_frms_%d\t\t\t",
754 &stat_size, data, i);
755 vxge_add_string("tx_bcast_frms_%d\t\t\t",
756 &stat_size, data, i);
757 vxge_add_string("tx_ucast_frms_%d\t\t\t",
758 &stat_size, data, i);
759 vxge_add_string("tx_tagged_frms_%d\t\t\t",
760 &stat_size, data, i);
761 vxge_add_string("tx_vld_ip_%d\t\t\t",
762 &stat_size, data, i);
763 vxge_add_string("tx_vld_ip_octects_%d\t\t",
764 &stat_size, data, i);
765 vxge_add_string("tx_icmp_%d\t\t\t\t",
766 &stat_size, data, i);
767 vxge_add_string("tx_tcp_%d\t\t\t\t",
768 &stat_size, data, i);
769 vxge_add_string("tx_rst_tcp_%d\t\t\t",
770 &stat_size, data, i);
771 vxge_add_string("tx_udp_%d\t\t\t\t",
772 &stat_size, data, i);
773 vxge_add_string("tx_parse_error_%d\t\t\t",
774 &stat_size, data, i);
775 vxge_add_string("tx_unknown_protocol_%d\t\t",
776 &stat_size, data, i);
777 vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
778 &stat_size, data, i);
779 vxge_add_string("tx_marker_pdu_frms_%d\t\t",
780 &stat_size, data, i);
781 vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
782 &stat_size, data, i);
783 vxge_add_string("tx_drop_ip_%d\t\t\t",
784 &stat_size, data, i);
785 vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
786 &stat_size, data, i);
787 vxge_add_string("tx_xgmii_char2_match_%d\t\t",
788 &stat_size, data, i);
789 vxge_add_string("tx_xgmii_char1_match_%d\t\t",
790 &stat_size, data, i);
791 vxge_add_string("tx_xgmii_column2_match_%d\t\t",
792 &stat_size, data, i);
793 vxge_add_string("tx_xgmii_column1_match_%d\t\t",
794 &stat_size, data, i);
795 vxge_add_string("tx_any_err_frms_%d\t\t\t",
796 &stat_size, data, i);
797 vxge_add_string("tx_drop_frms_%d\t\t\t",
798 &stat_size, data, i);
799 vxge_add_string("rx_ttl_frms_%d\t\t\t",
800 &stat_size, data, i);
801 vxge_add_string("rx_vld_frms_%d\t\t\t",
802 &stat_size, data, i);
803 vxge_add_string("rx_offload_frms_%d\t\t\t",
804 &stat_size, data, i);
805 vxge_add_string("rx_ttl_octects_%d\t\t\t",
806 &stat_size, data, i);
807 vxge_add_string("rx_data_octects_%d\t\t\t",
808 &stat_size, data, i);
809 vxge_add_string("rx_offload_octects_%d\t\t",
810 &stat_size, data, i);
811 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
812 &stat_size, data, i);
813 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
814 &stat_size, data, i);
815 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
816 &stat_size, data, i);
817 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
818 &stat_size, data, i);
819 vxge_add_string("rx_tagged_frms_%d\t\t\t",
820 &stat_size, data, i);
821 vxge_add_string("rx_long_frms_%d\t\t\t",
822 &stat_size, data, i);
823 vxge_add_string("rx_usized_frms_%d\t\t\t",
824 &stat_size, data, i);
825 vxge_add_string("rx_osized_frms_%d\t\t\t",
826 &stat_size, data, i);
827 vxge_add_string("rx_frag_frms_%d\t\t\t",
828 &stat_size, data, i);
829 vxge_add_string("rx_jabber_frms_%d\t\t\t",
830 &stat_size, data, i);
831 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
832 &stat_size, data, i);
833 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
834 &stat_size, data, i);
835 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
836 &stat_size, data, i);
837 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
838 &stat_size, data, i);
839 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
840 &stat_size, data, i);
841 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
842 &stat_size, data, i);
843 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
844 &stat_size, data, i);
845 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
846 &stat_size, data, i);
847 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
848 &stat_size, data, i);
849 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
850 &stat_size, data, i);
851 vxge_add_string("rx_ip_%d\t\t\t\t",
852 &stat_size, data, i);
853 vxge_add_string("rx_accepted_ip_%d\t\t\t",
854 &stat_size, data, i);
855 vxge_add_string("rx_ip_octets_%d\t\t\t",
856 &stat_size, data, i);
857 vxge_add_string("rx_err_ip_%d\t\t\t",
858 &stat_size, data, i);
859 vxge_add_string("rx_icmp_%d\t\t\t\t",
860 &stat_size, data, i);
861 vxge_add_string("rx_tcp_%d\t\t\t\t",
862 &stat_size, data, i);
863 vxge_add_string("rx_udp_%d\t\t\t\t",
864 &stat_size, data, i);
865 vxge_add_string("rx_err_tcp_%d\t\t\t",
866 &stat_size, data, i);
867 vxge_add_string("rx_pause_count_%d\t\t\t",
868 &stat_size, data, i);
869 vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
870 &stat_size, data, i);
871 vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
872 &stat_size, data, i);
873 vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
874 &stat_size, data, i);
875 vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
876 &stat_size, data, i);
877 vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
878 &stat_size, data, i);
879 vxge_add_string("rx_drop_frms_%d\t\t\t",
880 &stat_size, data, i);
881 vxge_add_string("rx_discard_frms_%d\t\t\t",
882 &stat_size, data, i);
883 vxge_add_string("rx_drop_ip_%d\t\t\t",
884 &stat_size, data, i);
885 vxge_add_string("rx_drop_udp_%d\t\t\t",
886 &stat_size, data, i);
887 vxge_add_string("rx_marker_pdu_frms_%d\t\t",
888 &stat_size, data, i);
889 vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
890 &stat_size, data, i);
891 vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
892 &stat_size, data, i);
893 vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
894 &stat_size, data, i);
895 vxge_add_string("rx_fcs_discard_%d\t\t\t",
896 &stat_size, data, i);
897 vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
898 &stat_size, data, i);
899 vxge_add_string("rx_switch_discard_%d\t\t",
900 &stat_size, data, i);
901 vxge_add_string("rx_len_discard_%d\t\t\t",
902 &stat_size, data, i);
903 vxge_add_string("rx_rpa_discard_%d\t\t\t",
904 &stat_size, data, i);
905 vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
906 &stat_size, data, i);
907 vxge_add_string("rx_rts_discard_%d\t\t\t",
908 &stat_size, data, i);
909 vxge_add_string("rx_trash_discard_%d\t\t\t",
910 &stat_size, data, i);
911 vxge_add_string("rx_buff_full_discard_%d\t\t",
912 &stat_size, data, i);
913 vxge_add_string("rx_red_discard_%d\t\t\t",
914 &stat_size, data, i);
915 vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
916 &stat_size, data, i);
917 vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
918 &stat_size, data, i);
919 vxge_add_string("rx_xgmii_char1_match_%d\t\t",
920 &stat_size, data, i);
921 vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
922 &stat_size, data, i);
923 vxge_add_string("rx_xgmii_column1_match_%d\t\t",
924 &stat_size, data, i);
925 vxge_add_string("rx_xgmii_char2_match_%d\t\t",
926 &stat_size, data, i);
927 vxge_add_string("rx_local_fault_%d\t\t\t",
928 &stat_size, data, i);
929 vxge_add_string("rx_xgmii_column2_match_%d\t\t",
930 &stat_size, data, i);
931 vxge_add_string("rx_jettison_%d\t\t\t",
932 &stat_size, data, i);
933 vxge_add_string("rx_remote_fault_%d\t\t\t",
934 &stat_size, data, i);
935 }
936
937 vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
938 &stat_size, data, "");
939 for (i = 0; i < vdev->no_of_vpath; i++) {
940 vxge_add_string("soft_reset_cnt_%d\t\t\t",
941 &stat_size, data, i);
942 vxge_add_string("unknown_alarms_%d\t\t\t",
943 &stat_size, data, i);
944 vxge_add_string("network_sustained_fault_%d\t\t",
945 &stat_size, data, i);
946 vxge_add_string("network_sustained_ok_%d\t\t",
947 &stat_size, data, i);
948 vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
949 &stat_size, data, i);
950 vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
951 &stat_size, data, i);
952 vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
953 &stat_size, data, i);
954 vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
955 &stat_size, data, i);
956 vxge_add_string("statsb_pif_chain_error_%d\t\t",
957 &stat_size, data, i);
958 vxge_add_string("statsb_drop_timeout_%d\t\t",
959 &stat_size, data, i);
960 vxge_add_string("target_illegal_access_%d\t\t",
961 &stat_size, data, i);
962 vxge_add_string("ini_serr_det_%d\t\t\t",
963 &stat_size, data, i);
964 vxge_add_string("prc_ring_bumps_%d\t\t\t",
965 &stat_size, data, i);
966 vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
967 &stat_size, data, i);
968 vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
969 &stat_size, data, i);
970 vxge_add_string("prc_quanta_size_err_%d\t\t",
971 &stat_size, data, i);
972 vxge_add_string("ring_full_cnt_%d\t\t\t",
973 &stat_size, data, i);
974 vxge_add_string("ring_usage_cnt_%d\t\t\t",
975 &stat_size, data, i);
976 vxge_add_string("ring_usage_max_%d\t\t\t",
977 &stat_size, data, i);
978 vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
979 &stat_size, data, i);
980 vxge_add_string("ring_total_compl_cnt_%d\t\t",
981 &stat_size, data, i);
982 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
983 vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
984 &stat_size, data, j, i);
985 vxge_add_string("fifo_full_cnt_%d\t\t\t",
986 &stat_size, data, i);
987 vxge_add_string("fifo_usage_cnt_%d\t\t\t",
988 &stat_size, data, i);
989 vxge_add_string("fifo_usage_max_%d\t\t\t",
990 &stat_size, data, i);
991 vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
992 &stat_size, data, i);
993 vxge_add_string("fifo_total_compl_cnt_%d\t\t",
994 &stat_size, data, i);
995 vxge_add_string("fifo_total_posts_%d\t\t\t",
996 &stat_size, data, i);
997 vxge_add_string("fifo_total_buffers_%d\t\t",
998 &stat_size, data, i);
999 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
1000 vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
1001 &stat_size, data, j, i);
1002 }
1003
1004 vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
1005 &stat_size, data, "");
1006 for (i = 0; i < vdev->no_of_vpath; i++) {
1007 vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
1008 &stat_size, data, i);
1009 vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
1010 &stat_size, data, i);
1011 vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
1012 &stat_size, data, i);
1013 vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
1014 &stat_size, data, i);
1015 vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
1016 &stat_size, data, i);
1017 vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
1018 &stat_size, data, i);
1019 vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
1020 &stat_size, data, i);
1021 vxge_add_string("vpath_genstats_count0_%d\t\t",
1022 &stat_size, data, i);
1023 vxge_add_string("vpath_genstats_count1_%d\t\t",
1024 &stat_size, data, i);
1025 vxge_add_string("vpath_genstats_count2_%d\t\t",
1026 &stat_size, data, i);
1027 vxge_add_string("vpath_genstats_count3_%d\t\t",
1028 &stat_size, data, i);
1029 vxge_add_string("vpath_genstats_count4_%d\t\t",
1030 &stat_size, data, i);
1031 vxge_add_string("vpath_genstats_count5_%d\t\t",
1032 &stat_size, data, i);
1033 vxge_add_string("prog_event_vnum0_%d\t\t\t",
1034 &stat_size, data, i);
1035 vxge_add_string("prog_event_vnum1_%d\t\t\t",
1036 &stat_size, data, i);
1037 vxge_add_string("prog_event_vnum2_%d\t\t\t",
1038 &stat_size, data, i);
1039 vxge_add_string("prog_event_vnum3_%d\t\t\t",
1040 &stat_size, data, i);
1041 vxge_add_string("rx_multi_cast_frame_discard_%d\t",
1042 &stat_size, data, i);
1043 vxge_add_string("rx_frm_transferred_%d\t\t",
1044 &stat_size, data, i);
1045 vxge_add_string("rxd_returned_%d\t\t\t",
1046 &stat_size, data, i);
1047 vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
1048 &stat_size, data, i);
1049 vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
1050 &stat_size, data, i);
1051 vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
1052 &stat_size, data, i);
1053 vxge_add_string("rx_permitted_frms_%d\t\t",
1054 &stat_size, data, i);
1055 vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
1056 &stat_size, data, i);
1057 vxge_add_string("rx_wol_frms_%d\t\t\t",
1058 &stat_size, data, i);
1059 vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
1060 &stat_size, data, i);
1061 }
1062
1063 memcpy(data + stat_size, &ethtool_driver_stats_keys,
1064 sizeof(ethtool_driver_stats_keys));
1065 }
1066}
1067
1068static int vxge_ethtool_get_regs_len(struct net_device *dev)
1069{
1070 struct vxgedev *vdev = netdev_priv(dev);
1071
1072 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1073}
1074
1075static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1076{
1077 struct vxgedev *vdev = netdev_priv(dev);
1078
1079 switch (sset) {
1080 case ETH_SS_STATS:
1081 return VXGE_TITLE_LEN +
1082 (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
1083 (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
1084 (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
1085 (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
1086 (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
1087 (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
1088 DRIVER_STAT_LEN;
1089 default:
1090 return -EOPNOTSUPP;
1091 }
1092}
1093
1094static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1095{
1096 struct vxgedev *vdev = netdev_priv(dev);
1097
1098 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1099 printk(KERN_INFO "Single Function Mode is required to flash the"
1100 " firmware\n");
1101 return -EINVAL;
1102 }
1103
1104 if (netif_running(dev)) {
1105 printk(KERN_INFO "Interface %s must be down to flash the "
1106 "firmware\n", dev->name);
1107 return -EBUSY;
1108 }
1109
1110 return vxge_fw_upgrade(vdev, parms->data, 1);
1111}
1112
1113static const struct ethtool_ops vxge_ethtool_ops = {
1114 .get_settings = vxge_ethtool_gset,
1115 .set_settings = vxge_ethtool_sset,
1116 .get_drvinfo = vxge_ethtool_gdrvinfo,
1117 .get_regs_len = vxge_ethtool_get_regs_len,
1118 .get_regs = vxge_ethtool_gregs,
1119 .get_link = ethtool_op_get_link,
1120 .get_pauseparam = vxge_ethtool_getpause_data,
1121 .set_pauseparam = vxge_ethtool_setpause_data,
1122 .get_strings = vxge_ethtool_get_strings,
1123 .set_phys_id = vxge_ethtool_idnic,
1124 .get_sset_count = vxge_ethtool_get_sset_count,
1125 .get_ethtool_stats = vxge_get_ethtool_stats,
1126 .flash_device = vxge_fw_flash,
1127};
1128
1129void vxge_initialize_ethtool_ops(struct net_device *ndev)
1130{
1131 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
1132}
diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/vxge/vxge-ethtool.h
new file mode 100644
index 00000000000..6cf3044d7f4
--- /dev/null
+++ b/drivers/net/vxge/vxge-ethtool.h
@@ -0,0 +1,67 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef _VXGE_ETHTOOL_H
15#define _VXGE_ETHTOOL_H
16
17#include "vxge-main.h"
18
19/* Ethtool related variables and Macros. */
20static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
21
22static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
23 {"\n DRIVER STATISTICS"},
24 {"vpaths_opened"},
25 {"vpath_open_fail_cnt"},
26 {"link_up_cnt"},
27 {"link_down_cnt"},
28 {"tx_frms"},
29 {"tx_errors"},
30 {"tx_bytes"},
31 {"txd_not_free"},
32 {"txd_out_of_desc"},
33 {"rx_frms"},
34 {"rx_errors"},
35 {"rx_bytes"},
36 {"rx_mcast"},
37 {"pci_map_fail_cnt"},
38 {"skb_alloc_fail_cnt"}
39};
40
41#define VXGE_TITLE_LEN 5
42#define VXGE_HW_VPATH_STATS_LEN 27
43#define VXGE_HW_AGGR_STATS_LEN 13
44#define VXGE_HW_PORT_STATS_LEN 94
45#define VXGE_HW_VPATH_TX_STATS_LEN 19
46#define VXGE_HW_VPATH_RX_STATS_LEN 42
47#define VXGE_SW_STATS_LEN 60
48#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
49 VXGE_HW_AGGR_STATS_LEN +\
50 VXGE_HW_PORT_STATS_LEN +\
51 VXGE_HW_VPATH_TX_STATS_LEN +\
52 VXGE_HW_VPATH_RX_STATS_LEN)
53
54#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
55#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
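/*
 * These lengths must stay in step with the string tables emitted by
 * vxge_ethtool_get_strings() and with the value layout produced by
 * vxge_get_ethtool_stats(); a mismatch shifts every stat that follows.
 */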
56
57/* Maximum flicker time of adapter LED */
58#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
59#define VXGE_FLICKER_ON 1
60#define VXGE_FLICKER_OFF 0
61
62#define vxge_add_string(fmt, size, buf, ...) do { \
63	snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
64	*size += ETH_GSTRING_LEN; \
65} while (0)
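/*
 * For illustration (the local variable "off" is hypothetical): each call
 * formats one fixed-size ETH_GSTRING_LEN slot and advances the running
 * offset, e.g.
 *
 *	int off = 0;
 *	vxge_add_string("tx_frms_%d\t\t\t\t", &off, data, 0);
 */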
66
67#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
new file mode 100644
index 00000000000..178348a258d
--- /dev/null
+++ b/drivers/net/vxge/vxge-main.c
@@ -0,0 +1,4854 @@
1/******************************************************************************
2* This software may be used and distributed according to the terms of
3* the GNU General Public License (GPL), incorporated herein by reference.
4* Drivers based on or derived from this code fall under the GPL and must
5* retain the authorship, copyright and license notice. This file is not
6* a complete program and may only be used when the entire operating
7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information.
9*
10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11* Virtualized Server Adapter.
12* Copyright(c) 2002-2010 Exar Corp.
13*
14* The loadable module parameters supported by the driver, with a brief
15* explanation of each variable:
16* vlan_tag_strip:
17* Strip VLAN Tag enable/disable. Instructs the device to remove
18* the VLAN tag from all received tagged frames that are not
19* replicated at the internal L2 switch.
20* 0 - Do not strip the VLAN tag.
21* 1 - Strip the VLAN tag.
22*
23* addr_learn_en:
24* Enable learning the mac address of the guest OS interface in
25* a virtualization environment.
26* 0 - DISABLE
27* 1 - ENABLE
28*
29* max_config_port:
30* Maximum number of ports to be supported.
31* MIN - 1 and MAX - 2
32*
33* max_config_vpath:
34* This configures the maximum number of VPATHs configured for
35* each device function.
36* MIN - 1 and MAX - 17
37*
38* max_config_dev:
39* This configures the maximum number of device functions to be enabled.
40* MIN - 1 and MAX - 17
41*
42******************************************************************************/
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/bitops.h>
47#include <linux/if_vlan.h>
48#include <linux/interrupt.h>
49#include <linux/pci.h>
50#include <linux/slab.h>
51#include <linux/tcp.h>
52#include <net/ip.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/firmware.h>
56#include <linux/net_tstamp.h>
57#include <linux/prefetch.h>
58#include "vxge-main.h"
59#include "vxge-reg.h"
60
61MODULE_LICENSE("Dual BSD/GPL");
62MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
63 "Virtualized Server Adapter");
64
65static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
66 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
67 PCI_ANY_ID},
68 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
69 PCI_ANY_ID},
70 {0}
71};
72
73MODULE_DEVICE_TABLE(pci, vxge_id_table);
74
75VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
76VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
77VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
78VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
79VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
80VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
81
82static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
83 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
84static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
85 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
86module_param_array(bw_percentage, uint, NULL, 0);
87
88static struct vxge_drv_config *driver_config;
89
90static inline int is_vxge_card_up(struct vxgedev *vdev)
91{
92 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
93}
94
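/*
 * Drain one fifo's completed TxDs: collect finished skbs from the HW
 * under the tx queue lock (the pass is skipped if the lock is
 * contended), then free them after the lock is dropped. Loop while the
 * poll reports more completions than fit in the on-stack batch of
 * NR_SKB_COMPLETED entries.
 */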
95static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
96{
97 struct sk_buff **skb_ptr = NULL;
98 struct sk_buff **temp;
99#define NR_SKB_COMPLETED 128
100 struct sk_buff *completed[NR_SKB_COMPLETED];
101 int more;
102
103 do {
104 more = 0;
105 skb_ptr = completed;
106
107 if (__netif_tx_trylock(fifo->txq)) {
108 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
109 NR_SKB_COMPLETED, &more);
110 __netif_tx_unlock(fifo->txq);
111 }
112
113 /* free SKBs */
114 for (temp = completed; temp != skb_ptr; temp++)
115 dev_kfree_skb_irq(*temp);
116 } while (more);
117}
118
119static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
120{
121 int i;
122
123 /* Complete all transmits */
124 for (i = 0; i < vdev->no_of_vpath; i++)
125 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
126}
127
128static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
129{
130 int i;
131 struct vxge_ring *ring;
132
133 /* Complete all receives*/
134 for (i = 0; i < vdev->no_of_vpath; i++) {
135 ring = &vdev->vpaths[i].ring;
136 vxge_hw_vpath_poll_rx(ring->handle);
137 }
138}
139
140/*
141 * vxge_callback_link_up
142 *
143 * This function is called during interrupt context to notify link up state
144 * change.
145 */
146static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
147{
148 struct net_device *dev = hldev->ndev;
149 struct vxgedev *vdev = netdev_priv(dev);
150
151 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
152 vdev->ndev->name, __func__, __LINE__);
153 netdev_notice(vdev->ndev, "Link Up\n");
154 vdev->stats.link_up++;
155
156 netif_carrier_on(vdev->ndev);
157 netif_tx_wake_all_queues(vdev->ndev);
158
159 vxge_debug_entryexit(VXGE_TRACE,
160 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
161}
162
163/*
164 * vxge_callback_link_down
165 *
166 * This function is called during interrupt context to notify link down state
167 * change.
168 */
169static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
170{
171 struct net_device *dev = hldev->ndev;
172 struct vxgedev *vdev = netdev_priv(dev);
173
174 vxge_debug_entryexit(VXGE_TRACE,
175 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
176 netdev_notice(vdev->ndev, "Link Down\n");
177
178 vdev->stats.link_down++;
179 netif_carrier_off(vdev->ndev);
180 netif_tx_stop_all_queues(vdev->ndev);
181
182 vxge_debug_entryexit(VXGE_TRACE,
183 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
184}
185
186/*
187 * vxge_rx_alloc
188 *
189 * Allocate SKB.
190 */
191static struct sk_buff *
192vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
193{
194 struct net_device *dev;
195 struct sk_buff *skb;
196 struct vxge_rx_priv *rx_priv;
197
198 dev = ring->ndev;
199 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
200 ring->ndev->name, __func__, __LINE__);
201
202 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
203
204 /* try to allocate skb first. this one may fail */
205 skb = netdev_alloc_skb(dev, skb_size +
206 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
207 if (skb == NULL) {
208 vxge_debug_mem(VXGE_ERR,
209 "%s: out of memory to allocate SKB", dev->name);
210 ring->stats.skb_alloc_fail++;
211 return NULL;
212 }
213
214 vxge_debug_mem(VXGE_TRACE,
215 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
216 __func__, __LINE__, skb);
217
218 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
219
220 rx_priv->skb = skb;
221 rx_priv->skb_data = NULL;
222 rx_priv->data_size = skb_size;
223 vxge_debug_entryexit(VXGE_TRACE,
224 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
225
226 return skb;
227}
228
229/*
230 * vxge_rx_map
231 */
232static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
233{
234 struct vxge_rx_priv *rx_priv;
235 dma_addr_t dma_addr;
236
237 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
238 ring->ndev->name, __func__, __LINE__);
239 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
240
241 rx_priv->skb_data = rx_priv->skb->data;
242 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
243 rx_priv->data_size, PCI_DMA_FROMDEVICE);
244
245 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
246 ring->stats.pci_map_fail++;
247 return -EIO;
248 }
249 vxge_debug_mem(VXGE_TRACE,
250 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
251 ring->ndev->name, __func__, __LINE__,
252 (unsigned long long)dma_addr);
253 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
254
255 rx_priv->data_dma = dma_addr;
256 vxge_debug_entryexit(VXGE_TRACE,
257 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
258
259 return 0;
260}
261
262/*
263 * vxge_rx_initial_replenish
264 * Allocation of RxD as an initial replenish procedure.
265 */
266static enum vxge_hw_status
267vxge_rx_initial_replenish(void *dtrh, void *userdata)
268{
269 struct vxge_ring *ring = (struct vxge_ring *)userdata;
270 struct vxge_rx_priv *rx_priv;
271
272 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
273 ring->ndev->name, __func__, __LINE__);
274 if (vxge_rx_alloc(dtrh, ring,
275 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
276 return VXGE_HW_FAIL;
277
278 if (vxge_rx_map(dtrh, ring)) {
279 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
280 dev_kfree_skb(rx_priv->skb);
281
282 return VXGE_HW_FAIL;
283 }
284 vxge_debug_entryexit(VXGE_TRACE,
285 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
286
287 return VXGE_HW_OK;
288}
289
290static inline void
291vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
292 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
293{
294
295 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
296 ring->ndev->name, __func__, __LINE__);
297 skb_record_rx_queue(skb, ring->driver_id);
298 skb->protocol = eth_type_trans(skb, ring->ndev);
299
300 u64_stats_update_begin(&ring->stats.syncp);
301 ring->stats.rx_frms++;
302 ring->stats.rx_bytes += pkt_length;
303
304 if (skb->pkt_type == PACKET_MULTICAST)
305 ring->stats.rx_mcast++;
306 u64_stats_update_end(&ring->stats.syncp);
307
308 vxge_debug_rx(VXGE_TRACE,
309 "%s: %s:%d skb protocol = %d",
310 ring->ndev->name, __func__, __LINE__, skb->protocol);
311
312 if (ext_info->vlan &&
313 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
314 __vlan_hwaccel_put_tag(skb, ext_info->vlan);
315 napi_gro_receive(ring->napi_p, skb);
316
317 vxge_debug_entryexit(VXGE_TRACE,
318 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
319}
320
321static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
322 struct vxge_rx_priv *rx_priv)
323{
324 pci_dma_sync_single_for_device(ring->pdev,
325 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
326
327 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
328 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
329}
330
331static inline void vxge_post(int *dtr_cnt, void **first_dtr,
332 void *post_dtr, struct __vxge_hw_ring *ringh)
333{
334 int dtr_count = *dtr_cnt;
335 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
336 if (*first_dtr)
337 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
338 *first_dtr = post_dtr;
339 } else
340 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
341 dtr_count++;
342 *dtr_cnt = dtr_count;
343}
344
345/*
346 * vxge_rx_1b_compl
347 *
348 * If the interrupt is because of a received frame or if the receive ring
349 * contains fresh, as yet unprocessed frames, this function is called.
350 */
351static enum vxge_hw_status
352vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
353 u8 t_code, void *userdata)
354{
355 struct vxge_ring *ring = (struct vxge_ring *)userdata;
356 struct net_device *dev = ring->ndev;
357 unsigned int dma_sizes;
358 void *first_dtr = NULL;
359 int dtr_cnt = 0;
360 int data_size;
361 dma_addr_t data_dma;
362 int pkt_length;
363 struct sk_buff *skb;
364 struct vxge_rx_priv *rx_priv;
365 struct vxge_hw_ring_rxd_info ext_info;
366 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
367 ring->ndev->name, __func__, __LINE__);
368
369 do {
370 prefetch((char *)dtr + L1_CACHE_BYTES);
371 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
372 skb = rx_priv->skb;
373 data_size = rx_priv->data_size;
374 data_dma = rx_priv->data_dma;
375 prefetch(rx_priv->skb_data);
376
377 vxge_debug_rx(VXGE_TRACE,
378 "%s: %s:%d skb = 0x%p",
379 ring->ndev->name, __func__, __LINE__, skb);
380
381 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
382 pkt_length = dma_sizes;
383
384 pkt_length -= ETH_FCS_LEN;
385
386 vxge_debug_rx(VXGE_TRACE,
387 "%s: %s:%d Packet Length = %d",
388 ring->ndev->name, __func__, __LINE__, pkt_length);
389
390 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
391
392 /* check skb validity */
393 vxge_assert(skb);
394
395 prefetch((char *)skb + L1_CACHE_BYTES);
396 if (unlikely(t_code)) {
397 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
398 VXGE_HW_OK) {
399
400 ring->stats.rx_errors++;
401 vxge_debug_rx(VXGE_TRACE,
402 "%s: %s :%d Rx T_code is %d",
403 ring->ndev->name, __func__,
404 __LINE__, t_code);
405
406			/* If the t_code is not supported, and is other
407			 * than 0x5 (an unparseable packet, e.g. one with
408			 * an unknown IPv6 header), drop it.
409 */
410 vxge_re_pre_post(dtr, ring, rx_priv);
411
412 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
413 ring->stats.rx_dropped++;
414 continue;
415 }
416 }
417
418 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
419 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
420 if (!vxge_rx_map(dtr, ring)) {
421 skb_put(skb, pkt_length);
422
423 pci_unmap_single(ring->pdev, data_dma,
424 data_size, PCI_DMA_FROMDEVICE);
425
426 vxge_hw_ring_rxd_pre_post(ringh, dtr);
427 vxge_post(&dtr_cnt, &first_dtr, dtr,
428 ringh);
429 } else {
430 dev_kfree_skb(rx_priv->skb);
431 rx_priv->skb = skb;
432 rx_priv->data_size = data_size;
433 vxge_re_pre_post(dtr, ring, rx_priv);
434
435 vxge_post(&dtr_cnt, &first_dtr, dtr,
436 ringh);
437 ring->stats.rx_dropped++;
438 break;
439 }
440 } else {
441 vxge_re_pre_post(dtr, ring, rx_priv);
442
443 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
444 ring->stats.rx_dropped++;
445 break;
446 }
447 } else {
448 struct sk_buff *skb_up;
449
450 skb_up = netdev_alloc_skb(dev, pkt_length +
451 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
452 if (skb_up != NULL) {
453 skb_reserve(skb_up,
454 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
455
456 pci_dma_sync_single_for_cpu(ring->pdev,
457 data_dma, data_size,
458 PCI_DMA_FROMDEVICE);
459
460 vxge_debug_mem(VXGE_TRACE,
461 "%s: %s:%d skb_up = %p",
462 ring->ndev->name, __func__,
463					__LINE__, skb_up);
464 memcpy(skb_up->data, skb->data, pkt_length);
465
466 vxge_re_pre_post(dtr, ring, rx_priv);
467
468 vxge_post(&dtr_cnt, &first_dtr, dtr,
469 ringh);
470 /* will netif_rx small SKB instead */
471 skb = skb_up;
472 skb_put(skb, pkt_length);
473 } else {
474 vxge_re_pre_post(dtr, ring, rx_priv);
475
476 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
477 vxge_debug_rx(VXGE_ERR,
478 "%s: vxge_rx_1b_compl: out of "
479 "memory", dev->name);
480 ring->stats.skb_alloc_fail++;
481 break;
482 }
483 }
484
485 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
486 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
487 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
488 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
489 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
490 skb->ip_summed = CHECKSUM_UNNECESSARY;
491 else
492 skb_checksum_none_assert(skb);
493
494
495 if (ring->rx_hwts) {
496 struct skb_shared_hwtstamps *skb_hwts;
497 u32 ns = *(u32 *)(skb->head + pkt_length);
498
499 skb_hwts = skb_hwtstamps(skb);
500 skb_hwts->hwtstamp = ns_to_ktime(ns);
501 skb_hwts->syststamp.tv64 = 0;
502 }
503
504 /* rth_hash_type and rth_it_hit are non-zero regardless of
505 * whether rss is enabled. Only the rth_value is zero/non-zero
506 * if rss is disabled/enabled, so key off of that.
507 */
508 if (ext_info.rth_value)
509 skb->rxhash = ext_info.rth_value;
510
511 vxge_rx_complete(ring, skb, ext_info.vlan,
512 pkt_length, &ext_info);
513
514 ring->budget--;
515 ring->pkts_processed++;
516 if (!ring->budget)
517 break;
518
519 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
520 &t_code) == VXGE_HW_OK);
521
522 if (first_dtr)
523 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
524
525 vxge_debug_entryexit(VXGE_TRACE,
526 "%s:%d Exiting...",
527 __func__, __LINE__);
528 return VXGE_HW_OK;
529}
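/*
 * A minimal sketch of the copy-break policy used above (illustrative
 * only; vxge_rx_should_copy() is hypothetical and not part of the
 * driver): frames longer than VXGE_LL_RX_COPY_THRESHOLD keep their
 * original DMA buffer and a replacement buffer is allocated for the
 * descriptor, while smaller frames are memcpy'd into a right-sized skb
 * so the full-size buffer can be re-posted to the ring immediately.
 */
#if 0
static inline bool vxge_rx_should_copy(int pkt_length)
{
	/* small frame: cheaper to copy than to burn a full-size buffer */
	return pkt_length <= VXGE_LL_RX_COPY_THRESHOLD;
}
#endif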
530
531/*
532 * vxge_xmit_compl
533 *
534 * If an interrupt was raised to indicate DMA complete of the Tx packet,
535 * this function is called. It identifies the last TxD whose buffer was
536 * freed and frees all skbs whose data have already been DMA'ed into the
537 * NIC's internal memory.
538 */
539static enum vxge_hw_status
540vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
541 enum vxge_hw_fifo_tcode t_code, void *userdata,
542 struct sk_buff ***skb_ptr, int nr_skb, int *more)
543{
544 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
545 struct sk_buff *skb, **done_skb = *skb_ptr;
546 int pkt_cnt = 0;
547
548 vxge_debug_entryexit(VXGE_TRACE,
549 "%s:%d Entered....", __func__, __LINE__);
550
551 do {
552 int frg_cnt;
553 skb_frag_t *frag;
554 int i = 0, j;
555 struct vxge_tx_priv *txd_priv =
556 vxge_hw_fifo_txdl_private_get(dtr);
557
558 skb = txd_priv->skb;
559 frg_cnt = skb_shinfo(skb)->nr_frags;
560 frag = &skb_shinfo(skb)->frags[0];
561
562 vxge_debug_tx(VXGE_TRACE,
563 "%s: %s:%d fifo_hw = %p dtr = %p "
564 "tcode = 0x%x", fifo->ndev->name, __func__,
565 __LINE__, fifo_hw, dtr, t_code);
566 /* check skb validity */
567 vxge_assert(skb);
568 vxge_debug_tx(VXGE_TRACE,
569 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
570 fifo->ndev->name, __func__, __LINE__,
571 skb, txd_priv, frg_cnt);
572 if (unlikely(t_code)) {
573 fifo->stats.tx_errors++;
574 vxge_debug_tx(VXGE_ERR,
575 "%s: tx: dtr %p completed due to "
576 "error t_code %01x", fifo->ndev->name,
577 dtr, t_code);
578 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
579 }
580
581 /* for unfragmented skb */
582 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
583 skb_headlen(skb), PCI_DMA_TODEVICE);
584
585 for (j = 0; j < frg_cnt; j++) {
586 pci_unmap_page(fifo->pdev,
587 txd_priv->dma_buffers[i++],
588 frag->size, PCI_DMA_TODEVICE);
589 frag += 1;
590 }
591
592 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
593
594 /* Updating the statistics block */
595 u64_stats_update_begin(&fifo->stats.syncp);
596 fifo->stats.tx_frms++;
597 fifo->stats.tx_bytes += skb->len;
598 u64_stats_update_end(&fifo->stats.syncp);
599
600 *done_skb++ = skb;
601
602 if (--nr_skb <= 0) {
603 *more = 1;
604 break;
605 }
606
607 pkt_cnt++;
608 if (pkt_cnt > fifo->indicate_max_pkts)
609 break;
610
611 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
612 &dtr, &t_code) == VXGE_HW_OK);
613
614 *skb_ptr = done_skb;
615 if (netif_tx_queue_stopped(fifo->txq))
616 netif_tx_wake_queue(fifo->txq);
617
618 vxge_debug_entryexit(VXGE_TRACE,
619 "%s: %s:%d Exiting...",
620 fifo->ndev->name, __func__, __LINE__);
621 return VXGE_HW_OK;
622}
623
624/* select a vpath to transmit the packet */
625static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
626{
627 u16 queue_len, counter = 0;
628 if (skb->protocol == htons(ETH_P_IP)) {
629 struct iphdr *ip;
630 struct tcphdr *th;
631
632 ip = ip_hdr(skb);
633
634 if (!ip_is_fragment(ip)) {
635 th = (struct tcphdr *)(((unsigned char *)ip) +
636 ip->ihl*4);
637
638 queue_len = vdev->no_of_vpath;
639 counter = (ntohs(th->source) +
640 ntohs(th->dest)) &
641 vdev->vpath_selector[queue_len - 1];
642 if (counter >= queue_len)
643 counter = queue_len - 1;
644 }
645 }
646 return counter;
647}
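/*
 * A worked example of the steering hash above (illustrative only,
 * assuming no_of_vpath == 4): vpath_selector[3] == 3, so a TCP flow
 * with source port 1000 and destination port 2000 hashes to
 * (1000 + 2000) & 3 == 0 and is steered to vpath 0. The selector
 * entries are (2^ceil(log2(n)) - 1) masks, which is why the result
 * still has to be clamped to queue_len - 1 when queue_len is not a
 * power of two.
 */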
648
649static enum vxge_hw_status vxge_search_mac_addr_in_list(
650 struct vxge_vpath *vpath, u64 del_mac)
651{
652 struct list_head *entry, *next;
653 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
654 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
655 return TRUE;
656 }
657 return FALSE;
658}
659
660static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
661{
662 struct vxge_mac_addrs *new_mac_entry;
663 u8 *mac_address = NULL;
664
665 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
666 return TRUE;
667
668 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
669 if (!new_mac_entry) {
670 vxge_debug_mem(VXGE_ERR,
671 "%s: memory allocation failed",
672 VXGE_DRIVER_NAME);
673 return FALSE;
674 }
675
676 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
677
678 /* Copy the new mac address to the list */
679 mac_address = (u8 *)&new_mac_entry->macaddr;
680 memcpy(mac_address, mac->macaddr, ETH_ALEN);
681
682 new_mac_entry->state = mac->state;
683 vpath->mac_addr_cnt++;
684
685 if (is_multicast_ether_addr(mac->macaddr))
686 vpath->mcast_addr_cnt++;
687
688 return TRUE;
689}
690
691/* Add a mac address to DA table */
692static enum vxge_hw_status
693vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
694{
695 enum vxge_hw_status status = VXGE_HW_OK;
696 struct vxge_vpath *vpath;
697 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
698
699 if (is_multicast_ether_addr(mac->macaddr))
700 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
701 else
702 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
703
704 vpath = &vdev->vpaths[mac->vpath_no];
705 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
706 mac->macmask, duplicate_mode);
707 if (status != VXGE_HW_OK) {
708 vxge_debug_init(VXGE_ERR,
709 "DA config add entry failed for vpath:%d",
710 vpath->device_id);
711 } else
712 if (FALSE == vxge_mac_list_add(vpath, mac))
713 status = -EPERM;
714
715 return status;
716}
717
718static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
719{
720 struct macInfo mac_info;
721 u8 *mac_address = NULL;
722 u64 mac_addr = 0, vpath_vector = 0;
723 int vpath_idx = 0;
724 enum vxge_hw_status status = VXGE_HW_OK;
725 struct vxge_vpath *vpath = NULL;
726 struct __vxge_hw_device *hldev;
727
728 hldev = pci_get_drvdata(vdev->pdev);
729
730 mac_address = (u8 *)&mac_addr;
731 memcpy(mac_address, mac_header, ETH_ALEN);
732
733 /* Is this mac address already in the list? */
734 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
735 vpath = &vdev->vpaths[vpath_idx];
736 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
737 return vpath_idx;
738 }
739
740 memset(&mac_info, 0, sizeof(struct macInfo));
741 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
742
743 /* Any vpath has room to add mac address to its da table? */
744 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
745 vpath = &vdev->vpaths[vpath_idx];
746 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
747 /* Add this mac address to this vpath */
748 mac_info.vpath_no = vpath_idx;
749 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
750 status = vxge_add_mac_addr(vdev, &mac_info);
751 if (status != VXGE_HW_OK)
752 return -EPERM;
753 return vpath_idx;
754 }
755 }
756
757 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
758 vpath_idx = 0;
759 mac_info.vpath_no = vpath_idx;
760 /* Is the first vpath already selected as catch-basin ? */
761 vpath = &vdev->vpaths[vpath_idx];
762 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
763 /* Add this mac address to this vpath */
764 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
765 return -EPERM;
766 return vpath_idx;
767 }
768
769 /* Select first vpath as catch-basin */
770 vpath_vector = vxge_mBIT(vpath->device_id);
771 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
772 vxge_hw_mgmt_reg_type_mrpcim,
773 0,
774 (ulong)offsetof(
775 struct vxge_hw_mrpcim_reg,
776 rts_mgr_cbasin_cfg),
777 vpath_vector);
778 if (status != VXGE_HW_OK) {
779 vxge_debug_tx(VXGE_ERR,
780 "%s: Unable to set the vpath-%d in catch-basin mode",
781 VXGE_DRIVER_NAME, vpath->device_id);
782 return -EPERM;
783 }
784
785 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
786 return -EPERM;
787
788 return vpath_idx;
789}
790
791/**
792 * vxge_xmit
793 * @skb : the socket buffer containing the Tx data.
794 * @dev : device pointer.
795 *
796 * This function is the Tx entry point of the driver. The Neterion NIC supports
797 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
798*/
799static netdev_tx_t
800vxge_xmit(struct sk_buff *skb, struct net_device *dev)
801{
802 struct vxge_fifo *fifo = NULL;
803 void *dtr_priv;
804 void *dtr = NULL;
805 struct vxgedev *vdev = NULL;
806 enum vxge_hw_status status;
807 int frg_cnt, first_frg_len;
808 skb_frag_t *frag;
809 int i = 0, j = 0, avail;
810 u64 dma_pointer;
811 struct vxge_tx_priv *txdl_priv = NULL;
812 struct __vxge_hw_fifo *fifo_hw;
813 int offload_type;
814 int vpath_no = 0;
815
816 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
817 dev->name, __func__, __LINE__);
818
819 /* A buffer with no data will be dropped */
820 if (unlikely(skb->len <= 0)) {
821 vxge_debug_tx(VXGE_ERR,
822 "%s: Buffer has no data..", dev->name);
823 dev_kfree_skb(skb);
824 return NETDEV_TX_OK;
825 }
826
827 vdev = netdev_priv(dev);
828
829 if (unlikely(!is_vxge_card_up(vdev))) {
830 vxge_debug_tx(VXGE_ERR,
831 "%s: vdev not initialized", dev->name);
832 dev_kfree_skb(skb);
833 return NETDEV_TX_OK;
834 }
835
836 if (vdev->config.addr_learn_en) {
837 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
838 if (vpath_no == -EPERM) {
839 vxge_debug_tx(VXGE_ERR,
840 "%s: Failed to store the mac address",
841 dev->name);
842 dev_kfree_skb(skb);
843 return NETDEV_TX_OK;
844 }
845 }
846
847 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
848 vpath_no = skb_get_queue_mapping(skb);
849 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
850 vpath_no = vxge_get_vpath_no(vdev, skb);
851
852 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
853
854 if (vpath_no >= vdev->no_of_vpath)
855 vpath_no = 0;
856
857 fifo = &vdev->vpaths[vpath_no].fifo;
858 fifo_hw = fifo->handle;
859
860 if (netif_tx_queue_stopped(fifo->txq))
861 return NETDEV_TX_BUSY;
862
863 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
864 if (avail == 0) {
865 vxge_debug_tx(VXGE_ERR,
866 "%s: No free TXDs available", dev->name);
867 fifo->stats.txd_not_free++;
868 goto _exit0;
869 }
870
871 /* Last TXD? Stop tx queue to avoid dropping packets. TX
872 * completion will resume the queue.
873 */
874 if (avail == 1)
875 netif_tx_stop_queue(fifo->txq);
876
877 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
878 if (unlikely(status != VXGE_HW_OK)) {
879 vxge_debug_tx(VXGE_ERR,
880 "%s: Out of descriptors .", dev->name);
881 fifo->stats.txd_out_of_desc++;
882 goto _exit0;
883 }
884
885 vxge_debug_tx(VXGE_TRACE,
886 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
887 dev->name, __func__, __LINE__,
888 fifo_hw, dtr, dtr_priv);
889
890 if (vlan_tx_tag_present(skb)) {
891 u16 vlan_tag = vlan_tx_tag_get(skb);
892 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
893 }
894
895 first_frg_len = skb_headlen(skb);
896
897 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
898 PCI_DMA_TODEVICE);
899
900 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
901 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
902 fifo->stats.pci_map_fail++;
903 goto _exit0;
904 }
905
906 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
907 txdl_priv->skb = skb;
908 txdl_priv->dma_buffers[j] = dma_pointer;
909
910 frg_cnt = skb_shinfo(skb)->nr_frags;
911 vxge_debug_tx(VXGE_TRACE,
912 "%s: %s:%d skb = %p txdl_priv = %p "
913 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
914 __func__, __LINE__, skb, txdl_priv,
915 frg_cnt, (unsigned long long)dma_pointer);
916
917 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
918 first_frg_len);
919
920 frag = &skb_shinfo(skb)->frags[0];
921 for (i = 0; i < frg_cnt; i++) {
922 /* ignore 0 length fragment */
923 if (!frag->size)
924 continue;
925
926 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
927 frag->page_offset, frag->size,
928 PCI_DMA_TODEVICE);
929
930 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
931 goto _exit2;
932 vxge_debug_tx(VXGE_TRACE,
933 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
934 dev->name, __func__, __LINE__, i,
935 (unsigned long long)dma_pointer);
936
937 txdl_priv->dma_buffers[j] = dma_pointer;
938 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
939 frag->size);
940 frag += 1;
941 }
942
943 offload_type = vxge_offload_type(skb);
944
945 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
946 int mss = vxge_tcp_mss(skb);
947 if (mss) {
948 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
949 dev->name, __func__, __LINE__, mss);
950 vxge_hw_fifo_txdl_mss_set(dtr, mss);
951 } else {
952 vxge_assert(skb->len <=
953 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
954 vxge_assert(0);
955 goto _exit1;
956 }
957 }
958
959 if (skb->ip_summed == CHECKSUM_PARTIAL)
960 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
961 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
962 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
963 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
964
965 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
966
967 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
968 dev->name, __func__, __LINE__);
969 return NETDEV_TX_OK;
970
971_exit2:
972 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
973_exit1:
974 j = 0;
975 frag = &skb_shinfo(skb)->frags[0];
976
977 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
978 skb_headlen(skb), PCI_DMA_TODEVICE);
979
980 for (; j < i; j++) {
981 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
982 frag->size, PCI_DMA_TODEVICE);
983 frag += 1;
984 }
985
986 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
987_exit0:
988 netif_tx_stop_queue(fifo->txq);
989 dev_kfree_skb(skb);
990
991 return NETDEV_TX_OK;
992}
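/*
 * A condensed sketch of the TXD life cycle that vxge_xmit() drives
 * (illustrative only; error handling and debug prints omitted, and
 * dma_hdr, hdr_len and cko_bits are placeholders, not driver symbols):
 */
#if 0
	vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, 0, dma_hdr, hdr_len);
	/* ...one buffer_set() per non-empty fragment... */
	if (mss)
		vxge_hw_fifo_txdl_mss_set(dtr, mss);	/* LSO */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr, cko_bits);
	vxge_hw_fifo_txdl_post(fifo_hw, dtr);		/* hand off to HW */
#endif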
993
994/*
995 * vxge_rx_term
996 *
997 * This function is called by the HW layer to abort all outstanding receive
998 * descriptors.
999 */
1000static void
1001vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1002{
1003 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1004 struct vxge_rx_priv *rx_priv =
1005 vxge_hw_ring_rxd_private_get(dtrh);
1006
1007 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1008 ring->ndev->name, __func__, __LINE__);
1009 if (state != VXGE_HW_RXD_STATE_POSTED)
1010 return;
1011
1012 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1013 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1014
1015 dev_kfree_skb(rx_priv->skb);
1016 rx_priv->skb_data = NULL;
1017
1018 vxge_debug_entryexit(VXGE_TRACE,
1019 "%s: %s:%d Exiting...",
1020 ring->ndev->name, __func__, __LINE__);
1021}
1022
1023/*
1024 * vxge_tx_term
1025 *
1026 * This function is called to abort all outstanding Tx descriptors.
1027 */
1028static void
1029vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1030{
1031 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1032 skb_frag_t *frag;
1033 int i = 0, j, frg_cnt;
1034 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1035 struct sk_buff *skb = txd_priv->skb;
1036
1037 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1038
1039 if (state != VXGE_HW_TXDL_STATE_POSTED)
1040 return;
1041
1042 /* check skb validity */
1043 vxge_assert(skb);
1044 frg_cnt = skb_shinfo(skb)->nr_frags;
1045 frag = &skb_shinfo(skb)->frags[0];
1046
1047 /* for unfragmented skb */
1048 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1049 skb_headlen(skb), PCI_DMA_TODEVICE);
1050
1051 for (j = 0; j < frg_cnt; j++) {
1052 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1053 frag->size, PCI_DMA_TODEVICE);
1054 frag += 1;
1055 }
1056
1057 dev_kfree_skb(skb);
1058
1059 vxge_debug_entryexit(VXGE_TRACE,
1060 "%s:%d Exiting...", __func__, __LINE__);
1061}
1062
1063static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1064{
1065 struct list_head *entry, *next;
1066 u64 del_mac = 0;
1067 u8 *mac_address = (u8 *) (&del_mac);
1068
1069 /* Copy the mac address to delete from the list */
1070 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1071
1072 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1073 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1074 list_del(entry);
1075 kfree((struct vxge_mac_addrs *)entry);
1076 vpath->mac_addr_cnt--;
1077
1078 if (is_multicast_ether_addr(mac->macaddr))
1079 vpath->mcast_addr_cnt--;
1080 return TRUE;
1081 }
1082 }
1083
1084 return FALSE;
1085}
1086
1087/* delete a mac address from DA table */
1088static enum vxge_hw_status
1089vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1090{
1091 enum vxge_hw_status status = VXGE_HW_OK;
1092 struct vxge_vpath *vpath;
1093
1094 vpath = &vdev->vpaths[mac->vpath_no];
1095 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1096 mac->macmask);
1097 if (status != VXGE_HW_OK) {
1098 vxge_debug_init(VXGE_ERR,
1099 "DA config delete entry failed for vpath:%d",
1100 vpath->device_id);
1101 } else
1102 vxge_mac_list_del(vpath, mac);
1103 return status;
1104}
1105
1106/**
1107 * vxge_set_multicast
1108 * @dev: pointer to the device structure
1109 *
1110 * Entry point for multicast address enable/disable
1111 * This function is a driver entry point which gets called by the kernel
1112 * whenever multicast addresses must be enabled/disabled. This also gets
1113 * called to set/reset promiscuous mode. Depending on the device flags, we
1114 * determine whether multicast addresses must be enabled or promiscuous
1115 * mode is to be disabled, etc.
1116 */
1117static void vxge_set_multicast(struct net_device *dev)
1118{
1119 struct netdev_hw_addr *ha;
1120 struct vxgedev *vdev;
1121 int i, mcast_cnt = 0;
1122 struct __vxge_hw_device *hldev;
1123 struct vxge_vpath *vpath;
1124 enum vxge_hw_status status = VXGE_HW_OK;
1125 struct macInfo mac_info;
1126 int vpath_idx = 0;
1127 struct vxge_mac_addrs *mac_entry;
1128 struct list_head *list_head;
1129 struct list_head *entry, *next;
1130 u8 *mac_address = NULL;
1131
1132 vxge_debug_entryexit(VXGE_TRACE,
1133 "%s:%d", __func__, __LINE__);
1134
1135 vdev = netdev_priv(dev);
1136 hldev = (struct __vxge_hw_device *)vdev->devh;
1137
1138 if (unlikely(!is_vxge_card_up(vdev)))
1139 return;
1140
1141 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1142 for (i = 0; i < vdev->no_of_vpath; i++) {
1143 vpath = &vdev->vpaths[i];
1144 vxge_assert(vpath->is_open);
1145 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1146 if (status != VXGE_HW_OK)
1147 vxge_debug_init(VXGE_ERR, "failed to enable "
1148 "multicast, status %d", status);
1149 vdev->all_multi_flg = 1;
1150 }
1151 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1152 for (i = 0; i < vdev->no_of_vpath; i++) {
1153 vpath = &vdev->vpaths[i];
1154 vxge_assert(vpath->is_open);
1155 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1156 if (status != VXGE_HW_OK)
1157 vxge_debug_init(VXGE_ERR, "failed to disable "
1158 "multicast, status %d", status);
1159 vdev->all_multi_flg = 0;
1160 }
1161 }
1162
1163
1164 if (!vdev->config.addr_learn_en) {
1165 for (i = 0; i < vdev->no_of_vpath; i++) {
1166 vpath = &vdev->vpaths[i];
1167 vxge_assert(vpath->is_open);
1168
1169 if (dev->flags & IFF_PROMISC)
1170 status = vxge_hw_vpath_promisc_enable(
1171 vpath->handle);
1172 else
1173 status = vxge_hw_vpath_promisc_disable(
1174 vpath->handle);
1175 if (status != VXGE_HW_OK)
1176 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1177 ", status %d", dev->flags&IFF_PROMISC ?
1178 "enable" : "disable", status);
1179 }
1180 }
1181
1182 memset(&mac_info, 0, sizeof(struct macInfo));
1183 /* Update individual M_CAST address list */
1184 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1185 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1186 list_head = &vdev->vpaths[0].mac_addr_list;
1187 if ((netdev_mc_count(dev) +
1188 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1189 vdev->vpaths[0].max_mac_addr_cnt)
1190 goto _set_all_mcast;
1191
1192 /* Delete previous MC's */
1193 for (i = 0; i < mcast_cnt; i++) {
1194 list_for_each_safe(entry, next, list_head) {
1195 mac_entry = (struct vxge_mac_addrs *)entry;
1196 /* Copy the mac address to delete */
1197 mac_address = (u8 *)&mac_entry->macaddr;
1198 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1199
1200 if (is_multicast_ether_addr(mac_info.macaddr)) {
1201 for (vpath_idx = 0; vpath_idx <
1202 vdev->no_of_vpath;
1203 vpath_idx++) {
1204 mac_info.vpath_no = vpath_idx;
1205 status = vxge_del_mac_addr(
1206 vdev,
1207 &mac_info);
1208 }
1209 }
1210 }
1211 }
1212
1213 /* Add new ones */
1214 netdev_for_each_mc_addr(ha, dev) {
1215 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1216 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1217 vpath_idx++) {
1218 mac_info.vpath_no = vpath_idx;
1219 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1220 status = vxge_add_mac_addr(vdev, &mac_info);
1221 if (status != VXGE_HW_OK) {
1222 vxge_debug_init(VXGE_ERR,
1223 "%s:%d Setting individual"
1224 "multicast address failed",
1225 __func__, __LINE__);
1226 goto _set_all_mcast;
1227 }
1228 }
1229 }
1230
1231 return;
1232_set_all_mcast:
1233 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1234 /* Delete previous MC's */
1235 for (i = 0; i < mcast_cnt; i++) {
1236 list_for_each_safe(entry, next, list_head) {
1237 mac_entry = (struct vxge_mac_addrs *)entry;
1238 /* Copy the mac address to delete */
1239 mac_address = (u8 *)&mac_entry->macaddr;
1240 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1241
1242 if (is_multicast_ether_addr(mac_info.macaddr))
1243 break;
1244 }
1245
1246 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1247 vpath_idx++) {
1248 mac_info.vpath_no = vpath_idx;
1249 status = vxge_del_mac_addr(vdev, &mac_info);
1250 }
1251 }
1252
1253 /* Enable all multicast */
1254 for (i = 0; i < vdev->no_of_vpath; i++) {
1255 vpath = &vdev->vpaths[i];
1256 vxge_assert(vpath->is_open);
1257
1258 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1259 if (status != VXGE_HW_OK) {
1260 vxge_debug_init(VXGE_ERR,
1261 "%s:%d Enabling all multicasts failed",
1262 __func__, __LINE__);
1263 }
1264 vdev->all_multi_flg = 1;
1265 }
1266 dev->flags |= IFF_ALLMULTI;
1267 }
1268
1269 vxge_debug_entryexit(VXGE_TRACE,
1270 "%s:%d Exiting...", __func__, __LINE__);
1271}
1272
1273/**
1274 * vxge_set_mac_addr
1275 * @dev: pointer to the device structure
1276 *
1277 * Update entry "0" (default MAC addr)
1278 */
1279static int vxge_set_mac_addr(struct net_device *dev, void *p)
1280{
1281 struct sockaddr *addr = p;
1282 struct vxgedev *vdev;
1283 struct __vxge_hw_device *hldev;
1284 enum vxge_hw_status status = VXGE_HW_OK;
1285 struct macInfo mac_info_new, mac_info_old;
1286 int vpath_idx = 0;
1287
1288 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1289
1290 vdev = netdev_priv(dev);
1291 hldev = vdev->devh;
1292
1293 if (!is_valid_ether_addr(addr->sa_data))
1294 return -EINVAL;
1295
1296 memset(&mac_info_new, 0, sizeof(struct macInfo));
1297 memset(&mac_info_old, 0, sizeof(struct macInfo));
1298
1299 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1300 __func__, __LINE__);
1301
1302 /* Get the old address */
1303 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1304
1305 /* Copy the new address */
1306 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1307
1308 /* First delete the old mac address from all the vpaths
1309 as we can't specify the index while adding new mac address */
1310 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1311 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1312 if (!vpath->is_open) {
1313 /* This can happen when this interface is added/removed
1314 to the bonding interface. Delete this station address
1315 from the linked list */
1316 vxge_mac_list_del(vpath, &mac_info_old);
1317
1318 /* Add this new address to the linked list
1319 for later restoring */
1320 vxge_mac_list_add(vpath, &mac_info_new);
1321
1322 continue;
1323 }
1324 /* Delete the station address */
1325 mac_info_old.vpath_no = vpath_idx;
1326 status = vxge_del_mac_addr(vdev, &mac_info_old);
1327 }
1328
1329 if (unlikely(!is_vxge_card_up(vdev))) {
1330 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1331 return VXGE_HW_OK;
1332 }
1333
1334 /* Set this mac address to all the vpaths */
1335 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1336 mac_info_new.vpath_no = vpath_idx;
1337 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1338 status = vxge_add_mac_addr(vdev, &mac_info_new);
1339 if (status != VXGE_HW_OK)
1340 return -EINVAL;
1341 }
1342
1343 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1344
1345 return status;
1346}
1347
1348/*
1349 * vxge_vpath_intr_enable
1350 * @vdev: pointer to vdev
1351 * @vp_id: vpath for which to enable the interrupts
1352 *
1353 * Enables the interrupts for the vpath
1354*/
1355static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1356{
1357 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1358 int msix_id = 0;
1359 int tim_msix_id[4] = {0, 1, 0, 0};
1360 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1361
1362 vxge_hw_vpath_intr_enable(vpath->handle);
1363
1364 if (vdev->config.intr_type == INTA)
1365 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1366 else {
1367 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1368 alarm_msix_id);
1369
1370 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1371 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1372 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1373
1374 /* enable the alarm vector */
1375 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1376 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1377 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1378 }
1379}
1380
1381/*
1382 * vxge_vpath_intr_disable
1383 * @vdev: pointer to vdev
1384 * @vp_id: vpath for which to disable the interrupts
1385 *
1386 * Disables the interrupts for the vpath
1387*/
1388static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1389{
1390 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1391 struct __vxge_hw_device *hldev;
1392 int msix_id;
1393
1394 hldev = pci_get_drvdata(vdev->pdev);
1395
1396 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1397
1398 vxge_hw_vpath_intr_disable(vpath->handle);
1399
1400 if (vdev->config.intr_type == INTA)
1401 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1402 else {
1403 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1404 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1405 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1406
1407 /* disable the alarm vector */
1408 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1409 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1410 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1411 }
1412}
1413
1414/* list all mac addresses from DA table */
1415static enum vxge_hw_status
1416vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1417{
1418 enum vxge_hw_status status = VXGE_HW_OK;
1419 unsigned char macmask[ETH_ALEN];
1420 unsigned char macaddr[ETH_ALEN];
1421
1422 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1423 macaddr, macmask);
1424 if (status != VXGE_HW_OK) {
1425 vxge_debug_init(VXGE_ERR,
1426 "DA config list entry failed for vpath:%d",
1427 vpath->device_id);
1428 return status;
1429 }
1430
1431 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1432 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1433 macaddr, macmask);
1434 if (status != VXGE_HW_OK)
1435 break;
1436 }
1437
1438 return status;
1439}
1440
1441/* Store all mac addresses from the list to the DA table */
1442static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1443{
1444 enum vxge_hw_status status = VXGE_HW_OK;
1445 struct macInfo mac_info;
1446 u8 *mac_address = NULL;
1447 struct list_head *entry, *next;
1448
1449 memset(&mac_info, 0, sizeof(struct macInfo));
1450
1451 if (vpath->is_open) {
1452 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1453 mac_address =
1454 (u8 *)&
1455 ((struct vxge_mac_addrs *)entry)->macaddr;
1456 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1457 ((struct vxge_mac_addrs *)entry)->state =
1458 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1459 /* does this mac address already exist in da table? */
1460 status = vxge_search_mac_addr_in_da_table(vpath,
1461 &mac_info);
1462 if (status != VXGE_HW_OK) {
1463 /* Add this mac address to the DA table */
1464 status = vxge_hw_vpath_mac_addr_add(
1465 vpath->handle, mac_info.macaddr,
1466 mac_info.macmask,
1467 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1468 if (status != VXGE_HW_OK) {
1469 vxge_debug_init(VXGE_ERR,
1470 "DA add entry failed for vpath:%d",
1471 vpath->device_id);
1472 ((struct vxge_mac_addrs *)entry)->state
1473 = VXGE_LL_MAC_ADDR_IN_LIST;
1474 }
1475 }
1476 }
1477 }
1478
1479 return status;
1480}
1481
1482/* Store all vlan ids from the list to the vid table */
1483static enum vxge_hw_status
1484vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1485{
1486 enum vxge_hw_status status = VXGE_HW_OK;
1487 struct vxgedev *vdev = vpath->vdev;
1488 u16 vid;
1489
1490 if (!vpath->is_open)
1491 return status;
1492
1493 for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1494 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1495
1496 return status;
1497}
1498
1499/*
1500 * vxge_reset_vpath
1501 * @vdev: pointer to vdev
1502 * @vp_id: vpath to reset
1503 *
1504 * Resets the vpath
1505*/
1506static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1507{
1508 enum vxge_hw_status status = VXGE_HW_OK;
1509 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1510 int ret = 0;
1511
1512 /* check if device is down already */
1513 if (unlikely(!is_vxge_card_up(vdev)))
1514 return 0;
1515
1516 /* is device reset already scheduled */
1517 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1518 return 0;
1519
1520 if (vpath->handle) {
1521 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1522 if (is_vxge_card_up(vdev) &&
1523 vxge_hw_vpath_recover_from_reset(vpath->handle)
1524 != VXGE_HW_OK) {
1525 vxge_debug_init(VXGE_ERR,
1526 "vxge_hw_vpath_recover_from_reset"
1527 "failed for vpath:%d", vp_id);
1528 return status;
1529 }
1530 } else {
1531 vxge_debug_init(VXGE_ERR,
1532 "vxge_hw_vpath_reset failed for"
1533 "vpath:%d", vp_id);
1534 return status;
1535 }
1536 } else
1537 return VXGE_HW_FAIL;
1538
1539 vxge_restore_vpath_mac_addr(vpath);
1540 vxge_restore_vpath_vid_table(vpath);
1541
1542 /* Enable all broadcast */
1543 vxge_hw_vpath_bcast_enable(vpath->handle);
1544
1545 /* Enable all multicast */
1546 if (vdev->all_multi_flg) {
1547 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1548 if (status != VXGE_HW_OK)
1549 vxge_debug_init(VXGE_ERR,
1550 "%s:%d Enabling multicast failed",
1551 __func__, __LINE__);
1552 }
1553
1554 /* Enable the interrupts */
1555 vxge_vpath_intr_enable(vdev, vp_id);
1556
1557 smp_wmb();
1558
1559 /* Enable the flow of traffic through the vpath */
1560 vxge_hw_vpath_enable(vpath->handle);
1561
1562 smp_wmb();
1563 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1564 vpath->ring.last_status = VXGE_HW_OK;
1565
1566 /* Vpath reset done */
1567 clear_bit(vp_id, &vdev->vp_reset);
1568
1569 /* Start the vpath queue */
1570 if (netif_tx_queue_stopped(vpath->fifo.txq))
1571 netif_tx_wake_queue(vpath->fifo.txq);
1572
1573 return ret;
1574}
1575
1576/* Configure CI */
1577static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1578{
1579 int i = 0;
1580
1581 /* Enable CI for RTI */
1582 if (vdev->config.intr_type == MSI_X) {
1583 for (i = 0; i < vdev->no_of_vpath; i++) {
1584 struct __vxge_hw_ring *hw_ring;
1585
1586 hw_ring = vdev->vpaths[i].ring.handle;
1587 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1588 }
1589 }
1590
1591 /* Enable CI for TTI */
1592 for (i = 0; i < vdev->no_of_vpath; i++) {
1593 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1594 vxge_hw_vpath_tti_ci_set(hw_fifo);
1595 /*
1596		 * For INTA (with or without NAPI), set CI ON for only one
1597		 * vpath (there is only one free-running timer).
1598 */
1599 if ((vdev->config.intr_type == INTA) && (i == 0))
1600 break;
1601 }
1602
1603 return;
1604}
1605
1606static int do_vxge_reset(struct vxgedev *vdev, int event)
1607{
1608 enum vxge_hw_status status;
1609 int ret = 0, vp_id, i;
1610
1611 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1612
1613 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1614 /* check if device is down already */
1615 if (unlikely(!is_vxge_card_up(vdev)))
1616 return 0;
1617
1618 /* is reset already scheduled */
1619 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1620 return 0;
1621 }
1622
1623 if (event == VXGE_LL_FULL_RESET) {
1624 netif_carrier_off(vdev->ndev);
1625
1626 /* wait for all the vpath reset to complete */
1627 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1628 while (test_bit(vp_id, &vdev->vp_reset))
1629 msleep(50);
1630 }
1631
1632 netif_carrier_on(vdev->ndev);
1633
1634 /* if execution mode is set to debug, don't reset the adapter */
1635 if (unlikely(vdev->exec_mode)) {
1636 vxge_debug_init(VXGE_ERR,
1637 "%s: execution mode is debug, returning..",
1638 vdev->ndev->name);
1639 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1640 netif_tx_stop_all_queues(vdev->ndev);
1641 return 0;
1642 }
1643 }
1644
1645 if (event == VXGE_LL_FULL_RESET) {
1646 vxge_hw_device_wait_receive_idle(vdev->devh);
1647 vxge_hw_device_intr_disable(vdev->devh);
1648
1649 switch (vdev->cric_err_event) {
1650 case VXGE_HW_EVENT_UNKNOWN:
1651 netif_tx_stop_all_queues(vdev->ndev);
1652 vxge_debug_init(VXGE_ERR,
1653 "fatal: %s: Disabling device due to"
1654 "unknown error",
1655 vdev->ndev->name);
1656 ret = -EPERM;
1657 goto out;
1658 case VXGE_HW_EVENT_RESET_START:
1659 break;
1660 case VXGE_HW_EVENT_RESET_COMPLETE:
1661 case VXGE_HW_EVENT_LINK_DOWN:
1662 case VXGE_HW_EVENT_LINK_UP:
1663 case VXGE_HW_EVENT_ALARM_CLEARED:
1664 case VXGE_HW_EVENT_ECCERR:
1665 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1666 ret = -EPERM;
1667 goto out;
1668 case VXGE_HW_EVENT_FIFO_ERR:
1669 case VXGE_HW_EVENT_VPATH_ERR:
1670 break;
1671 case VXGE_HW_EVENT_CRITICAL_ERR:
1672 netif_tx_stop_all_queues(vdev->ndev);
1673 vxge_debug_init(VXGE_ERR,
1674 "fatal: %s: Disabling device due to"
1675 "serious error",
1676 vdev->ndev->name);
1677 /* SOP or device reset required */
1678 /* This event is not currently used */
1679 ret = -EPERM;
1680 goto out;
1681 case VXGE_HW_EVENT_SERR:
1682 netif_tx_stop_all_queues(vdev->ndev);
1683 vxge_debug_init(VXGE_ERR,
1684 "fatal: %s: Disabling device due to"
1685 "serious error",
1686 vdev->ndev->name);
1687 ret = -EPERM;
1688 goto out;
1689 case VXGE_HW_EVENT_SRPCIM_SERR:
1690 case VXGE_HW_EVENT_MRPCIM_SERR:
1691 ret = -EPERM;
1692 goto out;
1693 case VXGE_HW_EVENT_SLOT_FREEZE:
1694 netif_tx_stop_all_queues(vdev->ndev);
1695 vxge_debug_init(VXGE_ERR,
1696 "fatal: %s: Disabling device due to"
1697 "slot freeze",
1698 vdev->ndev->name);
1699 ret = -EPERM;
1700 goto out;
1701 default:
1702 break;
1703
1704 }
1705 }
1706
1707 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1708 netif_tx_stop_all_queues(vdev->ndev);
1709
1710 if (event == VXGE_LL_FULL_RESET) {
1711 status = vxge_reset_all_vpaths(vdev);
1712 if (status != VXGE_HW_OK) {
1713 vxge_debug_init(VXGE_ERR,
1714 "fatal: %s: can not reset vpaths",
1715 vdev->ndev->name);
1716 ret = -EPERM;
1717 goto out;
1718 }
1719 }
1720
1721 if (event == VXGE_LL_COMPL_RESET) {
1722 for (i = 0; i < vdev->no_of_vpath; i++)
1723 if (vdev->vpaths[i].handle) {
1724 if (vxge_hw_vpath_recover_from_reset(
1725 vdev->vpaths[i].handle)
1726 != VXGE_HW_OK) {
1727 vxge_debug_init(VXGE_ERR,
1728 "vxge_hw_vpath_recover_"
1729 "from_reset failed for vpath: "
1730 "%d", i);
1731 ret = -EPERM;
1732 goto out;
1733 }
1734 } else {
1735 vxge_debug_init(VXGE_ERR,
1736 "vxge_hw_vpath_reset failed for "
1737 "vpath:%d", i);
1738 ret = -EPERM;
1739 goto out;
1740 }
1741 }
1742
1743 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1744 /* Reprogram the DA table with populated mac addresses */
1745 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1746 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1747 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1748 }
1749
1750 /* enable vpath interrupts */
1751 for (i = 0; i < vdev->no_of_vpath; i++)
1752 vxge_vpath_intr_enable(vdev, i);
1753
1754 vxge_hw_device_intr_enable(vdev->devh);
1755
1756 smp_wmb();
1757
1758 /* Indicate card up */
1759 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1760
1761 /* Get the traffic to flow through the vpaths */
1762 for (i = 0; i < vdev->no_of_vpath; i++) {
1763 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1764 smp_wmb();
1765 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1766 }
1767
1768 netif_tx_wake_all_queues(vdev->ndev);
1769 }
1770
1771 /* configure CI */
1772 vxge_config_ci_for_tti_rti(vdev);
1773
1774out:
1775 vxge_debug_entryexit(VXGE_TRACE,
1776 "%s:%d Exiting...", __func__, __LINE__);
1777
1778 /* Indicate reset done */
1779 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1780 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1781 return ret;
1782}
1783
1784/*
1785 * vxge_reset
1786 * @vdev: pointer to ll device
1787 *
1788 * driver may reset the chip on events of serr, eccerr, etc
1789 */
1790static void vxge_reset(struct work_struct *work)
1791{
1792 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1793
1794 if (!netif_running(vdev->ndev))
1795 return;
1796
1797 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1798}
1799
1800/**
1801 * vxge_poll_msix - Receive handler when Receive Polling is used.
1802 * @napi: pointer to the napi structure.
1803 * @budget: Number of packets budgeted to be processed in this iteration.
1804 *
1805 * This function comes into the picture only if the receive side is being
1806 * handled through polling (called NAPI in Linux). It mostly does what the
1807 * normal Rx interrupt handler does in terms of descriptor and packet
1808 * processing, but not in an interrupt context. It will also process at
1809 * most a specified number of packets in one iteration, passed down by the
1810 * kernel as the function argument 'budget'.
1811 */
1812static int vxge_poll_msix(struct napi_struct *napi, int budget)
1813{
1814 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1815 int pkts_processed;
1816 int budget_org = budget;
1817
1818 ring->budget = budget;
1819 ring->pkts_processed = 0;
1820 vxge_hw_vpath_poll_rx(ring->handle);
1821 pkts_processed = ring->pkts_processed;
1822
1823 if (ring->pkts_processed < budget_org) {
1824 napi_complete(napi);
1825
1826 /* Re enable the Rx interrupts for the vpath */
1827 vxge_hw_channel_msix_unmask(
1828 (struct __vxge_hw_channel *)ring->handle,
1829 ring->rx_vector_no);
1830 mmiowb();
1831 }
1832
1833	/* Return the local copy: once the MSI-X vector is unmasked above,
1834	 * the interrupt may fire right away and preempt this NAPI
1835	 * thread. */
1836 return pkts_processed;
1837}
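/*
 * NAPI contract note (a summary of the code above, not new behaviour):
 * with budget == 64, completing, say, 10 frames leaves pkts_processed
 * below budget, so napi_complete() is called and the ring's MSI-X
 * vector is unmasked; returning the full budget instead keeps the
 * vpath in polled mode and the kernel reschedules this NAPI instance.
 */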
1838
1839static int vxge_poll_inta(struct napi_struct *napi, int budget)
1840{
1841 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1842 int pkts_processed = 0;
1843 int i;
1844 int budget_org = budget;
1845 struct vxge_ring *ring;
1846
1847 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1848
1849 for (i = 0; i < vdev->no_of_vpath; i++) {
1850 ring = &vdev->vpaths[i].ring;
1851 ring->budget = budget;
1852 ring->pkts_processed = 0;
1853 vxge_hw_vpath_poll_rx(ring->handle);
1854 pkts_processed += ring->pkts_processed;
1855 budget -= ring->pkts_processed;
1856 if (budget <= 0)
1857 break;
1858 }
1859
1860 VXGE_COMPLETE_ALL_TX(vdev);
1861
1862 if (pkts_processed < budget_org) {
1863 napi_complete(napi);
1864 /* Re enable the Rx interrupts for the ring */
1865 vxge_hw_device_unmask_all(hldev);
1866 vxge_hw_device_flush_io(hldev);
1867 }
1868
1869 return pkts_processed;
1870}
1871
1872#ifdef CONFIG_NET_POLL_CONTROLLER
1873/**
1874 * vxge_netpoll - netpoll event handler entry point
1875 * @dev : pointer to the device structure.
1876 * Description:
1877 * This function will be called by upper layer to check for events on the
1878 * interface in situations where interrupts are disabled. It is used for
1879 * specific in-kernel networking tasks, such as remote consoles and kernel
1880 * debugging over the network (for example, netdump in Red Hat).
1881 */
1882static void vxge_netpoll(struct net_device *dev)
1883{
1884 struct __vxge_hw_device *hldev;
1885 struct vxgedev *vdev;
1886
1887 vdev = netdev_priv(dev);
1888 hldev = pci_get_drvdata(vdev->pdev);
1889
1890 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1891
1892 if (pci_channel_offline(vdev->pdev))
1893 return;
1894
1895 disable_irq(dev->irq);
1896 vxge_hw_device_clear_tx_rx(hldev);
1897
1898
1899 VXGE_COMPLETE_ALL_RX(vdev);
1900 VXGE_COMPLETE_ALL_TX(vdev);
1901
1902 enable_irq(dev->irq);
1903
1904 vxge_debug_entryexit(VXGE_TRACE,
1905 "%s:%d Exiting...", __func__, __LINE__);
1906}
1907#endif
1908
1909/* RTH configuration */
1910static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1911{
1912 enum vxge_hw_status status = VXGE_HW_OK;
1913 struct vxge_hw_rth_hash_types hash_types;
1914 u8 itable[256] = {0}; /* indirection table */
1915 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1916 int index;
1917
1918 /*
1919 * Filling
1920 * - itable with bucket numbers
1921 * - mtable with bucket-to-vpath mapping
1922 */
1923 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1924 itable[index] = index;
1925 mtable[index] = index % vdev->no_of_vpath;
1926 }
1927
1928 /* set indirection table, bucket-to-vpath mapping */
1929 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1930 vdev->no_of_vpath,
1931 mtable, itable,
1932 vdev->config.rth_bkt_sz);
1933 if (status != VXGE_HW_OK) {
1934 vxge_debug_init(VXGE_ERR,
1935 "RTH indirection table configuration failed "
1936 "for vpath:%d", vdev->vpaths[0].device_id);
1937 return status;
1938 }
1939
1940 /* Fill RTH hash types */
1941 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1942 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1943 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1944 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1945 hash_types.hash_type_tcpipv6ex_en =
1946 vdev->config.rth_hash_type_tcpipv6ex;
1947 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1948
1949 /*
1950 * Because the itable_set() method uses the active_table field
1951 * for the target virtual path the RTH config should be updated
1952 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1953 * when steering frames.
1954 */
1955 for (index = 0; index < vdev->no_of_vpath; index++) {
1956 status = vxge_hw_vpath_rts_rth_set(
1957 vdev->vpaths[index].handle,
1958 vdev->config.rth_algorithm,
1959 &hash_types,
1960 vdev->config.rth_bkt_sz);
1961 if (status != VXGE_HW_OK) {
1962 vxge_debug_init(VXGE_ERR,
1963 "RTH configuration failed for vpath:%d",
1964 vdev->vpaths[index].device_id);
1965 return status;
1966 }
1967 }
1968
1969 return status;
1970}
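/*
 * A worked example of the tables built above (illustrative only):
 * with rth_bkt_sz == 2 (four buckets) and no_of_vpath == 3, the loop
 * produces
 *
 *	itable[] = { 0, 1, 2, 3 }	bucket numbers
 *	mtable[] = { 0, 1, 2, 0 }	bucket -> vpath
 *
 * so RSS bucket 3 wraps back around to vpath 0.
 */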
1971
1972/* reset vpaths */
1973enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1974{
1975 enum vxge_hw_status status = VXGE_HW_OK;
1976 struct vxge_vpath *vpath;
1977 int i;
1978
1979 for (i = 0; i < vdev->no_of_vpath; i++) {
1980 vpath = &vdev->vpaths[i];
1981 if (vpath->handle) {
1982 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1983 if (is_vxge_card_up(vdev) &&
1984 vxge_hw_vpath_recover_from_reset(
1985 vpath->handle) != VXGE_HW_OK) {
1986 vxge_debug_init(VXGE_ERR,
1987 "vxge_hw_vpath_recover_"
1988 "from_reset failed for vpath: "
1989 "%d", i);
1990 return status;
1991 }
1992 } else {
1993 vxge_debug_init(VXGE_ERR,
1994 "vxge_hw_vpath_reset failed for "
1995 "vpath:%d", i);
1996 return status;
1997 }
1998 }
1999 }
2000
2001 return status;
2002}
2003
2004/* close vpaths */
2005static void vxge_close_vpaths(struct vxgedev *vdev, int index)
2006{
2007 struct vxge_vpath *vpath;
2008 int i;
2009
2010 for (i = index; i < vdev->no_of_vpath; i++) {
2011 vpath = &vdev->vpaths[i];
2012
2013 if (vpath->handle && vpath->is_open) {
2014 vxge_hw_vpath_close(vpath->handle);
2015 vdev->stats.vpaths_open--;
2016 }
2017 vpath->is_open = 0;
2018 vpath->handle = NULL;
2019 }
2020}
2021
2022/* open vpaths */
2023static int vxge_open_vpaths(struct vxgedev *vdev)
2024{
2025 struct vxge_hw_vpath_attr attr;
2026 enum vxge_hw_status status;
2027 struct vxge_vpath *vpath;
2028 u32 vp_id = 0;
2029 int i;
2030
2031 for (i = 0; i < vdev->no_of_vpath; i++) {
2032 vpath = &vdev->vpaths[i];
2033 vxge_assert(vpath->is_configured);
2034
2035 if (!vdev->titan1) {
2036 struct vxge_hw_vp_config *vcfg;
2037 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2038
2039 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2040 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2041 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2042 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2043 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2044 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2045 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2046 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2047 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2048 }
2049
2050 attr.vp_id = vpath->device_id;
2051 attr.fifo_attr.callback = vxge_xmit_compl;
2052 attr.fifo_attr.txdl_term = vxge_tx_term;
2053 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2054 attr.fifo_attr.userdata = &vpath->fifo;
2055
2056 attr.ring_attr.callback = vxge_rx_1b_compl;
2057 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2058 attr.ring_attr.rxd_term = vxge_rx_term;
2059 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2060 attr.ring_attr.userdata = &vpath->ring;
2061
2062 vpath->ring.ndev = vdev->ndev;
2063 vpath->ring.pdev = vdev->pdev;
2064
2065 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2066 if (status == VXGE_HW_OK) {
2067 vpath->fifo.handle =
2068 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2069 vpath->ring.handle =
2070 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2071 vpath->fifo.tx_steering_type =
2072 vdev->config.tx_steering_type;
2073 vpath->fifo.ndev = vdev->ndev;
2074 vpath->fifo.pdev = vdev->pdev;
2075 if (vdev->config.tx_steering_type)
2076 vpath->fifo.txq =
2077 netdev_get_tx_queue(vdev->ndev, i);
2078 else
2079 vpath->fifo.txq =
2080 netdev_get_tx_queue(vdev->ndev, 0);
2081 vpath->fifo.indicate_max_pkts =
2082 vdev->config.fifo_indicate_max_pkts;
2083 vpath->fifo.tx_vector_no = 0;
2084 vpath->ring.rx_vector_no = 0;
2085 vpath->ring.rx_hwts = vdev->rx_hwts;
2086 vpath->is_open = 1;
2087 vdev->vp_handles[i] = vpath->handle;
2088 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2089 vdev->stats.vpaths_open++;
2090 } else {
2091 vdev->stats.vpath_open_fail++;
2092 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2093 "open with status: %d",
2094 vdev->ndev->name, vpath->device_id,
2095 status);
2096 vxge_close_vpaths(vdev, 0);
2097 return -EPERM;
2098 }
2099
2100 vp_id = vpath->handle->vpath->vp_id;
2101 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2102 }
2103
2104 return VXGE_HW_OK;
2105}
2106
2107/**
2108 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2109 * if the interrupts are not within a range
2110 * @fifo: pointer to transmit fifo structure
 2111 * Description: The function changes the boundary timer and restriction timer
 2112 * values depending on the traffic
2113 * Return Value: None
2114 */
2115static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2116{
2117 fifo->interrupt_count++;
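	/* Re-evaluate the coalescing settings roughly every 10 ms (HZ / 100) */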
2118 if (jiffies > fifo->jiffies + HZ / 100) {
2119 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2120
2121 fifo->jiffies = jiffies;
2122 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2123 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2124 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2125 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2126 } else if (hw_fifo->rtimer != 0) {
2127 hw_fifo->rtimer = 0;
2128 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2129 }
2130 fifo->interrupt_count = 0;
2131 }
2132}
2133
2134/**
2135 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2136 * if the interrupts are not within a range
2137 * @ring: pointer to receive ring structure
 2138 * Description: The function increases or decreases the packet counts within
2139 * the ranges of traffic utilization, if the interrupts due to this ring are
2140 * not within a fixed range.
2141 * Return Value: Nothing
2142 */
2143static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2144{
2145 ring->interrupt_count++;
2146 if (jiffies > ring->jiffies + HZ / 100) {
2147 struct __vxge_hw_ring *hw_ring = ring->handle;
2148
2149 ring->jiffies = jiffies;
2150 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2151 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2152 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2153 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2154 } else if (hw_ring->rtimer != 0) {
2155 hw_ring->rtimer = 0;
2156 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2157 }
2158 ring->interrupt_count = 0;
2159 }
2160}
2161
2162/*
2163 * vxge_isr_napi
2164 * @irq: the irq of the device.
 2165 * @dev_id: a void pointer to the vxgedev structure of the Titan device
2167 *
2168 * This function is the ISR handler of the device when napi is enabled. It
2169 * identifies the reason for the interrupt and calls the relevant service
2170 * routines.
2171 */
2172static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2173{
2174 struct net_device *dev;
2175 struct __vxge_hw_device *hldev;
2176 u64 reason;
2177 enum vxge_hw_status status;
2178 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2179
2180 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2181
2182 dev = vdev->ndev;
2183 hldev = pci_get_drvdata(vdev->pdev);
2184
2185 if (pci_channel_offline(vdev->pdev))
2186 return IRQ_NONE;
2187
2188 if (unlikely(!is_vxge_card_up(vdev)))
2189 return IRQ_HANDLED;
2190
2191 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2192 if (status == VXGE_HW_OK) {
2193 vxge_hw_device_mask_all(hldev);
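		/* Mask all device interrupts; if the work is handed to NAPI,
		 * the poll routine is expected to unmask them when it
		 * completes, otherwise they are unmasked again below.
		 */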
2194
2195 if (reason &
2196 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2197 vdev->vpaths_deployed >>
2198 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2199
2200 vxge_hw_device_clear_tx_rx(hldev);
2201 napi_schedule(&vdev->napi);
2202 vxge_debug_intr(VXGE_TRACE,
2203 "%s:%d Exiting...", __func__, __LINE__);
2204 return IRQ_HANDLED;
2205 } else
2206 vxge_hw_device_unmask_all(hldev);
2207 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2208 (status == VXGE_HW_ERR_CRITICAL) ||
2209 (status == VXGE_HW_ERR_FIFO))) {
2210 vxge_hw_device_mask_all(hldev);
2211 vxge_hw_device_flush_io(hldev);
2212 return IRQ_HANDLED;
2213 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2214 return IRQ_HANDLED;
2215
2216 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2217 return IRQ_NONE;
2218}
2219
2220#ifdef CONFIG_PCI_MSI
2221
2222static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2223{
2224 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2225
2226 adaptive_coalesce_tx_interrupts(fifo);
2227
2228 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2229 fifo->tx_vector_no);
2230
2231 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2232 fifo->tx_vector_no);
2233
2234 VXGE_COMPLETE_VPATH_TX(fifo);
2235
2236 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2237 fifo->tx_vector_no);
2238
2239 mmiowb();
2240
2241 return IRQ_HANDLED;
2242}
2243
2244static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2245{
2246 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2247
2248 adaptive_coalesce_rx_interrupts(ring);
2249
2250 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2251 ring->rx_vector_no);
2252
2253 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2254 ring->rx_vector_no);
2255
2256 napi_schedule(&ring->napi);
2257 return IRQ_HANDLED;
2258}
2259
2260static irqreturn_t
2261vxge_alarm_msix_handle(int irq, void *dev_id)
2262{
2263 int i;
2264 enum vxge_hw_status status;
2265 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2266 struct vxgedev *vdev = vpath->vdev;
2267 int msix_id = (vpath->handle->vpath->vp_id *
2268 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2269
2270 for (i = 0; i < vdev->no_of_vpath; i++) {
 2271 		/* Reduce the chance of losing alarm interrupts by masking
 2272 		 * the vector. A pending bit will be set if an alarm is
 2273 		 * generated, and the interrupt will fire on unmask.
2274 */
2275 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2276 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2277 mmiowb();
2278
2279 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2280 vdev->exec_mode);
2281 if (status == VXGE_HW_OK) {
2282 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2283 msix_id);
2284 mmiowb();
2285 continue;
2286 }
2287 vxge_debug_intr(VXGE_ERR,
2288 "%s: vxge_hw_vpath_alarm_process failed %x ",
2289 VXGE_DRIVER_NAME, status);
2290 }
2291 return IRQ_HANDLED;
2292}
2293
2294static int vxge_alloc_msix(struct vxgedev *vdev)
2295{
2296 int j, i, ret = 0;
2297 int msix_intr_vect = 0, temp;
2298 vdev->intr_cnt = 0;
2299
2300start:
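	/* Two vectors per vpath (Tx fifo + Rx ring) plus one alarm vector */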
2301 /* Tx/Rx MSIX Vectors count */
2302 vdev->intr_cnt = vdev->no_of_vpath * 2;
2303
2304 /* Alarm MSIX Vectors count */
2305 vdev->intr_cnt++;
2306
2307 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2308 GFP_KERNEL);
2309 if (!vdev->entries) {
2310 vxge_debug_init(VXGE_ERR,
2311 "%s: memory allocation failed",
2312 VXGE_DRIVER_NAME);
2313 ret = -ENOMEM;
2314 goto alloc_entries_failed;
2315 }
2316
2317 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2318 sizeof(struct vxge_msix_entry),
2319 GFP_KERNEL);
2320 if (!vdev->vxge_entries) {
2321 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2322 VXGE_DRIVER_NAME);
2323 ret = -ENOMEM;
2324 goto alloc_vxge_entries_failed;
2325 }
2326
2327 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2328
2329 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2330
2331 /* Initialize the fifo vector */
2332 vdev->entries[j].entry = msix_intr_vect;
2333 vdev->vxge_entries[j].entry = msix_intr_vect;
2334 vdev->vxge_entries[j].in_use = 0;
2335 j++;
2336
2337 /* Initialize the ring vector */
2338 vdev->entries[j].entry = msix_intr_vect + 1;
2339 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2340 vdev->vxge_entries[j].in_use = 0;
2341 j++;
2342 }
2343
2344 /* Initialize the alarm vector */
2345 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2346 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2347 vdev->vxge_entries[j].in_use = 0;
2348
2349 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2350 if (ret > 0) {
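		/* A positive return from pci_enable_msix() is the number of
		 * vectors that could have been allocated instead.
		 */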
2351 vxge_debug_init(VXGE_ERR,
2352 "%s: MSI-X enable failed for %d vectors, ret: %d",
2353 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2354 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2355 ret = -ENODEV;
2356 goto enable_msix_failed;
2357 }
2358
2359 kfree(vdev->entries);
2360 kfree(vdev->vxge_entries);
2361 vdev->entries = NULL;
2362 vdev->vxge_entries = NULL;
 2363 		/* Retry with fewer vectors by reducing the vpath count */
2364 temp = (ret - 1)/2;
2365 vxge_close_vpaths(vdev, temp);
2366 vdev->no_of_vpath = temp;
2367 goto start;
2368 } else if (ret < 0) {
2369 ret = -ENODEV;
2370 goto enable_msix_failed;
2371 }
2372 return 0;
2373
2374enable_msix_failed:
2375 kfree(vdev->vxge_entries);
2376alloc_vxge_entries_failed:
2377 kfree(vdev->entries);
2378alloc_entries_failed:
2379 return ret;
2380}
2381
2382static int vxge_enable_msix(struct vxgedev *vdev)
2383{
2384
2385 int i, ret = 0;
2386 /* 0 - Tx, 1 - Rx */
2387 int tim_msix_id[4] = {0, 1, 0, 0};
2388
2389 vdev->intr_cnt = 0;
2390
2391 /* allocate msix vectors */
2392 ret = vxge_alloc_msix(vdev);
2393 if (!ret) {
2394 for (i = 0; i < vdev->no_of_vpath; i++) {
2395 struct vxge_vpath *vpath = &vdev->vpaths[i];
2396
 2397 			/* If the fifo or ring is not enabled, the MSI-X vector for
2398 * it should be set to 0.
2399 */
2400 vpath->ring.rx_vector_no = (vpath->device_id *
2401 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2402
2403 vpath->fifo.tx_vector_no = (vpath->device_id *
2404 VXGE_HW_VPATH_MSIX_ACTIVE);
2405
2406 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2407 VXGE_ALARM_MSIX_ID);
2408 }
2409 }
2410
2411 return ret;
2412}
2413
2414static void vxge_rem_msix_isr(struct vxgedev *vdev)
2415{
2416 int intr_cnt;
2417
2418 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2419 intr_cnt++) {
2420 if (vdev->vxge_entries[intr_cnt].in_use) {
2421 synchronize_irq(vdev->entries[intr_cnt].vector);
2422 free_irq(vdev->entries[intr_cnt].vector,
2423 vdev->vxge_entries[intr_cnt].arg);
2424 vdev->vxge_entries[intr_cnt].in_use = 0;
2425 }
2426 }
2427
2428 kfree(vdev->entries);
2429 kfree(vdev->vxge_entries);
2430 vdev->entries = NULL;
2431 vdev->vxge_entries = NULL;
2432
2433 if (vdev->config.intr_type == MSI_X)
2434 pci_disable_msix(vdev->pdev);
2435}
2436#endif
2437
2438static void vxge_rem_isr(struct vxgedev *vdev)
2439{
2440 struct __vxge_hw_device *hldev;
2441 hldev = pci_get_drvdata(vdev->pdev);
2442
2443#ifdef CONFIG_PCI_MSI
2444 if (vdev->config.intr_type == MSI_X) {
2445 vxge_rem_msix_isr(vdev);
2446 } else
2447#endif
2448 if (vdev->config.intr_type == INTA) {
2449 synchronize_irq(vdev->pdev->irq);
2450 free_irq(vdev->pdev->irq, vdev);
2451 }
2452}
2453
2454static int vxge_add_isr(struct vxgedev *vdev)
2455{
2456 int ret = 0;
2457#ifdef CONFIG_PCI_MSI
2458 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2459 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2460
2461 if (vdev->config.intr_type == MSI_X)
2462 ret = vxge_enable_msix(vdev);
2463
2464 if (ret) {
2465 vxge_debug_init(VXGE_ERR,
2466 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2467 vxge_debug_init(VXGE_ERR,
2468 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2469 vdev->config.intr_type = INTA;
2470 }
2471
2472 if (vdev->config.intr_type == MSI_X) {
2473 for (intr_idx = 0;
2474 intr_idx < (vdev->no_of_vpath *
2475 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2476
2477 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2478 irq_req = 0;
2479
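			/* Within each vpath's vector pair, index 0 is the Tx
			 * fifo and index 1 is the Rx ring.
			 */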
2480 switch (msix_idx) {
2481 case 0:
2482 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2483 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2484 vdev->ndev->name,
2485 vdev->entries[intr_cnt].entry,
2486 pci_fun, vp_idx);
2487 ret = request_irq(
2488 vdev->entries[intr_cnt].vector,
2489 vxge_tx_msix_handle, 0,
2490 vdev->desc[intr_cnt],
2491 &vdev->vpaths[vp_idx].fifo);
2492 vdev->vxge_entries[intr_cnt].arg =
2493 &vdev->vpaths[vp_idx].fifo;
2494 irq_req = 1;
2495 break;
2496 case 1:
2497 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2498 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2499 vdev->ndev->name,
2500 vdev->entries[intr_cnt].entry,
2501 pci_fun, vp_idx);
2502 ret = request_irq(
2503 vdev->entries[intr_cnt].vector,
2504 vxge_rx_msix_napi_handle,
2505 0,
2506 vdev->desc[intr_cnt],
2507 &vdev->vpaths[vp_idx].ring);
2508 vdev->vxge_entries[intr_cnt].arg =
2509 &vdev->vpaths[vp_idx].ring;
2510 irq_req = 1;
2511 break;
2512 }
2513
2514 if (ret) {
2515 vxge_debug_init(VXGE_ERR,
2516 "%s: MSIX - %d Registration failed",
2517 vdev->ndev->name, intr_cnt);
2518 vxge_rem_msix_isr(vdev);
2519 vdev->config.intr_type = INTA;
2520 vxge_debug_init(VXGE_ERR,
2521 "%s: Defaulting to INTA"
2522 , vdev->ndev->name);
2523 goto INTA_MODE;
2524 }
2525
2526 if (irq_req) {
 2527 				/* We requested this MSI-X interrupt */
2528 vdev->vxge_entries[intr_cnt].in_use = 1;
2529 msix_idx += vdev->vpaths[vp_idx].device_id *
2530 VXGE_HW_VPATH_MSIX_ACTIVE;
2531 vxge_hw_vpath_msix_unmask(
2532 vdev->vpaths[vp_idx].handle,
2533 msix_idx);
2534 intr_cnt++;
2535 }
2536
2537 /* Point to next vpath handler */
2538 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2539 (vp_idx < (vdev->no_of_vpath - 1)))
2540 vp_idx++;
2541 }
2542
2543 intr_cnt = vdev->no_of_vpath * 2;
2544 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2545 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2546 vdev->ndev->name,
2547 vdev->entries[intr_cnt].entry,
2548 pci_fun);
2549 /* For Alarm interrupts */
2550 ret = request_irq(vdev->entries[intr_cnt].vector,
2551 vxge_alarm_msix_handle, 0,
2552 vdev->desc[intr_cnt],
2553 &vdev->vpaths[0]);
2554 if (ret) {
2555 vxge_debug_init(VXGE_ERR,
2556 "%s: MSIX - %d Registration failed",
2557 vdev->ndev->name, intr_cnt);
2558 vxge_rem_msix_isr(vdev);
2559 vdev->config.intr_type = INTA;
2560 vxge_debug_init(VXGE_ERR,
2561 "%s: Defaulting to INTA",
2562 vdev->ndev->name);
2563 goto INTA_MODE;
2564 }
2565
2566 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2567 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2568 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2569 msix_idx);
2570 vdev->vxge_entries[intr_cnt].in_use = 1;
2571 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2572 }
2573INTA_MODE:
2574#endif
2575
2576 if (vdev->config.intr_type == INTA) {
2577 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2578 "%s:vxge:INTA", vdev->ndev->name);
2579 vxge_hw_device_set_intr_type(vdev->devh,
2580 VXGE_HW_INTR_MODE_IRQLINE);
2581
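		/* With a single shared IRQ there is no per-fifo vector, so
		 * enable the Tx timer's continuous interrupt on vpath 0
		 * (inferred from the CI setup helper below).
		 */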
2582 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2583
2584 ret = request_irq((int) vdev->pdev->irq,
2585 vxge_isr_napi,
2586 IRQF_SHARED, vdev->desc[0], vdev);
2587 if (ret) {
2588 vxge_debug_init(VXGE_ERR,
2589 "%s %s-%d: ISR registration failed",
2590 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2591 return -ENODEV;
2592 }
2593 vxge_debug_init(VXGE_TRACE,
2594 "new %s-%d line allocated",
2595 "IRQ", vdev->pdev->irq);
2596 }
2597
2598 return VXGE_HW_OK;
2599}
2600
2601static void vxge_poll_vp_reset(unsigned long data)
2602{
2603 struct vxgedev *vdev = (struct vxgedev *)data;
2604 int i, j = 0;
2605
2606 for (i = 0; i < vdev->no_of_vpath; i++) {
2607 if (test_bit(i, &vdev->vp_reset)) {
2608 vxge_reset_vpath(vdev, i);
2609 j++;
2610 }
2611 }
2612 if (j && (vdev->config.intr_type != MSI_X)) {
2613 vxge_hw_device_unmask_all(vdev->devh);
2614 vxge_hw_device_flush_io(vdev->devh);
2615 }
2616
2617 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2618}
2619
2620static void vxge_poll_vp_lockup(unsigned long data)
2621{
2622 struct vxgedev *vdev = (struct vxgedev *)data;
2623 enum vxge_hw_status status = VXGE_HW_OK;
2624 struct vxge_vpath *vpath;
2625 struct vxge_ring *ring;
2626 int i;
2627 unsigned long rx_frms;
2628
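	/* A vpath is treated as hung when it saw no Rx frames since the
	 * last poll and the hardware leak check failed on two consecutive
	 * polls; such vpaths are scheduled for a reset.
	 */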
2629 for (i = 0; i < vdev->no_of_vpath; i++) {
2630 ring = &vdev->vpaths[i].ring;
2631
 2632 		/* Frame count, truncated to machine word size */
2633 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
2634
 2635 		/* Did this vpath receive any packets? */
2636 if (ring->stats.prev_rx_frms == rx_frms) {
2637 status = vxge_hw_vpath_check_leak(ring->handle);
2638
 2639 			/* Did it receive any packets last time? */
2640 if ((VXGE_HW_FAIL == status) &&
2641 (VXGE_HW_FAIL == ring->last_status)) {
2642
2643 /* schedule vpath reset */
2644 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2645 vpath = &vdev->vpaths[i];
2646
2647 /* disable interrupts for this vpath */
2648 vxge_vpath_intr_disable(vdev, i);
2649
2650 /* stop the queue for this vpath */
2651 netif_tx_stop_queue(vpath->fifo.txq);
2652 continue;
2653 }
2654 }
2655 }
2656 ring->stats.prev_rx_frms = rx_frms;
2657 ring->last_status = status;
2658 }
2659
 2660 	/* Check every millisecond */
2661 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2662}
2663
2664static u32 vxge_fix_features(struct net_device *dev, u32 features)
2665{
2666 u32 changed = dev->features ^ features;
2667
2668 /* Enabling RTH requires some of the logic in vxge_device_register and a
2669 * vpath reset. Due to these restrictions, only allow modification
2670 * while the interface is down.
2671 */
2672 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2673 features ^= NETIF_F_RXHASH;
2674
2675 return features;
2676}
2677
2678static int vxge_set_features(struct net_device *dev, u32 features)
2679{
2680 struct vxgedev *vdev = netdev_priv(dev);
2681 u32 changed = dev->features ^ features;
2682
2683 if (!(changed & NETIF_F_RXHASH))
2684 return 0;
2685
2686 /* !netif_running() ensured by vxge_fix_features() */
2687
2688 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2689 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2690 dev->features = features ^ NETIF_F_RXHASH;
2691 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2692 return -EIO;
2693 }
2694
2695 return 0;
2696}
2697
2698/**
2699 * vxge_open
2700 * @dev: pointer to the device structure.
2701 *
2702 * This function is the open entry point of the driver. It mainly calls a
2703 * function to allocate Rx buffers and inserts them into the buffer
2704 * descriptors and then enables the Rx part of the NIC.
 2705 * Return value: '0' on success and an appropriate negative errno value
 2706 * on failure.
2707 */
2708static int vxge_open(struct net_device *dev)
2709{
2710 enum vxge_hw_status status;
2711 struct vxgedev *vdev;
2712 struct __vxge_hw_device *hldev;
2713 struct vxge_vpath *vpath;
2714 int ret = 0;
2715 int i;
2716 u64 val64, function_mode;
2717
2718 vxge_debug_entryexit(VXGE_TRACE,
2719 "%s: %s:%d", dev->name, __func__, __LINE__);
2720
2721 vdev = netdev_priv(dev);
2722 hldev = pci_get_drvdata(vdev->pdev);
2723 function_mode = vdev->config.device_hw_info.function_mode;
2724
 2725 	/* make sure the link is off by default every time the NIC is
 2726 	 * initialized */
2727 netif_carrier_off(dev);
2728
2729 /* Open VPATHs */
2730 status = vxge_open_vpaths(vdev);
2731 if (status != VXGE_HW_OK) {
2732 vxge_debug_init(VXGE_ERR,
2733 "%s: fatal: Vpath open failed", vdev->ndev->name);
2734 ret = -EPERM;
2735 goto out0;
2736 }
2737
2738 vdev->mtu = dev->mtu;
2739
2740 status = vxge_add_isr(vdev);
2741 if (status != VXGE_HW_OK) {
2742 vxge_debug_init(VXGE_ERR,
2743 "%s: fatal: ISR add failed", dev->name);
2744 ret = -EPERM;
2745 goto out1;
2746 }
2747
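	/* INTA shares one NAPI context across the whole device; MSI-X
	 * gives each ring its own NAPI context.
	 */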
2748 if (vdev->config.intr_type != MSI_X) {
2749 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2750 vdev->config.napi_weight);
2751 napi_enable(&vdev->napi);
2752 for (i = 0; i < vdev->no_of_vpath; i++) {
2753 vpath = &vdev->vpaths[i];
2754 vpath->ring.napi_p = &vdev->napi;
2755 }
2756 } else {
2757 for (i = 0; i < vdev->no_of_vpath; i++) {
2758 vpath = &vdev->vpaths[i];
2759 netif_napi_add(dev, &vpath->ring.napi,
2760 vxge_poll_msix, vdev->config.napi_weight);
2761 napi_enable(&vpath->ring.napi);
2762 vpath->ring.napi_p = &vpath->ring.napi;
2763 }
2764 }
2765
2766 /* configure RTH */
2767 if (vdev->config.rth_steering) {
2768 status = vxge_rth_configure(vdev);
2769 if (status != VXGE_HW_OK) {
2770 vxge_debug_init(VXGE_ERR,
2771 "%s: fatal: RTH configuration failed",
2772 dev->name);
2773 ret = -EPERM;
2774 goto out2;
2775 }
2776 }
2777 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2778 hldev->config.rth_en ? "enabled" : "disabled");
2779
2780 for (i = 0; i < vdev->no_of_vpath; i++) {
2781 vpath = &vdev->vpaths[i];
2782
2783 /* set initial mtu before enabling the device */
2784 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2785 if (status != VXGE_HW_OK) {
2786 vxge_debug_init(VXGE_ERR,
 2787 				"%s: fatal: cannot set new MTU", dev->name);
2788 ret = -EPERM;
2789 goto out2;
2790 }
2791 }
2792
2793 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2794 vxge_debug_init(vdev->level_trace,
2795 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2796 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2797
2798 /* Restore the DA, VID table and also multicast and promiscuous mode
2799 * states
2800 */
2801 if (vdev->all_multi_flg) {
2802 for (i = 0; i < vdev->no_of_vpath; i++) {
2803 vpath = &vdev->vpaths[i];
2804 vxge_restore_vpath_mac_addr(vpath);
2805 vxge_restore_vpath_vid_table(vpath);
2806
2807 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2808 if (status != VXGE_HW_OK)
2809 vxge_debug_init(VXGE_ERR,
2810 "%s:%d Enabling multicast failed",
2811 __func__, __LINE__);
2812 }
2813 }
2814
 2815 	/* Enable vpaths to sniff all unicast/multicast traffic that is not
 2816 	 * addressed to them. Promiscuous mode is allowed for the PF only.
2817 */
2818
2819 val64 = 0;
2820 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2821 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2822
2823 vxge_hw_mgmt_reg_write(vdev->devh,
2824 vxge_hw_mgmt_reg_type_mrpcim,
2825 0,
2826 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2827 rxmac_authorize_all_addr),
2828 val64);
2829
2830 vxge_hw_mgmt_reg_write(vdev->devh,
2831 vxge_hw_mgmt_reg_type_mrpcim,
2832 0,
2833 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2834 rxmac_authorize_all_vid),
2835 val64);
2836
2837 vxge_set_multicast(dev);
2838
 2839 	/* Enable bcast and mcast for all vpaths */
2840 for (i = 0; i < vdev->no_of_vpath; i++) {
2841 vpath = &vdev->vpaths[i];
2842 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2843 if (status != VXGE_HW_OK)
2844 vxge_debug_init(VXGE_ERR,
 2845 				"%s : Cannot enable bcast for vpath "
2846 "id %d", dev->name, i);
2847 if (vdev->config.addr_learn_en) {
2848 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2849 if (status != VXGE_HW_OK)
2850 vxge_debug_init(VXGE_ERR,
 2851 					"%s : Cannot enable mcast for vpath "
2852 "id %d", dev->name, i);
2853 }
2854 }
2855
2856 vxge_hw_device_setpause_data(vdev->devh, 0,
2857 vdev->config.tx_pause_enable,
2858 vdev->config.rx_pause_enable);
2859
2860 if (vdev->vp_reset_timer.function == NULL)
2861 vxge_os_timer(vdev->vp_reset_timer,
2862 vxge_poll_vp_reset, vdev, (HZ/2));
2863
2864 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2865 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2866 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2867 HZ / 2);
2868
2869 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2870
2871 smp_wmb();
2872
2873 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2874 netif_carrier_on(vdev->ndev);
2875 netdev_notice(vdev->ndev, "Link Up\n");
2876 vdev->stats.link_up++;
2877 }
2878
2879 vxge_hw_device_intr_enable(vdev->devh);
2880
2881 smp_wmb();
2882
2883 for (i = 0; i < vdev->no_of_vpath; i++) {
2884 vpath = &vdev->vpaths[i];
2885
2886 vxge_hw_vpath_enable(vpath->handle);
2887 smp_wmb();
2888 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2889 }
2890
2891 netif_tx_start_all_queues(vdev->ndev);
2892
2893 /* configure CI */
2894 vxge_config_ci_for_tti_rti(vdev);
2895
2896 goto out0;
2897
2898out2:
2899 vxge_rem_isr(vdev);
2900
2901 /* Disable napi */
2902 if (vdev->config.intr_type != MSI_X)
2903 napi_disable(&vdev->napi);
2904 else {
2905 for (i = 0; i < vdev->no_of_vpath; i++)
2906 napi_disable(&vdev->vpaths[i].ring.napi);
2907 }
2908
2909out1:
2910 vxge_close_vpaths(vdev, 0);
2911out0:
2912 vxge_debug_entryexit(VXGE_TRACE,
2913 "%s: %s:%d Exiting...",
2914 dev->name, __func__, __LINE__);
2915 return ret;
2916}
2917
2918/* Loop through the mac address list and delete all the entries */
2919static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2920{
2921
2922 struct list_head *entry, *next;
2923 if (list_empty(&vpath->mac_addr_list))
2924 return;
2925
2926 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2927 list_del(entry);
2928 kfree((struct vxge_mac_addrs *)entry);
2929 }
2930}
2931
2932static void vxge_napi_del_all(struct vxgedev *vdev)
2933{
2934 int i;
2935 if (vdev->config.intr_type != MSI_X)
2936 netif_napi_del(&vdev->napi);
2937 else {
2938 for (i = 0; i < vdev->no_of_vpath; i++)
2939 netif_napi_del(&vdev->vpaths[i].ring.napi);
2940 }
2941}
2942
2943static int do_vxge_close(struct net_device *dev, int do_io)
2944{
2945 enum vxge_hw_status status;
2946 struct vxgedev *vdev;
2947 struct __vxge_hw_device *hldev;
2948 int i;
2949 u64 val64, vpath_vector;
2950 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2951 dev->name, __func__, __LINE__);
2952
2953 vdev = netdev_priv(dev);
2954 hldev = pci_get_drvdata(vdev->pdev);
2955
2956 if (unlikely(!is_vxge_card_up(vdev)))
2957 return 0;
2958
2959 /* If vxge_handle_crit_err task is executing,
2960 * wait till it completes. */
2961 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2962 msleep(50);
2963
2964 if (do_io) {
2965 /* Put the vpath back in normal mode */
2966 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2967 status = vxge_hw_mgmt_reg_read(vdev->devh,
2968 vxge_hw_mgmt_reg_type_mrpcim,
2969 0,
2970 (ulong)offsetof(
2971 struct vxge_hw_mrpcim_reg,
2972 rts_mgr_cbasin_cfg),
2973 &val64);
2974 if (status == VXGE_HW_OK) {
2975 val64 &= ~vpath_vector;
2976 status = vxge_hw_mgmt_reg_write(vdev->devh,
2977 vxge_hw_mgmt_reg_type_mrpcim,
2978 0,
2979 (ulong)offsetof(
2980 struct vxge_hw_mrpcim_reg,
2981 rts_mgr_cbasin_cfg),
2982 val64);
2983 }
2984
2985 /* Remove the function 0 from promiscuous mode */
2986 vxge_hw_mgmt_reg_write(vdev->devh,
2987 vxge_hw_mgmt_reg_type_mrpcim,
2988 0,
2989 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2990 rxmac_authorize_all_addr),
2991 0);
2992
2993 vxge_hw_mgmt_reg_write(vdev->devh,
2994 vxge_hw_mgmt_reg_type_mrpcim,
2995 0,
2996 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2997 rxmac_authorize_all_vid),
2998 0);
2999
3000 smp_wmb();
3001 }
3002
3003 if (vdev->titan1)
3004 del_timer_sync(&vdev->vp_lockup_timer);
3005
3006 del_timer_sync(&vdev->vp_reset_timer);
3007
3008 if (do_io)
3009 vxge_hw_device_wait_receive_idle(hldev);
3010
3011 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3012
3013 /* Disable napi */
3014 if (vdev->config.intr_type != MSI_X)
3015 napi_disable(&vdev->napi);
3016 else {
3017 for (i = 0; i < vdev->no_of_vpath; i++)
3018 napi_disable(&vdev->vpaths[i].ring.napi);
3019 }
3020
3021 netif_carrier_off(vdev->ndev);
3022 netdev_notice(vdev->ndev, "Link Down\n");
3023 netif_tx_stop_all_queues(vdev->ndev);
3024
3025 /* Note that at this point xmit() is stopped by upper layer */
3026 if (do_io)
3027 vxge_hw_device_intr_disable(vdev->devh);
3028
3029 vxge_rem_isr(vdev);
3030
3031 vxge_napi_del_all(vdev);
3032
3033 if (do_io)
3034 vxge_reset_all_vpaths(vdev);
3035
3036 vxge_close_vpaths(vdev, 0);
3037
3038 vxge_debug_entryexit(VXGE_TRACE,
3039 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3040
3041 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3042
3043 return 0;
3044}
3045
3046/**
3047 * vxge_close
3048 * @dev: device pointer.
3049 *
3050 * This is the stop entry point of the driver. It needs to undo exactly
3051 * whatever was done by the open entry point, thus it's usually referred to
 3052 * as the close function. Among other things this function mainly stops the
3053 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 3054 * Return value: '0' on success and an appropriate negative errno value
 3055 * on failure.
3056 */
3057static int vxge_close(struct net_device *dev)
3058{
3059 do_vxge_close(dev, 1);
3060 return 0;
3061}
3062
3063/**
3064 * vxge_change_mtu
3065 * @dev: net device pointer.
 3066 * @new_mtu: the new MTU size for the device.
3067 *
3068 * A driver entry point to change MTU size for the device. Before changing
3069 * the MTU the device must be stopped.
3070 */
3071static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3072{
3073 struct vxgedev *vdev = netdev_priv(dev);
3074
3075 vxge_debug_entryexit(vdev->level_trace,
3076 "%s:%d", __func__, __LINE__);
3077 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3078 vxge_debug_init(vdev->level_err,
3079 "%s: mtu size is invalid", dev->name);
3080 return -EPERM;
3081 }
3082
3083 /* check if device is down already */
3084 if (unlikely(!is_vxge_card_up(vdev))) {
 3085 		/* just store the new value; it will be used later in open() */
3086 dev->mtu = new_mtu;
3087 vxge_debug_init(vdev->level_err,
3088 "%s", "device is down on MTU change");
3089 return 0;
3090 }
3091
3092 vxge_debug_init(vdev->level_trace,
3093 "trying to apply new MTU %d", new_mtu);
3094
3095 if (vxge_close(dev))
3096 return -EIO;
3097
3098 dev->mtu = new_mtu;
3099 vdev->mtu = new_mtu;
3100
3101 if (vxge_open(dev))
3102 return -EIO;
3103
3104 vxge_debug_init(vdev->level_trace,
3105 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3106
3107 vxge_debug_entryexit(vdev->level_trace,
3108 "%s:%d Exiting...", __func__, __LINE__);
3109
3110 return 0;
3111}
3112
3113/**
3114 * vxge_get_stats64
3115 * @dev: pointer to the device structure
 3116 * @net_stats: pointer to struct rtnl_link_stats64
3117 *
3118 */
3119static struct rtnl_link_stats64 *
3120vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3121{
3122 struct vxgedev *vdev = netdev_priv(dev);
3123 int k;
3124
3125 /* net_stats already zeroed by caller */
3126 for (k = 0; k < vdev->no_of_vpath; k++) {
3127 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3128 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3129 unsigned int start;
3130 u64 packets, bytes, multicast;
3131
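		/* Read the Rx counters under the u64_stats seqcount so that
		 * 64-bit values are fetched consistently on 32-bit hosts.
		 */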
3132 do {
3133 start = u64_stats_fetch_begin(&rxstats->syncp);
3134
3135 packets = rxstats->rx_frms;
3136 multicast = rxstats->rx_mcast;
3137 bytes = rxstats->rx_bytes;
3138 } while (u64_stats_fetch_retry(&rxstats->syncp, start));
3139
3140 net_stats->rx_packets += packets;
3141 net_stats->rx_bytes += bytes;
3142 net_stats->multicast += multicast;
3143
3144 net_stats->rx_errors += rxstats->rx_errors;
3145 net_stats->rx_dropped += rxstats->rx_dropped;
3146
3147 do {
3148 start = u64_stats_fetch_begin(&txstats->syncp);
3149
3150 packets = txstats->tx_frms;
3151 bytes = txstats->tx_bytes;
3152 } while (u64_stats_fetch_retry(&txstats->syncp, start));
3153
3154 net_stats->tx_packets += packets;
3155 net_stats->tx_bytes += bytes;
3156 net_stats->tx_errors += txstats->tx_errors;
3157 }
3158
3159 return net_stats;
3160}
3161
3162static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3163{
3164 enum vxge_hw_status status;
3165 u64 val64;
3166
3167 /* Timestamp is passed to the driver via the FCS, therefore we
3168 * must disable the FCS stripping by the adapter. Since this is
3169 * required for the driver to load (due to a hardware bug),
3170 * there is no need to do anything special here.
3171 */
3172 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3173 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3174 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3175
3176 status = vxge_hw_mgmt_reg_write(devh,
3177 vxge_hw_mgmt_reg_type_mrpcim,
3178 0,
3179 offsetof(struct vxge_hw_mrpcim_reg,
3180 xmac_timestamp),
3181 val64);
3182 vxge_hw_device_flush_io(devh);
3183 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3184 return status;
3185}
3186
3187static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3188{
3189 struct hwtstamp_config config;
3190 int i;
3191
3192 if (copy_from_user(&config, data, sizeof(config)))
3193 return -EFAULT;
3194
3195 /* reserved for future extensions */
3196 if (config.flags)
3197 return -EINVAL;
3198
3199 /* Transmit HW Timestamp not supported */
3200 switch (config.tx_type) {
3201 case HWTSTAMP_TX_OFF:
3202 break;
3203 case HWTSTAMP_TX_ON:
3204 default:
3205 return -ERANGE;
3206 }
3207
3208 switch (config.rx_filter) {
3209 case HWTSTAMP_FILTER_NONE:
3210 vdev->rx_hwts = 0;
3211 config.rx_filter = HWTSTAMP_FILTER_NONE;
3212 break;
3213
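	/* Timestamping here is all-or-nothing: any PTP filter request is
	 * coerced to HWTSTAMP_FILTER_ALL.
	 */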
3214 case HWTSTAMP_FILTER_ALL:
3215 case HWTSTAMP_FILTER_SOME:
3216 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3217 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3218 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3219 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3220 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3221 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3222 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3223 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3224 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3225 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3226 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3227 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3228 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3229 return -EFAULT;
3230
3231 vdev->rx_hwts = 1;
3232 config.rx_filter = HWTSTAMP_FILTER_ALL;
3233 break;
3234
3235 default:
3236 return -ERANGE;
3237 }
3238
3239 for (i = 0; i < vdev->no_of_vpath; i++)
3240 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3241
3242 if (copy_to_user(data, &config, sizeof(config)))
3243 return -EFAULT;
3244
3245 return 0;
3246}
3247
3248/**
3249 * vxge_ioctl
3250 * @dev: Device pointer.
 3251 * @rq: An IOCTL-specific structure that can contain a pointer to
3252 * a proprietary structure used to pass information to the driver.
3253 * @cmd: This is used to distinguish between the different commands that
3254 * can be passed to the IOCTL functions.
3255 *
3256 * Entry point for the Ioctl.
3257 */
3258static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3259{
3260 struct vxgedev *vdev = netdev_priv(dev);
3261 int ret;
3262
3263 switch (cmd) {
3264 case SIOCSHWTSTAMP:
3265 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3266 if (ret)
3267 return ret;
3268 break;
3269 default:
3270 return -EOPNOTSUPP;
3271 }
3272
3273 return 0;
3274}
3275
3276/**
3277 * vxge_tx_watchdog
3278 * @dev: pointer to net device structure
3279 *
3280 * Watchdog for transmit side.
3281 * This function is triggered if the Tx Queue is stopped
3282 * for a pre-defined amount of time when the Interface is still up.
3283 */
3284static void vxge_tx_watchdog(struct net_device *dev)
3285{
3286 struct vxgedev *vdev;
3287
3288 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3289
3290 vdev = netdev_priv(dev);
3291
3292 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3293
3294 schedule_work(&vdev->reset_task);
3295 vxge_debug_entryexit(VXGE_TRACE,
3296 "%s:%d Exiting...", __func__, __LINE__);
3297}
3298
3299/**
3300 * vxge_vlan_rx_add_vid
3301 * @dev: net device pointer.
3302 * @vid: vid
3303 *
 3304 * Add the vlan id to the device's vlan id table
3305 */
3306static void
3307vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3308{
3309 struct vxgedev *vdev = netdev_priv(dev);
3310 struct vxge_vpath *vpath;
3311 int vp_id;
3312
3313 /* Add these vlan to the vid table */
3314 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3315 vpath = &vdev->vpaths[vp_id];
3316 if (!vpath->is_open)
3317 continue;
3318 vxge_hw_vpath_vid_add(vpath->handle, vid);
3319 }
3320 set_bit(vid, vdev->active_vlans);
3321}
3322
3323/**
 3324 * vxge_vlan_rx_kill_vid
3325 * @dev: net device pointer.
3326 * @vid: vid
3327 *
3328 * Remove the vlan id from the device's vlan id table
3329 */
3330static void
3331vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3332{
3333 struct vxgedev *vdev = netdev_priv(dev);
3334 struct vxge_vpath *vpath;
3335 int vp_id;
3336
3337 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3338
3339 /* Delete this vlan from the vid table */
3340 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3341 vpath = &vdev->vpaths[vp_id];
3342 if (!vpath->is_open)
3343 continue;
3344 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3345 }
3346 vxge_debug_entryexit(VXGE_TRACE,
3347 "%s:%d Exiting...", __func__, __LINE__);
3348 clear_bit(vid, vdev->active_vlans);
3349}
3350
3351static const struct net_device_ops vxge_netdev_ops = {
3352 .ndo_open = vxge_open,
3353 .ndo_stop = vxge_close,
3354 .ndo_get_stats64 = vxge_get_stats64,
3355 .ndo_start_xmit = vxge_xmit,
3356 .ndo_validate_addr = eth_validate_addr,
3357 .ndo_set_multicast_list = vxge_set_multicast,
3358 .ndo_do_ioctl = vxge_ioctl,
3359 .ndo_set_mac_address = vxge_set_mac_addr,
3360 .ndo_change_mtu = vxge_change_mtu,
3361 .ndo_fix_features = vxge_fix_features,
3362 .ndo_set_features = vxge_set_features,
3363 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3364 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3365 .ndo_tx_timeout = vxge_tx_watchdog,
3366#ifdef CONFIG_NET_POLL_CONTROLLER
3367 .ndo_poll_controller = vxge_netpoll,
3368#endif
3369};
3370
3371static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3372 struct vxge_config *config,
3373 int high_dma, int no_of_vpath,
3374 struct vxgedev **vdev_out)
3375{
3376 struct net_device *ndev;
3377 enum vxge_hw_status status = VXGE_HW_OK;
3378 struct vxgedev *vdev;
3379 int ret = 0, no_of_queue = 1;
3380 u64 stat;
3381
3382 *vdev_out = NULL;
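	/* One Tx queue per vpath when Tx steering is enabled; a single
	 * queue otherwise.
	 */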
3383 if (config->tx_steering_type)
3384 no_of_queue = no_of_vpath;
3385
3386 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3387 no_of_queue);
3388 if (ndev == NULL) {
3389 vxge_debug_init(
3390 vxge_hw_device_trace_level_get(hldev),
3391 "%s : device allocation failed", __func__);
3392 ret = -ENODEV;
3393 goto _out0;
3394 }
3395
3396 vxge_debug_entryexit(
3397 vxge_hw_device_trace_level_get(hldev),
3398 "%s: %s:%d Entering...",
3399 ndev->name, __func__, __LINE__);
3400
3401 vdev = netdev_priv(ndev);
3402 memset(vdev, 0, sizeof(struct vxgedev));
3403
3404 vdev->ndev = ndev;
3405 vdev->devh = hldev;
3406 vdev->pdev = hldev->pdev;
3407 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3408 vdev->rx_hwts = 0;
3409 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3410
3411 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3412
3413 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3414 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3415 NETIF_F_TSO | NETIF_F_TSO6 |
3416 NETIF_F_HW_VLAN_TX;
3417 if (vdev->config.rth_steering != NO_STEERING)
3418 ndev->hw_features |= NETIF_F_RXHASH;
3419
3420 ndev->features |= ndev->hw_features |
3421 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3422
3423 /* Driver entry points */
3424 ndev->irq = vdev->pdev->irq;
3425 ndev->base_addr = (unsigned long) hldev->bar0;
3426
3427 ndev->netdev_ops = &vxge_netdev_ops;
3428
3429 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3430 INIT_WORK(&vdev->reset_task, vxge_reset);
3431
3432 vxge_initialize_ethtool_ops(ndev);
3433
3434 /* Allocate memory for vpath */
3435 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3436 no_of_vpath, GFP_KERNEL);
3437 if (!vdev->vpaths) {
3438 vxge_debug_init(VXGE_ERR,
3439 "%s: vpath memory allocation failed",
3440 vdev->ndev->name);
3441 ret = -ENOMEM;
3442 goto _out1;
3443 }
3444
3445 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
 3446 		"%s : checksumming enabled", __func__);
3447
3448 if (high_dma) {
3449 ndev->features |= NETIF_F_HIGHDMA;
3450 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3451 "%s : using High DMA", __func__);
3452 }
3453
3454 ret = register_netdev(ndev);
3455 if (ret) {
3456 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3457 "%s: %s : device registration failed!",
3458 ndev->name, __func__);
3459 goto _out2;
3460 }
3461
3462 /* Set the factory defined MAC address initially */
3463 ndev->addr_len = ETH_ALEN;
3464
3465 /* Make Link state as off at this point, when the Link change
3466 * interrupt comes the state will be automatically changed to
3467 * the right state.
3468 */
3469 netif_carrier_off(ndev);
3470
3471 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3472 "%s: Ethernet device registered",
3473 ndev->name);
3474
3475 hldev->ndev = ndev;
3476 *vdev_out = vdev;
3477
3478 /* Resetting the Device stats */
3479 status = vxge_hw_mrpcim_stats_access(
3480 hldev,
3481 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3482 0,
3483 0,
3484 &stat);
3485
3486 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3487 vxge_debug_init(
3488 vxge_hw_device_trace_level_get(hldev),
 3489 			"%s: device stats clear returns "
3490 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3491
3492 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3493 "%s: %s:%d Exiting...",
3494 ndev->name, __func__, __LINE__);
3495
3496 return ret;
3497_out2:
3498 kfree(vdev->vpaths);
3499_out1:
3500 free_netdev(ndev);
3501_out0:
3502 return ret;
3503}
3504
3505/*
3506 * vxge_device_unregister
3507 *
3508 * This function will unregister and free network device
3509 */
3510static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3511{
3512 struct vxgedev *vdev;
3513 struct net_device *dev;
3514 char buf[IFNAMSIZ];
3515
3516 dev = hldev->ndev;
3517 vdev = netdev_priv(dev);
3518
3519 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3520 __func__, __LINE__);
3521
3522 strncpy(buf, dev->name, IFNAMSIZ);
3523
3524 flush_work_sync(&vdev->reset_task);
3525
 3526 	/* unregister_netdev() will call stop() if the device is up */
3527 unregister_netdev(dev);
3528
3529 kfree(vdev->vpaths);
3530
3531 /* we are safe to free it now */
3532 free_netdev(dev);
3533
3534 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3535 buf);
3536 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3537 __func__, __LINE__);
3538}
3539
3540/*
3541 * vxge_callback_crit_err
3542 *
3543 * This function is called by the alarm handler in interrupt context.
 3544 * The driver must analyze it based on the event type.
3545 */
3546static void
3547vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3548 enum vxge_hw_event type, u64 vp_id)
3549{
3550 struct net_device *dev = hldev->ndev;
3551 struct vxgedev *vdev = netdev_priv(dev);
3552 struct vxge_vpath *vpath = NULL;
3553 int vpath_idx;
3554
3555 vxge_debug_entryexit(vdev->level_trace,
3556 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3557
3558 /* Note: This event type should be used for device wide
3559 * indications only - Serious errors, Slot freeze and critical errors
3560 */
3561 vdev->cric_err_event = type;
3562
3563 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3564 vpath = &vdev->vpaths[vpath_idx];
3565 if (vpath->device_id == vp_id)
3566 break;
3567 }
3568
3569 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3570 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3571 vxge_debug_init(VXGE_ERR,
3572 "%s: Slot is frozen", vdev->ndev->name);
3573 } else if (type == VXGE_HW_EVENT_SERR) {
3574 vxge_debug_init(VXGE_ERR,
3575 "%s: Encountered Serious Error",
3576 vdev->ndev->name);
3577 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3578 vxge_debug_init(VXGE_ERR,
3579 "%s: Encountered Critical Error",
3580 vdev->ndev->name);
3581 }
3582
3583 if ((type == VXGE_HW_EVENT_SERR) ||
3584 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3585 if (unlikely(vdev->exec_mode))
3586 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3587 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3588 vxge_hw_device_mask_all(hldev);
3589 if (unlikely(vdev->exec_mode))
3590 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3591 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3592 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3593
3594 if (unlikely(vdev->exec_mode))
3595 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3596 else {
3597 /* check if this vpath is already set for reset */
3598 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3599
3600 /* disable interrupts for this vpath */
3601 vxge_vpath_intr_disable(vdev, vpath_idx);
3602
3603 /* stop the queue for this vpath */
3604 netif_tx_stop_queue(vpath->fifo.txq);
3605 }
3606 }
3607 }
3608
3609 vxge_debug_entryexit(vdev->level_trace,
3610 "%s: %s:%d Exiting...",
3611 vdev->ndev->name, __func__, __LINE__);
3612}
3613
3614static void verify_bandwidth(void)
3615{
3616 int i, band_width, total = 0, equal_priority = 0;
3617
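	/* bw_percentage[] is the per-vpath bandwidth module parameter;
	 * 0xFF appears to mark entries the user left unset.
	 */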
 3618 	/* 1. If the user enters 0 for any vpath, give equal priority to all */
3619 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3620 if (bw_percentage[i] == 0) {
3621 equal_priority = 1;
3622 break;
3623 }
3624 }
3625
3626 if (!equal_priority) {
3627 /* 2. If sum exceeds 100, give equal priority to all */
3628 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3629 if (bw_percentage[i] == 0xFF)
3630 break;
3631
3632 total += bw_percentage[i];
3633 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3634 equal_priority = 1;
3635 break;
3636 }
3637 }
3638 }
3639
3640 if (!equal_priority) {
3641 /* Is all the bandwidth consumed? */
3642 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3643 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
 3644 				/* Split the rest of the bandwidth equally among the remaining VPs */
3645 band_width =
3646 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3647 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3648 if (band_width < 2) /* min of 2% */
3649 equal_priority = 1;
3650 else {
3651 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3652 i++)
3653 bw_percentage[i] =
3654 band_width;
3655 }
3656 }
3657 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3658 equal_priority = 1;
3659 }
3660
3661 if (equal_priority) {
3662 vxge_debug_init(VXGE_ERR,
3663 "%s: Assigning equal bandwidth to all the vpaths",
3664 VXGE_DRIVER_NAME);
3665 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3666 VXGE_HW_MAX_VIRTUAL_PATHS;
3667 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3668 bw_percentage[i] = bw_percentage[0];
3669 }
3670}
3671
3672/*
3673 * Vpath configuration
3674 */
3675static int __devinit vxge_config_vpaths(
3676 struct vxge_hw_device_config *device_config,
3677 u64 vpath_mask, struct vxge_config *config_param)
3678{
3679 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3680 u32 txdl_size, txdl_per_memblock;
3681
3682 temp = driver_config->vpath_per_dev;
3683 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3684 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
 3685 		/* No more CPUs. Return the vpath count as zero. */
3686 if (driver_config->g_no_cpus == -1)
3687 return 0;
3688
3689 if (!driver_config->g_no_cpus)
3690 driver_config->g_no_cpus = num_online_cpus();
3691
3692 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3693 if (!driver_config->vpath_per_dev)
3694 driver_config->vpath_per_dev = 1;
3695
3696 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3697 if (!vxge_bVALn(vpath_mask, i, 1))
3698 continue;
3699 else
3700 default_no_vpath++;
3701 if (default_no_vpath < driver_config->vpath_per_dev)
3702 driver_config->vpath_per_dev = default_no_vpath;
3703
3704 driver_config->g_no_cpus = driver_config->g_no_cpus -
3705 (driver_config->vpath_per_dev * 2);
3706 if (driver_config->g_no_cpus <= 0)
3707 driver_config->g_no_cpus = -1;
3708 }
3709
3710 if (driver_config->vpath_per_dev == 1) {
3711 vxge_debug_ll_config(VXGE_TRACE,
3712 "%s: Disable tx and rx steering, "
 3713 			"as a single vpath is configured", VXGE_DRIVER_NAME);
3714 config_param->rth_steering = NO_STEERING;
3715 config_param->tx_steering_type = NO_STEERING;
3716 device_config->rth_en = 0;
3717 }
3718
3719 /* configure bandwidth */
3720 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3721 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3722
3723 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3724 device_config->vp_config[i].vp_id = i;
3725 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3726 if (no_of_vpaths < driver_config->vpath_per_dev) {
3727 if (!vxge_bVALn(vpath_mask, i, 1)) {
3728 vxge_debug_ll_config(VXGE_TRACE,
3729 "%s: vpath: %d is not available",
3730 VXGE_DRIVER_NAME, i);
3731 continue;
3732 } else {
3733 vxge_debug_ll_config(VXGE_TRACE,
3734 "%s: vpath: %d available",
3735 VXGE_DRIVER_NAME, i);
3736 no_of_vpaths++;
3737 }
3738 } else {
3739 vxge_debug_ll_config(VXGE_TRACE,
3740 "%s: vpath: %d is not configured, "
3741 "max_config_vpath exceeded",
3742 VXGE_DRIVER_NAME, i);
3743 break;
3744 }
3745
3746 /* Configure Tx fifo's */
3747 device_config->vp_config[i].fifo.enable =
3748 VXGE_HW_FIFO_ENABLE;
3749 device_config->vp_config[i].fifo.max_frags =
3750 MAX_SKB_FRAGS + 1;
3751 device_config->vp_config[i].fifo.memblock_size =
3752 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3753
3754 txdl_size = device_config->vp_config[i].fifo.max_frags *
3755 sizeof(struct vxge_hw_fifo_txd);
3756 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3757
3758 device_config->vp_config[i].fifo.fifo_blocks =
3759 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3760
3761 device_config->vp_config[i].fifo.intr =
3762 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3763
3764 /* Configure tti properties */
3765 device_config->vp_config[i].tti.intr_enable =
3766 VXGE_HW_TIM_INTR_ENABLE;
3767
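		/* Timer values are given in microseconds; the 1000/272
		 * scaling suggests the adapter's TIM unit ticks at roughly
		 * 272 ns.
		 */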
3768 device_config->vp_config[i].tti.btimer_val =
3769 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3770
3771 device_config->vp_config[i].tti.timer_ac_en =
3772 VXGE_HW_TIM_TIMER_AC_ENABLE;
3773
3774 /* For msi-x with napi (each vector has a handler of its own) -
3775 * Set CI to OFF for all vpaths
3776 */
3777 device_config->vp_config[i].tti.timer_ci_en =
3778 VXGE_HW_TIM_TIMER_CI_DISABLE;
3779
3780 device_config->vp_config[i].tti.timer_ri_en =
3781 VXGE_HW_TIM_TIMER_RI_DISABLE;
3782
3783 device_config->vp_config[i].tti.util_sel =
3784 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3785
3786 device_config->vp_config[i].tti.ltimer_val =
3787 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3788
3789 device_config->vp_config[i].tti.rtimer_val =
3790 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3791
3792 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3793 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3794 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3795 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3796 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3797 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3798 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3799
3800 /* Configure Rx rings */
3801 device_config->vp_config[i].ring.enable =
3802 VXGE_HW_RING_ENABLE;
3803
3804 device_config->vp_config[i].ring.ring_blocks =
3805 VXGE_HW_DEF_RING_BLOCKS;
3806
3807 device_config->vp_config[i].ring.buffer_mode =
3808 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3809
3810 device_config->vp_config[i].ring.rxds_limit =
3811 VXGE_HW_DEF_RING_RXDS_LIMIT;
3812
3813 device_config->vp_config[i].ring.scatter_mode =
3814 VXGE_HW_RING_SCATTER_MODE_A;
3815
3816 /* Configure rti properties */
3817 device_config->vp_config[i].rti.intr_enable =
3818 VXGE_HW_TIM_INTR_ENABLE;
3819
3820 device_config->vp_config[i].rti.btimer_val =
 3821 			(VXGE_RTI_BTIMER_VAL * 1000) / 272;
3822
3823 device_config->vp_config[i].rti.timer_ac_en =
3824 VXGE_HW_TIM_TIMER_AC_ENABLE;
3825
3826 device_config->vp_config[i].rti.timer_ci_en =
3827 VXGE_HW_TIM_TIMER_CI_DISABLE;
3828
3829 device_config->vp_config[i].rti.timer_ri_en =
3830 VXGE_HW_TIM_TIMER_RI_DISABLE;
3831
3832 device_config->vp_config[i].rti.util_sel =
3833 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3834
3835 device_config->vp_config[i].rti.urange_a =
3836 RTI_RX_URANGE_A;
3837 device_config->vp_config[i].rti.urange_b =
3838 RTI_RX_URANGE_B;
3839 device_config->vp_config[i].rti.urange_c =
3840 RTI_RX_URANGE_C;
3841 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3842 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3843 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3844 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3845
3846 device_config->vp_config[i].rti.rtimer_val =
3847 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3848
3849 device_config->vp_config[i].rti.ltimer_val =
3850 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3851
3852 device_config->vp_config[i].rpa_strip_vlan_tag =
3853 vlan_tag_strip;
3854 }
3855
3856 driver_config->vpath_per_dev = temp;
3857 return no_of_vpaths;
3858}
3859
 3860/* initialize device configurations */
3861static void __devinit vxge_device_config_init(
3862 struct vxge_hw_device_config *device_config,
3863 int *intr_type)
3864{
3865 /* Used for CQRQ/SRQ. */
3866 device_config->dma_blockpool_initial =
3867 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3868
3869 device_config->dma_blockpool_max =
3870 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3871
3872 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3873 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3874
3875#ifndef CONFIG_PCI_MSI
3876 vxge_debug_init(VXGE_ERR,
3877 "%s: This Kernel does not support "
3878 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3879 *intr_type = INTA;
3880#endif
3881
 3882 	/* Configure whether to use MSI-X or the INTA IRQ line. */
3883 switch (*intr_type) {
3884 case INTA:
3885 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3886 break;
3887
3888 case MSI_X:
3889 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3890 break;
3891 }
3892
3893 /* Timer period between device poll */
3894 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3895
3896 /* Configure mac based steering. */
3897 device_config->rts_mac_en = addr_learn_en;
3898
3899 /* Configure Vpaths */
3900 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3901
3902 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3903 __func__);
3904 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3905 device_config->intr_mode);
3906 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3907 device_config->device_poll_millis);
3908 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3909 device_config->rth_en);
3910 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3911 device_config->rth_it_type);
3912}
3913
3914static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3915{
3916 int i;
3917
3918 vxge_debug_init(VXGE_TRACE,
3919 "%s: %d Vpath(s) opened",
3920 vdev->ndev->name, vdev->no_of_vpath);
3921
3922 switch (vdev->config.intr_type) {
3923 case INTA:
3924 vxge_debug_init(VXGE_TRACE,
3925 "%s: Interrupt type INTA", vdev->ndev->name);
3926 break;
3927
3928 case MSI_X:
3929 vxge_debug_init(VXGE_TRACE,
3930 "%s: Interrupt type MSI-X", vdev->ndev->name);
3931 break;
3932 }
3933
3934 if (vdev->config.rth_steering) {
3935 vxge_debug_init(VXGE_TRACE,
3936 "%s: RTH steering enabled for TCP_IPV4",
3937 vdev->ndev->name);
3938 } else {
3939 vxge_debug_init(VXGE_TRACE,
3940 "%s: RTH steering disabled", vdev->ndev->name);
3941 }
3942
3943 switch (vdev->config.tx_steering_type) {
3944 case NO_STEERING:
3945 vxge_debug_init(VXGE_TRACE,
3946 "%s: Tx steering disabled", vdev->ndev->name);
3947 break;
3948 case TX_PRIORITY_STEERING:
3949 vxge_debug_init(VXGE_TRACE,
3950 "%s: Unsupported tx steering option",
3951 vdev->ndev->name);
3952 vxge_debug_init(VXGE_TRACE,
3953 "%s: Tx steering disabled", vdev->ndev->name);
3954 vdev->config.tx_steering_type = 0;
3955 break;
3956 case TX_VLAN_STEERING:
3957 vxge_debug_init(VXGE_TRACE,
3958 "%s: Unsupported tx steering option",
3959 vdev->ndev->name);
3960 vxge_debug_init(VXGE_TRACE,
3961 "%s: Tx steering disabled", vdev->ndev->name);
3962 vdev->config.tx_steering_type = 0;
3963 break;
3964 case TX_MULTIQ_STEERING:
3965 vxge_debug_init(VXGE_TRACE,
3966 "%s: Tx multiqueue steering enabled",
3967 vdev->ndev->name);
3968 break;
3969 case TX_PORT_STEERING:
3970 vxge_debug_init(VXGE_TRACE,
3971 "%s: Tx port steering enabled",
3972 vdev->ndev->name);
3973 break;
3974 default:
3975 vxge_debug_init(VXGE_ERR,
3976 "%s: Unsupported tx steering type",
3977 vdev->ndev->name);
3978 vxge_debug_init(VXGE_TRACE,
3979 "%s: Tx steering disabled", vdev->ndev->name);
3980 vdev->config.tx_steering_type = 0;
3981 }
3982
3983 if (vdev->config.addr_learn_en)
3984 vxge_debug_init(VXGE_TRACE,
3985 "%s: MAC Address learning enabled", vdev->ndev->name);
3986
3987 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3988 if (!vxge_bVALn(vpath_mask, i, 1))
3989 continue;
3990 vxge_debug_ll_config(VXGE_TRACE,
3991 "%s: MTU size - %d", vdev->ndev->name,
3992 ((struct __vxge_hw_device *)(vdev->devh))->
3993 config.vp_config[i].mtu);
3994 vxge_debug_init(VXGE_TRACE,
3995 "%s: VLAN tag stripping %s", vdev->ndev->name,
3996 ((struct __vxge_hw_device *)(vdev->devh))->
3997 config.vp_config[i].rpa_strip_vlan_tag
3998 ? "Enabled" : "Disabled");
3999 vxge_debug_ll_config(VXGE_TRACE,
4000 "%s: Max frags : %d", vdev->ndev->name,
4001 ((struct __vxge_hw_device *)(vdev->devh))->
4002 config.vp_config[i].fifo.max_frags);
4003 break;
4004 }
4005}
4006
4007#ifdef CONFIG_PM
4008/**
4009 * vxge_pm_suspend - vxge power management suspend entry point
4010 *
4011 */
4012static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4013{
4014 return -ENOSYS;
4015}
4016/**
4017 * vxge_pm_resume - vxge power management resume entry point
4018 *
4019 */
4020static int vxge_pm_resume(struct pci_dev *pdev)
4021{
4022 return -ENOSYS;
4023}
4024
4025#endif
4026
4027/**
4028 * vxge_io_error_detected - called when PCI error is detected
4029 * @pdev: Pointer to PCI device
4030 * @state: The current pci connection state
4031 *
4032 * This function is called after a PCI bus error affecting
4033 * this device has been detected.
4034 */
4035static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4036 pci_channel_state_t state)
4037{
4038 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4039 struct net_device *netdev = hldev->ndev;
4040
4041 netif_device_detach(netdev);
4042
4043 if (state == pci_channel_io_perm_failure)
4044 return PCI_ERS_RESULT_DISCONNECT;
4045
4046 if (netif_running(netdev)) {
4047 /* Bring down the card, while avoiding PCI I/O */
4048 do_vxge_close(netdev, 0);
4049 }
4050
4051 pci_disable_device(pdev);
4052
4053 return PCI_ERS_RESULT_NEED_RESET;
4054}
4055
4056/**
4057 * vxge_io_slot_reset - called after the pci bus has been reset.
4058 * @pdev: Pointer to PCI device
4059 *
4060 * Restart the card from scratch, as if from a cold-boot.
4061 * At this point, the card has experienced a hard reset,
4062 * followed by fixups by BIOS, and has its config space
4063 * set up identically to what it was at cold boot.
4064 */
4065static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4066{
4067 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4068 struct net_device *netdev = hldev->ndev;
4069
4070 struct vxgedev *vdev = netdev_priv(netdev);
4071
4072 if (pci_enable_device(pdev)) {
4073 netdev_err(netdev, "Cannot re-enable device after reset\n");
4074 return PCI_ERS_RESULT_DISCONNECT;
4075 }
4076
4077 pci_set_master(pdev);
4078 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4079
4080 return PCI_ERS_RESULT_RECOVERED;
4081}
4082
4083/**
4084 * vxge_io_resume - called when traffic can start flowing again.
4085 * @pdev: Pointer to PCI device
4086 *
4087 * This callback is called when the error recovery driver tells
4088 * us that it's OK to resume normal operation.
4089 */
4090static void vxge_io_resume(struct pci_dev *pdev)
4091{
4092 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4093 struct net_device *netdev = hldev->ndev;
4094
4095 if (netif_running(netdev)) {
4096 if (vxge_open(netdev)) {
4097 netdev_err(netdev,
4098 "Can't bring device back up after reset\n");
4099 return;
4100 }
4101 }
4102
4103 netif_device_attach(netdev);
4104}
4105
4106static inline u32 vxge_get_num_vfs(u64 function_mode)
4107{
4108 u32 num_functions = 0;
4109
4110 switch (function_mode) {
4111 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4112 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4113 num_functions = 8;
4114 break;
4115 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4116 num_functions = 1;
4117 break;
4118 case VXGE_HW_FUNCTION_MODE_SRIOV:
4119 case VXGE_HW_FUNCTION_MODE_MRIOV:
4120 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4121 num_functions = 17;
4122 break;
4123 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4124 num_functions = 4;
4125 break;
4126 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4127 num_functions = 2;
4128 break;
4129 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4130 num_functions = 8; /* TODO */
4131 break;
4132 }
4133 return num_functions;
4134}
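Editorial note: the function count returned here includes the physical function itself, which is why vxge_probe() below requests one fewer VF when enabling SR-IOV, along these lines (taken from the probe path later in this file):

	/* e.g. in SRIOV_8 mode: 8 functions total, PF excluded */
	num_vfs = vxge_get_num_vfs(function_mode) - 1;
	ret = pci_enable_sriov(pdev, num_vfs);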
4135
4136int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4137{
4138 struct __vxge_hw_device *hldev = vdev->devh;
4139 u32 maj, min, bld, cmaj, cmin, cbld;
4140 enum vxge_hw_status status;
4141 const struct firmware *fw;
4142 int ret;
4143
4144 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4145 if (ret) {
4146 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4147 VXGE_DRIVER_NAME, fw_name);
4148 goto out;
4149 }
4150
4151 /* Load the new firmware onto the adapter */
4152 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4153 if (status != VXGE_HW_OK) {
4154 vxge_debug_init(VXGE_ERR,
4155 "%s: FW image download to adapter failed '%s'.",
4156 VXGE_DRIVER_NAME, fw_name);
4157 ret = -EIO;
4158 goto out;
4159 }
4160
4161 /* Read the version of the new firmware */
4162 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4163 if (status != VXGE_HW_OK) {
4164 vxge_debug_init(VXGE_ERR,
4165 "%s: Upgrade read version failed '%s'.",
4166 VXGE_DRIVER_NAME, fw_name);
4167 ret = -EIO;
4168 goto out;
4169 }
4170
4171 cmaj = vdev->config.device_hw_info.fw_version.major;
4172 cmin = vdev->config.device_hw_info.fw_version.minor;
4173 cbld = vdev->config.device_hw_info.fw_version.build;
4174 /* It's possible the version in /lib/firmware is not the latest.
4175 * Without this check, every probe could re-flash the same (or an
4176 * older) image, looping through upgrade attempts indefinitely.
4177 */
4178 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4179 !override) {
4180 ret = -EINVAL;
4181 goto out;
4182 }
4183
4184 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4185 maj, min, bld);
4186
4187 /* Flash the adapter with the new firmware */
4188 status = vxge_hw_flash_fw(hldev);
4189 if (status != VXGE_HW_OK) {
4190 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4191 VXGE_DRIVER_NAME, fw_name);
4192 ret = -EIO;
4193 goto out;
4194 }
4195
4196 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4197 "hard reset before using, thus requiring a system reboot or a "
4198 "hotplug event.\n");
4199
4200out:
4201 release_firmware(fw);
4202 return ret;
4203}
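Editorial note: the VXGE_FW_VER() comparisons above rely on the (major, minor, build) triple being packed into a single integer, so that ordinary integer comparison orders firmware versions and passing 0 as the build compares major/minor only. The driver's actual macro is defined elsewhere (vxge-version.h in this series) and may differ; a minimal sketch of such a packing:

	/* hedged sketch, not the driver's definition: 8 bits per field */
	#define FW_VER(maj, min, bld)	(((u32)(maj) << 16) | ((u32)(min) << 8) | (bld))
	/* e.g. FW_VER(1, 8, 1) == 0x010801, and FW_VER(1, 8, 0) < FW_VER(1, 8, 1) */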
4204
4205static int vxge_probe_fw_update(struct vxgedev *vdev)
4206{
4207 u32 maj, min, bld;
4208 int ret, gpxe = 0;
4209 char *fw_name;
4210
4211 maj = vdev->config.device_hw_info.fw_version.major;
4212 min = vdev->config.device_hw_info.fw_version.minor;
4213 bld = vdev->config.device_hw_info.fw_version.build;
4214
4215 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4216 return 0;
4217
4218 /* Ignore the build number when determining if the current firmware is
4219 * "too new" to load the driver
4220 */
4221 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4222 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4223 "version, unable to load driver\n",
4224 VXGE_DRIVER_NAME);
4225 return -EINVAL;
4226 }
4227
4228 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4229 * work with this driver.
4230 */
4231 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4232 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4233 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4234 return -EINVAL;
4235 }
4236
4237 /* No firmware file specified: pick the gPXE image if a gPXE option ROM is present */
4238 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4239 int i;
4240 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4241 if (vdev->devh->eprom_versions[i]) {
4242 gpxe = 1;
4243 break;
4244 }
4245 }
4246 if (gpxe)
4247 fw_name = "vxge/X3fw-pxe.ncf";
4248 else
4249 fw_name = "vxge/X3fw.ncf";
4250
4251 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4252 /* -EINVAL and -ENOENT are not fatal when flashing firmware on probe,
4253 * so ignore them and continue; any other result (including a successful
4254 * flash, which requires a hard reset) aborts the probe. */
4255 if (ret != -EINVAL && ret != -ENOENT)
4256 return -EIO;
4257 else
4258 ret = 0;
4259
4260 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4261 VXGE_FW_VER(maj, min, 0)) {
4262 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4263 " be used with this driver.\n"
4264 "Please get the latest version from "
4265 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4266 VXGE_DRIVER_NAME, maj, min, bld);
4267 return -EINVAL;
4268 }
4269
4270 return ret;
4271}
4272
4273static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4274{
4275 int pos;
4276 u16 ctrl;
4277
4278 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4279 if (pos) {
4280 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4281 if (ctrl & PCI_SRIOV_CTRL_VFE)
4282 return 1;
4283 }
4284 return 0;
4285}
4286
4287/**
4288 * vxge_probe
4289 * @pdev : structure containing the PCI related information of the device.
4290 * @pre: the matching entry from vxge_id_table, the list of PCI devices supported by the driver.
4291 * Description:
4292 * This function is called when a new PCI device gets detected and initializes
4293 * it.
4294 * Return value:
4295 * returns 0 on success and negative on failure.
4296 *
4297 */
4298static int __devinit
4299vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4300{
4301 struct __vxge_hw_device *hldev;
4302 enum vxge_hw_status status;
4303 int ret;
4304 int high_dma = 0;
4305 u64 vpath_mask = 0;
4306 struct vxgedev *vdev;
4307 struct vxge_config *ll_config = NULL;
4308 struct vxge_hw_device_config *device_config = NULL;
4309 struct vxge_hw_device_attr attr;
4310 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4311 u8 *macaddr;
4312 struct vxge_mac_addrs *entry;
4313 static int bus = -1, device = -1;
4314 u32 host_type;
4315 u8 new_device = 0;
4316 enum vxge_hw_status is_privileged;
4317 u32 function_mode;
4318 u32 num_vfs = 0;
4319
4320 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4321 attr.pdev = pdev;
4322
4323 /* In SRIOV-17 mode, functions of the same adapter
4324 * can be deployed on different buses
4325 */
4326 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4327 !pdev->is_virtfn)
4328 new_device = 1;
4329
4330 bus = pdev->bus->number;
4331 device = PCI_SLOT(pdev->devfn);
4332
4333 if (new_device) {
4334 if (driver_config->config_dev_cnt &&
4335 (driver_config->config_dev_cnt !=
4336 driver_config->total_dev_cnt))
4337 vxge_debug_init(VXGE_ERR,
4338 "%s: Configured %d of %d devices",
4339 VXGE_DRIVER_NAME,
4340 driver_config->config_dev_cnt,
4341 driver_config->total_dev_cnt);
4342 driver_config->config_dev_cnt = 0;
4343 driver_config->total_dev_cnt = 0;
4344 }
4345
4346 /* Make the CPU-based vpath count calculation applicable
4347 * to individual functions as well.
4348 */
4349 driver_config->g_no_cpus = 0;
4350 driver_config->vpath_per_dev = max_config_vpath;
4351
4352 driver_config->total_dev_cnt++;
4353 if (++driver_config->config_dev_cnt > max_config_dev) {
4354 ret = 0;
4355 goto _exit0;
4356 }
4357
4358 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4359 GFP_KERNEL);
4360 if (!device_config) {
4361 ret = -ENOMEM;
4362 vxge_debug_init(VXGE_ERR,
4363 "device_config : malloc failed %s %d",
4364 __FILE__, __LINE__);
4365 goto _exit0;
4366 }
4367
4368 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4369 if (!ll_config) {
4370 ret = -ENOMEM;
4371 vxge_debug_init(VXGE_ERR,
4372 "device_config : malloc failed %s %d",
4373 __FILE__, __LINE__);
4374 goto _exit0;
4375 }
4376 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4377 ll_config->intr_type = MSI_X;
4378 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4379 ll_config->rth_steering = RTH_STEERING;
4380
4381 /* get the default configuration parameters */
4382 vxge_hw_device_config_default_get(device_config);
4383
4384 /* initialize configuration parameters */
4385 vxge_device_config_init(device_config, &ll_config->intr_type);
4386
4387 ret = pci_enable_device(pdev);
4388 if (ret) {
4389 vxge_debug_init(VXGE_ERR,
4390 "%s : can not enable PCI device", __func__);
4391 goto _exit0;
4392 }
4393
4394 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4395 vxge_debug_ll_config(VXGE_TRACE,
4396 "%s : using 64bit DMA", __func__);
4397
4398 high_dma = 1;
4399
4400 if (pci_set_consistent_dma_mask(pdev,
4401 DMA_BIT_MASK(64))) {
4402 vxge_debug_init(VXGE_ERR,
4403 "%s : unable to obtain 64bit DMA for "
4404 "consistent allocations", __func__);
4405 ret = -ENOMEM;
4406 goto _exit1;
4407 }
4408 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4409 vxge_debug_ll_config(VXGE_TRACE,
4410 "%s : using 32bit DMA", __func__);
4411 } else {
4412 ret = -ENOMEM;
4413 goto _exit1;
4414 }
4415
4416 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4417 if (ret) {
4418 vxge_debug_init(VXGE_ERR,
4419 "%s : request regions failed", __func__);
4420 goto _exit1;
4421 }
4422
4423 pci_set_master(pdev);
4424
4425 attr.bar0 = pci_ioremap_bar(pdev, 0);
4426 if (!attr.bar0) {
4427 vxge_debug_init(VXGE_ERR,
4428 "%s : cannot remap io memory bar0", __func__);
4429 ret = -ENODEV;
4430 goto _exit2;
4431 }
4432 vxge_debug_ll_config(VXGE_TRACE,
4433 "pci ioremap bar0: %p:0x%llx",
4434 attr.bar0,
4435 (unsigned long long)pci_resource_start(pdev, 0));
4436
4437 status = vxge_hw_device_hw_info_get(attr.bar0,
4438 &ll_config->device_hw_info);
4439 if (status != VXGE_HW_OK) {
4440 vxge_debug_init(VXGE_ERR,
4441 "%s: Reading of hardware info failed."
4442 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4443 ret = -EINVAL;
4444 goto _exit3;
4445 }
4446
4447 vpath_mask = ll_config->device_hw_info.vpath_mask;
4448 if (vpath_mask == 0) {
4449 vxge_debug_ll_config(VXGE_TRACE,
4450 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4451 ret = -EINVAL;
4452 goto _exit3;
4453 }
4454
4455 vxge_debug_ll_config(VXGE_TRACE,
4456 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4457 (unsigned long long)vpath_mask);
4458
4459 function_mode = ll_config->device_hw_info.function_mode;
4460 host_type = ll_config->device_hw_info.host_type;
4461 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4462 ll_config->device_hw_info.func_id);
4463
4464 /* Check how many vpaths are available */
4465 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4466 if (!((vpath_mask) & vxge_mBIT(i)))
4467 continue;
4468 max_vpath_supported++;
4469 }
4470
4471 if (new_device)
4472 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4473
4474 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4475 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4476 (ll_config->intr_type != INTA)) {
4477 ret = pci_enable_sriov(pdev, num_vfs);
4478 if (ret)
4479 vxge_debug_ll_config(VXGE_ERR,
4480 "Failed in enabling SRIOV mode: %d\n", ret);
4481 /* No need to fail out, as an error here is non-fatal */
4482 }
4483
4484 /*
4485 * Configure the vpaths and get the number of vpaths the driver
4486 * actually configured, which is at most the maximum vpaths per function.
4487 */
4488 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4489 if (!no_of_vpath) {
4490 vxge_debug_ll_config(VXGE_ERR,
4491 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4492 ret = 0;
4493 goto _exit3;
4494 }
4495
4496 /* Setting driver callbacks */
4497 attr.uld_callbacks.link_up = vxge_callback_link_up;
4498 attr.uld_callbacks.link_down = vxge_callback_link_down;
4499 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4500
4501 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4502 if (status != VXGE_HW_OK) {
4503 vxge_debug_init(VXGE_ERR,
4504 "Failed to initialize device (%d)", status);
4505 ret = -EINVAL;
4506 goto _exit3;
4507 }
4508
4509 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4510 ll_config->device_hw_info.fw_version.minor,
4511 ll_config->device_hw_info.fw_version.build) >=
4512 VXGE_EPROM_FW_VER) {
4513 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4514
4515 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4516 if (status != VXGE_HW_OK) {
4517 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4518 VXGE_DRIVER_NAME);
4519 /* This is a non-fatal error, continue */
4520 }
4521
4522 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4523 hldev->eprom_versions[i] = img[i].version;
4524 if (!img[i].is_valid)
4525 break;
4526 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4527 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4528 VXGE_EPROM_IMG_MAJOR(img[i].version),
4529 VXGE_EPROM_IMG_MINOR(img[i].version),
4530 VXGE_EPROM_IMG_FIX(img[i].version),
4531 VXGE_EPROM_IMG_BUILD(img[i].version));
4532 }
4533 }
4534
4535 /* If FCS stripping is not disabled in the MAC, fail the driver load */
4536 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4537 if (status != VXGE_HW_OK) {
4538 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4539 " failing driver load", VXGE_DRIVER_NAME);
4540 ret = -EINVAL;
4541 goto _exit4;
4542 }
4543
4544 /* Always enable HWTS. This will always cause the FCS to be invalid,
4545 * because HWTS uses the FCS as the location of the timestamp. The HW
4546 * FCS checking will still correctly determine whether there is a valid
4547 * checksum, and the FCS is removed by the driver anyway. So no
4548 * functionality is lost. Since it is always enabled, we now simply use
4549 * the ioctl call to set whether or not the driver should pay attention
4550 * to the HWTS.
4551 */
4552 if (is_privileged == VXGE_HW_OK) {
4553 status = vxge_timestamp_config(hldev);
4554 if (status != VXGE_HW_OK) {
4555 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4556 VXGE_DRIVER_NAME);
4557 ret = -EFAULT;
4558 goto _exit4;
4559 }
4560 }
4561
4562 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4563
4564 /* set private device info */
4565 pci_set_drvdata(pdev, hldev);
4566
4567 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4568 ll_config->addr_learn_en = addr_learn_en;
4569 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4570 ll_config->rth_hash_type_tcpipv4 = 1;
4571 ll_config->rth_hash_type_ipv4 = 0;
4572 ll_config->rth_hash_type_tcpipv6 = 0;
4573 ll_config->rth_hash_type_ipv6 = 0;
4574 ll_config->rth_hash_type_tcpipv6ex = 0;
4575 ll_config->rth_hash_type_ipv6ex = 0;
4576 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4577 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4578 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4579
4580 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4581 &vdev);
4582 if (ret) {
4583 ret = -EINVAL;
4584 goto _exit4;
4585 }
4586
4587 ret = vxge_probe_fw_update(vdev);
4588 if (ret)
4589 goto _exit5;
4590
4591 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4592 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4593 vxge_hw_device_trace_level_get(hldev));
4594
4595 /* set private HW device info */
4596 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4597 vdev->bar0 = attr.bar0;
4598 vdev->max_vpath_supported = max_vpath_supported;
4599 vdev->no_of_vpath = no_of_vpath;
4600
4601 /* Initialize the configured virtual paths */
4602 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4603 if (!vxge_bVALn(vpath_mask, i, 1))
4604 continue;
4605 if (j >= vdev->no_of_vpath)
4606 break;
4607
4608 vdev->vpaths[j].is_configured = 1;
4609 vdev->vpaths[j].device_id = i;
4610 vdev->vpaths[j].ring.driver_id = j;
4611 vdev->vpaths[j].vdev = vdev;
4612 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4613 memcpy((u8 *)vdev->vpaths[j].macaddr,
4614 ll_config->device_hw_info.mac_addrs[i],
4615 ETH_ALEN);
4616
4617 /* Initialize the mac address list header */
4618 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4619
4620 vdev->vpaths[j].mac_addr_cnt = 0;
4621 vdev->vpaths[j].mcast_addr_cnt = 0;
4622 j++;
4623 }
4624 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4625 vdev->max_config_port = max_config_port;
4626
4627 vdev->vlan_tag_strip = vlan_tag_strip;
4628
4629 /* map the hashing selector table to the configured vpaths */
4630 for (i = 0; i < vdev->no_of_vpath; i++)
4631 vdev->vpath_selector[i] = vpath_selector[i];
4632
4633 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4634
4635 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4636 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4637 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4638
4639 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4640 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4641
4642 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4643 vdev->ndev->name, ll_config->device_hw_info.part_number);
4644
4645 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4646 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4647
4648 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4649 vdev->ndev->name, macaddr);
4650
4651 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4652 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4653
4654 vxge_debug_init(VXGE_TRACE,
4655 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4656 ll_config->device_hw_info.fw_version.version,
4657 ll_config->device_hw_info.fw_date.date);
4658
4659 if (new_device) {
4660 switch (ll_config->device_hw_info.function_mode) {
4661 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4662 vxge_debug_init(VXGE_TRACE,
4663 "%s: Single Function Mode Enabled", vdev->ndev->name);
4664 break;
4665 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4666 vxge_debug_init(VXGE_TRACE,
4667 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4668 break;
4669 case VXGE_HW_FUNCTION_MODE_SRIOV:
4670 vxge_debug_init(VXGE_TRACE,
4671 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4672 break;
4673 case VXGE_HW_FUNCTION_MODE_MRIOV:
4674 vxge_debug_init(VXGE_TRACE,
4675 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4676 break;
4677 }
4678 }
4679
4680 vxge_print_parm(vdev, vpath_mask);
4681
4682 /* Store the fw version for the ethtool option */
4683 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4684 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4685 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4686
4687 /* Copy the station MAC address to the list */
4688 for (i = 0; i < vdev->no_of_vpath; i++) {
4689 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4690 if (NULL == entry) {
4691 vxge_debug_init(VXGE_ERR,
4692 "%s: mac_addr_list : memory allocation failed",
4693 vdev->ndev->name);
4694 ret = -EPERM;
4695 goto _exit6;
4696 }
4697 macaddr = (u8 *)&entry->macaddr;
4698 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4699 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4700 vdev->vpaths[i].mac_addr_cnt = 1;
4701 }
4702
4703 kfree(device_config);
4704
4705 /*
4706 * INTA is shared in multi-function mode. This is unlike the INTA
4707 * implementation in MR mode, where each VH has its own INTA message.
4708 * - INTA is masked (disabled) as long as at least one function sets
4709 * its TITAN_MASK_ALL_INT.ALARM bit.
4710 * - INTA is unmasked (enabled) when all enabled functions have cleared
4711 * their own TITAN_MASK_ALL_INT.ALARM bit.
4712 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4713 * Though this driver leaves the top level interrupts unmasked while
4714 * leaving the required module interrupt bits masked on exit, there
4715 * could be a rogue driver around that does not follow this procedure
4716 * resulting in a failure to generate interrupts. The following code is
4717 * present to prevent such a failure.
4718 */
4719
4720 if (ll_config->device_hw_info.function_mode ==
4721 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4722 if (vdev->config.intr_type == INTA)
4723 vxge_hw_device_unmask_all(hldev);
4724
4725 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4726 vdev->ndev->name, __func__, __LINE__);
4727
4728 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4729 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4730 vxge_hw_device_trace_level_get(hldev));
4731
4732 kfree(ll_config);
4733 return 0;
4734
4735_exit6:
4736 for (i = 0; i < vdev->no_of_vpath; i++)
4737 vxge_free_mac_add_list(&vdev->vpaths[i]);
4738_exit5:
4739 vxge_device_unregister(hldev);
4740_exit4:
4741 pci_set_drvdata(pdev, NULL);
4742 vxge_hw_device_terminate(hldev);
4743 pci_disable_sriov(pdev);
4744_exit3:
4745 iounmap(attr.bar0);
4746_exit2:
4747 pci_release_region(pdev, 0);
4748_exit1:
4749 pci_disable_device(pdev);
4750_exit0:
4751 kfree(ll_config);
4752 kfree(device_config);
4753 driver_config->config_dev_cnt--;
4754 driver_config->total_dev_cnt--;
4755 return ret;
4756}
4757
4758/**
4759 * vxge_remove - Free the PCI device
4760 * @pdev: structure containing the PCI related information of the device.
4761 * Description: This function is called by the PCI subsystem to release a
4762 * PCI device and free up all resources held by the device.
4763 */
4764static void __devexit vxge_remove(struct pci_dev *pdev)
4765{
4766 struct __vxge_hw_device *hldev;
4767 struct vxgedev *vdev;
4768 int i;
4769
4770 hldev = pci_get_drvdata(pdev);
4771 if (hldev == NULL)
4772 return;
4773
4774 vdev = netdev_priv(hldev->ndev);
4775
4776 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4777 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4778 __func__);
4779
4780 for (i = 0; i < vdev->no_of_vpath; i++)
4781 vxge_free_mac_add_list(&vdev->vpaths[i]);
4782
4783 vxge_device_unregister(hldev);
4784 pci_set_drvdata(pdev, NULL);
4785 /* Do not call pci_disable_sriov here, as it will break child devices */
4786 vxge_hw_device_terminate(hldev);
4787 iounmap(vdev->bar0);
4788 pci_release_region(pdev, 0);
4789 pci_disable_device(pdev);
4790 driver_config->config_dev_cnt--;
4791 driver_config->total_dev_cnt--;
4792
4793 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4794 __func__, __LINE__);
4795 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4796 __LINE__);
4797}
4798
4799static struct pci_error_handlers vxge_err_handler = {
4800 .error_detected = vxge_io_error_detected,
4801 .slot_reset = vxge_io_slot_reset,
4802 .resume = vxge_io_resume,
4803};
4804
4805static struct pci_driver vxge_driver = {
4806 .name = VXGE_DRIVER_NAME,
4807 .id_table = vxge_id_table,
4808 .probe = vxge_probe,
4809 .remove = __devexit_p(vxge_remove),
4810#ifdef CONFIG_PM
4811 .suspend = vxge_pm_suspend,
4812 .resume = vxge_pm_resume,
4813#endif
4814 .err_handler = &vxge_err_handler,
4815};
4816
4817static int __init
4818vxge_starter(void)
4819{
4820 int ret = 0;
4821
4822 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4823 pr_info("Driver version: %s\n", DRV_VERSION);
4824
4825 verify_bandwidth();
4826
4827 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4828 if (!driver_config)
4829 return -ENOMEM;
4830
4831 ret = pci_register_driver(&vxge_driver);
4832 if (ret) {
4833 kfree(driver_config);
4834 goto err;
4835 }
4836
4837 if (driver_config->config_dev_cnt &&
4838 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4839 vxge_debug_init(VXGE_ERR,
4840 "%s: Configured %d of %d devices",
4841 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4842 driver_config->total_dev_cnt);
4843err:
4844 return ret;
4845}
4846
4847static void __exit
4848vxge_closer(void)
4849{
4850 pci_unregister_driver(&vxge_driver);
4851 kfree(driver_config);
4852}
4853module_init(vxge_starter);
4854module_exit(vxge_closer);
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
new file mode 100644
index 00000000000..f52a42d1dbb
--- /dev/null
+++ b/drivers/net/vxge/vxge-main.h
@@ -0,0 +1,519 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_MAIN_H
15#define VXGE_MAIN_H
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-version.h"
20#include <linux/list.h>
21#include <linux/bitops.h>
22#include <linux/if_vlan.h>
23
24#define VXGE_DRIVER_NAME "vxge"
25#define VXGE_DRIVER_VENDOR "Neterion, Inc"
26#define VXGE_DRIVER_FW_VERSION_MAJOR 1
27
28#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
29 VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
30 VXGE_VERSION_FOR
31
32#define PCI_DEVICE_ID_TITAN_WIN 0x5733
33#define PCI_DEVICE_ID_TITAN_UNI 0x5833
34#define VXGE_HW_TITAN1_PCI_REVISION 1
35#define VXGE_HW_TITAN1A_PCI_REVISION 2
36
37#define VXGE_USE_DEFAULT 0xffffffff
38#define VXGE_HW_VPATH_MSIX_ACTIVE 4
39#define VXGE_ALARM_MSIX_ID 2
40#define VXGE_HW_RXSYNC_FREQ_CNT 4
41#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
42#define VXGE_LL_RX_COPY_THRESHOLD 256
43#define VXGE_DEF_FIFO_LENGTH 84
44
45#define NO_STEERING 0
46#define PORT_STEERING 0x1
47#define RTH_STEERING 0x2
48#define RX_TOS_STEERING 0x3
49#define RX_VLAN_STEERING 0x4
50#define RTH_BUCKET_SIZE 4
51
52#define TX_PRIORITY_STEERING 1
53#define TX_VLAN_STEERING 2
54#define TX_PORT_STEERING 3
55#define TX_MULTIQ_STEERING 4
56
57#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
58
59#define VXGE_TTI_BTIMER_VAL 250000
60
61#define VXGE_TTI_LTIMER_VAL 1000
62#define VXGE_T1A_TTI_LTIMER_VAL 80
63#define VXGE_TTI_RTIMER_VAL 0
64#define VXGE_TTI_RTIMER_ADAPT_VAL 10
65#define VXGE_T1A_TTI_RTIMER_VAL 400
66#define VXGE_RTI_BTIMER_VAL 250
67#define VXGE_RTI_LTIMER_VAL 100
68#define VXGE_RTI_RTIMER_VAL 0
69#define VXGE_RTI_RTIMER_ADAPT_VAL 15
70#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
71#define VXGE_ISR_POLLING_CNT 8
72#define VXGE_MAX_CONFIG_DEV 0xFF
73#define VXGE_EXEC_MODE_DISABLE 0
74#define VXGE_EXEC_MODE_ENABLE 1
75#define VXGE_MAX_CONFIG_PORT 1
76#define VXGE_ALL_VID_DISABLE 0
77#define VXGE_ALL_VID_ENABLE 1
78#define VXGE_PAUSE_CTRL_DISABLE 0
79#define VXGE_PAUSE_CTRL_ENABLE 1
80
81#define TTI_TX_URANGE_A 5
82#define TTI_TX_URANGE_B 15
83#define TTI_TX_URANGE_C 40
84#define TTI_TX_UFC_A 5
85#define TTI_TX_UFC_B 40
86#define TTI_TX_UFC_C 60
87#define TTI_TX_UFC_D 100
88#define TTI_T1A_TX_UFC_A 30
89#define TTI_T1A_TX_UFC_B 80
90/* Slope = (max_mtu - min_mtu) / (max_mtu_ufc - min_mtu_ufc) */
91/* Slope ~= (9000 - 1500) / (140 - 60) ~= 93 */
92/* Endpoints: UFC_C = 60 at 9k MTU, 140 at 1.5k MTU */
93#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
94
95/* Slope ~= (9000 - 1500) / (300 - 100) ~= 37 */
96/* Endpoints: UFC_D = 100 at 9k MTU, 300 at 1.5k MTU */
97#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
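A worked evaluation of the two macros above (editorial sketch, assuming a maximum MTU of roughly 9000 for illustration; VXGE_HW_MAX_MTU itself is defined in vxge-config.h and may differ slightly):

	/* TTI_T1A_TX_UFC_C(1500) = 60 + (9000 - 1500) / 93  = 60 + 80   = 140
	 * TTI_T1A_TX_UFC_C(4000) = 60 + (9000 - 4000) / 93  = 60 + 53   = 113
	 * TTI_T1A_TX_UFC_D(1500) = 100 + (9000 - 1500) / 37 = 100 + 202 = 302 */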
98
99
100#define RTI_RX_URANGE_A 5
101#define RTI_RX_URANGE_B 15
102#define RTI_RX_URANGE_C 40
103#define RTI_T1A_RX_URANGE_A 1
104#define RTI_T1A_RX_URANGE_B 20
105#define RTI_T1A_RX_URANGE_C 50
106#define RTI_RX_UFC_A 1
107#define RTI_RX_UFC_B 5
108#define RTI_RX_UFC_C 10
109#define RTI_RX_UFC_D 15
110#define RTI_T1A_RX_UFC_B 20
111#define RTI_T1A_RX_UFC_C 50
112#define RTI_T1A_RX_UFC_D 60
113
114/*
115 * With the moderation parameters, the interrupt rate is kept at roughly
116 * 3k per second for most, though not all, traffic. The values below cap
117 * the interrupt count allowed per function (INTA) or per vector (MSI-X)
118 * within a 10 millisecond window. Enabled only for Titan 1A.
119 */
120#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
121#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
122
123/* Timer period in milliseconds */
124#define VXGE_TIMER_DELAY 10000
125
126#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
127
128#define is_sriov(function_mode) \
129 ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
130 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
131 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
132
133enum vxge_reset_event {
134 /* reset events */
135 VXGE_LL_VPATH_RESET = 0,
136 VXGE_LL_DEVICE_RESET = 1,
137 VXGE_LL_FULL_RESET = 2,
138 VXGE_LL_START_RESET = 3,
139 VXGE_LL_COMPL_RESET = 4
140};
141/* These flags represent the device's temporary state */
142enum vxge_device_state_t {
143__VXGE_STATE_RESET_CARD = 0,
144__VXGE_STATE_CARD_UP
145};
146
147enum vxge_mac_addr_state {
148 /* mac address states */
149 VXGE_LL_MAC_ADDR_IN_LIST = 0,
150 VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
151};
152
153struct vxge_drv_config {
154 int config_dev_cnt;
155 int total_dev_cnt;
156 int g_no_cpus;
157 unsigned int vpath_per_dev;
158};
159
160struct macInfo {
161 unsigned char macaddr[ETH_ALEN];
162 unsigned char macmask[ETH_ALEN];
163 unsigned int vpath_no;
164 enum vxge_mac_addr_state state;
165};
166
167struct vxge_config {
168 int tx_pause_enable;
169 int rx_pause_enable;
170
171#define NEW_NAPI_WEIGHT 64
172 int napi_weight;
173 int intr_type;
174#define INTA 0
175#define MSI 1
176#define MSI_X 2
177
178 int addr_learn_en;
179
180 u32 rth_steering:2,
181 rth_algorithm:2,
182 rth_hash_type_tcpipv4:1,
183 rth_hash_type_ipv4:1,
184 rth_hash_type_tcpipv6:1,
185 rth_hash_type_ipv6:1,
186 rth_hash_type_tcpipv6ex:1,
187 rth_hash_type_ipv6ex:1,
188 rth_bkt_sz:8;
189 int rth_jhash_golden_ratio;
190 int tx_steering_type;
191 int fifo_indicate_max_pkts;
192 struct vxge_hw_device_hw_info device_hw_info;
193};
194
195struct vxge_msix_entry {
196 /* Mimicking the kernel's msix_entry struct. */
197 u16 vector;
198 u16 entry;
199 u16 in_use;
200 void *arg;
201};
202
203/* Software Statistics */
204
205struct vxge_sw_stats {
206
207 /* Virtual Path */
208 unsigned long vpaths_open;
209 unsigned long vpath_open_fail;
210
211 /* Misc. */
212 unsigned long link_up;
213 unsigned long link_down;
214};
215
216struct vxge_mac_addrs {
217 struct list_head item;
218 u64 macaddr;
219 u64 macmask;
220 enum vxge_mac_addr_state state;
221};
222
223struct vxgedev;
224
225struct vxge_fifo_stats {
226 struct u64_stats_sync syncp;
227 u64 tx_frms;
228 u64 tx_bytes;
229
230 unsigned long tx_errors;
231 unsigned long txd_not_free;
232 unsigned long txd_out_of_desc;
233 unsigned long pci_map_fail;
234};
235
236struct vxge_fifo {
237 struct net_device *ndev;
238 struct pci_dev *pdev;
239 struct __vxge_hw_fifo *handle;
240 struct netdev_queue *txq;
241
242 int tx_steering_type;
243 int indicate_max_pkts;
244
245 /* Adaptive interrupt moderation parameters used in T1A */
246 unsigned long interrupt_count;
247 unsigned long jiffies;
248
249 u32 tx_vector_no;
250 /* Tx stats */
251 struct vxge_fifo_stats stats;
252} ____cacheline_aligned;
253
254struct vxge_ring_stats {
255 struct u64_stats_sync syncp;
256 u64 rx_frms;
257 u64 rx_mcast;
258 u64 rx_bytes;
259
260 unsigned long rx_errors;
261 unsigned long rx_dropped;
262 unsigned long prev_rx_frms;
263 unsigned long pci_map_fail;
264 unsigned long skb_alloc_fail;
265};
266
267struct vxge_ring {
268 struct net_device *ndev;
269 struct pci_dev *pdev;
270 struct __vxge_hw_ring *handle;
271 /* The vpath id maintained in the driver -
272 * 0 to 'maximum_vpaths_in_function - 1'
273 */
274 int driver_id;
275
276 /* Adaptive interrupt moderation parameters used in T1A */
277 unsigned long interrupt_count;
278 unsigned long jiffies;
279
280 /* copy of the flag indicating whether rx_hwts is to be used */
281 u32 rx_hwts:1;
282
283 int pkts_processed;
284 int budget;
285
286 struct napi_struct napi;
287 struct napi_struct *napi_p;
288
289#define VXGE_MAX_MAC_ADDR_COUNT 30
290
291 int vlan_tag_strip;
292 u32 rx_vector_no;
293 enum vxge_hw_status last_status;
294
295 /* Rx stats */
296 struct vxge_ring_stats stats;
297} ____cacheline_aligned;
298
299struct vxge_vpath {
300 struct vxge_fifo fifo;
301 struct vxge_ring ring;
302
303 struct __vxge_hw_vpath_handle *handle;
304
305 /* Actual vpath id for this vpath in the device - 0 to 16 */
306 int device_id;
307 int max_mac_addr_cnt;
308 int is_configured;
309 int is_open;
310 struct vxgedev *vdev;
311 u8 macaddr[ETH_ALEN];
312 u8 macmask[ETH_ALEN];
313
314#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
315 /* mac addresses currently programmed into NIC */
316 u16 mac_addr_cnt;
317 u16 mcast_addr_cnt;
318 struct list_head mac_addr_list;
319
320 u32 level_err;
321 u32 level_trace;
322};
323#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
324 for (i = 0; i < vdev->no_of_vpath; i++) { \
325 vdev->vpaths[i].level_err = err; \
326 vdev->vpaths[i].level_trace = trace; \
327 } \
328 vdev->level_err = err; \
329 vdev->level_trace = trace; \
330}
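Note that the macro body uses a loop variable i that it does not declare, so every caller must have an int i in scope, as vxge_probe() in vxge-main.c does. A minimal usage sketch (the level arguments are illustrative):

	{
		int i;	/* consumed by the macro's for-loop */

		VXGE_COPY_DEBUG_INFO_TO_LL(vdev, VXGE_ERR, VXGE_TRACE);
	}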
331
332struct vxgedev {
333 struct net_device *ndev;
334 struct pci_dev *pdev;
335 struct __vxge_hw_device *devh;
336 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
337 int vlan_tag_strip;
338 struct vxge_config config;
339 unsigned long state;
340
341 /* Indicates which vpath to reset */
342 unsigned long vp_reset;
343
344 /* Timer used for polling vpath resets */
345 struct timer_list vp_reset_timer;
346
347 /* Timer used for polling vpath lockup */
348 struct timer_list vp_lockup_timer;
349
350 /*
351 * Flags to track whether device is in All Multicast
352 * or in promiscuous mode.
353 */
354 u16 all_multi_flg;
355
356 /* A flag indicating whether rx_hwts is to be used or not. */
357 u32 rx_hwts:1,
358 titan1:1;
359
360 struct vxge_msix_entry *vxge_entries;
361 struct msix_entry *entries;
362 /*
363 * 4 MSI-X vectors for each of up to 17 vpaths;
364 * total is 68
365 */
366#define VXGE_MAX_REQUESTED_MSIX 68
367#define VXGE_INTR_STRLEN 80
368 char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
369
370 enum vxge_hw_event cric_err_event;
371
372 int max_vpath_supported;
373 int no_of_vpath;
374
375 struct napi_struct napi;
376 /* A debug option: when enabled and an error condition occurs,
377 * the driver will take the following steps:
378 * - mask all interrupts
379 * - not clear the source of the alarm
380 * - gracefully stop all I/O
381 * A diagnostic dump of registers and stats at this point
382 * reveals very useful information.
383 */
384 int exec_mode;
385 int max_config_port;
386 struct vxge_vpath *vpaths;
387
388 struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
389 void __iomem *bar0;
390 struct vxge_sw_stats stats;
391 int mtu;
392 /* The variables below are used to select the vpath for transmitting a packet */
393 u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
394 u64 vpaths_deployed;
395
396 u32 intr_cnt;
397 u32 level_err;
398 u32 level_trace;
399 char fw_version[VXGE_HW_FW_STRLEN];
400 struct work_struct reset_task;
401};
402
403struct vxge_rx_priv {
404 struct sk_buff *skb;
405 unsigned char *skb_data;
406 dma_addr_t data_dma;
407 dma_addr_t data_size;
408};
409
410struct vxge_tx_priv {
411 struct sk_buff *skb;
412 dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
413};
414
415#define VXGE_MODULE_PARAM_INT(p, val) \
416 static int p = val; \
417 module_param(p, int, 0)
418
419#define vxge_os_timer(timer, handle, arg, exp) do { \
420 init_timer(&timer); \
421 timer.function = handle; \
422 timer.data = (unsigned long) arg; \
423 mod_timer(&timer, (jiffies + exp)); \
424 } while (0)	/* no trailing ';' so the macro is safe in if/else */
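A usage sketch for this wrapper (editorial; the handler name is hypothetical, and the unsigned long argument matches the legacy init_timer()-era callback signature this macro targets):

	static void my_vp_poll(unsigned long data)
	{
		struct vxgedev *vdev = (struct vxgedev *)data;
		/* ... poll work ... */
		mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);	/* re-arm */
	}

	/* in e.g. vxge_open(): fire my_vp_poll(vdev) after HZ/2 jiffies */
	vxge_os_timer(vdev->vp_reset_timer, my_vp_poll, vdev, HZ / 2);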
425
426void vxge_initialize_ethtool_ops(struct net_device *ndev);
427enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
428int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
429
430/**
431 * #define VXGE_DEBUG_INIT: debug for initialization functions
432 * #define VXGE_DEBUG_TX : debug transmit related functions
433 * #define VXGE_DEBUG_RX : debug receive related functions
434 * #define VXGE_DEBUG_MEM : debug memory module
435 * #define VXGE_DEBUG_LOCK: debug locks
436 * #define VXGE_DEBUG_SEM : debug semaphore
437 * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
438*/
439#define VXGE_DEBUG_INIT 0x00000001
440#define VXGE_DEBUG_TX 0x00000002
441#define VXGE_DEBUG_RX 0x00000004
442#define VXGE_DEBUG_MEM 0x00000008
443#define VXGE_DEBUG_LOCK 0x00000010
444#define VXGE_DEBUG_SEM 0x00000020
445#define VXGE_DEBUG_ENTRYEXIT 0x00000040
446#define VXGE_DEBUG_INTR 0x00000080
447#define VXGE_DEBUG_LL_CONFIG 0x00000100
448
449/* Debug tracing for VXGE driver */
450#ifndef VXGE_DEBUG_MASK
451#define VXGE_DEBUG_MASK 0x0
452#endif
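The class bits above gate the vxge_debug_* wrappers below at compile time; with the default mask of 0x0 every wrapper expands to nothing. A build-time sketch (hypothetical flag placement):

	/* e.g. ccflags-y += -DVXGE_DEBUG_MASK=0x00000101 in the Makefile
	 * compiles in only the VXGE_DEBUG_INIT (0x1) and
	 * VXGE_DEBUG_LL_CONFIG (0x100) statements. */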
453
454#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
455#define vxge_debug_ll_config(level, fmt, ...) \
456 vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
457#else
458#define vxge_debug_ll_config(level, fmt, ...)
459#endif
460
461#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
462#define vxge_debug_init(level, fmt, ...) \
463 vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
464#else
465#define vxge_debug_init(level, fmt, ...)
466#endif
467
468#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
469#define vxge_debug_tx(level, fmt, ...) \
470 vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
471#else
472#define vxge_debug_tx(level, fmt, ...)
473#endif
474
475#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
476#define vxge_debug_rx(level, fmt, ...) \
477 vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
478#else
479#define vxge_debug_rx(level, fmt, ...)
480#endif
481
482#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
483#define vxge_debug_mem(level, fmt, ...) \
484 vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
485#else
486#define vxge_debug_mem(level, fmt, ...)
487#endif
488
489#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
490#define vxge_debug_entryexit(level, fmt, ...) \
491 vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
492#else
493#define vxge_debug_entryexit(level, fmt, ...)
494#endif
495
496#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
497#define vxge_debug_intr(level, fmt, ...) \
498 vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
499#else
500#define vxge_debug_intr(level, fmt, ...)
501#endif
502
503#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
504 vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
505 level, mask);\
506 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
507 vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
508 vdev->devh), \
509 vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
510 vdev->devh));\
511}
512
513#ifdef NETIF_F_GSO
514#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
515#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
516#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
517#endif
518
519#endif
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
new file mode 100644
index 00000000000..3e658b17594
--- /dev/null
+++ b/drivers/net/vxge/vxge-reg.h
@@ -0,0 +1,4636 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
11 * Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_REG_H
15#define VXGE_REG_H
16
17/*
18 * vxge_mBIT(loc) - set the bit at MSB-relative offset loc
19 */
20#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
21
22/*
23 * vxge_vBIT(val, loc, sz) - place val in the sz-bit field at MSB-relative offset loc
24 */
25#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
26#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
27
28/*
29 * vxge_bVALn(bits, loc, n) - get the value of the n-bit field at MSB-relative offset loc
30 */
31#define vxge_bVALn(bits, loc, n) \
32 ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
33
34#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
35 vxge_bVALn(bits, 0, 16)
36#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
37 vxge_bVALn(bits, 48, 8)
38#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
39 vxge_bVALn(bits, 56, 8)
40
41#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
42 vxge_bVALn(bits, 3, 5)
43#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
44 vxge_bVALn(bits, 5, 3)
45#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
46
47#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
48#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
79#define VXGE_HW_ASIC_MODE_RESERVED 0
80#define VXGE_HW_ASIC_MODE_NO_IOV 1
81#define VXGE_HW_ASIC_MODE_SR_IOV 2
82#define VXGE_HW_ASIC_MODE_MR_IOV 3
83
84#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
85#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
86#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
87#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
88
89#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
90
91#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
92 vxge_bVALn(bits, 0, 32)
93
94#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
95 vxge_bVALn(bits, 50, 14)
96
97#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
98 vxge_bVALn(bits, 0, 17)
99
100#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
101 vxge_bVALn(bits, 3, 5)
102
103#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
104 vxge_bVALn(bits, 17, 15)
105
106#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
107#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
108#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
109
110#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
111#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
112
113#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
114 (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
115#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
116 vxge_bVALn(val, 61, 3)
117#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
118 (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
119#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
120 vxge_bVALn(val, 61, 3)
121
122#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
123#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
124
125#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
126 vxge_bVALn(bits, 1, 15)
127#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
128 vxge_bVALn(bits, 17, 15)
129#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
130 vxge_bVALn(bits, 33, 15)
131
132#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
133#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
134#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
135 vxge_vBIT(val, 49, 15)
136
137#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
138#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
139#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
140
141#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
142#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
143#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
144
145#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
146#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
147
148#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
149#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
150#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
151#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
152#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
153#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
154#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
155#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
156#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
157#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
158#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
159#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
160#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
161#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
162
163#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
164 vxge_bVALn(bits, 0, 48)
165#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
166
167#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
168 vxge_bVALn(bits, 0, 48)
169#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
170#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
171 vxge_mBIT(54)
172#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
173 vxge_bVALn(bits, 55, 5)
174#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
175 vxge_vBIT(val, 55, 5)
176#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
177 vxge_bVALn(bits, 62, 2)
178#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
179
180#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
181#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
182#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
183#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
184#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
185#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
186#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
187#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
188#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
189
190#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
191#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
203
204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
205 vxge_bVALn(bits, 0, 48)
206#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
207
208#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
209#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
210
211#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
212#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
213
214#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
215 vxge_bVALn(bits, 3, 1)
216#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
217#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
218 vxge_bVALn(bits, 7, 1)
219#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
220#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
221 vxge_bVALn(bits, 8, 16)
222#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
223
224#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
225 vxge_bVALn(bits, 3, 1)
226#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
227#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
228 vxge_bVALn(bits, 4, 4)
229#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
230 vxge_vBIT(val, 4, 4)
231#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
232 vxge_bVALn(bits, 10, 2)
233#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
234 vxge_vBIT(val, 10, 2)
235#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
236#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
237#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
238#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
239 vxge_bVALn(bits, 15, 1)
240#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
241#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
242 vxge_bVALn(bits, 19, 1)
243#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
244#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
245 vxge_bVALn(bits, 23, 1)
246#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
247#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
248 vxge_bVALn(bits, 27, 1)
249#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
250#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
251 vxge_bVALn(bits, 31, 1)
252#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
253#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
254 vxge_bVALn(bits, 35, 1)
255#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
256#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
257 vxge_bVALn(bits, 39, 1)
258#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
259#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
260 vxge_bVALn(bits, 43, 1)
261#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
262
263#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
264 vxge_bVALn(bits, 3, 1)
265#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
266#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
267 vxge_bVALn(bits, 9, 7)
268#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
269 vxge_vBIT(val, 9, 7)
270
271#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
272 vxge_bVALn(bits, 0, 8)
273#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
274 vxge_vBIT(val, 0, 8)
275#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
276 vxge_bVALn(bits, 8, 1)
277#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
278#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
279 vxge_bVALn(bits, 9, 7)
280#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
281 vxge_vBIT(val, 9, 7)
282#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
283 vxge_bVALn(bits, 16, 8)
284#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
285 vxge_vBIT(val, 16, 8)
286#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
287 vxge_bVALn(bits, 24, 1)
288#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
289#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
290 vxge_bVALn(bits, 25, 7)
291#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
292 vxge_vBIT(val, 25, 7)
293#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
294 vxge_bVALn(bits, 0, 8)
295#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
296 vxge_vBIT(val, 0, 8)
297#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
298 vxge_bVALn(bits, 8, 1)
299#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
300#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
301 vxge_bVALn(bits, 9, 7)
302#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
303 vxge_vBIT(val, 9, 7)
304#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
305 vxge_bVALn(bits, 16, 8)
306#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
307 vxge_vBIT(val, 16, 8)
308#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
309 vxge_bVALn(bits, 24, 1)
310#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
311#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
312 vxge_bVALn(bits, 25, 7)
313#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
314 vxge_vBIT(val, 25, 7)
315
316#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
317 vxge_bVALn(bits, 0, 32)
318#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
319 vxge_vBIT(val, 0, 32)
320#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
321 vxge_bVALn(bits, 32, 32)
322#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
323 vxge_vBIT(val, 32, 32)
324
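/*
 * Example: a minimal sketch of composing the Jenkins-hash configuration
 * word from the two 32-bit fields above. The seed values are purely
 * illustrative (0x9e3779b9 is the classic Jenkins golden-ratio constant,
 * not a documented hardware default):
 *
 *	u64 jhash_cfg =
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(
 *			0x9e3779b9) |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(0);
 */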
325#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
326 vxge_bVALn(bits, 0, 16)
327#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
328 vxge_vBIT(val, 0, 16)
329#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
330 vxge_bVALn(bits, 16, 16)
331#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
332 vxge_vBIT(val, 16, 16)
333#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
334 vxge_bVALn(bits, 32, 4)
335#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
336 vxge_vBIT(val, 32, 4)
337#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
338 vxge_bVALn(bits, 36, 4)
339#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
340 vxge_vBIT(val, 36, 4)
341#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
342 vxge_bVALn(bits, 40, 2)
343#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
344 vxge_vBIT(val, 40, 2)
345#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
346 vxge_bVALn(bits, 42, 2)
347#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
348 vxge_vBIT(val, 42, 2)
349
350#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
351 vxge_bVALn(bits, 0, 64)
352#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
353
354#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
355 vxge_bVALn(bits, 3, 1)
356#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
357
358#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
359 vxge_bVALn(bits, 3, 1)
360#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
361
362#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
363 vxge_bVALn(bits, 0, 48)
364#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
365 vxge_vBIT(val, 0, 48)
366#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
367 vxge_vBIT(val, 62, 2)
368
369#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
370 vxge_bVALn(bits, 0, 8)
371#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
372 vxge_vBIT(val, 0, 8)
373#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
374 vxge_bVALn(bits, 8, 1)
375#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
376#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
377 vxge_bVALn(bits, 9, 7)
378#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
379 vxge_vBIT(val, 9, 7)
380#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
381 vxge_bVALn(bits, 16, 8)
382#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
383 vxge_vBIT(val, 16, 8)
384#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
385 vxge_bVALn(bits, 24, 1)
386#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
387#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
388 vxge_bVALn(bits, 25, 7)
389#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
390 vxge_vBIT(val, 25, 7)
391#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
392 vxge_bVALn(bits, 32, 8)
393#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
394 vxge_vBIT(val, 32, 8)
395#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
396 vxge_bVALn(bits, 40, 1)
397#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
398#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
399 vxge_bVALn(bits, 41, 7)
400#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
401 vxge_vBIT(val, 41, 7)
402#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
403 vxge_bVALn(bits, 48, 8)
404#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
405 vxge_vBIT(val, 48, 8)
406#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
407 vxge_bVALn(bits, 56, 1)
408#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
409#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
410 vxge_bVALn(bits, 57, 7)
411#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
412 vxge_vBIT(val, 57, 7)
413
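/*
 * Example: a minimal sketch of packing two enabled RTH indirection-table
 * entries into a DATA0 word with the ITEM0/ITEM1 macros above; the bucket
 * indices and the vpath numbers vp0/vp1 are illustrative:
 *
 *	u64 data0 =
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(0) |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(vp0) |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(1) |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
 *		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(vp1);
 */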
414#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
415#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
416#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
417#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
418#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
419#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
420#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
421#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
422
423#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
424#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
425
426#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
427 vxge_bVALn(bits, 0, 8)
428#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
429#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
430 vxge_bVALn(bits, 8, 8)
431#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
432#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
433 vxge_bVALn(bits, 16, 16)
434#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
435 vxge_vBIT(val, 16, 16)
436
437#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
438 vxge_bVALn(bits, 32, 8)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
440#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
441 vxge_bVALn(bits, 40, 8)
442#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
443#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
444 vxge_bVALn(bits, 48, 16)
445#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
446
447#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
448 vxge_bVALn(bits, 0, 8)
449#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
450#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
451 vxge_bVALn(bits, 8, 8)
452#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
453#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
454 vxge_bVALn(bits, 16, 16)
455#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
456 vxge_vBIT(val, 16, 16)
457
458#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
459 vxge_bVALn(bits, 32, 8)
460#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
461#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
462 vxge_bVALn(bits, 40, 8)
463#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) vxge_vBIT(val, 40, 8)
464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
465 vxge_bVALn(bits, 48, 16)
466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
468
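/*
 * Example: decoding the firmware version fields above from the 64-bit
 * DATA0 word returned by a firmware-version steer read (how data0 is
 * obtained is outside this sketch):
 *
 *	u32 major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
 *	u32 minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
 *	u32 build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
 */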
469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
470 vxge_bVALn(bits, 0, 18)
471
472#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
473 vxge_bVALn(bits, 48, 16)
474#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
475 vxge_bVALn(bits, 32, 32)
476#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
477#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
478 vxge_bVALn(bits, 0, 32)
479#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
480 vxge_bVALn(bits, 0, 32)
481#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
482 vxge_bVALn(bits, 0, 32)
483#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
484#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
485#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
486 vxge_bVALn(bits, 32, 32)
487#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
488 vxge_bVALn(bits, 32, 32)
489#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
490 vxge_bVALn(bits, 0, 32)
491#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
492 vxge_bVALn(bits, 32, 32)
493#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
494 vxge_bVALn(bits, 0, 32)
495#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
496 vxge_bVALn(bits, 32, 32)
497#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
498 vxge_bVALn(bits, 0, 32)
499#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
500 vxge_bVALn(bits, 32, 32)
501#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits) \
502	vxge_bVALn(bits, 48, 16)
503#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
504#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
505 vxge_bVALn(bits, 16, 16)
506#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
507 vxge_bVALn(bits, 32, 16)
508#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
509#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
510 vxge_bVALn(bits, 16, 16)
511#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
512 vxge_bVALn(bits, 32, 16)
513
514#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
515 vxge_bVALn(bits, 0, 32)
516#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
517 vxge_bVALn(bits, 32, 32)
518#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits) \
519	vxge_bVALn(bits, 32, 32)
520#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits) \
521	vxge_bVALn(bits, 32, 32)
522#define \
523VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
524 vxge_bVALn(bits, 32, 32)
525#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
526 vxge_bVALn(bits, 0, 32)
527#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
528 vxge_bVALn(bits, 32, 32)
529#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
530 vxge_bVALn(bits, 0, 32)
531#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
532 vxge_bVALn(bits, 32, 32)
533#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
534 vxge_bVALn(bits, 0, 32)
535#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
536 vxge_bVALn(bits, 32, 32)
537#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
538 vxge_bVALn(bits, 32, 32)
539#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
540 vxge_bVALn(bits, 32, 32)
541
542#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
543#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
544#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
545#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
546#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
547#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
548#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
549#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
550#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
551#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
552#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
553
554#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
555 vxge_bVALn(bits, 32, 32)
556
557#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
558 vxge_bVALn(bits, 0, 8)
559#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
560 vxge_bVALn(bits, 8, 8)
561#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
562 vxge_bVALn(bits, 16, 8)
563
564#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
565 vxge_bVALn(bits, 0, 8)
566#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
567 vxge_bVALn(bits, 8, 8)
568#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
569 vxge_bVALn(bits, 16, 8)
570
571#define VXGE_HW_CONFIG_PRIV_H
572
573#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
574#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
575#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
576#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
577
578#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
579#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
580
581#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
582#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
583
584#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
585#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
586
587#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
588#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
589
590/*
591 * The registers are memory-mapped and use native big-endian byte order.
592 * Little-endian hosts are handled by enabling hardware byte-swapping for
593 * register and DMA operations.
594 */
595struct vxge_hw_legacy_reg {
596
597 u8 unused00010[0x00010];
598
599/*0x00010*/ u64 toc_swapper_fb;
600#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
601/*0x00018*/ u64 pifm_rd_swap_en;
602#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
603/*0x00020*/ u64 pifm_rd_flip_en;
604#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
605/*0x00028*/ u64 pifm_wr_swap_en;
606#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
607/*0x00030*/ u64 pifm_wr_flip_en;
608#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
609/*0x00038*/ u64 toc_first_pointer;
610#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
611/*0x00040*/ u64 host_access_en;
612#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
613
614} __packed;
615
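/*
 * Example: a minimal sketch of the swapper probe described above,
 * modelled on the driver's legacy-swapper setup. It reads toc_swapper_fb
 * (initialized by the adapter to VXGE_HW_SWAPPER_INITIAL_VALUE) and, if
 * the value arrives byte-swapped, enables hardware byte-swapping for PIF
 * reads and writes. The bit-flipped cases and error handling are omitted;
 * readq()/writeq() are assumed available. Returns 0 on success.
 */
static inline int
vxge_example_set_swapper(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 == VXGE_HW_SWAPPER_INITIAL_VALUE)
		return 0;	/* already in host byte order */

	if (val64 == VXGE_HW_SWAPPER_BYTE_SWAPPED) {
		/* the all-ones enable patterns are invariant under
		 * byte-swapping, so these writes land correctly even
		 * before the swap takes effect */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
		       &legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
		       &legacy_reg->pifm_wr_swap_en);
		return 0;
	}

	return -1;	/* bit-flipped combinations not handled here */
}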
616struct vxge_hw_toc_reg {
617
618 u8 unused00050[0x00050];
619
620/*0x00050*/ u64 toc_common_pointer;
621#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
622/*0x00058*/ u64 toc_memrepair_pointer;
623#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
624/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
625#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
626 u8 unused001e0[0x001e0-0x000e8];
627
628/*0x001e0*/ u64 toc_mrpcim_pointer;
629#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
630/*0x001e8*/ u64 toc_srpcim_pointer[17];
631#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
632 u8 unused00278[0x00278-0x00270];
633
634/*0x00278*/ u64 toc_vpmgmt_pointer[17];
635#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
636 u8 unused00390[0x00390-0x00300];
637
638/*0x00390*/ u64 toc_vpath_pointer[17];
639#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
640 u8 unused004a0[0x004a0-0x00418];
641
642/*0x004a0*/ u64 toc_kdfc;
643#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
644#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
645/*0x004a8*/ u64 toc_usdc;
646#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
647#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
648/*0x004b0*/ u64 toc_kdfc_vpath_stride;
649#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
650 vxge_vBIT(val, 0, 64)
651/*0x004b8*/ u64 toc_kdfc_fifo_stride;
652#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
653 vxge_vBIT(val, 0, 64)
654
655} __packed;
656
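/*
 * Example: the TOC pointers above are BAR0-relative byte offsets; a
 * minimal sketch of locating virtual path vp_id's register block from a
 * mapped BAR0 (parameter names are illustrative):
 */
static inline void __iomem *
vxge_example_vpath_reg(u8 __iomem *bar0,
		       struct vxge_hw_toc_reg __iomem *toc_reg, u32 vp_id)
{
	/* toc_vpath_pointer[i] holds the offset of vpath i's registers */
	return bar0 + readq(&toc_reg->toc_vpath_pointer[vp_id]);
}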
657struct vxge_hw_common_reg {
658
659 u8 unused00a00[0x00a00];
660
661/*0x00a00*/ u64 prc_status1;
662#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
663/*0x00a08*/ u64 rxdcm_reset_in_progress;
664#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
665/*0x00a10*/ u64 replicq_flush_in_progress;
666#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
667/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
668#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
669/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
670#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
671/*0x00a28*/ u64 noffload_reset_in_progress;
672#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
673/*0x00a30*/ u64 rd_req_in_progress;
674#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
675/*0x00a38*/ u64 rd_req_outstanding;
676#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
677/*0x00a40*/ u64 kdfc_reset_in_progress;
678#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
679 u8 unused00b00[0x00b00-0x00a48];
680
681/*0x00b00*/ u64 one_cfg_vp;
682#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
683/*0x00b08*/ u64 one_common;
684#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
685 u8 unused00b80[0x00b80-0x00b10];
686
687/*0x00b80*/ u64 tim_int_en;
688#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
689/*0x00b88*/ u64 tim_set_int_en;
690#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
691/*0x00b90*/ u64 tim_clr_int_en;
692#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
693/*0x00b98*/ u64 tim_mask_int_during_reset;
694#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
695/*0x00ba0*/ u64 tim_reset_in_progress;
696#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
697/*0x00ba8*/ u64 tim_outstanding_bmap;
698#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
699 u8 unused00c00[0x00c00-0x00bb0];
700
701/*0x00c00*/ u64 msg_reset_in_progress;
702#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
703/*0x00c08*/ u64 msg_mxp_mr_ready;
704#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
705/*0x00c10*/ u64 msg_uxp_mr_ready;
706#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
707/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
708#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
709/*0x00c20*/ u64 msg_umq_rtl_bwr;
710#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
711 u8 unused00d00[0x00d00-0x00c28];
712
713/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
714#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
715/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
716#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
717/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
718#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
719/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
720#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
721/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
722#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
723 u8 unused00d40[0x00d40-0x00d28];
724
725/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
726#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
727/*0x00d48*/ u64 stats_cfg0;
728#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
729 u8 unused00da8[0x00da8-0x00d50];
730
731/*0x00da8*/ u64 clear_msix_mask_vect[4];
732#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
733 vxge_vBIT(val, 0, 17)
734/*0x00dc8*/ u64 set_msix_mask_vect[4];
735#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
736/*0x00de8*/ u64 clear_msix_mask_all_vect;
737#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
738 vxge_vBIT(val, 0, 17)
739/*0x00df0*/ u64 set_msix_mask_all_vect;
740#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
741 vxge_vBIT(val, 0, 17)
742/*0x00df8*/ u64 mask_vector[4];
743#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
744/*0x00e18*/ u64 msix_pending_vector[4];
745#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
746 vxge_vBIT(val, 0, 17)
747/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
748#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
749 vxge_vBIT(val, 0, 17)
750/*0x00e58*/ u64 titan_asic_id;
751#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
752#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
753#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
754/*0x00e60*/ u64 titan_general_int_status;
755#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
756#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
757#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
758#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
759 vxge_vBIT(val, 3, 17)
760 u8 unused00e70[0x00e70-0x00e68];
761
762/*0x00e70*/ u64 titan_mask_all_int;
763#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
764#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
765 u8 unused00e80[0x00e80-0x00e78];
766
767/*0x00e80*/ u64 tim_int_status0;
768#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
769/*0x00e88*/ u64 tim_int_mask0;
770#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
771/*0x00e90*/ u64 tim_int_status1;
772#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
773/*0x00e98*/ u64 tim_int_mask1;
774#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
775/*0x00ea0*/ u64 rti_int_status;
776#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
777/*0x00ea8*/ u64 rti_int_mask;
778#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
779/*0x00eb0*/ u64 adapter_status;
780#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
781#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
782#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
783#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
784#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
785#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
786#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
787#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
788#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
789#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
790#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
791#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
792#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
793#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
794#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
795/*0x00eb8*/ u64 gen_ctrl;
796#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
797#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
798#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
799#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
800#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
801#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
802#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
803 u8 unused00ed0[0x00ed0-0x00ec0];
804
805/*0x00ed0*/ u64 adapter_ready;
806#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
807/*0x00ed8*/ u64 outstanding_read;
808#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
809/*0x00ee0*/ u64 vpath_rst_in_prog;
810#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
811/*0x00ee8*/ u64 vpath_reg_modified;
812#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
813 u8 unused00fc0[0x00fc0-0x00ef0];
814
815/*0x00fc0*/ u64 cp_reset_in_progress;
816#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
817 u8 unused01080[0x01080-0x00fc8];
818
819/*0x01080*/ u64 xgmac_ready;
820#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
821 u8 unused010c0[0x010c0-0x01088];
822
823/*0x010c0*/ u64 fbif_ready;
824#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
825 u8 unused01100[0x01100-0x010c8];
826
827/*0x01100*/ u64 vplane_assignments;
828#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
829/*0x01108*/ u64 vpath_assignments;
830#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
831/*0x01110*/ u64 resource_assignments;
832#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
833 vxge_vBIT(val, 0, 17)
834/*0x01118*/ u64 host_type_assignments;
835#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
836 vxge_vBIT(val, 5, 3)
837 u8 unused01128[0x01128-0x01120];
838
839/*0x01128*/ u64 max_resource_assignments;
840#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
841 vxge_vBIT(val, 3, 5)
842#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
843 vxge_vBIT(val, 11, 5)
844/*0x01130*/ u64 pf_vpath_assignments;
845#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
846 vxge_vBIT(val, 0, 17)
847 u8 unused01200[0x01200-0x01138];
848
849/*0x01200*/ u64 rts_access_icmp;
850#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
851/*0x01208*/ u64 rts_access_tcpsyn;
852#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
853/*0x01210*/ u64 rts_access_zl4pyld;
854#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
855/*0x01218*/ u64 rts_access_l4prtcl_tcp;
856#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
857/*0x01220*/ u64 rts_access_l4prtcl_udp;
858#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
859/*0x01228*/ u64 rts_access_l4prtcl_flex;
860#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
861/*0x01230*/ u64 rts_access_ipfrag;
862#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
863
864} __packed;
865
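/*
 * Example: the 17-bit vpath bitmasks in the reset-handler registers are
 * MSB-first, so vpath vp_id corresponds to bit (16 - vp_id) of the field.
 * A minimal sketch of requesting a software reset of one vpath; the
 * in-tree code performs this as a 32-bit upper-half write, a 64-bit
 * writeq is shown here for brevity:
 */
static inline void
vxge_example_reset_vpath(struct vxge_hw_common_reg __iomem *common_reg,
			 u32 vp_id)
{
	writeq(VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)),
	       &common_reg->cmn_rsthdlr_cfg0);
}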
866struct vxge_hw_memrepair_reg {
867 u64 unused1;
868 u64 unused2;
869} __packed;
870
871struct vxge_hw_pcicfgmgmt_reg {
872
873/*0x00000*/ u64 resource_no;
874#define VXGE_HW_RESOURCE_NO_PFN_OR_VF vxge_mBIT(3)
875/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
876#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
877 vxge_vBIT(val, 2, 6)
878/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
879#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
880 vxge_vBIT(val, 2, 6)
881/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
882#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
883 vxge_vBIT(val, 2, 6)
884/*0x00020*/ u64 msixgrp_no;
885#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
886
887} __packed;
888
889struct vxge_hw_mrpcim_reg {
890/*0x00000*/ u64 g3fbct_int_status;
891#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
892/*0x00008*/ u64 g3fbct_int_mask;
893/*0x00010*/ u64 g3fbct_err_reg;
894#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
895#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
896#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
897#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
898#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
899#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
900#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
901/*0x00018*/ u64 g3fbct_err_mask;
902/*0x00020*/ u64 g3fbct_err_alarm;
903
904 u8 unused00a00[0x00a00-0x00028];
905
906/*0x00a00*/ u64 wrdma_int_status;
907#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
908#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
909#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
910#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
911#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
912#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
913#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
914#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
915#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
916#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
917#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
918#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
919#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
920/*0x00a08*/ u64 wrdma_int_mask;
921/*0x00a10*/ u64 rc_alarm_reg;
922#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
923#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
924#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
925#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
926#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
927#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
928#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
929#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
930#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
931#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
932#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
933#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
934/*0x00a18*/ u64 rc_alarm_mask;
935/*0x00a20*/ u64 rc_alarm_alarm;
936/*0x00a28*/ u64 rxdrm_sm_err_reg;
937#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
938/*0x00a30*/ u64 rxdrm_sm_err_mask;
939/*0x00a38*/ u64 rxdrm_sm_err_alarm;
940/*0x00a40*/ u64 rxdcm_sm_err_reg;
941#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
942/*0x00a48*/ u64 rxdcm_sm_err_mask;
943/*0x00a50*/ u64 rxdcm_sm_err_alarm;
944/*0x00a58*/ u64 rxdwm_sm_err_reg;
945#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
946/*0x00a60*/ u64 rxdwm_sm_err_mask;
947/*0x00a68*/ u64 rxdwm_sm_err_alarm;
948/*0x00a70*/ u64 rda_err_reg;
949#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
950#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
951#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
952#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
953#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
954#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
955#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
956#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
957/*0x00a78*/ u64 rda_err_mask;
958/*0x00a80*/ u64 rda_err_alarm;
959/*0x00a88*/ u64 rda_ecc_db_reg;
960#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
961/*0x00a90*/ u64 rda_ecc_db_mask;
962/*0x00a98*/ u64 rda_ecc_db_alarm;
963/*0x00aa0*/ u64 rda_ecc_sg_reg;
964#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
965/*0x00aa8*/ u64 rda_ecc_sg_mask;
966/*0x00ab0*/ u64 rda_ecc_sg_alarm;
967/*0x00ab8*/ u64 rqa_err_reg;
968#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
969/*0x00ac0*/ u64 rqa_err_mask;
970/*0x00ac8*/ u64 rqa_err_alarm;
971/*0x00ad0*/ u64 frf_alarm_reg;
972#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
973/*0x00ad8*/ u64 frf_alarm_mask;
974/*0x00ae0*/ u64 frf_alarm_alarm;
975/*0x00ae8*/ u64 rocrc_alarm_reg;
976#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
977#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
978#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
979#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
980#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
981#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
982#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
983#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
984#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
985#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
986#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
987#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
988#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
989#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
990#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
991#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
992#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
993#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
994#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
995/*0x00af0*/ u64 rocrc_alarm_mask;
996/*0x00af8*/ u64 rocrc_alarm_alarm;
997/*0x00b00*/ u64 wde0_alarm_reg;
998#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
999#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
1000#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
1001#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
1002#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
1003/*0x00b08*/ u64 wde0_alarm_mask;
1004/*0x00b10*/ u64 wde0_alarm_alarm;
1005/*0x00b18*/ u64 wde1_alarm_reg;
1006#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
1007#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
1008#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
1009#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
1010#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
1011/*0x00b20*/ u64 wde1_alarm_mask;
1012/*0x00b28*/ u64 wde1_alarm_alarm;
1013/*0x00b30*/ u64 wde2_alarm_reg;
1014#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
1015#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
1016#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
1017#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
1018#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
1019/*0x00b38*/ u64 wde2_alarm_mask;
1020/*0x00b40*/ u64 wde2_alarm_alarm;
1021/*0x00b48*/ u64 wde3_alarm_reg;
1022#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
1023#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
1024#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
1025#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
1026#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
1027/*0x00b50*/ u64 wde3_alarm_mask;
1028/*0x00b58*/ u64 wde3_alarm_alarm;
1029
1030 u8 unused00be8[0x00be8-0x00b60];
1031
1032/*0x00be8*/ u64 rx_w_round_robin_0;
1033#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
1034#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
1035#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
1036#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
1037#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
1038#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
1039#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
1040#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
1041/*0x00bf0*/ u64 rx_w_round_robin_1;
1042#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
1043#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
1044#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
1045 vxge_vBIT(val, 19, 5)
1046#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
1047 vxge_vBIT(val, 27, 5)
1048#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
1049 vxge_vBIT(val, 35, 5)
1050#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
1051 vxge_vBIT(val, 43, 5)
1052#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
1053 vxge_vBIT(val, 51, 5)
1054#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
1055 vxge_vBIT(val, 59, 5)
1056/*0x00bf8*/ u64 rx_w_round_robin_2;
1057#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
1058#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
1059 vxge_vBIT(val, 11, 5)
1060#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
1061 vxge_vBIT(val, 19, 5)
1062#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
1063 vxge_vBIT(val, 27, 5)
1064#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
1065 vxge_vBIT(val, 35, 5)
1066#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
1067 vxge_vBIT(val, 43, 5)
1068#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
1069 vxge_vBIT(val, 51, 5)
1070#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
1071 vxge_vBIT(val, 59, 5)
1072/*0x00c00*/ u64 rx_w_round_robin_3;
1073#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
1074#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
1075 vxge_vBIT(val, 11, 5)
1076#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
1077 vxge_vBIT(val, 19, 5)
1078#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
1079 vxge_vBIT(val, 27, 5)
1080#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
1081 vxge_vBIT(val, 35, 5)
1082#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
1083 vxge_vBIT(val, 43, 5)
1084#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
1085 vxge_vBIT(val, 51, 5)
1086#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
1087 vxge_vBIT(val, 59, 5)
1088/*0x00c08*/ u64 rx_w_round_robin_4;
1089#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
1090#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
1091 vxge_vBIT(val, 11, 5)
1092#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
1093 vxge_vBIT(val, 19, 5)
1094#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
1095 vxge_vBIT(val, 27, 5)
1096#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
1097 vxge_vBIT(val, 35, 5)
1098#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
1099 vxge_vBIT(val, 43, 5)
1100#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
1101 vxge_vBIT(val, 51, 5)
1102#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
1103 vxge_vBIT(val, 59, 5)
1104/*0x00c10*/ u64 rx_w_round_robin_5;
1105#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
1106#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
1107 vxge_vBIT(val, 11, 5)
1108#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
1109 vxge_vBIT(val, 19, 5)
1110#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
1111 vxge_vBIT(val, 27, 5)
1112#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
1113 vxge_vBIT(val, 35, 5)
1114#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
1115 vxge_vBIT(val, 43, 5)
1116#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
1117 vxge_vBIT(val, 51, 5)
1118#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
1119 vxge_vBIT(val, 59, 5)
1120/*0x00c18*/ u64 rx_w_round_robin_6;
1121#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
1122#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
1123 vxge_vBIT(val, 11, 5)
1124#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
1125 vxge_vBIT(val, 19, 5)
1126#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
1127 vxge_vBIT(val, 27, 5)
1128#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
1129 vxge_vBIT(val, 35, 5)
1130#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
1131 vxge_vBIT(val, 43, 5)
1132#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
1133 vxge_vBIT(val, 51, 5)
1134#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
1135 vxge_vBIT(val, 59, 5)
1136/*0x00c20*/ u64 rx_w_round_robin_7;
1137#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
1138#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
1139 vxge_vBIT(val, 11, 5)
1140#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
1141 vxge_vBIT(val, 19, 5)
1142#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
1143 vxge_vBIT(val, 27, 5)
1144#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
1145 vxge_vBIT(val, 35, 5)
1146#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
1147 vxge_vBIT(val, 43, 5)
1148#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
1149 vxge_vBIT(val, 51, 5)
1150#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
1151 vxge_vBIT(val, 59, 5)
1152/*0x00c28*/ u64 rx_w_round_robin_8;
1153#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
1154#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
1155 vxge_vBIT(val, 11, 5)
1156#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
1157 vxge_vBIT(val, 19, 5)
1158#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
1159 vxge_vBIT(val, 27, 5)
1160#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
1161 vxge_vBIT(val, 35, 5)
1162#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
1163 vxge_vBIT(val, 43, 5)
1164#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
1165 vxge_vBIT(val, 51, 5)
1166#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
1167 vxge_vBIT(val, 59, 5)
1168/*0x00c30*/ u64 rx_w_round_robin_9;
1169#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
1170#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
1171 vxge_vBIT(val, 11, 5)
1172#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
1173 vxge_vBIT(val, 19, 5)
1174#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
1175 vxge_vBIT(val, 27, 5)
1176#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
1177 vxge_vBIT(val, 35, 5)
1178#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
1179 vxge_vBIT(val, 43, 5)
1180#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
1181 vxge_vBIT(val, 51, 5)
1182#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
1183 vxge_vBIT(val, 59, 5)
1184/*0x00c38*/ u64 rx_w_round_robin_10;
1185#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
1186 vxge_vBIT(val, 3, 5)
1187#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
1188 vxge_vBIT(val, 11, 5)
1189#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
1190 vxge_vBIT(val, 19, 5)
1191#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
1192 vxge_vBIT(val, 27, 5)
1193#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
1194 vxge_vBIT(val, 35, 5)
1195#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
1196 vxge_vBIT(val, 43, 5)
1197#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
1198 vxge_vBIT(val, 51, 5)
1199#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
1200 vxge_vBIT(val, 59, 5)
1201/*0x00c40*/ u64 rx_w_round_robin_11;
1202#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
1203 vxge_vBIT(val, 3, 5)
1204#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
1205 vxge_vBIT(val, 11, 5)
1206#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
1207 vxge_vBIT(val, 19, 5)
1208#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
1209 vxge_vBIT(val, 27, 5)
1210#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
1211 vxge_vBIT(val, 35, 5)
1212#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
1213 vxge_vBIT(val, 43, 5)
1214#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
1215 vxge_vBIT(val, 51, 5)
1216#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
1217 vxge_vBIT(val, 59, 5)
1218/*0x00c48*/ u64 rx_w_round_robin_12;
1219#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
1220 vxge_vBIT(val, 3, 5)
1221#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
1222 vxge_vBIT(val, 11, 5)
1223#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
1224 vxge_vBIT(val, 19, 5)
1225#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
1226 vxge_vBIT(val, 27, 5)
1227#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
1228 vxge_vBIT(val, 35, 5)
1229#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
1230 vxge_vBIT(val, 43, 5)
1231#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
1232 vxge_vBIT(val, 51, 5)
1233#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
1234 vxge_vBIT(val, 59, 5)
1235/*0x00c50*/ u64 rx_w_round_robin_13;
1236#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
1237 vxge_vBIT(val, 3, 5)
1238#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
1239 vxge_vBIT(val, 11, 5)
1240#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
1241 vxge_vBIT(val, 19, 5)
1242#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
1243 vxge_vBIT(val, 27, 5)
1244#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
1245 vxge_vBIT(val, 35, 5)
1246#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
1247 vxge_vBIT(val, 43, 5)
1248#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
1249 vxge_vBIT(val, 51, 5)
1250#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
1251 vxge_vBIT(val, 59, 5)
1252/*0x00c58*/ u64 rx_w_round_robin_14;
1253#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
1254 vxge_vBIT(val, 3, 5)
1255#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
1256 vxge_vBIT(val, 11, 5)
1257#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
1258 vxge_vBIT(val, 19, 5)
1259#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
1260 vxge_vBIT(val, 27, 5)
1261#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
1262 vxge_vBIT(val, 35, 5)
1263#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
1264 vxge_vBIT(val, 43, 5)
1265#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
1266 vxge_vBIT(val, 51, 5)
1267#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
1268 vxge_vBIT(val, 59, 5)
1269/*0x00c60*/ u64 rx_w_round_robin_15;
1270#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
1271 vxge_vBIT(val, 3, 5)
1272#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
1273 vxge_vBIT(val, 11, 5)
1274#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
1275 vxge_vBIT(val, 19, 5)
1276#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
1277 vxge_vBIT(val, 27, 5)
1278#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
1279 vxge_vBIT(val, 35, 5)
1280#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
1281 vxge_vBIT(val, 43, 5)
1282#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
1283 vxge_vBIT(val, 51, 5)
1284#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
1285 vxge_vBIT(val, 59, 5)
1286/*0x00c68*/ u64 rx_w_round_robin_16;
1287#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
1288 vxge_vBIT(val, 3, 5)
1289#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
1290 vxge_vBIT(val, 11, 5)
1291#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
1292 vxge_vBIT(val, 19, 5)
1293#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
1294 vxge_vBIT(val, 27, 5)
1295#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
1296 vxge_vBIT(val, 35, 5)
1297#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
1298 vxge_vBIT(val, 43, 5)
1299#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
1300 vxge_vBIT(val, 51, 5)
1301#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
1302 vxge_vBIT(val, 59, 5)
1303/*0x00c70*/ u64 rx_w_round_robin_17;
1304#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
1305 vxge_vBIT(val, 3, 5)
1306#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
1307 vxge_vBIT(val, 11, 5)
1308#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
1309 vxge_vBIT(val, 19, 5)
1310#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
1311 vxge_vBIT(val, 27, 5)
1312#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
1313 vxge_vBIT(val, 35, 5)
1314#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
1315 vxge_vBIT(val, 43, 5)
1316#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
1317 vxge_vBIT(val, 51, 5)
1318#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
1319 vxge_vBIT(val, 59, 5)
1320/*0x00c78*/ u64 rx_w_round_robin_18;
1321#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
1322 vxge_vBIT(val, 3, 5)
1323#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
1324 vxge_vBIT(val, 11, 5)
1325#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
1326 vxge_vBIT(val, 19, 5)
1327#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
1328 vxge_vBIT(val, 27, 5)
1329#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
1330 vxge_vBIT(val, 35, 5)
1331#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
1332 vxge_vBIT(val, 43, 5)
1333#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
1334 vxge_vBIT(val, 51, 5)
1335#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
1336 vxge_vBIT(val, 59, 5)
1337/*0x00c80*/ u64 rx_w_round_robin_19;
1338#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
1339 vxge_vBIT(val, 3, 5)
1340#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
1341 vxge_vBIT(val, 11, 5)
1342#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
1343 vxge_vBIT(val, 19, 5)
1344#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
1345 vxge_vBIT(val, 27, 5)
1346#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
1347 vxge_vBIT(val, 35, 5)
1348#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
1349 vxge_vBIT(val, 43, 5)
1350#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
1351 vxge_vBIT(val, 51, 5)
1352#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
1353 vxge_vBIT(val, 59, 5)
1354/*0x00c88*/ u64 rx_w_round_robin_20;
1355#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
1356 vxge_vBIT(val, 3, 5)
1357#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
1358 vxge_vBIT(val, 11, 5)
1359#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
1360 vxge_vBIT(val, 19, 5)
1361#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
1362 vxge_vBIT(val, 27, 5)
1363#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
1364 vxge_vBIT(val, 35, 5)
1365#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
1366 vxge_vBIT(val, 43, 5)
1367#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
1368 vxge_vBIT(val, 51, 5)
1369#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
1370 vxge_vBIT(val, 59, 5)
1371/*0x00c90*/ u64 rx_w_round_robin_21;
1372#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
1373 vxge_vBIT(val, 3, 5)
1374#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
1375 vxge_vBIT(val, 11, 5)
1376#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
1377 vxge_vBIT(val, 19, 5)
1378
1379#define VXGE_HW_WRR_RING_SERVICE_STATES 171
1380#define VXGE_HW_WRR_RING_COUNT 22
1381
1382/*0x00c98*/ u64 rx_queue_priority_0;
1383#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1384#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1385#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1386#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1387#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1388#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1389#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1390#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1391/*0x00ca0*/ u64 rx_queue_priority_1;
1392#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
1393#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
1394#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
1395#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
1396#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
1397#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
1398#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
1399#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
1400/*0x00ca8*/ u64 rx_queue_priority_2;
1401#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
1402 u8 unused00cc8[0x00cc8-0x00cb0];
1403
1404/*0x00cc8*/ u64 replication_queue_priority;
1405#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
1406 vxge_vBIT(val, 59, 5)
1407/*0x00cd0*/ u64 rx_queue_select;
1408#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
1409#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
1410#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
1411/*0x00cd8*/ u64 rqa_vpbp_ctrl;
1412#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
1413#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
1414#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
1415/*0x00ce0*/ u64 rx_multi_cast_ctrl;
1416#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
1417#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
1418#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
1419 vxge_vBIT(val, 2, 30)
1420#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
1421/*0x00ce8*/ u64 wde_prm_ctrl;
1422#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
1423#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
1424#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
1425#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
1426#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
1427/*0x00cf0*/ u64 noa_ctrl;
1428#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
1429#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
1430#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
1431#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
1432#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
1433#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
1434#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
1435/*0x00cf8*/ u64 phase_cfg;
1436#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
1437#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
1438#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
1439#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
1440#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
1441#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
1442#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
1443#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
1444#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
1445#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
1446#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
1447#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
1448/*0x00d00*/ u64 rcq_bypq_cfg;
1449#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
1450#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
1451#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
1452 u8 unused00e00[0x00e00-0x00d08];
1453
1454/*0x00e00*/ u64 doorbell_int_status;
1455#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
1456#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
1457/*0x00e08*/ u64 doorbell_int_mask;
1458/*0x00e10*/ u64 kdfc_err_reg;
1459#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
1460#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
1461#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
1462#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
1463#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
1464/*0x00e18*/ u64 kdfc_err_mask;
1465/*0x00e20*/ u64 kdfc_err_reg_alarm;
1466#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
1467#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
1468#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
1469#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
1470#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
1471 u8 unused00e40[0x00e40-0x00e28];
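/*
 * Annotation: error sources in this block come as reg/mask/alarm triples
 * (kdfc_err_reg, kdfc_err_mask, kdfc_err_reg_alarm, and likewise below).
 * The *_reg word latches the raw events, the mask gates what propagates
 * into the *_int_status summary above it, and latched bits are typically
 * cleared by writing them back. A hedged handler sketch (mrpcim_reg as
 * in the earlier note):
 *
 *	if (readq(&mrpcim_reg->doorbell_int_status) &
 *	    VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT) {
 *		u64 err = readq(&mrpcim_reg->kdfc_err_reg);
 *		writeq(err, &mrpcim_reg->kdfc_err_reg);
 *	}
 */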
1472/*0x00e40*/ u64 kdfc_vp_partition_0;
1473#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
1474#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
1475#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
1476#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
1477#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
1478/*0x00e48*/ u64 kdfc_vp_partition_1;
1479#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
1480#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
1481#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
1482#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
1483/*0x00e50*/ u64 kdfc_vp_partition_2;
1484#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
1485#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
1486#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
1487#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
1488/*0x00e58*/ u64 kdfc_vp_partition_3;
1489#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
1490#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
1491#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
1492#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
1493/*0x00e60*/ u64 kdfc_vp_partition_4;
1494#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
1495#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
1496/*0x00e68*/ u64 kdfc_vp_partition_5;
1497#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
1498#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
1499/*0x00e70*/ u64 kdfc_vp_partition_6;
1500#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
1501#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
1502/*0x00e78*/ u64 kdfc_vp_partition_7;
1503#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
1504#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
1505/*0x00e80*/ u64 kdfc_vp_partition_8;
1506#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
1507/*0x00e88*/ u64 kdfc_w_round_robin_0;
1508#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1509#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1510#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1511#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1512#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1513#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1514#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1515#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1516
1517 u8 unused0f28[0x0f28-0x0e90];
1518
1519/*0x00f28*/ u64 kdfc_w_round_robin_20;
1520#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1521#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1522#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1523#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1524#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1525#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1526#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1527#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1528
1529#define VXGE_HW_WRR_FIFO_COUNT 20
1530
1531 u8 unused0fc8[0x0fc8-0x0f30];
1532
1533/*0x00fc8*/ u64 kdfc_w_round_robin_40;
1534#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
1535#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
1536#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
1537#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
1538#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
1539#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
1540#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
1541#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
1542
1543 u8 unused1068[0x01068-0x0fd0];
1544
1545/*0x01068*/ u64 kdfc_entry_type_sel_0;
1546#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
1547#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
1548#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
1549#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
1550#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
1551#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
1552#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
1553#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
1554/*0x01070*/ u64 kdfc_entry_type_sel_1;
1555#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
1556/*0x01078*/ u64 kdfc_fifo_0_ctrl;
1557#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
1558#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
1559#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
1560
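/*
 * Annotation: the kdfc_vp_partition_* words above carve the doorbell
 * FIFO space into per-path queues (a queue NUMBER and LENGTH for each of
 * the 17 paths), kdfc_entry_type_sel_* above picks the descriptor type
 * per queue, and each kdfc_w_round_robin_* word lists, in 5-bit slots,
 * which FIFO is served next in the weighted round-robin. A hypothetical
 * schedule that services FIFO 0 every other slot:
 *
 *	writeq(VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(0) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(1) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(0) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(2) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(0) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(3) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(0) |
 *	       VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(4),
 *	       &mrpcim_reg->kdfc_w_round_robin_0);
 */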
1561 u8 unused1100[0x01100-0x1080];
1562
1563/*0x01100*/ u64 kdfc_fifo_17_ctrl;
1564#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
1565
1566 u8 unused1600[0x01600-0x1108];
1567
1568/*0x01600*/ u64 rxmac_int_status;
1569#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
1570#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
1571#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
1572 vxge_mBIT(11)
1573/*0x01608*/ u64 rxmac_int_mask;
1574 u8 unused01618[0x01618-0x01610];
1575
1576/*0x01618*/ u64 rxmac_gen_err_reg;
1577/*0x01620*/ u64 rxmac_gen_err_mask;
1578/*0x01628*/ u64 rxmac_gen_err_alarm;
1579/*0x01630*/ u64 rxmac_ecc_err_reg;
1580#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
1581 vxge_vBIT(val, 0, 4)
1582#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
1583 vxge_vBIT(val, 4, 4)
1584#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
1585 vxge_vBIT(val, 8, 4)
1586#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
1587 vxge_vBIT(val, 12, 4)
1588#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
1589 vxge_vBIT(val, 16, 4)
1590#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
1591 vxge_vBIT(val, 20, 4)
1592#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
1593 vxge_vBIT(val, 24, 2)
1594#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
1595 vxge_vBIT(val, 26, 2)
1596#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
1597 vxge_vBIT(val, 28, 2)
1598#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
1599 vxge_vBIT(val, 30, 2)
1600#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
1601#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
1602#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
1603#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
1604#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
1605#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
1606#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
1607#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
1608#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
1609 vxge_vBIT(val, 40, 7)
1610#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
1611 vxge_vBIT(val, 47, 7)
1612#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
1613 vxge_vBIT(val, 54, 3)
1614#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
1615 vxge_vBIT(val, 57, 3)
1616#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
1617 vxge_mBIT(60)
1618#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
1619 vxge_mBIT(61)
1620/*0x01638*/ u64 rxmac_ecc_err_mask;
1621/*0x01640*/ u64 rxmac_ecc_err_alarm;
1622/*0x01648*/ u64 rxmac_various_err_reg;
1623#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
1624#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
1625#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
1626#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
1627/*0x01650*/ u64 rxmac_various_err_mask;
1628/*0x01658*/ u64 rxmac_various_err_alarm;
1629/*0x01660*/ u64 rxmac_gen_cfg;
1630#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
1631/*0x01668*/ u64 rxmac_authorize_all_addr;
1632#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
1633/*0x01670*/ u64 rxmac_authorize_all_vid;
1634#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
1635 u8 unused016c0[0x016c0-0x01678];
1636
1637/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
1638#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
1639#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
1640#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
1641#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
1642#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
1643#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
1644#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
1645#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
1646#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
1647 u8 unused016e0[0x016e0-0x016c8];
1648
1649/*0x016e0*/ u64 rxmac_cfg0_port[3];
1650#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
1651#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
1652#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
1653#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
1654#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
1655#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
1656#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
1657#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
1658 u8 unused01710[0x01710-0x016f8];
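/*
 * Annotation: a hypothetical bring-up of wire port 0 with FCS stripping,
 * using the rxmac_cfg0_port fields above (illustrative only; these
 * registers belong to the privileged mrpcim function):
 *
 *	val64 = readq(&mrpcim_reg->rxmac_cfg0_port[0]);
 *	val64 |= VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN |
 *		 VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS;
 *	val64 &= ~VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(0x3fff);
 *	val64 |= VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(9600);
 *	writeq(val64, &mrpcim_reg->rxmac_cfg0_port[0]);
 */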
1659
1660/*0x01710*/ u64 rxmac_cfg2_port[3];
1661#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
1662/*0x01728*/ u64 rxmac_pause_cfg_port[3];
1663#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
1664#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
1665#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
1666#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
1667#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
1668#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
1669#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
1670#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
1671#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
1672#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
1673 u8 unused01758[0x01758-0x01740];
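/*
 * Annotation: this register array is what the driver's pause
 * configuration path (vxge_hw_device_setpause_data() in vxge-config.c)
 * programs; GEN_EN controls generation of pause frames on the port and
 * RCV_EN controls honoring received ones. Sketch, with port, tx_pause
 * and rx_pause assumed:
 *
 *	val64 = readq(&mrpcim_reg->rxmac_pause_cfg_port[port]);
 *	if (tx_pause)
 *		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
 *	if (rx_pause)
 *		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
 *	writeq(val64, &mrpcim_reg->rxmac_pause_cfg_port[port]);
 */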
1674
1675/*0x01758*/ u64 rxmac_red_cfg0_port[3];
1676#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
1677/*0x01770*/ u64 rxmac_red_cfg1_port[3];
1678#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
1679#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
1680/*0x01788*/ u64 rxmac_red_cfg2_port[3];
1681#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
1682/*0x017a0*/ u64 rxmac_link_util_port[3];
1683#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
1684 vxge_vBIT(val, 1, 7)
1685#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
1686#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
1687 vxge_vBIT(val, 12, 4)
1688#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
1689#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
1690 u8 unused017d0[0x017d0-0x017b8];
1691
1692/*0x017d0*/ u64 rxmac_status_port[3];
1693#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
1694 u8 unused01800[0x01800-0x017e8];
1695
1696/*0x01800*/ u64 rxmac_rx_pa_cfg0;
1697#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
1698#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
1699#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
1700#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
1701#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
1702#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
1703#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
1704#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
1705#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
1706#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
1707#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
1708#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
1709#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
1710#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
1711/*0x01808*/ u64 rxmac_rx_pa_cfg1;
1712#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
1713#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
1714#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
1715#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
1716#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
1717#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
1718 u8 unused01828[0x01828-0x01810];
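/*
 * Annotation: rxmac_rx_pa_cfg0/1 steer the receive parser; the TOSS_*
 * bits select whether frames failing L3/L4 checksum or parser checks are
 * dropped outright rather than delivered with the error flagged. A
 * hypothetical strict setting that drops any frame with a bad L4
 * checksum:
 *
 *	val64 = readq(&mrpcim_reg->rxmac_rx_pa_cfg0);
 *	val64 |= VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR;
 *	writeq(val64, &mrpcim_reg->rxmac_rx_pa_cfg0);
 */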
1719
1720/*0x01828*/ u64 rts_mgr_cfg0;
1721#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
1722#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
1723#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
1724#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
1725#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
1726#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
1727#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
1728#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
1729#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
1730/*0x01830*/ u64 rts_mgr_cfg1;
1731#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
1732#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
1733/*0x01838*/ u64 rts_mgr_criteria_priority;
1734#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
1735#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
1736#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
1737#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
1738#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
1739#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
1740#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
1741#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
1742#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
1743/*0x01840*/ u64 rts_mgr_da_pause_cfg;
1744#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
1745/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
1746#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
1747 vxge_vBIT(val, 0, 17)
1748 u8 unused01890[0x01890-0x01850];
1749/*0x01890*/ u64 rts_mgr_cbasin_cfg;
1750 u8 unused01968[0x01968-0x01898];
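/*
 * Annotation: rts_mgr_criteria_priority orders the receive-traffic
 * steering criteria; each 3-bit field assigns a rank to one match type
 * (Ethertype, ICMP/TCP-SYN, L4 port number, RTH, DS, QoS, and so on).
 * The meaning of the rank values is not spelled out in this header, so
 * the setting below is purely illustrative:
 *
 *	writeq(VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(0) |
 *	       VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(1) |
 *	       VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(2),
 *	       &mrpcim_reg->rts_mgr_criteria_priority);
 */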
1751
1752/*0x01968*/ u64 dbg_stat_rx_any_frms;
1753#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
1754#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
1755#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
1756 vxge_vBIT(val, 16, 8)
1757 u8 unused01a00[0x01a00-0x01970];
1758
1759/*0x01a00*/ u64 rxmac_red_rate_vp[17];
1760#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
1761#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
1762#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
1763#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
1764#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
1765#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
1766#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
1767#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
1768 u8 unused01e00[0x01e00-0x01a88];
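/*
 * Annotation: rxmac_red_rate_vp[17] gives each virtual path its random
 * early discard rates, one 4-bit rate per CRATE (current-rate) and FRATE
 * (fill-rate) threshold level. Purely illustrative, ramping the drop
 * rate across the four levels for vpath 3:
 *
 *	writeq(VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(1) |
 *	       VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(2) |
 *	       VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(4) |
 *	       VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(8),
 *	       &mrpcim_reg->rxmac_red_rate_vp[3]);
 */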
1769
1770/*0x01e00*/ u64 xgmac_int_status;
1771#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
1772#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
1773 vxge_mBIT(7)
1774#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
1775 vxge_mBIT(11)
1776#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
1777#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
1778#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
1779/*0x01e08*/ u64 xgmac_int_mask;
1780/*0x01e10*/ u64 xmac_gen_err_reg;
1781#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
1782 vxge_mBIT(7)
1783#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
1784 vxge_mBIT(11)
1785#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
1786#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
1787 vxge_mBIT(19)
1788#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
1789 vxge_mBIT(23)
1790#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
1791#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
1792#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
1793 vxge_vBIT(val, 40, 2)
1794#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
1795 vxge_vBIT(val, 42, 2)
1796#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
1797 vxge_vBIT(val, 44, 2)
1798#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
1799 vxge_vBIT(val, 46, 2)
1800#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
1801 vxge_vBIT(val, 48, 2)
1802#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
1803 vxge_vBIT(val, 50, 2)
1804#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
1805 vxge_vBIT(val, 52, 2)
1806#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
1807 vxge_vBIT(val, 54, 2)
1808#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
1809 vxge_vBIT(val, 56, 2)
1810#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
1811 vxge_vBIT(val, 58, 2)
1812#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
1813/*0x01e18*/ u64 xmac_gen_err_mask;
1814/*0x01e20*/ u64 xmac_gen_err_alarm;
1815/*0x01e28*/ u64 xmac_link_err_port0_reg;
1816#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
1817#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
1818#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
1819#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
1820#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
1821 vxge_mBIT(19)
1822#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
1823#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
1824#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
1825#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
1826#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
1827#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
1828 vxge_mBIT(47)
1829/*0x01e30*/ u64 xmac_link_err_port0_mask;
1830/*0x01e38*/ u64 xmac_link_err_port0_alarm;
1831/*0x01e40*/ u64 xmac_link_err_port1_reg;
1832/*0x01e48*/ u64 xmac_link_err_port1_mask;
1833/*0x01e50*/ u64 xmac_link_err_port1_alarm;
1834/*0x01e58*/ u64 xgxs_gen_err_reg;
1835#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
1836/*0x01e60*/ u64 xgxs_gen_err_mask;
1837/*0x01e68*/ u64 xgxs_gen_err_alarm;
1838/*0x01e70*/ u64 asic_ntwk_err_reg;
1839#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
1840#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
1841#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
1842#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
1843#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
1844#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
1845/*0x01e78*/ u64 asic_ntwk_err_mask;
1846/*0x01e80*/ u64 asic_ntwk_err_alarm;
1847/*0x01e88*/ u64 asic_gpio_err_reg;
1848#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
1849/*0x01e90*/ u64 asic_gpio_err_mask;
1850/*0x01e98*/ u64 asic_gpio_err_alarm;
1851/*0x01ea0*/ u64 xgmac_gen_status;
1852#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
1853#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
1854/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
1855#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
1856 vxge_vBIT(val, 0, 17)
1857/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
1858#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
1859/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
1860#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
1861 vxge_vBIT(val, 0, 17)
1862/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
1863#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
1864 u8 unused01f40[0x01f40-0x01ed0];
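/*
 * Annotation: each wire port is gated by xgmac_main_cfg_port. A minimal
 * sketch to enable port 0 (again assuming mrpcim_reg; only the
 * privileged mrpcim function may touch these):
 *
 *	val64 = readq(&mrpcim_reg->xgmac_main_cfg_port[0]);
 *	writeq(val64 | VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN,
 *	       &mrpcim_reg->xgmac_main_cfg_port[0]);
 */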
1865
1866/*0x01f40*/ u64 xmac_gen_cfg;
1867#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
1868#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
1869#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
1870#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
1871#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
1872/*0x01f48*/ u64 xmac_timestamp;
1873#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
1874#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
1875#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
1876#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
1877#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
1878/*0x01f50*/ u64 xmac_stats_gen_cfg;
1879#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
1880#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
1881#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
1882/*0x01f58*/ u64 xmac_stats_sys_cmd;
1883#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
1884#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
1885#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
1886#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
1887/*0x01f60*/ u64 xmac_stats_sys_data;
1888#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
1889 u8 unused01f80[0x01f80-0x01f68];
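/*
 * Annotation: XMAC statistics are fetched indirectly through this
 * command/data pair using the same strobe-and-poll idiom the driver
 * applies to vpath statistics in vxge-config.c: write OP, LOC_SEL and
 * OFFSET_SEL together with STROBE, wait for the hardware to clear the
 * strobe, then read the result. Sketch (loc, offset and stat assumed):
 *
 *	writeq(VXGE_HW_XMAC_STATS_SYS_CMD_OP(VXGE_HW_STATS_OP_READ) |
 *	       VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
 *	       VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(loc) |
 *	       VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset),
 *	       &mrpcim_reg->xmac_stats_sys_cmd);
 *	if (__vxge_hw_device_register_poll(&mrpcim_reg->xmac_stats_sys_cmd,
 *			VXGE_HW_XMAC_STATS_SYS_CMD_STROBE, 1000) == VXGE_HW_OK)
 *		*stat = readq(&mrpcim_reg->xmac_stats_sys_data);
 */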
1890
1891/*0x01f80*/ u64 asic_ntwk_ctrl;
1892#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
1893#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
1894#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
1895/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
1896#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
1897/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
1898#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
1899/*0x01f98*/ u64 xmac_cfg_port[3];
1900#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
1901#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
1902#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
1903#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
1904/*0x01fb0*/ u64 xmac_station_addr_port[2];
1905#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
1906 u8 unused02020[0x02020-0x01fc0];
1907
1908/*0x02020*/ u64 lag_cfg;
1909#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
1910#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
1911#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
1912#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
1913#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
1914/*0x02028*/ u64 lag_status;
1915#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
1916#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
1917 vxge_vBIT(val, 8, 8)
1918/*0x02030*/ u64 lag_active_passive_cfg;
1919#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
1920#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
1921#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
1922#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
1923#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
1924#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
1925 vxge_vBIT(val, 32, 16)
1926 u8 unused02040[0x02040-0x02038];
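/*
 * Annotation: link aggregation is switched on through lag_cfg; MODE
 * selects the aggregation scheme (the encodings are not documented in
 * this header) and the lag_lacp_cfg / lag_timer_cfg_* words below tune
 * the LACP state machines. Hypothetical enable:
 *
 *	val64 = readq(&mrpcim_reg->lag_cfg);
 *	val64 &= ~VXGE_HW_LAG_CFG_MODE(0x3);
 *	val64 |= VXGE_HW_LAG_CFG_EN | VXGE_HW_LAG_CFG_MODE(1);
 *	writeq(val64, &mrpcim_reg->lag_cfg);
 */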
1927
1928/*0x02040*/ u64 lag_lacp_cfg;
1929#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
1930#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
1931#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
1932#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
1933/*0x02048*/ u64 lag_timer_cfg_1;
1934#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
1935#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
1936#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
1937#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
1938/*0x02050*/ u64 lag_timer_cfg_2;
1939#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
1940#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
1941#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
1942#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
1943/*0x02058*/ u64 lag_sys_id;
1944#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
1945#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
1946#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
1947/*0x02060*/ u64 lag_sys_cfg;
1948#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1949 u8 unused02070[0x02070-0x02068];
1950
1951/*0x02070*/ u64 lag_aggr_addr_cfg[2];
1952#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
1953#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
1954#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
1955/*0x02080*/ u64 lag_aggr_id_cfg[2];
1956#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
1957/*0x02090*/ u64 lag_aggr_admin_key[2];
1958#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1959/*0x020a0*/ u64 lag_aggr_alt_admin_key;
1960#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1961#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
1962/*0x020a8*/ u64 lag_aggr_oper_key[2];
1963#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
1964/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
1965#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
1966/*0x020c8*/ u64 lag_aggr_partner_info[2];
1967#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1968#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
1969 vxge_vBIT(val, 16, 16)
1970/*0x020d8*/ u64 lag_aggr_state[2];
1971#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
1972#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
1973#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
1974#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
1975 u8 unused020f0[0x020f0-0x020e8];
1976
1977/*0x020f0*/ u64 lag_port_cfg[2];
1978#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
1979#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
1980#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
1981#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
1982/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
1983#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
1984#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
1985#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
1986#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
1987/*0x02110*/ u64 lag_port_actor_admin_state[2];
1988#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
1989#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
1990#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
1991#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
1992#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
1993#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
1994#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
1995#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
1996/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
1997#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
1998/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
1999#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
2000#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
2001#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
2002 vxge_vBIT(val, 32, 16)
2003#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
2004 vxge_vBIT(val, 48, 16)
2005/*0x02140*/ u64 lag_port_partner_admin_state[2];
2006#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
2007#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
2008#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
2009#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
2010#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
2011#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
2012#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
2013#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
2014/*0x02150*/ u64 lag_port_to_aggr[2];
2015#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
2016#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
2017/*0x02160*/ u64 lag_port_actor_oper_key[2];
2018#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
2019/*0x02170*/ u64 lag_port_actor_oper_state[2];
2020#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
2021#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
2022#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
2023#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
2024#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
2025#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
2026#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
2027#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
2028/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
2029#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
2030 vxge_vBIT(val, 0, 48)
2031/*0x02190*/ u64 lag_port_partner_oper_info[2];
2032#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
2033 vxge_vBIT(val, 0, 16)
2034#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
2035 vxge_vBIT(val, 16, 16)
2036#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
2037 vxge_vBIT(val, 32, 16)
2038#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
2039 vxge_vBIT(val, 48, 16)
2040/*0x021a0*/ u64 lag_port_partner_oper_state[2];
2041#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
2042#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
2043#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
2044#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
2045 vxge_mBIT(15)
2046#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
2047#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
2048#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
2049#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
2050/*0x021b0*/ u64 lag_port_state_vars[2];
2051#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
2052#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
2053#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
2054#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
2055#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
2056#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
2057#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
2058#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
2059#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
2060#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
2061 vxge_mBIT(32)
2062#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
2063 vxge_mBIT(33)
2064#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
2065#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
2066#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
2067#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
2068 vxge_vBIT(val, 41, 3)
2069#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
2070#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
2071#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
2072#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
2073 vxge_vBIT(val, 56, 4)
2074#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
2075 vxge_vBIT(val, 60, 4)
2076/*0x021c0*/ u64 lag_port_timer_cntr[2];
2077#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
2078#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
2079 vxge_vBIT(val, 8, 8)
2080#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
2081#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
2082#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
2083 vxge_vBIT(val, 32, 8)
2084#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
2085 vxge_vBIT(val, 40, 8)
2086#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
2087 vxge_vBIT(val, 48, 8)
2088#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
2089 vxge_vBIT(val, 56, 8)
2090 u8 unused02208[0x02700-0x021d0];
2091
2092/*0x02700*/ u64 rtdma_int_status;
2093#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
2094#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
2095#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
2096#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
2097/*0x02708*/ u64 rtdma_int_mask;
2098/*0x02710*/ u64 pda_alarm_reg;
2099#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
2100#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
2101/*0x02718*/ u64 pda_alarm_mask;
2102/*0x02720*/ u64 pda_alarm_alarm;
2103/*0x02728*/ u64 pcc_error_reg;
2104#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
2105#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
2106#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
2107#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
2108#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
2109#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
2110/*0x02730*/ u64 pcc_error_mask;
2111/*0x02738*/ u64 pcc_error_alarm;
2112/*0x02740*/ u64 lso_error_reg;
2113#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
2114#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
2115/*0x02748*/ u64 lso_error_mask;
2116/*0x02750*/ u64 lso_error_alarm;
2117/*0x02758*/ u64 sm_error_reg;
2118#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
2119/*0x02760*/ u64 sm_error_mask;
2120/*0x02768*/ u64 sm_error_alarm;
2121
2122 u8 unused027a8[0x027a8-0x02770];
2123
2124/*0x027a8*/ u64 txd_ownership_ctrl;
2125#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
2126/*0x027b0*/ u64 pcc_cfg;
2127#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
2128#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
2129/*0x027b8*/ u64 pcc_control;
2130#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
2131#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
2132#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
2133/*0x027c0*/ u64 pda_status1;
2134#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
2135#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
2136#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
2137#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
2138#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
2139#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
2140#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
2141#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
2142/*0x027c8*/ u64 rtdma_bw_timer;
2143#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
2144
2145 u8 unused02900[0x02900-0x027d0];
2146/*0x02900*/ u64 g3cmct_int_status;
2147#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
2148/*0x02908*/ u64 g3cmct_int_mask;
2149/*0x02910*/ u64 g3cmct_err_reg;
2150#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
2151#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
2152#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
2153#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
2154#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
2155#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
2156#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
2157/*0x02918*/ u64 g3cmct_err_mask;
2158/*0x02920*/ u64 g3cmct_err_alarm;
2159 u8 unused03000[0x03000-0x02928];
2160
2161/*0x03000*/ u64 mc_int_status;
2162#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
2163#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
2164#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
2165#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
2166/*0x03008*/ u64 mc_int_mask;
2167/*0x03010*/ u64 mc_err_reg;
2168#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
2169#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
2170#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
2171#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
2172#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
2173#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
2174#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
2175#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
2176#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
2177#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
2178#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
2179/*0x03018*/ u64 mc_err_mask;
2180/*0x03020*/ u64 mc_err_alarm;
2181/*0x03028*/ u64 grocrc_alarm_reg;
2182#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
2183#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
2184/*0x03030*/ u64 grocrc_alarm_mask;
2185/*0x03038*/ u64 grocrc_alarm_alarm;
2186 u8 unused03100[0x03100-0x03040];
2187
2188/*0x03100*/ u64 rx_thresh_cfg_repl;
2189#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
2190#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
2191#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
2192#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
2193#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
2194#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
2195#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
2196#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
2197 u8 unused033b8[0x033b8-0x03108];
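/*
 * Annotation: rx_thresh_cfg_repl sets the pause and RED occupancy
 * thresholds for the replication queue that carries multicast/broadcast
 * copies. Threshold units are presumably buffer-occupancy counts; the
 * values below are placeholders only:
 *
 *	writeq(VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(0x10) |
 *	       VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(0x30) |
 *	       VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(0x40) |
 *	       VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(0x60) |
 *	       VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(0x80) |
 *	       VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(0xa0),
 *	       &mrpcim_reg->rx_thresh_cfg_repl);
 */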
2198
2199/*0x033b8*/ u64 fbmc_ecc_cfg;
2200#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
2201 u8 unused03400[0x03400-0x033c0];
2202
2203/*0x03400*/ u64 pcipif_int_status;
2204#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
2205#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
2206#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
2207#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
2208#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
2209 vxge_mBIT(19)
2210/*0x03408*/ u64 pcipif_int_mask;
2211/*0x03410*/ u64 dbecc_err_reg;
2212#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
2213#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
2214#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
2215#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
2216#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
2217#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
2218/*0x03418*/ u64 dbecc_err_mask;
2219/*0x03420*/ u64 dbecc_err_alarm;
2220/*0x03428*/ u64 sbecc_err_reg;
2221#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
2222#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
2223#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
2224#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
2225#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
2226#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
2227/*0x03430*/ u64 sbecc_err_mask;
2228/*0x03438*/ u64 sbecc_err_alarm;
2229/*0x03440*/ u64 general_err_reg;
2230#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
2231#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
2232#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
2233#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
2234#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
2235#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
2236#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
2237/*0x03448*/ u64 general_err_mask;
2238/*0x03450*/ u64 general_err_alarm;
2239/*0x03458*/ u64 srpcim_msg_reg;
2240#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
2241 vxge_mBIT(0)
2242#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
2243 vxge_mBIT(1)
2244#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
2245 vxge_mBIT(2)
2246#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
2247 vxge_mBIT(3)
2248#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
2249 vxge_mBIT(4)
2250#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
2251 vxge_mBIT(5)
2252#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
2253 vxge_mBIT(6)
2254#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
2255 vxge_mBIT(7)
2256#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
2257 vxge_mBIT(8)
2258#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
2259 vxge_mBIT(9)
2260#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
2261 vxge_mBIT(10)
2262#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
2263 vxge_mBIT(11)
2264#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
2265 vxge_mBIT(12)
2266#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
2267 vxge_mBIT(13)
2268#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
2269 vxge_mBIT(14)
2270#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
2271 vxge_mBIT(15)
2272#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
2273 vxge_mBIT(16)
2274/*0x03460*/ u64 srpcim_msg_mask;
2275/*0x03468*/ u64 srpcim_msg_alarm;
2276 u8 unused03600[0x03600-0x03470];
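/*
 * Annotation: the seventeen VPLANEn bits above appear to map the
 * seventeen virtual planes through which the per-function SRPCIM
 * instances message the MRPCIM; most per-function resources in this
 * header are sized [17] for the same reason.
 */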
2277
2278/*0x03600*/ u64 gcmg1_int_status;
2279#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
2280#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
2281#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
2282#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
2283#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
2284#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
2285#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
2286#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
2287#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
2288/*0x03608*/ u64 gcmg1_int_mask;
2289 u8 unused03a00[0x03a00-0x03610];
2290
2291/*0x03a00*/ u64 pcmg1_int_status;
2292#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
2293#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
2294#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
2295#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
2296/*0x03a08*/ u64 pcmg1_int_mask;
2297 u8 unused04000[0x04000-0x03a10];
2298
2299/*0x04000*/ u64 one_int_status;
2300#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
2301#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
2302 vxge_mBIT(13)
2303#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
2304 vxge_mBIT(14)
2305#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
2306#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
2307#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
2308#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
2309#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
2310#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
2311/*0x04008*/ u64 one_int_mask;
2312 u8 unused04818[0x04818-0x04010];
2313
2314/*0x04818*/ u64 noa_wct_ctrl;
2315#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
2316/*0x04820*/ u64 rc_cfg2;
2317#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
2318#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
2319#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
2320#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
2321/*0x04828*/ u64 rc_cfg3;
2322#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
2323/*0x04830*/ u64 rx_multi_cast_ctrl1;
2324#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
2325#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
2326/*0x04838*/ u64 rxdm_dbg_rd;
2327#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
2328#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
2329/*0x04840*/ u64 rxdm_dbg_rd_data;
2330#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
2331/*0x04848*/ u64 rqa_top_prty_for_vh[17];
2332#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
2333 vxge_vBIT(val, 59, 5)
2334 u8 unused04900[0x04900-0x048d0];
2335
2336/*0x04900*/ u64 tim_status;
2337#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
2338/*0x04908*/ u64 tim_ecc_enable;
2339#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
2340#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
2341#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
2342/*0x04910*/ u64 tim_bp_ctrl;
2343#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
2344#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
2345#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
2346/*0x04918*/ u64 tim_resource_assignment_vh[17];
2347#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
2348/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
2349#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
2350 u8 unused04b00[0x04b00-0x04a28];
2351
2352/*0x04b00*/ u64 gcmg2_int_status;
2353#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
2354#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
2355#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
2356/*0x04b08*/ u64 gcmg2_int_mask;
2357/*0x04b10*/ u64 gxtmc_err_reg;
2358#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
2359#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
2360#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
2361#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
2362#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
2363#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
2364#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
2365#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
2366#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
2367#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
2368#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
2369#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
2370#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
2371#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
2372#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
2373#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
2374 vxge_mBIT(21)
2375#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
2376 vxge_mBIT(22)
2377#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
2378#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
2379 vxge_mBIT(24)
2380#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
2381 vxge_mBIT(25)
2382#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
2383#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
2384#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
2385#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
2386#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
2387#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
2388#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
2389#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
2390#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
2391#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
2392/*0x04b18*/ u64 gxtmc_err_mask;
2393/*0x04b20*/ u64 gxtmc_err_alarm;
2394/*0x04b28*/ u64 cmc_err_reg;
2395#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
2396/*0x04b30*/ u64 cmc_err_mask;
2397/*0x04b38*/ u64 cmc_err_alarm;
2398/*0x04b40*/ u64 gcp_err_reg;
2399#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
2400#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
2401#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
2402#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
2403/*0x04b48*/ u64 gcp_err_mask;
2404/*0x04b50*/ u64 gcp_err_alarm;
2405 u8 unused04f00[0x04f00-0x04b58];
2406
2407/*0x04f00*/ u64 pcmg2_int_status;
2408#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
2409#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
2410#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
2411/*0x04f08*/ u64 pcmg2_int_mask;
2412/*0x04f10*/ u64 pxtmc_err_reg;
2413#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
2414#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
/*0x04f18*/ u64 pxtmc_err_mask;
/*0x04f20*/ u64 pxtmc_err_alarm;
/*0x04f28*/ u64 cp_err_reg;
#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
/*0x04f30*/ u64 cp_err_mask;
/*0x04f38*/ u64 cp_err_alarm;
        u8 unused04fe8[0x04f50-0x04f40];

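/*
 * The layout above repeats for each fault source in this block: a latched
 * status register (*_reg), a per-bit interrupt mask (*_mask), and an
 * *_alarm register that presumably reflects the still-unmasked status
 * bits.  The _SG_ and _DB_ suffixes appear to denote single-bit
 * (correctable) and double-bit (uncorrectable) ECC errors respectively.
 */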
/*0x04f50*/ u64 cp_exc_reg;
#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
/*0x04f58*/ u64 cp_exc_mask;
/*0x04f60*/ u64 cp_exc_alarm;
/*0x04f68*/ u64 cp_exc_cause;
#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
        u8 unused05200[0x05200-0x04f70];

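/*
 * Message-unit interrupt summary.  msg_int_status fans out to the TIM
 * error register and the msg_err/msg_err2/msg_err3 registers defined
 * below.  The MP_MXP/UP_UXP prefixes distinguish two on-chip processor
 * blocks and DMQ/UMQ name a pair of message queues (interpretation
 * inferred from the macro names).
 */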
/*0x05200*/ u64 msg_int_status;
#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
/*0x05208*/ u64 msg_int_mask;
/*0x05210*/ u64 tim_err_reg;
#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
/*0x05218*/ u64 tim_err_mask;
/*0x05220*/ u64 tim_err_alarm;
/*0x05228*/ u64 msg_err_reg;
#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
        vxge_mBIT(2)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
        vxge_mBIT(3)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
        vxge_mBIT(36)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
/*0x05230*/ u64 msg_err_mask;
/*0x05238*/ u64 msg_err_alarm;
        u8 unused05340[0x05340-0x05240];

/*0x05340*/ u64 msg_exc_reg;
#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
/*0x05348*/ u64 msg_exc_mask;
/*0x05350*/ u64 msg_exc_alarm;
/*0x05358*/ u64 msg_exc_cause;
#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
        u8 unused05368[0x05380-0x05360];

/*0x05380*/ u64 msg_err2_reg;
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(0)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(1)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(2)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
        vxge_mBIT(3)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
        vxge_mBIT(5)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
        vxge_mBIT(12)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
        vxge_mBIT(13)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
        vxge_mBIT(14)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
        vxge_mBIT(15)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
        vxge_mBIT(16)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
        vxge_mBIT(17)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
        vxge_mBIT(18)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
        vxge_mBIT(19)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
        vxge_mBIT(20)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
        vxge_mBIT(21)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
        vxge_mBIT(22)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
        vxge_mBIT(23)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
        vxge_mBIT(24)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
        vxge_mBIT(25)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
        vxge_mBIT(26)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
        vxge_mBIT(27)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
        vxge_mBIT(28)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(30)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(31)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
        vxge_mBIT(32)
#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
/*0x05388*/ u64 msg_err2_mask;
/*0x05390*/ u64 msg_err2_alarm;
/*0x05398*/ u64 msg_err3_reg;
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
/*0x053a0*/ u64 msg_err3_mask;
/*0x053a8*/ u64 msg_err3_alarm;
        u8 unused05600[0x05600-0x053b0];

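/*
 * FAU block: per-port general and ECC error registers, checksum
 * replacement configuration (fau_pa_cfg, judging by the bit names), an
 * Rx-path debug counter and link-aggregation collection config.
 */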
/*0x05600*/ u64 fau_gen_err_reg;
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
/*0x05608*/ u64 fau_gen_err_mask;
/*0x05610*/ u64 fau_gen_err_alarm;
/*0x05618*/ u64 fau_ecc_err_reg;
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
        vxge_vBIT(val, 2, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
        vxge_vBIT(val, 4, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
        vxge_vBIT(val, 8, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
        vxge_vBIT(val, 10, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
        vxge_vBIT(val, 14, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
        vxge_vBIT(val, 16, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
        vxge_vBIT(val, 18, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
        vxge_vBIT(val, 20, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
/*0x05620*/ u64 fau_ecc_err_mask;
/*0x05628*/ u64 fau_ecc_err_alarm;
        u8 unused05658[0x05658-0x05630];
/*0x05658*/ u64 fau_pa_cfg;
#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
        u8 unused05668[0x05668-0x05660];

/*0x05668*/ u64 dbg_stats_fau_rx_path;
#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
        vxge_vBIT(val, 32, 32)
        u8 unused056c0[0x056c0-0x05670];

/*0x056c0*/ u64 fau_lag_cfg;
#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
        u8 unused05800[0x05800-0x056c8];

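/*
 * TPA group: interrupt summary for the ORP, PTM and TPA fault registers
 * that follow, plus their ECC/phase configuration and a Tx-path debug
 * counter (grouping read directly from the offsets and bit names below).
 */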
/*0x05800*/ u64 tpa_int_status;
#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
/*0x05808*/ u64 tpa_int_mask;
/*0x05810*/ u64 orp_err_reg;
#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
/*0x05818*/ u64 orp_err_mask;
/*0x05820*/ u64 orp_err_alarm;
/*0x05828*/ u64 ptm_alarm_reg;
#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
/*0x05830*/ u64 ptm_alarm_mask;
/*0x05838*/ u64 ptm_alarm_alarm;
/*0x05840*/ u64 tpa_error_reg;
#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
/*0x05848*/ u64 tpa_error_mask;
/*0x05850*/ u64 tpa_error_alarm;
/*0x05858*/ u64 tpa_global_cfg;
#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
        u8 unused05868[0x05870-0x05860];

/*0x05870*/ u64 ptm_ecc_cfg;
#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
/*0x05878*/ u64 ptm_phase_cfg;
#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
        u8 unused05898[0x05898-0x05880];

/*0x05898*/ u64 dbg_stats_tpa_tx_path;
#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
        vxge_vBIT(val, 32, 32)
        u8 unused05900[0x05900-0x058a0];

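/*
 * TMAC interrupt and per-port Tx MAC registers.  A minimal illustrative
 * sketch of driving them, assuming a mapped pointer "reg" to this
 * register block (the pointer name is not part of this header):
 *
 *	u64 val = readq(&reg->txmac_cfg0_port[0]);
 *	val |= VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN;
 *	writeq(val, &reg->txmac_cfg0_port[0]);
 *
 * vxge_mBIT()/vxge_vBIT() count bit positions from the MSB of the 64-bit
 * register (see their definitions earlier in this header), so the same
 * macros serve both as masks and as field composers.
 */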
/*0x05900*/ u64 tmac_int_status;
#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
/*0x05908*/ u64 tmac_int_mask;
/*0x05910*/ u64 txmac_gen_err_reg;
#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
/*0x05918*/ u64 txmac_gen_err_mask;
/*0x05920*/ u64 txmac_gen_err_alarm;
/*0x05928*/ u64 txmac_ecc_err_reg;
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
/*0x05930*/ u64 txmac_ecc_err_mask;
/*0x05938*/ u64 txmac_ecc_err_alarm;
        u8 unused05978[0x05978-0x05940];

/*0x05978*/ u64 dbg_stat_tx_any_frms;
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
        vxge_vBIT(val, 16, 8)
        u8 unused059a0[0x059a0-0x05980];

/*0x059a0*/ u64 txmac_link_util_port[3];
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
        vxge_vBIT(val, 1, 7)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
        vxge_vBIT(val, 12, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
/*0x059b8*/ u64 txmac_cfg0_port[3];
#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
/*0x059d0*/ u64 txmac_cfg1_port[3];
#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
/*0x059e8*/ u64 txmac_status_port[3];
#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
        u8 unused05a20[0x05a20-0x05a00];

/*0x05a20*/ u64 lag_distrib_dest;
#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
/*0x05a28*/ u64 lag_marker_cfg;
#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
        vxge_vBIT(val, 32, 16)
#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
/*0x05a30*/ u64 lag_tx_cfg;
#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
/*0x05a38*/ u64 lag_tx_status;
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
        vxge_vBIT(val, 0, 8)
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
        vxge_vBIT(val, 8, 8)
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
        vxge_vBIT(val, 16, 8)
        u8 unused05d48[0x05d48-0x05a40];

/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
#define \
VXGE_HW_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
        vxge_vBIT(val, 0, 64)
        u8 unused06420[0x06420-0x05dd0];

/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
        vxge_vBIT(val, 0, 64)
/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];

/*0x06530*/ u64 debug_stats0;
#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
/*0x06538*/ u64 debug_stats1;
#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
/*0x06540*/ u64 debug_stats2;
#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
/*0x06548*/ u64 debug_stats3_vplane[17];
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
/*0x065d0*/ u64 debug_stats4_vplane[17];
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)

        u8 unused07000[0x07000-0x06658];

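/*
 * Top-level MRPCIM interrupt summary.  Each bit names one sub-block whose
 * own *_int_status or *_err_reg appears elsewhere in this structure, so a
 * handler would typically read this summary and then walk only the
 * asserted sub-blocks, roughly:
 *
 *	u64 gen = readq(&reg->mrpcim_general_int_status);
 *	if (gen & VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT)
 *		service_pic_group();
 *
 * where "reg" and service_pic_group() are illustrative placeholders, not
 * names from this driver.
 */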
/*0x07000*/ u64 mrpcim_general_int_status;
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
/*0x07008*/ u64 mrpcim_general_int_mask;
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
/*0x07010*/ u64 mrpcim_ppif_int_status;
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
        vxge_mBIT(31)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
        vxge_mBIT(32)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
        vxge_mBIT(33)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
        vxge_mBIT(34)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
        vxge_mBIT(35)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
        vxge_mBIT(36)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
        vxge_mBIT(37)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
        vxge_mBIT(38)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
        vxge_mBIT(39)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
        vxge_mBIT(40)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
        vxge_mBIT(41)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
        vxge_mBIT(42)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
        vxge_mBIT(43)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
        vxge_mBIT(44)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
        vxge_mBIT(45)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
        vxge_mBIT(46)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
        vxge_mBIT(47)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
        vxge_mBIT(55)
/*0x07018*/ u64 mrpcim_ppif_int_mask;
        u8 unused07028[0x07028-0x07020];

/*0x07028*/ u64 ini_errors_reg;
#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
/*0x07030*/ u64 ini_errors_mask;
/*0x07038*/ u64 ini_errors_alarm;
/*0x07040*/ u64 dma_errors_reg;
#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
/*0x07048*/ u64 dma_errors_mask;
/*0x07050*/ u64 dma_errors_alarm;
/*0x07058*/ u64 tgt_errors_reg;
#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
/*0x07060*/ u64 tgt_errors_mask;
/*0x07068*/ u64 tgt_errors_alarm;
/*0x07070*/ u64 config_errors_reg;
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
/*0x07078*/ u64 config_errors_mask;
/*0x07080*/ u64 config_errors_alarm;
        u8 unused07090[0x07090-0x07088];

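/*
 * PCIe credit arbitration faults: global write/read credit arbiter errors
 * here, with per-plane variants (crdt_errors_vplane_*[17]) further below,
 * apparently one entry per virtual plane to match the other [17] arrays
 * in this block.
 */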
/*0x07090*/ u64 crdt_errors_reg;
#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
        vxge_mBIT(15)
#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
        vxge_mBIT(23)
#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
        vxge_mBIT(47)
/*0x07098*/ u64 crdt_errors_mask;
/*0x070a0*/ u64 crdt_errors_alarm;
        u8 unused070b0[0x070b0-0x070a8];

/*0x070b0*/ u64 mrpcim_general_errors_reg;
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
        vxge_mBIT(47)
#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
/*0x070b8*/ u64 mrpcim_general_errors_mask;
/*0x070c0*/ u64 mrpcim_general_errors_alarm;
        u8 unused070d0[0x070d0-0x070c8];

/*0x070d0*/ u64 pll_errors_reg;
#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
/*0x070d8*/ u64 pll_errors_mask;
/*0x070e0*/ u64 pll_errors_alarm;
/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
        vxge_vBIT(val, 0, 17)
/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
        vxge_vBIT(val, 0, 17)
/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
        u8 unused07128[0x07128-0x07118];

/*0x07128*/ u64 crdt_errors_vplane_reg[17];
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
        vxge_mBIT(3)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
        vxge_mBIT(7)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
        vxge_mBIT(11)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
        vxge_mBIT(15)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
        vxge_mBIT(19)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
        vxge_mBIT(23)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
        vxge_mBIT(27)
#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
        vxge_mBIT(31)
/*0x07130*/ u64 crdt_errors_vplane_mask[17];
/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
        u8 unused072f0[0x072f0-0x072c0];

/*0x072f0*/ u64 mrpcim_rst_in_prog;
#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
/*0x072f8*/ u64 mrpcim_reg_modified;
#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)

        u8 unused07378[0x07378-0x07300];

/*0x07378*/ u64 write_arb_pending;
#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
/*0x07380*/ u64 read_arb_pending;
#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
/*0x07388*/ u64 dmaif_dmadbl_pending;
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
        vxge_vBIT(val, 13, 51)
/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
        vxge_vBIT(val, 0, 8)
/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
        vxge_vBIT(val, 4, 12)
        u8 unused07500[0x07500-0x074a0];

/*0x07500*/ u64 mrpcim_general_cfg1;
#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
/*0x07508*/ u64 mrpcim_general_cfg2;
#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
        vxge_vBIT(val, 47, 5)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
/*0x07510*/ u64 mrpcim_general_cfg3;
#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
        vxge_vBIT(val, 36, 16)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
/*0x07518*/ u64 mrpcim_stats_start_host_addr;
#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
        vxge_vBIT(val, 0, 57)

        u8 unused07950[0x07950-0x07520];

/*0x07950*/ u64 rdcrdtarb_cfg0;
#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
        vxge_vBIT(val, 18, 6)
#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
        vxge_vBIT(val, 26, 6)
#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
        vxge_vBIT(val, 34, 6)
#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
        u8 unused07be8[0x07be8-0x07958];

/*0x07be8*/ u64 bf_sw_reset;
#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
/*0x07bf0*/ u64 sw_reset_status;
#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
        u8 unused07d30[0x07d30-0x07bf8];

/*0x07d30*/ u64 mrpcim_debug_stats0;
#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
        vxge_vBIT(val, 32, 32)
/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
        vxge_vBIT(val, 32, 32)
/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
        vxge_vBIT(val, 32, 32)
/*0x07ed0*/ u64 mrpcim_debug_stats4;
#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
        vxge_vBIT(val, 32, 32)
/*0x07ed8*/ u64 genstats_count01;
#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
/*0x07ee0*/ u64 genstats_count23;
#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
/*0x07ee8*/ u64 genstats_count4;
#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
/*0x07ef0*/ u64 genstats_count5;
#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)

        u8 unused07f08[0x07f08-0x07ef8];

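/*
 * General-purpose statistics: event selection for the six counters is
 * programmed through genstats_cfg[6] (data type, client, write/read/
 * completion and vpath selects), results are read back from the
 * genstats_count* registers above, and genstat_64bit_cfg appears to pair
 * counters 0/1 and 2/3 into 64-bit counts.
 */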
/*0x07f08*/ u64 genstats_cfg[6];
#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
/*0x07f38*/ u64 genstat_64bit_cfg;
#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
        u8 unused08000[0x08000-0x07f40];
/*0x08000*/ u64 gcmg3_int_status;
#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
/*0x08008*/ u64 gcmg3_int_mask;
        u8 unused09000[0x09000-0x08010];

/*0x09000*/ u64 g3ifcmd_fb_int_status;
#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
/*0x09008*/ u64 g3ifcmd_fb_int_mask;
/*0x09010*/ u64 g3ifcmd_fb_err_reg;
#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
        vxge_vBIT(val, 24, 8)
#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
/*0x09018*/ u64 g3ifcmd_fb_err_mask;
/*0x09020*/ u64 g3ifcmd_fb_err_alarm;

        u8 unused09400[0x09400-0x09028];

/*0x09400*/ u64 g3ifcmd_cmu_int_status;
#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
        vxge_vBIT(val, 24, 8)
#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;

        u8 unused09800[0x09800-0x09428];

/*0x09800*/ u64 g3ifcmd_cml_int_status;
#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
/*0x09808*/ u64 g3ifcmd_cml_int_mask;
/*0x09810*/ u64 g3ifcmd_cml_err_reg;
#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
        vxge_vBIT(val, 24, 8)
#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
/*0x09818*/ u64 g3ifcmd_cml_err_mask;
/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
        u8 unused09b00[0x09b00-0x09828];

/*0x09b00*/ u64 vpath_to_vplane_map[17];
#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
        vxge_vBIT(val, 3, 5)
        u8 unused09c30[0x09c30-0x09b88];

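/*
 * XGXS (10G SerDes) per-port registers: lane configuration, Rx
 * bit-error-rate monitoring and PCS status.  The [2] arrays presumably
 * cover the two physical ports, in contrast to the [3] MAC-port arrays
 * earlier in the structure.
 */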
/*0x09c30*/ u64 xgxs_cfg_port[2];
#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
        vxge_vBIT(val, 16, 48)
/*0x09c50*/ u64 xgxs_rxber_status_port[2];
#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
        vxge_vBIT(val, 0, 16)
#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
        vxge_vBIT(val, 16, 16)
#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
        vxge_vBIT(val, 32, 16)
#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
        vxge_vBIT(val, 48, 16)
/*0x09c60*/ u64 xgxs_status_port[2];
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
        vxge_vBIT(val, 12, 4)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
        vxge_vBIT(val, 32, 4)
#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
        vxge_vBIT(val, 36, 4)
/*0x09c70*/ u64 xgxs_pma_reset_port[2];
#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
        u8 unused09c90[0x09c90-0x09c80];

/*0x09c90*/ u64 xgxs_static_cfg_port[2];
#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
        u8 unused09d40[0x09d40-0x09ca0];

/*0x09d40*/ u64 xgxs_info_port[2];
#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
/*0x09d50*/ u64 ratemgmt_cfg_port[2];
#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
/*0x09d60*/ u64 ratemgmt_status_port[2];
#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
        u8 unused09d80[0x09d80-0x09d70];

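/*
 * Rate-management autonegotiation: fixed-rate restart, ANTP (external-PHY
 * negotiation over MDIO, judging by the bit names) and ANBE (backplane
 * Ethernet, 10G-KX4/1G-KX) configuration, plus ANBE hardware-FSM status
 * words exposing the received base-page and next-page contents.
 */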
3440/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
3441#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
3442/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
3443#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
3444#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
3445#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
3446#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
3447 vxge_vBIT(val, 16, 4)
3448#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
3449 vxge_vBIT(val, 20, 4)
3450#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
3451 vxge_vBIT(val, 24, 4)
3452#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
3453#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
3454/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
3455#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
3456#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
3457 vxge_mBIT(11)
3458#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
3459 vxge_mBIT(15)
3460#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
3461#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
3462#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
3463#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
3464#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
3465/*0x09db0*/ u64 anbe_cfg_port[2];
3466#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
3467#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
3468#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
3469/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
3470#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
3471#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
3472#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
3473#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
 u8 unused09de0[0x09de0-0x09dd0];

/*0x09de0*/ u64 anbe_fw_mstr_port[2];
#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
 vxge_mBIT(3)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
 vxge_mBIT(7)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
 vxge_mBIT(11)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
 vxge_mBIT(15)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
 vxge_vBIT(val, 18, 6)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
 vxge_mBIT(27)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
 vxge_mBIT(35)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
 vxge_mBIT(39)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
 vxge_mBIT(43)
#define \
VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
 vxge_mBIT(47)
#define \
VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
 vxge_mBIT(51)
#define \
VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
 vxge_mBIT(55)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
 vxge_vBIT(val, 56, 4)
#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
 vxge_vBIT(val, 60, 4)
/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
 vxge_mBIT(32)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
 vxge_mBIT(33)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
 vxge_mBIT(40)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
 vxge_mBIT(41)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
 vxge_mBIT(42)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
 vxge_vBIT(val, 43, 5)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
 vxge_mBIT(50)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
 vxge_vBIT(val, 54, 5)
#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
 vxge_vBIT(val, 59, 5)
/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
 vxge_vBIT(val, 16, 16)
#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
 vxge_vBIT(val, 32, 32)
 u8 unused09e30[0x09e30-0x09e20];

/*0x09e30*/ u64 antp_gen_cfg_port[2];
/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
 vxge_vBIT(val, 10, 6)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
 vxge_mBIT(23)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
 vxge_mBIT(27)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
 vxge_mBIT(35)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
 vxge_mBIT(43)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
 vxge_mBIT(51)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
 vxge_mBIT(59)
/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
 vxge_vBIT(val, 4, 7)
#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
 vxge_vBIT(val, 11, 5)
/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
 vxge_vBIT(val, 5, 11)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
 vxge_vBIT(val, 16, 16)
#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
 vxge_vBIT(val, 32, 16)
/*0x09e70*/ u64 mdio_mgr_access_port[2];
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
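/*
 * The mdio_mgr_access_port fields largely mirror an IEEE 802.3 Clause 45
 * MDIO frame: ST_PATTERN (start), OP_TYPE (opcode), PRTAD (port address),
 * DEVAD (device address), plus a 16-bit register ADDR and 16-bit DATA.
 * The two strobe bits presumably kick off and sequence the transaction.
 */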
 u8 unused0a200[0x0a200-0x09e80];
/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
 u8 unused0a400[0x0a400-0x0a288];

/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
 u8 unused0ac90[0x0ac90-0x0a488];
} __packed;

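/*
 * A note on the bit helpers used throughout these register structs: bit
 * positions are numbered from the most significant bit of the 64-bit
 * register word (bit 0 = MSB), per the macro definitions earlier in this
 * header. vxge_mBIT(loc) produces a single-bit mask at MSB-relative
 * position loc, vxge_vBIT(val, loc, sz) left-shifts val into the sz-bit
 * field whose MSB sits at loc, and vxge_bVALn(bits, loc, n) extracts such
 * a field again. For example, vxge_vBIT(len, 50, 14) places a 14-bit
 * value into the lowest 14 bits of the register word.
 */
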
/*VXGE_HW_SRPCIM_REGS_H*/
struct vxge_hw_srpcim_reg {

/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
 vxge_vBIT(val, 0, 32)
 u8 unused00100[0x00100-0x00008];

/*0x00100*/ u64 srpcim_pcipif_int_status;
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
 BIT(11)
/*0x00108*/ u64 srpcim_pcipif_int_mask;
/*0x00110*/ u64 mrpcim_msg_reg;
#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
/*0x00118*/ u64 mrpcim_msg_mask;
/*0x00120*/ u64 mrpcim_msg_alarm;
/*0x00128*/ u64 vpath_msg_reg;
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
/*0x00130*/ u64 vpath_msg_mask;
/*0x00138*/ u64 vpath_msg_alarm;
 u8 unused00160[0x00160-0x00140];

/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
 vxge_vBIT(val, 0, 64)
/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
 vxge_vBIT(val, 0, 64)
/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
 vxge_vBIT(val, 0, 5)
/*0x00180*/ u64 vpath_to_srpcim_rmsg;
#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
 vxge_vBIT(val, 0, 64)
 u8 unused00200[0x00200-0x00188];

/*0x00200*/ u64 srpcim_general_int_status;
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
 u8 unused00210[0x00210-0x00208];

/*0x00210*/ u64 srpcim_general_int_mask;
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
 u8 unused00220[0x00220-0x00218];

/*0x00220*/ u64 srpcim_ppif_int_status;

/*0x00228*/ u64 srpcim_ppif_int_mask;
/*0x00230*/ u64 srpcim_gen_errors_reg;
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
/*0x00238*/ u64 srpcim_gen_errors_mask;
/*0x00240*/ u64 srpcim_gen_errors_alarm;
/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;

/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
 u8 unused00280[0x00280-0x00278];

/*0x00280*/ u64 pf_sw_reset;
#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
/*0x00288*/ u64 srpcim_general_cfg1;
#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
/*0x00290*/ u64 srpcim_interrupt_cfg1;
#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
 u8 unused002a8[0x002a8-0x00298];

/*0x002a8*/ u64 srpcim_clear_msix_mask;
#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
/*0x002b0*/ u64 srpcim_set_msix_mask;
#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
/*0x002c0*/ u64 srpcim_rst_in_prog;
#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
/*0x002c8*/ u64 srpcim_reg_modified;
#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
/*0x002d0*/ u64 tgt_pf_illegal_access;
#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
/*0x002d8*/ u64 srpcim_msix_status;
#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
 u8 unused00880[0x00880-0x002e0];

/*0x00880*/ u64 xgmac_sr_int_status;
#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
/*0x00888*/ u64 xgmac_sr_int_mask;
/*0x00890*/ u64 asic_ntwk_sr_err_reg;
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
 BIT(11)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
/*0x00898*/ u64 asic_ntwk_sr_err_mask;
/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
 u8 unused008c0[0x008c0-0x008a8];

/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
 vxge_vBIT(val, 0, 17)
 u8 unused00900[0x00900-0x008c8];

/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
 vxge_vBIT(val, 59, 5)
/*0x00908*/ u64 umq_vh_data_list_empty;
#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
 BIT(0)
/*0x00910*/ u64 wde_cfg;
#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)

} __packed;

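/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * test one of the srpcim interrupt summary bits defined above. The
 * function name is made up for this example; the register is read with
 * readq() through the ioremap'd structure, as elsewhere in vxge.
 */
static inline int
vxge_example_srpcim_pic_int_pending(struct vxge_hw_srpcim_reg __iomem *reg)
{
	u64 val64 = readq(&reg->srpcim_general_int_status);

	return !!(val64 & VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT);
}
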
/*VXGE_HW_VPMGMT_REGS_H*/
struct vxge_hw_vpmgmt_reg {

 u8 unused00040[0x00040-0x00000];

/*0x00040*/ u64 vpath_to_func_map_cfg1;
#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
 vxge_vBIT(val, 3, 5)
/*0x00048*/ u64 vpath_is_first;
#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
/*0x00050*/ u64 srpcim_to_vpath_wmsg;
#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
 vxge_vBIT(val, 0, 64)
/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
 vxge_mBIT(0)
 u8 unused00100[0x00100-0x00060];

/*0x00100*/ u64 tim_vpath_assignment;
#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
 u8 unused00140[0x00140-0x00108];

/*0x00140*/ u64 rqa_top_prty_for_vp;
#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
 vxge_vBIT(val, 59, 5)
 u8 unused001c0[0x001c0-0x00148];

/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
 vxge_mBIT(19)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
 vxge_mBIT(23)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
 vxge_mBIT(39)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
 vxge_mBIT(43)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
 vxge_mBIT(47)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
 vxge_mBIT(51)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
 vxge_mBIT(55)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
 vxge_mBIT(59)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
 vxge_vBIT(val, 24, 8)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
 vxge_vBIT(val, 5, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
 vxge_vBIT(val, 9, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
 vxge_vBIT(val, 13, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
 vxge_vBIT(val, 17, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
 vxge_vBIT(val, 21, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
 vxge_vBIT(val, 25, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
 vxge_vBIT(val, 29, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
 vxge_vBIT(val, 33, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
 vxge_vBIT(val, 37, 3)
/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
 vxge_mBIT(27)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
 vxge_vBIT(val, 50, 14)
/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
 vxge_vBIT(val, 9, 3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
 vxge_vBIT(val, 20, 16)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
 vxge_mBIT(39)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
 vxge_mBIT(43)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
 vxge_vBIT(val, 48, 8)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
 vxge_mBIT(59)
 u8 unused00240[0x00240-0x00208];

/*0x00240*/ u64 xmac_vsport_choices_vp;
#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
 u8 unused00260[0x00260-0x00248];

/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
 vxge_mBIT(11)
/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
 vxge_mBIT(3)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
 vxge_mBIT(11)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
 vxge_vBIT(val, 2, 2)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
 vxge_mBIT(7)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
 vxge_vBIT(val, 28, 4)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
 vxge_vBIT(val, 32, 4)
/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
 vxge_vBIT(val, 6, 2)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
 vxge_vBIT(val, 32, 16)
/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
 vxge_vBIT(val, 4, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
 vxge_vBIT(val, 8, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
 vxge_mBIT(7)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
 u8 unused002c0[0x002c0-0x002a8];

/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
 u8 unused00300[0x00300-0x002e0];

/*0x00300*/ u64 wol_mp_crc;
#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
/*0x00308*/ u64 wol_mp_mask_a;
#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
/*0x00310*/ u64 wol_mp_mask_b;
#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
 u8 unused00360[0x00360-0x00318];

/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
/*0x00368*/ u64 rx_datapath_util_vp_clone;
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
 vxge_vBIT(val, 7, 9)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
 vxge_vBIT(val, 16, 4)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
 vxge_vBIT(val, 20, 4)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
 vxge_vBIT(val, 24, 4)
 u8 unused00380[0x00380-0x00370];

/*0x00380*/ u64 tx_datapath_util_vp_clone;
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
 vxge_vBIT(val, 7, 9)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
 vxge_vBIT(val, 16, 4)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
 vxge_vBIT(val, 20, 4)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
 vxge_vBIT(val, 24, 4)

} __packed;

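/*
 * Illustrative sketch only (not part of the driver): extracting the
 * cloned per-port maximum payload length with vxge_bVALn(), using the
 * same (loc, sz) pair that the vxge_vBIT() setter above uses. The
 * function name is made up for this example.
 */
static inline u64
vxge_example_clone_max_pyld_len(struct vxge_hw_vpmgmt_reg __iomem *reg,
				u32 port)
{
	u64 val64 = readq(&reg->rxmac_cfg0_port_vpmgmt_clone[port]);

	/* 14-bit field whose MSB-relative position is bit 50 */
	return vxge_bVALn(val64, 50, 14);
}
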
struct vxge_hw_vpath_reg {

 u8 unused00300[0x00300];

/*0x00300*/ u64 usdc_vpath;
#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
 u8 unused00a00[0x00a00-0x00308];

/*0x00a00*/ u64 wrdma_alarm_status;
#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
/*0x00a08*/ u64 wrdma_alarm_mask;
 u8 unused00a30[0x00a30-0x00a10];

/*0x00a30*/ u64 prc_alarm_reg;
#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
/*0x00a38*/ u64 prc_alarm_mask;
/*0x00a40*/ u64 prc_alarm_alarm;
/*0x00a48*/ u64 prc_cfg1;
#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
 u8 unused00a60[0x00a60-0x00a50];

/*0x00a60*/ u64 prc_cfg4;
#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
/*0x00a68*/ u64 prc_cfg5;
#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
/*0x00a70*/ u64 prc_cfg6;
#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
/*0x00a78*/ u64 prc_cfg7;
#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
/*0x00a80*/ u64 tim_dest_addr;
#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
/*0x00a88*/ u64 prc_rxd_doorbell;
#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
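/*
 * NEW_QW_CNT appears to be how the host tells the PRC about freshly
 * posted receive descriptors: the doorbell write carries the number of
 * new quadwords added to RxD memory rather than a descriptor count.
 */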
/*0x00a90*/ u64 rqa_prty_for_vp;
#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
/*0x00a98*/ u64 rxdmem_size;
#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
/*0x00aa0*/ u64 frm_in_progress_cnt;
#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
 vxge_vBIT(val, 59, 5)
/*0x00aa8*/ u64 rx_multi_cast_stats;
#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
/*0x00ab0*/ u64 rx_frm_transferred;
#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
 vxge_vBIT(val, 32, 32)
/*0x00ab8*/ u64 rxd_returned;
#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
 u8 unused00c00[0x00c00-0x00ac0];

/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
/*0x00c48*/ u64 kdfc_drbl_triplet_total;
#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
 vxge_vBIT(val, 17, 15)
 u8 unused00c60[0x00c60-0x00c50];

/*0x00c60*/ u64 usdc_drbl_ctrl;
#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
/*0x00c68*/ u64 usdc_vp_ready;
#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
/*0x00c70*/ u64 kdfc_status;
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
 u8 unused00c80[0x00c80-0x00c78];

/*0x00c80*/ u64 xmac_rpa_vcfg;
#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
/*0x00c88*/ u64 rxmac_vcfg0;
#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
/*0x00c90*/ u64 rxmac_vcfg1;
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
/*0x00c98*/ u64 rts_access_steer_ctrl;
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
/*0x00ca0*/ u64 rts_access_steer_data0;
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
/*0x00ca8*/ u64 rts_access_steer_data1;
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
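/*
 * The rts_access_steer_{ctrl,data0,data1} trio above looks like the
 * usual mailbox pattern: load data0/data1, write ctrl with ACTION,
 * DATA_STRUCT_SEL and STROBE set, then poll for STROBE to clear before
 * reading results back from the data registers.
 */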
 u8 unused00d00[0x00d00-0x00cb0];

/*0x00d00*/ u64 xmac_vsport_choice;
#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
/*0x00d08*/ u64 xmac_stats_cfg;
/*0x00d10*/ u64 xmac_stats_access_cmd;
#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
/*0x00d18*/ u64 xmac_stats_access_data;
#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
 u8 unused00d30[0x00d30-0x00d28];

/*0x00d30*/ u64 xgmac_vp_int_status;
#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
 vxge_mBIT(3)
/*0x00d38*/ u64 xgmac_vp_int_mask;
/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
 vxge_mBIT(11)
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
 vxge_mBIT(15)
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
 vxge_mBIT(19)
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
 u8 unused00d80[0x00d80-0x00d58];

/*0x00d80*/ u64 rtdma_bw_ctrl;
#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
 vxge_vBIT(val, 21, 3)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
 vxge_vBIT(val, 29, 3)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
 vxge_vBIT(val, 37, 3)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
 vxge_vBIT(val, 51, 5)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
 vxge_vBIT(val, 61, 3)
/*0x00d90*/ u64 pda_pcc_job_monitor;
#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
/*0x00d98*/ u64 tx_protocol_assist_cfg;
#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
 u8 unused01000[0x01000-0x00da0];

/*0x01000*/ u64 tim_cfg1_int_num[4];
#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
/*0x01020*/ u64 tim_cfg2_int_num[4];
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
/*0x01040*/ u64 tim_cfg3_int_num[4];
#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
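/*
 * tim_cfg1/2/3_int_num together configure per-interrupt traffic
 * interrupt moderation: the BTIMER/RTIMER/LTIMER values bound how long
 * events may be coalesced, the URNG_A/B/C thresholds split utilization
 * into ranges, and the UEC_A..D counts set how many events accumulate
 * in each range before an interrupt fires. This reading is inferred
 * from the field names; the precise semantics are not documented here.
 */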
/*0x01060*/ u64 tim_wrkld_clc;
#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
/*0x01068*/ u64 tim_bitmap;
#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
/*0x01070*/ u64 tim_ring_assn;
#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
/*0x01078*/ u64 tim_remap;
#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
/*0x01080*/ u64 tim_vpath_map;
#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
/*0x01088*/ u64 tim_pci_cfg;
#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
 u8 unused01100[0x01100-0x01090];

/*0x01100*/ u64 sgrp_assign;
#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
/*0x01108*/ u64 sgrp_aoa_and_result;
#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
 vxge_vBIT(val, 0, 64)
/*0x01110*/ u64 rpe_pci_cfg;
#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
/*0x01118*/ u64 rpe_lro_cfg;
#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
 vxge_vBIT(val, 0, 32)
#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
 vxge_vBIT(val, 32, 32)
/*0x01130*/ u64 txpe_pci_nce_cfg;
#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
 u8 unused01180[0x01180-0x01138];

/*0x01180*/ u64 msg_qpad_en_cfg;
#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
/*0x01188*/ u64 msg_pci_cfg;
#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
/*0x01190*/ u64 umqdmq_ir_init;
#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
/*0x01198*/ u64 dmq_ir_int;
#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
/*0x011a0*/ u64 dmq_bwr_init_add;
#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
/*0x011a8*/ u64 dmq_bwr_init_byte;
#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
/*0x011b0*/ u64 dmq_ir;
#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
/*0x011b8*/ u64 umq_int;
#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
/*0x011d8*/ u64 umq_bwr_init_add;
#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
/*0x011e0*/ u64 umq_bwr_init_byte;
#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
/*0x011e8*/ u64 gendma_int;
/*0x011f0*/ u64 umqdmq_ir_init_notify;
#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
/*0x011f8*/ u64 dmq_init_notify;
#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
/*0x01200*/ u64 umq_init_notify;
#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
 u8 unused01380[0x01380-0x01208];

/*0x01380*/ u64 tpa_cfg;
#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
 u8 unused01400[0x01400-0x01388];

/*0x01400*/ u64 tx_vp_reset_discarded_frms;
#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
 vxge_vBIT(val, 48, 16)
 u8 unused01480[0x01480-0x01408];

/*0x01480*/ u64 fau_rpa_vcfg;
#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
 u8 unused014d0[0x014d0-0x01488];

/*0x014d0*/ u64 dbg_stats_rx_mpa;
#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
/*0x014d8*/ u64 dbg_stats_rx_fau;
#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
 vxge_vBIT(val, 16, 16)
#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
 vxge_vBIT(val, 32, 32)
 u8 unused014f0[0x014f0-0x014e0];

/*0x014f0*/ u64 fbmc_vp_rdy;
#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
 u8 unused01e00[0x01e00-0x014f8];

/*0x01e00*/ u64 vpath_pcipif_int_status;
#define \
VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
 vxge_mBIT(3)
#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
 vxge_mBIT(7)
/*0x01e08*/ u64 vpath_pcipif_int_mask;
 u8 unused01e20[0x01e20-0x01e10];

/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
 vxge_mBIT(3)
/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
 u8 unused01ea0[0x01ea0-0x01e38];

/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
 vxge_vBIT(val, 0, 64)
/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
 vxge_mBIT(0)
 u8 unused02000[0x02000-0x01eb0];

/*0x02000*/ u64 vpath_general_int_status;
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
/*0x02008*/ u64 vpath_general_int_mask;
#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
/*0x02010*/ u64 vpath_ppif_int_status;
#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
 vxge_mBIT(3)
#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
 vxge_mBIT(7)
#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
 vxge_mBIT(11)
#define \
VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
 vxge_mBIT(15)
#define \
VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
 vxge_mBIT(19)
/*0x02018*/ u64 vpath_ppif_int_mask;
/*0x02020*/ u64 kdfcctl_errors_reg;
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
/*0x02028*/ u64 kdfcctl_errors_mask;
/*0x02030*/ u64 kdfcctl_errors_alarm;
 u8 unused02040[0x02040-0x02038];

/*0x02040*/ u64 general_errors_reg;
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
/*0x02048*/ u64 general_errors_mask;
/*0x02050*/ u64 general_errors_alarm;
/*0x02058*/ u64 pci_config_errors_reg;
#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
/*0x02060*/ u64 pci_config_errors_mask;
/*0x02068*/ u64 pci_config_errors_alarm;
/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
 vxge_mBIT(3)
/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
 vxge_vBIT(val, 0, 17)
/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
 u8 unused02108[0x02108-0x020a0];

/*0x02108*/ u64 kdfcctl_status;
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
/*0x02110*/ u64 rsthdlr_status;
#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
/*0x02118*/ u64 fifo0_status;
#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
/*0x02120*/ u64 fifo1_status;
#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
/*0x02128*/ u64 fifo2_status;
#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
 u8 unused02158[0x02158-0x02130];

/*0x02158*/ u64 tgt_illegal_access;
#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
 u8 unused02200[0x02200-0x02160];

/*0x02200*/ u64 vpath_general_cfg1;
#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
/*0x02208*/ u64 vpath_general_cfg2;
#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
/*0x02210*/ u64 vpath_general_cfg3;
#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
 u8 unused02220[0x02220-0x02218];

/*0x02220*/ u64 kdfcctl_cfg0;
#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)

 u8 unused02268[0x02268-0x02228];

/*0x02268*/ u64 stats_cfg;
#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
/*0x02270*/ u64 interrupt_cfg0;
#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
 u8 unused02280[0x02280-0x02278];

/*0x02280*/ u64 interrupt_cfg2;
#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
/*0x02288*/ u64 one_shot_vect0_en;
#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
/*0x02290*/ u64 one_shot_vect1_en;
#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
/*0x02298*/ u64 one_shot_vect2_en;
#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
/*0x022a0*/ u64 one_shot_vect3_en;
#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
 u8 unused022b0[0x022b0-0x022a8];

/*0x022b0*/ u64 pci_config_access_cfg1;
#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
/*0x022b8*/ u64 pci_config_access_cfg2;
#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
/*0x022c0*/ u64 pci_config_access_status;
#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
 u8 unused02300[0x02300-0x022c8];

/*0x02300*/ u64 vpath_debug_stats0;
#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
/*0x02308*/ u64 vpath_debug_stats1;
#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
/*0x02310*/ u64 vpath_debug_stats2;
#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
/*0x02318*/ u64 vpath_debug_stats3;
#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
 vxge_vBIT(val, 0, 64)
/*0x02320*/ u64 vpath_debug_stats4;
#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
 vxge_vBIT(val, 0, 64)
/*0x02328*/ u64 vpath_debug_stats5;
#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
/*0x02330*/ u64 vpath_debug_stats6;
#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
/*0x02338*/ u64 vpath_genstats_count01;
#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
 vxge_vBIT(val, 0, 32)
#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
 vxge_vBIT(val, 32, 32)
/*0x02340*/ u64 vpath_genstats_count23;
4616#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
4617 vxge_vBIT(val, 0, 32)
4618#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
4619 vxge_vBIT(val, 32, 32)
4620/*0x02348*/ u64 vpath_genstats_count4;
4621#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
4622 vxge_vBIT(val, 32, 32)
4623/*0x02350*/ u64 vpath_genstats_count5;
4624#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
4625 vxge_vBIT(val, 32, 32)
4626 u8 unused02648[0x02648-0x02358];
4627} __packed;
4628
4629#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
4630
4631/* Capability lists */
4632#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
4633#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link width. */
4634#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
4635
4636#endif
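/*
 * A minimal userspace sketch (not part of the driver) of how the
 * VXGE_HW_PCI_EXP_LNKCAP_* masks above carve the link-speed and link-width
 * fields out of a PCIe Link Capabilities word. The sample lnkcap value is
 * made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lnkcap = 0x0082;	/* hypothetical: speed code 2, x8 width */
	uint32_t speed = lnkcap & 0xf;		/* ..._LNKCAP_LNK_SPEED */
	uint32_t width = (lnkcap & 0x3f0) >> 4;	/* ..._LNKCAP_LNK_WIDTH */

	printf("link speed code %u, link width x%u\n", speed, width);
	return 0;
}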
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
new file mode 100644
index 00000000000..ad64ce0afe3
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -0,0 +1,2514 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/etherdevice.h>
15#include <linux/prefetch.h>
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-main.h"
20
21/*
22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23 * @vp: Virtual Path handle.
24 *
25 * Enable vpath interrupts. The function is to be executed last in the
26 * vpath initialization sequence.
27 *
28 * See also: vxge_hw_vpath_intr_disable()
29 */
30enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
31{
32 u64 val64;
33
34 struct __vxge_hw_virtualpath *vpath;
35 struct vxge_hw_vpath_reg __iomem *vp_reg;
36 enum vxge_hw_status status = VXGE_HW_OK;
37 if (vp == NULL) {
38 status = VXGE_HW_ERR_INVALID_HANDLE;
39 goto exit;
40 }
41
42 vpath = vp->vpath;
43
44 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46 goto exit;
47 }
48
49 vp_reg = vpath->vp_reg;
50
51 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52
53 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54 &vp_reg->general_errors_reg);
55
56 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57 &vp_reg->pci_config_errors_reg);
58
59 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 &vp_reg->mrpcim_to_vpath_alarm_reg);
61
62 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 &vp_reg->srpcim_to_vpath_alarm_reg);
64
65 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 &vp_reg->vpath_ppif_int_status);
67
68 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 &vp_reg->srpcim_msg_to_vpath_reg);
70
71 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 &vp_reg->vpath_pcipif_int_status);
73
74 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 &vp_reg->prc_alarm_reg);
76
77 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 &vp_reg->wrdma_alarm_status);
79
80 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 &vp_reg->asic_ntwk_vp_err_reg);
82
83 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 &vp_reg->xgmac_vp_int_status);
85
86 val64 = readq(&vp_reg->vpath_general_int_status);
87
88 /* Mask unwanted interrupts */
89
90 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91 &vp_reg->vpath_pcipif_int_mask);
92
93 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94 &vp_reg->srpcim_msg_to_vpath_mask);
95
96 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 &vp_reg->srpcim_to_vpath_alarm_mask);
98
99 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 &vp_reg->mrpcim_to_vpath_alarm_mask);
101
102 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 &vp_reg->pci_config_errors_mask);
104
105 /* Unmask the individual interrupts */
106
107 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
111 &vp_reg->general_errors_mask);
112
113 __vxge_hw_pio_mem_write32_upper(
114 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
120 &vp_reg->kdfcctl_errors_mask);
121
122 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123
124 __vxge_hw_pio_mem_write32_upper(
125 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
126 &vp_reg->prc_alarm_mask);
127
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130
131 if (vpath->hldev->first_vp_id != vpath->vp_id)
132 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133 &vp_reg->asic_ntwk_vp_err_mask);
134 else
135 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
138 &vp_reg->asic_ntwk_vp_err_mask);
139
140 __vxge_hw_pio_mem_write32_upper(0,
141 &vp_reg->vpath_general_int_mask);
142exit:
143 return status;
144
145}
146
147/*
148 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149 * @vp: Virtual Path handle.
150 *
151 * Disable vpath interrupts. The function is typically executed when a
152 * vpath is being reset or closed.
153 *
154 * See also: vxge_hw_vpath_intr_enable()
155 */
156enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 struct __vxge_hw_vpath_handle *vp)
158{
159 u64 val64;
160
161 struct __vxge_hw_virtualpath *vpath;
162 enum vxge_hw_status status = VXGE_HW_OK;
163 struct vxge_hw_vpath_reg __iomem *vp_reg;
164 if (vp == NULL) {
165 status = VXGE_HW_ERR_INVALID_HANDLE;
166 goto exit;
167 }
168
169 vpath = vp->vpath;
170
171 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 goto exit;
174 }
175 vp_reg = vpath->vp_reg;
176
177 __vxge_hw_pio_mem_write32_upper(
178 (u32)VXGE_HW_INTR_MASK_ALL,
179 &vp_reg->vpath_general_int_mask);
180
181 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182
183 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184
185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 &vp_reg->general_errors_mask);
187
188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 &vp_reg->pci_config_errors_mask);
190
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->mrpcim_to_vpath_alarm_mask);
193
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->srpcim_to_vpath_alarm_mask);
196
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->vpath_ppif_int_mask);
199
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->srpcim_msg_to_vpath_mask);
202
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->vpath_pcipif_int_mask);
205
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->wrdma_alarm_mask);
208
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->prc_alarm_mask);
211
212 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 &vp_reg->xgmac_vp_int_mask);
214
215 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 &vp_reg->asic_ntwk_vp_err_mask);
217
218exit:
219 return status;
220}
221
222void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
223{
224 struct vxge_hw_vpath_reg __iomem *vp_reg;
225 struct vxge_hw_vp_config *config;
226 u64 val64;
227
228 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229 return;
230
231 vp_reg = fifo->vp_reg;
232 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
233
234 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238 fifo->tim_tti_cfg1_saved = val64;
239 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
240 }
241}
242
243void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244{
245 u64 val64 = ring->tim_rti_cfg1_saved;
246
247 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 ring->tim_rti_cfg1_saved = val64;
249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250}
251
252void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253{
254 u64 val64 = fifo->tim_tti_cfg3_saved;
255 u64 timer = (fifo->rtimer * 1000) / 272;
256
257 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 if (timer)
259 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
261
262 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263 /* tti_cfg3_saved is not updated again because it is
264 * initialized at one place only - init time.
265 */
266}
267
268void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269{
270 u64 val64 = ring->tim_rti_cfg3_saved;
271 u64 timer = (ring->rtimer * 1000) / 272;
272
273 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 if (timer)
275 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
277
278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279 /* rti_cfg3_saved is not updated again because it is
280 * initialized at one place only - init time.
281 */
282}
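/*
 * A standalone sketch (not driver code) of the restriction-timer scaling
 * used by the two helpers above. Assuming fifo->rtimer / ring->rtimer hold
 * microseconds, (rtimer * 1000) / 272 converts them into the ~272 ns units
 * that VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL() takes; the sample value is
 * made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t rtimer_to_units(uint64_t rtimer_usec)
{
	return (rtimer_usec * 1000) / 272;	/* usec -> ~272 ns ticks */
}

int main(void)
{
	printf("35 usec -> %llu timer units\n",
	       (unsigned long long)rtimer_to_units(35));
	return 0;
}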
283
284/**
285 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
286 * @channel: Channel for rx or tx handle
287 * @msix_id: MSIX ID
288 *
289 * The function masks the msix interrupt for the given msix_id
290 *
292 */
293void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294{
295
296 __vxge_hw_pio_mem_write32_upper(
297 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
299}
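/*
 * A standalone sketch (not driver code) of the vector-to-register mapping
 * used by vxge_hw_channel_msix_mask()/unmask() above: the mask registers
 * form a 4-entry array indexed by msix_id % 4, while vxge_mBIT(msix_id >> 2)
 * picks the bit within the selected register, counting from the MSB (the
 * vxge_mBIT() definition is reproduced here from vxge-reg.h).
 */
#include <stdio.h>

#define vxge_mBIT(loc)	(0x8000000000000000ULL >> (loc))

int main(void)
{
	int msix_id;

	for (msix_id = 0; msix_id < 8; msix_id++)
		printf("msix_id %d -> set_msix_mask_vect[%d], bit 0x%016llx\n",
		       msix_id, msix_id % 4,
		       (unsigned long long)vxge_mBIT(msix_id >> 2));
	return 0;
}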
300
301/**
302 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
303 * @channel: Channel for rx or tx handle
304 * @msix_id: MSIX ID
305 *
306 * The function unmasks the msix interrupt for the given msix_id
307 *
309 */
310void
311vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312{
313
314 __vxge_hw_pio_mem_write32_upper(
315 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317}
318
319/**
320 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
321 * @channel: Channel for rx or tx handle
322 * @msix_id: MSIX ID
323 *
324 * The function clears the msix interrupt for the given msix_id when the
325 * device is configured in MSIX one-shot mode
326 *
328 */
329void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330{
331 __vxge_hw_pio_mem_write32_upper(
332 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334}
335
336/**
337 * vxge_hw_device_set_intr_type - Updates the configuration
338 * with the new interrupt type.
339 * @hldev: HW device handle.
340 * @intr_mode: New interrupt type
341 */
342u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
343{
344
345 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350
351 hldev->config.intr_mode = intr_mode;
352 return intr_mode;
353}
354
355/**
356 * vxge_hw_device_intr_enable - Enable interrupts.
357 * @hldev: HW device handle.
360 *
361 * Enable Titan interrupts. The function is to be executed last in the
362 * Titan initialization sequence.
363 *
364 * See also: vxge_hw_device_intr_disable()
365 */
366void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
367{
368 u32 i;
369 u64 val64;
370 u32 val32;
371
372 vxge_hw_device_mask_all(hldev);
373
374 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
375
376 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377 continue;
378
379 vxge_hw_vpath_intr_enable(
380 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
381 }
382
383 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
385 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
386
387 if (val64 != 0) {
388 writeq(val64, &hldev->common_reg->tim_int_status0);
389
390 writeq(~val64, &hldev->common_reg->tim_int_mask0);
391 }
392
393 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
394 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
395
396 if (val32 != 0) {
397 __vxge_hw_pio_mem_write32_upper(val32,
398 &hldev->common_reg->tim_int_status1);
399
400 __vxge_hw_pio_mem_write32_upper(~val32,
401 &hldev->common_reg->tim_int_mask1);
402 }
403 }
404
405 val64 = readq(&hldev->common_reg->titan_general_int_status);
406
407 vxge_hw_device_unmask_all(hldev);
408}
409
410/**
411 * vxge_hw_device_intr_disable - Disable Titan interrupts.
412 * @hldev: HW device handle.
415 *
416 * Disable Titan interrupts.
417 *
418 * See also: vxge_hw_device_intr_enable()
419 */
420void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
421{
422 u32 i;
423
424 vxge_hw_device_mask_all(hldev);
425
426 /* mask all the tim interrupts */
427 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429 &hldev->common_reg->tim_int_mask1);
430
431 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
432
433 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434 continue;
435
436 vxge_hw_vpath_intr_disable(
437 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
438 }
439}
440
441/**
442 * vxge_hw_device_mask_all - Mask all device interrupts.
443 * @hldev: HW device handle.
444 *
445 * Mask all device interrupts.
446 *
447 * See also: vxge_hw_device_unmask_all()
448 */
449void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
450{
451 u64 val64;
452
453 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
455
456 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 &hldev->common_reg->titan_mask_all_int);
458}
459
460/**
461 * vxge_hw_device_unmask_all - Unmask all device interrupts.
462 * @hldev: HW device handle.
463 *
464 * Unmask all device interrupts.
465 *
466 * See also: vxge_hw_device_mask_all()
467 */
468void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
469{
470 u64 val64 = 0;
471
472 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
474
475 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 &hldev->common_reg->titan_mask_all_int);
477}
478
479/**
480 * vxge_hw_device_flush_io - Flush io writes.
481 * @hldev: HW device handle.
482 *
483 * The function performs a read operation to flush io writes.
484 *
485 * Returns: void
486 */
487void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
488{
489 u32 val32;
490
491 val32 = readl(&hldev->common_reg->titan_general_int_status);
492}
493
494/**
495 * __vxge_hw_device_handle_error - Handle error
496 * @hldev: HW device
497 * @vp_id: Vpath Id
498 * @type: Error type. Please see enum vxge_hw_event{}
499 *
500 * Handle error.
501 */
502static enum vxge_hw_status
503__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504 enum vxge_hw_event type)
505{
506 switch (type) {
507 case VXGE_HW_EVENT_UNKNOWN:
508 break;
509 case VXGE_HW_EVENT_RESET_START:
510 case VXGE_HW_EVENT_RESET_COMPLETE:
511 case VXGE_HW_EVENT_LINK_DOWN:
512 case VXGE_HW_EVENT_LINK_UP:
513 goto out;
514 case VXGE_HW_EVENT_ALARM_CLEARED:
515 goto out;
516 case VXGE_HW_EVENT_ECCERR:
517 case VXGE_HW_EVENT_MRPCIM_ECCERR:
518 goto out;
519 case VXGE_HW_EVENT_FIFO_ERR:
520 case VXGE_HW_EVENT_VPATH_ERR:
521 case VXGE_HW_EVENT_CRITICAL_ERR:
522 case VXGE_HW_EVENT_SERR:
523 break;
524 case VXGE_HW_EVENT_SRPCIM_SERR:
525 case VXGE_HW_EVENT_MRPCIM_SERR:
526 goto out;
527 case VXGE_HW_EVENT_SLOT_FREEZE:
528 break;
529 default:
530 vxge_assert(0);
531 goto out;
532 }
533
534 /* notify driver */
535 if (hldev->uld_callbacks.crit_err)
536 hldev->uld_callbacks.crit_err(
537 (struct __vxge_hw_device *)hldev,
538 type, vp_id);
539out:
540
541 return VXGE_HW_OK;
542}
543
544/*
545 * __vxge_hw_device_handle_link_down_ind
546 * @hldev: HW device handle.
547 *
548 * Link down indication handler. The function is invoked by HW when
549 * Titan indicates that the link is down.
550 */
551static enum vxge_hw_status
552__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
553{
554 /*
555 * If the link state is already down, return.
556 */
557 if (hldev->link_state == VXGE_HW_LINK_DOWN)
558 goto exit;
559
560 hldev->link_state = VXGE_HW_LINK_DOWN;
561
562 /* notify driver */
563 if (hldev->uld_callbacks.link_down)
564 hldev->uld_callbacks.link_down(hldev);
565exit:
566 return VXGE_HW_OK;
567}
568
569/*
570 * __vxge_hw_device_handle_link_up_ind
571 * @hldev: HW device handle.
572 *
573 * Link up indication handler. The function is invoked by HW when
574 * Titan indicates that the link is up for a programmable amount of time.
575 */
576static enum vxge_hw_status
577__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
578{
579 /*
580 * If the link state is already up, return.
581 */
582 if (hldev->link_state == VXGE_HW_LINK_UP)
583 goto exit;
584
585 hldev->link_state = VXGE_HW_LINK_UP;
586
587 /* notify driver */
588 if (hldev->uld_callbacks.link_up)
589 hldev->uld_callbacks.link_up(hldev);
590exit:
591 return VXGE_HW_OK;
592}
593
594/*
595 * __vxge_hw_vpath_alarm_process - Process Alarms.
596 * @vpath: Virtual Path.
597 * @skip_alarms: Do not clear the alarms
598 *
599 * Process vpath alarms.
600 *
601 */
602static enum vxge_hw_status
603__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
604 u32 skip_alarms)
605{
606 u64 val64;
607 u64 alarm_status;
608 u64 pic_status;
609 struct __vxge_hw_device *hldev = NULL;
610 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
611 u64 mask64;
612 struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 struct vxge_hw_vpath_reg __iomem *vp_reg;
614
615 if (vpath == NULL) {
616 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
617 alarm_event);
618 goto out2;
619 }
620
621 hldev = vpath->hldev;
622 vp_reg = vpath->vp_reg;
623 alarm_status = readq(&vp_reg->vpath_general_int_status);
624
625 if (alarm_status == VXGE_HW_ALL_FOXES) {
626 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
627 alarm_event);
628 goto out;
629 }
630
631 sw_stats = vpath->sw_stats;
632
633 if (alarm_status & ~(
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 sw_stats->error_stats.unknown_alarms++;
639
640 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
641 alarm_event);
642 goto out;
643 }
644
645 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646
647 val64 = readq(&vp_reg->xgmac_vp_int_status);
648
649 if (val64 &
650 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651
652 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653
654 if (((val64 &
655 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
656 (!(val64 &
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
658 ((val64 &
659 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
660 (!(val64 &
661 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
662 ))) {
663 sw_stats->error_stats.network_sustained_fault++;
664
665 writeq(
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 &vp_reg->asic_ntwk_vp_err_mask);
668
669 __vxge_hw_device_handle_link_down_ind(hldev);
670 alarm_event = VXGE_HW_SET_LEVEL(
671 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
672 }
673
674 if (((val64 &
675 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
676 (!(val64 &
677 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
678 ((val64 &
679 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
680 (!(val64 &
681 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
682 ))) {
683
684 sw_stats->error_stats.network_sustained_ok++;
685
686 writeq(
687 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 &vp_reg->asic_ntwk_vp_err_mask);
689
690 __vxge_hw_device_handle_link_up_ind(hldev);
691 alarm_event = VXGE_HW_SET_LEVEL(
692 VXGE_HW_EVENT_LINK_UP, alarm_event);
693 }
694
695 writeq(VXGE_HW_INTR_MASK_ALL,
696 &vp_reg->asic_ntwk_vp_err_reg);
697
698 alarm_event = VXGE_HW_SET_LEVEL(
699 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700
701 if (skip_alarms)
702 return VXGE_HW_OK;
703 }
704 }
705
706 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707
708 pic_status = readq(&vp_reg->vpath_ppif_int_status);
709
710 if (pic_status &
711 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712
713 val64 = readq(&vp_reg->general_errors_reg);
714 mask64 = readq(&vp_reg->general_errors_mask);
715
716 if ((val64 &
717 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
718 ~mask64) {
719 sw_stats->error_stats.ini_serr_det++;
720
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_SERR, alarm_event);
723 }
724
725 if ((val64 &
726 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
727 ~mask64) {
728 sw_stats->error_stats.dblgen_fifo0_overflow++;
729
730 alarm_event = VXGE_HW_SET_LEVEL(
731 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
732 }
733
734 if ((val64 &
735 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
736 ~mask64)
737 sw_stats->error_stats.statsb_pif_chain_error++;
738
739 if ((val64 &
740 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
741 ~mask64)
742 sw_stats->error_stats.statsb_drop_timeout++;
743
744 if ((val64 &
745 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
746 ~mask64)
747 sw_stats->error_stats.target_illegal_access++;
748
749 if (!skip_alarms) {
750 writeq(VXGE_HW_INTR_MASK_ALL,
751 &vp_reg->general_errors_reg);
752 alarm_event = VXGE_HW_SET_LEVEL(
753 VXGE_HW_EVENT_ALARM_CLEARED,
754 alarm_event);
755 }
756 }
757
758 if (pic_status &
759 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760
761 val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763
764 if ((val64 &
765 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
766 ~mask64) {
767 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768
769 alarm_event = VXGE_HW_SET_LEVEL(
770 VXGE_HW_EVENT_FIFO_ERR,
771 alarm_event);
772 }
773
774 if ((val64 &
775 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
776 ~mask64) {
777 sw_stats->error_stats.kdfcctl_fifo0_poison++;
778
779 alarm_event = VXGE_HW_SET_LEVEL(
780 VXGE_HW_EVENT_FIFO_ERR,
781 alarm_event);
782 }
783
784 if ((val64 &
785 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
786 ~mask64) {
787 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788
789 alarm_event = VXGE_HW_SET_LEVEL(
790 VXGE_HW_EVENT_FIFO_ERR,
791 alarm_event);
792 }
793
794 if (!skip_alarms) {
795 writeq(VXGE_HW_INTR_MASK_ALL,
796 &vp_reg->kdfcctl_errors_reg);
797 alarm_event = VXGE_HW_SET_LEVEL(
798 VXGE_HW_EVENT_ALARM_CLEARED,
799 alarm_event);
800 }
801 }
802
803 }
804
805 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806
807 val64 = readq(&vp_reg->wrdma_alarm_status);
808
809 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810
811 val64 = readq(&vp_reg->prc_alarm_reg);
812 mask64 = readq(&vp_reg->prc_alarm_mask);
813
814 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
815 ~mask64)
816 sw_stats->error_stats.prc_ring_bumps++;
817
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
819 ~mask64) {
820 sw_stats->error_stats.prc_rxdcm_sc_err++;
821
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
824 alarm_event);
825 }
826
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
828 & ~mask64) {
829 sw_stats->error_stats.prc_rxdcm_sc_abort++;
830
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
833 alarm_event);
834 }
835
836 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
837 & ~mask64) {
838 sw_stats->error_stats.prc_quanta_size_err++;
839
840 alarm_event = VXGE_HW_SET_LEVEL(
841 VXGE_HW_EVENT_VPATH_ERR,
842 alarm_event);
843 }
844
845 if (!skip_alarms) {
846 writeq(VXGE_HW_INTR_MASK_ALL,
847 &vp_reg->prc_alarm_reg);
848 alarm_event = VXGE_HW_SET_LEVEL(
849 VXGE_HW_EVENT_ALARM_CLEARED,
850 alarm_event);
851 }
852 }
853 }
854out:
855 hldev->stats.sw_dev_err_stats.vpath_alarms++;
856out2:
857 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
859 return VXGE_HW_OK;
860
861 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862
863 if (alarm_event == VXGE_HW_EVENT_SERR)
864 return VXGE_HW_ERR_CRITICAL;
865
866 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 VXGE_HW_ERR_SLOT_FREEZE :
868 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 VXGE_HW_ERR_VPATH;
870}
871
872/**
873 * vxge_hw_device_begin_irq - Begin IRQ processing.
874 * @hldev: HW device handle.
875 * @skip_alarms: Do not clear the alarms
876 * @reason: "Reason" for the interrupt, the value of Titan's
877 * general_int_status register.
878 *
879 * The function performs two actions, It first checks whether (shared IRQ) the
880 * interrupt was raised by the device. Next, it masks the device interrupts.
881 *
882 * Note:
883 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
884 * bridge. Therefore, two back-to-back interrupts are potentially possible.
885 *
886 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
887 * in this case the device remains enabled and @reason is set to 0).
888 * Otherwise, vxge_hw_device_begin_irq() stores the 64bit general adapter
889 * status in @reason and returns the resulting alarm-processing status.
890 */
891enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 u32 skip_alarms, u64 *reason)
893{
894 u32 i;
895 u64 val64;
896 u64 adapter_status;
897 u64 vpath_mask;
898 enum vxge_hw_status ret = VXGE_HW_OK;
899
900 val64 = readq(&hldev->common_reg->titan_general_int_status);
901
902 if (unlikely(!val64)) {
903 /* not Titan interrupt */
904 *reason = 0;
905 ret = VXGE_HW_ERR_WRONG_IRQ;
906 goto exit;
907 }
908
909 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910
911 adapter_status = readq(&hldev->common_reg->adapter_status);
912
913 if (adapter_status == VXGE_HW_ALL_FOXES) {
914
915 __vxge_hw_device_handle_error(hldev,
916 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
917 *reason = 0;
918 ret = VXGE_HW_ERR_SLOT_FREEZE;
919 goto exit;
920 }
921 }
922
923 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924
925 *reason = val64;
926
927 vpath_mask = hldev->vpaths_deployed >>
928 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929
930 if (val64 &
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933
934 return VXGE_HW_OK;
935 }
936
937 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938
939 if (unlikely(val64 &
940 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941
942 enum vxge_hw_status error_level = VXGE_HW_OK;
943
944 hldev->stats.sw_dev_err_stats.vpath_alarms++;
945
946 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947
948 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
949 continue;
950
951 ret = __vxge_hw_vpath_alarm_process(
952 &hldev->virtual_paths[i], skip_alarms);
953
954 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955
956 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
958 break;
959 }
960
961 ret = error_level;
962 }
963exit:
964 return ret;
965}
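/*
 * A hedged sketch (not the driver's actual ISR) of how an INTA interrupt
 * handler might use vxge_hw_device_begin_irq() together with
 * vxge_hw_device_clear_tx_rx() defined below. example_isr() and the way
 * rx/tx work is scheduled are hypothetical.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared IRQ, raised by someone else */

	if (reason) {
		/* schedule rx/tx processing here, then acknowledge */
		vxge_hw_device_clear_tx_rx(hldev);
	}

	return IRQ_HANDLED;
}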
966
967/**
968 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
969 * condition that has caused the Tx and Rx interrupt.
970 * @hldev: HW device.
971 *
972 * Acknowledge (that is, clear) the condition that has caused
973 * the Tx and Rx interrupt.
974 * See also: vxge_hw_device_begin_irq(),
975 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
976 */
977void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
978{
979
980 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 &hldev->common_reg->tim_int_status0);
985 }
986
987 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 __vxge_hw_pio_mem_write32_upper(
990 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 &hldev->common_reg->tim_int_status1);
993 }
994}
995
996/*
997 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
998 * @channel: Channel
999 * @dtrh: Buffer to return the DTR pointer
1000 *
1001 * Allocates a dtr from the reserve array. If the reserve array is empty,
1002 * it swaps the reserve and free arrays.
1003 *
1004 */
1005static enum vxge_hw_status
1006vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007{
1008 void **tmp_arr;
1009
1010 if (channel->reserve_ptr - channel->reserve_top > 0) {
1011_alloc_after_swap:
1012 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1013
1014 return VXGE_HW_OK;
1015 }
1016
1017 /* switch between empty and full arrays */
1018
1019	/* the idea behind such a design is that by keeping the free and
1020	 * reserve arrays separate we effectively separate the irq and non-irq
1021	 * parts, i.e. no additional locking is needed when we free a resource */
1022
1023 if (channel->length - channel->free_ptr > 0) {
1024
1025 tmp_arr = channel->reserve_arr;
1026 channel->reserve_arr = channel->free_arr;
1027 channel->free_arr = tmp_arr;
1028 channel->reserve_ptr = channel->length;
1029 channel->reserve_top = channel->free_ptr;
1030 channel->free_ptr = channel->length;
1031
1032 channel->stats->reserve_free_swaps_cnt++;
1033
1034 goto _alloc_after_swap;
1035 }
1036
1037 channel->stats->full_cnt++;
1038
1039 *dtrh = NULL;
1040 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1041}
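/*
 * A minimal userspace model (not driver code) of the two-array allocator
 * above: reservations pop from reserve_arr, frees push onto free_arr, and
 * only when the reserve side runs dry are the two arrays swapped. This is
 * why the free path needs no lock against the allocation path.
 */
#include <stdio.h>

#define LEN 4

static void *arr_a[LEN], *arr_b[LEN];
static void **reserve_arr = arr_a, **free_arr = arr_b;
static int reserve_ptr = LEN, reserve_top, free_ptr = LEN;

static void *dtr_alloc(void)
{
	if (reserve_ptr - reserve_top > 0)
		return reserve_arr[--reserve_ptr];

	if (LEN - free_ptr > 0) {	/* swap the empty and full arrays */
		void **tmp = reserve_arr;

		reserve_arr = free_arr;
		free_arr = tmp;
		reserve_ptr = LEN;
		reserve_top = free_ptr;
		free_ptr = LEN;
		return reserve_arr[--reserve_ptr];
	}

	return NULL;			/* out of descriptors */
}

static void dtr_free(void *dtr)
{
	free_arr[--free_ptr] = dtr;
}

int main(void)
{
	long i;

	for (i = 0; i < LEN; i++)	/* seed with fake descriptor handles */
		reserve_arr[i] = (void *)(i + 1);

	for (i = 0; i < LEN; i++)	/* drain the reserve array ... */
		dtr_free(dtr_alloc());	/* ... recycling onto the free array */

	/* the next allocation triggers the swap */
	printf("after swap: got handle %ld\n", (long)dtr_alloc());
	return 0;
}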
1042
1043/*
1044 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1045 * @channelh: Channel
1046 * @dtrh: DTR pointer
1047 *
1048 * Posts a dtr to work array.
1049 *
1050 */
1051static void
1052vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1053{
1054 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1055
1056 channel->work_arr[channel->post_index++] = dtrh;
1057
1058 /* wrap-around */
1059 if (channel->post_index == channel->length)
1060 channel->post_index = 0;
1061}
1062
1063/*
1064 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1065 * @channel: Channel
1066 * @dtr: Buffer to return the next completed DTR pointer
1067 *
1068 * Returns the next completed dtr without removing it from the work array
1069 *
1070 */
1071void
1072vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1073{
1074 vxge_assert(channel->compl_index < channel->length);
1075
1076 *dtrh = channel->work_arr[channel->compl_index];
1077 prefetch(*dtrh);
1078}
1079
1080/*
1081 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1082 * @channel: Channel handle
1083 *
1084 * Removes the next completed dtr from work array
1085 *
1086 */
1087void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1088{
1089 channel->work_arr[channel->compl_index] = NULL;
1090
1091 /* wrap-around */
1092 if (++channel->compl_index == channel->length)
1093 channel->compl_index = 0;
1094
1095 channel->stats->total_compl_cnt++;
1096}
1097
1098/*
1099 * vxge_hw_channel_dtr_free - Frees a dtr
1100 * @channel: Channel handle
1101 * @dtr: DTR pointer
1102 *
1103 * Returns the dtr to free array
1104 *
1105 */
1106void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1107{
1108 channel->free_arr[--channel->free_ptr] = dtrh;
1109}
1110
1111/*
1112 * vxge_hw_channel_dtr_count
1113 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1114 *
1115 * Retrieve the number of DTRs available. This function cannot be called
1116 * from the data path. ring_initial_replenish() is the only user.
1117 */
1118int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1119{
1120 return (channel->reserve_ptr - channel->reserve_top) +
1121 (channel->length - channel->free_ptr);
1122}
1123
1124/**
1125 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1126 * @ring: Handle to the ring object used for receive
1127 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1128 * with a valid handle.
1129 *
1130 * Reserve Rx descriptor for the subsequent filling-in driver
1131 * and posting on the corresponding channel (@channelh)
1132 * via vxge_hw_ring_rxd_post().
1133 *
1134 * Returns: VXGE_HW_OK - success.
1135 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1136 *
1137 */
1138enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1139 void **rxdh)
1140{
1141 enum vxge_hw_status status;
1142 struct __vxge_hw_channel *channel;
1143
1144 channel = &ring->channel;
1145
1146 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1147
1148 if (status == VXGE_HW_OK) {
1149 struct vxge_hw_ring_rxd_1 *rxdp =
1150 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1151
1152 rxdp->control_0 = rxdp->control_1 = 0;
1153 }
1154
1155 return status;
1156}
1157
1158/**
1159 * vxge_hw_ring_rxd_free - Free descriptor.
1160 * @ring: Handle to the ring object used for receive
1161 * @rxdh: Descriptor handle.
1162 *
1163 * Free the reserved descriptor. This operation is "symmetrical" to
1164 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1165 * lifecycle.
1166 *
1167 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1168 * be:
1169 *
1170 * - reserved (vxge_hw_ring_rxd_reserve);
1171 *
1172 * - posted (vxge_hw_ring_rxd_post);
1173 *
1174 * - completed (vxge_hw_ring_rxd_next_completed);
1175 *
1176 * - and recycled again (vxge_hw_ring_rxd_free).
1177 *
1178 * For alternative state transitions and more details please refer to
1179 * the design doc.
1180 *
1181 */
1182void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1183{
1184 struct __vxge_hw_channel *channel;
1185
1186 channel = &ring->channel;
1187
1188 vxge_hw_channel_dtr_free(channel, rxdh);
1189
1190}
1191
1192/**
1193 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1194 * @ring: Handle to the ring object used for receive
1195 * @rxdh: Descriptor handle.
1196 *
1197 * This routine prepares a rxd and posts
1198 */
1199void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1200{
1201 struct __vxge_hw_channel *channel;
1202
1203 channel = &ring->channel;
1204
1205 vxge_hw_channel_dtr_post(channel, rxdh);
1206}
1207
1208/**
1209 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1210 * @ring: Handle to the ring object used for receive
1211 * @rxdh: Descriptor handle.
1212 *
1213 * Processes rxd after post
1214 */
1215void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1216{
1217 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1218 struct __vxge_hw_channel *channel;
1219
1220 channel = &ring->channel;
1221
1222 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1223
1224 if (ring->stats->common_stats.usage_cnt > 0)
1225 ring->stats->common_stats.usage_cnt--;
1226}
1227
1228/**
1229 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1230 * @ring: Handle to the ring object used for receive
1231 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1232 *
1233 * Post descriptor on the ring.
1234 * Prior to posting the descriptor should be filled in accordance with
1235 * Host/Titan interface specification for a given service (LL, etc.).
1236 *
1237 */
1238void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1239{
1240 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1241 struct __vxge_hw_channel *channel;
1242
1243 channel = &ring->channel;
1244
1245 wmb();
1246 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1247
1248 vxge_hw_channel_dtr_post(channel, rxdh);
1249
1250 if (ring->stats->common_stats.usage_cnt > 0)
1251 ring->stats->common_stats.usage_cnt--;
1252}
1253
1254/**
1255 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1256 * @ring: Handle to the ring object used for receive
1257 * @rxdh: Descriptor handle.
1258 *
1259 * Processes rxd after post with memory barrier.
1260 */
1261void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1262{
1263 wmb();
1264 vxge_hw_ring_rxd_post_post(ring, rxdh);
1265}
1266
1267/**
1268 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1269 * @ring: Handle to the ring object used for receive
1270 * @rxdh: Descriptor handle. Returned by HW.
1271 * @t_code: Transfer code, as per Titan User Guide,
1272 * Receive Descriptor Format. Returned by HW.
1273 *
1274 * Retrieve the _next_ completed descriptor.
1275 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
1276 * driver of new completed descriptors. After that
1277 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1278 * of the completions (the very first completion is passed by HW via
1279 * vxge_hw_ring_callback_f).
1280 *
1281 * Implementation-wise, the driver is free to call
1282 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1283 * ring callback, or in a deferred fashion and separate (from HW)
1284 * context.
1285 *
1286 * Non-zero @t_code means failure to fill-in receive buffer(s)
1287 * of the descriptor.
1288 * For instance, parity error detected during the data transfer.
1289 * In this case Titan will complete the descriptor and indicate
1290 * for the host that the received data is not to be used.
1291 * For details please refer to Titan User Guide.
1292 *
1293 * Returns: VXGE_HW_OK - success.
1294 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1295 * are currently available for processing.
1296 *
1297 * See also: vxge_hw_ring_callback_f{},
1298 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
1299 */
1300enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1301 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1302{
1303 struct __vxge_hw_channel *channel;
1304 struct vxge_hw_ring_rxd_1 *rxdp;
1305 enum vxge_hw_status status = VXGE_HW_OK;
1306 u64 control_0, own;
1307
1308 channel = &ring->channel;
1309
1310 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1311
1312 rxdp = *rxdh;
1313 if (rxdp == NULL) {
1314 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315 goto exit;
1316 }
1317
1318 control_0 = rxdp->control_0;
1319 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1320 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1321
1322 /* check whether it is not the end */
1323 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324
1325 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
1326 0);
1327
1328 ++ring->cmpl_cnt;
1329 vxge_hw_channel_dtr_complete(channel);
1330
1331 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1332
1333 ring->stats->common_stats.usage_cnt++;
1334 if (ring->stats->common_stats.usage_max <
1335 ring->stats->common_stats.usage_cnt)
1336 ring->stats->common_stats.usage_max =
1337 ring->stats->common_stats.usage_cnt;
1338
1339 status = VXGE_HW_OK;
1340 goto exit;
1341 }
1342
1343	/* reset it, since we don't want to return
1344	 * garbage to the driver */
1345 *rxdh = NULL;
1346 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1347exit:
1348 return status;
1349}
1350
1351/**
1352 * vxge_hw_ring_handle_tcode - Handle transfer code.
1353 * @ring: Handle to the ring object used for receive
1354 * @rxdh: Descriptor handle.
1355 * @t_code: One of the enumerated (and documented in the Titan user guide)
1356 * "transfer codes".
1357 *
1358 * Handle descriptor's transfer code. The latter comes with each completed
1359 * descriptor.
1360 *
1361 * Returns: one of the enum vxge_hw_status{} enumerated types.
1362 * VXGE_HW_OK - for success.
1363 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1364 */
1365enum vxge_hw_status vxge_hw_ring_handle_tcode(
1366 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1367{
1368 struct __vxge_hw_channel *channel;
1369 enum vxge_hw_status status = VXGE_HW_OK;
1370
1371 channel = &ring->channel;
1372
1373	/* If the t_code is not supported and if the
1374	 * t_code is other than 0x5 (unparseable packet
1375	 * such as an unknown IPv6 header), drop it.
1376	 */
1377
1378 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1379 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1380 status = VXGE_HW_OK;
1381 goto exit;
1382 }
1383
1384 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1385 status = VXGE_HW_ERR_INVALID_TCODE;
1386 goto exit;
1387 }
1388
1389 ring->stats->rxd_t_code_err_cnt[t_code]++;
1390exit:
1391 return status;
1392}
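/*
 * A hedged sketch (not the driver's actual poll loop) of the receive
 * completion sequence built from the ring APIs above: drain completed
 * RxDs, screen each transfer code, then recycle the descriptor per the
 * reserve/post/complete/free lifecycle. example_rx_poll() and the way the
 * buffer is handed to the stack are hypothetical.
 */
static void example_rx_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
		    VXGE_HW_OK) {
			/* unusable completion: just recycle the RxD */
			vxge_hw_ring_rxd_free(ring, rxdh);
			continue;
		}
		/* pass the buffer up here, then reserve and post a
		 * replacement RxD; freeing is shown for brevity */
		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}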
1393
1394/**
1395 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1396 *
1397 * @fifo: fifo handle
1398 * @txdl_ptr: The starting location of the TxDL in host memory
1399 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1400 * @no_snoop: No snoop flags
1401 *
1402 * This function posts a non-offload doorbell to the doorbell FIFO
1403 *
1404 */
1405static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1406 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1407{
1408 struct __vxge_hw_channel *channel;
1409
1410 channel = &fifo->channel;
1411
1412 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1413 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1414 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1415 &fifo->nofl_db->control_0);
1416
1417 mmiowb();
1418
1419 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1420
1421 mmiowb();
1422}
1423
1424/**
1425 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1426 * the fifo
1427 * @fifoh: Handle to the fifo object used for non offload send
1428 */
1429u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1430{
1431 return vxge_hw_channel_dtr_count(&fifoh->channel);
1432}
1433
1434/**
1435 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1436 * @fifoh: Handle to the fifo object used for non offload send
1437 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1438 * with a valid handle.
1439 * @txdl_priv: Buffer to return the pointer to per txdl space
1440 *
1441 * Reserve a single TxDL (that is, fifo descriptor)
1442 * for the subsequent filling-in by the driver
1443 * and posting on the corresponding channel (@channelh)
1444 * via vxge_hw_fifo_txdl_post().
1445 *
1446 * Note: it is the responsibility of driver to reserve multiple descriptors
1447 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1448 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1449 *
1450 * Returns: VXGE_HW_OK - success;
1451 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1452 *
1453 */
1454enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1455 struct __vxge_hw_fifo *fifo,
1456 void **txdlh, void **txdl_priv)
1457{
1458 struct __vxge_hw_channel *channel;
1459 enum vxge_hw_status status;
1460 int i;
1461
1462 channel = &fifo->channel;
1463
1464 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1465
1466 if (status == VXGE_HW_OK) {
1467 struct vxge_hw_fifo_txd *txdp =
1468 (struct vxge_hw_fifo_txd *)*txdlh;
1469 struct __vxge_hw_fifo_txdl_priv *priv;
1470
1471 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1472
1473 /* reset the TxDL's private */
1474 priv->align_dma_offset = 0;
1475 priv->align_vaddr_start = priv->align_vaddr;
1476 priv->align_used_frags = 0;
1477 priv->frags = 0;
1478 priv->alloc_frags = fifo->config->max_frags;
1479 priv->next_txdl_priv = NULL;
1480
1481 *txdl_priv = (void *)(size_t)txdp->host_control;
1482
1483 for (i = 0; i < fifo->config->max_frags; i++) {
1484 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1485 txdp->control_0 = txdp->control_1 = 0;
1486 }
1487 }
1488
1489 return status;
1490}
1491
1492/**
1493 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1494 * descriptor.
1495 * @fifo: Handle to the fifo object used for non offload send
1496 * @txdlh: Descriptor handle.
1497 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1498 * (of buffers).
1499 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1500 * @size: Size of the data buffer (in bytes).
1501 *
1502 * This API is part of the preparation of the transmit descriptor for posting
1503 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1504 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1505 * All three APIs fill in the fields of the fifo descriptor,
1506 * in accordance with the Titan specification.
1507 *
1508 */
1509void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1510 void *txdlh, u32 frag_idx,
1511 dma_addr_t dma_pointer, u32 size)
1512{
1513 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1514 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1515 struct __vxge_hw_channel *channel;
1516
1517 channel = &fifo->channel;
1518
1519 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1520 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1521
1522 if (frag_idx != 0)
1523 txdp->control_0 = txdp->control_1 = 0;
1524 else {
1525 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1526 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1527 txdp->control_1 |= fifo->interrupt_type;
1528 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1529 fifo->tx_intr_num);
1530 if (txdl_priv->frags) {
1531 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1532 (txdl_priv->frags - 1);
1533 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1534 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1535 }
1536 }
1537
1538 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1539
1540 txdp->buffer_pointer = (u64)dma_pointer;
1541 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1542 fifo->stats->total_buffers++;
1543 txdl_priv->frags++;
1544}
1545
1546/**
1547 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1548 * @fifo: Handle to the fifo object used for non offload send
1549 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1550 * @frags: Number of contiguous buffers that are part of a single
1551 * transmit operation.
1552 *
1553 * Post descriptor on the 'fifo' type channel for transmission.
1554 * Prior to posting the descriptor should be filled in accordance with
1555 * Host/Titan interface specification for a given service (LL, etc.).
1556 *
1557 */
1558void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1559{
1560 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1561 struct vxge_hw_fifo_txd *txdp_last;
1562 struct vxge_hw_fifo_txd *txdp_first;
1563 struct __vxge_hw_channel *channel;
1564
1565 channel = &fifo->channel;
1566
1567 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1568 txdp_first = txdlh;
1569
1570 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1571 txdp_last->control_0 |=
1572 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1573 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1574
1575 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1576
1577 __vxge_hw_non_offload_db_post(fifo,
1578 (u64)txdl_priv->dma_addr,
1579 txdl_priv->frags - 1,
1580 fifo->no_snoop_bits);
1581
1582 fifo->stats->total_posts++;
1583 fifo->stats->common_stats.usage_cnt++;
1584 if (fifo->stats->common_stats.usage_max <
1585 fifo->stats->common_stats.usage_cnt)
1586 fifo->stats->common_stats.usage_max =
1587 fifo->stats->common_stats.usage_cnt;
1588}
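/*
 * A hedged sketch (not the driver's xmit path) of a single-fragment
 * transmit using the fifo APIs above: reserve a TxDL, attach one mapped
 * buffer, then post. example_xmit_one() and its dma_addr/len arguments
 * are hypothetical.
 */
static enum vxge_hw_status example_xmit_one(struct __vxge_hw_fifo *fifo,
					    dma_addr_t dma_addr, u32 len)
{
	void *txdlh, *txdl_priv;
	enum vxge_hw_status status;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)	/* VXGE_HW_INF_OUT_OF_DESCRIPTORS */
		return status;

	/* fragment 0 of a one-fragment frame */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);

	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}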
1589
1590/**
1591 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1592 * @fifo: Handle to the fifo object used for non offload send
1593 * @txdlh: Descriptor handle. Returned by HW.
1594 * @t_code: Transfer code, as per Titan User Guide,
1595 * Transmit Descriptor Format.
1596 * Returned by HW.
1597 *
1598 * Retrieve the _next_ completed descriptor.
1599 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1600 * driver of new completed descriptors. After that
1601 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1602 * of the completions (the very first completion is passed by HW via
1603 * vxge_hw_channel_callback_f).
1604 *
1605 * Implementation-wise, the driver is free to call
1606 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1607 * channel callback, or in a deferred fashion and separate (from HW)
1608 * context.
1609 *
1610 * Non-zero @t_code means failure to process the descriptor.
1611 * The failure could happen, for instance, when the link is
1612 * down, in which case Titan completes the descriptor because it
1613 * is not able to send the data out.
1614 *
1615 * For details please refer to Titan User Guide.
1616 *
1617 * Returns: VXGE_HW_OK - success.
1618 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1619 * are currently available for processing.
1620 *
1621 */
1622enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1623 struct __vxge_hw_fifo *fifo, void **txdlh,
1624 enum vxge_hw_fifo_tcode *t_code)
1625{
1626 struct __vxge_hw_channel *channel;
1627 struct vxge_hw_fifo_txd *txdp;
1628 enum vxge_hw_status status = VXGE_HW_OK;
1629
1630 channel = &fifo->channel;
1631
1632 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1633
1634 txdp = *txdlh;
1635 if (txdp == NULL) {
1636 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1637 goto exit;
1638 }
1639
1640 /* check whether host owns it */
1641 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1642
1643 vxge_assert(txdp->host_control != 0);
1644
1645 vxge_hw_channel_dtr_complete(channel);
1646
1647 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1648
1649 if (fifo->stats->common_stats.usage_cnt > 0)
1650 fifo->stats->common_stats.usage_cnt--;
1651
1652 status = VXGE_HW_OK;
1653 goto exit;
1654 }
1655
1656 /* no more completions */
1657 *txdlh = NULL;
1658 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1659exit:
1660 return status;
1661}
1662
1663/**
1664 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1665 * @fifo: Handle to the fifo object used for non offload send
1666 * @txdlh: Descriptor handle.
1667 * @t_code: One of the enumerated (and documented in the Titan user guide)
1668 * "transfer codes".
1669 *
1670 * Handle descriptor's transfer code. The latter comes with each completed
1671 * descriptor.
1672 *
1673 * Returns: one of the enum vxge_hw_status{} enumerated types.
1674 * VXGE_HW_OK - for success.
1675 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1676 */
1677enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1678 void *txdlh,
1679 enum vxge_hw_fifo_tcode t_code)
1680{
1681 struct __vxge_hw_channel *channel;
1682
1683 enum vxge_hw_status status = VXGE_HW_OK;
1684 channel = &fifo->channel;
1685
1686	if ((t_code & 0x7) > 0x4) { /* (t_code & 0x7) can never be negative */
1687 status = VXGE_HW_ERR_INVALID_TCODE;
1688 goto exit;
1689 }
1690
1691 fifo->stats->txd_t_code_err_cnt[t_code]++;
1692exit:
1693 return status;
1694}
1695
1696/**
1697 * vxge_hw_fifo_txdl_free - Free descriptor.
1698 * @fifo: Handle to the fifo object used for non offload send
1699 * @txdlh: Descriptor handle.
1700 *
1701 * Free the reserved descriptor. This operation is "symmetrical" to
1702 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1703 * lifecycle.
1704 *
1705 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1706 * be:
1707 *
1708 * - reserved (vxge_hw_fifo_txdl_reserve);
1709 *
1710 * - posted (vxge_hw_fifo_txdl_post);
1711 *
1712 * - completed (vxge_hw_fifo_txdl_next_completed);
1713 *
1714 * - and recycled again (vxge_hw_fifo_txdl_free).
1715 *
1716 * For alternative state transitions and more details please refer to
1717 * the design doc.
1718 *
1719 */
1720void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1721{
1722 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1723 u32 max_frags;
1724 struct __vxge_hw_channel *channel;
1725
1726 channel = &fifo->channel;
1727
1728 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1729 (struct vxge_hw_fifo_txd *)txdlh);
1730
1731 max_frags = fifo->config->max_frags;
1732
1733 vxge_hw_channel_dtr_free(channel, txdlh);
1734}
1735
1736/**
1737 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1738 * to MAC address table.
1739 * @vp: Vpath handle.
1740 * @macaddr: MAC address to be added for this vpath into the list
1741 * @macaddr_mask: MAC address mask for macaddr
1742 * @duplicate_mode: Duplicate MAC address add mode. Please see
1743 * enum vxge_hw_vpath_mac_addr_add_mode{}
1744 *
1745 * Adds the given mac address and mac address mask into the list for this
1746 * vpath.
1747 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1748 * vxge_hw_vpath_mac_addr_get_next
1749 *
1750 */
1751enum vxge_hw_status
1752vxge_hw_vpath_mac_addr_add(
1753 struct __vxge_hw_vpath_handle *vp,
1754 u8 (macaddr)[ETH_ALEN],
1755 u8 (macaddr_mask)[ETH_ALEN],
1756 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1757{
1758 u32 i;
1759 u64 data1 = 0ULL;
1760 u64 data2 = 0ULL;
1761 enum vxge_hw_status status = VXGE_HW_OK;
1762
1763 if (vp == NULL) {
1764 status = VXGE_HW_ERR_INVALID_HANDLE;
1765 goto exit;
1766 }
1767
1768 for (i = 0; i < ETH_ALEN; i++) {
1769 data1 <<= 8;
1770 data1 |= (u8)macaddr[i];
1771
1772 data2 <<= 8;
1773 data2 |= (u8)macaddr_mask[i];
1774 }
1775
1776 switch (duplicate_mode) {
1777 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1778 i = 0;
1779 break;
1780 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1781 i = 1;
1782 break;
1783 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1784 i = 2;
1785 break;
1786 default:
1787 i = 0;
1788 break;
1789 }
1790
1791 status = __vxge_hw_vpath_rts_table_set(vp,
1792 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1793 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1794 0,
1795 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1796 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1797 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1798exit:
1799 return status;
1800}
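
/*
 * Usage sketch (illustrative; mask semantics are defined by the Titan
 * user guide, an all-zero mask is shown only as a placeholder). Note
 * that the packing loop above folds the six bytes MSB-first, so the
 * address 00:11:22:33:44:55 yields data1 == 0x0000001122334455 before
 * the DATA0 field shift is applied.
 *
 *	u8 addr[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0};
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */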
1801
1802/**
1803 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1804 * from MAC address table.
1805 * @vp: Vpath handle.
1806 * @macaddr: First MAC address entry for this vpath in the list
1807 * @macaddr_mask: MAC address mask for macaddr
1808 *
1809 * Returns the first mac address and mac address mask in the list for this
1810 * vpath.
1811 * see also: vxge_hw_vpath_mac_addr_get_next
1812 *
1813 */
1814enum vxge_hw_status
1815vxge_hw_vpath_mac_addr_get(
1816 struct __vxge_hw_vpath_handle *vp,
1817 u8 (macaddr)[ETH_ALEN],
1818 u8 (macaddr_mask)[ETH_ALEN])
1819{
1820 u32 i;
1821 u64 data1 = 0ULL;
1822 u64 data2 = 0ULL;
1823 enum vxge_hw_status status = VXGE_HW_OK;
1824
1825 if (vp == NULL) {
1826 status = VXGE_HW_ERR_INVALID_HANDLE;
1827 goto exit;
1828 }
1829
1830 status = __vxge_hw_vpath_rts_table_get(vp,
1831 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1832 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1833 0, &data1, &data2);
1834
1835 if (status != VXGE_HW_OK)
1836 goto exit;
1837
1838 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1839
1840 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1841
1842 for (i = ETH_ALEN; i > 0; i--) {
1843 macaddr[i-1] = (u8)(data1 & 0xFF);
1844 data1 >>= 8;
1845
1846 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1847 data2 >>= 8;
1848 }
1849exit:
1850 return status;
1851}
1852
1853/**
1854 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1855 * vpath
1856 * from MAC address table.
1857 * @vp: Vpath handle.
1858 * @macaddr: Next MAC address entry for this vpath in the list
1859 * @macaddr_mask: MAC address mask for macaddr
1860 *
1861 * Returns the next mac address and mac address mask in the list for this
1862 * vpath.
1863 * see also: vxge_hw_vpath_mac_addr_get
1864 *
1865 */
1866enum vxge_hw_status
1867vxge_hw_vpath_mac_addr_get_next(
1868 struct __vxge_hw_vpath_handle *vp,
1869 u8 (macaddr)[ETH_ALEN],
1870 u8 (macaddr_mask)[ETH_ALEN])
1871{
1872 u32 i;
1873 u64 data1 = 0ULL;
1874 u64 data2 = 0ULL;
1875 enum vxge_hw_status status = VXGE_HW_OK;
1876
1877 if (vp == NULL) {
1878 status = VXGE_HW_ERR_INVALID_HANDLE;
1879 goto exit;
1880 }
1881
1882 status = __vxge_hw_vpath_rts_table_get(vp,
1883 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1884 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1885 0, &data1, &data2);
1886
1887 if (status != VXGE_HW_OK)
1888 goto exit;
1889
1890 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1891
1892 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1893
1894 for (i = ETH_ALEN; i > 0; i--) {
1895 macaddr[i-1] = (u8)(data1 & 0xFF);
1896 data1 >>= 8;
1897
1898 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1899 data2 >>= 8;
1900 }
1901
1902exit:
1903 return status;
1904}
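
/*
 * Sketch: walking the whole DA table for a vpath with the first/next
 * pair above (error handling trimmed; use_entry() is a hypothetical
 * consumer):
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status s;
 *
 *	for (s = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	     s == VXGE_HW_OK;
 *	     s = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask))
 *		use_entry(mac, mask);
 */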
1905
1906/**
1907 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1908 * from MAC address table.
1909 * @vp: Vpath handle.
1910 * @macaddr: MAC address to be deleted for this vpath from the list
1911 * @macaddr_mask: MAC address mask for macaddr
1912 *
1913 * Deletes the given mac address and mac address mask from the list for this
1914 * vpath.
1915 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1916 * vxge_hw_vpath_mac_addr_get_next
1917 *
1918 */
1919enum vxge_hw_status
1920vxge_hw_vpath_mac_addr_delete(
1921 struct __vxge_hw_vpath_handle *vp,
1922 u8 (macaddr)[ETH_ALEN],
1923 u8 (macaddr_mask)[ETH_ALEN])
1924{
1925 u32 i;
1926 u64 data1 = 0ULL;
1927 u64 data2 = 0ULL;
1928 enum vxge_hw_status status = VXGE_HW_OK;
1929
1930 if (vp == NULL) {
1931 status = VXGE_HW_ERR_INVALID_HANDLE;
1932 goto exit;
1933 }
1934
1935 for (i = 0; i < ETH_ALEN; i++) {
1936 data1 <<= 8;
1937 data1 |= (u8)macaddr[i];
1938
1939 data2 <<= 8;
1940 data2 |= (u8)macaddr_mask[i];
1941 }
1942
1943 status = __vxge_hw_vpath_rts_table_set(vp,
1944 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1945 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1946 0,
1947 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1948 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1949exit:
1950 return status;
1951}
1952
1953/**
1954 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1955 * to vlan id table.
1956 * @vp: Vpath handle.
1957 * @vid: vlan id to be added for this vpath into the list
1958 *
1959 * Adds the given vlan id into the list for this vpath.
1960 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1961 * vxge_hw_vpath_vid_get_next
1962 *
1963 */
1964enum vxge_hw_status
1965vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1966{
1967 enum vxge_hw_status status = VXGE_HW_OK;
1968
1969 if (vp == NULL) {
1970 status = VXGE_HW_ERR_INVALID_HANDLE;
1971 goto exit;
1972 }
1973
1974 status = __vxge_hw_vpath_rts_table_set(vp,
1975 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1976 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1977 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1978exit:
1979 return status;
1980}
1981
1982/**
1983 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1984 * from vlan id table.
1985 * @vp: Vpath handle.
1986 * @vid: Buffer to return vlan id
1987 *
1988 * Returns the first vlan id in the list for this vpath.
1989 * see also: vxge_hw_vpath_vid_get_next
1990 *
1991 */
1992enum vxge_hw_status
1993vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1994{
1995 u64 data;
1996 enum vxge_hw_status status = VXGE_HW_OK;
1997
1998 if (vp == NULL) {
1999 status = VXGE_HW_ERR_INVALID_HANDLE;
2000 goto exit;
2001 }
2002
2003 status = __vxge_hw_vpath_rts_table_get(vp,
2004 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
2005 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2006 0, vid, &data);
2007
2008 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
2009exit:
2010 return status;
2011}
2012
2013/**
2014 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
2015 * from vlan id table.
2016 * @vp: Vpath handle.
2017 * @vid: vlan id to be deleted for this vpath from the list
2018 *
2019 * Deletes the given vlan id from the list for this vpath.
2020 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
2021 * vxge_hw_vpath_vid_get_next
2022 *
2023 */
2024enum vxge_hw_status
2025vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
2026{
2027 enum vxge_hw_status status = VXGE_HW_OK;
2028
2029 if (vp == NULL) {
2030 status = VXGE_HW_ERR_INVALID_HANDLE;
2031 goto exit;
2032 }
2033
2034 status = __vxge_hw_vpath_rts_table_set(vp,
2035 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
2036 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
2037 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
2038exit:
2039 return status;
2040}
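
/*
 * Lifecycle sketch for the vlan id table calls above (illustrative
 * values, error handling trimmed):
 *
 *	u64 vid;
 *
 *	vxge_hw_vpath_vid_add(vp, 100);
 *	if (vxge_hw_vpath_vid_get(vp, &vid) == VXGE_HW_OK)
 *		pr_info("first vid: %llu\n", (unsigned long long)vid);
 *	vxge_hw_vpath_vid_delete(vp, 100);
 */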
2041
2042/**
2043 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
2044 * @vp: Vpath handle.
2045 *
2046 * Enable promiscuous mode of Titan-e operation.
2047 *
2048 * See also: vxge_hw_vpath_promisc_disable().
2049 */
2050enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2051 struct __vxge_hw_vpath_handle *vp)
2052{
2053 u64 val64;
2054 struct __vxge_hw_virtualpath *vpath;
2055 enum vxge_hw_status status = VXGE_HW_OK;
2056
2057 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2058 status = VXGE_HW_ERR_INVALID_HANDLE;
2059 goto exit;
2060 }
2061
2062 vpath = vp->vpath;
2063
2064 /* Enable promiscuous mode for function 0 only */
2065 if (!(vpath->hldev->access_rights &
2066 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2067 return VXGE_HW_OK;
2068
2069 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2070
2071 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2072
2073 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2074 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2075 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2076 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2077
2078 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2079 }
2080exit:
2081 return status;
2082}
2083
2084/**
2085 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2086 * @vp: Vpath handle.
2087 *
2088 * Disable promiscuous mode of Titan-e operation.
2089 *
2090 * See also: vxge_hw_vpath_promisc_enable().
2091 */
2092enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2093 struct __vxge_hw_vpath_handle *vp)
2094{
2095 u64 val64;
2096 struct __vxge_hw_virtualpath *vpath;
2097 enum vxge_hw_status status = VXGE_HW_OK;
2098
2099 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2100 status = VXGE_HW_ERR_INVALID_HANDLE;
2101 goto exit;
2102 }
2103
2104 vpath = vp->vpath;
2105
2106 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2107
2108 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2109
2110 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2111 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2112 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2113
2114 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2115 }
2116exit:
2117 return status;
2118}
2119
2120/*
2121 * vxge_hw_vpath_bcast_enable - Enable broadcast
2122 * @vp: Vpath handle.
2123 *
2124 * Enable receiving broadcasts.
2125 */
2126enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2127 struct __vxge_hw_vpath_handle *vp)
2128{
2129 u64 val64;
2130 struct __vxge_hw_virtualpath *vpath;
2131 enum vxge_hw_status status = VXGE_HW_OK;
2132
2133 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2134 status = VXGE_HW_ERR_INVALID_HANDLE;
2135 goto exit;
2136 }
2137
2138 vpath = vp->vpath;
2139
2140 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2141
2142 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2143 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2144 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2145 }
2146exit:
2147 return status;
2148}
2149
2150/**
2151 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2152 * @vp: Vpath handle.
2153 *
2154 * Enable Titan-e multicast addresses.
2155 * Returns: VXGE_HW_OK on success.
2156 *
2157 */
2158enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2159 struct __vxge_hw_vpath_handle *vp)
2160{
2161 u64 val64;
2162 struct __vxge_hw_virtualpath *vpath;
2163 enum vxge_hw_status status = VXGE_HW_OK;
2164
2165 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2166 status = VXGE_HW_ERR_INVALID_HANDLE;
2167 goto exit;
2168 }
2169
2170 vpath = vp->vpath;
2171
2172 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2173
2174 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2175 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2176 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2177 }
2178exit:
2179 return status;
2180}
2181
2182/**
2183 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2184 * @vp: Vpath handle.
2185 *
2186 * Disable Titan-e multicast addresses.
2187 * Returns: VXGE_HW_OK - success.
2188 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2189 *
2190 */
2191enum vxge_hw_status
2192vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2193{
2194 u64 val64;
2195 struct __vxge_hw_virtualpath *vpath;
2196 enum vxge_hw_status status = VXGE_HW_OK;
2197
2198 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2199 status = VXGE_HW_ERR_INVALID_HANDLE;
2200 goto exit;
2201 }
2202
2203 vpath = vp->vpath;
2204
2205 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2206
2207 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2208 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2209 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2210 }
2211exit:
2212 return status;
2213}
2214
2215/*
2216 * vxge_hw_vpath_alarm_process - Process Alarms.
2217 * @vpath: Virtual Path.
2218 * @skip_alarms: Do not clear the alarms
2219 *
2220 * Process vpath alarms.
2221 *
2222 */
2223enum vxge_hw_status vxge_hw_vpath_alarm_process(
2224 struct __vxge_hw_vpath_handle *vp,
2225 u32 skip_alarms)
2226{
2227 enum vxge_hw_status status = VXGE_HW_OK;
2228
2229 if (vp == NULL) {
2230 status = VXGE_HW_ERR_INVALID_HANDLE;
2231 goto exit;
2232 }
2233
2234 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2235exit:
2236 return status;
2237}
2238
2239/**
2240 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2241 * alarms
2242 * @vp: Virtual Path handle.
2243 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2244 * interrupts (can be repeated). If the fifo or ring is not enabled,
2245 * the MSIX vector for it should be set to 0.
2246 * @alarm_msix_id: MSIX vector for alarm.
2247 *
2248 * This API associates the given MSIX vector numbers with the four TIM
2249 * interrupts and the alarm interrupt.
2250 */
2251void
2252vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2253 int alarm_msix_id)
2254{
2255 u64 val64;
2256 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2257 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2258 u32 vp_id = vp->vpath->vp_id;
2259
2260 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2261 (vp_id * 4) + tim_msix_id[0]) |
2262 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2263 (vp_id * 4) + tim_msix_id[1]);
2264
2265 writeq(val64, &vp_reg->interrupt_cfg0);
2266
2267 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2268 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2269 &vp_reg->interrupt_cfg2);
2270
2271 if (vpath->hldev->config.intr_mode ==
2272 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2273 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2274 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2275 0, 32), &vp_reg->one_shot_vect0_en);
2276 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2277 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2278 0, 32), &vp_reg->one_shot_vect1_en);
2279 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2280 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2281 0, 32), &vp_reg->one_shot_vect2_en);
2282 }
2283}
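
/*
 * Caller sketch (vector numbers illustrative): Tx on local vector 0,
 * Rx on vector 1, alarm on local vector 2. The EINTA/BMAP slots are
 * left at 0, as the kernel-doc above prescribes for unused entities.
 *
 *	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */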
2284
2285/**
2286 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2287 * @vp: Virtual Path handle.
2288 * @msix_id: MSIX ID
2289 *
2290 * The function masks the msix interrupt for the given msix_id
2291 *
2292 * See also: vxge_hw_vpath_msix_unmask().
2296 */
2297void
2298vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2299{
2300 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301 __vxge_hw_pio_mem_write32_upper(
2302 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2303 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2304}
2305
2306/**
2307 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2308 * @vp: Virtual Path handle.
2309 * @msix_id: MSIX ID
2310 *
2311 * The function clears the msix interrupt for the given msix_id
2312 *
2313 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask().
2317 */
2318void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2319{
2320 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2321
2322 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2323 __vxge_hw_pio_mem_write32_upper(
2324 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2325 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2326 else
2327 __vxge_hw_pio_mem_write32_upper(
2328 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2329 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2330}
2331
2332/**
2333 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2334 * @vp: Virtual Path handle.
2335 * @msix_id: MSIX ID
2336 *
2337 * The function unmasks the msix interrupt for the given msix_id
2338 *
2339 * See also: vxge_hw_vpath_msix_mask().
2343 */
2344void
2345vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2346{
2347 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2348 __vxge_hw_pio_mem_write32_upper(
2349 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2350 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2351}
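
/*
 * Typical interrupt-side pattern for the three helpers above (sketch
 * only; vxge_hw_vpath_msix_clear() picks the one-shot or the masked
 * register set based on config.intr_mode; service_vector() is a
 * hypothetical handler body):
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	service_vector(vp, msix_id);
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 */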
2352
2353/**
2354 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2355 * @vp: Virtual Path handle.
2356 *
2357 * Mask Tx and Rx vpath interrupts.
2358 *
2359 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2360 */
2361void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2362{
2363 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2364 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2365 u64 val64;
2366 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2367
2368 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2369 tim_int_mask1, vp->vpath->vp_id);
2370
2371 val64 = readq(&hldev->common_reg->tim_int_mask0);
2372
2373 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2374 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2375 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2376 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2377 &hldev->common_reg->tim_int_mask0);
2378 }
2379
2380 val64 = readl(&hldev->common_reg->tim_int_mask1);
2381
2382 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2383 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2384 __vxge_hw_pio_mem_write32_upper(
2385 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2386 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2387 &hldev->common_reg->tim_int_mask1);
2388 }
2389}
2390
2391/**
2392 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2393 * @vp: Virtual Path handle.
2394 *
2395 * Unmask Tx and Rx vpath interrupts.
2396 *
2397 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2398 */
2399void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2400{
2401 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2402 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2403 u64 val64;
2404 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2405
2406 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2407 tim_int_mask1, vp->vpath->vp_id);
2408
2409 val64 = readq(&hldev->common_reg->tim_int_mask0);
2410
2411 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2412 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2413 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2414 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2415 &hldev->common_reg->tim_int_mask0);
2416 }
2417
2418 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2419 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2420 __vxge_hw_pio_mem_write32_upper(
2421 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2422 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2423 &hldev->common_reg->tim_int_mask1);
2424 }
2425}
2426
2427/**
2428 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2429 * descriptors and process the same.
2430 * @ring: Handle to the ring object used for receive
2431 *
2432 * The function polls the Rx for the completed descriptors and calls
2433 * the driver via supplied completion callback.
2434 *
2435 * Returns: VXGE_HW_OK, if the polling completed successfully.
2436 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2437 * descriptors available which are yet to be processed.
2438 *
2439 * See also: vxge_hw_vpath_poll_tx()
2440 */
2441enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2442{
2443 u8 t_code;
2444 enum vxge_hw_status status = VXGE_HW_OK;
2445 void *first_rxdh;
2446 u64 val64 = 0;
2447 int new_count = 0;
2448
2449 ring->cmpl_cnt = 0;
2450
2451 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2452 if (status == VXGE_HW_OK)
2453 ring->callback(ring, first_rxdh,
2454 t_code, ring->channel.userdata);
2455
2456 if (ring->cmpl_cnt != 0) {
2457 ring->doorbell_cnt += ring->cmpl_cnt;
2458 if (ring->doorbell_cnt >= ring->rxds_limit) {
2459 /*
2460 * Each RxD is of 4 qwords, update the number of
2461 * qwords replenished
2462 */
2463 new_count = (ring->doorbell_cnt * 4);
2464
2465 /* For each block add 4 more qwords */
2466 ring->total_db_cnt += ring->doorbell_cnt;
2467 if (ring->total_db_cnt >= ring->rxds_per_block) {
2468 new_count += 4;
2469 /* Reset total count */
2470 ring->total_db_cnt %= ring->rxds_per_block;
2471 }
2472 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2473 &ring->vp_reg->prc_rxd_doorbell);
2474 val64 =
2475 readl(&ring->common_reg->titan_general_int_status);
2476 ring->doorbell_cnt = 0;
2477 }
2478 }
2479
2480 return status;
2481}
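
/*
 * Doorbell arithmetic example for the replenish logic above: with
 * rxds_limit == 16, one doorbell write reports 16 * 4 = 64 qwords,
 * plus 4 extra qwords whenever total_db_cnt crosses rxds_per_block,
 * i.e. new_count == 68 on a block boundary.
 */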
2482
2483/**
2484 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2485 * the same.
2486 * @fifo: Handle to the fifo object used for non offload send
2487 *
2488 * The function polls the Tx for the completed descriptors and calls
2489 * the driver via supplied completion callback.
2490 *
2491 * Returns: VXGE_HW_OK, if the polling completed successfully.
2492 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2493 * descriptors available which are yet to be processed.
2494 */
2495enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2496 struct sk_buff ***skb_ptr, int nr_skb,
2497 int *more)
2498{
2499 enum vxge_hw_fifo_tcode t_code;
2500 void *first_txdlh;
2501 enum vxge_hw_status status = VXGE_HW_OK;
2502 struct __vxge_hw_channel *channel;
2503
2504 channel = &fifo->channel;
2505
2506 status = vxge_hw_fifo_txdl_next_completed(fifo,
2507 &first_txdlh, &t_code);
2508 if (status == VXGE_HW_OK)
2509 if (fifo->callback(fifo, first_txdlh, t_code,
2510 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2511 status = VXGE_HW_COMPLETIONS_REMAIN;
2512
2513 return status;
2514}
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
new file mode 100644
index 00000000000..4a518a3b131
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -0,0 +1,2298 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_TRAFFIC_H
15#define VXGE_TRAFFIC_H
16
17#include "vxge-reg.h"
18#include "vxge-version.h"
19
20#define VXGE_HW_DTR_MAX_T_CODE 16
21#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
22#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
23#define VXGE_HW_MAX_VIRTUAL_PATHS 17
24
25#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
26
27#define VXGE_HW_DEFAULT_32 0xffffffff
28/* frames sizes */
29#define VXGE_HW_HEADER_802_2_SIZE 3
30#define VXGE_HW_HEADER_SNAP_SIZE 5
31#define VXGE_HW_HEADER_VLAN_SIZE 4
32#define VXGE_HW_MAC_HEADER_MAX_SIZE \
33 (ETH_HLEN + \
34 VXGE_HW_HEADER_802_2_SIZE + \
35 VXGE_HW_HEADER_VLAN_SIZE + \
36 VXGE_HW_HEADER_SNAP_SIZE)
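
/* With ETH_HLEN == 14 the maximum header size above works out to
 * 14 + 3 + 4 + 5 = 26 octets. */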
37
38/* 32bit alignments */
39#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
40#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
41#define VXGE_HW_HEADER_802_2_ALIGN 3
42#define VXGE_HW_HEADER_SNAP_ALIGN 1
43
44#define VXGE_HW_L3_CKSUM_OK 0xFFFF
45#define VXGE_HW_L4_CKSUM_OK 0xFFFF
46
47/* Forward declarations */
48struct __vxge_hw_device;
49struct __vxge_hw_vpath_handle;
50struct vxge_hw_vp_config;
51struct __vxge_hw_virtualpath;
52struct __vxge_hw_channel;
53struct __vxge_hw_fifo;
54struct __vxge_hw_ring;
55struct vxge_hw_ring_attr;
56struct vxge_hw_mempool;
57
58#ifndef TRUE
59#define TRUE 1
60#endif
61
62#ifndef FALSE
63#define FALSE 0
64#endif
65
66/*VXGE_HW_STATUS_H*/
67
68#define VXGE_HW_EVENT_BASE 0
69#define VXGE_LL_EVENT_BASE 100
70
71/**
72 * enum vxge_hw_event- Enumerates slow-path HW events.
73 * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
74 * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
75 * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
76 * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
77 * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
78 * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
79 * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
80 * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
81 * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
82 * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
83 * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
84 * slot-freeze from other critical events (e.g. ECC) when it is
85 * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
86 *
87 * enum vxge_hw_event enumerates slow-path HW events.
88 *
89 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
90 * vxge_uld_link_down_f{}.
91 */
92enum vxge_hw_event {
93 VXGE_HW_EVENT_UNKNOWN = 0,
94 /* HW events */
95 VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
96 VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
97 VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
98 VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
99 VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
100 VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
101 VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
102 VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
103 VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
104 VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
105 VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
106 VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
107 VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
108 VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
109};
110
111#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
112
113/*
114 * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
115 caller.
116 */
117struct vxge_hw_mempool_dma {
118 dma_addr_t addr;
119 struct pci_dev *handle;
120 struct pci_dev *acc_handle;
121};
122
123/*
124 * vxge_hw_mempool_item_f - Mempool item alloc/free callback
125 * @mempoolh: Memory pool handle.
126 * @memblock: Address of memory block
127 * @memblock_index: Index of memory block
128 * @item: Item that gets allocated or freed.
129 * @index: Item's index in the memory pool.
130 * @is_last: True, if this item is the last one in the pool; false - otherwise.
131 * @userdata: Per-pool user context.
132 *
133 * Memory pool allocation/deallocation callback.
134 */
135
136/*
137 * struct vxge_hw_mempool - Memory pool.
138 */
139struct vxge_hw_mempool {
140
141 void (*item_func_alloc)(
142 struct vxge_hw_mempool *mempoolh,
143 u32 memblock_index,
144 struct vxge_hw_mempool_dma *dma_object,
145 u32 index,
146 u32 is_last);
147
148 void *userdata;
149 void **memblocks_arr;
150 void **memblocks_priv_arr;
151 struct vxge_hw_mempool_dma *memblocks_dma_arr;
152 struct __vxge_hw_device *devh;
153 u32 memblock_size;
154 u32 memblocks_max;
155 u32 memblocks_allocated;
156 u32 item_size;
157 u32 items_max;
158 u32 items_initial;
159 u32 items_current;
160 u32 items_per_memblock;
161 void **items_arr;
162 u32 items_priv_size;
163};
164
165#define VXGE_HW_MAX_INTR_PER_VP 4
166#define VXGE_HW_VPATH_INTR_TX 0
167#define VXGE_HW_VPATH_INTR_RX 1
168#define VXGE_HW_VPATH_INTR_EINTA 2
169#define VXGE_HW_VPATH_INTR_BMAP 3
170
171#define VXGE_HW_BLOCK_SIZE 4096
172
173/**
174 * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
175 * @intr_enable: Set to 1, if interrupt is enabled.
176 * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
177 * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
178 * asserted, other interrupt-generating entities will cancel the
179 * scheduled timer interrupt.
180 * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
181 * When asserted, an interrupt will be generated every time the
182 * boundary timer expires, even if no traffic has been transmitted
183 * on this interrupt.
184 * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
185 * (Re-) Interrupt Enable: When asserted, an interrupt will be
186 * generated the next time the timer expires, even if no traffic has
187 * been transmitted on this interrupt. (This will only happen once
188 * each time that this value is written to the TIM.) This bit is
189 * cleared by H/W at the end of the current-timer-interval when
190 * the interrupt is triggered.
191 * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
192 * @util_sel: Utilization Selector. Selects which of the workload approximations
193 * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
194 * specified utilization etc.), selects one of
195 * the 17 host configured values.
196 * 0-Virtual Path 0
197 * 1-Virtual Path 1
198 * ...
199 * 16-Virtual Path 16
200 * 17-Legacy Tx network utilization, provided by TPA
201 * 18-Legacy Rx network utilization, provided by FAU
202 * 19-Average of legacy Rx and Tx utilization calculated from link
203 * utilization values.
204 * 20-31-Invalid configurations
205 * 32-Host utilization for Virtual Path 0
206 * 33-Host utilization for Virtual Path 1
207 * ...
208 * 48-Host utilization for Virtual Path 16
209 * 49-Legacy Tx network utilization, provided by TPA
210 * 50-Legacy Rx network utilization, provided by FAU
211 * 51-Average of legacy Rx and Tx utilization calculated from
212 * link utilization values.
213 * 52-63-Invalid configurations
214 * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
215 * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
216 * to 1 enables counting of TxD0 returns (signalled by PCC's),
217 * towards utilization event count values.
218 * @urange_a: Defines the upper limit (in percent) for this utilization range
219 * to be active. This range is considered active
220 * if 0 <= UTIL <= URNG_A
221 * and the UEC_A field (below) is non-zero.
222 * @uec_a: Utilization Event Count A. If this range is active, the adapter will
223 * wait until UEC_A events have occurred on the interrupt before
224 * generating an interrupt.
225 * @urange_b: Link utilization range B.
226 * @uec_b: Utilization Event Count B.
227 * @urange_c: Link utilization range C.
228 * @uec_c: Utilization Event Count C.
229 * @urange_d: Link utilization range D.
230 * @uec_d: Utilization Event Count D.
231 * Traffic Interrupt Controller Module interrupt configuration.
232 */
233struct vxge_hw_tim_intr_config {
234
235 u32 intr_enable;
236#define VXGE_HW_TIM_INTR_ENABLE 1
237#define VXGE_HW_TIM_INTR_DISABLE 0
238#define VXGE_HW_TIM_INTR_DEFAULT 0
239
240 u32 btimer_val;
241#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
242#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
243#define VXGE_HW_USE_FLASH_DEFAULT (~0)
244
245 u32 timer_ac_en;
246#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
247#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
248
249 u32 timer_ci_en;
250#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
251#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
252
253 u32 timer_ri_en;
254#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
255#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
256
257 u32 rtimer_val;
258#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
259#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
260
261 u32 util_sel;
262#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
263#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
264#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
265#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
266
267 u32 ltimer_val;
268#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
269#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
270
271 /* Line utilization interrupts */
272 u32 urange_a;
273#define VXGE_HW_MIN_TIM_URANGE_A 0
274#define VXGE_HW_MAX_TIM_URANGE_A 100
275
276 u32 uec_a;
277#define VXGE_HW_MIN_TIM_UEC_A 0
278#define VXGE_HW_MAX_TIM_UEC_A 65535
279
280 u32 urange_b;
281#define VXGE_HW_MIN_TIM_URANGE_B 0
282#define VXGE_HW_MAX_TIM_URANGE_B 100
283
284 u32 uec_b;
285#define VXGE_HW_MIN_TIM_UEC_B 0
286#define VXGE_HW_MAX_TIM_UEC_B 65535
287
288 u32 urange_c;
289#define VXGE_HW_MIN_TIM_URANGE_C 0
290#define VXGE_HW_MAX_TIM_URANGE_C 100
291
292 u32 uec_c;
293#define VXGE_HW_MIN_TIM_UEC_C 0
294#define VXGE_HW_MAX_TIM_UEC_C 65535
295
296 u32 uec_d;
297#define VXGE_HW_MIN_TIM_UEC_D 0
298#define VXGE_HW_MAX_TIM_UEC_D 65535
299};
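
/*
 * Initialization sketch (values illustrative, not driver defaults;
 * fields left out would typically carry VXGE_HW_USE_FLASH_DEFAULT):
 *
 *	struct vxge_hw_tim_intr_config tti = {
 *		.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
 *		.btimer_val	= 250,
 *		.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
 *		.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
 *		.util_sel	= VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL,
 *		.urange_a	= 10,
 *		.uec_a		= 1,
 *	};
 */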
300
301#define VXGE_HW_STATS_OP_READ 0
302#define VXGE_HW_STATS_OP_CLEAR_STAT 1
303#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
304#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
305#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
306
307#define VXGE_HW_STATS_LOC_AGGR 17
308#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
309
310#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
311#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
312
313#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
314#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
315 vxge_bVALn(bits, 0, 32)
316
317#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
318 vxge_bVALn(bits, 32, 32)
319
320#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
321#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
322 vxge_bVALn(bits, 0, 32)
323
324#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
325 vxge_bVALn(bits, 32, 32)
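
/* Given vxge_bVALn()'s big-endian bit numbering, VNUM0 and VNUM2 are
 * read from the upper 32 bits of their stats words, while VNUM1 and
 * VNUM3 come from the lower 32 bits. */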
326
327/**
328 * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
329 *
330 * @tx_frms: Count of data frames transmitted on this Aggregator on all
331 * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
332 * However, does include frames discarded by the Distribution
333 * function.
334 * @tx_data_octets: Count of data and padding octets of frames transmitted
335 * on this Aggregator on all its Aggregation ports. Does not include
336 * octets of LACPDUs or Marker PDUs. However, does include octets of
337 * frames discarded by the Distribution function.
338 * @tx_mcast_frms: Count of data frames transmitted (to a group destination
339 * address other than the broadcast address) on this Aggregator on
340 * all its Aggregation ports. Does not include LACPDUs or Marker
341 * PDUs. However, does include frames discarded by the Distribution
342 * function.
343 * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
344 * on all its Aggregation ports. Does not include LACPDUs or Marker
345 * PDUs. However, does include frames discarded by the Distribution
346 * function.
347 * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
348 * that are discarded by the Distribution function. This occurs when
349 * conversations are allocated to different ports and have to be
350 * flushed on old ports.
351 * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
352 * experience transmission errors on its Aggregation ports.
353 * @rx_frms: Count of data frames received on this Aggregator on all its
354 * Aggregation ports. Does not include LACPDUs or Marker PDUs.
355 * Also, does not include frames discarded by the Collection
356 * function.
357 * @rx_data_octets: Count of data and padding octets of frames received on this
358 * Aggregator on all its Aggregation ports. Does not include octets
359 * of LACPDUs or Marker PDUs. Also, does not include
360 * octets of frames
361 * discarded by the Collection function.
362 * @rx_mcast_frms: Count of data frames received (from a group destination
363 * address other than the broadcast address) on this Aggregator on
364 * all its Aggregation ports. Does not include LACPDUs or Marker
365 * PDUs. Also, does not include frames discarded by the Collection
366 * function.
367 * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
368 * all its Aggregation ports. Does not include LACPDUs or Marker
369 * PDUs. Also, does not include frames discarded by the Collection
370 * function.
371 * @rx_discarded_frms: Count of data frames received on this Aggregator that are
372 * discarded by the Collection function because the Collection
373 * function was disabled on the port on which the frames were received.
374 * @rx_errored_frms: Count of data frames received on this Aggregator that are
375 * discarded by its Aggregation ports, or are discarded by the
376 * Collection function of the Aggregator, or that are discarded by
377 * the Aggregator due to detection of an illegal Slow Protocols PDU.
378 * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
379 * that are discarded by its Aggregation ports due to detection of
380 * an unknown Slow Protocols PDU.
381 *
382 * Per aggregator XMAC RX statistics.
383 */
384struct vxge_hw_xmac_aggr_stats {
385/*0x000*/ u64 tx_frms;
386/*0x008*/ u64 tx_data_octets;
387/*0x010*/ u64 tx_mcast_frms;
388/*0x018*/ u64 tx_bcast_frms;
389/*0x020*/ u64 tx_discarded_frms;
390/*0x028*/ u64 tx_errored_frms;
391/*0x030*/ u64 rx_frms;
392/*0x038*/ u64 rx_data_octets;
393/*0x040*/ u64 rx_mcast_frms;
394/*0x048*/ u64 rx_bcast_frms;
395/*0x050*/ u64 rx_discarded_frms;
396/*0x058*/ u64 rx_errored_frms;
397/*0x060*/ u64 rx_unknown_slow_proto_frms;
398} __packed;
399
400/**
401 * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
402 *
403 * @tx_ttl_frms: Count of successfully transmitted MAC frames
404 * @tx_ttl_octets: Count of total octets of transmitted frames, not including
405 * framing characters (i.e. less framing bits). To determine the
406 * total octets of transmitted frames, including framing characters,
407 * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
408 * otherwise configured, this stat only counts frames that have
409 * 8 bytes of preamble for each frame). This stat can be configured
410 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
411 * including the preamble octets.
412 * @tx_data_octets: Count of data and padding octets of successfully transmitted
413 * frames.
414 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
415 * other than the broadcast address.
416 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
417 * group address.
418 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
419 * Includes discarded frames that are not sent to the network.
420 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
421 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
422 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
423 * are passed to the network.
424 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
425 * due to problems within ICMP.
426 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
427 * containing retransmitted octets.
428 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
429 * @tx_udp: Count of transmitted UDP datagrams.
430 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
431 * generally occurs when a packet is corrupt somehow, including
432 * packets that have IP version mismatches, invalid Layer 2 control
433 * fields, etc. L3/L4 checksums are not offloaded, but the packet
434 * is still transmitted.
435 * @tx_unknown_protocol: Increments when the TPA encounters an unknown
436 * protocol, such as a new IPv6 extension header, or an unsupported
437 * Routing Type. The packet still has a checksum calculated but it
438 * may be incorrect.
439 * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
440 * Since the only control frames supported by this device are
441 * PAUSE frames, this register is a count of all transmitted MAC
442 * control frames.
443 * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
444 * on this Aggregation port.
445 * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
446 * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
447 * the network. Increments because of:
448 * 1) An internal processing error
449 * (such as an uncorrectable ECC error). 2) A frame parsing error
450 * during IP checksum calculation.
451 * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
452 * Aggregation port.
453 * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
454 * characters that match a pattern that is programmable through
455 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
456 * is set to /T/ (i.e. the terminate character), thus the statistic
457 * tracks the number of transmitted Terminate characters.
458 * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
459 * characters that match a pattern that is programmable through
460 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
461 * is set to /S/ (i.e. the start character),
462 * thus the statistic tracks
463 * the number of transmitted Start characters.
464 * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
465 * columns that match a pattern that is programmable through register
466 * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
467 * to 4 x /E/ (i.e. a column containing all error characters), thus
468 * the statistic tracks the number of Error columns transmitted at
469 * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
470 * set to 1, then this stat increments when COLUMN2 is found within
471 * 'n' clocks after COLUMN1. Here, 'n' is defined by
472 * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
473 * to 0, then it means to search anywhere for COLUMN2).
474 * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
475 * columns that match a pattern that is programmable through register
476 * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
477 * to 4 x /I/ (i.e. a column containing all idle characters),
478 * thus the statistic tracks the number of transmitted Idle columns.
479 * @tx_any_err_frms: Count of transmitted frames containing any error that
480 * prevents them from being passed to the network. Increments if
481 * there is an ECC while reading the frame out of the transmit
482 * buffer. Also increments if the transmit protocol assist (TPA)
483 * block determines that the frame should not be sent.
484 * @tx_drop_frms: Count of frames that could not be sent for no other reason
485 * than internal MAC processing. Increments once whenever the
486 * transmit buffer is flushed (due to an ECC error on a memory
487 * descriptor).
488 * @rx_ttl_frms: Count of total received MAC frames, including frames received
489 * with frame-too-long, FCS, or length errors. This stat can be
490 * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
491 * everything, even "frames" as small as one byte of preamble.
492 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
493 * frames received with frame-too-long, FCS, or length errors.
494 * @rx_offload_frms: Count of offloaded received frames that are passed to
495 * the host.
496 * @rx_ttl_octets: Count of total octets of received frames, not including
497 * framing characters (i.e. less framing bits). To determine the
498 * total octets of received frames, including framing characters,
499 * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
500 * otherwise configured, this stat only counts frames that have 8
501 * bytes of preamble for each frame). This stat can be configured
502 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
503 * even the preamble octets of "frames" as small as one byte of preamble.
504 * @rx_data_octets: Count of data and padding octets of successfully received
505 * frames. Does not include frames received with frame-too-long,
506 * FCS, or length errors.
507 * @rx_offload_octets: Count of total octets, not including framing
508 * characters, of offloaded received frames that are passed
509 * to the host.
510 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
511 * nonbroadcast group address. Does not include frames received
512 * with frame-too-long, FCS, or length errors.
513 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
514 * the broadcast group address. Does not include frames received
515 * with frame-too-long, FCS, or length errors.
516 * @rx_accepted_ucast_frms: Count of successfully received frames containing
517 * a unicast address. Only includes frames that are passed to
518 * the system.
519 * @rx_accepted_nucast_frms: Count of successfully received frames containing
520 * a non-unicast (broadcast or multicast) address. Only includes
521 * frames that are passed to the system. Could include, for instance,
522 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
523 * register is set to pass FCS-errored frames to the host.
524 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
525 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
526 * + 18 bytes (+ 22 bytes if VLAN-tagged).
527 * @rx_usized_frms: Count of received frames of length (including FCS, but not
528 * framing bits) less than 64 octets, that are otherwise well-formed.
529 * In other words, counts runts.
530 * @rx_osized_frms: Count of received frames of length (including FCS, but not
531 * framing bits) more than 1518 octets, that are otherwise
532 * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
533 * is set to 1, then "more than 1518 octets" becomes "more than 1518
534 * (1522 if VLAN-tagged) octets".
535 * @rx_frag_frms: Count of received frames of length (including FCS, but not
536 * framing bits) less than 64 octets that had bad FCS. In other
537 * words, counts fragments.
538 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
539 * framing bits) more than 1518 octets that had bad FCS. In other
540 * words, counts jabbers. Note: If register
541 * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
542 * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
543 * octets".
544 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
545 * FCS, but not framing bits) of exactly 64 octets. Includes frames
546 * received with frame-too-long, FCS, or length errors.
547 * @rx_ttl_65_127_frms: Count of total received MAC frames with length
548 * (including FCS, but not framing bits) of between 65 and 127
549 * octets inclusive. Includes frames received with frame-too-long,
550 * FCS, or length errors.
551 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
552 * (including FCS, but not framing bits) of between 128 and 255
553 * octets inclusive. Includes frames received with frame-too-long,
554 * FCS, or length errors.
555 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
556 * (including FCS, but not framing bits) of between 256 and 511
557 * octets inclusive. Includes frames received with frame-too-long,
558 * FCS, or length errors.
559 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
560 * (including FCS, but not framing bits) of between 512 and 1023
561 * octets inclusive. Includes frames received with frame-too-long,
562 * FCS, or length errors.
563 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
564 * (including FCS, but not framing bits) of between 1024 and 1518
565 * octets inclusive. Includes frames received with frame-too-long,
566 * FCS, or length errors.
567 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
568 * (including FCS, but not framing bits) of between 1519 and 4095
569 * octets inclusive. Includes frames received with frame-too-long,
570 * FCS, or length errors.
571 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
572 * (including FCS, but not framing bits) of between 4096 and 8191
573 * octets inclusive. Includes frames received with frame-too-long,
574 * FCS, or length errors.
575 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
576 * (including FCS, but not framing bits) of between 8192 and
577 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
578 * with frame-too-long, FCS, or length errors.
579 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
580 * (including FCS, but not framing bits) exceeding
581 * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
582 * Includes frames received with frame-too-long,
583 * FCS, or length errors.
584 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
585 * @rx_accepted_ip: Count of received IP datagrams that
586 * are passed to the system.
587 * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
588 * errored IP datagrams.
589 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
590 * bad IP checksum.
591 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
592 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
593 * Note: This stat contains a count of all received TCP segments,
594 * regardless of whether or not they pertain to an established
595 * connection.
596 * @rx_udp: Count of received UDP datagrams.
597 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
598 * bad TCP checksum.
599 * @rx_pause_count: Count of number of pause quanta that the MAC has been in
600 * the paused state. Recall, one pause quantum equates to 512
601 * bit times.
602 * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
603 * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
604 * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
605 * this register is a count of all received MAC control frames.
606 * Note: This stat may be configured to count all layer 2 errors
607 * (i.e. length errors and FCS errors).
608 * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
609 * not include frames received with frame-too-long or
610 * frame-too-short error.
611 * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
612 * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
613 * for VLAN-tagged frames), inclusive, that does not match the
614 * number of data octets (including pad) received. Also contains
615 * a count of received frames with a length/type field less than
616 * 46 (42 for VLAN-tagged frames) and the number of data octets
617 * (including pad) received is greater than 46 (42 for VLAN-tagged
618 * frames).
619 * @rx_out_rng_len_err_frms: Count of received frames with length/type field
620 * between 1501 and 1535 decimal, inclusive.
621 * @rx_drop_frms: Count of received frames that could not be passed to the host.
622 * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
623 * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
624 * for a list of reasons. Because the RMAC drops one frame at a time,
625 * this stat also indicates the number of drop events.
626 * @rx_discarded_frms: Count of received frames containing
627 * any error that prevents
628 * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
629 * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
630 * reasons.
631 * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
632 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
633 * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
634 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
635 * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
636 * port.
637 * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
638 * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
639 * that carry the Slow Protocols EtherType, but contain an unknown
640 * PDU. Or frames that contain the Slow Protocols group MAC address,
641 * but do not carry the Slow Protocols EtherType.
642 * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
643 * this Aggregation port.
644 * @rx_fcs_discard: Count of received frames that are discarded because the
645 * FCS check failed.
646 * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
647 * that carry the Slow Protocols EtherType, but contain a badly
648 * formed PDU. Or frames that carry the Slow Protocols EtherType,
649 * but contain an illegal value of Protocol Subtype.
650 * @rx_switch_discard: Count of received frames that are discarded by the
651 * internal switch because they did not have an entry in the
652 * Filtering Database. This includes frames that had an invalid
653 * destination MAC address or VLAN ID. It also includes frames that are
654 * discarded because they did not satisfy the length requirements
655 * of the target VPATH.
656 * @rx_len_discard: Count of received frames that are discarded because of an
657 * invalid frame length (includes fragments, oversized frames and
658 * mismatch between frame length and length/type field). This stat
659 * can be configured
660 * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
661 * @rx_rpa_discard: Count of received frames that were discarded because the
662 * receive protocol assist (RPA) discovered an error in the frame
663 * or was unable to parse the frame.
664 * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
665 * Link Aggregation Control Protocol (LACP) frames, etc.) that are
666 * discarded.
667 * @rx_rts_discard: Count of received frames that are discarded by the receive
668 * traffic steering (RTS) logic. Includes those frames discarded
669 * because the SSC response contradicted the switch table, because
670 * the SSC timed out, or because the target queue could not fit the
671 * frame.
672 * @rx_trash_discard: Count of received frames that are discarded because
673 * receive traffic steering (RTS) steered the frame to the trash
674 * queue.
675 * @rx_buff_full_discard: Count of received frames that are discarded because
676 * internal buffers are full. Includes frames discarded because the
677 * RTS logic is waiting for an SSC lookup that has no timeout bound.
678 * Also, includes frames that are dropped because the MAC2FAU buffer
679 * is nearly full -- this can happen if the external receive buffer
680 * is full and the receive path is backing up.
681 * @rx_red_discard: Count of received frames that are discarded because of RED
682 * (Random Early Discard).
683 * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
684 * characters occurring between times of normal data transmission
685 * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
686 * incremented when either -
687 * 1) The Reconciliation Sublayer (RS) is expecting one control
688 * character and gets another (i.e. is expecting a Start
689 * character, but gets another control character).
690 * 2) Start control character is not in lane 0
691 * Only increments the count by one for each XGMII column.
692 * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
693 * during normal data transmission. If the Reconciliation Sublayer
694 * (RS) receives a control character, other than a terminate control
695 * character, during receipt of data octets then this register is
696 * incremented. Also increments if the start frame delimiter is not
697 * found in the correct location. Only increments the count by one
698 * for each XGMII column.
699 * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
700 * that match a pattern that is programmable through register
701 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
702 * to /E/ (i.e. the error character), thus the statistic tracks the
703 * number of Error characters received at any time.
704 * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
705 * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
706 * Only includes symbol errors that are observed between the XGMII
707 * Start Frame Delimiter and End Frame Delimiter, inclusive, and
708 * only increments the count by one for each frame.
709 * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
710 * that match a pattern that is programmable through register
711 * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
712 * to 4 x /E/ (i.e. a column containing all error characters), thus
713 * the statistic tracks the number of Error columns received at any
714 * time.
715 * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
716 * that match a pattern that is programmable through register
717 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
718 * to /E/ (i.e. the error character), thus the statistic tracks the
719 * number of Error characters received at any time.
720 * @rx_local_fault: Maintains a count of the number of times that link
721 * transitioned from "up" to "down" due to a local fault.
722 * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
723 * that match a pattern that is programmable through register
724 * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
725 * to 4 x /E/ (i.e. a column containing all error characters), thus
726 * the statistic tracks the number of Error columns received at any
727 * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
728 * to 1, then this stat increments when COLUMN2 is found within 'n'
729 * clocks after COLUMN1. Here, 'n' is defined by
730 * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
731 * 0, then it means to search anywhere for COLUMN2).
732 * @rx_jettison: Count of received frames that are jettisoned because internal
733 * buffers are full.
734 * @rx_remote_fault: Maintains a count of the number of times that link
735 * transitioned from "up" to "down" due to a remote fault.
736 *
737 * XMAC Port Statistics.
738 */
739struct vxge_hw_xmac_port_stats {
740/*0x000*/ u64 tx_ttl_frms;
741/*0x008*/ u64 tx_ttl_octets;
742/*0x010*/ u64 tx_data_octets;
743/*0x018*/ u64 tx_mcast_frms;
744/*0x020*/ u64 tx_bcast_frms;
745/*0x028*/ u64 tx_ucast_frms;
746/*0x030*/ u64 tx_tagged_frms;
747/*0x038*/ u64 tx_vld_ip;
748/*0x040*/ u64 tx_vld_ip_octets;
749/*0x048*/ u64 tx_icmp;
750/*0x050*/ u64 tx_tcp;
751/*0x058*/ u64 tx_rst_tcp;
752/*0x060*/ u64 tx_udp;
753/*0x068*/ u32 tx_parse_error;
754/*0x06c*/ u32 tx_unknown_protocol;
755/*0x070*/ u64 tx_pause_ctrl_frms;
756/*0x078*/ u32 tx_marker_pdu_frms;
757/*0x07c*/ u32 tx_lacpdu_frms;
758/*0x080*/ u32 tx_drop_ip;
759/*0x084*/ u32 tx_marker_resp_pdu_frms;
760/*0x088*/ u32 tx_xgmii_char2_match;
761/*0x08c*/ u32 tx_xgmii_char1_match;
762/*0x090*/ u32 tx_xgmii_column2_match;
763/*0x094*/ u32 tx_xgmii_column1_match;
764/*0x098*/ u32 unused1;
765/*0x09c*/ u16 tx_any_err_frms;
766/*0x09e*/ u16 tx_drop_frms;
767/*0x0a0*/ u64 rx_ttl_frms;
768/*0x0a8*/ u64 rx_vld_frms;
769/*0x0b0*/ u64 rx_offload_frms;
770/*0x0b8*/ u64 rx_ttl_octets;
771/*0x0c0*/ u64 rx_data_octets;
772/*0x0c8*/ u64 rx_offload_octets;
773/*0x0d0*/ u64 rx_vld_mcast_frms;
774/*0x0d8*/ u64 rx_vld_bcast_frms;
775/*0x0e0*/ u64 rx_accepted_ucast_frms;
776/*0x0e8*/ u64 rx_accepted_nucast_frms;
777/*0x0f0*/ u64 rx_tagged_frms;
778/*0x0f8*/ u64 rx_long_frms;
779/*0x100*/ u64 rx_usized_frms;
780/*0x108*/ u64 rx_osized_frms;
781/*0x110*/ u64 rx_frag_frms;
782/*0x118*/ u64 rx_jabber_frms;
783/*0x120*/ u64 rx_ttl_64_frms;
784/*0x128*/ u64 rx_ttl_65_127_frms;
785/*0x130*/ u64 rx_ttl_128_255_frms;
786/*0x138*/ u64 rx_ttl_256_511_frms;
787/*0x140*/ u64 rx_ttl_512_1023_frms;
788/*0x148*/ u64 rx_ttl_1024_1518_frms;
789/*0x150*/ u64 rx_ttl_1519_4095_frms;
790/*0x158*/ u64 rx_ttl_4096_8191_frms;
791/*0x160*/ u64 rx_ttl_8192_max_frms;
792/*0x168*/ u64 rx_ttl_gt_max_frms;
793/*0x170*/ u64 rx_ip;
794/*0x178*/ u64 rx_accepted_ip;
795/*0x180*/ u64 rx_ip_octets;
796/*0x188*/ u64 rx_err_ip;
797/*0x190*/ u64 rx_icmp;
798/*0x198*/ u64 rx_tcp;
799/*0x1a0*/ u64 rx_udp;
800/*0x1a8*/ u64 rx_err_tcp;
801/*0x1b0*/ u64 rx_pause_count;
802/*0x1b8*/ u64 rx_pause_ctrl_frms;
803/*0x1c0*/ u64 rx_unsup_ctrl_frms;
804/*0x1c8*/ u64 rx_fcs_err_frms;
805/*0x1d0*/ u64 rx_in_rng_len_err_frms;
806/*0x1d8*/ u64 rx_out_rng_len_err_frms;
807/*0x1e0*/ u64 rx_drop_frms;
808/*0x1e8*/ u64 rx_discarded_frms;
809/*0x1f0*/ u64 rx_drop_ip;
810/*0x1f8*/ u64 rx_drop_udp;
811/*0x200*/ u32 rx_marker_pdu_frms;
812/*0x204*/ u32 rx_lacpdu_frms;
813/*0x208*/ u32 rx_unknown_pdu_frms;
814/*0x20c*/ u32 rx_marker_resp_pdu_frms;
815/*0x210*/ u32 rx_fcs_discard;
816/*0x214*/ u32 rx_illegal_pdu_frms;
817/*0x218*/ u32 rx_switch_discard;
818/*0x21c*/ u32 rx_len_discard;
819/*0x220*/ u32 rx_rpa_discard;
820/*0x224*/ u32 rx_l2_mgmt_discard;
821/*0x228*/ u32 rx_rts_discard;
822/*0x22c*/ u32 rx_trash_discard;
823/*0x230*/ u32 rx_buff_full_discard;
824/*0x234*/ u32 rx_red_discard;
825/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
826/*0x23c*/ u32 rx_xgmii_data_err_cnt;
827/*0x240*/ u32 rx_xgmii_char1_match;
828/*0x244*/ u32 rx_xgmii_err_sym;
829/*0x248*/ u32 rx_xgmii_column1_match;
830/*0x24c*/ u32 rx_xgmii_char2_match;
831/*0x250*/ u32 rx_local_fault;
832/*0x254*/ u32 rx_xgmii_column2_match;
833/*0x258*/ u32 rx_jettison;
834/*0x25c*/ u32 rx_remote_fault;
835} __packed;
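
/*
 * The explicit offset annotations above only hold because the structure
 * is declared __packed. A hedged, illustrative sketch -- not part of the
 * driver -- of how a few of those offsets could be verified at compile
 * time with the kernel's BUILD_BUG_ON helper:
 */
#if 0	/* illustrative only */
#include <linux/bug.h>
#include <linux/stddef.h>

static inline void vxge_xmac_port_stats_layout_check(void)
{
	/* Spot-check the commented offsets against the real layout. */
	BUILD_BUG_ON(offsetof(struct vxge_hw_xmac_port_stats,
			      tx_ttl_frms) != 0x000);
	BUILD_BUG_ON(offsetof(struct vxge_hw_xmac_port_stats,
			      rx_ttl_frms) != 0x0a0);
	BUILD_BUG_ON(offsetof(struct vxge_hw_xmac_port_stats,
			      rx_remote_fault) != 0x25c);
}
#endif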
836
837/**
838 * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
839 *
840 * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
841 * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
842 * not including framing characters (i.e. less framing bits).
843 * To determine the total octets of transmitted frames, including
844 * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
845 * this stat (the device always prepends 8 bytes of preamble for
846 * each frame); a sketch of this arithmetic follows this structure.
847 * @tx_data_octets: Count of data and padding octets of successfully transmitted
848 * frames.
849 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
850 * other than the broadcast address.
851 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
852 * group address.
853 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
854 * Includes discarded frames that are not sent to the network.
855 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
856 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
857 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
858 * are passed to the network.
859 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
860 * to problems within ICMP.
861 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
862 * containing retransmitted octets.
863 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
864 * @tx_udp: Count of transmitted UDP datagrams.
865 * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
866 * such as a new IPv6 extension header, or an unsupported Routing
867 * Type. The packet still has a checksum calculated but it may be
868 * incorrect.
869 * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
870 * to the network. Increments because of: 1) An internal processing
871 * error (such as an uncorrectable ECC error). 2) A frame parsing
872 * error during IP checksum calculation.
873 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
874 * generally occurs when a packet is corrupt somehow, including
875 * packets that have IP version mismatches, invalid Layer 2 control
876 * fields, etc. L3/L4 checksums are not offloaded, but the packet
877 * is still transmitted.
878 * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
879 * of transmitted TCP segments. Does not include segments containing
880 * retransmitted octets.
881 * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
882 * total number of segments retransmitted. Retransmitted segments
883 * that are sourced by the host are counted by the host.
884 * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
885 * of transmitted IP datagrams that could not be passed to the
886 * network.
887 *
888 * XMAC Vpath TX Statistics.
889 */
890struct vxge_hw_xmac_vpath_tx_stats {
891 u64 tx_ttl_eth_frms;
892 u64 tx_ttl_eth_octets;
893 u64 tx_data_octets;
894 u64 tx_mcast_frms;
895 u64 tx_bcast_frms;
896 u64 tx_ucast_frms;
897 u64 tx_tagged_frms;
898 u64 tx_vld_ip;
899 u64 tx_vld_ip_octets;
900 u64 tx_icmp;
901 u64 tx_tcp;
902 u64 tx_rst_tcp;
903 u64 tx_udp;
904 u32 tx_unknown_protocol;
905 u32 tx_lost_ip;
906 u32 unused1;
907 u32 tx_parse_error;
908 u64 tx_tcp_offload;
909 u64 tx_retx_tcp_offload;
910 u64 tx_lost_ip_offload;
911} __packed;
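
/*
 * As noted for @tx_ttl_eth_octets above, the counter excludes the 8
 * preamble octets the device prepends to every frame. A minimal sketch
 * of the suggested arithmetic (the helper name is illustrative, not a
 * driver API):
 */
#if 0	/* illustrative only */
static inline u64
vxge_tx_octets_on_wire(const struct vxge_hw_xmac_vpath_tx_stats *s)
{
	/* data octets plus 8 preamble octets per transmitted frame */
	return s->tx_ttl_eth_octets + 8ULL * s->tx_ttl_eth_frms;
}
#endif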
912
913/**
914 * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
915 *
916 * @rx_ttl_eth_frms: Count of successfully received MAC frames.
917 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
918 * frames received with frame-too-long, FCS, or length errors.
919 * @rx_offload_frms: Count of offloaded received frames that are passed to
920 * the host.
921 * @rx_ttl_eth_octets: Count of total octets of received frames, not including
922 * framing characters (i.e. less framing bits). Only counts octets
923 * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
924 * before FCS. To determine the total octets of received frames,
925 * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
926 * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
927 * that have the required 8 bytes of preamble).
928 * @rx_data_octets: Count of data and padding octets of successfully received
929 * frames. Does not include frames received with frame-too-long,
930 * FCS, or length errors.
931 * @rx_offload_octets: Count of total octets, not including framing characters,
932 * of offloaded received frames that are passed to the host.
933 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
934 * nonbroadcast group address. Does not include frames received with
935 * frame-too-long, FCS, or length errors.
936 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
937 * broadcast group address. Does not include frames received with
938 * frame-too-long, FCS, or length errors.
939 * @rx_accepted_ucast_frms: Count of successfully received frames containing
940 * a unicast address. Only includes frames that are passed to the
941 * system.
942 * @rx_accepted_nucast_frms: Count of successfully received frames containing
943 * a non-unicast (broadcast or multicast) address. Only includes
944 * frames that are passed to the system. Could include, for instance,
945 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
946 * register is set to pass FCS-errored frames to the host.
947 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
948 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
949 * + 18 bytes (+ 22 bytes if VLAN-tagged).
950 * @rx_usized_frms: Count of received frames of length (including FCS, but not
951 * framing bits) less than 64 octets, that are otherwise well-formed.
952 * In other words, counts runts.
953 * @rx_osized_frms: Count of received frames of length (including FCS, but not
954 * framing bits) more than 1518 octets, that are otherwise
955 * well-formed.
956 * @rx_frag_frms: Count of received frames of length (including FCS, but not
957 * framing bits) less than 64 octets that had bad FCS.
958 * In other words, counts fragments.
959 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
960 * framing bits) more than 1518 octets that had bad FCS. In other
961 * words, counts jabbers.
962 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
963 * FCS, but not framing bits) of exactly 64 octets. Includes frames
964 * received with frame-too-long, FCS, or length errors.
965 * @rx_ttl_65_127_frms: Count of total received MAC frames
966 * with length (including
967 * FCS, but not framing bits) of between 65 and 127 octets inclusive.
968 * Includes frames received with frame-too-long, FCS,
969 * or length errors.
970 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
971 * (including FCS, but not framing bits)
972 * of between 128 and 255 octets
973 * inclusive. Includes frames received with frame-too-long, FCS,
974 * or length errors.
975 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
976 * (including FCS, but not framing bits)
977 * of between 256 and 511 octets
978 * inclusive. Includes frames received with frame-too-long, FCS, or
979 * length errors.
980 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
981 * (including FCS, but not framing bits) of between 512 and 1023
982 * octets inclusive. Includes frames received with frame-too-long,
983 * FCS, or length errors.
984 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
985 * (including FCS, but not framing bits) of between 1024 and 1518
986 * octets inclusive. Includes frames received with frame-too-long,
987 * FCS, or length errors.
988 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
989 * (including FCS, but not framing bits) of between 1519 and 4095
990 * octets inclusive. Includes frames received with frame-too-long,
991 * FCS, or length errors.
992 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
993 * (including FCS, but not framing bits) of between 4096 and 8191
994 * octets inclusive. Includes frames received with frame-too-long,
995 * FCS, or length errors.
996 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
997 * (including FCS, but not framing bits) of between 8192 and
998 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
999 * with frame-too-long, FCS, or length errors.
1000 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
1001 * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
1002 * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
1003 * received with frame-too-long, FCS, or length errors.
1004 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
1005 * @rx_accepted_ip: Count of received IP datagrams that
1006 * are passed to the system.
1007 * @rx_ip_octets: Count of number of octets in received IP datagrams.
1008 * Includes errored IP datagrams.
1009 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
1010 * bad IP checksum.
1011 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
1012 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
1013 * Note: This stat contains a count of all received TCP segments,
1014 * regardless of whether or not they pertain to an established
1015 * connection.
1016 * @rx_udp: Count of received UDP datagrams.
1017 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
1018 * bad TCP checksum.
1019 * @rx_lost_frms: Count of received frames that could not be passed to the host.
1020 * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
1021 * for a list of reasons.
1022 * @rx_lost_ip: Count of received IP datagrams that could not be passed to
1023 * the host. See RX_LOST_FRMS for a list of reasons.
1024 * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
1025 * of received IP datagrams that could not be passed to the host.
1026 * See RX_LOST_FRMS for a list of reasons.
1027 * @rx_various_discard: Count of received frames that are discarded because
1028 * the target receive queue is full.
1029 * @rx_sleep_discard: Count of received frames that are discarded because the
1030 * target VPATH is asleep (a Wake-on-LAN magic packet can be used
1031 * to awaken the VPATH).
1032 * @rx_red_discard: Count of received frames that are discarded because of RED
1033 * (Random Early Discard).
1034 * @rx_queue_full_discard: Count of received frames that are discarded because
1035 * the target receive queue is full.
1036 * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
1037 *
1038 * XMAC Vpath RX Statistics.
1039 */
1040struct vxge_hw_xmac_vpath_rx_stats {
1041 u64 rx_ttl_eth_frms;
1042 u64 rx_vld_frms;
1043 u64 rx_offload_frms;
1044 u64 rx_ttl_eth_octets;
1045 u64 rx_data_octets;
1046 u64 rx_offload_octets;
1047 u64 rx_vld_mcast_frms;
1048 u64 rx_vld_bcast_frms;
1049 u64 rx_accepted_ucast_frms;
1050 u64 rx_accepted_nucast_frms;
1051 u64 rx_tagged_frms;
1052 u64 rx_long_frms;
1053 u64 rx_usized_frms;
1054 u64 rx_osized_frms;
1055 u64 rx_frag_frms;
1056 u64 rx_jabber_frms;
1057 u64 rx_ttl_64_frms;
1058 u64 rx_ttl_65_127_frms;
1059 u64 rx_ttl_128_255_frms;
1060 u64 rx_ttl_256_511_frms;
1061 u64 rx_ttl_512_1023_frms;
1062 u64 rx_ttl_1024_1518_frms;
1063 u64 rx_ttl_1519_4095_frms;
1064 u64 rx_ttl_4096_8191_frms;
1065 u64 rx_ttl_8192_max_frms;
1066 u64 rx_ttl_gt_max_frms;
1067 u64 rx_ip;
1068 u64 rx_accepted_ip;
1069 u64 rx_ip_octets;
1070 u64 rx_err_ip;
1071 u64 rx_icmp;
1072 u64 rx_tcp;
1073 u64 rx_udp;
1074 u64 rx_err_tcp;
1075 u64 rx_lost_frms;
1076 u64 rx_lost_ip;
1077 u64 rx_lost_ip_offload;
1078 u16 rx_various_discard;
1079 u16 rx_sleep_discard;
1080 u16 rx_red_discard;
1081 u16 rx_queue_full_discard;
1082 u64 rx_mpa_ok_frms;
1083} __packed;
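
/*
 * A hedged sketch (the helper name is illustrative, not a driver API)
 * that groups the four malformed-length counters described above --
 * runts, oversized frames, fragments and jabbers -- into one figure:
 */
#if 0	/* illustrative only */
static inline u64
vxge_rx_bad_len_frms(const struct vxge_hw_xmac_vpath_rx_stats *s)
{
	return s->rx_usized_frms + s->rx_osized_frms +
	       s->rx_frag_frms + s->rx_jabber_frms;
}
#endif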
1084
1085/**
1086 * struct vxge_hw_xmac_stats - XMAC Statistics
1087 *
1088 * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
1089 * @port_stats: Statistics on ports (wire 0, wire 1, LAG)
1090 * @vpath_tx_stats: Per vpath XMAC TX stats
1091 * @vpath_rx_stats: Per vpath XMAC RX stats
1092 *
1093 * XMAC Statistics.
1094 */
1095struct vxge_hw_xmac_stats {
1096 struct vxge_hw_xmac_aggr_stats
1097 aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
1098 struct vxge_hw_xmac_port_stats
1099 port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
1100 struct vxge_hw_xmac_vpath_tx_stats
1101 vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1102 struct vxge_hw_xmac_vpath_rx_stats
1103 vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1104};
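
/*
 * A hedged sketch of how the per-vpath arrays above might be rolled up
 * into device-wide totals after vxge_hw_device_xmac_stats_get()
 * (declared further down in this header) has filled in the structure.
 * The helper below is illustrative, not part of the driver:
 */
#if 0	/* illustrative only */
static void vxge_sum_vpath_frames(const struct vxge_hw_xmac_stats *xs,
				  u64 *tx_frms, u64 *rx_frms)
{
	u32 i;

	*tx_frms = *rx_frms = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		*tx_frms += xs->vpath_tx_stats[i].tx_ttl_eth_frms;
		*rx_frms += xs->vpath_rx_stats[i].rx_ttl_eth_frms;
	}
}
#endif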
1105
1106/**
1107 * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
1108 * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
1109 * for the given VPATH
1110 * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
1111 * @ini_num_cpl_rcvd: The number of PCI read completions received by the
1112 * PIC block
1113 * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
1114 * block to the host
1115 * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
1116 * the PIC block
1117 * @wrcrdtarb_xoff: TBD
1118 * @rdcrdtarb_xoff: TBD
1119 * @vpath_genstats_count0: TBD
1120 * @vpath_genstats_count1: TBD
1121 * @vpath_genstats_count2: TBD
1122 * @vpath_genstats_count3: TBD
1123 * @vpath_genstats_count4: TBD
1124 * @vpath_genstats_count5: TBD
1125 * @tx_stats: Transmit stats
1126 * @rx_stats: Receive stats
1127 * @prog_event_vnum1: Programmable statistic. Increments when internal logic
1128 * detects a certain event. See register
1129 * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
1130 * @prog_event_vnum0: Programmable statistic. Increments when internal logic
1131 * detects a certain event. See register
1132 * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
1133 * @prog_event_vnum3: Programmable statistic. Increments when internal logic
1134 * detects a certain event. See register
1135 * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
1136 * @prog_event_vnum2: Programmable statistic. Increments when internal logic
1137 * detects a certain event. See register
1138 * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
1139 * @rx_multi_cast_frame_discard: TBD
1140 * @rx_frm_transferred: TBD
1141 * @rxd_returned: TBD
1142 * @rx_mpa_len_fail_frms: Count of received frames
1143 * that fail the MPA length check
1144 * @rx_mpa_mrk_fail_frms: Count of received frames
1145 * that fail the MPA marker check
1146 * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
1147 * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
1148 * frame buffer (and subsequently to the host).
1149 * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
1150 * because the VPATH is in reset
1151 * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
1152 * whenever the received frame matches the VPATH's Wake-on-LAN
1153 * signature(s) CRC.
1154 * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
1155 * because the VPATH is in reset. Includes frames that are discarded
1156 * because the current VPIN does not match the VPIN of the frame
1157 *
1158 * Titan vpath hardware statistics.
1159 */
1160struct vxge_hw_vpath_stats_hw_info {
1161/*0x000*/ u32 ini_num_mwr_sent;
1162/*0x004*/ u32 unused1;
1163/*0x008*/ u32 ini_num_mrd_sent;
1164/*0x00c*/ u32 unused2;
1165/*0x010*/ u32 ini_num_cpl_rcvd;
1166/*0x014*/ u32 unused3;
1167/*0x018*/ u64 ini_num_mwr_byte_sent;
1168/*0x020*/ u64 ini_num_cpl_byte_rcvd;
1169/*0x028*/ u32 wrcrdtarb_xoff;
1170/*0x02c*/ u32 unused4;
1171/*0x030*/ u32 rdcrdtarb_xoff;
1172/*0x034*/ u32 unused5;
1173/*0x038*/ u32 vpath_genstats_count0;
1174/*0x03c*/ u32 vpath_genstats_count1;
1175/*0x040*/ u32 vpath_genstats_count2;
1176/*0x044*/ u32 vpath_genstats_count3;
1177/*0x048*/ u32 vpath_genstats_count4;
1178/*0x04c*/ u32 unused6;
1179/*0x050*/ u32 vpath_genstats_count5;
1180/*0x054*/ u32 unused7;
1181/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
1182/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
1183/*0x220*/ u64 unused9;
1184/*0x228*/ u32 prog_event_vnum1;
1185/*0x22c*/ u32 prog_event_vnum0;
1186/*0x230*/ u32 prog_event_vnum3;
1187/*0x234*/ u32 prog_event_vnum2;
1188/*0x238*/ u16 rx_multi_cast_frame_discard;
1189/*0x23a*/ u8 unused10[6];
1190/*0x240*/ u32 rx_frm_transferred;
1191/*0x244*/ u32 unused11;
1192/*0x248*/ u16 rxd_returned;
1193/*0x24a*/ u8 unused12[6];
1194/*0x252*/ u16 rx_mpa_len_fail_frms;
1195/*0x254*/ u16 rx_mpa_mrk_fail_frms;
1196/*0x256*/ u16 rx_mpa_crc_fail_frms;
1197/*0x258*/ u16 rx_permitted_frms;
1198/*0x25c*/ u64 rx_vp_reset_discarded_frms;
1199/*0x25e*/ u64 rx_wol_frms;
1200/*0x260*/ u64 tx_vp_reset_discarded_frms;
1201} __packed;
1202
1203
1204/**
1205 * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
1206 * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
1207 * by the adapter that were discarded because the VPATH is out of service
1208 * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
1209 * adapter that were discarded because the VPATH is out of service
1210 * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
1211 * the posted header credits for upstream PCI writes were depleted
1212 * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
1213 * the posted header credits for upstream PCI writes were depleted
1214 * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
1215 * the posted header credits for upstream PCI writes were depleted
1216 * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
1217 * the posted header credits for upstream PCI writes were depleted
1218 * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
1219 * the posted header credits for upstream PCI writes were depleted
1220 * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
1221 * the posted header credits for upstream PCI writes were depleted
1222 * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
1223 * the posted header credits for upstream PCI writes were depleted
1224 * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
1225 * the posted header credits for upstream PCI writes were depleted
1226 * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
1227 * the posted header credits for upstream PCI writes were depleted
1228 * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
1229 * the posted header credits for upstream PCI writes were depleted
1230 * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
1231 * the posted header credits for upstream PCI writes were depleted
1232 * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
1233 * the posted header credits for upstream PCI writes were depleted
1234 * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
1235 * the posted header credits for upstream PCI writes were depleted
1236 * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
1237 * the posted header credits for upstream PCI writes were depleted
1238 * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
1239 * the posted header credits for upstream PCI writes were depleted
1240 * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
1241 * the posted header credits for upstream PCI writes were depleted
1242 * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
1243 * the posted header credits for upstream PCI writes were depleted
1244 * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
1245 * the posted data credits for upstream PCI writes were depleted
1246 * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
1247 * the posted data credits for upstream PCI writes were depleted
1248 * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
1249 * the posted data credits for upstream PCI writes were depleted
1250 * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
1251 * the posted data credits for upstream PCI writes were depleted
1252 * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
1253 * the posted data credits for upstream PCI writes were depleted
1254 * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
1255 * the posted data credits for upstream PCI writes were depleted
1256 * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
1257 * the posted data credits for upstream PCI writes were depleted
1258 * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
1259 * the posted data credits for upstream PCI writes were depleted
1260 * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
1261 * the posted data credits for upstream PCI writes were depleted
1262 * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
1263 * the posted data credits for upstream PCI writes were depleted
1264 * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
1265 * the posted data credits for upstream PCI writes were depleted
1266 * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
1267 * the posted data credits for upstream PCI writes were depleted
1268 * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
1269 * the posted data credits for upstream PCI writes were depleted
1270 * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
1271 * the posted data credits for upstream PCI writes were depleted
1272 * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
1273 * the posted data credits for upstream PCI writes were depleted
1274 * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
1275 * the posted data credits for upstream PCI writes were depleted
1276 * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
1277 * the posted data credits for upstream PCI writes were depleted
1278 * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
1279 * the non-posted header credits for upstream PCI reads were depleted
1280 * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
1281 * the non-posted header credits for upstream PCI reads were depleted
1282 * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
1283 * the non-posted header credits for upstream PCI reads were depleted
1284 * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
1285 * the non-posted header credits for upstream PCI reads were depleted
1286 * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
1287 * the non-posted header credits for upstream PCI reads were depleted
1288 * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
1289 * the non-posted header credits for upstream PCI reads were depleted
1290 * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
1291 * the non-posted header credits for upstream PCI reads were depleted
1292 * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
1293 * the non-posted header credits for upstream PCI reads were depleted
1294 * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
1295 * the non-posted header credits for upstream PCI reads were depleted
1296 * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
1297 * the non-posted header credits for upstream PCI reads were depleted
1298 * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
1299 * the non-posted header credits for upstream PCI reads were depleted
1300 * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
1301 * the non-posted header credits for upstream PCI reads were depleted
1302 * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
1303 * the non-posted header credits for upstream PCI reads were depleted
1304 * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
1305 * the non-posted header credits for upstream PCI reads were depleted
1306 * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
1307 * the non-posted header credits for upstream PCI reads were depleted
1308 * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
1309 * the non-posted header credits for upstream PCI reads were depleted
1310 * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
1311 * the non-posted header credits for upstream PCI reads were depleted
1312 * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
1313 * the adapter that were discarded because the VPATH instance number does
1314 * not match
1315 * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
1316 * by the adapter that were discarded because the VPATH instance number
1317 * does not match
1318 * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
1319 * to the GENSTATS0_CFG for information on configuring this statistic
1320 * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
1321 * to the GENSTATS1_CFG for information on configuring this statistic
1322 * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
1323 * to the GENSTATS2_CFG for information on configuring this statistic
1324 * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
1325 * to the GENSTATS3_CFG for information on configuring this statistic
1326 * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
1327 * to the GENSTATS4_CFG for information on configuring this statistic
1328 * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
1329 * to the GENSTATS5_CFG for information on configuring this statistic
1330 * @pci.rstdrop_cpl 0x01c8 4
1331 * @pci.rstdrop_msg 0x01cc 4
1332 * @pci.rstdrop_client1 0x01d0 4
1333 * @pci.rstdrop_client0 0x01d4 4
1334 * @pci.rstdrop_client2 0x01d8 4
1335 * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
1336 * header credits were depleted
1337 * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
1338 * header credits were depleted
1339 * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
1340 * header credits were depleted
1341 * @pci.depl_cplh[vplane1] 0x01ea 2
1342 * @pci.depl_nph[vplane1] 0x01ec 2
1343 * @pci.depl_ph[vplane1] 0x01ee 2
1344 * @pci.depl_cplh[vplane2] 0x01f2 2
1345 * @pci.depl_nph[vplane2] 0x01f4 2
1346 * @pci.depl_ph[vplane2] 0x01f6 2
1347 * @pci.depl_cplh[vplane3] 0x01fa 2
1348 * @pci.depl_nph[vplane3] 0x01fc 2
1349 * @pci.depl_ph[vplane3] 0x01fe 2
1350 * @pci.depl_cplh[vplane4] 0x0202 2
1351 * @pci.depl_nph[vplane4] 0x0204 2
1352 * @pci.depl_ph[vplane4] 0x0206 2
1353 * @pci.depl_cplh[vplane5] 0x020a 2
1354 * @pci.depl_nph[vplane5] 0x020c 2
1355 * @pci.depl_ph[vplane5] 0x020e 2
1356 * @pci.depl_cplh[vplane6] 0x0212 2
1357 * @pci.depl_nph[vplane6] 0x0214 2
1358 * @pci.depl_ph[vplane6] 0x0216 2
1359 * @pci.depl_cplh[vplane7] 0x021a 2
1360 * @pci.depl_nph[vplane7] 0x021c 2
1361 * @pci.depl_ph[vplane7] 0x021e 2
1362 * @pci.depl_cplh[vplane8] 0x0222 2
1363 * @pci.depl_nph[vplane8] 0x0224 2
1364 * @pci.depl_ph[vplane8] 0x0226 2
1365 * @pci.depl_cplh[vplane9] 0x022a 2
1366 * @pci.depl_nph[vplane9] 0x022c 2
1367 * @pci.depl_ph[vplane9] 0x022e 2
1368 * @pci.depl_cplh[vplane10] 0x0232 2
1369 * @pci.depl_nph[vplane10] 0x0234 2
1370 * @pci.depl_ph[vplane10] 0x0236 2
1371 * @pci.depl_cplh[vplane11] 0x023a 2
1372 * @pci.depl_nph[vplane11] 0x023c 2
1373 * @pci.depl_ph[vplane11] 0x023e 2
1374 * @pci.depl_cplh[vplane12] 0x0242 2
1375 * @pci.depl_nph[vplane12] 0x0244 2
1376 * @pci.depl_ph[vplane12] 0x0246 2
1377 * @pci.depl_cplh[vplane13] 0x024a 2
1378 * @pci.depl_nph[vplane13] 0x024c 2
1379 * @pci.depl_ph[vplane13] 0x024e 2
1380 * @pci.depl_cplh[vplane14] 0x0252 2
1381 * @pci.depl_nph[vplane14] 0x0254 2
1382 * @pci.depl_ph[vplane14] 0x0256 2
1383 * @pci.depl_cplh[vplane15] 0x025a 2
1384 * @pci.depl_nph[vplane15] 0x025c 2
1385 * @pci.depl_ph[vplane15] 0x025e 2
1386 * @pci.depl_cplh[vplane16] 0x0262 2
1387 * @pci.depl_nph[vplane16] 0x0264 2
1388 * @pci.depl_ph[vplane16] 0x0266 2
1389 * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
1390 * credits were depleted
1391 * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
1392 * credits were depleted
1393 * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
1394 * credits were depleted
1395 * @pci.depl_cpld[vplane1] 0x0272 2
1396 * @pci.depl_npd[vplane1] 0x0274 2
1397 * @pci.depl_pd[vplane1] 0x0276 2
1398 * @pci.depl_cpld[vplane2] 0x027a 2
1399 * @pci.depl_npd[vplane2] 0x027c 2
1400 * @pci.depl_pd[vplane2] 0x027e 2
1401 * @pci.depl_cpld[vplane3] 0x0282 2
1402 * @pci.depl_npd[vplane3] 0x0284 2
1403 * @pci.depl_pd[vplane3] 0x0286 2
1404 * @pci.depl_cpld[vplane4] 0x028a 2
1405 * @pci.depl_npd[vplane4] 0x028c 2
1406 * @pci.depl_pd[vplane4] 0x028e 2
1407 * @pci.depl_cpld[vplane5] 0x0292 2
1408 * @pci.depl_npd[vplane5] 0x0294 2
1409 * @pci.depl_pd[vplane5] 0x0296 2
1410 * @pci.depl_cpld[vplane6] 0x029a 2
1411 * @pci.depl_npd[vplane6] 0x029c 2
1412 * @pci.depl_pd[vplane6] 0x029e 2
1413 * @pci.depl_cpld[vplane7] 0x02a2 2
1414 * @pci.depl_npd[vplane7] 0x02a4 2
1415 * @pci.depl_pd[vplane7] 0x02a6 2
1416 * @pci.depl_cpld[vplane8] 0x02aa 2
1417 * @pci.depl_npd[vplane8] 0x02ac 2
1418 * @pci.depl_pd[vplane8] 0x02ae 2
1419 * @pci.depl_cpld[vplane9] 0x02b2 2
1420 * @pci.depl_npd[vplane9] 0x02b4 2
1421 * @pci.depl_pd[vplane9] 0x02b6 2
1422 * @pci.depl_cpld[vplane10] 0x02ba 2
1423 * @pci.depl_npd[vplane10] 0x02bc 2
1424 * @pci.depl_pd[vplane10] 0x02be 2
1425 * @pci.depl_cpld[vplane11] 0x02c2 2
1426 * @pci.depl_npd[vplane11] 0x02c4 2
1427 * @pci.depl_pd[vplane11] 0x02c6 2
1428 * @pci.depl_cpld[vplane12] 0x02ca 2
1429 * @pci.depl_npd[vplane12] 0x02cc 2
1430 * @pci.depl_pd[vplane12] 0x02ce 2
1431 * @pci.depl_cpld[vplane13] 0x02d2 2
1432 * @pci.depl_npd[vplane13] 0x02d4 2
1433 * @pci.depl_pd[vplane13] 0x02d6 2
1434 * @pci.depl_cpld[vplane14] 0x02da 2
1435 * @pci.depl_npd[vplane14] 0x02dc 2
1436 * @pci.depl_pd[vplane14] 0x02de 2
1437 * @pci.depl_cpld[vplane15] 0x02e2 2
1438 * @pci.depl_npd[vplane15] 0x02e4 2
1439 * @pci.depl_pd[vplane15] 0x02e6 2
1440 * @pci.depl_cpld[vplane16] 0x02ea 2
1441 * @pci.depl_npd[vplane16] 0x02ec 2
1442 * @pci.depl_pd[vplane16] 0x02ee 2
1443 * @xgmac_port[3];
1444 * @xgmac_aggr[2];
1445 * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
1446 * Increments when internal logic detects a certain event. See register
1447 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
1448 * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
1449 * Increments when internal logic detects a certain event. See register
1450 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
1451 * @xgmac.orp_lro_events 0x0af8 8
1452 * @xgmac.orp_bs_events 0x0b00 8
1453 * @xgmac.orp_iwarp_events 0x0b08 8
1454 * @xgmac.tx_permitted_frms 0x0b14 4
1455 * @xgmac.port2_tx_any_frms 0x0b1d 1
1456 * @xgmac.port1_tx_any_frms 0x0b1e 1
1457 * @xgmac.port0_tx_any_frms 0x0b1f 1
1458 * @xgmac.port2_rx_any_frms 0x0b25 1
1459 * @xgmac.port1_rx_any_frms 0x0b26 1
1460 * @xgmac.port0_rx_any_frms 0x0b27 1
1461 *
1462 * Titan mrpcim hardware statistics.
1463 */
1464struct vxge_hw_device_stats_mrpcim_info {
1465/*0x0000*/ u32 pic_ini_rd_drop;
1466/*0x0004*/ u32 pic_ini_wr_drop;
1467/*0x0008*/ struct {
1468 /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
1469 /*0x0004*/ u32 unused1;
1470 } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
1471/*0x0090*/ struct {
1472 /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
1473 /*0x0004*/ u32 unused2;
1474 } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
1475/*0x0118*/ struct {
1476 /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
1477 /*0x0004*/ u32 unused3;
1478 } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
1479/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
1480/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
1481/*0x01a8*/ u32 pic_genstats_count0;
1482/*0x01ac*/ u32 pic_genstats_count1;
1483/*0x01b0*/ u32 pic_genstats_count2;
1484/*0x01b4*/ u32 pic_genstats_count3;
1485/*0x01b8*/ u32 pic_genstats_count4;
1486/*0x01bc*/ u32 unused4;
1487/*0x01c0*/ u32 pic_genstats_count5;
1488/*0x01c4*/ u32 unused5;
1489/*0x01c8*/ u32 pci_rstdrop_cpl;
1490/*0x01cc*/ u32 pci_rstdrop_msg;
1491/*0x01d0*/ u32 pci_rstdrop_client1;
1492/*0x01d4*/ u32 pci_rstdrop_client0;
1493/*0x01d8*/ u32 pci_rstdrop_client2;
1494/*0x01dc*/ u32 unused6;
1495/*0x01e0*/ struct {
1496 /*0x0000*/ u16 unused7;
1497 /*0x0002*/ u16 pci_depl_cplh;
1498 /*0x0004*/ u16 pci_depl_nph;
1499 /*0x0006*/ u16 pci_depl_ph;
1500 } pci_depl_h_vplane[17];
1501/*0x0268*/ struct {
1502 /*0x0000*/ u16 unused8;
1503 /*0x0002*/ u16 pci_depl_cpld;
1504 /*0x0004*/ u16 pci_depl_npd;
1505 /*0x0006*/ u16 pci_depl_pd;
1506 } pci_depl_d_vplane[17];
1507/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
1508/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
1509/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
1510/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
1511/*0x0af0*/ u64 unused7;
1512/*0x0af8*/ u64 unused8;
1513/*0x0b00*/ u64 unused9;
1514/*0x0b08*/ u64 unused10;
1515/*0x0b10*/ u32 unused11;
1516/*0x0b14*/ u32 xgmac_tx_permitted_frms;
1517/*0x0b18*/ u32 unused12;
1518/*0x0b1c*/ u8 unused13;
1519/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
1520/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
1521/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
1522/*0x0b20*/ u32 unused14;
1523/*0x0b24*/ u8 unused15;
1524/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
1525/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
1526/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
1527} __packed;
1528
1529/**
1530 * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
1531 * @vpath_info: VPath statistics
1532 * @vpath_info_sav: Vpath statistics saved
1533 *
1534 * Titan hardware statistics.
1535 */
1536struct vxge_hw_device_stats_hw_info {
1537 struct vxge_hw_vpath_stats_hw_info
1538 *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1539 struct vxge_hw_vpath_stats_hw_info
1540 vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
1541};
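
/*
 * The @vpath_info_sav entries read as saved snapshots of the live
 * @vpath_info counters. Assuming that semantic, a delta for one vpath
 * could be sketched as below (illustrative only, not a driver API):
 */
#if 0	/* illustrative only */
static u32 vxge_vpath_mwr_delta(const struct vxge_hw_device_stats_hw_info *hw,
				u32 vp_id)
{
	return hw->vpath_info[vp_id]->ini_num_mwr_sent -
	       hw->vpath_info_sav[vp_id].ini_num_mwr_sent;
}
#endif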
1542
1543/**
1544 * struct vxge_hw_vpath_stats_sw_common_info - HW common
1545 * statistics for queues.
1546 * @full_cnt: Number of times the queue was full
1547 * @usage_cnt: usage count.
1548 * @usage_max: Maximum usage
1549 * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
1550 * @total_compl_cnt: Total descriptor completion count.
1551 *
1552 * Hw queue counters
1553 * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
1554 * struct vxge_hw_vpath_stats_sw_ring_info{},
1555 */
1556struct vxge_hw_vpath_stats_sw_common_info {
1557 u32 full_cnt;
1558 u32 usage_cnt;
1559 u32 usage_max;
1560 u32 reserve_free_swaps_cnt;
1561 u32 total_compl_cnt;
1562};
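
/*
 * A minimal sketch of the assumed relationship between @usage_cnt and
 * @usage_max: a current-depth counter paired with a high-water mark
 * (the helper is illustrative, not part of the driver):
 */
#if 0	/* illustrative only */
static void
vxge_queue_usage_update(struct vxge_hw_vpath_stats_sw_common_info *st,
			u32 in_use)
{
	st->usage_cnt = in_use;
	if (in_use > st->usage_max)
		st->usage_max = in_use;
}
#endif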
1563
1564/**
1565 * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
1566 * @common_stats: Common counters for all queues
1567 * @total_posts: Total number of postings on the queue.
1568 * @total_buffers: Total number of buffers posted.
1569 * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
1570 * (index) in this array reflects the transfer code type, for instance
1571 * 0xA - "loss of link".
1572 * Value txd_t_code_err_cnt[i] reflects the
1573 * number of times the corresponding transfer code was encountered.
1574 *
1575 * HW fifo counters
1576 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1577 * struct vxge_hw_vpath_stats_sw_ring_info{},
1578 */
1579struct vxge_hw_vpath_stats_sw_fifo_info {
1580 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1581 u32 total_posts;
1582 u32 total_buffers;
1583 u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1584};
1585
1586/**
1587 * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
1588 * @common_stats: Common counters for all queues
1589 * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
1590 * (index) in this array reflects the transfer code type,
1591 * for instance
1592 * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
1593 * Value rxd_t_code_err_cnt[i] reflects the
1594 * number of times the corresponding transfer code was encountered.
1595 *
1596 * HW ring counters
1597 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1598 * struct vxge_hw_vpath_stats_sw_fifo_info{},
1599 */
1600struct vxge_hw_vpath_stats_sw_ring_info {
1601 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1602 u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1603
1604};
1605
1606/**
1607 * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
1608 * @unknown_alarms:
1609 * @network_sustained_fault:
1610 * @network_sustained_ok:
1611 * @kdfcctl_fifo0_overwrite:
1612 * @kdfcctl_fifo0_poison:
1613 * @kdfcctl_fifo0_dma_error:
1614 * @dblgen_fifo0_overflow:
1615 * @statsb_pif_chain_error:
1616 * @statsb_drop_timeout:
1617 * @target_illegal_access:
1618 * @ini_serr_det:
1619 * @prc_ring_bumps:
1620 * @prc_rxdcm_sc_err:
1621 * @prc_rxdcm_sc_abort:
1622 * @prc_quanta_size_err:
1623 *
1624 * HW vpath error statistics
1625 */
1626struct vxge_hw_vpath_stats_sw_err {
1627 u32 unknown_alarms;
1628 u32 network_sustained_fault;
1629 u32 network_sustained_ok;
1630 u32 kdfcctl_fifo0_overwrite;
1631 u32 kdfcctl_fifo0_poison;
1632 u32 kdfcctl_fifo0_dma_error;
1633 u32 dblgen_fifo0_overflow;
1634 u32 statsb_pif_chain_error;
1635 u32 statsb_drop_timeout;
1636 u32 target_illegal_access;
1637 u32 ini_serr_det;
1638 u32 prc_ring_bumps;
1639 u32 prc_rxdcm_sc_err;
1640 u32 prc_rxdcm_sc_abort;
1641 u32 prc_quanta_size_err;
1642};
1643
1644/**
1645 * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
1646 * @soft_reset_cnt: Number of times soft reset is done on this vpath.
1647 * @error_stats: error counters for the vpath
1648 * @ring_stats: counters for ring belonging to the vpath
1649 * @fifo_stats: counters for fifo belonging to the vpath
1650 *
1651 * HW vpath sw statistics
1652 * See also: struct vxge_hw_device_info{}.
1653 */
1654struct vxge_hw_vpath_stats_sw_info {
1655 u32 soft_reset_cnt;
1656 struct vxge_hw_vpath_stats_sw_err error_stats;
1657 struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
1658 struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
1659};
1660
1661/**
1662 * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
1663 *
1664 * @not_traffic_intr_cnt: Number of times the host was interrupted
1665 * without new completions.
1666 * "Non-traffic interrupt counter".
1667 * @traffic_intr_cnt: Number of traffic interrupts for the device.
1668 * @total_intr_cnt: Total number of traffic interrupts for the device.
1669 * @total_intr_cnt == @traffic_intr_cnt +
1670 * @not_traffic_intr_cnt
1671 * @soft_reset_cnt: Number of times soft reset is done on this device.
1672 * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
1673 * HW per-device statistics.
1674 */
1675struct vxge_hw_device_stats_sw_info {
1676 u32 not_traffic_intr_cnt;
1677 u32 traffic_intr_cnt;
1678 u32 total_intr_cnt;
1679 u32 soft_reset_cnt;
1680 struct vxge_hw_vpath_stats_sw_info
1681 vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1682};
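
/*
 * Per the identity stated above, @total_intr_cnt should equal the sum
 * of the traffic and non-traffic counters; an illustrative consistency
 * check (not a driver API):
 */
#if 0	/* illustrative only */
static bool
vxge_intr_counts_consistent(const struct vxge_hw_device_stats_sw_info *s)
{
	return s->total_intr_cnt ==
	       s->traffic_intr_cnt + s->not_traffic_intr_cnt;
}
#endif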
1683
1684/**
1685 * struct vxge_hw_device_stats_sw_err - HW device error statistics.
1686 * @vpath_alarms: Number of vpath alarms
1687 *
1688 * HW Device error stats
1689 */
1690struct vxge_hw_device_stats_sw_err {
1691 u32 vpath_alarms;
1692};
1693
1694/**
1695 * struct vxge_hw_device_stats - Contains HW per-device statistics,
1696 * including hw.
1697 * @devh: HW device handle.
1698 * @dma_addr: DMA address of the %hw_info. Given to the device to fill in the stats.
1699 * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
1700 * space.
1701 * @hw_info_dma_acch: One more DMA handle used subsequently to free the
1702 * DMA object. Note that this and the previous handle have
1703 * physical meaning for Solaris; on Windows and Linux the
1704 * corresponding value will simply be a pointer to the PCI device.
1705 *
1706 * @hw_dev_info_stats: Titan statistics maintained by the hardware.
1707 * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
1708 * of completions per interrupt.
1709 * @sw_dev_err_stats: HW's "soft" device error statistics.
1710 *
1711 * Structure-container of HW per-device statistics. Note that per-channel
1712 * statistics are kept in separate structures under HW's fifo and ring
1713 * channels.
1714 */
1715struct vxge_hw_device_stats {
1716 /* handles */
1717 struct __vxge_hw_device *devh;
1718
1719 /* HW device hardware statistics */
1720 struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
1721
1722 /* HW device "soft" stats */
1723 struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
1724 struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
1725
1726};
1727
1728enum vxge_hw_status vxge_hw_device_hw_stats_enable(
1729 struct __vxge_hw_device *devh);
1730
1731enum vxge_hw_status vxge_hw_device_stats_get(
1732 struct __vxge_hw_device *devh,
1733 struct vxge_hw_device_stats_hw_info *hw_stats);
1734
1735enum vxge_hw_status vxge_hw_driver_stats_get(
1736 struct __vxge_hw_device *devh,
1737 struct vxge_hw_device_stats_sw_info *sw_stats);
1738
1739enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
1740
1741enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
1742
1743enum vxge_hw_status
1744vxge_hw_mrpcim_stats_access(
1745 struct __vxge_hw_device *devh,
1746 u32 operation,
1747 u32 location,
1748 u32 offset,
1749 u64 *stat);
1750
1751enum vxge_hw_status
1752vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
1753 struct vxge_hw_xmac_stats *xmac_stats);
1754
1755/**
1756 * enum vxge_hw_mgmt_reg_type - Register types.
1757 *
1758 * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
1759 * @vxge_hw_mgmt_reg_type_toc: TOC Registers
1760 * @vxge_hw_mgmt_reg_type_common: Common Registers
1761 * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
1762 * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
1763 * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
1764 * @vxge_hw_mgmt_reg_type_vpath: vpath registers
1765 *
1766 * Register type enumeration
1767 */
1768enum vxge_hw_mgmt_reg_type {
1769 vxge_hw_mgmt_reg_type_legacy = 0,
1770 vxge_hw_mgmt_reg_type_toc = 1,
1771 vxge_hw_mgmt_reg_type_common = 2,
1772 vxge_hw_mgmt_reg_type_mrpcim = 3,
1773 vxge_hw_mgmt_reg_type_srpcim = 4,
1774 vxge_hw_mgmt_reg_type_vpmgmt = 5,
1775 vxge_hw_mgmt_reg_type_vpath = 6
1776};
1777
1778enum vxge_hw_status
1779vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
1780 enum vxge_hw_mgmt_reg_type type,
1781 u32 index,
1782 u32 offset,
1783 u64 *value);
1784
1785enum vxge_hw_status
1786vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
1787 enum vxge_hw_mgmt_reg_type type,
1788 u32 index,
1789 u32 offset,
1790 u64 value);
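
/*
 * A hedged usage sketch for the register accessors declared above:
 * reading one 64-bit register from vpath index 0. The offset below is
 * a placeholder, not a real register offset:
 */
#if 0	/* illustrative only */
static enum vxge_hw_status
vxge_read_vpath_reg_example(struct __vxge_hw_device *devh, u64 *val)
{
	return vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
				     0 /* vpath index */,
				     0x0 /* placeholder offset */,
				     val);
}
#endif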
1791
1792/**
1793 * enum vxge_hw_rxd_state - Descriptor (RXD) state.
1794 * @VXGE_HW_RXD_STATE_NONE: Invalid state.
1795 * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
1796 * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
1797 * device.
1798 * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
1799 * filling-in and posting later.
1800 *
1801 * Titan/HW descriptor states.
1802 *
1803 */
1804enum vxge_hw_rxd_state {
1805 VXGE_HW_RXD_STATE_NONE = 0,
1806 VXGE_HW_RXD_STATE_AVAIL = 1,
1807 VXGE_HW_RXD_STATE_POSTED = 2,
1808 VXGE_HW_RXD_STATE_FREED = 3
1809};
1810
1811/**
1812 * struct vxge_hw_ring_rxd_info - Extended information associated with a
1813 * completed ring descriptor.
1814 * @syn_flag: SYN flag
1815 * @is_icmp: Is ICMP
1816 * @fast_path_eligible: Fast Path Eligible flag
1817 * @l3_cksum_valid: Set if the L3 checksum result is valid.
1818 * @l3_cksum: Result of IP checksum check (by Titan hardware).
1819 *		This field containing VXGE_HW_L3_CKSUM_OK means that
1820 *		the checksum is correct; otherwise the datagram is
1821 *		corrupted.
1822 * @l4_cksum_valid: Set if the L4 checksum result is valid.
1823 * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
1824 *		This field containing VXGE_HW_L4_CKSUM_OK means that
1825 *		the checksum is correct; otherwise the packet is
1826 *		corrupted.
1827 * @frame: Zero or more of enum vxge_hw_frame_type flags.
1828 * See enum vxge_hw_frame_type{}.
1829 * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
1830 * various higher-layer protocols, including (but not restricted to)
1831 * TCP and UDP. See enum vxge_hw_frame_proto{}.
1832 * @is_vlan: If vlan tag is valid
1833 * @vlan: VLAN tag extracted from the received frame.
1834 * @rth_bucket: RTH bucket
1835 * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
1836 *		has a matching entry in the Indirection table.
1837 * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
1838 *		has a matching entry in the Socket Pair Direct Match table.
1839 * @rth_hash_type: RTH hash code of the function used to calculate the hash.
1840 * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan
1841 * hardware if RTH is enabled.
1842 */
1843struct vxge_hw_ring_rxd_info {
1844 u32 syn_flag;
1845 u32 is_icmp;
1846 u32 fast_path_eligible;
1847 u32 l3_cksum_valid;
1848 u32 l3_cksum;
1849 u32 l4_cksum_valid;
1850 u32 l4_cksum;
1851 u32 frame;
1852 u32 proto;
1853 u32 is_vlan;
1854 u32 vlan;
1855 u32 rth_bucket;
1856 u32 rth_it_hit;
1857 u32 rth_spdm_hit;
1858 u32 rth_hash_type;
1859 u32 rth_value;
1860};
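
/*
 * A hedged sketch of how a receive path might consume the checksum
 * fields above when deciding whether the stack can skip checksum
 * verification. VXGE_HW_L3_CKSUM_OK and VXGE_HW_L4_CKSUM_OK are
 * defined elsewhere in this driver; the helper itself is illustrative:
 */
#if 0	/* illustrative only */
#include <linux/skbuff.h>

static void vxge_rx_csum_example(struct sk_buff *skb,
				 const struct vxge_hw_ring_rxd_info *info)
{
	if (info->l3_cksum_valid && info->l3_cksum == VXGE_HW_L3_CKSUM_OK &&
	    info->l4_cksum_valid && info->l4_cksum == VXGE_HW_L4_CKSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
#endif
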
1861/**
1862 * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
1863 * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
1864 * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
1865 * configuration mismatch.
1866 * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
1867 * configuration mismatch.
1868 * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
1869 * presentation configuration mismatch.
1870 * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error: unparseable packet,
1871 *		such as an unknown IPv6 header.
1872 * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error: frame integrity
1873 *		error (such as FCS or ECC).
1874 * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
1875 *		buffer(s) were not appropriately sized and data loss occurred.
1876 * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
1877 * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
1878 *		Segment1 exceeded the capacity of Buffer1 and the remainder
1879 *		was placed in Buffer2. Segment2 now starts in Buffer3.
1880 *		No data loss or errors occurred.
1881 * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
1882 *		assigned buffers has a size of 0 bytes.
1883 * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
1884 *		VPath Reset or because of a VPIN mismatch.
1885 * @VXGE_HW_RING_T_CODE_UNUSED: Unused
1886 * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
1887 *		transfer code condition occurred.
1888 *
1889 * Transfer codes returned by adapter.
1890 */
1891enum vxge_hw_ring_tcode {
1892 VXGE_HW_RING_T_CODE_OK = 0x0,
1893 VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
1894 VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
1895 VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
1896 VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
1897 VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
1898 VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
1899 VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
1900 VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
1901 VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
1902 VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
1903 VXGE_HW_RING_T_CODE_UNUSED = 0xE,
1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1905};
1906
1907enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1908 struct __vxge_hw_ring *ring_handle,
1909 void **rxdh);
1910
1911void
1912vxge_hw_ring_rxd_pre_post(
1913 struct __vxge_hw_ring *ring_handle,
1914 void *rxdh);
1915
1916void
1917vxge_hw_ring_rxd_post_post(
1918 struct __vxge_hw_ring *ring_handle,
1919 void *rxdh);
1920
1921enum vxge_hw_status
1922vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
1923
1924void
1925vxge_hw_ring_rxd_post_post_wmb(
1926 struct __vxge_hw_ring *ring_handle,
1927 void *rxdh);
1928
1929void vxge_hw_ring_rxd_post(
1930 struct __vxge_hw_ring *ring_handle,
1931 void *rxdh);
1932
1933enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1934 struct __vxge_hw_ring *ring_handle,
1935 void **rxdh,
1936 u8 *t_code);
1937
1938enum vxge_hw_status vxge_hw_ring_handle_tcode(
1939 struct __vxge_hw_ring *ring_handle,
1940 void *rxdh,
1941 u8 t_code);
1942
1943void vxge_hw_ring_rxd_free(
1944 struct __vxge_hw_ring *ring_handle,
1945 void *rxdh);
1946
1947/**
1948 * enum vxge_hw_frame_proto - Higher-layer Ethernet protocols.
1949 * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
1950 * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
1951 * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
1952 * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
1953 * @VXGE_HW_FRAME_PROTO_TCP: TCP.
1954 * @VXGE_HW_FRAME_PROTO_UDP: UDP.
1955 * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
1956 *
1957 * Higher-layer Ethernet protocols and options.
1958 */
1959enum vxge_hw_frame_proto {
1960 VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
1961 VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
1962 VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
1963 VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
1964 VXGE_HW_FRAME_PROTO_TCP = 0x02,
1965 VXGE_HW_FRAME_PROTO_UDP = 0x01,
1966 VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
1967 VXGE_HW_FRAME_PROTO_UDP)
1968};
1969
1970/**
1971 * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
1972 * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
1973 * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
1974 * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
1975 * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
1976 *
1977 * These gather codes are used to indicate the position of a TxD in a TxD list
1978 */
1979enum vxge_hw_fifo_gather_code {
1980 VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
1981 VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
1982 VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
1983 VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
1984};
1985
1986/**
1987 * enum vxge_hw_fifo_tcode - tcodes used in fifo
1988 * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
1989 * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
1990 * frame data) returned with corrupt data.
1991 * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
1992 * with no data.
1993 * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
1994 * frame or LSO MSS that was too long (>9800B).
1995 * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
1996 * Offload operation, due to improper header template,
1997 * unsupported protocol, etc.
1998 * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
1999 * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
2000 * data buffer transfer errors are encountered;
2001 * otherwise it is set to 0.
2002 *
2003 * These tcodes are returned by various APIs that report TxD status.
2004 */
2005enum vxge_hw_fifo_tcode {
2006 VXGE_HW_FIFO_T_CODE_OK = 0x0,
2007 VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
2008 VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
2009 VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
2010 VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
2011 VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
2012 VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
2013};
2014
2015enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
2016 struct __vxge_hw_fifo *fifoh,
2017 void **txdlh,
2018 void **txdl_priv);
2019
2020void vxge_hw_fifo_txdl_buffer_set(
2021 struct __vxge_hw_fifo *fifo_handle,
2022 void *txdlh,
2023 u32 frag_idx,
2024 dma_addr_t dma_pointer,
2025 u32 size);
2026
2027void vxge_hw_fifo_txdl_post(
2028 struct __vxge_hw_fifo *fifo_handle,
2029 void *txdlh);
2030
2031u32 vxge_hw_fifo_free_txdl_count_get(
2032 struct __vxge_hw_fifo *fifo_handle);
2033
2034enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
2035 struct __vxge_hw_fifo *fifoh,
2036 void **txdlh,
2037 enum vxge_hw_fifo_tcode *t_code);
2038
2039enum vxge_hw_status vxge_hw_fifo_handle_tcode(
2040 struct __vxge_hw_fifo *fifoh,
2041 void *txdlh,
2042 enum vxge_hw_fifo_tcode t_code);
2043
2044void vxge_hw_fifo_txdl_free(
2045 struct __vxge_hw_fifo *fifoh,
2046 void *txdlh);
2047
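/*
 * Editor's note: a minimal sketch of the TxDL reserve/post and
 * completion cycle using the calls above; single fragment, no error
 * handling, and my_tx_done() is a hypothetical callback that would
 * unmap the buffer and free the skb.
 */
#if 0
static void example_fifo_xmit(struct __vxge_hw_fifo *fifo,
			      dma_addr_t dma, u32 len)
{
	void *txdlh, *txdl_priv;

	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
	    VXGE_HW_OK)
		return;		/* fifo full: caller should stop the queue */

	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifo, txdlh);
}

static void example_fifo_complete(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		my_tx_done(txdlh);	/* hypothetical */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
#endif
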
2048/*
2049 * Device
2050 */
2051
2052#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2053#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2054
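/*
 * Editor's note: with the 4 KB RxD blocks described below, these work
 * out to byte offsets 4088 and 4080: the last 8 bytes of each block
 * carry the DMA pointer of the next block, and the 8 bytes before
 * that carry the index of the owning memblock.
 */
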
2055/*
2056 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
2057 * @dma_addr: DMA (mapped) address of _this_ descriptor.
2058 * @dma_handle: DMA handle used to map the descriptor onto device.
2059 * @dma_offset: Descriptor's offset in the memory block. HW allocates
2060 * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
2061 * bytes. Each memblock is contiguous DMA-able memory. Each
2062 * memblock contains 1 or more 4KB RxD blocks visible to the
2063 * Titan hardware.
2064 * @dma_object: DMA address and handle of the memory block that contains
2065 * the descriptor. This member is used only in the "checked"
2066 * version of the HW (to enforce certain assertions);
2067 * otherwise it gets compiled out.
2069 *
2070 * Per-receive descriptor HW-private data. HW uses the space to keep DMA
2071 * information associated with the descriptor. Note that the driver can ask HW
2072 * to allocate additional per-descriptor space for its own (driver-specific)
2073 * purposes.
2074 */
2075struct __vxge_hw_ring_rxd_priv {
2076 dma_addr_t dma_addr;
2077 struct pci_dev *dma_handle;
2078 ptrdiff_t dma_offset;
2079#ifdef VXGE_DEBUG_ASSERT
2080 struct vxge_hw_mempool_dma *dma_object;
2081#endif
2082};
2083
2084struct vxge_hw_mempool_cbs {
2085 void (*item_func_alloc)(
2086 struct vxge_hw_mempool *mempoolh,
2087 u32 memblock_index,
2088 struct vxge_hw_mempool_dma *dma_object,
2089 u32 index,
2090 u32 is_last);
2091};
2092
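/*
 * Editor's note: a skeletal item_func_alloc implementation matching
 * the callback signature above; the body is a placeholder. The ring
 * code supplies such a callback so each RxD block is initialized (and
 * chained to its predecessor) as the mempool carves items out of
 * memblocks.
 */
#if 0
static void example_item_alloc(struct vxge_hw_mempool *mempoolh,
			       u32 memblock_index,
			       struct vxge_hw_mempool_dma *dma_object,
			       u32 index, u32 is_last)
{
	/* resolve the item's virtual/DMA address from the memblock,
	 * record it, and link the previous block's next-pointer here */
}
#endif
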
2093#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2094 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2095
2096enum vxge_hw_status
2097__vxge_hw_vpath_rts_table_get(
2098 struct __vxge_hw_vpath_handle *vpath_handle,
2099 u32 action,
2100 u32 rts_table,
2101 u32 offset,
2102 u64 *data1,
2103 u64 *data2);
2104
2105enum vxge_hw_status
2106__vxge_hw_vpath_rts_table_set(
2107 struct __vxge_hw_vpath_handle *vpath_handle,
2108 u32 action,
2109 u32 rts_table,
2110 u32 offset,
2111 u64 data1,
2112 u64 data2);
2113
2114enum vxge_hw_status
2115__vxge_hw_vpath_enable(
2116 struct __vxge_hw_device *devh,
2117 u32 vp_id);
2118
2119void vxge_hw_device_intr_enable(
2120 struct __vxge_hw_device *devh);
2121
2122u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
2123
2124void vxge_hw_device_intr_disable(
2125 struct __vxge_hw_device *devh);
2126
2127void vxge_hw_device_mask_all(
2128 struct __vxge_hw_device *devh);
2129
2130void vxge_hw_device_unmask_all(
2131 struct __vxge_hw_device *devh);
2132
2133enum vxge_hw_status vxge_hw_device_begin_irq(
2134 struct __vxge_hw_device *devh,
2135 u32 skip_alarms,
2136 u64 *reason);
2137
2138void vxge_hw_device_clear_tx_rx(
2139 struct __vxge_hw_device *devh);
2140
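/*
 * Editor's note: a hedged sketch of an INTA-style top half built from
 * the device-level calls above; it mirrors the shape of the real ISR
 * in vxge-main.c, but NAPI scheduling and alarm decoding of 'reason'
 * are elided. Assumes <linux/interrupt.h> for irqreturn_t.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
		return IRQ_NONE;	/* interrupt was not ours */

	vxge_hw_device_mask_all(hldev);
	/* ... dispatch rx/tx work based on 'reason' ... */
	vxge_hw_device_clear_tx_rx(hldev);
	vxge_hw_device_unmask_all(hldev);

	return IRQ_HANDLED;
}
#endif
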
2141/*
2142 * Virtual Paths
2143 */
2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2149u32 vxge_hw_vpath_id(
2150 struct __vxge_hw_vpath_handle *vpath_handle);
2151
2152enum vxge_hw_vpath_mac_addr_add_mode {
2153 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
2154 VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
2155 VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
2156};
2157
2158enum vxge_hw_status
2159vxge_hw_vpath_mac_addr_add(
2160 struct __vxge_hw_vpath_handle *vpath_handle,
2161 u8 *macaddr,
2162 u8 *macaddr_mask,
2163 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2164
2165enum vxge_hw_status
2166vxge_hw_vpath_mac_addr_get(
2167 struct __vxge_hw_vpath_handle *vpath_handle,
2168 u8 *macaddr,
2169 u8 *macaddr_mask);
2170
2171enum vxge_hw_status
2172vxge_hw_vpath_mac_addr_get_next(
2173 struct __vxge_hw_vpath_handle *vpath_handle,
2174 u8 *macaddr,
2175 u8 *macaddr_mask);
2176
2177enum vxge_hw_status
2178vxge_hw_vpath_mac_addr_delete(
2179 struct __vxge_hw_vpath_handle *vpath_handle,
2180 u8 *macaddr,
2181 u8 *macaddr_mask);
2182
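/*
 * Editor's note: _get/_get_next form a cursor over the vpath's DA
 * table. A minimal sketch of walking every programmed address; the
 * VLAN-id and ethertype tables below follow the same get/get_next
 * pattern. Assumes ETH_ALEN from <linux/if_ether.h>.
 */
#if 0
static void example_walk_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macmask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macmask);
	while (status == VXGE_HW_OK) {
		/* ... consume macaddr/macmask ... */
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macmask);
	}
}
#endif
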
2183enum vxge_hw_status
2184vxge_hw_vpath_vid_add(
2185 struct __vxge_hw_vpath_handle *vpath_handle,
2186 u64 vid);
2187
2188enum vxge_hw_status
2189vxge_hw_vpath_vid_get(
2190 struct __vxge_hw_vpath_handle *vpath_handle,
2191 u64 *vid);
2192
2193enum vxge_hw_status
2194vxge_hw_vpath_vid_delete(
2195 struct __vxge_hw_vpath_handle *vpath_handle,
2196 u64 vid);
2197
2198enum vxge_hw_status
2199vxge_hw_vpath_etype_add(
2200 struct __vxge_hw_vpath_handle *vpath_handle,
2201 u64 etype);
2202
2203enum vxge_hw_status
2204vxge_hw_vpath_etype_get(
2205 struct __vxge_hw_vpath_handle *vpath_handle,
2206 u64 *etype);
2207
2208enum vxge_hw_status
2209vxge_hw_vpath_etype_get_next(
2210 struct __vxge_hw_vpath_handle *vpath_handle,
2211 u64 *etype);
2212
2213enum vxge_hw_status
2214vxge_hw_vpath_etype_delete(
2215 struct __vxge_hw_vpath_handle *vpath_handle,
2216 u64 etype);
2217
2218enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2219 struct __vxge_hw_vpath_handle *vpath_handle);
2220
2221enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2222 struct __vxge_hw_vpath_handle *vpath_handle);
2223
2224enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2225 struct __vxge_hw_vpath_handle *vpath_handle);
2226
2227enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2228 struct __vxge_hw_vpath_handle *vpath_handle);
2229
2230enum vxge_hw_status vxge_hw_vpath_mcast_disable(
2231 struct __vxge_hw_vpath_handle *vpath_handle);
2232
2233enum vxge_hw_status vxge_hw_vpath_poll_rx(
2234 struct __vxge_hw_ring *ringh);
2235
2236enum vxge_hw_status vxge_hw_vpath_poll_tx(
2237 struct __vxge_hw_fifo *fifoh,
2238 struct sk_buff ***skb_ptr, int nr_skb, int *more);
2239
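/*
 * Editor's note: a hedged sketch of driving both poll entry points,
 * e.g. from a NAPI handler. poll_tx hands completed skbs back through
 * the caller-supplied array (advancing *skb_ptr) and sets 'more' when
 * work remains; the array size of 32 is an assumption.
 */
#if 0
static void example_poll(struct __vxge_hw_ring *ring,
			 struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[32];
	struct sk_buff **skb_ptr = completed;
	int more = 0;

	vxge_hw_vpath_poll_rx(ring);
	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed),
			      &more);
	/* free completed[0] .. skb_ptr[-1]; repeat while 'more' is set */
}
#endif
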
2240enum vxge_hw_status vxge_hw_vpath_alarm_process(
2241 struct __vxge_hw_vpath_handle *vpath_handle,
2242 u32 skip_alarms);
2243
2244void
2245vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int *tim_msix_id, int alarm_msix_id);
2247
2248void
2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2250 int msix_id);
2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2255
2256void
2257vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
2258 int msix_id);
2259
2260enum vxge_hw_status vxge_hw_vpath_intr_enable(
2261 struct __vxge_hw_vpath_handle *vpath_handle);
2262
2263enum vxge_hw_status vxge_hw_vpath_intr_disable(
2264 struct __vxge_hw_vpath_handle *vpath_handle);
2265
2266void vxge_hw_vpath_inta_mask_tx_rx(
2267 struct __vxge_hw_vpath_handle *vpath_handle);
2268
2269void vxge_hw_vpath_inta_unmask_tx_rx(
2270 struct __vxge_hw_vpath_handle *vpath_handle);
2271
2272void
2273vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2274
2275void
2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2277
2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2283 void **dtrh);
2284
2285void
2286vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
2287
2288void
2289vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2290
2291int
2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2293
2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2297
2298#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
new file mode 100644
index 00000000000..b9efa28bab3
--- /dev/null
+++ b/drivers/net/vxge/vxge-version.h
@@ -0,0 +1,49 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#ifndef VXGE_VERSION_H
15#define VXGE_VERSION_H
16
17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "3"
20#define VXGE_VERSION_BUILD "22640"
21#define VXGE_VERSION_FOR "k"
22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
24
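/*
 * Editor's note: VXGE_FW_VER packs a (major, minor, build) triple into
 * a single comparable integer; for example VXGE_FW_VER(1, 8, 1)
 * evaluates to (1 << 16) + (8 << 8) + 1 = 0x10801. The thresholds
 * below are built this way so firmware versions compare numerically.
 */
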
25#define VXGE_DEAD_FW_VER_MAJOR 1
26#define VXGE_DEAD_FW_VER_MINOR 4
27#define VXGE_DEAD_FW_VER_BUILD 4
28
29#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
30 VXGE_DEAD_FW_VER_MINOR, \
31 VXGE_DEAD_FW_VER_BUILD)
32
33#define VXGE_EPROM_FW_VER_MAJOR 1
34#define VXGE_EPROM_FW_VER_MINOR 6
35#define VXGE_EPROM_FW_VER_BUILD 1
36
37#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
38 VXGE_EPROM_FW_VER_MINOR, \
39 VXGE_EPROM_FW_VER_BUILD)
40
41#define VXGE_CERT_FW_VER_MAJOR 1
42#define VXGE_CERT_FW_VER_MINOR 8
43#define VXGE_CERT_FW_VER_BUILD 1
44
45#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
46 VXGE_CERT_FW_VER_MINOR, \
47 VXGE_CERT_FW_VER_BUILD)
48
49#endif