author	Ramkrishna Vepa <ram.vepa@neterion.com>	2009-04-01 14:14:58 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-02 03:33:43 -0400
commit	113241321dcd19f36d53f2af46a4734855ca0cc0 (patch)
tree	9c94efd07b427b262c274d093f8bde394c3b97b7 /drivers/net/vxge
parent	40a3a9156dc66f23cc79758981886c1896887341 (diff)
Neterion: New driver: Traffic & alarm handler
This patch takes care of the traffic handling related APIs.
- Interrupt enable and disable
- Mask / unmask interrupt
- Traffic interrupt handling
- Alarm interrupt handling

- Changes in this submission -
- General clean up - removed redundant includes, defines and macros.

- Changes in previous submissions -
- General cleanup - removed unused functions and variables.
- Use asserts where necessary - Reported by Andi Kleen
- Fixed sparse warnings - Reported by Andi Kleen
- Use a prefix, "__vxge", in front of hw functions to make them globally
  unique - Ben Hutchings

Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Rastapur Santosh <santosh.rastapur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r--	drivers/net/vxge/vxge-traffic.c	2528
-rw-r--r--	drivers/net/vxge/vxge-traffic.h	2409
2 files changed, 4937 insertions, 0 deletions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
new file mode 100644
index 000000000000..7be0ae10d69b
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -0,0 +1,2528 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#include <linux/etherdevice.h>
15
16#include "vxge-traffic.h"
17#include "vxge-config.h"
18#include "vxge-main.h"
19
20/*
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle.
23 *
24 * Enable vpath interrupts. This function is to be executed last in the
25 * vpath initialization sequence.
26 *
27 * See also: vxge_hw_vpath_intr_disable()
28 */
29enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
30{
31 u64 val64;
32
33 struct __vxge_hw_virtualpath *vpath;
34 struct vxge_hw_vpath_reg __iomem *vp_reg;
35 enum vxge_hw_status status = VXGE_HW_OK;
36 if (vp == NULL) {
37 status = VXGE_HW_ERR_INVALID_HANDLE;
38 goto exit;
39 }
40
41 vpath = vp->vpath;
42
43 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45 goto exit;
46 }
47
48 vp_reg = vpath->vp_reg;
49
50 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51
52 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 &vp_reg->general_errors_reg);
54
55 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 &vp_reg->pci_config_errors_reg);
57
58 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 &vp_reg->mrpcim_to_vpath_alarm_reg);
60
61 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 &vp_reg->srpcim_to_vpath_alarm_reg);
63
64 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 &vp_reg->vpath_ppif_int_status);
66
67 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 &vp_reg->srpcim_msg_to_vpath_reg);
69
70 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 &vp_reg->vpath_pcipif_int_status);
72
73 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 &vp_reg->prc_alarm_reg);
75
76 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 &vp_reg->wrdma_alarm_status);
78
79 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 &vp_reg->asic_ntwk_vp_err_reg);
81
82 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 &vp_reg->xgmac_vp_int_status);
84
85 val64 = readq(&vp_reg->vpath_general_int_status);
86
87 /* Mask unwanted interrupts */
88
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->vpath_pcipif_int_mask);
91
92 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 &vp_reg->srpcim_msg_to_vpath_mask);
94
95 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 &vp_reg->srpcim_to_vpath_alarm_mask);
97
98 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 &vp_reg->mrpcim_to_vpath_alarm_mask);
100
101 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 &vp_reg->pci_config_errors_mask);
103
104 /* Unmask the individual interrupts */
105
106 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 &vp_reg->general_errors_mask);
111
112 __vxge_hw_pio_mem_write32_upper(
113 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 &vp_reg->kdfcctl_errors_mask);
120
121 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122
123 __vxge_hw_pio_mem_write32_upper(
124 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 &vp_reg->prc_alarm_mask);
126
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
131 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 &vp_reg->asic_ntwk_vp_err_mask);
133 else
134 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 &vp_reg->asic_ntwk_vp_err_mask);
138
139 __vxge_hw_pio_mem_write32_upper(0,
140 &vp_reg->vpath_general_int_mask);
141exit:
142 return status;
143
144}
145
146/*
147 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148 * @vp: Virtual Path handle.
149 *
150 * Disable vpath interrupts. This function is to be executed when the
151 * vpath is being shut down or reset.
152 *
153 * See also: vxge_hw_vpath_intr_enable()
154 */
155enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
157{
158 u64 val64;
159
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 if (vp == NULL) {
164 status = VXGE_HW_ERR_INVALID_HANDLE;
165 goto exit;
166 }
167
168 vpath = vp->vpath;
169
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172 goto exit;
173 }
174 vp_reg = vpath->vp_reg;
175
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
179
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
186
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
189
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
195
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
198
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
201
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
204
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
207
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
210
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
213
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
216
217exit:
218 return status;
219}
220
221/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel for rx or tx handle
224 * @msix_id: MSIX ID
225 *
226 * The function masks the msix interrupt for the given msix_id
227 *
228 * Returns: 0
229 */
230void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{
232
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237
238 return;
239}
240
241/**
242 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
243 * @channel: Channel for rx or tx handle
244 * @msix_id: MSIX ID
245 *
246 * The function unmasks the msix interrupt for the given msix_id
247 *
248 * Returns: 0
249 */
250void
251vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{
253
254 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258
259 return;
260}
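/*
 * Illustrative sketch (not part of the patch): how an MSIX vector id maps
 * onto the set/clear mask registers used by the two functions above. The
 * arithmetic mirrors the code; the vector numbers are hypothetical.
 *
 *   msix_id = 7, channel->first_vp_id = 2
 *     register index : msix_id % 4         = 3  -> set_msix_mask_vect[3]
 *     bit selected   : first_vp_id + 7 / 4 = 3  -> vxge_mBIT(3)
 *
 * Masking writes this bit to set_msix_mask_vect[], unmasking writes the
 * same bit to clear_msix_mask_vect[].
 */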
261
262/**
263 * vxge_hw_device_set_intr_type - Updates the configuration
264 * with new interrupt type.
265 * @hldev: HW device handle.
266 * @intr_mode: New interrupt type
267 */
268u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
269{
270
271 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
272 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
274 (intr_mode != VXGE_HW_INTR_MODE_DEF))
275 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
276
277 hldev->config.intr_mode = intr_mode;
278 return intr_mode;
279}
280
281/**
282 * vxge_hw_device_intr_enable - Enable interrupts.
283 * @hldev: HW device handle.
284 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
285 * the type(s) of interrupts to enable.
286 *
287 * Enable Titan interrupts. This function is to be executed last in the
288 * Titan initialization sequence.
289 *
290 * See also: vxge_hw_device_intr_disable()
291 */
292void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
293{
294 u32 i;
295 u64 val64;
296 u32 val32;
297
298 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
299
300 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
301 continue;
302
303 vxge_hw_vpath_intr_enable(
304 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
305 }
306
307 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
308 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
309 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
310
311 if (val64 != 0) {
312 writeq(val64, &hldev->common_reg->tim_int_status0);
313
314 writeq(~val64, &hldev->common_reg->tim_int_mask0);
315 }
316
317 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
318 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
319
320 if (val32 != 0) {
321 __vxge_hw_pio_mem_write32_upper(val32,
322 &hldev->common_reg->tim_int_status1);
323
324 __vxge_hw_pio_mem_write32_upper(~val32,
325 &hldev->common_reg->tim_int_mask1);
326 }
327 }
328
329 val64 = readq(&hldev->common_reg->titan_general_int_status);
330
331 vxge_hw_device_unmask_all(hldev);
332
333 return;
334}
335
336/**
337 * vxge_hw_device_intr_disable - Disable Titan interrupts.
338 * @hldev: HW device handle.
339 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
340 * the type(s) of interrupts to disable.
341 *
342 * Disable Titan interrupts.
343 *
344 * See also: vxge_hw_device_intr_enable()
345 */
346void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
347{
348 u32 i;
349
350 vxge_hw_device_mask_all(hldev);
351
352 /* mask all the tim interrupts */
353 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
354 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
355 &hldev->common_reg->tim_int_mask1);
356
357 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
358
359 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
360 continue;
361
362 vxge_hw_vpath_intr_disable(
363 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
364 }
365
366 return;
367}
368
369/**
370 * vxge_hw_device_mask_all - Mask all device interrupts.
371 * @hldev: HW device handle.
372 *
373 * Mask all device interrupts.
374 *
375 * See also: vxge_hw_device_unmask_all()
376 */
377void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
378{
379 u64 val64;
380
381 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
382 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
383
384 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
385 &hldev->common_reg->titan_mask_all_int);
386
387 return;
388}
389
390/**
391 * vxge_hw_device_unmask_all - Unmask all device interrupts.
392 * @hldev: HW device handle.
393 *
394 * Unmask all device interrupts.
395 *
396 * See also: vxge_hw_device_mask_all()
397 */
398void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
399{
400 u64 val64 = 0;
401
402 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
403 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
404
405 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
406 &hldev->common_reg->titan_mask_all_int);
407
408 return;
409}
410
411/**
412 * vxge_hw_device_flush_io - Flush io writes.
413 * @hldev: HW device handle.
414 *
415 * The function performs a read operation to flush io writes.
416 *
417 * Returns: void
418 */
419void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
420{
421 u32 val32;
422
423 val32 = readl(&hldev->common_reg->titan_general_int_status);
424}
425
426/**
427 * vxge_hw_device_begin_irq - Begin IRQ processing.
428 * @hldev: HW device handle.
429 * @skip_alarms: Do not clear the alarms
430 * @reason: "Reason" for the interrupt, the value of Titan's
431 * general_int_status register.
432 *
433 * The function performs two actions: it first checks whether the (shared IRQ)
434 * interrupt was raised by the device. Next, it masks the device interrupts.
435 *
436 * Note:
437 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
438 * bridge. Therefore, two back-to-back interrupts are potentially possible.
439 *
440 * Returns: 0, if the interrupt is not "ours" (note that in this case the
441 * device remains enabled).
442 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
443 * status.
444 */
445enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
446 u32 skip_alarms, u64 *reason)
447{
448 u32 i;
449 u64 val64;
450 u64 adapter_status;
451 u64 vpath_mask;
452 enum vxge_hw_status ret = VXGE_HW_OK;
453
454 val64 = readq(&hldev->common_reg->titan_general_int_status);
455
456 if (unlikely(!val64)) {
457 /* not Titan interrupt */
458 *reason = 0;
459 ret = VXGE_HW_ERR_WRONG_IRQ;
460 goto exit;
461 }
462
463 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
464
465 adapter_status = readq(&hldev->common_reg->adapter_status);
466
467 if (adapter_status == VXGE_HW_ALL_FOXES) {
468
469 __vxge_hw_device_handle_error(hldev,
470 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
471 *reason = 0;
472 ret = VXGE_HW_ERR_SLOT_FREEZE;
473 goto exit;
474 }
475 }
476
477 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
478
479 *reason = val64;
480
481 vpath_mask = hldev->vpaths_deployed >>
482 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
483
484 if (val64 &
485 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
486 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
487
488 return VXGE_HW_OK;
489 }
490
491 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
492
493 if (unlikely(val64 &
494 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
495
496 enum vxge_hw_status error_level = VXGE_HW_OK;
497
498 hldev->stats.sw_dev_err_stats.vpath_alarms++;
499
500 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
501
502 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
503 continue;
504
505 ret = __vxge_hw_vpath_alarm_process(
506 &hldev->virtual_paths[i], skip_alarms);
507
508 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
509
510 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
511 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
512 break;
513 }
514
515 ret = error_level;
516 }
517exit:
518 return ret;
519}
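/*
 * A minimal ISR sketch (illustrative only, not part of this file) showing
 * the intended call order around vxge_hw_device_begin_irq() in INTA mode.
 * Per-ring/per-fifo processing is elided; "hldev" is the adapter handle the
 * caller already owns and example_isr is a hypothetical name.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared IRQ, not raised by this device */

	vxge_hw_device_mask_all(hldev);
	/* ... process completed rx/tx descriptors here ... */
	vxge_hw_device_clear_tx_rx(hldev);
	vxge_hw_device_unmask_all(hldev);

	return IRQ_HANDLED;
}
#endif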
520
521/*
522 * __vxge_hw_device_handle_link_up_ind
523 * @hldev: HW device handle.
524 *
525 * Link up indication handler. The function is invoked by HW when
526 * Titan indicates that the link is up for a programmable amount of time.
527 */
528enum vxge_hw_status
529__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
530{
531 /*
532 * If the link state is already up, return.
533 */
534 if (hldev->link_state == VXGE_HW_LINK_UP)
535 goto exit;
536
537 hldev->link_state = VXGE_HW_LINK_UP;
538
539 /* notify driver */
540 if (hldev->uld_callbacks.link_up)
541 hldev->uld_callbacks.link_up(hldev);
542exit:
543 return VXGE_HW_OK;
544}
545
546/*
547 * __vxge_hw_device_handle_link_down_ind
548 * @hldev: HW device handle.
549 *
550 * Link down indication handler. The function is invoked by HW when
551 * Titan indicates that the link is down.
552 */
553enum vxge_hw_status
554__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
555{
556 /*
557 * If the link state is already down, return.
558 */
559 if (hldev->link_state == VXGE_HW_LINK_DOWN)
560 goto exit;
561
562 hldev->link_state = VXGE_HW_LINK_DOWN;
563
564 /* notify driver */
565 if (hldev->uld_callbacks.link_down)
566 hldev->uld_callbacks.link_down(hldev);
567exit:
568 return VXGE_HW_OK;
569}
570
571/**
572 * __vxge_hw_device_handle_error - Handle error
573 * @hldev: HW device
574 * @vp_id: Vpath Id
575 * @type: Error type. Please see enum vxge_hw_event{}
576 *
577 * Handle error.
578 */
579enum vxge_hw_status
580__vxge_hw_device_handle_error(
581 struct __vxge_hw_device *hldev,
582 u32 vp_id,
583 enum vxge_hw_event type)
584{
585 switch (type) {
586 case VXGE_HW_EVENT_UNKNOWN:
587 break;
588 case VXGE_HW_EVENT_RESET_START:
589 case VXGE_HW_EVENT_RESET_COMPLETE:
590 case VXGE_HW_EVENT_LINK_DOWN:
591 case VXGE_HW_EVENT_LINK_UP:
592 goto out;
593 case VXGE_HW_EVENT_ALARM_CLEARED:
594 goto out;
595 case VXGE_HW_EVENT_ECCERR:
596 case VXGE_HW_EVENT_MRPCIM_ECCERR:
597 goto out;
598 case VXGE_HW_EVENT_FIFO_ERR:
599 case VXGE_HW_EVENT_VPATH_ERR:
600 case VXGE_HW_EVENT_CRITICAL_ERR:
601 case VXGE_HW_EVENT_SERR:
602 break;
603 case VXGE_HW_EVENT_SRPCIM_SERR:
604 case VXGE_HW_EVENT_MRPCIM_SERR:
605 goto out;
606 case VXGE_HW_EVENT_SLOT_FREEZE:
607 break;
608 default:
609 vxge_assert(0);
610 goto out;
611 }
612
613 /* notify driver */
614 if (hldev->uld_callbacks.crit_err)
615 hldev->uld_callbacks.crit_err(
616 (struct __vxge_hw_device *)hldev,
617 type, vp_id);
618out:
619
620 return VXGE_HW_OK;
621}
622
623/**
624 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
625 * condition that has caused the Tx and RX interrupt.
626 * @hldev: HW device.
627 *
628 * Acknowledge (that is, clear) the condition that has caused
629 * the Tx and Rx interrupt.
630 * See also: vxge_hw_device_begin_irq(),
631 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
632 */
633void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
634{
635
636 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
637 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
638 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
639 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
640 &hldev->common_reg->tim_int_status0);
641 }
642
643 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
644 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
645 __vxge_hw_pio_mem_write32_upper(
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
647 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
648 &hldev->common_reg->tim_int_status1);
649 }
650
651 return;
652}
653
654/*
655 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
656 * @channel: Channel
657 * @dtrh: Buffer to return the DTR pointer
658 *
659 * Allocates a dtr from the reserve array. If the reserve array is empty,
660 * it swaps the reserve and free arrays.
661 *
662 */
663enum vxge_hw_status
664vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
665{
666 void **tmp_arr;
667
668 if (channel->reserve_ptr - channel->reserve_top > 0) {
669_alloc_after_swap:
670 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
671
672 return VXGE_HW_OK;
673 }
674
675 /* switch between empty and full arrays */
676
677 /* The idea behind this design is that by keeping the free and reserve
678 * arrays separate we decouple the irq and non-irq paths,
679 * i.e. no additional locking is needed when we free a resource */
680
681 if (channel->length - channel->free_ptr > 0) {
682
683 tmp_arr = channel->reserve_arr;
684 channel->reserve_arr = channel->free_arr;
685 channel->free_arr = tmp_arr;
686 channel->reserve_ptr = channel->length;
687 channel->reserve_top = channel->free_ptr;
688 channel->free_ptr = channel->length;
689
690 channel->stats->reserve_free_swaps_cnt++;
691
692 goto _alloc_after_swap;
693 }
694
695 channel->stats->full_cnt++;
696
697 *dtrh = NULL;
698 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
699}
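/*
 * Illustrative walk-through (not part of the patch) of the reserve/free
 * swap above, for a hypothetical channel of length 4 whose reserve array
 * has been drained while 3 dtrs were freed by the completion path:
 *
 *   before swap: reserve_ptr == reserve_top, free_ptr = 1
 *   after swap : reserve_arr <-> free_arr, reserve_ptr = 4,
 *                reserve_top = 1, free_ptr = 4
 *
 * The next allocations therefore come from indices 3, 2, 1 of what used to
 * be the free array, while the completion path keeps refilling the (new)
 * free array without taking any extra lock.
 */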
700
701/*
702 * vxge_hw_channel_dtr_post - Post a dtr to the channel
703 * @channelh: Channel
704 * @dtrh: DTR pointer
705 *
706 * Posts a dtr to work array.
707 *
708 */
709void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
710{
711 vxge_assert(channel->work_arr[channel->post_index] == NULL);
712
713 channel->work_arr[channel->post_index++] = dtrh;
714
715 /* wrap-around */
716 if (channel->post_index == channel->length)
717 channel->post_index = 0;
718}
719
720/*
721 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
722 * @channel: Channel
723 * @dtr: Buffer to return the next completed DTR pointer
724 *
725 * Returns the next completed dtr without removing it from the work array
726 *
727 */
728void
729vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
730{
731 vxge_assert(channel->compl_index < channel->length);
732
733 *dtrh = channel->work_arr[channel->compl_index];
734}
735
736/*
737 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
738 * @channel: Channel handle
739 *
740 * Removes the next completed dtr from work array
741 *
742 */
743void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
744{
745 channel->work_arr[channel->compl_index] = NULL;
746
747 /* wrap-around */
748 if (++channel->compl_index == channel->length)
749 channel->compl_index = 0;
750
751 channel->stats->total_compl_cnt++;
752}
753
754/*
755 * vxge_hw_channel_dtr_free - Frees a dtr
756 * @channel: Channel handle
757 * @dtr: DTR pointer
758 *
759 * Returns the dtr to free array
760 *
761 */
762void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
763{
764 channel->free_arr[--channel->free_ptr] = dtrh;
765}
766
767/*
768 * vxge_hw_channel_dtr_count
769 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
770 *
771 * Retrieve the number of DTRs available. This function cannot be called
772 * from the data path. ring_initial_replenishi() is the only user.
773 */
774int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
775{
776 return (channel->reserve_ptr - channel->reserve_top) +
777 (channel->length - channel->free_ptr);
778}
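/*
 * Example (illustrative only): for a channel of length 128 with
 * reserve_ptr = 40, reserve_top = 0 and free_ptr = 100, the expression
 * above yields (40 - 0) + (128 - 100) = 68 descriptors still available
 * to the driver.
 */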
779
780/**
781 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
782 * @ring: Handle to the ring object used for receive
783 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
784 * with a valid handle.
785 *
786 * Reserve an Rx descriptor for subsequent filling-in by the driver
787 * and posting on the corresponding channel (@channelh)
788 * via vxge_hw_ring_rxd_post().
789 *
790 * Returns: VXGE_HW_OK - success.
791 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
792 *
793 */
794enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
795 void **rxdh)
796{
797 enum vxge_hw_status status;
798 struct __vxge_hw_channel *channel;
799
800 channel = &ring->channel;
801
802 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
803
804 if (status == VXGE_HW_OK) {
805 struct vxge_hw_ring_rxd_1 *rxdp =
806 (struct vxge_hw_ring_rxd_1 *)*rxdh;
807
808 rxdp->control_0 = rxdp->control_1 = 0;
809 }
810
811 return status;
812}
813
814/**
815 * vxge_hw_ring_rxd_free - Free descriptor.
816 * @ring: Handle to the ring object used for receive
817 * @rxdh: Descriptor handle.
818 *
819 * Free the reserved descriptor. This operation is "symmetrical" to
820 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
821 * lifecycle.
822 *
823 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
824 * be:
825 *
826 * - reserved (vxge_hw_ring_rxd_reserve);
827 *
828 * - posted (vxge_hw_ring_rxd_post);
829 *
830 * - completed (vxge_hw_ring_rxd_next_completed);
831 *
832 * - and recycled again (vxge_hw_ring_rxd_free).
833 *
834 * For alternative state transitions and more details please refer to
835 * the design doc.
836 *
837 */
838void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
839{
840 struct __vxge_hw_channel *channel;
841
842 channel = &ring->channel;
843
844 vxge_hw_channel_dtr_free(channel, rxdh);
845
846}
847
848/**
849 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
850 * @ring: Handle to the ring object used for receive
851 * @rxdh: Descriptor handle.
852 *
853 * This routine prepares an rxd and posts it to the ring.
854 */
855void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
856{
857 struct __vxge_hw_channel *channel;
858
859 channel = &ring->channel;
860
861 vxge_hw_channel_dtr_post(channel, rxdh);
862}
863
864/**
865 * vxge_hw_ring_rxd_post_post - Process rxd after post.
866 * @ring: Handle to the ring object used for receive
867 * @rxdh: Descriptor handle.
868 *
869 * Processes rxd after post
870 */
871void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
872{
873 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
874 struct __vxge_hw_channel *channel;
875
876 channel = &ring->channel;
877
878 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
879
880 if (ring->stats->common_stats.usage_cnt > 0)
881 ring->stats->common_stats.usage_cnt--;
882}
883
884/**
885 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
886 * @ring: Handle to the ring object used for receive
887 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
888 *
889 * Post descriptor on the ring.
890 * Prior to posting the descriptor should be filled in accordance with
891 * Host/Titan interface specification for a given service (LL, etc.).
892 *
893 */
894void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
895{
896 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
897 struct __vxge_hw_channel *channel;
898
899 channel = &ring->channel;
900
901 wmb();
902 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
903
904 vxge_hw_channel_dtr_post(channel, rxdh);
905
906 if (ring->stats->common_stats.usage_cnt > 0)
907 ring->stats->common_stats.usage_cnt--;
908}
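/*
 * A minimal replenish sketch (illustrative only, not part of this file):
 * reserve an rxd, let the driver attach its receive buffer, then post it.
 * The buffer-attach step is driver specific and shown only as a comment;
 * example_ring_replenish is a hypothetical name.
 */
#if 0
static enum vxge_hw_status example_ring_replenish(struct __vxge_hw_ring *ring)
{
	void *rxdh;

	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
		/* ... map an skb and set buffer pointer/size in rxdh ... */
		vxge_hw_ring_rxd_post(ring, rxdh);
	}
	return VXGE_HW_OK;
}
#endif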
909
910/**
911 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
912 * @ring: Handle to the ring object used for receive
913 * @rxdh: Descriptor handle.
914 *
915 * Processes rxd after post with memory barrier.
916 */
917void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
918{
919 struct __vxge_hw_channel *channel;
920
921 channel = &ring->channel;
922
923 wmb();
924 vxge_hw_ring_rxd_post_post(ring, rxdh);
925}
926
927/**
928 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
929 * @ring: Handle to the ring object used for receive
930 * @rxdh: Descriptor handle. Returned by HW.
931 * @t_code: Transfer code, as per Titan User Guide,
932 * Receive Descriptor Format. Returned by HW.
933 *
934 * Retrieve the _next_ completed descriptor.
935 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
936 * driver of new completed descriptors. After that
937 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest of
938 * the completions (the very first completion is passed by HW via
939 * vxge_hw_ring_callback_f).
940 *
941 * Implementation-wise, the driver is free to call
942 * vxge_hw_ring_rxd_next_completed either immediately from inside the
943 * ring callback, or in a deferred fashion and separate (from HW)
944 * context.
945 *
946 * Non-zero @t_code means failure to fill-in receive buffer(s)
947 * of the descriptor.
948 * For instance, parity error detected during the data transfer.
949 * In this case Titan will complete the descriptor and indicate
950 * for the host that the received data is not to be used.
951 * For details please refer to Titan User Guide.
952 *
953 * Returns: VXGE_HW_OK - success.
954 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
955 * are currently available for processing.
956 *
957 * See also: vxge_hw_ring_callback_f{},
958 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
959 */
960enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
961 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
962{
963 struct __vxge_hw_channel *channel;
964 struct vxge_hw_ring_rxd_1 *rxdp;
965 enum vxge_hw_status status = VXGE_HW_OK;
966
967 channel = &ring->channel;
968
969 vxge_hw_channel_dtr_try_complete(channel, rxdh);
970
971 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
972 if (rxdp == NULL) {
973 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
974 goto exit;
975 }
976
977 /* completed only if the adapter no longer owns the descriptor */
978 if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
979
980 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
981 0);
982
983 ++ring->cmpl_cnt;
984 vxge_hw_channel_dtr_complete(channel);
985
986 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
987
988 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
989
990 ring->stats->common_stats.usage_cnt++;
991 if (ring->stats->common_stats.usage_max <
992 ring->stats->common_stats.usage_cnt)
993 ring->stats->common_stats.usage_max =
994 ring->stats->common_stats.usage_cnt;
995
996 status = VXGE_HW_OK;
997 goto exit;
998 }
999
1000 /* reset it. since we don't want to return
1001 * garbage to the driver */
1002 *rxdh = NULL;
1003 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1004exit:
1005 return status;
1006}
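/*
 * Completion-side sketch (illustrative only, not part of this file): drain
 * completed rxds, let vxge_hw_ring_handle_tcode() classify the transfer
 * code, and recycle the descriptor. Handing the packet to the stack is
 * elided; example_ring_poll is a hypothetical name.
 */
#if 0
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
								VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
								VXGE_HW_OK) {
			vxge_hw_ring_rxd_free(ring, rxdh);
			continue;
		}
		/* ... hand the received buffer to the stack ... */
		vxge_hw_ring_rxd_free(ring, rxdh);
	}
}
#endif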
1007
1008/**
1009 * vxge_hw_ring_handle_tcode - Handle transfer code.
1010 * @ring: Handle to the ring object used for receive
1011 * @rxdh: Descriptor handle.
1012 * @t_code: One of the enumerated (and documented in the Titan user guide)
1013 * "transfer codes".
1014 *
1015 * Handle descriptor's transfer code. The latter comes with each completed
1016 * descriptor.
1017 *
1018 * Returns: one of the enum vxge_hw_status{} enumerated types.
1019 * VXGE_HW_OK - for success.
1020 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1021 */
1022enum vxge_hw_status vxge_hw_ring_handle_tcode(
1023 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1024{
1025 struct __vxge_hw_channel *channel;
1026 enum vxge_hw_status status = VXGE_HW_OK;
1027
1028 channel = &ring->channel;
1029
1030 /* t_code 0 means success, and t_code 0x5 means an unparseable
1031 * packet (such as one with an unknown IPv6 header); both are
1032 * accepted. Any other t_code is counted as a receive error.
1033 */
1034
1035 if (t_code == 0 || t_code == 5) {
1036 status = VXGE_HW_OK;
1037 goto exit;
1038 }
1039
1040 if (t_code > 0xF) {
1041 status = VXGE_HW_ERR_INVALID_TCODE;
1042 goto exit;
1043 }
1044
1045 ring->stats->rxd_t_code_err_cnt[t_code]++;
1046exit:
1047 return status;
1048}
1049
1050/**
1051 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1052 *
1053 * @fifo: fifohandle
1054 * @txdl_ptr: The starting location of the TxDL in host memory
1055 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1056 * @no_snoop: No snoop flags
1057 *
1058 * This function posts a non-offload doorbell to doorbell FIFO
1059 *
1060 */
1061static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1062 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1063{
1064 struct __vxge_hw_channel *channel;
1065
1066 channel = &fifo->channel;
1067
1068 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1069 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1070 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1071 &fifo->nofl_db->control_0);
1072
1073 wmb();
1074
1075 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1076 wmb();
1077
1078}
1079
1080/**
1081 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1082 * the fifo
1083 * @fifoh: Handle to the fifo object used for non offload send
1084 */
1085u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1086{
1087 return vxge_hw_channel_dtr_count(&fifoh->channel);
1088}
1089
1090/**
1091 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1092 * @fifoh: Handle to the fifo object used for non offload send
1093 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1094 * with a valid handle.
1095 * @txdl_priv: Buffer to return the pointer to per txdl space
1096 *
1097 * Reserve a single TxDL (that is, fifo descriptor)
1098 * for subsequent filling-in by the driver
1099 * and posting on the corresponding channel (@channelh)
1100 * via vxge_hw_fifo_txdl_post().
1101 *
1102 * Note: it is the responsibility of driver to reserve multiple descriptors
1103 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1104 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1105 *
1106 * Returns: VXGE_HW_OK - success;
1107 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1108 *
1109 */
1110enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1111 struct __vxge_hw_fifo *fifo,
1112 void **txdlh, void **txdl_priv)
1113{
1114 struct __vxge_hw_channel *channel;
1115 enum vxge_hw_status status;
1116 int i;
1117
1118 channel = &fifo->channel;
1119
1120 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1121
1122 if (status == VXGE_HW_OK) {
1123 struct vxge_hw_fifo_txd *txdp =
1124 (struct vxge_hw_fifo_txd *)*txdlh;
1125 struct __vxge_hw_fifo_txdl_priv *priv;
1126
1127 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1128
1129 /* reset the TxDL's private */
1130 priv->align_dma_offset = 0;
1131 priv->align_vaddr_start = priv->align_vaddr;
1132 priv->align_used_frags = 0;
1133 priv->frags = 0;
1134 priv->alloc_frags = fifo->config->max_frags;
1135 priv->next_txdl_priv = NULL;
1136
1137 *txdl_priv = (void *)(size_t)txdp->host_control;
1138
1139 for (i = 0; i < fifo->config->max_frags; i++) {
1140 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1141 txdp->control_0 = txdp->control_1 = 0;
1142 }
1143 }
1144
1145 return status;
1146}
1147
1148/**
1149 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1150 * descriptor.
1151 * @fifo: Handle to the fifo object used for non offload send
1152 * @txdlh: Descriptor handle.
1153 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1154 * (of buffers).
1155 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1156 * @size: Size of the data buffer (in bytes).
1157 *
1158 * This API is part of the preparation of the transmit descriptor for posting
1159 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1160 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1161 * All three APIs fill in the fields of the fifo descriptor,
1162 * in accordance with the Titan specification.
1163 *
1164 */
1165void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1166 void *txdlh, u32 frag_idx,
1167 dma_addr_t dma_pointer, u32 size)
1168{
1169 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1170 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1171 struct __vxge_hw_channel *channel;
1172
1173 channel = &fifo->channel;
1174
1175 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1176 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1177
1178 if (frag_idx != 0)
1179 txdp->control_0 = txdp->control_1 = 0;
1180 else {
1181 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1182 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1183 txdp->control_1 |= fifo->interrupt_type;
1184 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1185 fifo->tx_intr_num);
1186 if (txdl_priv->frags) {
1187 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1188 (txdl_priv->frags - 1);
1189 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1190 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1191 }
1192 }
1193
1194 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1195
1196 txdp->buffer_pointer = (u64)dma_pointer;
1197 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1198 fifo->stats->total_buffers++;
1199 txdl_priv->frags++;
1200}
1201
1202/**
1203 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1204 * @fifo: Handle to the fifo object used for non offload send
1205 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1206 * @frags: Number of contiguous buffers that are part of a single
1207 * transmit operation.
1208 *
1209 * Post descriptor on the 'fifo' type channel for transmission.
1210 * Prior to posting the descriptor should be filled in accordance with
1211 * Host/Titan interface specification for a given service (LL, etc.).
1212 *
1213 */
1214void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1215{
1216 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1217 struct vxge_hw_fifo_txd *txdp_last;
1218 struct vxge_hw_fifo_txd *txdp_first;
1219 struct __vxge_hw_channel *channel;
1220
1221 channel = &fifo->channel;
1222
1223 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1224 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1225
1226 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1227 txdp_last->control_0 |=
1228 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1229 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1230
1231 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1232
1233 __vxge_hw_non_offload_db_post(fifo,
1234 (u64)(size_t)txdl_priv->dma_addr,
1235 txdl_priv->frags - 1,
1236 fifo->no_snoop_bits);
1237
1238 fifo->stats->total_posts++;
1239 fifo->stats->common_stats.usage_cnt++;
1240 if (fifo->stats->common_stats.usage_max <
1241 fifo->stats->common_stats.usage_cnt)
1242 fifo->stats->common_stats.usage_max =
1243 fifo->stats->common_stats.usage_cnt;
1244}
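/*
 * Transmit-side sketch (illustrative only, not part of this file): reserve
 * a TxDL, describe each fragment with vxge_hw_fifo_txdl_buffer_set(), then
 * post it. DMA mapping of the fragments is driver specific and elided;
 * example_xmit, "dma", "len" and "nfrags" are assumed caller-supplied names.
 */
#if 0
static enum vxge_hw_status example_xmit(struct __vxge_hw_fifo *fifo,
					dma_addr_t *dma, u32 *len, u32 nfrags)
{
	void *txdlh, *txdl_priv;
	enum vxge_hw_status status;
	u32 i;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;		/* fifo full, back off */

	for (i = 0; i < nfrags; i++)
		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i, dma[i], len[i]);

	vxge_hw_fifo_txdl_post(fifo, txdlh);
	return VXGE_HW_OK;
}
#endif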
1245
1246/**
1247 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1248 * @fifo: Handle to the fifo object used for non offload send
1249 * @txdlh: Descriptor handle. Returned by HW.
1250 * @t_code: Transfer code, as per Titan User Guide,
1251 * Transmit Descriptor Format.
1252 * Returned by HW.
1253 *
1254 * Retrieve the _next_ completed descriptor.
1255 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1256 * driver of new completed descriptors. After that
1257 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest of
1258 * the completions (the very first completion is passed by HW via
1259 * vxge_hw_channel_callback_f).
1260 *
1261 * Implementation-wise, the driver is free to call
1262 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1263 * channel callback, or in a deferred fashion and separate (from HW)
1264 * context.
1265 *
1266 * Non-zero @t_code means failure to process the descriptor.
1267 * The failure could happen, for instance, when the link is
1268 * down, in which case Titan completes the descriptor because it
1269 * is not able to send the data out.
1270 *
1271 * For details please refer to Titan User Guide.
1272 *
1273 * Returns: VXGE_HW_OK - success.
1274 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1275 * are currently available for processing.
1276 *
1277 */
1278enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1279 struct __vxge_hw_fifo *fifo, void **txdlh,
1280 enum vxge_hw_fifo_tcode *t_code)
1281{
1282 struct __vxge_hw_channel *channel;
1283 struct vxge_hw_fifo_txd *txdp;
1284 enum vxge_hw_status status = VXGE_HW_OK;
1285
1286 channel = &fifo->channel;
1287
1288 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1289
1290 txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1291 if (txdp == NULL) {
1292 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1293 goto exit;
1294 }
1295
1296 /* check whether host owns it */
1297 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1298
1299 vxge_assert(txdp->host_control != 0);
1300
1301 vxge_hw_channel_dtr_complete(channel);
1302
1303 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1304
1305 if (fifo->stats->common_stats.usage_cnt > 0)
1306 fifo->stats->common_stats.usage_cnt--;
1307
1308 status = VXGE_HW_OK;
1309 goto exit;
1310 }
1311
1312 /* no more completions */
1313 *txdlh = NULL;
1314 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315exit:
1316 return status;
1317}
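/*
 * Tx completion sketch (illustrative only, not part of this file): reap
 * completed TxDLs, classify the transfer code and return each descriptor
 * to the free pool. Unmapping buffers and freeing the skb are elided;
 * example_tx_reap is a hypothetical name and VXGE_HW_FIFO_T_CODE_OK is
 * assumed to be the zero "success" value from vxge-traffic.h.
 */
#if 0
static void example_tx_reap(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
								VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		/* ... unmap buffers and free the skb ... */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
#endif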
1318
1319/**
1320 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1321 * @fifo: Handle to the fifo object used for non offload send
1322 * @txdlh: Descriptor handle.
1323 * @t_code: One of the enumerated (and documented in the Titan user guide)
1324 * "transfer codes".
1325 *
1326 * Handle descriptor's transfer code. The latter comes with each completed
1327 * descriptor.
1328 *
1329 * Returns: one of the enum vxge_hw_status{} enumerated types.
1330 * VXGE_HW_OK - for success.
1331 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1332 */
1333enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1334 void *txdlh,
1335 enum vxge_hw_fifo_tcode t_code)
1336{
1337 struct __vxge_hw_channel *channel;
1338
1339 enum vxge_hw_status status = VXGE_HW_OK;
1340 channel = &fifo->channel;
1341
1342 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1343 status = VXGE_HW_ERR_INVALID_TCODE;
1344 goto exit;
1345 }
1346
1347 fifo->stats->txd_t_code_err_cnt[t_code]++;
1348exit:
1349 return status;
1350}
1351
1352/**
1353 * vxge_hw_fifo_txdl_free - Free descriptor.
1354 * @fifo: Handle to the fifo object used for non offload send
1355 * @txdlh: Descriptor handle.
1356 *
1357 * Free the reserved descriptor. This operation is "symmetrical" to
1358 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1359 * lifecycle.
1360 *
1361 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1362 * be:
1363 *
1364 * - reserved (vxge_hw_fifo_txdl_reserve);
1365 *
1366 * - posted (vxge_hw_fifo_txdl_post);
1367 *
1368 * - completed (vxge_hw_fifo_txdl_next_completed);
1369 *
1370 * - and recycled again (vxge_hw_fifo_txdl_free).
1371 *
1372 * For alternative state transitions and more details please refer to
1373 * the design doc.
1374 *
1375 */
1376void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1377{
1378 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1379 u32 max_frags;
1380 struct __vxge_hw_channel *channel;
1381
1382 channel = &fifo->channel;
1383
1384 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1385 (struct vxge_hw_fifo_txd *)txdlh);
1386
1387 max_frags = fifo->config->max_frags;
1388
1389 vxge_hw_channel_dtr_free(channel, txdlh);
1390}
1391
1392/**
1393 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1394 * to MAC address table.
1395 * @vp: Vpath handle.
1396 * @macaddr: MAC address to be added for this vpath into the list
1397 * @macaddr_mask: MAC address mask for macaddr
1398 * @duplicate_mode: Duplicate MAC address add mode. Please see
1399 * enum vxge_hw_vpath_mac_addr_add_mode{}
1400 *
1401 * Adds the given mac address and mac address mask into the list for this
1402 * vpath.
1403 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1404 * vxge_hw_vpath_mac_addr_get_next
1405 *
1406 */
1407enum vxge_hw_status
1408vxge_hw_vpath_mac_addr_add(
1409 struct __vxge_hw_vpath_handle *vp,
1410 u8 (macaddr)[ETH_ALEN],
1411 u8 (macaddr_mask)[ETH_ALEN],
1412 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1413{
1414 u32 i;
1415 u64 data1 = 0ULL;
1416 u64 data2 = 0ULL;
1417 enum vxge_hw_status status = VXGE_HW_OK;
1418
1419 if (vp == NULL) {
1420 status = VXGE_HW_ERR_INVALID_HANDLE;
1421 goto exit;
1422 }
1423
1424 for (i = 0; i < ETH_ALEN; i++) {
1425 data1 <<= 8;
1426 data1 |= (u8)macaddr[i];
1427
1428 data2 <<= 8;
1429 data2 |= (u8)macaddr_mask[i];
1430 }
1431
1432 switch (duplicate_mode) {
1433 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1434 i = 0;
1435 break;
1436 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1437 i = 1;
1438 break;
1439 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1440 i = 2;
1441 break;
1442 default:
1443 i = 0;
1444 break;
1445 }
1446
1447 status = __vxge_hw_vpath_rts_table_set(vp,
1448 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1449 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1450 0,
1451 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1452 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1453 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1454exit:
1455 return status;
1456}
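/*
 * Worked example (illustrative only) of the data1/data2 packing performed
 * in the loop above: the six address bytes are shifted in MSB first, so
 *
 *   macaddr      = 00:0c:29:aa:bb:cc  ->  data1 = 0x00000c29aabbcc
 *   macaddr_mask = ff:ff:ff:ff:ff:ff  ->  data2 = 0x0000ffffffffffff
 *
 * before being wrapped by the STEER_DATA0/DATA1 macros and handed to
 * __vxge_hw_vpath_rts_table_set().
 */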
1457
1458/**
1459 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1460 * from MAC address table.
1461 * @vp: Vpath handle.
1462 * @macaddr: First MAC address entry for this vpath in the list
1463 * @macaddr_mask: MAC address mask for macaddr
1464 *
1465 * Returns the first mac address and mac address mask in the list for this
1466 * vpath.
1467 * see also: vxge_hw_vpath_mac_addr_get_next
1468 *
1469 */
1470enum vxge_hw_status
1471vxge_hw_vpath_mac_addr_get(
1472 struct __vxge_hw_vpath_handle *vp,
1473 u8 (macaddr)[ETH_ALEN],
1474 u8 (macaddr_mask)[ETH_ALEN])
1475{
1476 u32 i;
1477 u64 data1 = 0ULL;
1478 u64 data2 = 0ULL;
1479 enum vxge_hw_status status = VXGE_HW_OK;
1480
1481 if (vp == NULL) {
1482 status = VXGE_HW_ERR_INVALID_HANDLE;
1483 goto exit;
1484 }
1485
1486 status = __vxge_hw_vpath_rts_table_get(vp,
1487 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1488 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1489 0, &data1, &data2);
1490
1491 if (status != VXGE_HW_OK)
1492 goto exit;
1493
1494 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1495
1496 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1497
1498 for (i = ETH_ALEN; i > 0; i--) {
1499 macaddr[i-1] = (u8)(data1 & 0xFF);
1500 data1 >>= 8;
1501
1502 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1503 data2 >>= 8;
1504 }
1505exit:
1506 return status;
1507}
1508
1509/**
1510 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1511 * vpath
1512 * from MAC address table.
1513 * @vp: Vpath handle.
1514 * @macaddr: Next MAC address entry for this vpath in the list
1515 * @macaddr_mask: MAC address mask for macaddr
1516 *
1517 * Returns the next mac address and mac address mask in the list for this
1518 * vpath.
1519 * see also: vxge_hw_vpath_mac_addr_get
1520 *
1521 */
1522enum vxge_hw_status
1523vxge_hw_vpath_mac_addr_get_next(
1524 struct __vxge_hw_vpath_handle *vp,
1525 u8 (macaddr)[ETH_ALEN],
1526 u8 (macaddr_mask)[ETH_ALEN])
1527{
1528 u32 i;
1529 u64 data1 = 0ULL;
1530 u64 data2 = 0ULL;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532
1533 if (vp == NULL) {
1534 status = VXGE_HW_ERR_INVALID_HANDLE;
1535 goto exit;
1536 }
1537
1538 status = __vxge_hw_vpath_rts_table_get(vp,
1539 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1540 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1541 0, &data1, &data2);
1542
1543 if (status != VXGE_HW_OK)
1544 goto exit;
1545
1546 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1547
1548 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1549
1550 for (i = ETH_ALEN; i > 0; i--) {
1551 macaddr[i-1] = (u8)(data1 & 0xFF);
1552 data1 >>= 8;
1553
1554 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1555 data2 >>= 8;
1556 }
1557
1558exit:
1559 return status;
1560}
1561
1562/**
1563 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1564 * from the MAC address table.
1565 * @vp: Vpath handle.
1566 * @macaddr: MAC address to be added for this vpath into the list
1567 * @macaddr_mask: MAC address mask for macaddr
1568 *
1569 * Deletes the given mac address and mac address mask from the list for this
1570 * vpath.
1571 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1572 * vxge_hw_vpath_mac_addr_get_next
1573 *
1574 */
1575enum vxge_hw_status
1576vxge_hw_vpath_mac_addr_delete(
1577 struct __vxge_hw_vpath_handle *vp,
1578 u8 (macaddr)[ETH_ALEN],
1579 u8 (macaddr_mask)[ETH_ALEN])
1580{
1581 u32 i;
1582 u64 data1 = 0ULL;
1583 u64 data2 = 0ULL;
1584 enum vxge_hw_status status = VXGE_HW_OK;
1585
1586 if (vp == NULL) {
1587 status = VXGE_HW_ERR_INVALID_HANDLE;
1588 goto exit;
1589 }
1590
1591 for (i = 0; i < ETH_ALEN; i++) {
1592 data1 <<= 8;
1593 data1 |= (u8)macaddr[i];
1594
1595 data2 <<= 8;
1596 data2 |= (u8)macaddr_mask[i];
1597 }
1598
1599 status = __vxge_hw_vpath_rts_table_set(vp,
1600 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1601 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1602 0,
1603 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1604 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1605exit:
1606 return status;
1607}
1608
1609/**
1610 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1611 * to vlan id table.
1612 * @vp: Vpath handle.
1613 * @vid: vlan id to be added for this vpath into the list
1614 *
1615 * Adds the given vlan id into the list for this vpath.
1616 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1617 * vxge_hw_vpath_vid_get_next
1618 *
1619 */
1620enum vxge_hw_status
1621vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1622{
1623 enum vxge_hw_status status = VXGE_HW_OK;
1624
1625 if (vp == NULL) {
1626 status = VXGE_HW_ERR_INVALID_HANDLE;
1627 goto exit;
1628 }
1629
1630 status = __vxge_hw_vpath_rts_table_set(vp,
1631 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1632 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1633 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1634exit:
1635 return status;
1636}
1637
1638/**
1639 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1640 * from vlan id table.
1641 * @vp: Vpath handle.
1642 * @vid: Buffer to return vlan id
1643 *
1644 * Returns the first vlan id in the list for this vpath.
1645 * see also: vxge_hw_vpath_vid_get_next
1646 *
1647 */
1648enum vxge_hw_status
1649vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1650{
1651 u64 data;
1652 enum vxge_hw_status status = VXGE_HW_OK;
1653
1654 if (vp == NULL) {
1655 status = VXGE_HW_ERR_INVALID_HANDLE;
1656 goto exit;
1657 }
1658
1659 status = __vxge_hw_vpath_rts_table_get(vp,
1660 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1661 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1662 0, vid, &data);
1663
1664 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1665exit:
1666 return status;
1667}
1668
1669/**
1670 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1671 * from vlan id table.
1672 * @vp: Vpath handle.
1673 * @vid: Buffer to return vlan id
1674 *
1675 * Returns the next vlan id in the list for this vpath.
1676 * see also: vxge_hw_vpath_vid_get
1677 *
1678 */
1679enum vxge_hw_status
1680vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1681{
1682 u64 data;
1683 enum vxge_hw_status status = VXGE_HW_OK;
1684
1685 if (vp == NULL) {
1686 status = VXGE_HW_ERR_INVALID_HANDLE;
1687 goto exit;
1688 }
1689
1690 status = __vxge_hw_vpath_rts_table_get(vp,
1691 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1692 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1693 0, vid, &data);
1694
1695 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1696exit:
1697 return status;
1698}
1699
1700/**
1701 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1702 * from the vlan id table.
1703 * @vp: Vpath handle.
1704 * @vid: vlan id to be added for this vpath into the list
1705 *
1706 * Deletes the given vlan id from the list for this vpath.
1707 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1708 * vxge_hw_vpath_vid_get_next
1709 *
1710 */
1711enum vxge_hw_status
1712vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1713{
1714 enum vxge_hw_status status = VXGE_HW_OK;
1715
1716 if (vp == NULL) {
1717 status = VXGE_HW_ERR_INVALID_HANDLE;
1718 goto exit;
1719 }
1720
1721 status = __vxge_hw_vpath_rts_table_set(vp,
1722 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1723 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1724 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1725exit:
1726 return status;
1727}
1728
1729/**
1730 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1731 * @vp: Vpath handle.
1732 *
1733 * Enable promiscuous mode of Titan-e operation.
1734 *
1735 * See also: vxge_hw_vpath_promisc_disable().
1736 */
1737enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1738 struct __vxge_hw_vpath_handle *vp)
1739{
1740 u64 val64;
1741 struct __vxge_hw_virtualpath *vpath;
1742 enum vxge_hw_status status = VXGE_HW_OK;
1743
1744 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1745 status = VXGE_HW_ERR_INVALID_HANDLE;
1746 goto exit;
1747 }
1748
1749 vpath = vp->vpath;
1750
1751 /* Enable promiscuous mode for function 0 only */
1752 if (!(vpath->hldev->access_rights &
1753 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1754 return VXGE_HW_OK;
1755
1756 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1757
1758 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1759
1760 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1761 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1762 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1763 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1764
1765 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1766 }
1767exit:
1768 return status;
1769}
1770
1771/**
1772 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1773 * @vp: Vpath handle.
1774 *
1775 * Disable promiscuous mode of Titan-e operation.
1776 *
1777 * See also: vxge_hw_vpath_promisc_enable().
1778 */
1779enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1780 struct __vxge_hw_vpath_handle *vp)
1781{
1782 u64 val64;
1783 struct __vxge_hw_virtualpath *vpath;
1784 enum vxge_hw_status status = VXGE_HW_OK;
1785
1786 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1787 status = VXGE_HW_ERR_INVALID_HANDLE;
1788 goto exit;
1789 }
1790
1791 vpath = vp->vpath;
1792
1793 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1794
1795 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1796
1797 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1798 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1799 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1800
1801 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1802 }
1803exit:
1804 return status;
1805}
1806
1807/*
1808 * vxge_hw_vpath_bcast_enable - Enable broadcast
1809 * @vp: Vpath handle.
1810 *
1811 * Enable receiving broadcasts.
1812 */
1813enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1814 struct __vxge_hw_vpath_handle *vp)
1815{
1816 u64 val64;
1817 struct __vxge_hw_virtualpath *vpath;
1818 enum vxge_hw_status status = VXGE_HW_OK;
1819
1820 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1821 status = VXGE_HW_ERR_INVALID_HANDLE;
1822 goto exit;
1823 }
1824
1825 vpath = vp->vpath;
1826
1827 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1828
1829 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1830 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1831 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1832 }
1833exit:
1834 return status;
1835}
1836
1837/**
1838 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1839 * @vp: Vpath handle.
1840 *
1841 * Enable Titan-e multicast addresses.
1842 * Returns: VXGE_HW_OK on success.
1843 *
1844 */
1845enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1846 struct __vxge_hw_vpath_handle *vp)
1847{
1848 u64 val64;
1849 struct __vxge_hw_virtualpath *vpath;
1850 enum vxge_hw_status status = VXGE_HW_OK;
1851
1852 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1853 status = VXGE_HW_ERR_INVALID_HANDLE;
1854 goto exit;
1855 }
1856
1857 vpath = vp->vpath;
1858
1859 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1860
1861 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1862 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1863 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1864 }
1865exit:
1866 return status;
1867}
1868
1869/**
1870 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1871 * @vp: Vpath handle.
1872 *
1873 * Disable Titan-e multicast addresses.
1874 * Returns: VXGE_HW_OK - success.
1875 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1876 *
1877 */
1878enum vxge_hw_status
1879vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1880{
1881 u64 val64;
1882 struct __vxge_hw_virtualpath *vpath;
1883 enum vxge_hw_status status = VXGE_HW_OK;
1884
1885 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1886 status = VXGE_HW_ERR_INVALID_HANDLE;
1887 goto exit;
1888 }
1889
1890 vpath = vp->vpath;
1891
1892 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1893
1894 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1895 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1896 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1897 }
1898exit:
1899 return status;
1900}
1901
1902/*
1903 * __vxge_hw_vpath_alarm_process - Process Alarms.
1904 * @vpath: Virtual Path.
1905 * @skip_alarms: Do not clear the alarms
1906 *
1907 * Process vpath alarms.
1908 *
1909 */
1910enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1911 struct __vxge_hw_virtualpath *vpath,
1912 u32 skip_alarms)
1913{
1914 u64 val64;
1915 u64 alarm_status;
1916 u64 pic_status;
1917 struct __vxge_hw_device *hldev = NULL;
1918 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1919 u64 mask64;
1920 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1921 struct vxge_hw_vpath_reg __iomem *vp_reg;
1922
1923 if (vpath == NULL) {
1924 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1925 alarm_event);
1926 goto out;
1927 }
1928
1929 hldev = vpath->hldev;
1930 vp_reg = vpath->vp_reg;
1931 alarm_status = readq(&vp_reg->vpath_general_int_status);
1932
1933 if (alarm_status == VXGE_HW_ALL_FOXES) {
1934 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1935 alarm_event);
1936 goto out;
1937 }
1938
1939 sw_stats = vpath->sw_stats;
1940
1941 if (alarm_status & ~(
1942 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1943 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1944 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1945 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1946 sw_stats->error_stats.unknown_alarms++;
1947
1948 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1949 alarm_event);
1950 goto out;
1951 }
1952
1953 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1954
1955 val64 = readq(&vp_reg->xgmac_vp_int_status);
1956
1957 if (val64 &
1958 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1959
1960 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1961
1962 if (((val64 &
1963 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1964 (!(val64 &
1965 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1966 ((val64 &
1967 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1968 && (!(val64 &
1969 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1970 ))) {
1971 sw_stats->error_stats.network_sustained_fault++;
1972
1973 writeq(
1974 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1975 &vp_reg->asic_ntwk_vp_err_mask);
1976
1977 __vxge_hw_device_handle_link_down_ind(hldev);
1978 alarm_event = VXGE_HW_SET_LEVEL(
1979 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1980 }
1981
1982 if (((val64 &
1983 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1984 (!(val64 &
1985 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1986 ((val64 &
1987 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1988 && (!(val64 &
1989 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1990 ))) {
1991
1992 sw_stats->error_stats.network_sustained_ok++;
1993
1994 writeq(
1995 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1996 &vp_reg->asic_ntwk_vp_err_mask);
1997
1998 __vxge_hw_device_handle_link_up_ind(hldev);
1999 alarm_event = VXGE_HW_SET_LEVEL(
2000 VXGE_HW_EVENT_LINK_UP, alarm_event);
2001 }
2002
2003 writeq(VXGE_HW_INTR_MASK_ALL,
2004 &vp_reg->asic_ntwk_vp_err_reg);
2005
2006 alarm_event = VXGE_HW_SET_LEVEL(
2007 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2008
2009 if (skip_alarms)
2010 return VXGE_HW_OK;
2011 }
2012 }
2013
2014 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2015
2016 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2017
2018 if (pic_status &
2019 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2020
2021 val64 = readq(&vp_reg->general_errors_reg);
2022 mask64 = readq(&vp_reg->general_errors_mask);
2023
2024 if ((val64 &
2025 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2026 ~mask64) {
2027 sw_stats->error_stats.ini_serr_det++;
2028
2029 alarm_event = VXGE_HW_SET_LEVEL(
2030 VXGE_HW_EVENT_SERR, alarm_event);
2031 }
2032
2033 if ((val64 &
2034 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2035 ~mask64) {
2036 sw_stats->error_stats.dblgen_fifo0_overflow++;
2037
2038 alarm_event = VXGE_HW_SET_LEVEL(
2039 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2040 }
2041
2042 if ((val64 &
2043 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2044 ~mask64)
2045 sw_stats->error_stats.statsb_pif_chain_error++;
2046
2047 if ((val64 &
2048 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2049 ~mask64)
2050 sw_stats->error_stats.statsb_drop_timeout++;
2051
2052 if ((val64 &
2053 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2054 ~mask64)
2055 sw_stats->error_stats.target_illegal_access++;
2056
2057 if (!skip_alarms) {
2058 writeq(VXGE_HW_INTR_MASK_ALL,
2059 &vp_reg->general_errors_reg);
2060 alarm_event = VXGE_HW_SET_LEVEL(
2061 VXGE_HW_EVENT_ALARM_CLEARED,
2062 alarm_event);
2063 }
2064 }
2065
2066 if (pic_status &
2067 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2068
2069 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2070 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2071
2072 if ((val64 &
2073 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2074 ~mask64) {
2075 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2076
2077 alarm_event = VXGE_HW_SET_LEVEL(
2078 VXGE_HW_EVENT_FIFO_ERR,
2079 alarm_event);
2080 }
2081
2082 if ((val64 &
2083 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2084 ~mask64) {
2085 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2086
2087 alarm_event = VXGE_HW_SET_LEVEL(
2088 VXGE_HW_EVENT_FIFO_ERR,
2089 alarm_event);
2090 }
2091
2092 if ((val64 &
2093 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2094 ~mask64) {
2095 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2096
2097 alarm_event = VXGE_HW_SET_LEVEL(
2098 VXGE_HW_EVENT_FIFO_ERR,
2099 alarm_event);
2100 }
2101
2102 if (!skip_alarms) {
2103 writeq(VXGE_HW_INTR_MASK_ALL,
2104 &vp_reg->kdfcctl_errors_reg);
2105 alarm_event = VXGE_HW_SET_LEVEL(
2106 VXGE_HW_EVENT_ALARM_CLEARED,
2107 alarm_event);
2108 }
2109 }
2110
2111 }
2112
2113 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2114
2115 val64 = readq(&vp_reg->wrdma_alarm_status);
2116
2117 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2118
2119 val64 = readq(&vp_reg->prc_alarm_reg);
2120 mask64 = readq(&vp_reg->prc_alarm_mask);
2121
2122			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
2123 ~mask64)
2124 sw_stats->error_stats.prc_ring_bumps++;
2125
2126 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2127 ~mask64) {
2128 sw_stats->error_stats.prc_rxdcm_sc_err++;
2129
2130 alarm_event = VXGE_HW_SET_LEVEL(
2131 VXGE_HW_EVENT_VPATH_ERR,
2132 alarm_event);
2133 }
2134
2135 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2136 & ~mask64) {
2137 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2138
2139 alarm_event = VXGE_HW_SET_LEVEL(
2140 VXGE_HW_EVENT_VPATH_ERR,
2141 alarm_event);
2142 }
2143
2144 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2145 & ~mask64) {
2146 sw_stats->error_stats.prc_quanta_size_err++;
2147
2148 alarm_event = VXGE_HW_SET_LEVEL(
2149 VXGE_HW_EVENT_VPATH_ERR,
2150 alarm_event);
2151 }
2152
2153 if (!skip_alarms) {
2154 writeq(VXGE_HW_INTR_MASK_ALL,
2155 &vp_reg->prc_alarm_reg);
2156 alarm_event = VXGE_HW_SET_LEVEL(
2157 VXGE_HW_EVENT_ALARM_CLEARED,
2158 alarm_event);
2159 }
2160 }
2161 }
2162out:
2163 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2164
2165 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2166 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2167 return VXGE_HW_OK;
2168
2169 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2170
2171 if (alarm_event == VXGE_HW_EVENT_SERR)
2172 return VXGE_HW_ERR_CRITICAL;
2173
2174 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2175 VXGE_HW_ERR_SLOT_FREEZE :
2176 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2177 VXGE_HW_ERR_VPATH;
2178}
2179
2180/*
2181 * vxge_hw_vpath_alarm_process - Process Alarms.
2182 * @vpath: Virtual Path.
2183 * @skip_alarms: Do not clear the alarms
2184 *
2185 * Process vpath alarms.
2186 *
2187 */
2188enum vxge_hw_status vxge_hw_vpath_alarm_process(
2189 struct __vxge_hw_vpath_handle *vp,
2190 u32 skip_alarms)
2191{
2192 enum vxge_hw_status status = VXGE_HW_OK;
2193
2194 if (vp == NULL) {
2195 status = VXGE_HW_ERR_INVALID_HANDLE;
2196 goto exit;
2197 }
2198
2199 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2200exit:
2201 return status;
2202}
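A hedged sketch of driving the alarm path from an interrupt handler: vxge_hw_vpath_alarm_process() is called with skip_alarms = 0 so the alarm registers are cleared as they are handled, and the return code is checked for the critical cases defined above. The dev_id convention and the reset hook are assumptions.

#include <linux/interrupt.h>

static irqreturn_t example_alarm_isr(int irq, void *dev_id)
{
	struct __vxge_hw_vpath_handle *vp = dev_id;	/* assumed cookie */
	enum vxge_hw_status status;

	status = vxge_hw_vpath_alarm_process(vp, 0);

	if (status == VXGE_HW_ERR_CRITICAL ||
	    status == VXGE_HW_ERR_SLOT_FREEZE) {
		/* a real driver would schedule a device reset here,
		 * outside of interrupt context */
	}

	return IRQ_HANDLED;
}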
2203
2204/**
2205 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2206 * alarms
2207 * @vp: Virtual Path handle.
2208 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2209 * interrupts (can be repeated). If the fifo or ring is not enabled,
2210 * the MSIX vector for it should be set to 0.
2211 * @alarm_msix_id: MSIX vector for alarm.
2212 *
2213 * This API associates the given MSIX vector numbers with the four TIM
2214 * interrupts and the alarm interrupt.
2215 */
2216enum vxge_hw_status
2217vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2218 int alarm_msix_id)
2219{
2220 u64 val64;
2221 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2222 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2223 u32 first_vp_id = vpath->hldev->first_vp_id;
2224
2225 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2226 (first_vp_id * 4) + tim_msix_id[0]) |
2227 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2228 (first_vp_id * 4) + tim_msix_id[1]) |
2229 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2230 (first_vp_id * 4) + tim_msix_id[2]);
2231
2232 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2233 (first_vp_id * 4) + tim_msix_id[3]);
2234
2235 writeq(val64, &vp_reg->interrupt_cfg0);
2236
2237 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2238 (first_vp_id * 4) + alarm_msix_id),
2239 &vp_reg->interrupt_cfg2);
2240
2241 if (vpath->hldev->config.intr_mode ==
2242 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2243 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2244 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2245 0, 32), &vp_reg->one_shot_vect1_en);
2246 }
2247
2248 if (vpath->hldev->config.intr_mode ==
2249 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2250 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2251 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2252 0, 32), &vp_reg->one_shot_vect2_en);
2253
2254 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2255 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2256 0, 32), &vp_reg->one_shot_vect3_en);
2257 }
2258
2259 return VXGE_HW_OK;
2260}
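The sketch below shows one plausible way to fill tim_msix_id[] before calling vxge_hw_vpath_msix_set(): Tx completions on local vector 0, Rx completions on vector 1, the alarm on vector 2, and the unused EINTA/BMAP slots left at 0 as the description above allows. The concrete vector numbers are assumptions, not requirements.

static void example_assign_vectors(struct __vxge_hw_vpath_handle *vp)
{
	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0};
	int alarm_msix_id = 2;			/* assumed local vector */

	tim_msix_id[VXGE_HW_VPATH_INTR_TX] = 0;	/* fifo completions */
	tim_msix_id[VXGE_HW_VPATH_INTR_RX] = 1;	/* ring completions */
	/* VXGE_HW_VPATH_INTR_EINTA / _BMAP stay 0 when unused */

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
}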
2261
2262/**
2263 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2264 * @vp: Virtual Path handle.
2265 * @msix_id: MSIX ID
2266 *
2267 * The function masks the msix interrupt for the given msix_id
2268 *
2269 * Returns: none. The msix_id is not range-checked; the caller must
2270 * supply a valid vector index.
2271 *
2272 * See also: vxge_hw_vpath_msix_unmask()
2273 */
2274void
2275vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2276{
2277 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2278 __vxge_hw_pio_mem_write32_upper(
2279 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2280 (msix_id / 4)), 0, 32),
2281 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2282
2283 return;
2284}
2285
2286/**
2287 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2288 * @vp: Virtual Path handle.
2289 * @msix_id: MSIX ID
2290 *
2291 * The function clears the msix interrupt for the given msix_id
2292 *
2293 * Returns: none. The msix_id is not range-checked; the caller must
2294 * supply a valid vector index.
2295 *
2296 * See also: vxge_hw_vpath_msix_mask()
2297 */
2298void
2299vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2300{
2301 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2302 if (hldev->config.intr_mode ==
2303 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2304 __vxge_hw_pio_mem_write32_upper(
2305 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2306 (msix_id/4)), 0, 32),
2307 &hldev->common_reg->
2308 clr_msix_one_shot_vec[msix_id%4]);
2309 } else {
2310 __vxge_hw_pio_mem_write32_upper(
2311 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2312 (msix_id/4)), 0, 32),
2313 &hldev->common_reg->
2314 clear_msix_mask_vect[msix_id%4]);
2315 }
2316
2317 return;
2318}
2319
2320/**
2321 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2322 * @vp: Virtual Path handle.
2323 * @msix_id: MSIX ID
2324 *
2325 * The function unmasks the msix interrupt for the given msix_id
2326 *
2327 * Returns: none. The msix_id is not range-checked; the caller must
2328 * supply a valid vector index.
2329 *
2330 * See also: vxge_hw_vpath_msix_mask()
2331 */
2332void
2333vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2334{
2335 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2336 __vxge_hw_pio_mem_write32_upper(
2337 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2338 (msix_id/4)), 0, 32),
2339 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2340
2341 return;
2342}
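Taken together, mask/clear/unmask suggest the usual deferred-processing pattern for a per-ring MSIX vector: mask in the hard IRQ, acknowledge, hand the work to NAPI, and unmask again from the poll routine once the ring is drained. The container type below is hypothetical; only the three vpath MSIX calls come from this file.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_ring_irq {			/* hypothetical container */
	struct __vxge_hw_vpath_handle *vp;
	int msix_id;
	struct napi_struct napi;
};

static irqreturn_t example_ring_msix_isr(int irq, void *dev_id)
{
	struct example_ring_irq *ri = dev_id;

	vxge_hw_vpath_msix_mask(ri->vp, ri->msix_id);	/* quiesce vector */
	vxge_hw_vpath_msix_clear(ri->vp, ri->msix_id);	/* acknowledge */
	napi_schedule(&ri->napi);			/* defer the work */

	/* the poll routine re-arms the vector with
	 * vxge_hw_vpath_msix_unmask(ri->vp, ri->msix_id) when done */
	return IRQ_HANDLED;
}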
2343
2344/**
2345 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2346 * @vp: Virtual Path handle.
2347 *
2348 * The function masks all msix interrupts for the given vpath
2349 *
2350 */
2351void
2352vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2353{
2354
2355 __vxge_hw_pio_mem_write32_upper(
2356 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2357 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2358
2359 return;
2360}
2361
2362/**
2363 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2364 * @vp: Virtual Path handle.
2365 *
2366 * Mask Tx and Rx vpath interrupts.
2367 *
2368 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2369 */
2370void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2371{
2372 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2373 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2374 u64 val64;
2375 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2376
2377 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2378 tim_int_mask1, vp->vpath->vp_id);
2379
2380 val64 = readq(&hldev->common_reg->tim_int_mask0);
2381
2382 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2383 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2384 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2385 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2386 &hldev->common_reg->tim_int_mask0);
2387 }
2388
2389 val64 = readl(&hldev->common_reg->tim_int_mask1);
2390
2391 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2392 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2393 __vxge_hw_pio_mem_write32_upper(
2394 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2395 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2396 &hldev->common_reg->tim_int_mask1);
2397 }
2398
2399 return;
2400}
2401
2402/**
2403 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2404 * @vp: Virtual Path handle.
2405 *
2406 * Unmask Tx and Rx vpath interrupts.
2407 *
2408 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2409 */
2410void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2411{
2412 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2413 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2414 u64 val64;
2415 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2416
2417 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2418 tim_int_mask1, vp->vpath->vp_id);
2419
2420 val64 = readq(&hldev->common_reg->tim_int_mask0);
2421
2422 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2423 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2424 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2425 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2426 &hldev->common_reg->tim_int_mask0);
2427 }
2428
2429 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2430 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2431 __vxge_hw_pio_mem_write32_upper(
2432 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2433 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2434 &hldev->common_reg->tim_int_mask1);
2435 }
2436
2437 return;
2438}
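For INTA operation the same deferral idea applies at vpath granularity; a brief sketch, with the NAPI context assumed:

#include <linux/netdevice.h>

static void example_inta_defer(struct __vxge_hw_vpath_handle *vp,
			       struct napi_struct *napi)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);	/* quiesce Tx/Rx TIM ints */
	napi_schedule(napi);			/* poll from softirq */
	/* vxge_hw_vpath_inta_unmask_tx_rx(vp) is issued from the poll
	 * routine after the completions have been reaped */
}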
2439
2440/**
2441 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2442 * descriptors and process the same.
2443 * @ring: Handle to the ring object used for receive
2444 *
2445 * The function polls the Rx for the completed descriptors and calls
2446 * the driver via supplied completion callback.
2447 *
2448 * Returns: VXGE_HW_OK, if the polling completes successfully.
2449 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2450 * descriptors available which are yet to be processed.
2451 *
2452 * See also: vxge_hw_vpath_poll_tx()
2453 */
2454enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2455{
2456 u8 t_code;
2457 enum vxge_hw_status status = VXGE_HW_OK;
2458 void *first_rxdh;
2459 u64 val64 = 0;
2460 int new_count = 0;
2461
2462 ring->cmpl_cnt = 0;
2463
2464 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2465 if (status == VXGE_HW_OK)
2466 ring->callback(ring, first_rxdh,
2467 t_code, ring->channel.userdata);
2468
2469 if (ring->cmpl_cnt != 0) {
2470 ring->doorbell_cnt += ring->cmpl_cnt;
2471 if (ring->doorbell_cnt >= ring->rxds_limit) {
2472 /*
2473 * Each RxD is of 4 qwords, update the number of
2474 * qwords replenished
2475 */
2476 new_count = (ring->doorbell_cnt * 4);
2477
2478 /* For each block add 4 more qwords */
2479 ring->total_db_cnt += ring->doorbell_cnt;
2480 if (ring->total_db_cnt >= ring->rxds_per_block) {
2481 new_count += 4;
2482 /* Reset total count */
2483 ring->total_db_cnt %= ring->rxds_per_block;
2484 }
2485 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2486 &ring->vp_reg->prc_rxd_doorbell);
2487 val64 =
2488 readl(&ring->common_reg->titan_general_int_status);
2489 ring->doorbell_cnt = 0;
2490 }
2491 }
2492
2493 return status;
2494}
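A sketch of how vxge_hw_vpath_poll_rx() might sit inside a NAPI poll routine; the container type, the budget bookkeeping, and the re-arm call are assumptions for illustration. The receive callback supplied at ring open time would do the per-packet work and update pkts_processed.

#include <linux/netdevice.h>

struct example_rx_ctx {				/* hypothetical container */
	struct napi_struct napi;
	struct __vxge_hw_ring *ring;
	struct __vxge_hw_vpath_handle *vp;
	int pkts_processed;
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_ctx *rx =
		container_of(napi, struct example_rx_ctx, napi);

	rx->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(rx->ring);	/* walk completed RxDs */

	if (rx->pkts_processed < budget) {
		napi_complete(napi);
		/* re-arm receive interrupts for this vpath (assumed) */
		vxge_hw_vpath_inta_unmask_tx_rx(rx->vp);
	}

	return rx->pkts_processed;
}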
2495
2496/**
2497 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2498 * the same.
2499 * @fifo: Handle to the fifo object used for non offload send
2500 *
2501 * The function polls the Tx for the completed descriptors and calls
2502 * the driver via supplied completion callback.
2503 *
2504 * Returns: VXGE_HW_OK, if the polling completes successfully.
2505 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2506 * descriptors available which are yet to be processed.
2507 *
2508 * See also: vxge_hw_vpath_poll_rx().
2509 */
2510enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2511 void **skb_ptr)
2512{
2513 enum vxge_hw_fifo_tcode t_code;
2514 void *first_txdlh;
2515 enum vxge_hw_status status = VXGE_HW_OK;
2516 struct __vxge_hw_channel *channel;
2517
2518 channel = &fifo->channel;
2519
2520 status = vxge_hw_fifo_txdl_next_completed(fifo,
2521 &first_txdlh, &t_code);
2522 if (status == VXGE_HW_OK)
2523 if (fifo->callback(fifo, first_txdlh,
2524 t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
2525 status = VXGE_HW_COMPLETIONS_REMAIN;
2526
2527 return status;
2528}
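And a correspondingly hedged sketch for the transmit side: how the completion callback hands back skbs through skb_ptr is an assumption here (a single pointer is shown; a real driver may chain several completions).

#include <linux/skbuff.h>

static void example_fifo_reap(struct __vxge_hw_fifo *fifo)
{
	void *skb_ptr = NULL;

	vxge_hw_vpath_poll_tx(fifo, &skb_ptr);	/* walk completed TxDLs */

	/* free whatever the fifo callback returned (assumed semantics) */
	if (skb_ptr)
		dev_kfree_skb((struct sk_buff *)skb_ptr);
}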
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
new file mode 100644
index 000000000000..7567a1140d07
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -0,0 +1,2409 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef VXGE_TRAFFIC_H
15#define VXGE_TRAFFIC_H
16
17#include "vxge-reg.h"
18#include "vxge-version.h"
19
20#define VXGE_HW_DTR_MAX_T_CODE 16
21#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
22#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
23#define VXGE_HW_MAX_VIRTUAL_PATHS 17
24
25#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
26
27#define VXGE_HW_DEFAULT_32 0xffffffff
28/* frames sizes */
29#define VXGE_HW_HEADER_802_2_SIZE 3
30#define VXGE_HW_HEADER_SNAP_SIZE 5
31#define VXGE_HW_HEADER_VLAN_SIZE 4
32#define VXGE_HW_MAC_HEADER_MAX_SIZE \
33 (ETH_HLEN + \
34 VXGE_HW_HEADER_802_2_SIZE + \
35 VXGE_HW_HEADER_VLAN_SIZE + \
36 VXGE_HW_HEADER_SNAP_SIZE)
37
38#define VXGE_HW_TCPIP_HEADER_MAX_SIZE (64 + 64)
39
40/* 32bit alignments */
41#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
42#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
43#define VXGE_HW_HEADER_802_2_ALIGN 3
44#define VXGE_HW_HEADER_SNAP_ALIGN 1
45
46#define VXGE_HW_L3_CKSUM_OK 0xFFFF
47#define VXGE_HW_L4_CKSUM_OK 0xFFFF
48
49/* Forward declarations */
50struct __vxge_hw_device;
51struct __vxge_hw_vpath_handle;
52struct vxge_hw_vp_config;
53struct __vxge_hw_virtualpath;
54struct __vxge_hw_channel;
55struct __vxge_hw_fifo;
56struct __vxge_hw_ring;
57struct vxge_hw_ring_attr;
58struct vxge_hw_mempool;
59
60#ifndef TRUE
61#define TRUE 1
62#endif
63
64#ifndef FALSE
65#define FALSE 0
66#endif
67
68/*VXGE_HW_STATUS_H*/
69
70#define VXGE_HW_EVENT_BASE 0
71#define VXGE_LL_EVENT_BASE 100
72
73/**
74 * enum vxge_hw_event- Enumerates slow-path HW events.
75 * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
76 * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
77 * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
78 * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
79 * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
80 * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
81 * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
82 * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
83 * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
84 * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
85 * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
86 * slot-freeze from the rest of the critical events (e.g. ECC) when it is
87 * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
88 *
89 * enum vxge_hw_event enumerates slow-path HW events.
90 *
91 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
92 * vxge_uld_link_down_f{}.
93 */
94enum vxge_hw_event {
95 VXGE_HW_EVENT_UNKNOWN = 0,
96 /* HW events */
97 VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
98 VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
99 VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
100 VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
101 VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
102 VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
103 VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
104 VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
105 VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
106 VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
107 VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
108 VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
109 VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
110 VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
111};
112
113#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
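VXGE_HW_SET_LEVEL() simply keeps the larger of the two event codes, which is how the alarm-processing path in vxge-traffic.c retains the highest-valued event it has seen. A tiny worked example:

static enum vxge_hw_event example_escalate(void)
{
	enum vxge_hw_event e = VXGE_HW_EVENT_UNKNOWN;

	e = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_FIFO_ERR, e); /* -> FIFO_ERR */
	e = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SERR, e);     /* -> SERR */
	e = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_LINK_UP, e);  /* SERR kept */

	return e;	/* VXGE_HW_EVENT_SERR */
}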
114
115/*
116 * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
117 *	caller.
118 */
119struct vxge_hw_mempool_dma {
120 dma_addr_t addr;
121 struct pci_dev *handle;
122 struct pci_dev *acc_handle;
123};
124
125/*
126 * vxge_hw_mempool_item_f - Mempool item alloc/free callback
127 * @mempoolh: Memory pool handle.
128 * @memblock: Address of memory block
129 * @memblock_index: Index of memory block
130 * @item: Item that gets allocated or freed.
131 * @index: Item's index in the memory pool.
132 * @is_last: True, if this item is the last one in the pool; false - otherwise.
133 * @userdata: Per-pool user context.
134 *
135 * Memory pool allocation/deallocation callback.
136 */
137
138/*
139 * struct vxge_hw_mempool - Memory pool.
140 */
141struct vxge_hw_mempool {
142
143 void (*item_func_alloc)(
144 struct vxge_hw_mempool *mempoolh,
145 u32 memblock_index,
146 struct vxge_hw_mempool_dma *dma_object,
147 u32 index,
148 u32 is_last);
149
150 void *userdata;
151 void **memblocks_arr;
152 void **memblocks_priv_arr;
153 struct vxge_hw_mempool_dma *memblocks_dma_arr;
154 struct __vxge_hw_device *devh;
155 u32 memblock_size;
156 u32 memblocks_max;
157 u32 memblocks_allocated;
158 u32 item_size;
159 u32 items_max;
160 u32 items_initial;
161 u32 items_current;
162 u32 items_per_memblock;
163 void **items_arr;
164 u32 items_priv_size;
165};
166
167#define VXGE_HW_MAX_INTR_PER_VP 4
168#define VXGE_HW_VPATH_INTR_TX 0
169#define VXGE_HW_VPATH_INTR_RX 1
170#define VXGE_HW_VPATH_INTR_EINTA 2
171#define VXGE_HW_VPATH_INTR_BMAP 3
172
173#define VXGE_HW_BLOCK_SIZE 4096
174
175/**
176 * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
177 * @intr_enable: Set to 1, if interrupt is enabled.
178 * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
179 * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
180 * asserted, other interrupt-generating entities will cancel the
181 * scheduled timer interrupt.
182 * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
183 * When asserted, an interrupt will be generated every time the
184 * boundary timer expires, even if no traffic has been transmitted
185 * on this interrupt.
186 * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
187 * (Re-) Interrupt Enable: When asserted, an interrupt will be
188 * generated the next time the timer expires, even if no traffic has
189 * been transmitted on this interrupt. (This will only happen once
190 * each time that this value is written to the TIM.) This bit is
191 * cleared by H/W at the end of the current-timer-interval when
192 * the interrupt is triggered.
193 * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
194 * @util_sel: Utilization Selector. Selects which of the workload approximations
195 * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
196 * specified utilization etc.), selects one of
197 * the 17 host configured values.
198 * 0-Virtual Path 0
199 * 1-Virtual Path 1
200 * ...
201 * 16-Virtual Path 16
202 * 17-Legacy Tx network utilization, provided by TPA
203 * 18-Legacy Rx network utilization, provided by FAU
204 * 19-Average of legacy Rx and Tx utilization calculated from link
205 * utilization values.
206 * 20-31-Invalid configurations
207 * 32-Host utilization for Virtual Path 0
208 * 33-Host utilization for Virtual Path 1
209 * ...
210 * 48-Host utilization for Virtual Path 16
211 * 49-Legacy Tx network utilization, provided by TPA
212 * 50-Legacy Rx network utilization, provided by FAU
213 * 51-Average of legacy Rx and Tx utilization calculated from
214 * link utilization values.
215 * 52-63-Invalid configurations
216 * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
217 * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
218 * to 1 enables counting of TxD0 returns (signalled by PCC's),
219 * towards utilization event count values.
220 * @urange_a: Defines the upper limit (in percent) for this utilization range
221 * to be active. This range is considered active
222 * if 0 <= UTIL <= URNG_A
223 * and the UEC_A field (below) is non-zero.
224 * @uec_a: Utilization Event Count A. If this range is active, the adapter will
225 * wait until UEC_A events have occurred on the interrupt before
226 * generating an interrupt.
227 * @urange_b: Link utilization range B.
228 * @uec_b: Utilization Event Count B.
229 * @urange_c: Link utilization range C.
230 * @uec_c: Utilization Event Count C.
231 * @urange_d: Link utilization range D.
232 * @uec_d: Utilization Event Count D.
233 * Traffic Interrupt Controller Module interrupt configuration.
234 */
235struct vxge_hw_tim_intr_config {
236
237 u32 intr_enable;
238#define VXGE_HW_TIM_INTR_ENABLE 1
239#define VXGE_HW_TIM_INTR_DISABLE 0
240#define VXGE_HW_TIM_INTR_DEFAULT 0
241
242 u32 btimer_val;
243#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
244#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
245#define VXGE_HW_USE_FLASH_DEFAULT 0xffffffff
246
247 u32 timer_ac_en;
248#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
249#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
250
251 u32 timer_ci_en;
252#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
253#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
254
255 u32 timer_ri_en;
256#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
257#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
258
259 u32 rtimer_val;
260#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
261#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
262
263 u32 util_sel;
264#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
265#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
266#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
267#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
268
269 u32 ltimer_val;
270#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
271#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
272
273 /* Line utilization interrupts */
274 u32 urange_a;
275#define VXGE_HW_MIN_TIM_URANGE_A 0
276#define VXGE_HW_MAX_TIM_URANGE_A 100
277
278 u32 uec_a;
279#define VXGE_HW_MIN_TIM_UEC_A 0
280#define VXGE_HW_MAX_TIM_UEC_A 65535
281
282 u32 urange_b;
283#define VXGE_HW_MIN_TIM_URANGE_B 0
284#define VXGE_HW_MAX_TIM_URANGE_B 100
285
286 u32 uec_b;
287#define VXGE_HW_MIN_TIM_UEC_B 0
288#define VXGE_HW_MAX_TIM_UEC_B 65535
289
290 u32 urange_c;
291#define VXGE_HW_MIN_TIM_URANGE_C 0
292#define VXGE_HW_MAX_TIM_URANGE_C 100
293
294 u32 uec_c;
295#define VXGE_HW_MIN_TIM_UEC_C 0
296#define VXGE_HW_MAX_TIM_UEC_C 65535
297
298 u32 uec_d;
299#define VXGE_HW_MIN_TIM_UEC_D 0
300#define VXGE_HW_MAX_TIM_UEC_D 65535
301};
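A hedged example of filling this structure for a receive-side TIM: all field and macro names come from the definition above, while the numeric values are illustrative assumptions rather than recommended settings.

static void example_fill_rti(struct vxge_hw_tim_intr_config *rti)
{
	rti->intr_enable = VXGE_HW_TIM_INTR_ENABLE;
	rti->btimer_val  = 250;		/* units of 272 ns (assumed value) */
	rti->timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE;
	rti->timer_ci_en = VXGE_HW_TIM_TIMER_CI_DISABLE;
	rti->timer_ri_en = VXGE_HW_TIM_TIMER_RI_DISABLE;
	rti->rtimer_val  = 0;
	rti->util_sel    = VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
	rti->ltimer_val  = 100;		/* units of 272 ns (assumed value) */

	/* fire per event at low utilization, batch more as the link
	 * gets busier (assumed policy) */
	rti->urange_a = 5;	rti->uec_a = 1;
	rti->urange_b = 10;	rti->uec_b = 2;
	rti->urange_c = 50;	rti->uec_c = 4;
	rti->uec_d = 8;
}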
302
303#define VXGE_HW_STATS_OP_READ 0
304#define VXGE_HW_STATS_OP_CLEAR_STAT 1
305#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
306#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
307#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
308
309#define VXGE_HW_STATS_LOC_AGGR 17
310#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
311
312#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
313#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
314
315#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
316#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
317 vxge_bVALn(bits, 0, 32)
318
319#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
320 vxge_bVALn(bits, 32, 32)
321
322#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
323#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
324 vxge_bVALn(bits, 0, 32)
325
326#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
327 vxge_bVALn(bits, 32, 32)
328
329/**
330 * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
331 *
332 * @tx_frms: Count of data frames transmitted on this Aggregator on all
333 * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
334 * However, does include frames discarded by the Distribution
335 * function.
336 * @tx_data_octets: Count of data and padding octets of frames transmitted
337 * on this Aggregator on all its Aggregation ports. Does not include
338 * octets of LACPDUs or Marker PDUs. However, does include octets of
339 * frames discarded by the Distribution function.
340 * @tx_mcast_frms: Count of data frames transmitted (to a group destination
341 * address other than the broadcast address) on this Aggregator on
342 * all its Aggregation ports. Does not include LACPDUs or Marker
343 * PDUs. However, does include frames discarded by the Distribution
344 * function.
345 * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
346 * on all its Aggregation ports. Does not include LACPDUs or Marker
347 * PDUs. However, does include frames discarded by the Distribution
348 * function.
349 * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
350 * that are discarded by the Distribution function. This occurs when
351 * conversations are allocated to different ports and have to be
352 * flushed on old ports
353 * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
354 * experience transmission errors on its Aggregation ports.
355 * @rx_frms: Count of data frames received on this Aggregator on all its
356 * Aggregation ports. Does not include LACPDUs or Marker PDUs.
357 * Also, does not include frames discarded by the Collection
358 * function.
359 * @rx_data_octets: Count of data and padding octets of frames received on this
360 * Aggregator on all its Aggregation ports. Does not include octets
361 * of LACPDUs or Marker PDUs. Also, does not include
362 * octets of frames
363 * discarded by the Collection function.
364 * @rx_mcast_frms: Count of data frames received (from a group destination
365 * address other than the broadcast address) on this Aggregator on
366 * all its Aggregation ports. Does not include LACPDUs or Marker
367 * PDUs. Also, does not include frames discarded by the Collection
368 * function.
369 * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
370 * all its Aggregation ports. Does not include LACPDUs or Marker
371 * PDUs. Also, does not include frames discarded by the Collection
372 * function.
373 * @rx_discarded_frms: Count of data frames received on this Aggregator that are
374 * discarded by the Collection function because the Collection
375 * function was disabled on the port on which the frames were received.
376 * @rx_errored_frms: Count of data frames received on this Aggregator that are
377 * discarded by its Aggregation ports, or are discarded by the
378 * Collection function of the Aggregator, or that are discarded by
379 * the Aggregator due to detection of an illegal Slow Protocols PDU.
380 * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
381 * that are discarded by its Aggregation ports due to detection of
382 * an unknown Slow Protocols PDU.
383 *
384 * Per aggregator XMAC RX statistics.
385 */
386struct vxge_hw_xmac_aggr_stats {
387/*0x000*/ u64 tx_frms;
388/*0x008*/ u64 tx_data_octets;
389/*0x010*/ u64 tx_mcast_frms;
390/*0x018*/ u64 tx_bcast_frms;
391/*0x020*/ u64 tx_discarded_frms;
392/*0x028*/ u64 tx_errored_frms;
393/*0x030*/ u64 rx_frms;
394/*0x038*/ u64 rx_data_octets;
395/*0x040*/ u64 rx_mcast_frms;
396/*0x048*/ u64 rx_bcast_frms;
397/*0x050*/ u64 rx_discarded_frms;
398/*0x058*/ u64 rx_errored_frms;
399/*0x060*/ u64 rx_unknown_slow_proto_frms;
400} __packed;
401
402/**
403 * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
404 *
405 * @tx_ttl_frms: Count of successfully transmitted MAC frames
406 * @tx_ttl_octets: Count of total octets of transmitted frames, not including
407 * framing characters (i.e. less framing bits). To determine the
408 * total octets of transmitted frames, including framing characters,
409 * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
410 * otherwise configured, this stat only counts frames that have
411 * 8 bytes of preamble for each frame). This stat can be configured
412 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
413 * including the preamble octets.
414 * @tx_data_octets: Count of data and padding octets of successfully transmitted
415 * frames.
416 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
417 * other than the broadcast address.
418 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
419 * group address.
420 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
421 * Includes discarded frames that are not sent to the network.
422 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
423 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
424 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
425 * are passed to the network.
426 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
427 * due to problems within ICMP.
428 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
429 * containing retransmitted octets.
430 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
431 * @tx_udp: Count of transmitted UDP datagrams.
432 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
433 * generally occurs when a packet is corrupt somehow, including
434 * packets that have IP version mismatches, invalid Layer 2 control
435 * fields, etc. L3/L4 checksums are not offloaded, but the packet
436 * is still transmitted.
437 * @tx_unknown_protocol: Increments when the TPA encounters an unknown
438 * protocol, such as a new IPv6 extension header, or an unsupported
439 * Routing Type. The packet still has a checksum calculated but it
440 * may be incorrect.
441 * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
442 * Since, the only control frames supported by this device are
443 * PAUSE frames, this register is a count of all transmitted MAC
444 * control frames.
445 * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
446 * on this Aggregation port.
447 * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
448 * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
449 * the network. Increments because of:
450 * 1) An internal processing error
451 * (such as an uncorrectable ECC error). 2) A frame parsing error
452 * during IP checksum calculation.
453 * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
454 * Aggregation port.
455 * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
456 * characters that match a pattern that is programmable through
457 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
458 * is set to /T/ (i.e. the terminate character), thus the statistic
459 * tracks the number of transmitted Terminate characters.
460 * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
461 * characters that match a pattern that is programmable through
462 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
463 * is set to /S/ (i.e. the start character),
464 * thus the statistic tracks
465 * the number of transmitted Start characters.
466 * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
467 * columns that match a pattern that is programmable through register
468 * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
469 * to 4 x /E/ (i.e. a column containing all error characters), thus
470 * the statistic tracks the number of Error columns transmitted at
471 * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
472 * set to 1, then this stat increments when COLUMN2 is found within
473 * 'n' clocks after COLUMN1. Here, 'n' is defined by
474 * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
475 * to 0, then it means to search anywhere for COLUMN2).
476 * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
477 * columns that match a pattern that is programmable through register
478 * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
479 * to 4 x /I/ (i.e. a column containing all idle characters),
480 * thus the statistic tracks the number of transmitted Idle columns.
481 * @tx_any_err_frms: Count of transmitted frames containing any error that
482 * prevents them from being passed to the network. Increments if
483 * there is an ECC while reading the frame out of the transmit
484 * buffer. Also increments if the transmit protocol assist (TPA)
485 * block determines that the frame should not be sent.
486 * @tx_drop_frms: Count of frames that could not be sent for no other reason
487 * than internal MAC processing. Increments once whenever the
488 * transmit buffer is flushed (due to an ECC error on a memory
489 * descriptor).
490 * @rx_ttl_frms: Count of total received MAC frames, including frames received
491 * with frame-too-long, FCS, or length errors. This stat can be
492 * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
493 * everything, even "frames" as small as one byte of preamble.
494 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
495 * frames received with frame-too-long, FCS, or length errors.
496 * @rx_offload_frms: Count of offloaded received frames that are passed to
497 * the host.
498 * @rx_ttl_octets: Count of total octets of received frames, not including
499 * framing characters (i.e. less framing bits). To determine the
500 * total octets of received frames, including framing characters,
501 * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
502 * otherwise configured, this stat only counts frames that have 8
503 * bytes of preamble for each frame). This stat can be configured
504 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
505 * even the preamble octets of "frames" as small as one byte of preamble.
506 * @rx_data_octets: Count of data and padding octets of successfully received
507 * frames. Does not include frames received with frame-too-long,
508 * FCS, or length errors.
509 * @rx_offload_octets: Count of total octets, not including framing
510 * characters, of offloaded received frames that are passed
511 * to the host.
512 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
513 * nonbroadcast group address. Does not include frames received
514 * with frame-too-long, FCS, or length errors.
515 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
516 * the broadcast group address. Does not include frames received
517 * with frame-too-long, FCS, or length errors.
518 * @rx_accepted_ucast_frms: Count of successfully received frames containing
519 * a unicast address. Only includes frames that are passed to
520 * the system.
521 * @rx_accepted_nucast_frms: Count of successfully received frames containing
522 * a non-unicast (broadcast or multicast) address. Only includes
523 * frames that are passed to the system. Could include, for instance,
524 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
525 * register is set to pass FCS-errored frames to the host.
526 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
527 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
528 * + 18 bytes (+ 22 bytes if VLAN-tagged).
529 * @rx_usized_frms: Count of received frames of length (including FCS, but not
530 * framing bits) less than 64 octets, that are otherwise well-formed.
531 * In other words, counts runts.
532 * @rx_osized_frms: Count of received frames of length (including FCS, but not
533 * framing bits) more than 1518 octets, that are otherwise
534 * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
535 * is set to 1, then "more than 1518 octets" becomes "more than 1518
536 * (1522 if VLAN-tagged) octets".
537 * @rx_frag_frms: Count of received frames of length (including FCS, but not
538 * framing bits) less than 64 octets that had bad FCS. In other
539 * words, counts fragments.
540 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
541 * framing bits) more than 1518 octets that had bad FCS. In other
542 * words, counts jabbers. Note: If register
543 * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
544 * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
545 * octets".
546 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
547 * FCS, but not framing bits) of exactly 64 octets. Includes frames
548 * received with frame-too-long, FCS, or length errors.
549 * @rx_ttl_65_127_frms: Count of total received MAC frames with length
550 * (including FCS, but not framing bits) of between 65 and 127
551 * octets inclusive. Includes frames received with frame-too-long,
552 * FCS, or length errors.
553 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
554 * (including FCS, but not framing bits) of between 128 and 255
555 * octets inclusive. Includes frames received with frame-too-long,
556 * FCS, or length errors.
557 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
558 * (including FCS, but not framing bits) of between 256 and 511
559 * octets inclusive. Includes frames received with frame-too-long,
560 * FCS, or length errors.
561 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
562 * (including FCS, but not framing bits) of between 512 and 1023
563 * octets inclusive. Includes frames received with frame-too-long,
564 * FCS, or length errors.
565 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
566 * (including FCS, but not framing bits) of between 1024 and 1518
567 * octets inclusive. Includes frames received with frame-too-long,
568 * FCS, or length errors.
569 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
570 * (including FCS, but not framing bits) of between 1519 and 4095
571 * octets inclusive. Includes frames received with frame-too-long,
572 * FCS, or length errors.
573 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
574 * (including FCS, but not framing bits) of between 4096 and 8191
575 * octets inclusive. Includes frames received with frame-too-long,
576 * FCS, or length errors.
577 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
578 * (including FCS, but not framing bits) of between 8192 and
579 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
580 * with frame-too-long, FCS, or length errors.
581 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
582 * (including FCS, but not framing bits) exceeding
583 * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
584 * Includes frames received with frame-too-long,
585 * FCS, or length errors.
586 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
587 * @rx_accepted_ip: Count of received IP datagrams that
588 * are passed to the system.
589 * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
590 * errored IP datagrams.
591 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
592 * bad IP checksum.
593 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
594 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
595 * Note: This stat contains a count of all received TCP segments,
596 * regardless of whether or not they pertain to an established
597 * connection.
598 * @rx_udp: Count of received UDP datagrams.
599 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
600 * bad TCP checksum.
601 * @rx_pause_count: Count of number of pause quanta that the MAC has been in
602 * the paused state. Recall, one pause quantum equates to 512
603 * bit times.
604 * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
605 * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
606 * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
607 * this register is a count of all received MAC control frames.
608 * Note: This stat may be configured to count all layer 2 errors
609 * (i.e. length errors and FCS errors).
610 * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
611 * not include frames received with frame-too-long or
612 * frame-too-short error.
613 * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
614 * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
615 * for VLAN-tagged frames), inclusive, that does not match the
616 * number of data octets (including pad) received. Also contains
617 * a count of received frames with a length/type field less than
618 * 46 (42 for VLAN-tagged frames) and the number of data octets
619 * (including pad) received is greater than 46 (42 for VLAN-tagged
620 * frames).
621 * @rx_out_rng_len_err_frms: Count of received frames with length/type field
622 * between 1501 and 1535 decimal, inclusive.
623 * @rx_drop_frms: Count of received frames that could not be passed to the host.
624 * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
625 * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
626 * for a list of reasons. Because the RMAC drops one frame at a time,
627 * this stat also indicates the number of drop events.
628 * @rx_discarded_frms: Count of received frames containing
629 * any error that prevents
630 * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
631 * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
632 * reasons.
633 * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
634 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
635 * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
636 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
637 * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
638 * port.
639 * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
640 * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
641 * that carry the Slow Protocols EtherType, but contain an unknown
642 * PDU. Or frames that contain the Slow Protocols group MAC address,
643 * but do not carry the Slow Protocols EtherType.
644 * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
645 * this Aggregation port.
646 * @rx_fcs_discard: Count of received frames that are discarded because the
647 * FCS check failed.
648 * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
649 * that carry the Slow Protocols EtherType, but contain a badly
650 * formed PDU. Or frames that carry the Slow Protocols EtherType,
651 * but contain an illegal value of Protocol Subtype.
652 * @rx_switch_discard: Count of received frames that are discarded by the
653 * internal switch because they did not have an entry in the
654 * Filtering Database. This includes frames that had an invalid
655 * destination MAC address or VLAN ID. It also includes frames that are
656 * discarded because they did not satisfy the length requirements
657 * of the target VPATH.
658 * @rx_len_discard: Count of received frames that are discarded because of an
659 * invalid frame length (includes fragments, oversized frames and
660 * mismatch between frame length and length/type field). This stat
661 * can be configured
662 * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
663 * @rx_rpa_discard: Count of received frames that were discarded because the
664 * receive protocol assist (RPA) discovered an error in the frame
665 * or was unable to parse the frame.
666 * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
667 * Link Aggregation Control Protocol (LACP) frames, etc.) that are
668 * discarded.
669 * @rx_rts_discard: Count of received frames that are discarded by the receive
670 * traffic steering (RTS) logic. Includes those frames discarded
671 * because the SSC response contradicted the switch table, because
672 * the SSC timed out, or because the target queue could not fit the
673 * frame.
674 * @rx_trash_discard: Count of received frames that are discarded because
675 * receive traffic steering (RTS) steered the frame to the trash
676 * queue.
677 * @rx_buff_full_discard: Count of received frames that are discarded because
678 * internal buffers are full. Includes frames discarded because the
679 * RTS logic is waiting for an SSC lookup that has no timeout bound.
680 * Also, includes frames that are dropped because the MAC2FAU buffer
681 * is nearly full -- this can happen if the external receive buffer
682 * is full and the receive path is backing up.
683 * @rx_red_discard: Count of received frames that are discarded because of RED
684 * (Random Early Discard).
685 * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
686 * characters occurring between times of normal data transmission
687 * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
688 * incremented when either -
689 * 1) The Reconciliation Sublayer (RS) is expecting one control
690 * character and gets another (i.e. is expecting a Start
691 * character, but gets another control character).
692 * 2) Start control character is not in lane 0
693 * Only increments the count by one for each XGMII column.
694 * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
695 * during normal data transmission. If the Reconciliation Sublayer
696 * (RS) receives a control character, other than a terminate control
697 * character, during receipt of data octets then this register is
698 * incremented. Also increments if the start frame delimiter is not
699 * found in the correct location. Only increments the count by one
700 * for each XGMII column.
701 * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
702 * that match a pattern that is programmable through register
703 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
704 * to /E/ (i.e. the error character), thus the statistic tracks the
705 * number of Error characters received at any time.
706 * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
707 * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
708 * Only includes symbol errors that are observed between the XGMII
709 * Start Frame Delimiter and End Frame Delimiter, inclusive. And
710 * only increments the count by one for each frame.
711 * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
712 * that match a pattern that is programmable through register
713 * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
714 * to 4 x /E/ (i.e. a column containing all error characters), thus
715 * the statistic tracks the number of Error columns received at any
716 * time.
717 * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
718 * that match a pattern that is programmable through register
719 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
720 * to /E/ (i.e. the error character), thus the statistic tracks the
721 * number of Error characters received at any time.
722 * @rx_local_fault: Maintains a count of the number of times that link
723 * transitioned from "up" to "down" due to a local fault.
724 * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
725 * that match a pattern that is programmable through register
726 * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
727 * to 4 x /E/ (i.e. a column containing all error characters), thus
728 * the statistic tracks the number of Error columns received at any
729 * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
730 * to 1, then this stat increments when COLUMN2 is found within 'n'
731 * clocks after COLUMN1. Here, 'n' is defined by
732 * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
733 * 0, then it means to search anywhere for COLUMN2).
734 * @rx_jettison: Count of received frames that are jettisoned because internal
735 * buffers are full.
736 * @rx_remote_fault: Maintains a count of the number of times that link
737 * transitioned from "up" to "down" due to a remote fault.
738 *
739 * XMAC Port Statistics.
740 */
741struct vxge_hw_xmac_port_stats {
742/*0x000*/ u64 tx_ttl_frms;
743/*0x008*/ u64 tx_ttl_octets;
744/*0x010*/ u64 tx_data_octets;
745/*0x018*/ u64 tx_mcast_frms;
746/*0x020*/ u64 tx_bcast_frms;
747/*0x028*/ u64 tx_ucast_frms;
748/*0x030*/ u64 tx_tagged_frms;
749/*0x038*/ u64 tx_vld_ip;
750/*0x040*/ u64 tx_vld_ip_octets;
751/*0x048*/ u64 tx_icmp;
752/*0x050*/ u64 tx_tcp;
753/*0x058*/ u64 tx_rst_tcp;
754/*0x060*/ u64 tx_udp;
755/*0x068*/ u32 tx_parse_error;
756/*0x06c*/ u32 tx_unknown_protocol;
757/*0x070*/ u64 tx_pause_ctrl_frms;
758/*0x078*/ u32 tx_marker_pdu_frms;
759/*0x07c*/ u32 tx_lacpdu_frms;
760/*0x080*/ u32 tx_drop_ip;
761/*0x084*/ u32 tx_marker_resp_pdu_frms;
762/*0x088*/ u32 tx_xgmii_char2_match;
763/*0x08c*/ u32 tx_xgmii_char1_match;
764/*0x090*/ u32 tx_xgmii_column2_match;
765/*0x094*/ u32 tx_xgmii_column1_match;
766/*0x098*/ u32 unused1;
767/*0x09c*/ u16 tx_any_err_frms;
768/*0x09e*/ u16 tx_drop_frms;
769/*0x0a0*/ u64 rx_ttl_frms;
770/*0x0a8*/ u64 rx_vld_frms;
771/*0x0b0*/ u64 rx_offload_frms;
772/*0x0b8*/ u64 rx_ttl_octets;
773/*0x0c0*/ u64 rx_data_octets;
774/*0x0c8*/ u64 rx_offload_octets;
775/*0x0d0*/ u64 rx_vld_mcast_frms;
776/*0x0d8*/ u64 rx_vld_bcast_frms;
777/*0x0e0*/ u64 rx_accepted_ucast_frms;
778/*0x0e8*/ u64 rx_accepted_nucast_frms;
779/*0x0f0*/ u64 rx_tagged_frms;
780/*0x0f8*/ u64 rx_long_frms;
781/*0x100*/ u64 rx_usized_frms;
782/*0x108*/ u64 rx_osized_frms;
783/*0x110*/ u64 rx_frag_frms;
784/*0x118*/ u64 rx_jabber_frms;
785/*0x120*/ u64 rx_ttl_64_frms;
786/*0x128*/ u64 rx_ttl_65_127_frms;
787/*0x130*/ u64 rx_ttl_128_255_frms;
788/*0x138*/ u64 rx_ttl_256_511_frms;
789/*0x140*/ u64 rx_ttl_512_1023_frms;
790/*0x148*/ u64 rx_ttl_1024_1518_frms;
791/*0x150*/ u64 rx_ttl_1519_4095_frms;
792/*0x158*/ u64 rx_ttl_4096_8191_frms;
793/*0x160*/ u64 rx_ttl_8192_max_frms;
794/*0x168*/ u64 rx_ttl_gt_max_frms;
795/*0x170*/ u64 rx_ip;
796/*0x178*/ u64 rx_accepted_ip;
797/*0x180*/ u64 rx_ip_octets;
798/*0x188*/ u64 rx_err_ip;
799/*0x190*/ u64 rx_icmp;
800/*0x198*/ u64 rx_tcp;
801/*0x1a0*/ u64 rx_udp;
802/*0x1a8*/ u64 rx_err_tcp;
803/*0x1b0*/ u64 rx_pause_count;
804/*0x1b8*/ u64 rx_pause_ctrl_frms;
805/*0x1c0*/ u64 rx_unsup_ctrl_frms;
806/*0x1c8*/ u64 rx_fcs_err_frms;
807/*0x1d0*/ u64 rx_in_rng_len_err_frms;
808/*0x1d8*/ u64 rx_out_rng_len_err_frms;
809/*0x1e0*/ u64 rx_drop_frms;
810/*0x1e8*/ u64 rx_discarded_frms;
811/*0x1f0*/ u64 rx_drop_ip;
812/*0x1f8*/ u64 rx_drop_udp;
813/*0x200*/ u32 rx_marker_pdu_frms;
814/*0x204*/ u32 rx_lacpdu_frms;
815/*0x208*/ u32 rx_unknown_pdu_frms;
816/*0x20c*/ u32 rx_marker_resp_pdu_frms;
817/*0x210*/ u32 rx_fcs_discard;
818/*0x214*/ u32 rx_illegal_pdu_frms;
819/*0x218*/ u32 rx_switch_discard;
820/*0x21c*/ u32 rx_len_discard;
821/*0x220*/ u32 rx_rpa_discard;
822/*0x224*/ u32 rx_l2_mgmt_discard;
823/*0x228*/ u32 rx_rts_discard;
824/*0x22c*/ u32 rx_trash_discard;
825/*0x230*/ u32 rx_buff_full_discard;
826/*0x234*/ u32 rx_red_discard;
827/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
828/*0x23c*/ u32 rx_xgmii_data_err_cnt;
829/*0x240*/ u32 rx_xgmii_char1_match;
830/*0x244*/ u32 rx_xgmii_err_sym;
831/*0x248*/ u32 rx_xgmii_column1_match;
832/*0x24c*/ u32 rx_xgmii_char2_match;
833/*0x250*/ u32 rx_local_fault;
834/*0x254*/ u32 rx_xgmii_column2_match;
835/*0x258*/ u32 rx_jettison;
836/*0x25c*/ u32 rx_remote_fault;
837} __packed;
838
839/**
840 * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
841 *
842 * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
843 * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
844 * not including framing characters (i.e. less framing bits).
845 * To determine the total octets of transmitted frames, including
846 * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
847 * this stat (the device always prepends 8 bytes of preamble for
848 * each frame)
849 * @tx_data_octets: Count of data and padding octets of successfully transmitted
850 * frames.
851 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
852 * other than the broadcast address.
853 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
854 * group address.
855 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
856 * Includes discarded frames that are not sent to the network.
857 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
858 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
859 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
860 * are passed to the network.
861 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
862 * to problems within ICMP.
863 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
864 * containing retransmitted octets.
865 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
866 * @tx_udp: Count of transmitted UDP datagrams.
867 * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
868 * such as a new IPv6 extension header, or an unsupported Routing
869 * Type. The packet still has a checksum calculated but it may be
870 * incorrect.
871 * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
872 * to the network. Increments because of: 1) An internal processing
873 * error (such as an uncorrectable ECC error). 2) A frame parsing
874 * error during IP checksum calculation.
875 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
876 * generally occurs when a packet is corrupt somehow, including
877 * packets that have IP version mismatches, invalid Layer 2 control
878 * fields, etc. L3/L4 checksums are not offloaded, but the packet
879 * is still transmitted.
880 * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
881 * of transmitted TCP segments. Does not include segments containing
882 * retransmitted octets.
883 * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
884 * total number of segments retransmitted. Retransmitted segments
885 * that are sourced by the host are counted by the host.
886 * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
887 * of transmitted IP datagrams that could not be passed to the
888 * network.
889 *
890 * XMAC Vpath TX Statistics.
891 */
892struct vxge_hw_xmac_vpath_tx_stats {
893 u64 tx_ttl_eth_frms;
894 u64 tx_ttl_eth_octets;
895 u64 tx_data_octets;
896 u64 tx_mcast_frms;
897 u64 tx_bcast_frms;
898 u64 tx_ucast_frms;
899 u64 tx_tagged_frms;
900 u64 tx_vld_ip;
901 u64 tx_vld_ip_octets;
902 u64 tx_icmp;
903 u64 tx_tcp;
904 u64 tx_rst_tcp;
905 u64 tx_udp;
906 u32 tx_unknown_protocol;
907 u32 tx_lost_ip;
908 u32 unused1;
909 u32 tx_parse_error;
910 u64 tx_tcp_offload;
911 u64 tx_retx_tcp_offload;
912 u64 tx_lost_ip_offload;
913} __packed;
914
915/**
916 * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
917 *
918 * @rx_ttl_eth_frms: Count of successfully received MAC frames.
919 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
920 * frames received with frame-too-long, FCS, or length errors.
921 * @rx_offload_frms: Count of offloaded received frames that are passed to
922 * the host.
923 * @rx_ttl_eth_octets: Count of total octets of received frames, not including
924 * framing characters (i.e. less framing bits). Only counts octets
925 * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
926 * before FCS. To determine the total octets of received frames,
927 * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
928 * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
929 * that have the required 8 bytes of preamble).
930 * @rx_data_octets: Count of data and padding octets of successfully received
931 * frames. Does not include frames received with frame-too-long,
932 * FCS, or length errors.
933 * @rx_offload_octets: Count of total octets, not including framing characters,
934 * of offloaded received frames that are passed to the host.
935 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
936 * nonbroadcast group address. Does not include frames received with
937 * frame-too-long, FCS, or length errors.
938 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
939 * broadcast group address. Does not include frames received with
940 * frame-too-long, FCS, or length errors.
941 * @rx_accepted_ucast_frms: Count of successfully received frames containing
942 * a unicast address. Only includes frames that are passed to the
943 * system.
944 * @rx_accepted_nucast_frms: Count of successfully received frames containing
945 * a non-unicast (broadcast or multicast) address. Only includes
946 * frames that are passed to the system. Could include, for instance,
947 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
948 * register is set to pass FCS-errored frames to the host.
949 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
950 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
951 * + 18 bytes (+ 22 bytes if VLAN-tagged).
952 * @rx_usized_frms: Count of received frames of length (including FCS, but not
953 * framing bits) less than 64 octets, that are otherwise well-formed.
954 * In other words, counts runts.
955 * @rx_osized_frms: Count of received frames of length (including FCS, but not
956 * framing bits) more than 1518 octets, that are otherwise
957 * well-formed.
958 * @rx_frag_frms: Count of received frames of length (including FCS, but not
959 * framing bits) less than 64 octets that had bad FCS.
960 * In other words, counts fragments.
961 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
962 * framing bits) more than 1518 octets that had bad FCS. In other
963 * words, counts jabbers.
964 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
965 * FCS, but not framing bits) of exactly 64 octets. Includes frames
966 * received with frame-too-long, FCS, or length errors.
967 * @rx_ttl_65_127_frms: Count of total received MAC frames
968 * with length (including
969 * FCS, but not framing bits) of between 65 and 127 octets inclusive.
970 * Includes frames received with frame-too-long, FCS,
971 * or length errors.
972 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
973 * (including FCS, but not framing bits)
974 * of between 128 and 255 octets
975 * inclusive. Includes frames received with frame-too-long, FCS,
976 * or length errors.
977 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
978 * (including FCS, but not framing bits)
979 * of between 256 and 511 octets
980 * inclusive. Includes frames received with frame-too-long, FCS, or
981 * length errors.
982 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
983 * (including FCS, but not framing bits) of between 512 and 1023
984 * octets inclusive. Includes frames received with frame-too-long,
985 * FCS, or length errors.
986 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
987 * (including FCS, but not framing bits) of between 1024 and 1518
988 * octets inclusive. Includes frames received with frame-too-long,
989 * FCS, or length errors.
990 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
991 * (including FCS, but not framing bits) of between 1519 and 4095
992 * octets inclusive. Includes frames received with frame-too-long,
993 * FCS, or length errors.
994 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
995 * (including FCS, but not framing bits) of between 4096 and 8191
996 * octets inclusive. Includes frames received with frame-too-long,
997 * FCS, or length errors.
998 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
999 * (including FCS, but not framing bits) of between 8192 and
1000 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
1001 * with frame-too-long, FCS, or length errors.
1002 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
1003 * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
1004 * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
1005 * received with frame-too-long, FCS, or length errors.
1006 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
1007 * @rx_accepted_ip: Count of received IP datagrams that
1008 * are passed to the system.
1009 * @rx_ip_octets: Count of number of octets in received IP datagrams.
1010 * Includes errored IP datagrams.
1011 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
1012 * bad IP checksum.
1013 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
1014 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
1015 * Note: This stat contains a count of all received TCP segments,
1016 * regardless of whether or not they pertain to an established
1017 * connection.
1018 * @rx_udp: Count of received UDP datagrams.
1019 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
1020 * bad TCP checksum.
1021 * @rx_lost_frms: Count of received frames that could not be passed to the host.
1022 * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
1023 * for a list of reasons.
1024 * @rx_lost_ip: Count of received IP datagrams that could not be passed to
1025 * the host. See RX_LOST_FRMS for a list of reasons.
1026 * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
1027 * of received IP datagrams that could not be passed to the host.
1028 * See RX_LOST_FRMS for a list of reasons.
1029 * @rx_various_discard: Count of received frames that are discarded because
1030 * the target receive queue is full.
1031 * @rx_sleep_discard: Count of received frames that are discarded because the
1032 * target VPATH is asleep (a Wake-on-LAN magic packet can be used
1033 * to awaken the VPATH).
1034 * @rx_red_discard: Count of received frames that are discarded because of RED
1035 * (Random Early Discard).
1036 * @rx_queue_full_discard: Count of received frames that are discarded because
1037 * the target receive queue is full.
1038 * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
1039 *
1040 * XMAC Vpath RX Statistics.
1041 */
1042struct vxge_hw_xmac_vpath_rx_stats {
1043 u64 rx_ttl_eth_frms;
1044 u64 rx_vld_frms;
1045 u64 rx_offload_frms;
1046 u64 rx_ttl_eth_octets;
1047 u64 rx_data_octets;
1048 u64 rx_offload_octets;
1049 u64 rx_vld_mcast_frms;
1050 u64 rx_vld_bcast_frms;
1051 u64 rx_accepted_ucast_frms;
1052 u64 rx_accepted_nucast_frms;
1053 u64 rx_tagged_frms;
1054 u64 rx_long_frms;
1055 u64 rx_usized_frms;
1056 u64 rx_osized_frms;
1057 u64 rx_frag_frms;
1058 u64 rx_jabber_frms;
1059 u64 rx_ttl_64_frms;
1060 u64 rx_ttl_65_127_frms;
1061 u64 rx_ttl_128_255_frms;
1062 u64 rx_ttl_256_511_frms;
1063 u64 rx_ttl_512_1023_frms;
1064 u64 rx_ttl_1024_1518_frms;
1065 u64 rx_ttl_1519_4095_frms;
1066 u64 rx_ttl_4096_8191_frms;
1067 u64 rx_ttl_8192_max_frms;
1068 u64 rx_ttl_gt_max_frms;
1069 u64 rx_ip;
1070 u64 rx_accepted_ip;
1071 u64 rx_ip_octets;
1072 u64 rx_err_ip;
1073 u64 rx_icmp;
1074 u64 rx_tcp;
1075 u64 rx_udp;
1076 u64 rx_err_tcp;
1077 u64 rx_lost_frms;
1078 u64 rx_lost_ip;
1079 u64 rx_lost_ip_offload;
1080 u16 rx_various_discard;
1081 u16 rx_sleep_discard;
1082 u16 rx_red_discard;
1083 u16 rx_queue_full_discard;
1084 u64 rx_mpa_ok_frms;
1085} __packed;
1086
1087/**
1088 * struct vxge_hw_xmac_stats - XMAC Statistics
1089 *
1090 * @aggr_stats: Statistics on the aggregate ports (port 0, port 1)
1091 * @port_stats: Statistics on ports (wire 0, wire 1, lag)
1092 * @vpath_tx_stats: Per vpath XMAC TX stats
1093 * @vpath_rx_stats: Per vpath XMAC RX stats
1094 *
1095 * XMAC Statistics.
1096 */
1097struct vxge_hw_xmac_stats {
1098 struct vxge_hw_xmac_aggr_stats
1099 aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
1100 struct vxge_hw_xmac_port_stats
1101 port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
1102 struct vxge_hw_xmac_vpath_tx_stats
1103 vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1104 struct vxge_hw_xmac_vpath_rx_stats
1105 vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
1106};
1107
1108/**
1109 * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
1110 * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
1111 * for the given VPATH
1112 * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
1113 * @ini_num_cpl_rcvd: The number of PCI read completions received by the
1114 * PIC block
1115 * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
1116 * block to the host
1117 * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
1118 * the PIC block
1119 * @wrcrdtarb_xoff: TBD
1120 * @rdcrdtarb_xoff: TBD
1121 * @vpath_genstats_count0: TBD
1122 * @vpath_genstats_count1: TBD
1123 * @vpath_genstats_count2: TBD
1124 * @vpath_genstats_count3: TBD
1125 * @vpath_genstats_count4: TBD
1126 * @vpath_genstats_count5: TBD
1127 * @tx_stats: Transmit stats
1128 * @rx_stats: Receive stats
1129 * @prog_event_vnum1: Programmable statistic. Increments when internal logic
1130 * detects a certain event. See register
1131 * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
1132 * @prog_event_vnum0: Programmable statistic. Increments when internal logic
1133 * detects a certain event. See register
1134 * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
1135 * @prog_event_vnum3: Programmable statistic. Increments when internal logic
1136 * detects a certain event. See register
1137 * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
1138 * @prog_event_vnum2: Programmable statistic. Increments when internal logic
1139 * detects a certain event. See register
1140 * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
1141 * @rx_multi_cast_frame_discard: TBD
1142 * @rx_frm_transferred: TBD
1143 * @rxd_returned: TBD
1144 * @rx_mpa_len_fail_frms: Count of received frames
1145 * that fail the MPA length check
1146 * @rx_mpa_mrk_fail_frms: Count of received frames
1147 * that fail the MPA marker check
1148 * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
1149 * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
1150 * frame buffer (and subsequently to the host).
1151 * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
1152 * because the VPATH is in reset
1153 * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
1154 * whenever the received frame matches the VPATH's Wake-on-LAN
1155 * signature(s) CRC.
1156 * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
1157 * because the VPATH is in reset. Includes frames that are discarded
1158 * because the current VPIN does not match the VPIN of the frame
1159 *
1160 * Titan vpath hardware statistics.
1161 */
1162struct vxge_hw_vpath_stats_hw_info {
1163/*0x000*/ u32 ini_num_mwr_sent;
1164/*0x004*/ u32 unused1;
1165/*0x008*/ u32 ini_num_mrd_sent;
1166/*0x00c*/ u32 unused2;
1167/*0x010*/ u32 ini_num_cpl_rcvd;
1168/*0x014*/ u32 unused3;
1169/*0x018*/ u64 ini_num_mwr_byte_sent;
1170/*0x020*/ u64 ini_num_cpl_byte_rcvd;
1171/*0x028*/ u32 wrcrdtarb_xoff;
1172/*0x02c*/ u32 unused4;
1173/*0x030*/ u32 rdcrdtarb_xoff;
1174/*0x034*/ u32 unused5;
1175/*0x038*/ u32 vpath_genstats_count0;
1176/*0x03c*/ u32 vpath_genstats_count1;
1177/*0x040*/ u32 vpath_genstats_count2;
1178/*0x044*/ u32 vpath_genstats_count3;
1179/*0x048*/ u32 vpath_genstats_count4;
1180/*0x04c*/ u32 unused6;
1181/*0x050*/ u32 vpath_genstats_count5;
1182/*0x054*/ u32 unused7;
1183/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
1184/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
1185/*0x220*/ u64 unused9;
1186/*0x228*/ u32 prog_event_vnum1;
1187/*0x22c*/ u32 prog_event_vnum0;
1188/*0x230*/ u32 prog_event_vnum3;
1189/*0x234*/ u32 prog_event_vnum2;
1190/*0x238*/ u16 rx_multi_cast_frame_discard;
1191/*0x23a*/ u8 unused10[6];
1192/*0x240*/ u32 rx_frm_transferred;
1193/*0x244*/ u32 unused11;
1194/*0x248*/ u16 rxd_returned;
1195/*0x24a*/ u8 unused12[6];
1196/*0x252*/ u16 rx_mpa_len_fail_frms;
1197/*0x254*/ u16 rx_mpa_mrk_fail_frms;
1198/*0x256*/ u16 rx_mpa_crc_fail_frms;
1199/*0x258*/ u16 rx_permitted_frms;
1200/*0x25c*/ u64 rx_vp_reset_discarded_frms;
1201/*0x25e*/ u64 rx_wol_frms;
1202/*0x260*/ u64 tx_vp_reset_discarded_frms;
1203} __packed;
1204
1205
1206/**
1207 * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
1208 * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
1209 * by the adapter that were discarded because the VPATH is out of service
1210 * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
1211 * adapter that were discarded because the VPATH is out of service
1212 * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
1213 * the posted header credits for upstream PCI writes were depleted
1214 * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
1215 * the posted header credits for upstream PCI writes were depleted
1216 * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
1217 * the posted header credits for upstream PCI writes were depleted
1218 * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
1219 * the posted header credits for upstream PCI writes were depleted
1220 * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
1221 * the posted header credits for upstream PCI writes were depleted
1222 * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
1223 * the posted header credits for upstream PCI writes were depleted
1224 * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
1225 * the posted header credits for upstream PCI writes were depleted
1226 * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
1227 * the posted header credits for upstream PCI writes were depleted
1228 * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
1229 * the posted header credits for upstream PCI writes were depleted
1230 * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
1231 * the posted header credits for upstream PCI writes were depleted
1232 * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
1233 * the posted header credits for upstream PCI writes were depleted
1234 * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
1235 * the posted header credits for upstream PCI writes were depleted
1236 * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
1237 * the posted header credits for upstream PCI writes were depleted
1238 * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
1239 * the posted header credits for upstream PCI writes were depleted
1240 * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
1241 * the posted header credits for upstream PCI writes were depleted
1242 * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
1243 * the posted header credits for upstream PCI writes were depleted
1244 * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
1245 * the posted header credits for upstream PCI writes were depleted
1246 * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
1247 * the posted data credits for upstream PCI writes were depleted
1248 * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
1249 * the posted data credits for upstream PCI writes were depleted
1250 * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
1251 * the posted data credits for upstream PCI writes were depleted
1252 * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
1253 * the posted data credits for upstream PCI writes were depleted
1254 * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
1255 * the posted data credits for upstream PCI writes were depleted
1256 * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
1257 * the posted data credits for upstream PCI writes were depleted
1258 * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
1259 * the posted data credits for upstream PCI writes were depleted
1260 * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
1261 * the posted data credits for upstream PCI writes were depleted
1262 * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
1263 * the posted data credits for upstream PCI writes were depleted
1264 * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
1265 * the posted data credits for upstream PCI writes were depleted
1266 * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
1267 * the posted data credits for upstream PCI writes were depleted
1268 * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
1269 * the posted data credits for upstream PCI writes were depleted
1270 * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
1271 * the posted data credits for upstream PCI writes were depleted
1272 * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
1273 * the posted data credits for upstream PCI writes were depleted
1274 * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
1275 * the posted data credits for upstream PCI writes were depleted
1276 * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
1277 * the posted data credits for upstream PCI writes were depleted
1278 * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
1279 * the posted data credits for upstream PCI writes were depleted
1280 * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
1281 * the non-posted header credits for upstream PCI reads were depleted
1282 * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
1283 * the non-posted header credits for upstream PCI reads were depleted
1284 * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
1285 * the non-posted header credits for upstream PCI reads were depleted
1286 * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
1287 * the non-posted header credits for upstream PCI reads were depleted
1288 * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
1289 * the non-posted header credits for upstream PCI reads were depleted
1290 * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
1291 * the non-posted header credits for upstream PCI reads were depleted
1292 * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
1293 * the non-posted header credits for upstream PCI reads were depleted
1294 * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
1295 * the non-posted header credits for upstream PCI reads were depleted
1296 * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
1297 * the non-posted header credits for upstream PCI reads were depleted
1298 * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
1299 * the non-posted header credits for upstream PCI reads were depleted
1300 * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
1301 * the non-posted header credits for upstream PCI reads were depleted
1302 * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
1303 * the non-posted header credits for upstream PCI reads were depleted
1304 * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
1305 * the non-posted header credits for upstream PCI reads were depleted
1306 * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
1307 * the non-posted header credits for upstream PCI reads were depleted
1308 * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
1309 * the non-posted header credits for upstream PCI reads were depleted
1310 * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
1311 * the non-posted header credits for upstream PCI reads were depleted
1312 * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
1313 * the non-posted header credits for upstream PCI reads were depleted
1314 * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
1315 * the adapter that were discarded because the VPATH instance number does
1316 * not match
1317 * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
1318 * by the adapter that were discarded because the VPATH instance number
1319 * does not match
1320 * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
1321 * to the GENSTATS0_CFG for information on configuring this statistic
1322 * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
1323 * to the GENSTATS1_CFG for information on configuring this statistic
1324 * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
1325 * to the GENSTATS2_CFG for information on configuring this statistic
1326 * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
1327 * to the GENSTATS3_CFG for information on configuring this statistic
1328 * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
1329 * to the GENSTATS4_CFG for information on configuring this statistic
1330 * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
1331 * to the GENSTATS5_CFG for information on configuring this statistic
1332 * @pci.rstdrop_cpl 0x01c8 4
1333 * @pci.rstdrop_msg 0x01cc 4
1334 * @pci.rstdrop_client1 0x01d0 4
1335 * @pci.rstdrop_client0 0x01d4 4
1336 * @pci.rstdrop_client2 0x01d8 4
1337 * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
1338 * header credits were depleted
1339 * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
1340 * header credits were depleted
1341 * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
1342 * header credits were depleted
1343 * @pci.depl_cplh[vplane1] 0x01ea 2
1344 * @pci.depl_nph[vplane1] 0x01ec 2
1345 * @pci.depl_ph[vplane1] 0x01ee 2
1346 * @pci.depl_cplh[vplane2] 0x01f2 2
1347 * @pci.depl_nph[vplane2] 0x01f4 2
1348 * @pci.depl_ph[vplane2] 0x01f6 2
1349 * @pci.depl_cplh[vplane3] 0x01fa 2
1350 * @pci.depl_nph[vplane3] 0x01fc 2
1351 * @pci.depl_ph[vplane3] 0x01fe 2
1352 * @pci.depl_cplh[vplane4] 0x0202 2
1353 * @pci.depl_nph[vplane4] 0x0204 2
1354 * @pci.depl_ph[vplane4] 0x0206 2
1355 * @pci.depl_cplh[vplane5] 0x020a 2
1356 * @pci.depl_nph[vplane5] 0x020c 2
1357 * @pci.depl_ph[vplane5] 0x020e 2
1358 * @pci.depl_cplh[vplane6] 0x0212 2
1359 * @pci.depl_nph[vplane6] 0x0214 2
1360 * @pci.depl_ph[vplane6] 0x0216 2
1361 * @pci.depl_cplh[vplane7] 0x021a 2
1362 * @pci.depl_nph[vplane7] 0x021c 2
1363 * @pci.depl_ph[vplane7] 0x021e 2
1364 * @pci.depl_cplh[vplane8] 0x0222 2
1365 * @pci.depl_nph[vplane8] 0x0224 2
1366 * @pci.depl_ph[vplane8] 0x0226 2
1367 * @pci.depl_cplh[vplane9] 0x022a 2
1368 * @pci.depl_nph[vplane9] 0x022c 2
1369 * @pci.depl_ph[vplane9] 0x022e 2
1370 * @pci.depl_cplh[vplane10] 0x0232 2
1371 * @pci.depl_nph[vplane10] 0x0234 2
1372 * @pci.depl_ph[vplane10] 0x0236 2
1373 * @pci.depl_cplh[vplane11] 0x023a 2
1374 * @pci.depl_nph[vplane11] 0x023c 2
1375 * @pci.depl_ph[vplane11] 0x023e 2
1376 * @pci.depl_cplh[vplane12] 0x0242 2
1377 * @pci.depl_nph[vplane12] 0x0244 2
1378 * @pci.depl_ph[vplane12] 0x0246 2
1379 * @pci.depl_cplh[vplane13] 0x024a 2
1380 * @pci.depl_nph[vplane13] 0x024c 2
1381 * @pci.depl_ph[vplane13] 0x024e 2
1382 * @pci.depl_cplh[vplane14] 0x0252 2
1383 * @pci.depl_nph[vplane14] 0x0254 2
1384 * @pci.depl_ph[vplane14] 0x0256 2
1385 * @pci.depl_cplh[vplane15] 0x025a 2
1386 * @pci.depl_nph[vplane15] 0x025c 2
1387 * @pci.depl_ph[vplane15] 0x025e 2
1388 * @pci.depl_cplh[vplane16] 0x0262 2
1389 * @pci.depl_nph[vplane16] 0x0264 2
1390 * @pci.depl_ph[vplane16] 0x0266 2
1391 * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
1392 * credits were depleted
1393 * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
1394 * credits were depleted
1395 * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
1396 * credits were depleted
1397 * @pci.depl_cpld[vplane1] 0x0272 2
1398 * @pci.depl_npd[vplane1] 0x0274 2
1399 * @pci.depl_pd[vplane1] 0x0276 2
1400 * @pci.depl_cpld[vplane2] 0x027a 2
1401 * @pci.depl_npd[vplane2] 0x027c 2
1402 * @pci.depl_pd[vplane2] 0x027e 2
1403 * @pci.depl_cpld[vplane3] 0x0282 2
1404 * @pci.depl_npd[vplane3] 0x0284 2
1405 * @pci.depl_pd[vplane3] 0x0286 2
1406 * @pci.depl_cpld[vplane4] 0x028a 2
1407 * @pci.depl_npd[vplane4] 0x028c 2
1408 * @pci.depl_pd[vplane4] 0x028e 2
1409 * @pci.depl_cpld[vplane5] 0x0292 2
1410 * @pci.depl_npd[vplane5] 0x0294 2
1411 * @pci.depl_pd[vplane5] 0x0296 2
1412 * @pci.depl_cpld[vplane6] 0x029a 2
1413 * @pci.depl_npd[vplane6] 0x029c 2
1414 * @pci.depl_pd[vplane6] 0x029e 2
1415 * @pci.depl_cpld[vplane7] 0x02a2 2
1416 * @pci.depl_npd[vplane7] 0x02a4 2
1417 * @pci.depl_pd[vplane7] 0x02a6 2
1418 * @pci.depl_cpld[vplane8] 0x02aa 2
1419 * @pci.depl_npd[vplane8] 0x02ac 2
1420 * @pci.depl_pd[vplane8] 0x02ae 2
1421 * @pci.depl_cpld[vplane9] 0x02b2 2
1422 * @pci.depl_npd[vplane9] 0x02b4 2
1423 * @pci.depl_pd[vplane9] 0x02b6 2
1424 * @pci.depl_cpld[vplane10] 0x02ba 2
1425 * @pci.depl_npd[vplane10] 0x02bc 2
1426 * @pci.depl_pd[vplane10] 0x02be 2
1427 * @pci.depl_cpld[vplane11] 0x02c2 2
1428 * @pci.depl_npd[vplane11] 0x02c4 2
1429 * @pci.depl_pd[vplane11] 0x02c6 2
1430 * @pci.depl_cpld[vplane12] 0x02ca 2
1431 * @pci.depl_npd[vplane12] 0x02cc 2
1432 * @pci.depl_pd[vplane12] 0x02ce 2
1433 * @pci.depl_cpld[vplane13] 0x02d2 2
1434 * @pci.depl_npd[vplane13] 0x02d4 2
1435 * @pci.depl_pd[vplane13] 0x02d6 2
1436 * @pci.depl_cpld[vplane14] 0x02da 2
1437 * @pci.depl_npd[vplane14] 0x02dc 2
1438 * @pci.depl_pd[vplane14] 0x02de 2
1439 * @pci.depl_cpld[vplane15] 0x02e2 2
1440 * @pci.depl_npd[vplane15] 0x02e4 2
1441 * @pci.depl_pd[vplane15] 0x02e6 2
1442 * @pci.depl_cpld[vplane16] 0x02ea 2
1443 * @pci.depl_npd[vplane16] 0x02ec 2
1444 * @pci.depl_pd[vplane16] 0x02ee 2
1445 * @xgmac_port[3];
1446 * @xgmac_aggr[2];
1447 * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
1448 * Increments when internal logic detects a certain event. See register
1449 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
1450 * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
1451 * Increments when internal logic detects a certain event. See register
1452 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
1453 * @xgmac.orp_lro_events 0x0af8 8
1454 * @xgmac.orp_bs_events 0x0b00 8
1455 * @xgmac.orp_iwarp_events 0x0b08 8
1456 * @xgmac.tx_permitted_frms 0x0b14 4
1457 * @xgmac.port2_tx_any_frms 0x0b1d 1
1458 * @xgmac.port1_tx_any_frms 0x0b1e 1
1459 * @xgmac.port0_tx_any_frms 0x0b1f 1
1460 * @xgmac.port2_rx_any_frms 0x0b25 1
1461 * @xgmac.port1_rx_any_frms 0x0b26 1
1462 * @xgmac.port0_rx_any_frms 0x0b27 1
1463 *
1464 * Titan mrpcim hardware statistics.
1465 */
1466struct vxge_hw_device_stats_mrpcim_info {
1467/*0x0000*/ u32 pic_ini_rd_drop;
1468/*0x0004*/ u32 pic_ini_wr_drop;
1469/*0x0008*/ struct {
1470 /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
1471 /*0x0004*/ u32 unused1;
1472 } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
1473/*0x0090*/ struct {
1474 /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
1475 /*0x0004*/ u32 unused2;
1476 } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
1477/*0x0118*/ struct {
1478 /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
1479 /*0x0004*/ u32 unused3;
1480 } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
1481/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
1482/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
1483/*0x01a8*/ u32 pic_genstats_count0;
1484/*0x01ac*/ u32 pic_genstats_count1;
1485/*0x01b0*/ u32 pic_genstats_count2;
1486/*0x01b4*/ u32 pic_genstats_count3;
1487/*0x01b8*/ u32 pic_genstats_count4;
1488/*0x01bc*/ u32 unused4;
1489/*0x01c0*/ u32 pic_genstats_count5;
1490/*0x01c4*/ u32 unused5;
1491/*0x01c8*/ u32 pci_rstdrop_cpl;
1492/*0x01cc*/ u32 pci_rstdrop_msg;
1493/*0x01d0*/ u32 pci_rstdrop_client1;
1494/*0x01d4*/ u32 pci_rstdrop_client0;
1495/*0x01d8*/ u32 pci_rstdrop_client2;
1496/*0x01dc*/ u32 unused6;
1497/*0x01e0*/ struct {
1498 /*0x0000*/ u16 unused7;
1499 /*0x0002*/ u16 pci_depl_cplh;
1500 /*0x0004*/ u16 pci_depl_nph;
1501 /*0x0006*/ u16 pci_depl_ph;
1502 } pci_depl_h_vplane[17];
1503/*0x0268*/ struct {
1504 /*0x0000*/ u16 unused8;
1505 /*0x0002*/ u16 pci_depl_cpld;
1506 /*0x0004*/ u16 pci_depl_npd;
1507 /*0x0006*/ u16 pci_depl_pd;
1508 } pci_depl_d_vplane[17];
1509/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
1510/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
1511/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
1512/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
1513/*0x0af0*/ u64 unused7;
1514/*0x0af8*/ u64 unused8;
1515/*0x0b00*/ u64 unused9;
1516/*0x0b08*/ u64 unused10;
1517/*0x0b10*/ u32 unused11;
1518/*0x0b14*/ u32 xgmac_tx_permitted_frms;
1519/*0x0b18*/ u32 unused12;
1520/*0x0b1c*/ u8 unused13;
1521/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
1522/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
1523/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
1524/*0x0b20*/ u32 unused14;
1525/*0x0b24*/ u8 unused15;
1526/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
1527/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
1528/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
1529} __packed;
1530
1531/**
1532 * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
1533 * @vpath_info: VPath statistics
1534 * @vpath_info_sav: Vpath statistics saved
1535 *
1536 * Titan hardware statistics.
1537 */
1538struct vxge_hw_device_stats_hw_info {
1539 struct vxge_hw_vpath_stats_hw_info
1540 *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1541 struct vxge_hw_vpath_stats_hw_info
1542 vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
1543};
1544
1545/**
1546 * struct vxge_hw_vpath_stats_sw_common_info - HW common
1547 * statistics for queues.
1548 * @full_cnt: Number of times the queue was full
1549 * @usage_cnt: usage count.
1550 * @usage_max: Maximum usage
1551 * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
1552 * @total_compl_cnt: Total descriptor completion count.
1553 *
1554 * Hw queue counters
1555 * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
1556 * struct vxge_hw_vpath_stats_sw_ring_info{},
1557 */
1558struct vxge_hw_vpath_stats_sw_common_info {
1559 u32 full_cnt;
1560 u32 usage_cnt;
1561 u32 usage_max;
1562 u32 reserve_free_swaps_cnt;
1563 u32 total_compl_cnt;
1564};
1565
1566/**
1567 * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
1568 * @common_stats: Common counters for all queues
1569 * @total_posts: Total number of postings on the queue.
1570 * @total_buffers: Total number of buffers posted.
1571 * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
1572 * (index) in this array reflects the transfer code type, for instance
1573 * 0xA - "loss of link".
1574 * Value txd_t_code_err_cnt[i] reflects the
1575 * number of times the corresponding transfer code was encountered.
1576 *
1577 * HW fifo counters
1578 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1579 * struct vxge_hw_vpath_stats_sw_ring_info{},
1580 */
1581struct vxge_hw_vpath_stats_sw_fifo_info {
1582 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1583 u32 total_posts;
1584 u32 total_buffers;
1585 u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1586};
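
/*
 * Usage sketch (not part of the API): txd_t_code_err_cnt[] is indexed by the
 * transfer code itself, so a driver-side helper could report the non-zero
 * counters as shown below. The helper name and the pr_info() reporting are
 * illustrative assumptions only.
 *
 *	static void dump_fifo_tcode_errors(
 *			struct vxge_hw_vpath_stats_sw_fifo_info *fs)
 *	{
 *		u32 t_code;
 *
 *		for (t_code = 0; t_code < VXGE_HW_DTR_MAX_T_CODE; t_code++)
 *			if (fs->txd_t_code_err_cnt[t_code])
 *				pr_info("t_code 0x%x hit %u times\n",
 *					t_code, fs->txd_t_code_err_cnt[t_code]);
 *	}
 */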
1587
1588/**
1589 * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
1590 * @common_stats: Common counters for all queues
1591 * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
1592 * (index) in this array reflects the transfer code type,
1593 * for instance
1594 * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
1595 * Value rxd_t_code_err_cnt[i] reflects the
1596 * number of times the corresponding transfer code was encountered.
1597 *
1598 * HW ring counters
1599 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
1600 * struct vxge_hw_vpath_stats_sw_fifo_info{},
1601 */
1602struct vxge_hw_vpath_stats_sw_ring_info {
1603 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1604 u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1605
1606};
1607
1608/**
1609 * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
1610 * @unknown_alarms:
1611 * @network_sustained_fault:
1612 * @network_sustained_ok:
1613 * @kdfcctl_fifo0_overwrite:
1614 * @kdfcctl_fifo0_poison:
1615 * @kdfcctl_fifo0_dma_error:
1616 * @dblgen_fifo0_overflow:
1617 * @statsb_pif_chain_error:
1618 * @statsb_drop_timeout:
1619 * @target_illegal_access:
1620 * @ini_serr_det:
1621 * @prc_ring_bumps:
1622 * @prc_rxdcm_sc_err:
1623 * @prc_rxdcm_sc_abort:
1624 * @prc_quanta_size_err:
1625 *
1626 * HW vpath error statistics
1627 */
1628struct vxge_hw_vpath_stats_sw_err {
1629 u32 unknown_alarms;
1630 u32 network_sustained_fault;
1631 u32 network_sustained_ok;
1632 u32 kdfcctl_fifo0_overwrite;
1633 u32 kdfcctl_fifo0_poison;
1634 u32 kdfcctl_fifo0_dma_error;
1635 u32 dblgen_fifo0_overflow;
1636 u32 statsb_pif_chain_error;
1637 u32 statsb_drop_timeout;
1638 u32 target_illegal_access;
1639 u32 ini_serr_det;
1640 u32 prc_ring_bumps;
1641 u32 prc_rxdcm_sc_err;
1642 u32 prc_rxdcm_sc_abort;
1643 u32 prc_quanta_size_err;
1644};
1645
1646/**
1647 * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
1648 * @soft_reset_cnt: Number of times soft reset is done on this vpath.
1649 * @error_stats: error counters for the vpath
1650 * @ring_stats: counters for ring belonging to the vpath
1651 * @fifo_stats: counters for fifo belonging to the vpath
1652 *
1653 * HW vpath sw statistics
1654 * See also: struct vxge_hw_device_info{}.
1655 */
1656struct vxge_hw_vpath_stats_sw_info {
1657 u32 soft_reset_cnt;
1658 struct vxge_hw_vpath_stats_sw_err error_stats;
1659 struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
1660 struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
1661};
1662
1663/**
1664 * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
1665 *
1666 * @not_traffic_intr_cnt: Number of times the host was interrupted
1667 * without new completions.
1668 * "Non-traffic interrupt counter".
1669 * @traffic_intr_cnt: Number of traffic interrupts for the device.
1670 * @total_intr_cnt: Total number of interrupts for the device.
1671 * @total_intr_cnt == @traffic_intr_cnt +
1672 * @not_traffic_intr_cnt
1673 * @soft_reset_cnt: Number of times soft reset is done on this device.
1674 * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
1675 * HW per-device statistics.
1676 */
1677struct vxge_hw_device_stats_sw_info {
1678 u32 not_traffic_intr_cnt;
1679 u32 traffic_intr_cnt;
1680 u32 total_intr_cnt;
1681 u32 soft_reset_cnt;
1682 struct vxge_hw_vpath_stats_sw_info
1683 vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1684};
1685
1686/**
1687 * struct vxge_hw_device_stats_sw_err - HW device error statistics.
1688 * @vpath_alarms: Number of vpath alarms
1689 *
1690 * HW Device error stats
1691 */
1692struct vxge_hw_device_stats_sw_err {
1693 u32 vpath_alarms;
1694};
1695
1696/**
1697 * struct vxge_hw_device_stats - Contains HW per-device statistics,
1698 * including those maintained by the hardware.
1699 * @devh: HW device handle.
1700 * @dma_addr: DMA address of the %hw_info. Given to the device to fill in the stats.
1701 * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
1702 * space.
1703 * @hw_info_dma_acch: One more DMA handle used subsequently to free the
1704 * DMA object. Note that this and the previous handle have
1705 * physical meaning for Solaris; on Windows and Linux the
1706 * corresponding value is simply a pointer to the PCI device.
1707 *
1708 * @hw_dev_info_stats: Titan statistics maintained by the hardware.
1709 * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
1710 * of completions per interrupt.
1711 * @sw_dev_err_stats: HW's "soft" device error statistics.
1712 *
1713 * Structure-container of HW per-device statistics. Note that per-channel
1714 * statistics are kept in separate structures under HW's fifo and ring
1715 * channels.
1716 */
1717struct vxge_hw_device_stats {
1718 /* handles */
1719 struct __vxge_hw_device *devh;
1720
1721 /* HW device hardware statistics */
1722 struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
1723
1724 /* HW device "soft" stats */
1725 struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
1726 struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
1727
1728};
1729
1730enum vxge_hw_status vxge_hw_device_hw_stats_enable(
1731 struct __vxge_hw_device *devh);
1732
1733enum vxge_hw_status vxge_hw_device_stats_get(
1734 struct __vxge_hw_device *devh,
1735 struct vxge_hw_device_stats_hw_info *hw_stats);
1736
1737enum vxge_hw_status vxge_hw_driver_stats_get(
1738 struct __vxge_hw_device *devh,
1739 struct vxge_hw_device_stats_sw_info *sw_stats);
1740
1741enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
1742
1743enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
1744
1745enum vxge_hw_status
1746vxge_hw_mrpcim_stats_access(
1747 struct __vxge_hw_device *devh,
1748 u32 operation,
1749 u32 location,
1750 u32 offset,
1751 u64 *stat);
1752
1753enum vxge_hw_status
1754vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
1755 struct vxge_hw_xmac_aggr_stats *aggr_stats);
1756
1757enum vxge_hw_status
1758vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
1759 struct vxge_hw_xmac_port_stats *port_stats);
1760
1761enum vxge_hw_status
1762vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
1763 struct vxge_hw_xmac_stats *xmac_stats);
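
/*
 * Usage sketch: hardware statistics collection is enabled once with
 * vxge_hw_device_hw_stats_enable() and the counters are then pulled
 * periodically through the getters declared above. The helper below and its
 * pr_info() output are illustrative assumptions; the stats structures are
 * passed in because they are too large to live on the stack.
 *
 *	static void sample_device_stats(struct __vxge_hw_device *devh,
 *			struct vxge_hw_device_stats_hw_info *hw_stats,
 *			struct vxge_hw_device_stats_sw_info *sw_stats)
 *	{
 *		if (vxge_hw_device_stats_get(devh, hw_stats) != VXGE_HW_OK)
 *			return;
 *
 *		if (vxge_hw_driver_stats_get(devh, sw_stats) == VXGE_HW_OK)
 *			pr_info("total interrupts: %u\n",
 *				sw_stats->total_intr_cnt);
 *	}
 */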
1764
1765/**
1766 * enum vxge_hw_mgmt_reg_type - Register types.
1767 *
1768 * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
1769 * @vxge_hw_mgmt_reg_type_toc: TOC Registers
1770 * @vxge_hw_mgmt_reg_type_common: Common Registers
1771 * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
1772 * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
1773 * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
1774 * @vxge_hw_mgmt_reg_type_vpath: vpath registers
1775 *
1776 * Register type enumeration
1777 */
1778enum vxge_hw_mgmt_reg_type {
1779 vxge_hw_mgmt_reg_type_legacy = 0,
1780 vxge_hw_mgmt_reg_type_toc = 1,
1781 vxge_hw_mgmt_reg_type_common = 2,
1782 vxge_hw_mgmt_reg_type_mrpcim = 3,
1783 vxge_hw_mgmt_reg_type_srpcim = 4,
1784 vxge_hw_mgmt_reg_type_vpmgmt = 5,
1785 vxge_hw_mgmt_reg_type_vpath = 6
1786};
1787
1788enum vxge_hw_status
1789vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
1790 enum vxge_hw_mgmt_reg_type type,
1791 u32 index,
1792 u32 offset,
1793 u64 *value);
1794
1795enum vxge_hw_status
1796vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
1797 enum vxge_hw_mgmt_reg_type type,
1798 u32 index,
1799 u32 offset,
1800 u64 value);
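
/*
 * Usage sketch: vxge_hw_mgmt_reg_read()/vxge_hw_mgmt_reg_write() address a
 * register by register-block type, instance index and byte offset. The
 * devh/vp_id variables and the zero offset below are placeholders for
 * illustration, not real register locations.
 *
 *	u64 val;
 *
 *	if (vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
 *				  vp_id, 0x0, &val) == VXGE_HW_OK)
 *		vxge_hw_mgmt_reg_write(devh, vxge_hw_mgmt_reg_type_vpath,
 *				       vp_id, 0x0, val);
 */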
1801
1802/**
1803 * enum vxge_hw_rxd_state - Descriptor (RXD) state.
1804 * @VXGE_HW_RXD_STATE_NONE: Invalid state.
1805 * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
1806 * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
1807 * device.
1808 * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
1809 * filling-in and posting later.
1810 *
1811 * Titan/HW descriptor states.
1812 *
1813 */
1814enum vxge_hw_rxd_state {
1815 VXGE_HW_RXD_STATE_NONE = 0,
1816 VXGE_HW_RXD_STATE_AVAIL = 1,
1817 VXGE_HW_RXD_STATE_POSTED = 2,
1818 VXGE_HW_RXD_STATE_FREED = 3
1819};
1820
1821/**
1822 * struct vxge_hw_ring_rxd_info - Extended information associated with a
1823 * completed ring descriptor.
1824 * @syn_flag: SYN flag
1825 * @is_icmp: Is ICMP
1826 * @fast_path_eligible: Fast Path Eligible flag
1827 * @l3_cksum_valid: Set if the L3 (IP) checksum is valid
1828 * @l3_cksum: Result of IP checksum check (by Titan hardware).
1829 * This field containing VXGE_HW_L3_CKSUM_OK would mean that
1830 * the checksum is correct, otherwise - the datagram is
1831 * corrupted.
1832 * @l4_cksum_valid: Set if the L4 (TCP/UDP) checksum is valid
1833 * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
1834 * This field containing VXGE_HW_L4_CKSUM_OK would mean that
1835 * the checksum is correct. Otherwise - the packet is
1836 * corrupted.
1837 * @frame: Zero or more of enum vxge_hw_frame_type flags.
1838 * See enum vxge_hw_frame_type{}.
1839 * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
1840 * various higher-layer protocols, including (but not restricted to)
1841 * TCP and UDP. See enum vxge_hw_frame_proto{}.
1842 * @is_vlan: If vlan tag is valid
1843 * @vlan: VLAN tag extracted from the received frame.
1844 * @rth_bucket: RTH bucket
1845 * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
1846 * has a matching entry in the Indirection table.
1847 * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
1848 * has a matching entry in the Socket Pair Direct Match table.
1849 * @rth_hash_type: RTH hash code of the function used to calculate the hash.
1850 * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Titan
1851 * hardware if RTH is enabled.
1852 */
1853struct vxge_hw_ring_rxd_info {
1854 u32 syn_flag;
1855 u32 is_icmp;
1856 u32 fast_path_eligible;
1857 u32 l3_cksum_valid;
1858 u32 l3_cksum;
1859 u32 l4_cksum_valid;
1860 u32 l4_cksum;
1861 u32 frame;
1862 u32 proto;
1863 u32 is_vlan;
1864 u32 vlan;
1865 u32 rth_bucket;
1866 u32 rth_it_hit;
1867 u32 rth_spdm_hit;
1868 u32 rth_hash_type;
1869 u32 rth_value;
1870};
1871
1872/**
1873 * enum vxge_hw_ring_hash_type - RTH hash types
1874 * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
1875 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
1876 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
1877 * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
1878 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
1879 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
1880 * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
1881 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
1882 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
1883 * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
1884 *
1885 * RTH hash types
1886 */
1887enum vxge_hw_ring_hash_type {
1888 VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
1889 VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
1890 VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
1891 VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
1892 VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
1893 VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
1894 VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
1895 VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
1896 VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
1897 VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
1898};
1899
1900enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1901 struct __vxge_hw_ring *ring_handle,
1902 void **rxdh);
1903
1904void
1905vxge_hw_ring_rxd_pre_post(
1906 struct __vxge_hw_ring *ring_handle,
1907 void *rxdh);
1908
1909void
1910vxge_hw_ring_rxd_post_post(
1911 struct __vxge_hw_ring *ring_handle,
1912 void *rxdh);
1913
1914enum vxge_hw_status
1915vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
1916
1917void
1918vxge_hw_ring_rxd_post_post_wmb(
1919 struct __vxge_hw_ring *ring_handle,
1920 void *rxdh);
1921
1922void vxge_hw_ring_rxd_post(
1923 struct __vxge_hw_ring *ring_handle,
1924 void *rxdh);
1925
1926enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1927 struct __vxge_hw_ring *ring_handle,
1928 void **rxdh,
1929 u8 *t_code);
1930
1931enum vxge_hw_status vxge_hw_ring_handle_tcode(
1932 struct __vxge_hw_ring *ring_handle,
1933 void *rxdh,
1934 u8 t_code);
1935
1936void vxge_hw_ring_rxd_free(
1937 struct __vxge_hw_ring *ring_handle,
1938 void *rxdh);
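
/*
 * Usage sketch of the receive completion path: walk the completed RxDs, let
 * the HW layer account for any error transfer code, hand good buffers to the
 * driver, and release each descriptor. process_rx_buffer() is a stand-in for
 * the driver's own buffer handling, and treating a zero t_code as "no error"
 * is an assumption made for this sketch.
 *
 *	static void drain_ring(struct __vxge_hw_ring *ring)
 *	{
 *		void *rxdh;
 *		u8 t_code;
 *
 *		while (vxge_hw_ring_rxd_next_completed(ring, &rxdh,
 *						       &t_code) == VXGE_HW_OK) {
 *			if (t_code)
 *				vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *			else
 *				process_rx_buffer(rxdh);
 *
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *		}
 *	}
 */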
1939
1940/**
1941 * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
1942 * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
1943 * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
1944 * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
1945 * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
1946 * @VXGE_HW_FRAME_PROTO_TCP: TCP.
1947 * @VXGE_HW_FRAME_PROTO_UDP: UDP.
1948 * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
1949 *
1950 * Higher layer ethernet protocols and options.
1951 */
1952enum vxge_hw_frame_proto {
1953 VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
1954 VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
1955 VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
1956 VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
1957 VXGE_HW_FRAME_PROTO_TCP = 0x02,
1958 VXGE_HW_FRAME_PROTO_UDP = 0x01,
1959 VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
1960 VXGE_HW_FRAME_PROTO_UDP)
1961};
1962
1963/**
1964 * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
1965 * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
1966 * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
1967 * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
1968 * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
1969 *
1970 * These gather codes are used to indicate the position of a TxD in a TxD list
1971 */
1972enum vxge_hw_fifo_gather_code {
1973 VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
1974 VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
1975 VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
1976 VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
1977};
1978
1979/**
1980 * enum vxge_hw_fifo_tcode - tcodes used in fifo
1981 * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
1982 * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
1983 * frame data) returned with corrupt data.
1984 * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
1985 * with no data.
1986 * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
1987 * frame or LSO MSS that was too long (>9800B).
1988 * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
1989 * Offload operation, due to improper header template,
1990 * unsupported protocol, etc.
1991 * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
1992 * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
1993 * data buffer transfer errors are encountered (see below).
1994 * Otherwise it is set to 0.
1995 *
1996 * These tcodes are returned in various API for TxD status
1997 */
1998enum vxge_hw_fifo_tcode {
1999 VXGE_HW_FIFO_T_CODE_OK = 0x0,
2000 VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
2001 VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
2002 VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
2003 VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
2004 VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
2005 VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
2006};
2007
2008enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
2009 struct __vxge_hw_fifo *fifoh,
2010 void **txdlh,
2011 void **txdl_priv);
2012
2013void vxge_hw_fifo_txdl_buffer_set(
2014 struct __vxge_hw_fifo *fifo_handle,
2015 void *txdlh,
2016 u32 frag_idx,
2017 dma_addr_t dma_pointer,
2018 u32 size);
2019
2020void vxge_hw_fifo_txdl_post(
2021 struct __vxge_hw_fifo *fifo_handle,
2022 void *txdlh);
2023
2024u32 vxge_hw_fifo_free_txdl_count_get(
2025 struct __vxge_hw_fifo *fifo_handle);
2026
2027enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
2028 struct __vxge_hw_fifo *fifoh,
2029 void **txdlh,
2030 enum vxge_hw_fifo_tcode *t_code);
2031
2032enum vxge_hw_status vxge_hw_fifo_handle_tcode(
2033 struct __vxge_hw_fifo *fifoh,
2034 void *txdlh,
2035 enum vxge_hw_fifo_tcode t_code);
2036
2037void vxge_hw_fifo_txdl_free(
2038 struct __vxge_hw_fifo *fifoh,
2039 void *txdlh);
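
/*
 * Usage sketch of the transmit path: reserve a TxDL, attach one DMA-mapped
 * fragment, post it, and later reap completions. The dma_addr/len arguments
 * are assumed to come from the driver's own DMA mapping; error handling is
 * trimmed down for illustration.
 *
 *	static enum vxge_hw_status post_one_frame(struct __vxge_hw_fifo *fifo,
 *			dma_addr_t dma_addr, u32 len)
 *	{
 *		void *txdlh, *txdl_priv;
 *		enum vxge_hw_status status;
 *
 *		status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
 *		if (status != VXGE_HW_OK)
 *			return status;
 *
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *		return VXGE_HW_OK;
 *	}
 *
 *	static void reap_tx_completions(struct __vxge_hw_fifo *fifo)
 *	{
 *		void *txdlh;
 *		enum vxge_hw_fifo_tcode t_code;
 *
 *		while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh,
 *							&t_code) == VXGE_HW_OK) {
 *			if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *				vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *			vxge_hw_fifo_txdl_free(fifo, txdlh);
 *		}
 *	}
 */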
2040
2041/*
2042 * Device
2043 */
2044
2045#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2046#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2047#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
2048
2049/*
2050 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
2051 * @dma_addr: DMA (mapped) address of _this_ descriptor.
2052 * @dma_handle: DMA handle used to map the descriptor onto device.
2053 * @dma_offset: Descriptor's offset in the memory block. HW allocates
2054 * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
2055 * bytes. Each memblock is contiguous DMA-able memory. Each
2056 * memblock contains 1 or more 4KB RxD blocks visible to the
2057 * Titan hardware.
2058 * @dma_object: DMA address and handle of the memory block that contains
2059 * the descriptor. This member is used only in the "checked"
2060 * version of the HW (to enforce certain assertions);
2061 * otherwise it gets compiled out.
2062 * @allocated: True if the descriptor is reserved, false otherwise. Internal usage.
2063 *
2064 * Per-receive descriptor HW-private data. HW uses the space to keep DMA
2065 * information associated with the descriptor. Note that the driver can ask HW
2066 * to allocate additional per-descriptor space for its own (driver-specific)
2067 * purposes.
2068 */
2069struct __vxge_hw_ring_rxd_priv {
2070 dma_addr_t dma_addr;
2071 struct pci_dev *dma_handle;
2072 ptrdiff_t dma_offset;
2073#ifdef VXGE_DEBUG_ASSERT
2074 struct vxge_hw_mempool_dma *dma_object;
2075#endif
2076};
2077
2078/* ========================= RING PRIVATE API ============================= */
2079u64
2080__vxge_hw_ring_first_block_address_get(
2081 struct __vxge_hw_ring *ringh);
2082
2083enum vxge_hw_status
2084__vxge_hw_ring_create(
2085 struct __vxge_hw_vpath_handle *vpath_handle,
2086 struct vxge_hw_ring_attr *attr);
2087
2088enum vxge_hw_status
2089__vxge_hw_ring_abort(
2090 struct __vxge_hw_ring *ringh);
2091
2092enum vxge_hw_status
2093__vxge_hw_ring_reset(
2094 struct __vxge_hw_ring *ringh);
2095
2096enum vxge_hw_status
2097__vxge_hw_ring_delete(
2098 struct __vxge_hw_vpath_handle *vpath_handle);
2099
2100/* ========================= FIFO PRIVATE API ============================= */
2101
2102struct vxge_hw_fifo_attr;
2103
2104enum vxge_hw_status
2105__vxge_hw_fifo_create(
2106 struct __vxge_hw_vpath_handle *vpath_handle,
2107 struct vxge_hw_fifo_attr *attr);
2108
2109enum vxge_hw_status
2110__vxge_hw_fifo_abort(
2111 struct __vxge_hw_fifo *fifoh);
2112
2113enum vxge_hw_status
2114__vxge_hw_fifo_reset(
2115	struct __vxge_hw_fifo *fifoh);
2116
2117enum vxge_hw_status
2118__vxge_hw_fifo_delete(
2119 struct __vxge_hw_vpath_handle *vpath_handle);
2120
2121struct vxge_hw_mempool_cbs {
2122 void (*item_func_alloc)(
2123 struct vxge_hw_mempool *mempoolh,
2124 u32 memblock_index,
2125 struct vxge_hw_mempool_dma *dma_object,
2126 u32 index,
2127 u32 is_last);
2128};
2129
2130void
2131__vxge_hw_mempool_destroy(
2132 struct vxge_hw_mempool *mempool);
2133
2134#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2135 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2136
2137enum vxge_hw_status
2138__vxge_hw_vpath_rts_table_get(
2139 struct __vxge_hw_vpath_handle *vpath_handle,
2140 u32 action,
2141 u32 rts_table,
2142 u32 offset,
2143 u64 *data1,
2144 u64 *data2);
2145
2146enum vxge_hw_status
2147__vxge_hw_vpath_rts_table_set(
2148 struct __vxge_hw_vpath_handle *vpath_handle,
2149 u32 action,
2150 u32 rts_table,
2151 u32 offset,
2152 u64 data1,
2153 u64 data2);
2154
2155enum vxge_hw_status
2156__vxge_hw_vpath_reset(
2157 struct __vxge_hw_device *devh,
2158 u32 vp_id);
2159
2160enum vxge_hw_status
2161__vxge_hw_vpath_sw_reset(
2162 struct __vxge_hw_device *devh,
2163 u32 vp_id);
2164
2165enum vxge_hw_status
2166__vxge_hw_vpath_enable(
2167 struct __vxge_hw_device *devh,
2168 u32 vp_id);
2169
2170void
2171__vxge_hw_vpath_prc_configure(
2172 struct __vxge_hw_device *devh,
2173 u32 vp_id);
2174
2175enum vxge_hw_status
2176__vxge_hw_vpath_kdfc_configure(
2177 struct __vxge_hw_device *devh,
2178 u32 vp_id);
2179
2180enum vxge_hw_status
2181__vxge_hw_vpath_mac_configure(
2182 struct __vxge_hw_device *devh,
2183 u32 vp_id);
2184
2185enum vxge_hw_status
2186__vxge_hw_vpath_tim_configure(
2187 struct __vxge_hw_device *devh,
2188 u32 vp_id);
2189
2190enum vxge_hw_status
2191__vxge_hw_vpath_initialize(
2192 struct __vxge_hw_device *devh,
2193 u32 vp_id);
2194
2195enum vxge_hw_status
2196__vxge_hw_vp_initialize(
2197 struct __vxge_hw_device *devh,
2198 u32 vp_id,
2199 struct vxge_hw_vp_config *config);
2200
2201void
2202__vxge_hw_vp_terminate(
2203 struct __vxge_hw_device *devh,
2204 u32 vp_id);
2205
2206enum vxge_hw_status
2207__vxge_hw_vpath_alarm_process(
2208 struct __vxge_hw_virtualpath *vpath,
2209 u32 skip_alarms);
2210
2211void vxge_hw_device_intr_enable(
2212 struct __vxge_hw_device *devh);
2213
2214u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
2215
2216void vxge_hw_device_intr_disable(
2217 struct __vxge_hw_device *devh);
2218
2219void vxge_hw_device_mask_all(
2220 struct __vxge_hw_device *devh);
2221
2222void vxge_hw_device_unmask_all(
2223 struct __vxge_hw_device *devh);
2224
2225enum vxge_hw_status vxge_hw_device_begin_irq(
2226 struct __vxge_hw_device *devh,
2227 u32 skip_alarms,
2228 u64 *reason);
2229
2230void vxge_hw_device_clear_tx_rx(
2231 struct __vxge_hw_device *devh);
2232
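Taken together, the device-level calls above give an INTA top half the shape shown below. This is an illustrative sketch only: example_isr() and example_schedule_poll() are hypothetical, <linux/interrupt.h> is assumed for irqreturn_t, and the deferred servicing (NAPI or otherwise) is left to the driver:

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *devh = dev_id;
	u64 reason;

	/* 0: do not skip alarm processing; reason reports what fired */
	if (vxge_hw_device_begin_irq(devh, 0, &reason) != VXGE_HW_OK)
		return IRQ_NONE;	/* nothing pending for this device */

	vxge_hw_device_mask_all(devh);
	vxge_hw_device_clear_tx_rx(devh);
	example_schedule_poll(devh);	/* hypothetical; unmask again when done */
	return IRQ_HANDLED;
}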
2233/*
2234 * Virtual Paths
2235 */
2236
2237u32 vxge_hw_vpath_id(
2238 struct __vxge_hw_vpath_handle *vpath_handle);
2239
2240enum vxge_hw_vpath_mac_addr_add_mode {
2241 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
2242 VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
2243 VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
2244};
2245
2246enum vxge_hw_status
2247vxge_hw_vpath_mac_addr_add(
2248 struct __vxge_hw_vpath_handle *vpath_handle,
2249 u8 (macaddr)[ETH_ALEN],
2250 u8 (macaddr_mask)[ETH_ALEN],
2251 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2252
2253enum vxge_hw_status
2254vxge_hw_vpath_mac_addr_get(
2255 struct __vxge_hw_vpath_handle *vpath_handle,
2256 u8 (macaddr)[ETH_ALEN],
2257 u8 (macaddr_mask)[ETH_ALEN]);
2258
2259enum vxge_hw_status
2260vxge_hw_vpath_mac_addr_get_next(
2261 struct __vxge_hw_vpath_handle *vpath_handle,
2262 u8 (macaddr)[ETH_ALEN],
2263 u8 (macaddr_mask)[ETH_ALEN]);
2264
2265enum vxge_hw_status
2266vxge_hw_vpath_mac_addr_delete(
2267 struct __vxge_hw_vpath_handle *vpath_handle,
2268 u8 (macaddr)[ETH_ALEN],
2269 u8 (macaddr_mask)[ETH_ALEN]);
2270
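A small sketch of programming one unicast address through vxge_hw_vpath_mac_addr_add(). It is illustrative only: the all-ones mask meaning "match every address byte" and the choice of REPLACE_DUPLICATE are assumptions of the sketch, and <linux/etherdevice.h> / <linux/string.h> are assumed for ETH_ALEN, memcpy and memset:

static enum vxge_hw_status
example_add_mac(struct __vxge_hw_vpath_handle *vp, const u8 *addr)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];

	memcpy(macaddr, addr, ETH_ALEN);
	memset(macaddr_mask, 0xff, ETH_ALEN);	/* assumed: match all bytes */

	/* overwrite an existing entry rather than adding a duplicate */
	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
			VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE);
}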
2271enum vxge_hw_status
2272vxge_hw_vpath_vid_add(
2273 struct __vxge_hw_vpath_handle *vpath_handle,
2274 u64 vid);
2275
2276enum vxge_hw_status
2277vxge_hw_vpath_vid_get(
2278 struct __vxge_hw_vpath_handle *vpath_handle,
2279 u64 *vid);
2280
2281enum vxge_hw_status
2282vxge_hw_vpath_vid_get_next(
2283 struct __vxge_hw_vpath_handle *vpath_handle,
2284 u64 *vid);
2285
2286enum vxge_hw_status
2287vxge_hw_vpath_vid_delete(
2288 struct __vxge_hw_vpath_handle *vpath_handle,
2289 u64 vid);
2290
2291enum vxge_hw_status
2292vxge_hw_vpath_etype_add(
2293 struct __vxge_hw_vpath_handle *vpath_handle,
2294 u64 etype);
2295
2296enum vxge_hw_status
2297vxge_hw_vpath_etype_get(
2298 struct __vxge_hw_vpath_handle *vpath_handle,
2299 u64 *etype);
2300
2301enum vxge_hw_status
2302vxge_hw_vpath_etype_get_next(
2303 struct __vxge_hw_vpath_handle *vpath_handle,
2304 u64 *etype);
2305
2306enum vxge_hw_status
2307vxge_hw_vpath_etype_delete(
2308 struct __vxge_hw_vpath_handle *vpath_handle,
2309 u64 etype);
2310
2311enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2312 struct __vxge_hw_vpath_handle *vpath_handle);
2313
2314enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2315 struct __vxge_hw_vpath_handle *vpath_handle);
2316
2317enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2318 struct __vxge_hw_vpath_handle *vpath_handle);
2319
2320enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2321 struct __vxge_hw_vpath_handle *vpath_handle);
2322
2323enum vxge_hw_status vxge_hw_vpath_mcast_disable(
2324 struct __vxge_hw_vpath_handle *vpath_handle);
2325
2326enum vxge_hw_status vxge_hw_vpath_poll_rx(
2327 struct __vxge_hw_ring *ringh);
2328
2329enum vxge_hw_status vxge_hw_vpath_poll_tx(
2330 struct __vxge_hw_fifo *fifoh,
2331 void **skb_ptr);
2332
2333enum vxge_hw_status vxge_hw_vpath_alarm_process(
2334 struct __vxge_hw_vpath_handle *vpath_handle,
2335 u32 skip_alarms);
2336
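The poll and alarm entry points above form the deferred half that pairs with the ISR sketch earlier. An illustrative per-vpath service routine; the ring, fifo and vpath handles are assumed to come from the driver's own per-vpath bookkeeping:

static void example_poll_vpath(struct __vxge_hw_ring *ring,
			       struct __vxge_hw_fifo *fifo,
			       struct __vxge_hw_vpath_handle *vp)
{
	void *skb_ptr = NULL;

	vxge_hw_vpath_poll_rx(ring);		/* service receive completions */
	vxge_hw_vpath_poll_tx(fifo, &skb_ptr);	/* completed skbs handed back via skb_ptr */
	vxge_hw_vpath_alarm_process(vp, 0);	/* 0: do not skip alarms */
}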
2337enum vxge_hw_status
2338vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2339 int *tim_msix_id, int alarm_msix_id);
2340
2341void
2342vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2343 int msix_id);
2344
2345void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2346
2347void
2348vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
2349 int msix_id);
2350
2351void
2352vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
2353 int msix_id);
2354
2355void
2356vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
2357
2358enum vxge_hw_status vxge_hw_vpath_intr_enable(
2359 struct __vxge_hw_vpath_handle *vpath_handle);
2360
2361enum vxge_hw_status vxge_hw_vpath_intr_disable(
2362 struct __vxge_hw_vpath_handle *vpath_handle);
2363
2364void vxge_hw_vpath_inta_mask_tx_rx(
2365 struct __vxge_hw_vpath_handle *vpath_handle);
2366
2367void vxge_hw_vpath_inta_unmask_tx_rx(
2368 struct __vxge_hw_vpath_handle *vpath_handle);
2369
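For MSI-X operation the per-vector mask/clear/unmask calls above are typically wrapped around the actual servicing. A sketch of one plausible ordering; the exact placement of _clear relative to the servicing is an assumption here, and example_service_vector() is hypothetical:

static void example_msix_vector(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	vxge_hw_vpath_msix_mask(vp, msix_id);	/* quiesce this vector */
	example_service_vector(vp, msix_id);	/* hypothetical rx/tx work */
	vxge_hw_vpath_msix_clear(vp, msix_id);
	vxge_hw_vpath_msix_unmask(vp, msix_id);	/* re-arm the vector */
}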
2370void
2371vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2372
2373void
2374vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2375
2376enum vxge_hw_status
2377vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
2378
2379void
2380vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
2381
2382void
2383vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2384 void **dtrh);
2385
2386void
2387vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
2388
2389void
2390vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2391
2392int
2393vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2394
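The channel DTR helpers above describe the generic descriptor lifecycle that both rings and fifos are built on: allocate, post to the device, reap completions, recycle. A compressed illustrative cycle; real code keeps the producer and consumer sides on separate paths, and the NULL check below assumes try_complete hands back NULL when nothing has completed:

static void example_dtr_cycle(struct __vxge_hw_channel *channel)
{
	void *dtrh;

	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) != VXGE_HW_OK)
		return;			/* no free descriptors */
	vxge_hw_channel_dtr_post(channel, dtrh);

	/* consumer side: see whether the device has finished a descriptor */
	vxge_hw_channel_dtr_try_complete(channel, &dtrh);
	if (dtrh) {
		vxge_hw_channel_dtr_complete(channel);	/* mark completion processed */
		vxge_hw_channel_dtr_free(channel, dtrh);
	}
}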
2395/* ========================== PRIVATE API ================================= */
2396
2397enum vxge_hw_status
2398__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
2399
2400enum vxge_hw_status
2401__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
2402
2403enum vxge_hw_status
2404__vxge_hw_device_handle_error(
2405 struct __vxge_hw_device *hldev,
2406 u32 vp_id,
2407 enum vxge_hw_event type);
2408
2409#endif