author    | Ramkrishna Vepa <ram.vepa@neterion.com> | 2009-04-01 14:14:40 -0400
committer | David S. Miller <davem@davemloft.net>   | 2009-04-02 03:33:41 -0400
commit    | 40a3a9156dc66f23cc79758981886c1896887341 (patch)
tree      | 1806d0314e23d4f195d21620387a8c73b380c61d /drivers/net/vxge
parent    | 66d97fedea68f65d6dd8df832b2c48a714134b5a (diff)
Neterion: New driver: Hardware init & configuration
This patch takes care of the initialization and configuration steps of
Neterion Inc's X3100 Series 10GbE PCIe I/O Virtualized Server Adapter.
- Device initialization.
- Verification and setting of device config parameters.
- Allocation of Tx FIFO and Rx Ring descriptors (DTR).
- APIs to get various types of hw stats.
- APIs to configure RTS (Receive Traffic Steering).
- Changes in this submission -
- Include vmalloc header without which a compilation error occurred
on sparc64, ppc64 and IA64 platforms.
- Fixed compilation warning in register_poll, write32_upper,
write32_lower and the special write64 functions on ppc64.
- General cleanup - removed redundant includes and defines.
- Changes in previous submissions -
- Add readq/writeq implementation for the driver for 32 bit systems -
reported by Dave Miller (see the sketch after this list).
- Incorporated following comments from Ben Hutchings
- Start a comment with "/**" to make it a kernel-doc comment.
- Use prefix, "__vxge" in front of hw functions to make them globally
unique.
- Removed unnecessary clearing of members of *channel just before freeing
- Use backslashes only for macro definitions and not in multi-line
statements.
- Used pci_find_capability instead of redefining it.
- Used device and revision ids that are already in pdev - no need to
read them again.
- Used pci_save_state() and pci_restore_state() around resets.
- Used udelay and mdelay directly instead of wrapper.
- In __vxge_hw_device_register_poll() reset i to 0 after the
microsecond delay loop to commence the millisecond delay loop.
- Corrected spelling "sapper" - should be "swapper"
- Remove too much vertical whitespace.
- Replaced magic numbers with appropriate macros
- Incorporated following comments from Andi Kleen [andi@firstfloor.org]
- Reduced the arguments in functions or refactored them into smaller
functions.
- Allocate page sized memories used in slow path with vmalloc.
- Use asserts where necessary.
- Use macros instead of magic numbers.
- Use the pci layer code instead of defining own functions
- Remove driver wrappers such as xge_hw_device_private_set().
- Fixed sparse warnings.
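A note on the readq/writeq item above: 32-bit platforms do not always
provide 64-bit MMIO accessors, so drivers commonly synthesize them from
two 32-bit accesses. The following is a minimal sketch of that pattern,
not necessarily the exact code merged here; the upper/lower split must
match what the device's register layout guarantees.

#ifndef readq
/* Sketch only: emulate a 64-bit MMIO read with two 32-bit reads.
 * Assumes the device tolerates reading the halves in separate bus
 * cycles; which half lives at the higher address is device-specific. */
static inline u64 readq(void __iomem *addr)
{
	u64 ret = readl(addr + 4);	/* upper 32 bits */
	ret <<= 32;
	ret |= readl(addr);		/* lower 32 bits */
	return ret;
}
#endif

#ifndef writeq
/* Sketch only: emulate a 64-bit MMIO write with two 32-bit writes. */
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32)val, addr);			/* lower 32 bits */
	writel((u32)(val >> 32), addr + 4);	/* upper 32 bits */
}
#endif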
Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Rastapur Santosh <santosh.rastapur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r-- | drivers/net/vxge/vxge-config.c | 5264
-rw-r--r-- | drivers/net/vxge/vxge-config.h | 2259
2 files changed, 7523 insertions, 0 deletions
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
new file mode 100644
index 00000000000..6b41c884a33
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.c
@@ -0,0 +1,5264 @@
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in the PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);

	return;
}

/*
 * __vxge_hw_device_register_poll
 * Will poll a register for up to the specified amount of time,
 * returning as soon as the masked bits are cleared.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}

/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks whether the vpath reset-in-progress register has
 * cleared to zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;
	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
	return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	case 2:
		hldev->kdfc = (u8 __iomem *)(hldev->bar1 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	case 4:
		hldev->kdfc = (u8 __iomem *)(hldev->bar2 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers and stores them
 * in the device structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = readq(&hldev->common_reg->titan_asic_id);
	hldev->device_id =
		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

	hldev->major_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

	hldev->minor_revision =
		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);

	return;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}

	return;
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}

static enum vxge_hw_status
__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
{
	if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
	     hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
	     hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
	    (hldev->func_id == 0))
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars.
 * Rebalance the RX_WRR and KDFC_WRR calendars.
 */
static enum vxge_hw_status
vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
	u32 i, j, how_often = 1;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	/* Reset the priorities assigned to the WRR arbitration
	 * phases for the receive traffic */
	for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
		writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));

	/* Reset the transmit FIFO servicing calendar for FIFOs */
	for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
		writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
	}

	/* Assign WRR priority 0 for all FIFOs */
	for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
			((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Reset to service non-offload doorbells */
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
	writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	/* Set priority 0 to all receive queues */
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
	writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the FIFO service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;
		if (how_often) {

			for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each fifo is serviced
					 * at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	/* Assign WRR priority number for FIFOs */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
			((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

		writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
			((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
	}

	/* Modify the servicing algorithm applied to the 3 types of doorbells,
	 * i.e., non-offload, message and offload */
	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
		VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
		&hldev->mrpcim_reg->kdfc_entry_type_sel_0);

	writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
		&hldev->mrpcim_reg->kdfc_entry_type_sel_1);

	for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {

		val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
		val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);

		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
		writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
	}

	/* Set up the priorities assigned to receive queues */
	writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
		VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
		&hldev->mrpcim_reg->rx_queue_priority_0);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
		VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
		&hldev->mrpcim_reg->rx_queue_priority_1);

	writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
		&hldev->mrpcim_reg->rx_queue_priority_2);

	/* Initialize all the slots as unused */
	for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
		wrr_states[i] = -1;

	/* Prepare the Ring service states */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!hldev->config.vp_config[i].min_bandwidth)
			continue;

		how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
				hldev->config.vp_config[i].min_bandwidth;

		if (how_often) {
			for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
				if (wrr_states[j] == -1) {
					wrr_states[j] = i;
					/* Make sure each ring is
					 * serviced at least once */
					if (i == j)
						j += VXGE_HW_MAX_VIRTUAL_PATHS;
					else
						j += how_often;
				} else
					j++;
			}
		}
	}

	/* Fill the unused slots with 0 */
	for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
		if (wrr_states[j] == -1)
			wrr_states[j] = 0;
	}

	for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
		val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
				wrr_states[j++]);
		val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
				wrr_states[j++]);

		writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
	}
exit:
	return status;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	/* Validate the pci-e link width and speed */
	status = __vxge_hw_verify_pci_e_info(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	vxge_hw_wrr_rebalance(hldev);
exit:
	return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		hw_info->function_mode =
			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(i, vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', except @devh, which the driver fills in with the newly
 * allocated device object. The driver cooperates with the OS to find the
 * new Titan device and locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vmalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	memset(hldev, 0, sizeof(struct __vxge_hw_device));
	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->bar1 = attr->bar1;
	hldev->bar2 = attr->bar2;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK)
		goto exit;
	__vxge_hw_device_id_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);

	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;
	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}

exit:
	return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);

	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return 0;
	return hldev->debug_module_mask;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}

/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the RxD block at index 'from' to the one at
 * index 'to'
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}

	return;
}

/*
 * vxge_hw_ring_replenish - Replenish the RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
	void *rxd;
	int i = 0;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
		if (min_flag) {
			i++;
			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
				break;
		}
	}
	status = VXGE_HW_OK;
exit:
	return status;
}

1463 | /* | ||
1464 | * __vxge_hw_ring_create - Create a Ring | ||
1465 | * This function creates Ring and initializes it. | ||
1466 | * | ||
1467 | */ | ||
1468 | enum vxge_hw_status | ||
1469 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, | ||
1470 | struct vxge_hw_ring_attr *attr) | ||
1471 | { | ||
1472 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1473 | struct __vxge_hw_ring *ring; | ||
1474 | u32 ring_length; | ||
1475 | struct vxge_hw_ring_config *config; | ||
1476 | struct __vxge_hw_device *hldev; | ||
1477 | u32 vp_id; | ||
1478 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1479 | |||
1480 | if ((vp == NULL) || (attr == NULL)) { | ||
1481 | status = VXGE_HW_FAIL; | ||
1482 | goto exit; | ||
1483 | } | ||
1484 | |||
1485 | hldev = vp->vpath->hldev; | ||
1486 | vp_id = vp->vpath->vp_id; | ||
1487 | |||
1488 | config = &hldev->config.vp_config[vp_id].ring; | ||
1489 | |||
1490 | ring_length = config->ring_blocks * | ||
1491 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | ||
1492 | |||
1493 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | ||
1494 | VXGE_HW_CHANNEL_TYPE_RING, | ||
1495 | ring_length, | ||
1496 | attr->per_rxd_space, | ||
1497 | attr->userdata); | ||
1498 | |||
1499 | if (ring == NULL) { | ||
1500 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1501 | goto exit; | ||
1502 | } | ||
1503 | |||
1504 | vp->vpath->ringh = ring; | ||
1505 | ring->vp_id = vp_id; | ||
1506 | ring->vp_reg = vp->vpath->vp_reg; | ||
1507 | ring->common_reg = hldev->common_reg; | ||
1508 | ring->stats = &vp->vpath->sw_stats->ring_stats; | ||
1509 | ring->config = config; | ||
1510 | ring->callback = attr->callback; | ||
1511 | ring->rxd_init = attr->rxd_init; | ||
1512 | ring->rxd_term = attr->rxd_term; | ||
1513 | ring->buffer_mode = config->buffer_mode; | ||
1514 | ring->rxds_limit = config->rxds_limit; | ||
1515 | |||
1516 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | ||
1517 | ring->rxd_priv_size = | ||
1518 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | ||
1519 | ring->per_rxd_space = attr->per_rxd_space; | ||
1520 | |||
1521 | ring->rxd_priv_size = | ||
1522 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | ||
1523 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | ||
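/* Worked example (assumed numbers): with a 64-byte cache line and a
 * 104-byte private area, the round-up above computes
 *	((104 + 64 - 1) / 64) * 64 = (167 / 64) * 64 = 2 * 64 = 128
 * so each per-RxD private area starts on a cache-line boundary.
 */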
1524 | |||
1525 | /* how many RxDs can fit into one block. Depends on configured | ||
1526 | * buffer_mode. */ | ||
1527 | ring->rxds_per_block = | ||
1528 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | ||
1529 | |||
1530 | /* calculate actual RxD block private size */ | ||
1531 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
1532 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
1533 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
1534 | VXGE_HW_BLOCK_SIZE, | ||
1535 | VXGE_HW_BLOCK_SIZE, | ||
1536 | ring->rxdblock_priv_size, | ||
1537 | ring->config->ring_blocks, | ||
1538 | ring->config->ring_blocks, | ||
1539 | &ring_mp_callback, | ||
1540 | ring); | ||
1541 | |||
1542 | if (ring->mempool == NULL) { | ||
1543 | __vxge_hw_ring_delete(vp); | ||
1544 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1545 | } | ||
1546 | |||
1547 | status = __vxge_hw_channel_initialize(&ring->channel); | ||
1548 | if (status != VXGE_HW_OK) { | ||
1549 | __vxge_hw_ring_delete(vp); | ||
1550 | goto exit; | ||
1551 | } | ||
1552 | |||
1553 | /* Note: | ||
1554 | * Specifying rxd_init callback means two things: | ||
1555 | * 1) rxds need to be initialized by driver at channel-open time; | ||
1556 | * 2) rxds need to be posted at channel-open time | ||
1557 | * (that's what the initial_replenish() below does) | ||
1558 |  * Currently we don't have a case where 1) is done without 2). | ||
1559 | */ | ||
1560 | if (ring->rxd_init) { | ||
1561 | status = vxge_hw_ring_replenish(ring, 1); | ||
1562 | if (status != VXGE_HW_OK) { | ||
1563 | __vxge_hw_ring_delete(vp); | ||
1564 | goto exit; | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1568 | /* the initial replenish will increment the counter in its post() routine, | ||
1569 |  * so we have to reset it */ | ||
1570 | ring->stats->common_stats.usage_cnt = 0; | ||
1571 | exit: | ||
1572 | return status; | ||
1573 | } | ||
1574 | |||
1575 | /* | ||
1576 |  * __vxge_hw_ring_abort - Returns the outstanding RxDs | ||
1577 |  * This function terminates the RxDs of the ring | ||
1578 | */ | ||
1579 | enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) | ||
1580 | { | ||
1581 | void *rxdh; | ||
1582 | struct __vxge_hw_channel *channel; | ||
1583 | |||
1584 | channel = &ring->channel; | ||
1585 | |||
1586 | for (;;) { | ||
1587 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1588 | |||
1589 | if (rxdh == NULL) | ||
1590 | break; | ||
1591 | |||
1592 | vxge_hw_channel_dtr_complete(channel); | ||
1593 | |||
1594 | if (ring->rxd_term) | ||
1595 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | ||
1596 | channel->userdata); | ||
1597 | |||
1598 | vxge_hw_channel_dtr_free(channel, rxdh); | ||
1599 | } | ||
1600 | |||
1601 | return VXGE_HW_OK; | ||
1602 | } | ||
1603 | |||
1604 | /* | ||
1605 | * __vxge_hw_ring_reset - Resets the ring | ||
1606 | * This function resets the ring during vpath reset operation | ||
1607 | */ | ||
1608 | enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | ||
1609 | { | ||
1610 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1611 | struct __vxge_hw_channel *channel; | ||
1612 | |||
1613 | channel = &ring->channel; | ||
1614 | |||
1615 | __vxge_hw_ring_abort(ring); | ||
1616 | |||
1617 | status = __vxge_hw_channel_reset(channel); | ||
1618 | |||
1619 | if (status != VXGE_HW_OK) | ||
1620 | goto exit; | ||
1621 | |||
1622 | if (ring->rxd_init) { | ||
1623 | status = vxge_hw_ring_replenish(ring, 1); | ||
1624 | if (status != VXGE_HW_OK) | ||
1625 | goto exit; | ||
1626 | } | ||
1627 | exit: | ||
1628 | return status; | ||
1629 | } | ||
1630 | |||
1631 | /* | ||
1632 | * __vxge_hw_ring_delete - Removes the ring | ||
1633 |  * This function frees up the memory pool and removes the ring | ||
1634 | */ | ||
1635 | enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) | ||
1636 | { | ||
1637 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | ||
1638 | |||
1639 | __vxge_hw_ring_abort(ring); | ||
1640 | |||
1641 | if (ring->mempool) | ||
1642 | __vxge_hw_mempool_destroy(ring->mempool); | ||
1643 | |||
1644 | vp->vpath->ringh = NULL; | ||
1645 | __vxge_hw_channel_free(&ring->channel); | ||
1646 | |||
1647 | return VXGE_HW_OK; | ||
1648 | } | ||
1649 | |||
1650 | /* | ||
1651 | * __vxge_hw_mempool_grow | ||
1652 |  * Grows the mempool by up to %num_allocate memblocks. | ||
1653 | */ | ||
1654 | enum vxge_hw_status | ||
1655 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | ||
1656 | u32 *num_allocated) | ||
1657 | { | ||
1658 | u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0; | ||
1659 | u32 n_items = mempool->items_per_memblock; | ||
1660 | u32 start_block_idx = mempool->memblocks_allocated; | ||
1661 | u32 end_block_idx = mempool->memblocks_allocated + num_allocate; | ||
1662 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1663 | |||
1664 | *num_allocated = 0; | ||
1665 | |||
1666 | if (end_block_idx > mempool->memblocks_max) { | ||
1667 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1668 | goto exit; | ||
1669 | } | ||
1670 | |||
1671 | for (i = start_block_idx; i < end_block_idx; i++) { | ||
1672 | u32 j; | ||
1673 | u32 is_last = ((end_block_idx - 1) == i); | ||
1674 | struct vxge_hw_mempool_dma *dma_object = | ||
1675 | mempool->memblocks_dma_arr + i; | ||
1676 | void *the_memblock; | ||
1677 | |||
1678 | /* allocate the memblock's private part. Each DMA memblock | ||
1679 |  * has space allocated for the items' private usage upon | ||
1680 |  * the mempool user's request. Each time the mempool grows, it | ||
1681 |  * allocates the new memblock and its private part at once, | ||
1682 |  * which keeps the number of allocations low. */ | ||
1683 | mempool->memblocks_priv_arr[i] = | ||
1684 | vmalloc(mempool->items_priv_size * n_items); | ||
1685 | if (mempool->memblocks_priv_arr[i] == NULL) { | ||
1686 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1687 | goto exit; | ||
1688 | } | ||
1689 | |||
1690 | memset(mempool->memblocks_priv_arr[i], 0, | ||
1691 | mempool->items_priv_size * n_items); | ||
1692 | |||
1693 | /* allocate DMA-capable memblock */ | ||
1694 | mempool->memblocks_arr[i] = | ||
1695 | __vxge_hw_blockpool_malloc(mempool->devh, | ||
1696 | mempool->memblock_size, dma_object); | ||
1697 | if (mempool->memblocks_arr[i] == NULL) { | ||
1698 | vfree(mempool->memblocks_priv_arr[i]); | ||
1699 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1700 | goto exit; | ||
1701 | } | ||
1702 | |||
1703 | (*num_allocated)++; | ||
1704 | mempool->memblocks_allocated++; | ||
1705 | |||
1706 | memset(mempool->memblocks_arr[i], 0, mempool->memblock_size); | ||
1707 | |||
1708 | the_memblock = mempool->memblocks_arr[i]; | ||
1709 | |||
1710 | /* fill the items hash array */ | ||
1711 | for (j = 0; j < n_items; j++) { | ||
1712 | u32 index = i * n_items + j; | ||
1713 | |||
1714 | if (first_time && index >= mempool->items_initial) | ||
1715 | break; | ||
1716 | |||
1717 | mempool->items_arr[index] = | ||
1718 | ((char *)the_memblock + j*mempool->item_size); | ||
1719 | |||
1720 | /* let the caller do more work on each item */ | ||
1721 | if (mempool->item_func_alloc != NULL) | ||
1722 | mempool->item_func_alloc(mempool, i, | ||
1723 | dma_object, index, is_last); | ||
1724 | |||
1725 | mempool->items_current = index + 1; | ||
1726 | } | ||
1727 | |||
1728 | if (first_time && mempool->items_current == | ||
1729 | mempool->items_initial) | ||
1730 | break; | ||
1731 | } | ||
1732 | exit: | ||
1733 | return status; | ||
1734 | } | ||
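/* Illustrative note (assumed numbers): on the very first grow,
 * first_time is set and only items_initial items are formatted even
 * if the allocated memblocks could hold more; e.g. items_initial = 100
 * with 16 items per memblock touches 7 memblocks, formats items 0..99
 * and then stops.
 */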
1735 | |||
1736 | /* | ||
1737 |  * __vxge_hw_mempool_create | ||
1738 |  * This function creates a memory pool object. The pool may grow but will | ||
1739 |  * never shrink. The pool consists of a number of dynamically allocated | ||
1740 |  * blocks large enough to hold %items_initial items. Memory is DMA-able, | ||
1741 |  * but the client must map/unmap it before interoperating with the device. | ||
1742 | */ | ||
1743 | struct vxge_hw_mempool* | ||
1744 | __vxge_hw_mempool_create( | ||
1745 | struct __vxge_hw_device *devh, | ||
1746 | u32 memblock_size, | ||
1747 | u32 item_size, | ||
1748 | u32 items_priv_size, | ||
1749 | u32 items_initial, | ||
1750 | u32 items_max, | ||
1751 | struct vxge_hw_mempool_cbs *mp_callback, | ||
1752 | void *userdata) | ||
1753 | { | ||
1754 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1755 | u32 memblocks_to_allocate; | ||
1756 | struct vxge_hw_mempool *mempool = NULL; | ||
1757 | u32 allocated; | ||
1758 | |||
1759 | if (memblock_size < item_size) { | ||
1760 | status = VXGE_HW_FAIL; | ||
1761 | goto exit; | ||
1762 | } | ||
1763 | |||
1764 | mempool = (struct vxge_hw_mempool *) | ||
1765 | vmalloc(sizeof(struct vxge_hw_mempool)); | ||
1766 | if (mempool == NULL) { | ||
1767 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1768 | goto exit; | ||
1769 | } | ||
1770 | memset(mempool, 0, sizeof(struct vxge_hw_mempool)); | ||
1771 | |||
1772 | mempool->devh = devh; | ||
1773 | mempool->memblock_size = memblock_size; | ||
1774 | mempool->items_max = items_max; | ||
1775 | mempool->items_initial = items_initial; | ||
1776 | mempool->item_size = item_size; | ||
1777 | mempool->items_priv_size = items_priv_size; | ||
1778 | mempool->item_func_alloc = mp_callback->item_func_alloc; | ||
1779 | mempool->userdata = userdata; | ||
1780 | |||
1781 | mempool->memblocks_allocated = 0; | ||
1782 | |||
1783 | mempool->items_per_memblock = memblock_size / item_size; | ||
1784 | |||
1785 | mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / | ||
1786 | mempool->items_per_memblock; | ||
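/* Worked example (assumed numbers): with a 4096-byte memblock and
 * 256-byte items, items_per_memblock = 4096 / 256 = 16; for
 * items_max = 100 the ceiling division above gives
 *	(100 + 16 - 1) / 16 = 115 / 16 = 7
 * memblocks, the last one only partially used.
 */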
1787 | |||
1788 | /* allocate array of memblocks */ | ||
1789 | mempool->memblocks_arr = | ||
1790 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | ||
1791 | if (mempool->memblocks_arr == NULL) { | ||
1792 | __vxge_hw_mempool_destroy(mempool); | ||
1793 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1794 | mempool = NULL; | ||
1795 | goto exit; | ||
1796 | } | ||
1797 | memset(mempool->memblocks_arr, 0, | ||
1798 | sizeof(void *) * mempool->memblocks_max); | ||
1799 | |||
1800 | /* allocate array of private parts of items per memblocks */ | ||
1801 | mempool->memblocks_priv_arr = | ||
1802 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | ||
1803 | if (mempool->memblocks_priv_arr == NULL) { | ||
1804 | __vxge_hw_mempool_destroy(mempool); | ||
1805 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1806 | mempool = NULL; | ||
1807 | goto exit; | ||
1808 | } | ||
1809 | memset(mempool->memblocks_priv_arr, 0, | ||
1810 | sizeof(void *) * mempool->memblocks_max); | ||
1811 | |||
1812 | /* allocate array of memblocks DMA objects */ | ||
1813 | mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) | ||
1814 | vmalloc(sizeof(struct vxge_hw_mempool_dma) * | ||
1815 | mempool->memblocks_max); | ||
1816 | |||
1817 | if (mempool->memblocks_dma_arr == NULL) { | ||
1818 | __vxge_hw_mempool_destroy(mempool); | ||
1819 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1820 | mempool = NULL; | ||
1821 | goto exit; | ||
1822 | } | ||
1823 | memset(mempool->memblocks_dma_arr, 0, | ||
1824 | sizeof(struct vxge_hw_mempool_dma) * | ||
1825 | mempool->memblocks_max); | ||
1826 | |||
1827 | /* allocate hash array of items */ | ||
1828 | mempool->items_arr = | ||
1829 | (void **) vmalloc(sizeof(void *) * mempool->items_max); | ||
1830 | if (mempool->items_arr == NULL) { | ||
1831 | __vxge_hw_mempool_destroy(mempool); | ||
1832 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1833 | mempool = NULL; | ||
1834 | goto exit; | ||
1835 | } | ||
1836 | memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); | ||
1837 | |||
1838 | /* calculate initial number of memblocks */ | ||
1839 | memblocks_to_allocate = (mempool->items_initial + | ||
1840 | mempool->items_per_memblock - 1) / | ||
1841 | mempool->items_per_memblock; | ||
1842 | |||
1843 | /* pre-allocate the mempool */ | ||
1844 | status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, | ||
1845 | &allocated); | ||
1846 | if (status != VXGE_HW_OK) { | ||
1847 | __vxge_hw_mempool_destroy(mempool); | ||
1848 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1849 | mempool = NULL; | ||
1850 | goto exit; | ||
1851 | } | ||
1852 | |||
1853 | exit: | ||
1854 | return mempool; | ||
1855 | } | ||
1856 | |||
1857 | /* | ||
1858 |  * __vxge_hw_mempool_destroy | ||
1859 | */ | ||
1860 | void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) | ||
1861 | { | ||
1862 | u32 i, j; | ||
1863 | struct __vxge_hw_device *devh = mempool->devh; | ||
1864 | |||
1865 | for (i = 0; i < mempool->memblocks_allocated; i++) { | ||
1866 | struct vxge_hw_mempool_dma *dma_object; | ||
1867 | |||
1868 | vxge_assert(mempool->memblocks_arr[i]); | ||
1869 | vxge_assert(mempool->memblocks_dma_arr + i); | ||
1870 | |||
1871 | dma_object = mempool->memblocks_dma_arr + i; | ||
1872 | |||
1873 | for (j = 0; j < mempool->items_per_memblock; j++) { | ||
1874 | u32 index = i * mempool->items_per_memblock + j; | ||
1875 | |||
1876 | /* skip the last, partially filled (if any) memblock */ | ||
1877 | if (index >= mempool->items_current) | ||
1878 | break; | ||
1879 | } | ||
1880 | |||
1881 | vfree(mempool->memblocks_priv_arr[i]); | ||
1882 | |||
1883 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | ||
1884 | mempool->memblock_size, dma_object); | ||
1885 | } | ||
1886 | |||
1887 | if (mempool->items_arr) | ||
1888 | vfree(mempool->items_arr); | ||
1889 | |||
1890 | if (mempool->memblocks_dma_arr) | ||
1891 | vfree(mempool->memblocks_dma_arr); | ||
1892 | |||
1893 | if (mempool->memblocks_priv_arr) | ||
1894 | vfree(mempool->memblocks_priv_arr); | ||
1895 | |||
1896 | if (mempool->memblocks_arr) | ||
1897 | vfree(mempool->memblocks_arr); | ||
1898 | |||
1899 | vfree(mempool); | ||
1900 | } | ||
1901 | |||
1902 | /* | ||
1903 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | ||
1904 | * Check the fifo configuration | ||
1905 | */ | ||
1906 | enum vxge_hw_status | ||
1907 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | ||
1908 | { | ||
1909 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | ||
1910 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | ||
1911 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
1912 | |||
1913 | return VXGE_HW_OK; | ||
1914 | } | ||
1915 | |||
1916 | /* | ||
1917 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | ||
1918 | * Check the vpath configuration | ||
1919 | */ | ||
1920 | enum vxge_hw_status | ||
1921 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | ||
1922 | { | ||
1923 | enum vxge_hw_status status; | ||
1924 | |||
1925 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | ||
1926 | (vp_config->min_bandwidth > | ||
1927 | VXGE_HW_VPATH_BANDWIDTH_MAX)) | ||
1928 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | ||
1929 | |||
1930 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | ||
1931 | if (status != VXGE_HW_OK) | ||
1932 | return status; | ||
1933 | |||
1934 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | ||
1935 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1936 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1937 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1938 | |||
1939 | if ((vp_config->rpa_strip_vlan_tag != | ||
1940 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | ||
1941 | (vp_config->rpa_strip_vlan_tag != | ||
1942 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1943 | (vp_config->rpa_strip_vlan_tag != | ||
1944 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1945 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1946 | |||
1947 | return VXGE_HW_OK; | ||
1948 | } | ||
1949 | |||
1950 | /* | ||
1951 | * __vxge_hw_device_config_check - Check device configuration. | ||
1952 | * Check the device configuration | ||
1953 | */ | ||
1954 | enum vxge_hw_status | ||
1955 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | ||
1956 | { | ||
1957 | u32 i; | ||
1958 | enum vxge_hw_status status; | ||
1959 | |||
1960 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | ||
1961 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | ||
1962 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | ||
1963 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | ||
1964 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1965 | |||
1966 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | ||
1967 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | ||
1968 | return VXGE_HW_BADCFG_RTS_MAC_EN; | ||
1969 | |||
1970 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1971 | status = __vxge_hw_device_vpath_config_check( | ||
1972 | &new_config->vp_config[i]); | ||
1973 | if (status != VXGE_HW_OK) | ||
1974 | return status; | ||
1975 | } | ||
1976 | |||
1977 | return VXGE_HW_OK; | ||
1978 | } | ||
1979 | |||
1980 | /* | ||
1981 | * vxge_hw_device_config_default_get - Initialize device config with defaults. | ||
1982 | * Initialize Titan device config with default values. | ||
1983 | */ | ||
1984 | enum vxge_hw_status __devinit | ||
1985 | vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | ||
1986 | { | ||
1987 | u32 i; | ||
1988 | |||
1989 | device_config->dma_blockpool_initial = | ||
1990 | VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; | ||
1991 | device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; | ||
1992 | device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; | ||
1993 | device_config->rth_en = VXGE_HW_RTH_DEFAULT; | ||
1994 | device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; | ||
1995 | device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; | ||
1996 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | ||
1997 | |||
1998 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1999 | |||
2000 | device_config->vp_config[i].vp_id = i; | ||
2001 | |||
2002 | device_config->vp_config[i].min_bandwidth = | ||
2003 | VXGE_HW_VPATH_BANDWIDTH_DEFAULT; | ||
2004 | |||
2005 | device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; | ||
2006 | |||
2007 | device_config->vp_config[i].ring.ring_blocks = | ||
2008 | VXGE_HW_DEF_RING_BLOCKS; | ||
2009 | |||
2010 | device_config->vp_config[i].ring.buffer_mode = | ||
2011 | VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; | ||
2012 | |||
2013 | device_config->vp_config[i].ring.scatter_mode = | ||
2014 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; | ||
2015 | |||
2016 | device_config->vp_config[i].ring.rxds_limit = | ||
2017 | VXGE_HW_DEF_RING_RXDS_LIMIT; | ||
2018 | |||
2019 | device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; | ||
2020 | |||
2021 | device_config->vp_config[i].fifo.fifo_blocks = | ||
2022 | VXGE_HW_MIN_FIFO_BLOCKS; | ||
2023 | |||
2024 | device_config->vp_config[i].fifo.max_frags = | ||
2025 | VXGE_HW_MAX_FIFO_FRAGS; | ||
2026 | |||
2027 | device_config->vp_config[i].fifo.memblock_size = | ||
2028 | VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; | ||
2029 | |||
2030 | device_config->vp_config[i].fifo.alignment_size = | ||
2031 | VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; | ||
2032 | |||
2033 | device_config->vp_config[i].fifo.intr = | ||
2034 | VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; | ||
2035 | |||
2036 | device_config->vp_config[i].fifo.no_snoop_bits = | ||
2037 | VXGE_HW_FIFO_NO_SNOOP_DEFAULT; | ||
2038 | device_config->vp_config[i].tti.intr_enable = | ||
2039 | VXGE_HW_TIM_INTR_DEFAULT; | ||
2040 | |||
2041 | device_config->vp_config[i].tti.btimer_val = | ||
2042 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2043 | |||
2044 | device_config->vp_config[i].tti.timer_ac_en = | ||
2045 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2046 | |||
2047 | device_config->vp_config[i].tti.timer_ci_en = | ||
2048 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2049 | |||
2050 | device_config->vp_config[i].tti.timer_ri_en = | ||
2051 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2052 | |||
2053 | device_config->vp_config[i].tti.rtimer_val = | ||
2054 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2055 | |||
2056 | device_config->vp_config[i].tti.util_sel = | ||
2057 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2058 | |||
2059 | device_config->vp_config[i].tti.ltimer_val = | ||
2060 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2061 | |||
2062 | device_config->vp_config[i].tti.urange_a = | ||
2063 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2064 | |||
2065 | device_config->vp_config[i].tti.uec_a = | ||
2066 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2067 | |||
2068 | device_config->vp_config[i].tti.urange_b = | ||
2069 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2070 | |||
2071 | device_config->vp_config[i].tti.uec_b = | ||
2072 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2073 | |||
2074 | device_config->vp_config[i].tti.urange_c = | ||
2075 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2076 | |||
2077 | device_config->vp_config[i].tti.uec_c = | ||
2078 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2079 | |||
2080 | device_config->vp_config[i].tti.uec_d = | ||
2081 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2082 | |||
2083 | device_config->vp_config[i].rti.intr_enable = | ||
2084 | VXGE_HW_TIM_INTR_DEFAULT; | ||
2085 | |||
2086 | device_config->vp_config[i].rti.btimer_val = | ||
2087 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2088 | |||
2089 | device_config->vp_config[i].rti.timer_ac_en = | ||
2090 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2091 | |||
2092 | device_config->vp_config[i].rti.timer_ci_en = | ||
2093 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2094 | |||
2095 | device_config->vp_config[i].rti.timer_ri_en = | ||
2096 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2097 | |||
2098 | device_config->vp_config[i].rti.rtimer_val = | ||
2099 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2100 | |||
2101 | device_config->vp_config[i].rti.util_sel = | ||
2102 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2103 | |||
2104 | device_config->vp_config[i].rti.ltimer_val = | ||
2105 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2106 | |||
2107 | device_config->vp_config[i].rti.urange_a = | ||
2108 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2109 | |||
2110 | device_config->vp_config[i].rti.uec_a = | ||
2111 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2112 | |||
2113 | device_config->vp_config[i].rti.urange_b = | ||
2114 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2115 | |||
2116 | device_config->vp_config[i].rti.uec_b = | ||
2117 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2118 | |||
2119 | device_config->vp_config[i].rti.urange_c = | ||
2120 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2121 | |||
2122 | device_config->vp_config[i].rti.uec_c = | ||
2123 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2124 | |||
2125 | device_config->vp_config[i].rti.uec_d = | ||
2126 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2127 | |||
2128 | device_config->vp_config[i].mtu = | ||
2129 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU; | ||
2130 | |||
2131 | device_config->vp_config[i].rpa_strip_vlan_tag = | ||
2132 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT; | ||
2133 | } | ||
2134 | |||
2135 | return VXGE_HW_OK; | ||
2136 | } | ||
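/* Typical call sequence (sketch only; "config" is a caller-owned
 * struct and the MSI-X override is just an example):
 *
 *	struct vxge_hw_device_config config;
 *	enum vxge_hw_status status;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	status = __vxge_hw_device_config_check(&config);
 */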
2137 | |||
2138 | /* | ||
2139 |  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | ||
2140 |  * Set the swapper bits appropriately for the legacy section. | ||
2141 | */ | ||
2142 | enum vxge_hw_status | ||
2143 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
2144 | { | ||
2145 | u64 val64; | ||
2146 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2147 | |||
2148 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
2149 | |||
2150 | wmb(); | ||
2151 | |||
2152 | switch (val64) { | ||
2153 | |||
2154 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
2155 | return status; | ||
2156 | |||
2157 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
2158 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
2159 | &legacy_reg->pifm_rd_swap_en); | ||
2160 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
2161 | &legacy_reg->pifm_rd_flip_en); | ||
2162 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
2163 | &legacy_reg->pifm_wr_swap_en); | ||
2164 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
2165 | &legacy_reg->pifm_wr_flip_en); | ||
2166 | break; | ||
2167 | |||
2168 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
2169 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
2170 | &legacy_reg->pifm_rd_swap_en); | ||
2171 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
2172 | &legacy_reg->pifm_wr_swap_en); | ||
2173 | break; | ||
2174 | |||
2175 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
2176 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
2177 | &legacy_reg->pifm_rd_flip_en); | ||
2178 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
2179 | &legacy_reg->pifm_wr_flip_en); | ||
2180 | break; | ||
2181 | } | ||
2182 | |||
2183 | wmb(); | ||
2184 | |||
2185 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
2186 | |||
2187 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
2188 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
2189 | |||
2190 | return status; | ||
2191 | } | ||
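/* Illustration (example value, not necessarily the actual magic
 * constant): if the device wrote the pattern 0x0123456789abcdef and
 * the host readq() returns 0xefcdab8967452301, the path is
 * byte-swapped end to end, so only the read/write byte-swap enables
 * are programmed; a bit-flipped readback would instead enable the
 * bit-flip paths.
 */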
2192 | |||
2193 | /* | ||
2194 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | ||
2195 | * Set the swapper bits appropriately for the vpath. | ||
2196 | */ | ||
2197 | enum vxge_hw_status | ||
2198 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2199 | { | ||
2200 | #ifndef __BIG_ENDIAN | ||
2201 | u64 val64; | ||
2202 | |||
2203 | val64 = readq(&vpath_reg->vpath_general_cfg1); | ||
2204 | wmb(); | ||
2205 | val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN; | ||
2206 | writeq(val64, &vpath_reg->vpath_general_cfg1); | ||
2207 | wmb(); | ||
2208 | #endif | ||
2209 | return VXGE_HW_OK; | ||
2210 | } | ||
2211 | |||
2212 | /* | ||
2213 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. | ||
2214 |  * Set the swapper bits appropriately for the KDFC. | ||
2215 | */ | ||
2216 | enum vxge_hw_status | ||
2217 | __vxge_hw_kdfc_swapper_set( | ||
2218 | struct vxge_hw_legacy_reg __iomem *legacy_reg, | ||
2219 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2220 | { | ||
2221 | u64 val64; | ||
2222 | |||
2223 | val64 = readq(&legacy_reg->pifm_wr_swap_en); | ||
2224 | |||
2225 | if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) { | ||
2226 | val64 = readq(&vpath_reg->kdfcctl_cfg0); | ||
2227 | wmb(); | ||
2228 | |||
2229 | val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 | | ||
2230 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 | | ||
2231 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2; | ||
2232 | |||
2233 | writeq(val64, &vpath_reg->kdfcctl_cfg0); | ||
2234 | wmb(); | ||
2235 | } | ||
2236 | |||
2237 | return VXGE_HW_OK; | ||
2238 | } | ||
2239 | |||
2240 | /* | ||
2241 | * vxge_hw_mgmt_device_config - Retrieve device configuration. | ||
2242 |  * Get the device configuration. Permits run-time retrieval of the | ||
2243 |  * configuration values that were used to initialize and configure the device. | ||
2244 | */ | ||
2245 | enum vxge_hw_status | ||
2246 | vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev, | ||
2247 | struct vxge_hw_device_config *dev_config, int size) | ||
2248 | { | ||
2249 | |||
2250 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) | ||
2251 | return VXGE_HW_ERR_INVALID_DEVICE; | ||
2252 | |||
2253 | if (size != sizeof(struct vxge_hw_device_config)) | ||
2254 | return VXGE_HW_ERR_VERSION_CONFLICT; | ||
2255 | |||
2256 | memcpy(dev_config, &hldev->config, | ||
2257 | sizeof(struct vxge_hw_device_config)); | ||
2258 | |||
2259 | return VXGE_HW_OK; | ||
2260 | } | ||
2261 | |||
2262 | /* | ||
2263 | * vxge_hw_mgmt_reg_read - Read Titan register. | ||
2264 | */ | ||
2265 | enum vxge_hw_status | ||
2266 | vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, | ||
2267 | enum vxge_hw_mgmt_reg_type type, | ||
2268 | u32 index, u32 offset, u64 *value) | ||
2269 | { | ||
2270 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2271 | |||
2272 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
2273 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
2274 | goto exit; | ||
2275 | } | ||
2276 | |||
2277 | switch (type) { | ||
2278 | case vxge_hw_mgmt_reg_type_legacy: | ||
2279 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | ||
2280 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2281 | break; | ||
2282 | } | ||
2283 | *value = readq((void __iomem *)hldev->legacy_reg + offset); | ||
2284 | break; | ||
2285 | case vxge_hw_mgmt_reg_type_toc: | ||
2286 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | ||
2287 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2288 | break; | ||
2289 | } | ||
2290 | *value = readq((void __iomem *)hldev->toc_reg + offset); | ||
2291 | break; | ||
2292 | case vxge_hw_mgmt_reg_type_common: | ||
2293 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | ||
2294 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2295 | break; | ||
2296 | } | ||
2297 | *value = readq((void __iomem *)hldev->common_reg + offset); | ||
2298 | break; | ||
2299 | case vxge_hw_mgmt_reg_type_mrpcim: | ||
2300 | if (!(hldev->access_rights & | ||
2301 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | ||
2302 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
2303 | break; | ||
2304 | } | ||
2305 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | ||
2306 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2307 | break; | ||
2308 | } | ||
2309 | *value = readq((void __iomem *)hldev->mrpcim_reg + offset); | ||
2310 | break; | ||
2311 | case vxge_hw_mgmt_reg_type_srpcim: | ||
2312 | if (!(hldev->access_rights & | ||
2313 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | ||
2314 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
2315 | break; | ||
2316 | } | ||
2317 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | ||
2318 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2319 | break; | ||
2320 | } | ||
2321 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | ||
2322 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2323 | break; | ||
2324 | } | ||
2325 | *value = readq((void __iomem *)hldev->srpcim_reg[index] + | ||
2326 | offset); | ||
2327 | break; | ||
2328 | case vxge_hw_mgmt_reg_type_vpmgmt: | ||
2329 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | ||
2330 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
2331 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2332 | break; | ||
2333 | } | ||
2334 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | ||
2335 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2336 | break; | ||
2337 | } | ||
2338 | *value = readq((void __iomem *)hldev->vpmgmt_reg[index] + | ||
2339 | offset); | ||
2340 | break; | ||
2341 | case vxge_hw_mgmt_reg_type_vpath: | ||
2342 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) || | ||
2343 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
2344 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2345 | break; | ||
2346 | } | ||
2351 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | ||
2352 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2353 | break; | ||
2354 | } | ||
2355 | *value = readq((void __iomem *)hldev->vpath_reg[index] + | ||
2356 | offset); | ||
2357 | break; | ||
2358 | default: | ||
2359 | status = VXGE_HW_ERR_INVALID_TYPE; | ||
2360 | break; | ||
2361 | } | ||
2362 | |||
2363 | exit: | ||
2364 | return status; | ||
2365 | } | ||
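/* Usage sketch (illustrative only): read the first quadword of the
 * common register space through the management API:
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev,
 *			vxge_hw_mgmt_reg_type_common, 0, 0, &val);
 */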
2366 | |||
2367 | /* | ||
2368 |  * vxge_hw_mgmt_reg_write - Write Titan register. | ||
2369 | */ | ||
2370 | enum vxge_hw_status | ||
2371 | vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, | ||
2372 | enum vxge_hw_mgmt_reg_type type, | ||
2373 | u32 index, u32 offset, u64 value) | ||
2374 | { | ||
2375 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2376 | |||
2377 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
2378 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
2379 | goto exit; | ||
2380 | } | ||
2381 | |||
2382 | switch (type) { | ||
2383 | case vxge_hw_mgmt_reg_type_legacy: | ||
2384 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | ||
2385 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2386 | break; | ||
2387 | } | ||
2388 | writeq(value, (void __iomem *)hldev->legacy_reg + offset); | ||
2389 | break; | ||
2390 | case vxge_hw_mgmt_reg_type_toc: | ||
2391 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | ||
2392 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2393 | break; | ||
2394 | } | ||
2395 | writeq(value, (void __iomem *)hldev->toc_reg + offset); | ||
2396 | break; | ||
2397 | case vxge_hw_mgmt_reg_type_common: | ||
2398 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | ||
2399 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2400 | break; | ||
2401 | } | ||
2402 | writeq(value, (void __iomem *)hldev->common_reg + offset); | ||
2403 | break; | ||
2404 | case vxge_hw_mgmt_reg_type_mrpcim: | ||
2405 | if (!(hldev->access_rights & | ||
2406 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | ||
2407 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
2408 | break; | ||
2409 | } | ||
2410 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | ||
2411 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2412 | break; | ||
2413 | } | ||
2414 | writeq(value, (void __iomem *)hldev->mrpcim_reg + offset); | ||
2415 | break; | ||
2416 | case vxge_hw_mgmt_reg_type_srpcim: | ||
2417 | if (!(hldev->access_rights & | ||
2418 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | ||
2419 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
2420 | break; | ||
2421 | } | ||
2422 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | ||
2423 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2424 | break; | ||
2425 | } | ||
2426 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | ||
2427 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2428 | break; | ||
2429 | } | ||
2430 | writeq(value, (void __iomem *)hldev->srpcim_reg[index] + | ||
2431 | offset); | ||
2432 | |||
2433 | break; | ||
2434 | case vxge_hw_mgmt_reg_type_vpmgmt: | ||
2435 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | ||
2436 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
2437 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2438 | break; | ||
2439 | } | ||
2440 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | ||
2441 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2442 | break; | ||
2443 | } | ||
2444 | writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] + | ||
2445 | offset); | ||
2446 | break; | ||
2447 | case vxge_hw_mgmt_reg_type_vpath: | ||
2448 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) || | ||
2449 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
2450 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
2451 | break; | ||
2452 | } | ||
2453 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | ||
2454 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
2455 | break; | ||
2456 | } | ||
2457 | writeq(value, (void __iomem *)hldev->vpath_reg[index] + | ||
2458 | offset); | ||
2459 | break; | ||
2460 | default: | ||
2461 | status = VXGE_HW_ERR_INVALID_TYPE; | ||
2462 | break; | ||
2463 | } | ||
2464 | exit: | ||
2465 | return status; | ||
2466 | } | ||
2467 | |||
2468 | /* | ||
2469 |  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD | ||
2470 |  * list callback | ||
2471 |  * This function is the callback passed to __vxge_hw_mempool_create to create | ||
2472 |  * the memory pool for the TxD list | ||
2473 | */ | ||
2474 | static void | ||
2475 | __vxge_hw_fifo_mempool_item_alloc( | ||
2476 | struct vxge_hw_mempool *mempoolh, | ||
2477 | u32 memblock_index, struct vxge_hw_mempool_dma *dma_object, | ||
2478 | u32 index, u32 is_last) | ||
2479 | { | ||
2480 | u32 memblock_item_idx; | ||
2481 | struct __vxge_hw_fifo_txdl_priv *txdl_priv; | ||
2482 | struct vxge_hw_fifo_txd *txdp = | ||
2483 | (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index]; | ||
2484 | struct __vxge_hw_fifo *fifo = | ||
2485 | (struct __vxge_hw_fifo *)mempoolh->userdata; | ||
2486 | void *memblock = mempoolh->memblocks_arr[memblock_index]; | ||
2487 | |||
2488 | vxge_assert(txdp); | ||
2489 | |||
2490 | txdp->host_control = (u64) (size_t) | ||
2491 | __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp, | ||
2492 | &memblock_item_idx); | ||
2493 | |||
2494 | txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); | ||
2495 | |||
2496 | vxge_assert(txdl_priv); | ||
2497 | |||
2498 | fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp; | ||
2499 | |||
2500 | /* pre-format HW's TxDL's private */ | ||
2501 | txdl_priv->dma_offset = (char *)txdp - (char *)memblock; | ||
2502 | txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; | ||
2503 | txdl_priv->dma_handle = dma_object->handle; | ||
2504 | txdl_priv->memblock = memblock; | ||
2505 | txdl_priv->first_txdp = txdp; | ||
2506 | txdl_priv->next_txdl_priv = NULL; | ||
2507 | txdl_priv->alloc_frags = 0; | ||
2508 | |||
2509 | return; | ||
2510 | } | ||
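/* Illustrative note: host_control carries the kernel virtual address
 * of the TxDL's private area as a plain u64, so the completion path
 * can recover it with the inverse cast (sketch):
 *
 *	txdl_priv = (struct __vxge_hw_fifo_txdl_priv *)
 *			(size_t)txdp->host_control;
 */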
2511 | |||
2512 | /* | ||
2513 | * __vxge_hw_fifo_create - Create a FIFO | ||
2514 |  * This function creates a FIFO and initializes it. | ||
2515 | */ | ||
2516 | enum vxge_hw_status | ||
2517 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | ||
2518 | struct vxge_hw_fifo_attr *attr) | ||
2519 | { | ||
2520 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2521 | struct __vxge_hw_fifo *fifo; | ||
2522 | struct vxge_hw_fifo_config *config; | ||
2523 | u32 txdl_size, txdl_per_memblock; | ||
2524 | struct vxge_hw_mempool_cbs fifo_mp_callback; | ||
2525 | struct __vxge_hw_virtualpath *vpath; | ||
2526 | |||
2527 | if ((vp == NULL) || (attr == NULL)) { | ||
2528 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
2529 | goto exit; | ||
2530 | } | ||
2531 | vpath = vp->vpath; | ||
2532 | config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo; | ||
2533 | |||
2534 | txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd); | ||
2535 | |||
2536 | txdl_per_memblock = config->memblock_size / txdl_size; | ||
2537 | |||
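/* Worked example (assumed sizes): with max_frags = 256 and a 32-byte
 * struct vxge_hw_fifo_txd, txdl_size = 256 * 32 = 8192, so an
 * 8192-byte memblock holds exactly one TxDL. A memblock smaller than
 * one TxDL gives txdl_per_memblock == 0, which is rejected below with
 * VXGE_HW_ERR_INVALID_BLOCK_SIZE.
 */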
2538 | fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp, | ||
2539 | VXGE_HW_CHANNEL_TYPE_FIFO, | ||
2540 | config->fifo_blocks * txdl_per_memblock, | ||
2541 | attr->per_txdl_space, attr->userdata); | ||
2542 | |||
2543 | if (fifo == NULL) { | ||
2544 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2545 | goto exit; | ||
2546 | } | ||
2547 | |||
2548 | vpath->fifoh = fifo; | ||
2549 | fifo->nofl_db = vpath->nofl_db; | ||
2550 | |||
2551 | fifo->vp_id = vpath->vp_id; | ||
2552 | fifo->vp_reg = vpath->vp_reg; | ||
2553 | fifo->stats = &vpath->sw_stats->fifo_stats; | ||
2554 | |||
2555 | fifo->config = config; | ||
2556 | |||
2557 | /* apply "interrupts per txdl" attribute */ | ||
2558 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; | ||
2559 | |||
2560 | if (fifo->config->intr) | ||
2561 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; | ||
2562 | |||
2563 | fifo->no_snoop_bits = config->no_snoop_bits; | ||
2564 | |||
2565 | /* | ||
2566 | * FIFO memory management strategy: | ||
2567 | * | ||
2568 | * TxDL split into three independent parts: | ||
2569 | * - set of TxD's | ||
2570 | * - TxD HW private part | ||
2571 | * - driver private part | ||
2572 | * | ||
2573 |  * Adaptive memory allocation is used, i.e. memory is allocated on | ||
2574 |  * demand, with a size that fits into one memory block. | ||
2575 | * One memory block may contain more than one TxDL. | ||
2576 | * | ||
2577 | * During "reserve" operations more memory can be allocated on demand | ||
2578 | * for example due to FIFO full condition. | ||
2579 | * | ||
2580 |  * The pool of memblocks never shrinks, except in the __vxge_hw_fifo_delete | ||
2581 |  * routine, which essentially stops the channel and frees the resources. | ||
2582 | */ | ||
2583 | |||
2584 | /* TxDL common private size == TxDL private + driver private */ | ||
2585 | fifo->priv_size = | ||
2586 | sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space; | ||
2587 | fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) / | ||
2588 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | ||
2589 | |||
2590 | fifo->per_txdl_space = attr->per_txdl_space; | ||
2591 | |||
2592 | /* store the TxDL size and the TxDL count per memblock */ | ||
2593 | fifo->txdl_size = txdl_size; | ||
2594 | fifo->txdl_per_memblock = txdl_per_memblock; | ||
2595 | |||
2596 | fifo->txdl_term = attr->txdl_term; | ||
2597 | fifo->callback = attr->callback; | ||
2598 | |||
2599 | if (fifo->txdl_per_memblock == 0) { | ||
2600 | __vxge_hw_fifo_delete(vp); | ||
2601 | status = VXGE_HW_ERR_INVALID_BLOCK_SIZE; | ||
2602 | goto exit; | ||
2603 | } | ||
2604 | |||
2605 | fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; | ||
2606 | |||
2607 | fifo->mempool = | ||
2608 | __vxge_hw_mempool_create(vpath->hldev, | ||
2609 | fifo->config->memblock_size, | ||
2610 | fifo->txdl_size, | ||
2611 | fifo->priv_size, | ||
2612 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | ||
2613 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | ||
2614 | &fifo_mp_callback, | ||
2615 | fifo); | ||
2616 | |||
2617 | if (fifo->mempool == NULL) { | ||
2618 | __vxge_hw_fifo_delete(vp); | ||
2619 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2620 | goto exit; | ||
2621 | } | ||
2622 | |||
2623 | status = __vxge_hw_channel_initialize(&fifo->channel); | ||
2624 | if (status != VXGE_HW_OK) { | ||
2625 | __vxge_hw_fifo_delete(vp); | ||
2626 | goto exit; | ||
2627 | } | ||
2628 | |||
2629 | vxge_assert(fifo->channel.reserve_ptr); | ||
2630 | exit: | ||
2631 | return status; | ||
2632 | } | ||
2633 | |||
2634 | /* | ||
2635 |  * __vxge_hw_fifo_abort - Returns the outstanding TxDs | ||
2636 |  * This function terminates the TxDs of the fifo | ||
2637 | */ | ||
2638 | enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
2639 | { | ||
2640 | void *txdlh; | ||
2641 | |||
2642 | for (;;) { | ||
2643 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
2644 | |||
2645 | if (txdlh == NULL) | ||
2646 | break; | ||
2647 | |||
2648 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
2649 | |||
2650 | if (fifo->txdl_term) { | ||
2651 | fifo->txdl_term(txdlh, | ||
2652 | VXGE_HW_TXDL_STATE_POSTED, | ||
2653 | fifo->channel.userdata); | ||
2654 | } | ||
2655 | |||
2656 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
2657 | } | ||
2658 | |||
2659 | return VXGE_HW_OK; | ||
2660 | } | ||
2661 | |||
2662 | /* | ||
2663 | * __vxge_hw_fifo_reset - Resets the fifo | ||
2664 | * This function resets the fifo during vpath reset operation | ||
2665 | */ | ||
2666 | enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
2667 | { | ||
2668 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2669 | |||
2670 | __vxge_hw_fifo_abort(fifo); | ||
2671 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
2672 | |||
2673 | return status; | ||
2674 | } | ||
2675 | |||
2676 | /* | ||
2677 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
2678 |  * This function frees up the memory pool and removes the FIFO | ||
2679 | */ | ||
2680 | enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
2681 | { | ||
2682 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
2683 | |||
2684 | __vxge_hw_fifo_abort(fifo); | ||
2685 | |||
2686 | if (fifo->mempool) | ||
2687 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
2688 | |||
2689 | vp->vpath->fifoh = NULL; | ||
2690 | |||
2691 | __vxge_hw_channel_free(&fifo->channel); | ||
2692 | |||
2693 | return VXGE_HW_OK; | ||
2694 | } | ||
2695 | |||
2696 | /* | ||
2697 |  * __vxge_hw_vpath_pci_read - Read the content of a given address | ||
2698 | * in pci config space. | ||
2699 | * Read from the vpath pci config space. | ||
2700 | */ | ||
2701 | enum vxge_hw_status | ||
2702 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, | ||
2703 | u32 phy_func_0, u32 offset, u32 *val) | ||
2704 | { | ||
2705 | u64 val64; | ||
2706 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2707 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | ||
2708 | |||
2709 | val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset); | ||
2710 | |||
2711 | if (phy_func_0) | ||
2712 | val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0; | ||
2713 | |||
2714 | writeq(val64, &vp_reg->pci_config_access_cfg1); | ||
2715 | wmb(); | ||
2716 | writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ, | ||
2717 | &vp_reg->pci_config_access_cfg2); | ||
2718 | wmb(); | ||
2719 | |||
2720 | status = __vxge_hw_device_register_poll( | ||
2721 | &vp_reg->pci_config_access_cfg2, | ||
2722 | VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2723 | |||
2724 | if (status != VXGE_HW_OK) | ||
2725 | goto exit; | ||
2726 | |||
2727 | val64 = readq(&vp_reg->pci_config_access_status); | ||
2728 | |||
2729 | if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) { | ||
2730 | status = VXGE_HW_FAIL; | ||
2731 | *val = 0; | ||
2732 | } else | ||
2733 | *val = (u32)vxge_bVALn(val64, 32, 32); | ||
2734 | exit: | ||
2735 | return status; | ||
2736 | } | ||
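/* Usage sketch (illustrative only): read the vendor/device ID dword
 * (offset 0) of physical function 0 through this window:
 *
 *	u32 id;
 *	enum vxge_hw_status status;
 *
 *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0, &id);
 */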
2737 | |||
2738 | /* | ||
2739 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
2740 | * Returns the function number of the vpath. | ||
2741 | */ | ||
2742 | u32 | ||
2743 | __vxge_hw_vpath_func_id_get(u32 vp_id, | ||
2744 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
2745 | { | ||
2746 | u64 val64; | ||
2747 | |||
2748 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
2749 | |||
2750 | return | ||
2751 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
2752 | } | ||
2753 | |||
2754 | /* | ||
2755 |  * __vxge_hw_read_rts_ds - Program the RTS steering criteria | ||
2756 | */ | ||
2757 | static inline void | ||
2758 | __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2759 | u64 data_struct_sel) | ||
2760 | { | ||
2761 | writeq(0, &vpath_reg->rts_access_steer_ctrl); | ||
2762 | wmb(); | ||
2763 | writeq(data_struct_sel, &vpath_reg->rts_access_steer_data0); | ||
2764 | writeq(0, &vpath_reg->rts_access_steer_data1); | ||
2765 | wmb(); | ||
2766 | return; | ||
2767 | } | ||
2768 | |||
2769 | |||
2770 | /* | ||
2771 |  * __vxge_hw_vpath_card_info_get - Get the serial number, | ||
2772 | * part number and product description. | ||
2773 | */ | ||
2774 | enum vxge_hw_status | ||
2775 | __vxge_hw_vpath_card_info_get( | ||
2776 | u32 vp_id, | ||
2777 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2778 | struct vxge_hw_device_hw_info *hw_info) | ||
2779 | { | ||
2780 | u32 i, j; | ||
2781 | u64 val64; | ||
2782 | u64 data1 = 0ULL; | ||
2783 | u64 data2 = 0ULL; | ||
2784 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2785 | u8 *serial_number = hw_info->serial_number; | ||
2786 | u8 *part_number = hw_info->part_number; | ||
2787 | u8 *product_desc = hw_info->product_desc; | ||
2788 | |||
2789 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2790 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER); | ||
2791 | |||
2792 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2793 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2794 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2795 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2796 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2797 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2798 | |||
2799 | status = __vxge_hw_pio_mem_write64(val64, | ||
2800 | &vpath_reg->rts_access_steer_ctrl, | ||
2801 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2802 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2803 | |||
2804 | if (status != VXGE_HW_OK) | ||
2805 | return status; | ||
2806 | |||
2807 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2808 | |||
2809 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2810 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2811 | ((u64 *)serial_number)[0] = be64_to_cpu(data1); | ||
2812 | |||
2813 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2814 | ((u64 *)serial_number)[1] = be64_to_cpu(data2); | ||
2815 | status = VXGE_HW_OK; | ||
2816 | } else | ||
2817 | *serial_number = 0; | ||
2818 | |||
2819 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2820 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER); | ||
2821 | |||
2822 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2823 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2824 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2825 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2826 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2827 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2828 | |||
2829 | status = __vxge_hw_pio_mem_write64(val64, | ||
2830 | &vpath_reg->rts_access_steer_ctrl, | ||
2831 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2832 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2833 | |||
2834 | if (status != VXGE_HW_OK) | ||
2835 | return status; | ||
2836 | |||
2837 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2838 | |||
2839 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2840 | |||
2841 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2842 | ((u64 *)part_number)[0] = be64_to_cpu(data1); | ||
2843 | |||
2844 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2845 | ((u64 *)part_number)[1] = be64_to_cpu(data2); | ||
2846 | |||
2847 | status = VXGE_HW_OK; | ||
2848 | |||
2849 | } else | ||
2850 | *part_number = 0; | ||
2851 | |||
2852 | j = 0; | ||
2853 | |||
2854 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
2855 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
2856 | |||
2857 | __vxge_hw_read_rts_ds(vpath_reg, i); | ||
2858 | |||
2859 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2860 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2861 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2862 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2863 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2864 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2865 | |||
2866 | status = __vxge_hw_pio_mem_write64(val64, | ||
2867 | &vpath_reg->rts_access_steer_ctrl, | ||
2868 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2869 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2870 | |||
2871 | if (status != VXGE_HW_OK) | ||
2872 | return status; | ||
2873 | |||
2874 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2875 | |||
2876 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2877 | |||
2878 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2879 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
2880 | |||
2881 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2882 | ((u64 *)product_desc)[j++] = be64_to_cpu(data2); | ||
2883 | |||
2884 | status = VXGE_HW_OK; | ||
2885 | } else | ||
2886 | *product_desc = 0; | ||
2887 | } | ||
2888 | |||
2889 | return status; | ||
2890 | } | ||
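/* Illustrative note: the firmware hands each string back as two
 * big-endian quadwords; be64_to_cpu() plus the u64 stores above land
 * the sixteen bytes in serial_number[]/part_number[] in wire order,
 * independent of host endianness.
 */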
2891 | |||
2892 | /* | ||
2893 |  * __vxge_hw_vpath_fw_ver_get - Get the firmware version | ||
2894 |  * Returns the FW version | ||
2895 | */ | ||
2896 | enum vxge_hw_status | ||
2897 | __vxge_hw_vpath_fw_ver_get( | ||
2898 | u32 vp_id, | ||
2899 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2900 | struct vxge_hw_device_hw_info *hw_info) | ||
2901 | { | ||
2902 | u64 val64; | ||
2903 | u64 data1 = 0ULL; | ||
2904 | u64 data2 = 0ULL; | ||
2905 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
2906 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
2907 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
2908 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
2909 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2910 | |||
2911 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2912 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) | | ||
2913 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2914 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2915 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2916 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2917 | |||
2918 | status = __vxge_hw_pio_mem_write64(val64, | ||
2919 | &vpath_reg->rts_access_steer_ctrl, | ||
2920 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2921 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2922 | |||
2923 | if (status != VXGE_HW_OK) | ||
2924 | goto exit; | ||
2925 | |||
2926 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2927 | |||
2928 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2929 | |||
2930 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2931 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2932 | |||
2933 | fw_date->day = | ||
2934 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY( | ||
2935 | data1); | ||
2936 | fw_date->month = | ||
2937 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH( | ||
2938 | data1); | ||
2939 | fw_date->year = | ||
2940 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR( | ||
2941 | data1); | ||
2942 | |||
2943 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
2944 | fw_date->month, fw_date->day, fw_date->year); | ||
2945 | |||
2946 | fw_version->major = | ||
2947 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1); | ||
2948 | fw_version->minor = | ||
2949 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1); | ||
2950 | fw_version->build = | ||
2951 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1); | ||
2952 | |||
2953 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2954 | fw_version->major, fw_version->minor, fw_version->build); | ||
2955 | |||
2956 | flash_date->day = | ||
2957 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2); | ||
2958 | flash_date->month = | ||
2959 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2); | ||
2960 | flash_date->year = | ||
2961 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2); | ||
2962 | |||
2963 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, | ||
2964 | "%2.2d/%2.2d/%4.4d", | ||
2965 | flash_date->month, flash_date->day, flash_date->year); | ||
2966 | |||
2967 | flash_version->major = | ||
2968 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2); | ||
2969 | flash_version->minor = | ||
2970 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2); | ||
2971 | flash_version->build = | ||
2972 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2); | ||
2973 | |||
2974 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2975 | flash_version->major, flash_version->minor, | ||
2976 | flash_version->build); | ||
2977 | |||
2978 | status = VXGE_HW_OK; | ||
2979 | |||
2980 | } else | ||
2981 | status = VXGE_HW_FAIL; | ||
2982 | exit: | ||
2983 | return status; | ||
2984 | } | ||
2985 | |||
2986 | /* | ||
2987 | * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode | ||
2988 |  * Returns the pci function mode | ||
2989 | */ | ||
2990 | u64 | ||
2991 | __vxge_hw_vpath_pci_func_mode_get( | ||
2992 | u32 vp_id, | ||
2993 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2994 | { | ||
2995 | u64 val64; | ||
2996 | u64 data1 = 0ULL; | ||
2997 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2998 | |||
2999 | __vxge_hw_read_rts_ds(vpath_reg, | ||
3000 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE); | ||
3001 | |||
3002 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
3003 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
3004 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
3005 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
3006 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3007 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
3008 | |||
3009 | status = __vxge_hw_pio_mem_write64(val64, | ||
3010 | &vpath_reg->rts_access_steer_ctrl, | ||
3011 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3012 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3013 | |||
3014 | if (status != VXGE_HW_OK) | ||
3015 | goto exit; | ||
3016 | |||
3017 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
3018 | |||
3019 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
3020 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
3021 | status = VXGE_HW_OK; | ||
3022 | } else { | ||
3023 | data1 = 0; | ||
3024 | status = VXGE_HW_FAIL; | ||
3025 | } | ||
3026 | exit: | ||
3027 | return data1; | ||
3028 | } | ||
3029 | |||
3030 | /** | ||
3031 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. | ||
3032 | * @hldev: HW device. | ||
3033 | * @on_off: TRUE to turn the LED flicker on, FALSE to turn it off | ||
3034 | * | ||
3035 | * Flicker the link LED. | ||
3036 | */ | ||
3037 | enum vxge_hw_status | ||
3038 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, | ||
3039 | u64 on_off) | ||
3040 | { | ||
3041 | u64 val64; | ||
3042 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3043 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3044 | |||
3045 | if (hldev == NULL) { | ||
3046 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
3047 | goto exit; | ||
3048 | } | ||
3049 | |||
3050 | vp_reg = hldev->vpath_reg[hldev->first_vp_id]; | ||
3051 | |||
3052 | writeq(0, &vp_reg->rts_access_steer_ctrl); | ||
3053 | wmb(); | ||
3054 | writeq(on_off, &vp_reg->rts_access_steer_data0); | ||
3055 | writeq(0, &vp_reg->rts_access_steer_data1); | ||
3056 | wmb(); | ||
3057 | |||
3058 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
3059 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | | ||
3060 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
3061 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
3062 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3063 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
3064 | |||
3065 | status = __vxge_hw_pio_mem_write64(val64, | ||
3066 | &vp_reg->rts_access_steer_ctrl, | ||
3067 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3068 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3069 | exit: | ||
3070 | return status; | ||
3071 | } | ||
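/*
 * Usage sketch (editor's illustration): with the TRUE/FALSE convention
 * documented above, an ethtool-style blink loop might look like this;
 * the one second period is an arbitrary example.
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);
 *	msleep(1000);
 *	vxge_hw_device_flick_link_led(hldev, 0);
 */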
3072 | |||
3073 | /* | ||
3074 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables | ||
3075 | */ | ||
3076 | enum vxge_hw_status | ||
3077 | __vxge_hw_vpath_rts_table_get( | ||
3078 | struct __vxge_hw_vpath_handle *vp, | ||
3079 | u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) | ||
3080 | { | ||
3081 | u64 val64; | ||
3082 | struct __vxge_hw_virtualpath *vpath; | ||
3083 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3084 | |||
3085 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3086 | |||
3087 | if (vp == NULL) { | ||
3088 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3089 | goto exit; | ||
3090 | } | ||
3091 | |||
3092 | vpath = vp->vpath; | ||
3093 | vp_reg = vpath->vp_reg; | ||
3094 | |||
3095 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
3096 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
3097 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3098 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
3099 | |||
3100 | if ((rts_table == | ||
3101 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || | ||
3102 | (rts_table == | ||
3103 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || | ||
3104 | (rts_table == | ||
3105 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || | ||
3106 | (rts_table == | ||
3107 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { | ||
3108 | val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; | ||
3109 | } | ||
3110 | |||
3111 | status = __vxge_hw_pio_mem_write64(val64, | ||
3112 | &vp_reg->rts_access_steer_ctrl, | ||
3113 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3114 | vpath->hldev->config.device_poll_millis); | ||
3115 | |||
3116 | if (status != VXGE_HW_OK) | ||
3117 | goto exit; | ||
3118 | |||
3119 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
3120 | |||
3121 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
3122 | |||
3123 | *data1 = readq(&vp_reg->rts_access_steer_data0); | ||
3124 | |||
3125 | if ((rts_table == | ||
3126 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | ||
3127 | (rts_table == | ||
3128 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | ||
3129 | *data2 = readq(&vp_reg->rts_access_steer_data1); | ||
3130 | } | ||
3131 | status = VXGE_HW_OK; | ||
3132 | } else | ||
3133 | status = VXGE_HW_FAIL; | ||
3134 | exit: | ||
3135 | return status; | ||
3136 | } | ||
3137 | |||
3138 | /* | ||
3139 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables | ||
3140 | */ | ||
3141 | enum vxge_hw_status | ||
3142 | __vxge_hw_vpath_rts_table_set( | ||
3143 | struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, | ||
3144 | u32 offset, u64 data1, u64 data2) | ||
3145 | { | ||
3146 | u64 val64; | ||
3147 | struct __vxge_hw_virtualpath *vpath; | ||
3148 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3149 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3150 | |||
3151 | if (vp == NULL) { | ||
3152 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3153 | goto exit; | ||
3154 | } | ||
3155 | |||
3156 | vpath = vp->vpath; | ||
3157 | vp_reg = vpath->vp_reg; | ||
3158 | |||
3159 | writeq(data1, &vp_reg->rts_access_steer_data0); | ||
3160 | wmb(); | ||
3161 | |||
3162 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | ||
3163 | (rts_table == | ||
3164 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | ||
3165 | writeq(data2, &vp_reg->rts_access_steer_data1); | ||
3166 | wmb(); | ||
3167 | } | ||
3168 | |||
3169 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
3170 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
3171 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3172 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
3173 | |||
3174 | status = __vxge_hw_pio_mem_write64(val64, | ||
3175 | &vp_reg->rts_access_steer_ctrl, | ||
3176 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3177 | vpath->hldev->config.device_poll_millis); | ||
3178 | |||
3179 | if (status != VXGE_HW_OK) | ||
3180 | goto exit; | ||
3181 | |||
3182 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
3183 | |||
3184 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) | ||
3185 | status = VXGE_HW_OK; | ||
3186 | else | ||
3187 | status = VXGE_HW_FAIL; | ||
3188 | exit: | ||
3189 | return status; | ||
3190 | } | ||
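/*
 * Editor's note: the get/set routines above share one handshake with
 * the adapter - program rts_access_steer_data0/1, strobe
 * rts_access_steer_ctrl with the action, table and offset, poll until
 * the strobe clears, then test RMACJ_STATUS for success. Every
 * steering accessor in this file (DA, RTH, firmware memo) follows the
 * same pattern.
 */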
3191 | |||
3192 | /* | ||
3193 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
3194 | * from MAC address table. | ||
3195 | */ | ||
3196 | enum vxge_hw_status | ||
3197 | __vxge_hw_vpath_addr_get( | ||
3198 | u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
3199 | u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) | ||
3200 | { | ||
3201 | u32 i; | ||
3202 | u64 val64; | ||
3203 | u64 data1 = 0ULL; | ||
3204 | u64 data2 = 0ULL; | ||
3205 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3206 | |||
3207 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
3208 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) | | ||
3209 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
3210 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) | | ||
3211 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3212 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
3213 | |||
3214 | status = __vxge_hw_pio_mem_write64(val64, | ||
3215 | &vpath_reg->rts_access_steer_ctrl, | ||
3216 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3217 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3218 | |||
3219 | if (status != VXGE_HW_OK) | ||
3220 | goto exit; | ||
3221 | |||
3222 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
3223 | |||
3224 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
3225 | |||
3226 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
3227 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
3228 | |||
3229 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); | ||
3230 | data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
3231 | data2); | ||
3232 | |||
3233 | for (i = ETH_ALEN; i > 0; i--) { | ||
3234 | macaddr[i-1] = (u8)(data1 & 0xFF); | ||
3235 | data1 >>= 8; | ||
3236 | |||
3237 | macaddr_mask[i-1] = (u8)(data2 & 0xFF); | ||
3238 | data2 >>= 8; | ||
3239 | } | ||
3240 | status = VXGE_HW_OK; | ||
3241 | } else | ||
3242 | status = VXGE_HW_FAIL; | ||
3243 | exit: | ||
3244 | return status; | ||
3245 | } | ||
3246 | |||
3247 | /* | ||
3248 | * vxge_hw_vpath_rts_rth_set - Set/configure RTH (receive traffic hashing). | ||
3249 | */ | ||
3250 | enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | ||
3251 | struct __vxge_hw_vpath_handle *vp, | ||
3252 | enum vxge_hw_rth_algoritms algorithm, | ||
3253 | struct vxge_hw_rth_hash_types *hash_type, | ||
3254 | u16 bucket_size) | ||
3255 | { | ||
3256 | u64 data0, data1; | ||
3257 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3258 | |||
3259 | if (vp == NULL) { | ||
3260 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3261 | goto exit; | ||
3262 | } | ||
3263 | |||
3264 | status = __vxge_hw_vpath_rts_table_get(vp, | ||
3265 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | ||
3266 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | ||
3267 | 0, &data0, &data1); | ||
3268 | |||
3269 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | | ||
3270 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); | ||
3271 | |||
3272 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN | | ||
3273 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) | | ||
3274 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm); | ||
3275 | |||
3276 | if (hash_type->hash_type_tcpipv4_en) | ||
3277 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN; | ||
3278 | |||
3279 | if (hash_type->hash_type_ipv4_en) | ||
3280 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN; | ||
3281 | |||
3282 | if (hash_type->hash_type_tcpipv6_en) | ||
3283 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN; | ||
3284 | |||
3285 | if (hash_type->hash_type_ipv6_en) | ||
3286 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN; | ||
3287 | |||
3288 | if (hash_type->hash_type_tcpipv6ex_en) | ||
3289 | data0 |= | ||
3290 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN; | ||
3291 | |||
3292 | if (hash_type->hash_type_ipv6ex_en) | ||
3293 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN; | ||
3294 | |||
3295 | if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0)) | ||
3296 | data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | ||
3297 | else | ||
3298 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | ||
3299 | |||
3300 | status = __vxge_hw_vpath_rts_table_set(vp, | ||
3301 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY, | ||
3302 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | ||
3303 | 0, data0, 0); | ||
3304 | exit: | ||
3305 | return status; | ||
3306 | } | ||
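/*
 * Usage sketch (editor's illustration): enable RTH for TCP/IPv4 and
 * plain IPv4 flows. RTH_ALG_JENKINS is assumed to be one of the
 * enum vxge_hw_rth_algoritms values; the 4-bit BUCKET_SIZE field above
 * suggests bucket_size is a log2 count, so 6 would mean 64 buckets.
 *
 *	struct vxge_hw_rth_hash_types ht = {0};
 *
 *	ht.hash_type_tcpipv4_en = 1;
 *	ht.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 6);
 */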
3307 | |||
3308 | static void | ||
3309 | vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, | ||
3310 | u16 flag, u8 *itable) | ||
3311 | { | ||
3312 | switch (flag) { | ||
3313 | case 1: | ||
3314 | *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)| | ||
3315 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | | ||
3316 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( | ||
3317 | 			itable[j]); | ||
		break;
3318 | case 2: | ||
3319 | *data0 |= | ||
3320 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| | ||
3321 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | | ||
3322 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( | ||
3323 | 			itable[j]); | ||
		break;
3324 | case 3: | ||
3325 | *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| | ||
3326 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | | ||
3327 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( | ||
3328 | 			itable[j]); | ||
		break;
3329 | case 4: | ||
3330 | *data1 |= | ||
3331 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| | ||
3332 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN | | ||
3333 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA( | ||
3334 | 			itable[j]); | ||
		break;
3335 | default: | ||
3336 | return; | ||
3337 | } | ||
3338 | } | ||
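/*
 * Editor's note on the helper above: each 64-bit steering data word
 * carries two indirection-table entries (ITEM0 and ITEM1), so flags
 * 1..4 fill, in order, data0/ITEM0, data0/ITEM1, data1/ITEM0 and
 * data1/ITEM1. One call fills exactly one slot - hence the breaks -
 * and the multi-IT writer below therefore calls it up to four times
 * before issuing a single steering write.
 */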
3339 | /* | ||
3340 | * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT). | ||
3341 | */ | ||
3342 | enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( | ||
3343 | struct __vxge_hw_vpath_handle **vpath_handles, | ||
3344 | u32 vpath_count, | ||
3345 | u8 *mtable, | ||
3346 | u8 *itable, | ||
3347 | u32 itable_size) | ||
3348 | { | ||
3349 | u32 i, j, action, rts_table; | ||
3350 | u64 data0; | ||
3351 | u64 data1; | ||
3352 | u32 max_entries; | ||
3353 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3354 | struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; | ||
3355 | |||
3356 | if (vp == NULL) { | ||
3357 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3358 | goto exit; | ||
3359 | } | ||
3360 | |||
3361 | max_entries = (((u32)1) << itable_size); | ||
3362 | |||
3363 | if (vp->vpath->hldev->config.rth_it_type | ||
3364 | == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { | ||
3365 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | ||
3366 | rts_table = | ||
3367 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; | ||
3368 | |||
3369 | for (j = 0; j < max_entries; j++) { | ||
3370 | |||
3371 | data1 = 0; | ||
3372 | |||
3373 | data0 = | ||
3374 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | ||
3375 | itable[j]); | ||
3376 | |||
3377 | status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], | ||
3378 | action, rts_table, j, data0, data1); | ||
3379 | |||
3380 | if (status != VXGE_HW_OK) | ||
3381 | goto exit; | ||
3382 | } | ||
3383 | |||
3384 | for (j = 0; j < max_entries; j++) { | ||
3385 | |||
3386 | data1 = 0; | ||
3387 | |||
3388 | data0 = | ||
3389 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | | ||
3390 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | ||
3391 | itable[j]); | ||
3392 | |||
3393 | status = __vxge_hw_vpath_rts_table_set( | ||
3394 | vpath_handles[mtable[itable[j]]], action, | ||
3395 | rts_table, j, data0, data1); | ||
3396 | |||
3397 | if (status != VXGE_HW_OK) | ||
3398 | goto exit; | ||
3399 | } | ||
3400 | } else { | ||
3401 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | ||
3402 | rts_table = | ||
3403 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; | ||
3404 | for (i = 0; i < vpath_count; i++) { | ||
3405 | |||
3406 | for (j = 0; j < max_entries;) { | ||
3407 | |||
3408 | data0 = 0; | ||
3409 | data1 = 0; | ||
3410 | |||
3411 | while (j < max_entries) { | ||
3412 | if (mtable[itable[j]] != i) { | ||
3413 | j++; | ||
3414 | continue; | ||
3415 | } | ||
3416 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3417 | &data0, &data1, 1, itable); | ||
3418 | j++; | ||
3419 | break; | ||
3420 | } | ||
3421 | |||
3422 | while (j < max_entries) { | ||
3423 | if (mtable[itable[j]] != i) { | ||
3424 | j++; | ||
3425 | continue; | ||
3426 | } | ||
3427 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3428 | &data0, &data1, 2, itable); | ||
3429 | j++; | ||
3430 | break; | ||
3431 | } | ||
3432 | |||
3433 | while (j < max_entries) { | ||
3434 | if (mtable[itable[j]] != i) { | ||
3435 | j++; | ||
3436 | continue; | ||
3437 | } | ||
3438 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3439 | &data0, &data1, 3, itable); | ||
3440 | j++; | ||
3441 | break; | ||
3442 | } | ||
3443 | |||
3444 | while (j < max_entries) { | ||
3445 | if (mtable[itable[j]] != i) { | ||
3446 | j++; | ||
3447 | continue; | ||
3448 | } | ||
3449 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3450 | &data0, &data1, 4, itable); | ||
3451 | j++; | ||
3452 | break; | ||
3453 | } | ||
3454 | |||
3455 | if (data0 != 0) { | ||
3456 | status = __vxge_hw_vpath_rts_table_set( | ||
3457 | vpath_handles[i], | ||
3458 | action, rts_table, | ||
3459 | 0, data0, data1); | ||
3460 | |||
3461 | if (status != VXGE_HW_OK) | ||
3462 | goto exit; | ||
3463 | } | ||
3464 | } | ||
3465 | } | ||
3466 | } | ||
3467 | exit: | ||
3468 | return status; | ||
3469 | } | ||
3470 | |||
3471 | /** | ||
3472 | * vxge_hw_vpath_check_leak - Check for memory leak | ||
3473 | * @ring: Handle to the ring object used for receive | ||
3474 | * | ||
3475 | * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to | ||
3476 | * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. | ||
3477 | * Returns: VXGE_HW_FAIL if a leak has occurred, VXGE_HW_OK otherwise. | ||
3478 | * | ||
3479 | */ | ||
3480 | enum vxge_hw_status | ||
3481 | vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) | ||
3482 | { | ||
3483 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3484 | u64 rxd_new_count, rxd_spat; | ||
3485 | |||
3486 | if (ring == NULL) | ||
3487 | return status; | ||
3488 | |||
3489 | rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); | ||
3490 | rxd_spat = readq(&ring->vp_reg->prc_cfg6); | ||
3491 | rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat); | ||
3492 | |||
3493 | if (rxd_new_count >= rxd_spat) | ||
3494 | status = VXGE_HW_FAIL; | ||
3495 | |||
3496 | return status; | ||
3497 | } | ||
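/*
 * Usage sketch (editor's illustration): a driver could run this check
 * from a periodic watchdog and reset the vpath on failure; "vdev" and
 * its members are hypothetical names, not part of this patch.
 *
 *	if (vxge_hw_vpath_check_leak(vdev->vpaths[i].ring_handle) ==
 *			VXGE_HW_FAIL)
 *		vxge_hw_vpath_reset(vdev->vpaths[i].vp_handle);
 */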
3498 | |||
3499 | /* | ||
3500 | * __vxge_hw_vpath_mgmt_read | ||
3501 | * This routine reads the vpath_mgmt registers | ||
3502 | */ | ||
3503 | static enum vxge_hw_status | ||
3504 | __vxge_hw_vpath_mgmt_read( | ||
3505 | struct __vxge_hw_device *hldev, | ||
3506 | struct __vxge_hw_virtualpath *vpath) | ||
3507 | { | ||
3508 | u32 i, mtu = 0, max_pyld = 0; | ||
3509 | u64 val64; | ||
3510 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3511 | |||
3512 | for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { | ||
3513 | |||
3514 | val64 = readq(&vpath->vpmgmt_reg-> | ||
3515 | rxmac_cfg0_port_vpmgmt_clone[i]); | ||
3516 | max_pyld = | ||
3517 | (u32) | ||
3518 | VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN | ||
3519 | (val64); | ||
3520 | if (mtu < max_pyld) | ||
3521 | mtu = max_pyld; | ||
3522 | } | ||
3523 | |||
3524 | vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
3525 | |||
3526 | val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); | ||
3527 | |||
3528 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
3529 | if (val64 & vxge_mBIT(i)) | ||
3530 | vpath->vsport_number = i; | ||
3531 | } | ||
3532 | |||
3533 | val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); | ||
3534 | |||
3535 | if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) | ||
3536 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); | ||
3537 | else | ||
3538 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); | ||
3539 | |||
3540 | return status; | ||
3541 | } | ||
3542 | |||
3543 | /* | ||
3544 | * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed | ||
3545 | * This routine checks the vpath_rst_in_prog register to see if the | ||
3546 | * adapter has completed the reset process for the vpath | ||
3547 | */ | ||
3548 | enum vxge_hw_status | ||
3549 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | ||
3550 | { | ||
3551 | enum vxge_hw_status status; | ||
3552 | |||
3553 | status = __vxge_hw_device_register_poll( | ||
3554 | &vpath->hldev->common_reg->vpath_rst_in_prog, | ||
3555 | VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( | ||
3556 | 1 << (16 - vpath->vp_id)), | ||
3557 | vpath->hldev->config.device_poll_millis); | ||
3558 | |||
3559 | return status; | ||
3560 | } | ||
3561 | |||
3562 | /* | ||
3563 | * __vxge_hw_vpath_reset | ||
3564 | * This routine resets the vpath on the device | ||
3565 | */ | ||
3566 | enum vxge_hw_status | ||
3567 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3568 | { | ||
3569 | u64 val64; | ||
3570 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3571 | |||
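	/*
	 * Editor's note: the reset register appears to use MSB-first bit
	 * numbering, hence "1 << (16 - vp_id)"; only the upper 32 bits of
	 * the 64-bit value are actually written below.
	 */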
3572 | val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); | ||
3573 | |||
3574 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | ||
3575 | &hldev->common_reg->cmn_rsthdlr_cfg0); | ||
3576 | |||
3577 | return status; | ||
3578 | } | ||
3579 | |||
3580 | /* | ||
3581 | * __vxge_hw_vpath_sw_reset | ||
3582 | * This routine resets the vpath structures | ||
3583 | */ | ||
3584 | enum vxge_hw_status | ||
3585 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3586 | { | ||
3587 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3588 | struct __vxge_hw_virtualpath *vpath; | ||
3589 | |||
3590 | vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; | ||
3591 | |||
3592 | if (vpath->ringh) { | ||
3593 | status = __vxge_hw_ring_reset(vpath->ringh); | ||
3594 | if (status != VXGE_HW_OK) | ||
3595 | goto exit; | ||
3596 | } | ||
3597 | |||
3598 | if (vpath->fifoh) | ||
3599 | status = __vxge_hw_fifo_reset(vpath->fifoh); | ||
3600 | exit: | ||
3601 | return status; | ||
3602 | } | ||
3603 | |||
3604 | /* | ||
3605 | * __vxge_hw_vpath_prc_configure | ||
3606 | * This routine configures the prc registers of virtual path using the config | ||
3607 | * passed | ||
3608 | */ | ||
3609 | void | ||
3610 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3611 | { | ||
3612 | u64 val64; | ||
3613 | struct __vxge_hw_virtualpath *vpath; | ||
3614 | struct vxge_hw_vp_config *vp_config; | ||
3615 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3616 | |||
3617 | vpath = &hldev->virtual_paths[vp_id]; | ||
3618 | vp_reg = vpath->vp_reg; | ||
3619 | vp_config = vpath->vp_config; | ||
3620 | |||
3621 | if (vp_config->ring.enable == VXGE_HW_RING_DISABLE) | ||
3622 | return; | ||
3623 | |||
3624 | val64 = readq(&vp_reg->prc_cfg1); | ||
3625 | val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; | ||
3626 | writeq(val64, &vp_reg->prc_cfg1); | ||
3627 | |||
3628 | val64 = readq(&vpath->vp_reg->prc_cfg6); | ||
3629 | val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; | ||
3630 | writeq(val64, &vpath->vp_reg->prc_cfg6); | ||
3631 | |||
3632 | val64 = readq(&vp_reg->prc_cfg7); | ||
3633 | |||
3634 | if (vpath->vp_config->ring.scatter_mode != | ||
3635 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { | ||
3636 | |||
3637 | val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); | ||
3638 | |||
3639 | switch (vpath->vp_config->ring.scatter_mode) { | ||
3640 | case VXGE_HW_RING_SCATTER_MODE_A: | ||
3641 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
3642 | VXGE_HW_PRC_CFG7_SCATTER_MODE_A); | ||
3643 | break; | ||
3644 | case VXGE_HW_RING_SCATTER_MODE_B: | ||
3645 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
3646 | VXGE_HW_PRC_CFG7_SCATTER_MODE_B); | ||
3647 | break; | ||
3648 | case VXGE_HW_RING_SCATTER_MODE_C: | ||
3649 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
3650 | VXGE_HW_PRC_CFG7_SCATTER_MODE_C); | ||
3651 | break; | ||
3652 | } | ||
3653 | } | ||
3654 | |||
3655 | writeq(val64, &vp_reg->prc_cfg7); | ||
3656 | |||
3657 | writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( | ||
3658 | __vxge_hw_ring_first_block_address_get( | ||
3659 | vpath->ringh) >> 3), &vp_reg->prc_cfg5); | ||
3660 | |||
3661 | val64 = readq(&vp_reg->prc_cfg4); | ||
3662 | val64 |= VXGE_HW_PRC_CFG4_IN_SVC; | ||
3663 | val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); | ||
3664 | |||
3665 | val64 |= VXGE_HW_PRC_CFG4_RING_MODE( | ||
3666 | VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); | ||
3667 | |||
3668 | if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) | ||
3669 | val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; | ||
3670 | else | ||
3671 | val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; | ||
3672 | |||
3673 | writeq(val64, &vp_reg->prc_cfg4); | ||
3674 | return; | ||
3675 | } | ||
3676 | |||
3677 | /* | ||
3678 | * __vxge_hw_vpath_kdfc_configure | ||
3679 | * This routine configures the kdfc registers of virtual path using the | ||
3680 | * config passed | ||
3681 | */ | ||
3682 | enum vxge_hw_status | ||
3683 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3684 | { | ||
3685 | u64 val64; | ||
3686 | u64 vpath_stride; | ||
3687 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3688 | struct __vxge_hw_virtualpath *vpath; | ||
3689 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3690 | |||
3691 | vpath = &hldev->virtual_paths[vp_id]; | ||
3692 | vp_reg = vpath->vp_reg; | ||
3693 | status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); | ||
3694 | |||
3695 | if (status != VXGE_HW_OK) | ||
3696 | goto exit; | ||
3697 | |||
3698 | val64 = readq(&vp_reg->kdfc_drbl_triplet_total); | ||
3699 | |||
3700 | vpath->max_kdfc_db = | ||
3701 | (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( | ||
3702 | val64+1)/2; | ||
3703 | |||
3704 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
3705 | |||
3706 | vpath->max_nofl_db = vpath->max_kdfc_db; | ||
3707 | |||
3708 | if (vpath->max_nofl_db < | ||
3709 | ((vpath->vp_config->fifo.memblock_size / | ||
3710 | (vpath->vp_config->fifo.max_frags * | ||
3711 | sizeof(struct vxge_hw_fifo_txd))) * | ||
3712 | vpath->vp_config->fifo.fifo_blocks)) { | ||
3713 | |||
3714 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
3715 | } | ||
3716 | val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( | ||
3717 | (vpath->max_nofl_db*2)-1); | ||
3718 | } | ||
3719 | |||
3720 | writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); | ||
3721 | |||
3722 | writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, | ||
3723 | &vp_reg->kdfc_fifo_trpl_ctrl); | ||
3724 | |||
3725 | val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); | ||
3726 | |||
3727 | val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | | ||
3728 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); | ||
3729 | |||
3730 | val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( | ||
3731 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | | ||
3732 | #ifndef __BIG_ENDIAN | ||
3733 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | | ||
3734 | #endif | ||
3735 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); | ||
3736 | |||
3737 | writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); | ||
3738 | writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); | ||
3739 | wmb(); | ||
3740 | vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); | ||
3741 | |||
3742 | vpath->nofl_db = | ||
3743 | (struct __vxge_hw_non_offload_db_wrapper __iomem *) | ||
3744 | (hldev->kdfc + (vp_id * | ||
3745 | VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( | ||
3746 | vpath_stride))); | ||
3747 | exit: | ||
3748 | return status; | ||
3749 | } | ||
3750 | |||
3751 | /* | ||
3752 | * __vxge_hw_vpath_mac_configure | ||
3753 | * This routine configures the mac of virtual path using the config passed | ||
3754 | */ | ||
3755 | enum vxge_hw_status | ||
3756 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3757 | { | ||
3758 | u64 val64; | ||
3759 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3760 | struct __vxge_hw_virtualpath *vpath; | ||
3761 | struct vxge_hw_vp_config *vp_config; | ||
3762 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3763 | |||
3764 | vpath = &hldev->virtual_paths[vp_id]; | ||
3765 | vp_reg = vpath->vp_reg; | ||
3766 | vp_config = vpath->vp_config; | ||
3767 | |||
3768 | writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( | ||
3769 | vpath->vsport_number), &vp_reg->xmac_vsport_choice); | ||
3770 | |||
3771 | if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
3772 | |||
3773 | val64 = readq(&vp_reg->xmac_rpa_vcfg); | ||
3774 | |||
3775 | if (vp_config->rpa_strip_vlan_tag != | ||
3776 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { | ||
3777 | if (vp_config->rpa_strip_vlan_tag) | ||
3778 | val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | ||
3779 | else | ||
3780 | val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | ||
3781 | } | ||
3782 | |||
3783 | writeq(val64, &vp_reg->xmac_rpa_vcfg); | ||
3784 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
3785 | |||
3786 | if (vp_config->mtu != | ||
3787 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { | ||
3788 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
3789 | if ((vp_config->mtu + | ||
3790 | VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) | ||
3791 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | ||
3792 | vp_config->mtu + | ||
3793 | VXGE_HW_MAC_HEADER_MAX_SIZE); | ||
3794 | else | ||
3795 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | ||
3796 | vpath->max_mtu); | ||
3797 | } | ||
3798 | |||
3799 | writeq(val64, &vp_reg->rxmac_vcfg0); | ||
3800 | |||
3801 | val64 = readq(&vp_reg->rxmac_vcfg1); | ||
3802 | |||
3803 | val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | | ||
3804 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); | ||
3805 | |||
3806 | if (hldev->config.rth_it_type == | ||
3807 | VXGE_HW_RTH_IT_TYPE_MULTI_IT) { | ||
3808 | val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( | ||
3809 | 0x2) | | ||
3810 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; | ||
3811 | } | ||
3812 | |||
3813 | writeq(val64, &vp_reg->rxmac_vcfg1); | ||
3814 | } | ||
3815 | return status; | ||
3816 | } | ||
3817 | |||
3818 | /* | ||
3819 | * __vxge_hw_vpath_tim_configure | ||
3820 | * This routine configures the tim registers of virtual path using the config | ||
3821 | * passed | ||
3822 | */ | ||
3823 | enum vxge_hw_status | ||
3824 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3825 | { | ||
3826 | u64 val64; | ||
3827 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3828 | struct __vxge_hw_virtualpath *vpath; | ||
3829 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3830 | struct vxge_hw_vp_config *config; | ||
3831 | |||
3832 | vpath = &hldev->virtual_paths[vp_id]; | ||
3833 | vp_reg = vpath->vp_reg; | ||
3834 | config = vpath->vp_config; | ||
3835 | |||
3836 | writeq((u64)0, &vp_reg->tim_dest_addr); | ||
3837 | writeq((u64)0, &vp_reg->tim_vpath_map); | ||
3838 | writeq((u64)0, &vp_reg->tim_bitmap); | ||
3839 | writeq((u64)0, &vp_reg->tim_remap); | ||
3840 | |||
3841 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | ||
3842 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | ||
3843 | (vp_id * VXGE_HW_MAX_INTR_PER_VP) + | ||
3844 | VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); | ||
3845 | |||
3846 | val64 = readq(&vp_reg->tim_pci_cfg); | ||
3847 | val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; | ||
3848 | writeq(val64, &vp_reg->tim_pci_cfg); | ||
3849 | |||
3850 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
3851 | |||
3852 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3853 | |||
3854 | if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3855 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
3856 | 0x3ffffff); | ||
3857 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
3858 | config->tti.btimer_val); | ||
3859 | } | ||
3860 | |||
3861 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | ||
3862 | |||
3863 | if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3864 | if (config->tti.timer_ac_en) | ||
3865 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
3866 | else | ||
3867 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
3868 | } | ||
3869 | |||
3870 | if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3871 | if (config->tti.timer_ci_en) | ||
3872 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3873 | else | ||
3874 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3875 | } | ||
3876 | |||
3877 | if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3878 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | ||
3879 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | ||
3880 | config->tti.urange_a); | ||
3881 | } | ||
3882 | |||
3883 | if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3884 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | ||
3885 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | ||
3886 | config->tti.urange_b); | ||
3887 | } | ||
3888 | |||
3889 | if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3890 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | ||
3891 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | ||
3892 | config->tti.urange_c); | ||
3893 | } | ||
3894 | |||
3895 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3896 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3897 | |||
3898 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3899 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | ||
3900 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | ||
3901 | config->tti.uec_a); | ||
3902 | } | ||
3903 | |||
3904 | if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3905 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | ||
3906 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | ||
3907 | config->tti.uec_b); | ||
3908 | } | ||
3909 | |||
3910 | if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3911 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | ||
3912 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | ||
3913 | config->tti.uec_c); | ||
3914 | } | ||
3915 | |||
3916 | if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3917 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | ||
3918 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | ||
3919 | config->tti.uec_d); | ||
3920 | } | ||
3921 | |||
3922 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3923 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3924 | |||
3925 | if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3926 | if (config->tti.timer_ri_en) | ||
3927 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
3928 | else | ||
3929 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
3930 | } | ||
3931 | |||
3932 | if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3933 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
3934 | 0x3ffffff); | ||
3935 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
3936 | config->tti.rtimer_val); | ||
3937 | } | ||
3938 | |||
3939 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3940 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | ||
3941 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | ||
3942 | config->tti.util_sel); | ||
3943 | } | ||
3944 | |||
3945 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3946 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
3947 | 0x3ffffff); | ||
3948 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
3949 | config->tti.ltimer_val); | ||
3950 | } | ||
3951 | |||
3952 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3953 | } | ||
3954 | |||
3955 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
3956 | |||
3957 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
3958 | |||
3959 | if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3960 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
3961 | 0x3ffffff); | ||
3962 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
3963 | config->rti.btimer_val); | ||
3964 | } | ||
3965 | |||
3966 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | ||
3967 | |||
3968 | if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3969 | if (config->rti.timer_ac_en) | ||
3970 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
3971 | else | ||
3972 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
3973 | } | ||
3974 | |||
3975 | if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3976 | if (config->rti.timer_ci_en) | ||
3977 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3978 | else | ||
3979 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3980 | } | ||
3981 | |||
3982 | if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3983 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | ||
3984 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | ||
3985 | config->rti.urange_a); | ||
3986 | } | ||
3987 | |||
3988 | if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3989 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | ||
3990 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | ||
3991 | config->rti.urange_b); | ||
3992 | } | ||
3993 | |||
3994 | if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
3995 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | ||
3996 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | ||
3997 | config->rti.urange_c); | ||
3998 | } | ||
3999 | |||
4000 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4001 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4002 | |||
4003 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4004 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | ||
4005 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | ||
4006 | config->rti.uec_a); | ||
4007 | } | ||
4008 | |||
4009 | if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4010 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | ||
4011 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | ||
4012 | config->rti.uec_b); | ||
4013 | } | ||
4014 | |||
4015 | if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4016 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | ||
4017 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | ||
4018 | config->rti.uec_c); | ||
4019 | } | ||
4020 | |||
4021 | if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4022 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | ||
4023 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | ||
4024 | config->rti.uec_d); | ||
4025 | } | ||
4026 | |||
4027 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4028 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4029 | |||
4030 | if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4031 | if (config->rti.timer_ri_en) | ||
4032 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4033 | else | ||
4034 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4035 | } | ||
4036 | |||
4037 | if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4038 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4039 | 0x3ffffff); | ||
4040 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4041 | config->rti.rtimer_val); | ||
4042 | } | ||
4043 | |||
4044 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4045 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | ||
4046 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | ||
4047 | config->rti.util_sel); | ||
4048 | } | ||
4049 | |||
4050 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4051 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4052 | 0x3ffffff); | ||
4053 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4054 | config->rti.ltimer_val); | ||
4055 | } | ||
4056 | |||
4057 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4058 | } | ||
4059 | |||
4060 | val64 = 0; | ||
4061 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4062 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4063 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4064 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4065 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4066 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4067 | |||
4068 | return status; | ||
4069 | } | ||
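/*
 * Configuration sketch (editor's illustration): the tti/rti blocks
 * consumed above come from the per-vpath config supplied at open time;
 * any field left at VXGE_HW_USE_FLASH_DEFAULT keeps its flash value.
 * The numbers below are arbitrary examples, not recommendations.
 *
 *	config->tti.btimer_val = 250;
 *	config->tti.timer_ac_en = 1;
 *	config->rti.urange_a = 10;
 *	config->rti.uec_a = 1;
 */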
4070 | |||
4071 | /* | ||
4072 | * __vxge_hw_vpath_initialize | ||
4073 | * This routine is the final phase of init which initializes the | ||
4074 | * registers of the vpath using the configuration passed. | ||
4075 | */ | ||
4076 | enum vxge_hw_status | ||
4077 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4078 | { | ||
4079 | u64 val64; | ||
4080 | u32 val32; | ||
4081 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4082 | struct __vxge_hw_virtualpath *vpath; | ||
4083 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4084 | |||
4085 | vpath = &hldev->virtual_paths[vp_id]; | ||
4086 | |||
4087 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | ||
4088 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | ||
4089 | goto exit; | ||
4090 | } | ||
4091 | vp_reg = vpath->vp_reg; | ||
4092 | |||
4093 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | ||
4094 | |||
4095 | if (status != VXGE_HW_OK) | ||
4096 | goto exit; | ||
4097 | |||
4098 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | ||
4099 | |||
4100 | if (status != VXGE_HW_OK) | ||
4101 | goto exit; | ||
4102 | |||
4103 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | ||
4104 | |||
4105 | if (status != VXGE_HW_OK) | ||
4106 | goto exit; | ||
4107 | |||
4108 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | ||
4109 | |||
4110 | if (status != VXGE_HW_OK) | ||
4111 | goto exit; | ||
4112 | |||
4113 | writeq(0, &vp_reg->gendma_int); | ||
4114 | |||
4115 | val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); | ||
4116 | |||
4117 | /* Get MRRS value from device control */ | ||
4118 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | ||
4119 | |||
4120 | if (status == VXGE_HW_OK) { | ||
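		/*
		 * Editor's note: READRQ is bits 14:12 of the PCIe Device
		 * Control register and encodes the max read request size
		 * as 128 << n bytes; the same encoding is reused directly
		 * as the DMA fill threshold below.
		 */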
4121 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | ||
4122 | val64 &= | ||
4123 | ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); | ||
4124 | val64 |= | ||
4125 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); | ||
4126 | |||
4127 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; | ||
4128 | } | ||
4129 | |||
4130 | val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); | ||
4131 | val64 |= | ||
4132 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( | ||
4133 | VXGE_HW_MAX_PAYLOAD_SIZE_512); | ||
4134 | |||
4135 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; | ||
4136 | writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); | ||
4137 | |||
4138 | exit: | ||
4139 | return status; | ||
4140 | } | ||
4141 | |||
4142 | /* | ||
4143 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | ||
4144 | * This routine is the initial phase of init which resets the vpath and | ||
4145 | * initializes the software support structures. | ||
4146 | */ | ||
4147 | enum vxge_hw_status | ||
4148 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | ||
4149 | struct vxge_hw_vp_config *config) | ||
4150 | { | ||
4151 | struct __vxge_hw_virtualpath *vpath; | ||
4152 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4153 | |||
4154 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | ||
4155 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | ||
4156 | goto exit; | ||
4157 | } | ||
4158 | |||
4159 | vpath = &hldev->virtual_paths[vp_id]; | ||
4160 | |||
4161 | vpath->vp_id = vp_id; | ||
4162 | vpath->vp_open = VXGE_HW_VP_OPEN; | ||
4163 | vpath->hldev = hldev; | ||
4164 | vpath->vp_config = config; | ||
4165 | vpath->vp_reg = hldev->vpath_reg[vp_id]; | ||
4166 | vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id]; | ||
4167 | |||
4168 | __vxge_hw_vpath_reset(hldev, vp_id); | ||
4169 | |||
4170 | status = __vxge_hw_vpath_reset_check(vpath); | ||
4171 | |||
4172 | if (status != VXGE_HW_OK) { | ||
4173 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4174 | goto exit; | ||
4175 | } | ||
4176 | |||
4177 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); | ||
4178 | |||
4179 | if (status != VXGE_HW_OK) { | ||
4180 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4181 | goto exit; | ||
4182 | } | ||
4183 | |||
4184 | INIT_LIST_HEAD(&vpath->vpath_handles); | ||
4185 | |||
4186 | vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id]; | ||
4187 | |||
4188 | VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0, | ||
4189 | hldev->tim_int_mask1, vp_id); | ||
4190 | |||
4191 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | ||
4192 | |||
4193 | if (status != VXGE_HW_OK) | ||
4194 | __vxge_hw_vp_terminate(hldev, vp_id); | ||
4195 | exit: | ||
4196 | return status; | ||
4197 | } | ||
4198 | |||
4199 | /* | ||
4200 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4201 | * This routine closes all channels it opened and frees up memory | ||
4202 | */ | ||
4203 | void | ||
4204 | __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4205 | { | ||
4206 | struct __vxge_hw_virtualpath *vpath; | ||
4207 | |||
4208 | vpath = &hldev->virtual_paths[vp_id]; | ||
4209 | |||
4210 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4211 | goto exit; | ||
4212 | |||
4213 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4214 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4215 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4216 | |||
4217 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4218 | exit: | ||
4219 | return; | ||
4220 | } | ||
4221 | |||
4222 | /* | ||
4223 | * vxge_hw_vpath_mtu_set - Set MTU. | ||
4224 | * Set a new MTU value. For example, to use jumbo frames: | ||
4225 | * vxge_hw_vpath_mtu_set(my_device, 9600); | ||
4226 | */ | ||
4227 | enum vxge_hw_status | ||
4228 | vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu) | ||
4229 | { | ||
4230 | u64 val64; | ||
4231 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4232 | struct __vxge_hw_virtualpath *vpath; | ||
4233 | |||
4234 | if (vp == NULL) { | ||
4235 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
4236 | goto exit; | ||
4237 | } | ||
4238 | vpath = vp->vpath; | ||
4239 | |||
4240 | new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
4241 | |||
4242 | 	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) { | ||
4243 | 		status = VXGE_HW_ERR_INVALID_MTU_SIZE; | ||
		/* bail out instead of programming an invalid frame length */
		goto exit;
	}
4244 | |||
4245 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | ||
4246 | |||
4247 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
4248 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu); | ||
4249 | |||
4250 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | ||
4251 | |||
4252 | vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
4253 | |||
4254 | exit: | ||
4255 | return status; | ||
4256 | } | ||
4257 | |||
4258 | /* | ||
4259 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | ||
4260 | * This function is used to open access to a virtual path of an | ||
4261 | * adapter for offload and GRO operations. This function returns | ||
4262 | * synchronously. | ||
4263 | */ | ||
4264 | enum vxge_hw_status | ||
4265 | vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | ||
4266 | struct vxge_hw_vpath_attr *attr, | ||
4267 | struct __vxge_hw_vpath_handle **vpath_handle) | ||
4268 | { | ||
4269 | struct __vxge_hw_virtualpath *vpath; | ||
4270 | struct __vxge_hw_vpath_handle *vp; | ||
4271 | enum vxge_hw_status status; | ||
4272 | |||
4273 | vpath = &hldev->virtual_paths[attr->vp_id]; | ||
4274 | |||
4275 | if (vpath->vp_open == VXGE_HW_VP_OPEN) { | ||
4276 | status = VXGE_HW_ERR_INVALID_STATE; | ||
4277 | goto vpath_open_exit1; | ||
4278 | } | ||
4279 | |||
4280 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | ||
4281 | &hldev->config.vp_config[attr->vp_id]); | ||
4282 | |||
4283 | if (status != VXGE_HW_OK) | ||
4284 | goto vpath_open_exit1; | ||
4285 | |||
4286 | vp = (struct __vxge_hw_vpath_handle *) | ||
4287 | vmalloc(sizeof(struct __vxge_hw_vpath_handle)); | ||
4288 | if (vp == NULL) { | ||
4289 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4290 | goto vpath_open_exit2; | ||
4291 | } | ||
4292 | |||
4293 | memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle)); | ||
4294 | |||
4295 | vp->vpath = vpath; | ||
4296 | |||
4297 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
4298 | status = __vxge_hw_fifo_create(vp, &attr->fifo_attr); | ||
4299 | if (status != VXGE_HW_OK) | ||
4300 | goto vpath_open_exit6; | ||
4301 | } | ||
4302 | |||
4303 | if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
4304 | status = __vxge_hw_ring_create(vp, &attr->ring_attr); | ||
4305 | if (status != VXGE_HW_OK) | ||
4306 | goto vpath_open_exit7; | ||
4307 | |||
4308 | __vxge_hw_vpath_prc_configure(hldev, attr->vp_id); | ||
4309 | } | ||
4310 | |||
4311 | 	if (vpath->fifoh)	/* the fifo may be disabled in the config */ | ||
		vpath->fifoh->tx_intr_num =
4312 | (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) + | ||
4313 | VXGE_HW_VPATH_INTR_TX; | ||
4314 | |||
4315 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | ||
4316 | VXGE_HW_BLOCK_SIZE); | ||
4317 | |||
4318 | if (vpath->stats_block == NULL) { | ||
4319 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4320 | goto vpath_open_exit8; | ||
4321 | } | ||
4322 | |||
4323 | vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath-> | ||
4324 | stats_block->memblock; | ||
4325 | memset(vpath->hw_stats, 0, | ||
4326 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4327 | |||
4328 | hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] = | ||
4329 | vpath->hw_stats; | ||
4330 | |||
4331 | vpath->hw_stats_sav = | ||
4332 | &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id]; | ||
4333 | memset(vpath->hw_stats_sav, 0, | ||
4334 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4335 | |||
4336 | writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); | ||
4337 | |||
4338 | status = vxge_hw_vpath_stats_enable(vp); | ||
4339 | if (status != VXGE_HW_OK) | ||
4340 | goto vpath_open_exit8; | ||
4341 | |||
4342 | list_add(&vp->item, &vpath->vpath_handles); | ||
4343 | |||
4344 | hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id); | ||
4345 | |||
4346 | *vpath_handle = vp; | ||
4347 | |||
4348 | attr->fifo_attr.userdata = vpath->fifoh; | ||
4349 | attr->ring_attr.userdata = vpath->ringh; | ||
4350 | |||
4351 | return VXGE_HW_OK; | ||
4352 | |||
4353 | vpath_open_exit8: | ||
4354 | if (vpath->ringh != NULL) | ||
4355 | __vxge_hw_ring_delete(vp); | ||
4356 | vpath_open_exit7: | ||
4357 | if (vpath->fifoh != NULL) | ||
4358 | __vxge_hw_fifo_delete(vp); | ||
4359 | vpath_open_exit6: | ||
4360 | vfree(vp); | ||
4361 | vpath_open_exit2: | ||
4362 | __vxge_hw_vp_terminate(hldev, attr->vp_id); | ||
4363 | vpath_open_exit1: | ||
4364 | |||
4365 | return status; | ||
4366 | } | ||
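/*
 * Usage sketch (editor's illustration): open vpath 0 and close it
 * again. The attribute fields shown are assumptions based on how they
 * are consumed in this function.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	if (vxge_hw_vpath_open(hldev, &attr, &vp) == VXGE_HW_OK)
 *		vxge_hw_vpath_close(vp);
 */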
4367 | |||
4368 | /** | ||
4369 | * vxge_hw_vpath_rx_doorbell_init - Initialize the rx doorbell of a vpath | ||
4370 | * opened earlier | ||
4371 | * @vp: Handle got from previous vpath open | ||
4372 | * | ||
4373 | * This function posts the RxD memory size to the receive doorbell and | ||
4374 | * derives the initial rxds_limit for the ring. | ||
4375 | */ | ||
4376 | void | ||
4377 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | ||
4378 | { | ||
4379 | struct __vxge_hw_virtualpath *vpath = NULL; | ||
4380 | u64 new_count, val64, val164; | ||
4381 | struct __vxge_hw_ring *ring; | ||
4382 | |||
4383 | vpath = vp->vpath; | ||
4384 | ring = vpath->ringh; | ||
4385 | |||
4386 | new_count = readq(&vpath->vp_reg->rxdmem_size); | ||
4387 | new_count &= 0x1fff; | ||
4388 | val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count)); | ||
4389 | |||
4390 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), | ||
4391 | &vpath->vp_reg->prc_rxd_doorbell); | ||
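	/* read back to flush the posted doorbell write (editor's note) */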
4392 | readl(&vpath->vp_reg->prc_rxd_doorbell); | ||
4393 | |||
4394 | val164 /= 2; | ||
4395 | val64 = readq(&vpath->vp_reg->prc_cfg6); | ||
4396 | val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64); | ||
4397 | val64 &= 0x1ff; | ||
4398 | |||
4399 | /* | ||
4400 | * Each RxD is of 4 qwords | ||
4401 | */ | ||
4402 | new_count -= (val64 + 1); | ||
4403 | val64 = min(val164, new_count) / 4; | ||
4404 | |||
4405 | ring->rxds_limit = min(ring->rxds_limit, val64); | ||
4406 | if (ring->rxds_limit < 4) | ||
4407 | ring->rxds_limit = 4; | ||
4408 | } | ||
4409 | |||
4410 | /* | ||
4411 | * vxge_hw_vpath_close - Close the handle got from a previous vpath open | ||
4412 | * This function is used to close access to a virtual path opened | ||
4413 | * earlier. | ||
4414 | */ | ||
4415 | enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) | ||
4416 | { | ||
4417 | struct __vxge_hw_virtualpath *vpath = NULL; | ||
4418 | struct __vxge_hw_device *devh = NULL; | ||
4419 | u32 vp_id = vp->vpath->vp_id; | ||
4420 | u32 is_empty = TRUE; | ||
4421 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4422 | |||
4423 | vpath = vp->vpath; | ||
4424 | devh = vpath->hldev; | ||
4425 | |||
4426 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4427 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4428 | goto vpath_close_exit; | ||
4429 | } | ||
4430 | |||
4431 | list_del(&vp->item); | ||
4432 | |||
4433 | if (!list_empty(&vpath->vpath_handles)) { | ||
4434 | list_add(&vp->item, &vpath->vpath_handles); | ||
4435 | is_empty = FALSE; | ||
4436 | } | ||
4437 | |||
4438 | if (!is_empty) { | ||
4439 | status = VXGE_HW_FAIL; | ||
4440 | goto vpath_close_exit; | ||
4441 | } | ||
4442 | |||
4443 | devh->vpaths_deployed &= ~vxge_mBIT(vp_id); | ||
4444 | |||
4445 | if (vpath->ringh != NULL) | ||
4446 | __vxge_hw_ring_delete(vp); | ||
4447 | |||
4448 | if (vpath->fifoh != NULL) | ||
4449 | __vxge_hw_fifo_delete(vp); | ||
4450 | |||
4451 | if (vpath->stats_block != NULL) | ||
4452 | __vxge_hw_blockpool_block_free(devh, vpath->stats_block); | ||
4453 | |||
4454 | vfree(vp); | ||
4455 | |||
4456 | __vxge_hw_vp_terminate(devh, vp_id); | ||
4457 | |||
4458 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4459 | |||
4460 | vpath_close_exit: | ||
4461 | return status; | ||
4462 | } | ||
4463 | |||
4464 | /* | ||
4465 | * vxge_hw_vpath_reset - Resets vpath | ||
4466 | * This function is used to request a reset of vpath | ||
4467 | */ | ||
4468 | enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp) | ||
4469 | { | ||
4470 | enum vxge_hw_status status; | ||
4471 | u32 vp_id; | ||
4472 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | ||
4473 | |||
4474 | vp_id = vpath->vp_id; | ||
4475 | |||
4476 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4477 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4478 | goto exit; | ||
4479 | } | ||
4480 | |||
4481 | status = __vxge_hw_vpath_reset(vpath->hldev, vp_id); | ||
4482 | if (status == VXGE_HW_OK) | ||
4483 | vpath->sw_stats->soft_reset_cnt++; | ||
4484 | exit: | ||
4485 | return status; | ||
4486 | } | ||
4487 | |||
4488 | /* | ||
4489 | * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize. | ||
4490 | * This function polls for the vpath reset completion and re-initializes | ||
4491 | * the vpath. | ||
4492 | */ | ||
4493 | enum vxge_hw_status | ||
4494 | vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) | ||
4495 | { | ||
4496 | struct __vxge_hw_virtualpath *vpath = NULL; | ||
4497 | enum vxge_hw_status status; | ||
4498 | struct __vxge_hw_device *hldev; | ||
4499 | u32 vp_id; | ||
4500 | |||
4501 | vp_id = vp->vpath->vp_id; | ||
4502 | vpath = vp->vpath; | ||
4503 | hldev = vpath->hldev; | ||
4504 | |||
4505 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4506 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4507 | goto exit; | ||
4508 | } | ||
4509 | |||
4510 | status = __vxge_hw_vpath_reset_check(vpath); | ||
4511 | if (status != VXGE_HW_OK) | ||
4512 | goto exit; | ||
4513 | |||
4514 | status = __vxge_hw_vpath_sw_reset(hldev, vp_id); | ||
4515 | if (status != VXGE_HW_OK) | ||
4516 | goto exit; | ||
4517 | |||
4518 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | ||
4519 | if (status != VXGE_HW_OK) | ||
4520 | goto exit; | ||
4521 | |||
4522 | if (vpath->ringh != NULL) | ||
4523 | __vxge_hw_vpath_prc_configure(hldev, vp_id); | ||
4524 | |||
4525 | memset(vpath->hw_stats, 0, | ||
4526 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4527 | |||
4528 | memset(vpath->hw_stats_sav, 0, | ||
4529 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4530 | |||
4531 | writeq(vpath->stats_block->dma_addr, | ||
4532 | &vpath->vp_reg->stats_cfg); | ||
4533 | |||
4534 | status = vxge_hw_vpath_stats_enable(vp); | ||
4535 | |||
4536 | exit: | ||
4537 | return status; | ||
4538 | } | ||
4539 | |||
4540 | /* | ||
4541 | * vxge_hw_vpath_enable - Enable vpath. | ||
4542 | * This routine clears the vpath reset thereby enabling a vpath | ||
4543 | * to start forwarding frames and generating interrupts. | ||
4544 | */ | ||
4545 | void | ||
4546 | vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | ||
4547 | { | ||
4548 | struct __vxge_hw_device *hldev; | ||
4549 | u64 val64; | ||
4550 | |||
4551 | hldev = vp->vpath->hldev; | ||
4552 | |||
4553 | val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( | ||
4554 | 1 << (16 - vp->vpath->vp_id)); | ||
4555 | |||
4556 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | ||
4557 | &hldev->common_reg->cmn_rsthdlr_cfg1); | ||
4558 | } | ||
4559 | |||
4560 | /* | ||
4561 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | ||
4562 | * Enable the DMA vpath statistics. The function is to be called to re-enable | ||
4563 | * the adapter to update stats into the host memory. | ||
4564 | */ | ||
4565 | enum vxge_hw_status | ||
4566 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4567 | { | ||
4568 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4569 | struct __vxge_hw_virtualpath *vpath; | ||
4570 | |||
4571 | vpath = vp->vpath; | ||
4572 | |||
4573 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4574 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4575 | goto exit; | ||
4576 | } | ||
4577 | |||
4578 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4579 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4580 | |||
4581 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4582 | exit: | ||
4583 | return status; | ||
4584 | } | ||
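Because each call first snapshots the live counters into hw_stats_sav, a caller can derive per-interval deltas. A sketch using a field read later in this file; error handling elided:

    struct __vxge_hw_virtualpath *vpath = vp->vpath;
    u32 mwr_delta = 0;

    if (vxge_hw_vpath_stats_enable(vp) == VXGE_HW_OK)
    	/* PCI memory writes issued since the previous snapshot */
    	mwr_delta = vpath->hw_stats->ini_num_mwr_sent -
    		    vpath->hw_stats_sav->ini_num_mwr_sent;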
4585 | |||
4586 | /* | ||
4587 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
4588 | * and offset and perform an operation | ||
4589 | */ | ||
4590 | enum vxge_hw_status | ||
4591 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
4592 | u32 operation, u32 offset, u64 *stat) | ||
4593 | { | ||
4594 | u64 val64; | ||
4595 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4596 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4597 | |||
4598 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4599 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4600 | goto vpath_stats_access_exit; | ||
4601 | } | ||
4602 | |||
4603 | vp_reg = vpath->vp_reg; | ||
4604 | |||
4605 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
4606 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
4607 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
4608 | |||
4609 | status = __vxge_hw_pio_mem_write64(val64, | ||
4610 | &vp_reg->xmac_stats_access_cmd, | ||
4611 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
4612 | vpath->hldev->config.device_poll_millis); | ||
4613 | |||
4614 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
4615 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
4616 | else | ||
4617 | *stat = 0; | ||
4618 | |||
4619 | vpath_stats_access_exit: | ||
4620 | return status; | ||
4621 | } | ||
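For example, a single 64-bit XMAC counter could be fetched through this accessor (VXGE_HW_STATS_OP_READ is the only operation used in this file); a sketch:

    u64 stat = 0;

    if (__vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
    		VXGE_HW_STATS_VPATH_TX_OFFSET, &stat) == VXGE_HW_OK) {
    	/* stat now holds the first TX counter word */
    }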
4622 | |||
4623 | /* | ||
4624 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
4625 | */ | ||
4626 | enum vxge_hw_status | ||
4627 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
4628 | struct __vxge_hw_virtualpath *vpath, | ||
4629 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
4630 | { | ||
4631 | u64 *val64; | ||
4632 | int i; | ||
4633 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
4634 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4635 | |||
4636 | val64 = (u64 *) vpath_tx_stats; | ||
4637 | |||
4638 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4639 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4640 | goto exit; | ||
4641 | } | ||
4642 | |||
4643 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
4644 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4645 | VXGE_HW_STATS_OP_READ, | ||
4646 | offset, val64); | ||
4647 | if (status != VXGE_HW_OK) | ||
4648 | goto exit; | ||
4649 | offset++; | ||
4650 | val64++; | ||
4651 | } | ||
4652 | exit: | ||
4653 | return status; | ||
4654 | } | ||
4655 | |||
4656 | /* | ||
4657 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath. Note: unlike the TX getter above, offsets here are byte offsets, converted to word offsets (offset >> 3) when issuing each command. | ||
4658 | */ | ||
4659 | enum vxge_hw_status | ||
4660 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
4661 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
4662 | { | ||
4663 | u64 *val64; | ||
4664 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4665 | int i; | ||
4666 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
4667 | val64 = (u64 *) vpath_rx_stats; | ||
4668 | |||
4669 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4670 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4671 | goto exit; | ||
4672 | } | ||
4673 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
4674 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4675 | VXGE_HW_STATS_OP_READ, | ||
4676 | offset >> 3, val64); | ||
4677 | if (status != VXGE_HW_OK) | ||
4678 | goto exit; | ||
4679 | |||
4680 | offset += 8; | ||
4681 | val64++; | ||
4682 | } | ||
4683 | exit: | ||
4684 | return status; | ||
4685 | } | ||
4686 | |||
4687 | /* | ||
4688 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
4689 | */ | ||
4690 | enum vxge_hw_status __vxge_hw_vpath_stats_get( | ||
4691 | struct __vxge_hw_virtualpath *vpath, | ||
4692 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
4693 | { | ||
4694 | u64 val64; | ||
4695 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4696 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4697 | |||
4698 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4699 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4700 | goto exit; | ||
4701 | } | ||
4702 | vp_reg = vpath->vp_reg; | ||
4703 | |||
4704 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
4705 | hw_stats->ini_num_mwr_sent = | ||
4706 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
4707 | |||
4708 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
4709 | hw_stats->ini_num_mrd_sent = | ||
4710 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
4711 | |||
4712 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
4713 | hw_stats->ini_num_cpl_rcvd = | ||
4714 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
4715 | |||
4716 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
4717 | hw_stats->ini_num_mwr_byte_sent = | ||
4718 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
4719 | |||
4720 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
4721 | hw_stats->ini_num_cpl_byte_rcvd = | ||
4722 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
4723 | |||
4724 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
4725 | hw_stats->wrcrdtarb_xoff = | ||
4726 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
4727 | |||
4728 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
4729 | hw_stats->rdcrdtarb_xoff = | ||
4730 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
4731 | |||
4732 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4733 | hw_stats->vpath_genstats_count0 = | ||
4734 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
4735 | val64); | ||
4736 | |||
4737 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4738 | hw_stats->vpath_genstats_count1 = | ||
4739 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
4740 | val64); | ||
4741 | |||
4742 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4743 | hw_stats->vpath_genstats_count2 = | ||
4744 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
4745 | val64); | ||
4746 | |||
4747 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4748 | hw_stats->vpath_genstats_count3 = | ||
4749 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
4750 | val64); | ||
4751 | |||
4752 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
4753 | hw_stats->vpath_genstats_count4 = | ||
4754 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
4755 | val64); | ||
4756 | |||
4757 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
4758 | hw_stats->vpath_genstats_count5 = | ||
4759 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
4760 | val64); | ||
4761 | |||
4762 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
4763 | if (status != VXGE_HW_OK) | ||
4764 | goto exit; | ||
4765 | |||
4766 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
4767 | if (status != VXGE_HW_OK) | ||
4768 | goto exit; | ||
4769 | |||
4770 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4771 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
4772 | |||
4773 | hw_stats->prog_event_vnum0 = | ||
4774 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
4775 | |||
4776 | hw_stats->prog_event_vnum1 = | ||
4777 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
4778 | |||
4779 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4780 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
4781 | |||
4782 | hw_stats->prog_event_vnum2 = | ||
4783 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
4784 | |||
4785 | hw_stats->prog_event_vnum3 = | ||
4786 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
4787 | |||
4788 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
4789 | hw_stats->rx_multi_cast_frame_discard = | ||
4790 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
4791 | |||
4792 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
4793 | hw_stats->rx_frm_transferred = | ||
4794 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
4795 | |||
4796 | val64 = readq(&vp_reg->rxd_returned); | ||
4797 | hw_stats->rxd_returned = | ||
4798 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
4799 | |||
4800 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
4801 | hw_stats->rx_mpa_len_fail_frms = | ||
4802 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
4803 | hw_stats->rx_mpa_mrk_fail_frms = | ||
4804 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
4805 | hw_stats->rx_mpa_crc_fail_frms = | ||
4806 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
4807 | |||
4808 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
4809 | hw_stats->rx_permitted_frms = | ||
4810 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
4811 | hw_stats->rx_vp_reset_discarded_frms = | ||
4812 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
4813 | hw_stats->rx_wol_frms = | ||
4814 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
4815 | |||
4816 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
4817 | hw_stats->tx_vp_reset_discarded_frms = | ||
4818 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
4819 | val64); | ||
4820 | exit: | ||
4821 | return status; | ||
4822 | } | ||
4823 | |||
4824 | /* | ||
4825 | * __vxge_hw_blockpool_create - Create block pool | ||
4826 | */ | ||
4827 | |||
4828 | enum vxge_hw_status | ||
4829 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
4830 | struct __vxge_hw_blockpool *blockpool, | ||
4831 | u32 pool_size, | ||
4832 | u32 pool_max) | ||
4833 | { | ||
4834 | u32 i; | ||
4835 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4836 | void *memblock; | ||
4837 | dma_addr_t dma_addr; | ||
4838 | struct pci_dev *dma_handle; | ||
4839 | struct pci_dev *acc_handle; | ||
4840 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4841 | |||
4842 | if (blockpool == NULL) { | ||
4843 | status = VXGE_HW_FAIL; | ||
4844 | goto blockpool_create_exit; | ||
4845 | } | ||
4846 | |||
4847 | blockpool->hldev = hldev; | ||
4848 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
4849 | blockpool->pool_size = 0; | ||
4850 | blockpool->pool_max = pool_max; | ||
4851 | blockpool->req_out = 0; | ||
4852 | |||
4853 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
4854 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
4855 | |||
4856 | for (i = 0; i < pool_size + pool_max; i++) { | ||
4857 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4858 | GFP_KERNEL); | ||
4859 | if (entry == NULL) { | ||
4860 | __vxge_hw_blockpool_destroy(blockpool); | ||
4861 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4862 | goto blockpool_create_exit; | ||
4863 | } | ||
4864 | list_add(&entry->item, &blockpool->free_entry_list); | ||
4865 | } | ||
4866 | |||
4867 | for (i = 0; i < pool_size; i++) { | ||
4868 | |||
4869 | memblock = vxge_os_dma_malloc( | ||
4870 | hldev->pdev, | ||
4871 | VXGE_HW_BLOCK_SIZE, | ||
4872 | &dma_handle, | ||
4873 | &acc_handle); | ||
4874 | |||
4875 | if (memblock == NULL) { | ||
4876 | __vxge_hw_blockpool_destroy(blockpool); | ||
4877 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4878 | goto blockpool_create_exit; | ||
4879 | } | ||
4880 | |||
4881 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
4882 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
4883 | |||
4884 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
4885 | dma_addr))) { | ||
4886 | |||
4887 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
4888 | __vxge_hw_blockpool_destroy(blockpool); | ||
4889 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4890 | goto blockpool_create_exit; | ||
4891 | } | ||
4892 | |||
4893 | if (!list_empty(&blockpool->free_entry_list)) | ||
4894 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4895 | list_first_entry(&blockpool->free_entry_list, | ||
4896 | struct __vxge_hw_blockpool_entry, | ||
4897 | item); | ||
4898 | |||
4899 | if (entry == NULL) | ||
4900 | entry = | ||
4901 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4902 | GFP_KERNEL); | ||
4903 | if (entry != NULL) { | ||
4904 | list_del(&entry->item); | ||
4905 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
4906 | entry->memblock = memblock; | ||
4907 | entry->dma_addr = dma_addr; | ||
4908 | entry->acc_handle = acc_handle; | ||
4909 | entry->dma_handle = dma_handle; | ||
4910 | list_add(&entry->item, | ||
4911 | &blockpool->free_block_list); | ||
4912 | blockpool->pool_size++; | ||
4913 | } else { | ||
4914 | __vxge_hw_blockpool_destroy(blockpool); | ||
4915 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4916 | goto blockpool_create_exit; | ||
4917 | } | ||
4918 | } | ||
4919 | |||
4920 | blockpool_create_exit: | ||
4921 | return status; | ||
4922 | } | ||
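A typical lifecycle as wired up during device initialization elsewhere in the driver; the sizes come from the dma_blockpool_* fields of struct vxge_hw_device_config. A sketch, assuming an initialized hldev:

    enum vxge_hw_status status;

    status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
    				    hldev->config.dma_blockpool_initial,
    				    hldev->config.dma_blockpool_max);
    if (status != VXGE_HW_OK)
    	return status;

    /* ... the pool now serves __vxge_hw_blockpool_malloc()/_free() ... */

    __vxge_hw_blockpool_destroy(&hldev->block_pool);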
4923 | |||
4924 | /* | ||
4925 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
4926 | */ | ||
4927 | |||
4928 | void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
4929 | { | ||
4930 | |||
4931 | struct __vxge_hw_device *hldev; | ||
4932 | struct list_head *p, *n; | ||
4934 | |||
4935 | if (blockpool == NULL) { | ||
4937 | goto exit; | ||
4938 | } | ||
4939 | |||
4940 | hldev = blockpool->hldev; | ||
4941 | |||
4942 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4943 | |||
4944 | pci_unmap_single(hldev->pdev, | ||
4945 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4946 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4947 | PCI_DMA_BIDIRECTIONAL); | ||
4948 | |||
4949 | vxge_os_dma_free(hldev->pdev, | ||
4950 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4951 | &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); | ||
4952 | |||
4953 | list_del( | ||
4954 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4955 | kfree(p); | ||
4956 | blockpool->pool_size--; | ||
4957 | } | ||
4958 | |||
4959 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
4960 | list_del( | ||
4961 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4962 | kfree((void *)p); | ||
4963 | } | ||
4965 | exit: | ||
4966 | return; | ||
4967 | } | ||
4968 | |||
4969 | /* | ||
4970 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | ||
4971 | */ | ||
4972 | static | ||
4973 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
4974 | { | ||
4975 | u32 nreq = 0, i; | ||
4976 | |||
4977 | if ((blockpool->pool_size + blockpool->req_out) < | ||
4978 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
4979 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
4980 | blockpool->req_out += nreq; | ||
4981 | } | ||
4982 | |||
4983 | for (i = 0; i < nreq; i++) | ||
4984 | vxge_os_dma_malloc_async( | ||
4985 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4986 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | ||
4987 | } | ||
4988 | |||
4989 | /* | ||
4990 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | ||
4991 | */ | ||
4992 | static | ||
4993 | void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
4994 | { | ||
4995 | struct list_head *p, *n; | ||
4996 | |||
4997 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4998 | |||
4999 | if (blockpool->pool_size < blockpool->pool_max) | ||
5000 | break; | ||
5001 | |||
5002 | pci_unmap_single( | ||
5003 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
5004 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
5005 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
5006 | PCI_DMA_BIDIRECTIONAL); | ||
5007 | |||
5008 | vxge_os_dma_free( | ||
5009 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
5010 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
5011 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
5012 | |||
5013 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
5014 | |||
5015 | list_add(p, &blockpool->free_entry_list); | ||
5016 | |||
5017 | blockpool->pool_size--; | ||
5018 | |||
5019 | } | ||
5020 | } | ||
5021 | |||
5022 | /* | ||
5023 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
5024 | * Adds a block to block pool | ||
5025 | */ | ||
5026 | void vxge_hw_blockpool_block_add( | ||
5027 | struct __vxge_hw_device *devh, | ||
5028 | void *block_addr, | ||
5029 | u32 length, | ||
5030 | struct pci_dev *dma_h, | ||
5031 | struct pci_dev *acc_handle) | ||
5032 | { | ||
5033 | struct __vxge_hw_blockpool *blockpool; | ||
5034 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5035 | dma_addr_t dma_addr; | ||
5036 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5038 | |||
5039 | blockpool = &devh->block_pool; | ||
5040 | |||
5041 | if (block_addr == NULL) { | ||
5042 | blockpool->req_out--; | ||
5043 | status = VXGE_HW_FAIL; | ||
5044 | goto exit; | ||
5045 | } | ||
5046 | |||
5047 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | ||
5048 | PCI_DMA_BIDIRECTIONAL); | ||
5049 | |||
5050 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | ||
5051 | |||
5052 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
5053 | blockpool->req_out--; | ||
5054 | status = VXGE_HW_FAIL; | ||
5055 | goto exit; | ||
5056 | } | ||
5057 | |||
5058 | |||
5059 | if (!list_empty(&blockpool->free_entry_list)) | ||
5060 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5061 | list_first_entry(&blockpool->free_entry_list, | ||
5062 | struct __vxge_hw_blockpool_entry, | ||
5063 | item); | ||
5064 | |||
5065 | if (entry == NULL) | ||
5066 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5067 | vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | ||
5068 | else | ||
5069 | list_del(&entry->item); | ||
5070 | |||
5071 | if (entry != NULL) { | ||
5072 | entry->length = length; | ||
5073 | entry->memblock = block_addr; | ||
5074 | entry->dma_addr = dma_addr; | ||
5075 | entry->acc_handle = acc_handle; | ||
5076 | entry->dma_handle = dma_h; | ||
5077 | list_add(&entry->item, &blockpool->free_block_list); | ||
5078 | blockpool->pool_size++; | ||
5079 | status = VXGE_HW_OK; | ||
5080 | } else | ||
5081 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5082 | |||
5083 | blockpool->req_out--; | ||
5084 | |||
5086 | exit: | ||
5087 | return; | ||
5088 | } | ||
5089 | |||
5090 | /* | ||
5091 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | ||
5092 | * Allocates a block of memory of given size, either from block pool | ||
5093 | * or by calling vxge_os_dma_malloc() | ||
5094 | */ | ||
5095 | void * | ||
5096 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
5097 | struct vxge_hw_mempool_dma *dma_object) | ||
5098 | { | ||
5099 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5100 | struct __vxge_hw_blockpool *blockpool; | ||
5101 | void *memblock = NULL; | ||
5102 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5103 | |||
5104 | blockpool = &devh->block_pool; | ||
5105 | |||
5106 | if (size != blockpool->block_size) { | ||
5107 | |||
5108 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
5109 | &dma_object->handle, | ||
5110 | &dma_object->acc_handle); | ||
5111 | |||
5112 | if (memblock == NULL) { | ||
5113 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5114 | goto exit; | ||
5115 | } | ||
5116 | |||
5117 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | ||
5118 | PCI_DMA_BIDIRECTIONAL); | ||
5119 | |||
5120 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
5121 | dma_object->addr))) { | ||
5122 | vxge_os_dma_free(devh->pdev, memblock, | ||
5123 | &dma_object->acc_handle); | ||
5124 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5125 | goto exit; | ||
5126 | } | ||
5127 | |||
5128 | } else { | ||
5129 | |||
5130 | if (!list_empty(&blockpool->free_block_list)) | ||
5131 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5132 | list_first_entry(&blockpool->free_block_list, | ||
5133 | struct __vxge_hw_blockpool_entry, | ||
5134 | item); | ||
5135 | |||
5136 | if (entry != NULL) { | ||
5137 | list_del(&entry->item); | ||
5138 | dma_object->addr = entry->dma_addr; | ||
5139 | dma_object->handle = entry->dma_handle; | ||
5140 | dma_object->acc_handle = entry->acc_handle; | ||
5141 | memblock = entry->memblock; | ||
5142 | |||
5143 | list_add(&entry->item, | ||
5144 | &blockpool->free_entry_list); | ||
5145 | blockpool->pool_size--; | ||
5146 | } | ||
5147 | |||
5148 | if (memblock != NULL) | ||
5149 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5150 | } | ||
5151 | exit: | ||
5152 | return memblock; | ||
5153 | } | ||
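Allocation and release are symmetric; requesting exactly VXGE_HW_BLOCK_SIZE takes the pooled path, while any other size falls through to vxge_os_dma_malloc(). A sketch, assuming a vxge_hw_mempool_dma descriptor on the caller's side:

    struct vxge_hw_mempool_dma dma_object;
    void *block;

    block = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE, &dma_object);
    if (block != NULL) {
    	/* ... device DMA via dma_object.addr, CPU access via block ... */
    	__vxge_hw_blockpool_free(devh, block, VXGE_HW_BLOCK_SIZE,
    				 &dma_object);
    }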
5154 | |||
5155 | /* | ||
5156 | * __vxge_hw_blockpool_free - Frees the memory allocated with | ||
5157 | * __vxge_hw_blockpool_malloc | ||
5158 | */ | ||
5159 | void | ||
5160 | __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, | ||
5161 | void *memblock, u32 size, | ||
5162 | struct vxge_hw_mempool_dma *dma_object) | ||
5163 | { | ||
5164 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5165 | struct __vxge_hw_blockpool *blockpool; | ||
5166 | enum vxge_hw_status status = VXGE_HW_OK; | ||
5167 | |||
5168 | blockpool = &devh->block_pool; | ||
5169 | |||
5170 | if (size != blockpool->block_size) { | ||
5171 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
5172 | PCI_DMA_BIDIRECTIONAL); | ||
5173 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
5174 | } else { | ||
5175 | |||
5176 | if (!list_empty(&blockpool->free_entry_list)) | ||
5177 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5178 | list_first_entry(&blockpool->free_entry_list, | ||
5179 | struct __vxge_hw_blockpool_entry, | ||
5180 | item); | ||
5181 | |||
5182 | if (entry == NULL) | ||
5183 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5184 | vmalloc(sizeof( | ||
5185 | struct __vxge_hw_blockpool_entry)); | ||
5186 | else | ||
5187 | list_del(&entry->item); | ||
5188 | |||
5189 | if (entry != NULL) { | ||
5190 | entry->length = size; | ||
5191 | entry->memblock = memblock; | ||
5192 | entry->dma_addr = dma_object->addr; | ||
5193 | entry->acc_handle = dma_object->acc_handle; | ||
5194 | entry->dma_handle = dma_object->handle; | ||
5195 | list_add(&entry->item, | ||
5196 | &blockpool->free_block_list); | ||
5197 | blockpool->pool_size++; | ||
5198 | status = VXGE_HW_OK; | ||
5199 | } else | ||
5200 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5201 | |||
5202 | if (status == VXGE_HW_OK) | ||
5203 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5204 | } | ||
5205 | |||
5206 | return; | ||
5207 | } | ||
5208 | |||
5209 | /* | ||
5210 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
5211 | * This function allocates a block from the block pool, if one is available | ||
5212 | */ | ||
5213 | struct __vxge_hw_blockpool_entry * | ||
5214 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
5215 | { | ||
5216 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5217 | struct __vxge_hw_blockpool *blockpool; | ||
5218 | |||
5219 | blockpool = &devh->block_pool; | ||
5220 | |||
5221 | if (size == blockpool->block_size) { | ||
5222 | |||
5223 | if (!list_empty(&blockpool->free_block_list)) | ||
5224 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5225 | list_first_entry(&blockpool->free_block_list, | ||
5226 | struct __vxge_hw_blockpool_entry, | ||
5227 | item); | ||
5228 | |||
5229 | if (entry != NULL) { | ||
5230 | list_del(&entry->item); | ||
5231 | blockpool->pool_size--; | ||
5232 | } | ||
5233 | } | ||
5234 | |||
5235 | if (entry != NULL) | ||
5236 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5237 | |||
5238 | return entry; | ||
5239 | } | ||
5240 | |||
5241 | /* | ||
5242 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
5243 | * @devh: Hal device | ||
5244 | * @entry: Entry of block to be freed | ||
5245 | * | ||
5246 | * This function frees a block from block pool | ||
5247 | */ | ||
5248 | void | ||
5249 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
5250 | struct __vxge_hw_blockpool_entry *entry) | ||
5251 | { | ||
5252 | struct __vxge_hw_blockpool *blockpool; | ||
5253 | |||
5254 | blockpool = &devh->block_pool; | ||
5255 | |||
5256 | if (entry->length == blockpool->block_size) { | ||
5257 | list_add(&entry->item, &blockpool->free_block_list); | ||
5258 | blockpool->pool_size++; | ||
5259 | } | ||
5260 | |||
5261 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5262 | |||
5263 | return; | ||
5264 | } | ||
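The entry-based pair behaves the same way for fixed-size blocks; a sketch:

    struct __vxge_hw_blockpool_entry *entry;

    entry = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
    if (entry != NULL) {
    	/* entry->memblock / entry->dma_addr describe the block */
    	__vxge_hw_blockpool_block_free(devh, entry);
    }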
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h new file mode 100644 index 00000000000..afbdf6f4d22 --- /dev/null +++ b/drivers/net/vxge/vxge-config.h | |||
@@ -0,0 +1,2259 @@ | |||
1 | /****************************************************************************** | ||
2 | * This software may be used and distributed according to the terms of | ||
3 | * the GNU General Public License (GPL), incorporated herein by reference. | ||
4 | * Drivers based on or derived from this code fall under the GPL and must | ||
5 | * retain the authorship, copyright and license notice. This file is not | ||
6 | * a complete program and may only be used when the entire operating | ||
7 | * system is licensed under the GPL. | ||
8 | * See the file COPYING in this distribution for more information. | ||
9 | * | ||
10 | * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O | ||
11 | * Virtualized Server Adapter. | ||
12 | * Copyright(c) 2002-2009 Neterion Inc. | ||
13 | ******************************************************************************/ | ||
14 | #ifndef VXGE_CONFIG_H | ||
15 | #define VXGE_CONFIG_H | ||
16 | #include <linux/list.h> | ||
17 | |||
18 | #ifndef VXGE_CACHE_LINE_SIZE | ||
19 | #define VXGE_CACHE_LINE_SIZE 128 | ||
20 | #endif | ||
21 | |||
22 | #define vxge_os_vaprintf(level, mask, fmt, ...) { \ | ||
23 | char buff[255]; \ | ||
24 | snprintf(buff, 255, fmt, __VA_ARGS__); \ | ||
25 | printk("%s", buff); \ | ||
26 | printk("\n"); \ | ||
27 | } | ||
28 | |||
29 | #ifndef VXGE_ALIGN | ||
30 | #define VXGE_ALIGN(adrs, size) \ | ||
31 | (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) | ||
32 | #endif | ||
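Note that VXGE_ALIGN yields the number of padding bytes needed to reach the next size boundary, not the aligned address itself. Worked examples:

    /* VXGE_ALIGN(0x1003, 8) -> (8 - (0x1003 & 7)) & 7 = 5 bytes of padding
     * VXGE_ALIGN(0x1000, 8) -> (8 - (0x1000 & 7)) & 7 = 0 bytes of padding
     */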
33 | |||
34 | #define VXGE_HW_MIN_MTU 68 | ||
35 | #define VXGE_HW_MAX_MTU 9600 | ||
36 | #define VXGE_HW_DEFAULT_MTU 1500 | ||
37 | |||
38 | #ifdef VXGE_DEBUG_ASSERT | ||
39 | |||
40 | /** | ||
41 | * vxge_assert | ||
42 | * @test: C-condition to check | ||
43 | * | ||
44 | * This macro implements a traditional assert. By default assertions | ||
45 | * are enabled. They can be disabled by undefining the | ||
46 | * VXGE_DEBUG_ASSERT macro at | ||
47 | * compilation time. | ||
48 | * | ||
49 | */ | ||
50 | #define vxge_assert(test) { \ | ||
51 | if (!(test)) \ | ||
52 | vxge_os_bug("bad cond: "#test" at %s:%d\n", \ | ||
53 | __FILE__, __LINE__); } | ||
54 | #else | ||
55 | #define vxge_assert(test) | ||
56 | #endif /* end of VXGE_DEBUG_ASSERT */ | ||
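Usage is conventional; the condition is stringified into the bug message, e.g.:

    vxge_assert(blockpool != NULL);
    /* on failure: "bad cond: blockpool != NULL at vxge-config.c:<line>" */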
57 | |||
58 | /** | ||
59 | * enum vxge_debug_level | ||
60 | * @VXGE_NONE: debug disabled | ||
61 | * @VXGE_ERR: all errors going to be logged out | ||
62 | * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs | ||
63 | * going to be logged out. Very noisy. | ||
64 | * | ||
65 | * This enumeration is used to switch between different debug | ||
66 | * levels at runtime if the DEBUG macro is defined at compile | ||
67 | * time. If the DEBUG macro is not defined, the code is | ||
68 | * compiled out. | ||
69 | */ | ||
70 | enum vxge_debug_level { | ||
71 | VXGE_NONE = 0, | ||
72 | VXGE_TRACE = 1, | ||
73 | VXGE_ERR = 2 | ||
74 | }; | ||
75 | |||
76 | #define NULL_VPID 0xFFFFFFFF | ||
77 | #ifdef CONFIG_VXGE_DEBUG_TRACE_ALL | ||
78 | #define VXGE_DEBUG_MODULE_MASK 0xffffffff | ||
79 | #define VXGE_DEBUG_TRACE_MASK 0xffffffff | ||
80 | #define VXGE_DEBUG_ERR_MASK 0xffffffff | ||
81 | #define VXGE_DEBUG_MASK 0x000001ff | ||
82 | #else | ||
83 | #define VXGE_DEBUG_MODULE_MASK 0x20000000 | ||
84 | #define VXGE_DEBUG_TRACE_MASK 0x20000000 | ||
85 | #define VXGE_DEBUG_ERR_MASK 0x20000000 | ||
86 | #define VXGE_DEBUG_MASK 0x00000001 | ||
87 | #endif | ||
88 | |||
89 | /* | ||
90 | * @VXGE_COMPONENT_LL: do debug for vxge link layer module | ||
91 | * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions | ||
92 | * | ||
93 | * This enumeration is used to distinguish modules | ||
94 | * or libraries during compilation and runtime. The Makefile must declare | ||
95 | * the VXGE_DEBUG_MODULE_MASK macro and set it to a proper value. | ||
96 | */ | ||
97 | #define VXGE_COMPONENT_LL 0x20000000 | ||
98 | #define VXGE_COMPONENT_ALL 0xffffffff | ||
99 | |||
100 | #define VXGE_HW_BASE_INF 100 | ||
101 | #define VXGE_HW_BASE_ERR 200 | ||
102 | #define VXGE_HW_BASE_BADCFG 300 | ||
103 | |||
104 | enum vxge_hw_status { | ||
105 | VXGE_HW_OK = 0, | ||
106 | VXGE_HW_FAIL = 1, | ||
107 | VXGE_HW_PENDING = 2, | ||
108 | VXGE_HW_COMPLETIONS_REMAIN = 3, | ||
109 | |||
110 | VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1, | ||
111 | VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2, | ||
112 | |||
113 | VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1, | ||
114 | VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2, | ||
115 | VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3, | ||
116 | VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4, | ||
117 | VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5, | ||
118 | VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6, | ||
119 | VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7, | ||
120 | VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8, | ||
121 | VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9, | ||
122 | VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10, | ||
123 | VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11, | ||
124 | VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12, | ||
125 | VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13, | ||
126 | VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14, | ||
127 | VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15, | ||
128 | VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16, | ||
129 | VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17, | ||
130 | VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18, | ||
131 | VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19, | ||
132 | VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20, | ||
133 | VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21, | ||
134 | VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22, | ||
135 | |||
136 | VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1, | ||
137 | VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2, | ||
138 | VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3, | ||
139 | VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4, | ||
140 | VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5, | ||
141 | VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6, | ||
142 | VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7, | ||
143 | |||
144 | VXGE_HW_EOF_TRACE_BUF = -1 | ||
145 | }; | ||
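Drivers commonly collapse these HW status codes onto kernel errnos at the boundary to the network stack. A hypothetical helper, not part of this driver, might look like:

    #include <linux/errno.h>

    static inline int vxge_status_to_errno(enum vxge_hw_status s)
    {
    	switch (s) {
    	case VXGE_HW_OK:
    		return 0;
    	case VXGE_HW_ERR_OUT_OF_MEMORY:
    		return -ENOMEM;
    	case VXGE_HW_ERR_VPATH_NOT_OPEN:
    		return -EPERM;
    	default:
    		return -EIO;
    	}
    }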
146 | |||
147 | /** | ||
148 | * enum vxge_hw_device_link_state - Link state enumeration. | ||
149 | * @VXGE_HW_LINK_NONE: Invalid link state. | ||
150 | * @VXGE_HW_LINK_DOWN: Link is down. | ||
151 | * @VXGE_HW_LINK_UP: Link is up. | ||
152 | * | ||
153 | */ | ||
154 | enum vxge_hw_device_link_state { | ||
155 | VXGE_HW_LINK_NONE, | ||
156 | VXGE_HW_LINK_DOWN, | ||
157 | VXGE_HW_LINK_UP | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * struct vxge_hw_device_date - Date Format | ||
162 | * @day: Day | ||
163 | * @month: Month | ||
164 | * @year: Year | ||
165 | * @date: Date in string format | ||
166 | * | ||
167 | * Structure for returning date | ||
168 | */ | ||
169 | |||
170 | #define VXGE_HW_FW_STRLEN 32 | ||
171 | struct vxge_hw_device_date { | ||
172 | u32 day; | ||
173 | u32 month; | ||
174 | u32 year; | ||
175 | char date[VXGE_HW_FW_STRLEN]; | ||
176 | }; | ||
177 | |||
178 | struct vxge_hw_device_version { | ||
179 | u32 major; | ||
180 | u32 minor; | ||
181 | u32 build; | ||
182 | char version[VXGE_HW_FW_STRLEN]; | ||
183 | }; | ||
184 | |||
185 | u64 | ||
186 | __vxge_hw_vpath_pci_func_mode_get( | ||
187 | u32 vp_id, | ||
188 | struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
189 | |||
190 | /** | ||
191 | * struct vxge_hw_fifo_config - Configuration of fifo. | ||
192 | * @enable: Is this fifo to be commissioned | ||
193 | * @fifo_blocks: Number of TxDL (that is, lists of Tx descriptors) | ||
194 | * blocks per queue. | ||
195 | * @max_frags: Max number of Tx buffers per TxDL (that is, per single | ||
196 | * transmit operation). | ||
197 | * No more than 256 transmit buffers can be specified. | ||
198 | * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size | ||
199 | * bytes. Setting @memblock_size to page size ensures | ||
200 | * by-page allocation of descriptors. 128K bytes is the | ||
201 | * maximum supported block size. | ||
202 | * @alignment_size: per Tx fragment DMA-able memory used to align transmit data | ||
203 | * (e.g., to align on a cache line). | ||
204 | * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL. | ||
205 | * Use 0 otherwise. | ||
206 | * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation, | ||
207 | * which generally improves latency of the host bridge operation | ||
208 | * (see PCI specification). For valid values please refer | ||
209 | * to struct vxge_hw_fifo_config{} in the driver sources. | ||
210 | * Configuration of all Titan fifos. | ||
211 | * Note: Valid (min, max) range for each attribute is specified in the body of | ||
212 | * the struct vxge_hw_fifo_config{} structure. | ||
213 | */ | ||
214 | struct vxge_hw_fifo_config { | ||
215 | u32 enable; | ||
216 | #define VXGE_HW_FIFO_ENABLE 1 | ||
217 | #define VXGE_HW_FIFO_DISABLE 0 | ||
218 | |||
219 | u32 fifo_blocks; | ||
220 | #define VXGE_HW_MIN_FIFO_BLOCKS 2 | ||
221 | #define VXGE_HW_MAX_FIFO_BLOCKS 128 | ||
222 | |||
223 | u32 max_frags; | ||
224 | #define VXGE_HW_MIN_FIFO_FRAGS 1 | ||
225 | #define VXGE_HW_MAX_FIFO_FRAGS 256 | ||
226 | |||
227 | u32 memblock_size; | ||
228 | #define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE | ||
229 | #define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072 | ||
230 | #define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096 | ||
231 | |||
232 | u32 alignment_size; | ||
233 | #define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0 | ||
234 | #define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536 | ||
235 | #define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE | ||
236 | |||
237 | u32 intr; | ||
238 | #define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1 | ||
239 | #define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0 | ||
240 | #define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0 | ||
241 | |||
242 | u32 no_snoop_bits; | ||
243 | #define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0 | ||
244 | #define VXGE_HW_FIFO_NO_SNOOP_TXD 1 | ||
245 | #define VXGE_HW_FIFO_NO_SNOOP_FRM 2 | ||
246 | #define VXGE_HW_FIFO_NO_SNOOP_ALL 3 | ||
247 | #define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0 | ||
248 | |||
249 | }; | ||
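An illustrative initialization drawing its values from the ranges and defaults defined above; a sketch only, the real values are driver policy:

    static struct vxge_hw_fifo_config fifo_cfg = {
    	.enable		= VXGE_HW_FIFO_ENABLE,
    	.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
    	.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
    	.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
    	.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
    	.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
    	.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
    };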
250 | /** | ||
251 | * struct vxge_hw_ring_config - Ring configurations. | ||
252 | * @enable: Is this ring to be commissioned | ||
253 | * @ring_blocks: Number of RxD blocks in the ring | ||
254 | * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer | ||
255 | * to Titan User Guide. | ||
256 | * @scatter_mode: Titan supports two receive scatter modes: A and B. | ||
257 | * For details please refer to Titan User Guide. | ||
258 | * @rxds_limit: Receive descriptor limit for the ring; see | ||
259 | * VXGE_HW_DEF_RING_RXDS_LIMIT for the default value. | ||
260 | * | ||
273 | * Ring configuration. | ||
274 | */ | ||
275 | struct vxge_hw_ring_config { | ||
276 | u32 enable; | ||
277 | #define VXGE_HW_RING_ENABLE 1 | ||
278 | #define VXGE_HW_RING_DISABLE 0 | ||
279 | #define VXGE_HW_RING_DEFAULT 1 | ||
280 | |||
281 | u32 ring_blocks; | ||
282 | #define VXGE_HW_MIN_RING_BLOCKS 1 | ||
283 | #define VXGE_HW_MAX_RING_BLOCKS 128 | ||
284 | #define VXGE_HW_DEF_RING_BLOCKS 2 | ||
285 | |||
286 | u32 buffer_mode; | ||
287 | #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 | ||
288 | #define VXGE_HW_RING_RXD_BUFFER_MODE_3 3 | ||
289 | #define VXGE_HW_RING_RXD_BUFFER_MODE_5 5 | ||
290 | #define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1 | ||
291 | |||
292 | u32 scatter_mode; | ||
293 | #define VXGE_HW_RING_SCATTER_MODE_A 0 | ||
294 | #define VXGE_HW_RING_SCATTER_MODE_B 1 | ||
295 | #define VXGE_HW_RING_SCATTER_MODE_C 2 | ||
296 | #define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff | ||
297 | |||
298 | u64 rxds_limit; | ||
299 | #define VXGE_HW_DEF_RING_RXDS_LIMIT 44 | ||
300 | }; | ||
301 | |||
302 | /** | ||
303 | * struct vxge_hw_vp_config - Configuration of virtual path | ||
304 | * @vp_id: Virtual Path Id | ||
305 | * @min_bandwidth: Minimum Guaranteed bandwidth | ||
306 | * @ring: See struct vxge_hw_ring_config{}. | ||
307 | * @fifo: See struct vxge_hw_fifo_config{}. | ||
308 | * @tti: Configuration of interrupt associated with Transmit. | ||
309 | * see struct vxge_hw_tim_intr_config(); | ||
310 | * @rti: Configuration of interrupt associated with Receive. | ||
311 | * see struct vxge_hw_tim_intr_config(); | ||
312 | * @mtu: MTU size used on this port. | ||
313 | * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to | ||
314 | * remove the VLAN tag from all received tagged frames that are not | ||
315 | * replicated at the internal L2 switch. | ||
316 | * 0 - Do not strip the VLAN tag. | ||
317 | * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are | ||
318 | * always placed into the RxDMA descriptor. | ||
319 | * | ||
320 | * This structure is used by the driver to pass the configuration parameters to | ||
321 | * configure Virtual Path. | ||
322 | */ | ||
323 | struct vxge_hw_vp_config { | ||
324 | u32 vp_id; | ||
325 | |||
326 | #define VXGE_HW_VPATH_PRIORITY_MIN 0 | ||
327 | #define VXGE_HW_VPATH_PRIORITY_MAX 16 | ||
328 | #define VXGE_HW_VPATH_PRIORITY_DEFAULT 0 | ||
329 | |||
330 | u32 min_bandwidth; | ||
331 | #define VXGE_HW_VPATH_BANDWIDTH_MIN 0 | ||
332 | #define VXGE_HW_VPATH_BANDWIDTH_MAX 100 | ||
333 | #define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0 | ||
334 | |||
335 | struct vxge_hw_ring_config ring; | ||
336 | struct vxge_hw_fifo_config fifo; | ||
337 | struct vxge_hw_tim_intr_config tti; | ||
338 | struct vxge_hw_tim_intr_config rti; | ||
339 | |||
340 | u32 mtu; | ||
341 | #define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU | ||
342 | #define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU | ||
343 | #define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff | ||
344 | |||
345 | u32 rpa_strip_vlan_tag; | ||
346 | #define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1 | ||
347 | #define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0 | ||
348 | #define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff | ||
349 | |||
350 | }; | ||
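Per-vpath settings nest the ring and fifo configurations; an illustrative sketch (tti/rti left zeroed for brevity):

    struct vxge_hw_vp_config vp_cfg = {
    	.vp_id			= 0,
    	.min_bandwidth		= VXGE_HW_VPATH_BANDWIDTH_DEFAULT,
    	.ring.enable		= VXGE_HW_RING_ENABLE,
    	.ring.ring_blocks	= VXGE_HW_DEF_RING_BLOCKS,
    	.fifo.enable		= VXGE_HW_FIFO_ENABLE,
    	.mtu			= VXGE_HW_DEFAULT_MTU,
    	.rpa_strip_vlan_tag	= VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE,
    };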
351 | /** | ||
352 | * struct vxge_hw_device_config - Device configuration. | ||
353 | * @dma_blockpool_initial: Initial size of DMA Pool | ||
354 | * @dma_blockpool_max: Maximum blocks in DMA pool | ||
355 | * @intr_mode: Line, or MSI-X interrupt. | ||
356 | * | ||
357 | * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table). | ||
358 | * @rth_it_type: RTH IT table programming type | ||
359 | * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address | ||
360 | * @vp_config: Configuration for virtual paths | ||
361 | * @device_poll_millis: Specify the interval (in milliseconds) | ||
362 | * to wait for register reads | ||
363 | * | ||
364 | * Titan configuration. | ||
365 | * Contains per-device configuration parameters, including: | ||
366 | * - stats sampling interval, etc. | ||
367 | * | ||
368 | * In addition, struct vxge_hw_device_config{} includes "subordinate" | ||
369 | * configurations, including: | ||
370 | * - fifos and rings; | ||
371 | * - MAC (done at firmware level). | ||
372 | * | ||
373 | * See Titan User Guide for more details. | ||
374 | * Note: Valid (min, max) range for each attribute is specified in the body of | ||
375 | * the struct vxge_hw_device_config{} structure. Please refer to the | ||
376 | * corresponding include file. | ||
377 | * See also: struct vxge_hw_tim_intr_config{}. | ||
378 | */ | ||
379 | struct vxge_hw_device_config { | ||
380 | u32 dma_blockpool_initial; | ||
381 | u32 dma_blockpool_max; | ||
382 | #define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0 | ||
383 | #define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0 | ||
384 | #define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4 | ||
385 | #define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096 | ||
386 | |||
387 | #define VXGE_HW_MAX_PAYLOAD_SIZE_512 2 | ||
388 | |||
389 | u32 intr_mode; | ||
390 | #define VXGE_HW_INTR_MODE_IRQLINE 0 | ||
391 | #define VXGE_HW_INTR_MODE_MSIX 1 | ||
392 | #define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2 | ||
393 | |||
394 | #define VXGE_HW_INTR_MODE_DEF 0 | ||
395 | |||
396 | u32 rth_en; | ||
397 | #define VXGE_HW_RTH_DISABLE 0 | ||
398 | #define VXGE_HW_RTH_ENABLE 1 | ||
399 | #define VXGE_HW_RTH_DEFAULT 0 | ||
400 | |||
401 | u32 rth_it_type; | ||
402 | #define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0 | ||
403 | #define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1 | ||
404 | #define VXGE_HW_RTH_IT_TYPE_DEFAULT 0 | ||
405 | |||
406 | u32 rts_mac_en; | ||
407 | #define VXGE_HW_RTS_MAC_DISABLE 0 | ||
408 | #define VXGE_HW_RTS_MAC_ENABLE 1 | ||
409 | #define VXGE_HW_RTS_MAC_DEFAULT 0 | ||
410 | |||
411 | struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS]; | ||
412 | |||
413 | u32 device_poll_millis; | ||
414 | #define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1 | ||
415 | #define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000 | ||
416 | #define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000 | ||
417 | |||
418 | }; | ||
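At the device level the per-vpath entries are filled into vp_config[] separately; a sketch of the remaining knobs, using the defaults and modes defined above:

    struct vxge_hw_device_config dev_cfg = {
    	.dma_blockpool_initial	= VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE,
    	.dma_blockpool_max	= VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE,
    	.intr_mode		= VXGE_HW_INTR_MODE_MSIX,
    	.rth_en			= VXGE_HW_RTH_ENABLE,
    	.rth_it_type		= VXGE_HW_RTH_IT_TYPE_DEFAULT,
    	.rts_mac_en		= VXGE_HW_RTS_MAC_DEFAULT,
    	.device_poll_millis	= VXGE_HW_DEF_DEVICE_POLL_MILLIS,
    };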
419 | |||
420 | /** | ||
421 | * function vxge_uld_link_up_f - Link-Up callback provided by driver. | ||
422 | * @devh: HW device handle. | ||
423 | * Link-up notification callback provided by the driver. | ||
424 | * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. | ||
425 | * | ||
426 | * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{}, | ||
427 | * vxge_hw_driver_initialize(). | ||
428 | */ | ||
429 | |||
430 | /** | ||
431 | * function vxge_uld_link_down_f - Link-Down callback provided by | ||
432 | * driver. | ||
433 | * @devh: HW device handle. | ||
434 | * | ||
435 | * Link-Down notification callback provided by the driver. | ||
436 | * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. | ||
437 | * | ||
438 | * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{}, | ||
439 | * vxge_hw_driver_initialize(). | ||
440 | */ | ||
441 | |||
442 | /** | ||
443 | * function vxge_uld_crit_err_f - Critical Error notification callback. | ||
444 | * @devh: HW device handle. | ||
445 | * (typically - at HW device initialization time). | ||
446 | * @type: Enumerated hw error, e.g.: double ECC. | ||
447 | * @serr_data: Titan status. | ||
448 | * @ext_data: Extended data. The contents depends on the @type. | ||
449 | * | ||
450 | * Critical error notification callback provided by the driver. | ||
451 | * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. | ||
452 | * | ||
453 | * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{}, | ||
454 | * vxge_hw_driver_initialize(). | ||
455 | */ | ||
456 | |||
457 | /** | ||
458 | * struct vxge_hw_uld_cbs - driver "slow-path" callbacks. | ||
459 | * @link_up: See vxge_uld_link_up_f{}. | ||
460 | * @link_down: See vxge_uld_link_down_f{}. | ||
461 | * @crit_err: See vxge_uld_crit_err_f{}. | ||
462 | * | ||
463 | * Driver slow-path (per-driver) callbacks. | ||
464 | * Implemented by driver and provided to HW via | ||
465 | * vxge_hw_driver_initialize(). | ||
466 | * Note that these callbacks are not mandatory: HW will not invoke | ||
467 | * a callback if NULL is specified. | ||
468 | * | ||
469 | * See also: vxge_hw_driver_initialize(). | ||
470 | */ | ||
471 | struct vxge_hw_uld_cbs { | ||
472 | |||
473 | void (*link_up)(struct __vxge_hw_device *devh); | ||
474 | void (*link_down)(struct __vxge_hw_device *devh); | ||
475 | void (*crit_err)(struct __vxge_hw_device *devh, | ||
476 | enum vxge_hw_event type, u64 ext_data); | ||
477 | }; | ||
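A driver wires these up once and hands them to HW via the device attribute structure defined later in this header; the callback names here are hypothetical:

    static void my_link_up(struct __vxge_hw_device *devh)   { /* ... */ }
    static void my_link_down(struct __vxge_hw_device *devh) { /* ... */ }

    static struct vxge_hw_uld_cbs my_uld_cbs = {
    	.link_up   = my_link_up,
    	.link_down = my_link_down,
    	.crit_err  = NULL,	/* optional; NULL callbacks are skipped */
    };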
478 | |||
479 | /* | ||
480 | * struct __vxge_hw_blockpool_entry - Block private data structure | ||
481 | * @item: List header used to link. | ||
482 | * @length: Length of the block | ||
483 | * @memblock: Virtual address block | ||
484 | * @dma_addr: DMA Address of the block. | ||
485 | * @dma_handle: DMA handle of the block. | ||
486 | * @acc_handle: DMA acc handle | ||
487 | * | ||
488 | * Block is allocated with a header to put the blocks into list. | ||
489 | * | ||
490 | */ | ||
491 | struct __vxge_hw_blockpool_entry { | ||
492 | struct list_head item; | ||
493 | u32 length; | ||
494 | void *memblock; | ||
495 | dma_addr_t dma_addr; | ||
496 | struct pci_dev *dma_handle; | ||
497 | struct pci_dev *acc_handle; | ||
498 | }; | ||
499 | |||
500 | /* | ||
501 | * struct __vxge_hw_blockpool - Block Pool | ||
502 | * @hldev: HW device | ||
503 | * @block_size: size of each block. | ||
504 | * @pool_size: Number of blocks in the pool | ||
505 | * @pool_max: Maximum number of blocks above which to free additional blocks | ||
506 | * @req_out: Number of block requests outstanding with the OS | ||
507 | * @free_block_list: List of free blocks | ||
508 | * @free_entry_list: List of free blockpool entries | ||
509 | * Block pool contains the DMA blocks preallocated. | ||
510 | * | ||
511 | */ | ||
512 | struct __vxge_hw_blockpool { | ||
513 | struct __vxge_hw_device *hldev; | ||
514 | u32 block_size; | ||
515 | u32 pool_size; | ||
516 | u32 pool_max; | ||
517 | u32 req_out; | ||
518 | struct list_head free_block_list; | ||
519 | struct list_head free_entry_list; | ||
520 | }; | ||
521 | |||
522 | /* | ||
523 | * enum __vxge_hw_channel_type - Enumerated channel types. | ||
524 | * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel. | ||
525 | * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo. | ||
526 | * @VXGE_HW_CHANNEL_TYPE_RING: ring. | ||
527 | * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported | ||
528 | * (and recognized) channel types. Currently: 2. | ||
529 | * | ||
530 | * Enumerated channel types. Currently there are only two link-layer | ||
531 | * channels - Titan fifo and Titan ring. In the future the list will grow. | ||
532 | */ | ||
533 | enum __vxge_hw_channel_type { | ||
534 | VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0, | ||
535 | VXGE_HW_CHANNEL_TYPE_FIFO = 1, | ||
536 | VXGE_HW_CHANNEL_TYPE_RING = 2, | ||
537 | VXGE_HW_CHANNEL_TYPE_MAX = 3 | ||
538 | }; | ||
539 | |||
540 | /* | ||
541 | * struct __vxge_hw_channel | ||
542 | * @item: List item; used to maintain a list of open channels. | ||
543 | * @type: Channel type. See enum vxge_hw_channel_type{}. | ||
544 | * @devh: Device handle. HW device object that contains _this_ channel. | ||
545 | * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel. | ||
546 | * @length: Channel length. Currently allocated number of descriptors. | ||
547 | * The channel length "grows" when more descriptors get allocated. | ||
548 | * See _hw_mempool_grow. | ||
549 | * @reserve_arr: Reserve array. Contains descriptors that can be reserved | ||
550 | * by driver for the subsequent send or receive operation. | ||
551 | * See vxge_hw_fifo_txdl_reserve(), | ||
552 | * vxge_hw_ring_rxd_reserve(). | ||
553 | * @reserve_ptr: Current pointer in the reserve array | ||
554 | * @reserve_top: Reserve top gives the maximum number of dtrs available in | ||
555 | * reserve array. | ||
556 | * @work_arr: Work array. Contains descriptors posted to the channel. | ||
557 | * Note that at any point in time @work_arr contains 3 types of | ||
558 | * descriptors: | ||
559 | * 1) posted but not yet consumed by Titan device; | ||
560 | * 2) consumed but not yet completed; | ||
561 | * 3) completed but not yet freed | ||
562 | * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free()) | ||
563 | * @post_index: Post index. At any point in time points to the | ||
564 | * position in the channel which will contain the next | ||
565 | * to-be-posted descriptor. | ||
566 | * @compl_index: Completion index. At any point in time points to the | ||
567 | * position in the channel which will contain the next | ||
568 | * to-be-completed descriptor. | ||
569 | * @free_arr: Free array. Contains completed descriptors that were freed | ||
570 | * (i.e., handed over back to HW) by driver. | ||
571 | * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free(). | ||
572 | * @free_ptr: current pointer in free array | ||
573 | * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize | ||
574 | * to store per-operation control information. | ||
575 | * @stats: Pointer to common statistics | ||
576 | * @userdata: Per-channel opaque (void*) user-defined context, which may be | ||
577 | * driver object, ULP connection, etc. | ||
578 | * Once channel is open, @userdata is passed back to user via | ||
579 | * vxge_hw_channel_callback_f. | ||
580 | * | ||
581 | * HW channel object. | ||
582 | * | ||
583 | * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag | ||
584 | */ | ||
585 | struct __vxge_hw_channel { | ||
586 | struct list_head item; | ||
587 | enum __vxge_hw_channel_type type; | ||
588 | struct __vxge_hw_device *devh; | ||
589 | struct __vxge_hw_vpath_handle *vph; | ||
590 | u32 length; | ||
591 | u32 vp_id; | ||
592 | void **reserve_arr; | ||
593 | u32 reserve_ptr; | ||
594 | u32 reserve_top; | ||
595 | void **work_arr; | ||
596 | u32 post_index ____cacheline_aligned; | ||
597 | u32 compl_index ____cacheline_aligned; | ||
598 | void **free_arr; | ||
599 | u32 free_ptr; | ||
600 | void **orig_arr; | ||
601 | u32 per_dtr_space; | ||
602 | void *userdata; | ||
603 | struct vxge_hw_common_reg __iomem *common_reg; | ||
604 | u32 first_vp_id; | ||
605 | struct vxge_hw_vpath_stats_sw_common_info *stats; | ||
606 | |||
607 | } ____cacheline_aligned; | ||
608 | |||
609 | /* | ||
610 | * struct __vxge_hw_virtualpath - Virtual Path | ||
611 | * | ||
612 | * @vp_id: Virtual path id | ||
613 | * @vp_open: This flag specifies whether vxge_hw_vp_open has been called by the LL driver | ||
614 | * @hldev: Hal device | ||
615 | * @vp_config: Virtual Path Config | ||
616 | * @vp_reg: VPATH Register map address in BAR0 | ||
617 | * @vpmgmt_reg: VPATH_MGMT register map address | ||
618 | * @max_mtu: Max mtu that can be supported | ||
619 | * @vsport_number: vsport attached to this vpath | ||
620 | * @max_kdfc_db: Maximum kernel mode doorbells | ||
621 | * @max_nofl_db: Maximum non offload doorbells | ||
622 | * @tx_intr_num: Interrupt Number associated with the TX | ||
623 | * | ||
624 | * @ringh: Ring Queue | ||
625 | * @fifoh: FIFO Queue | ||
626 | * @vpath_handles: Virtual Path handles list | ||
627 | * @stats_block: Memory for DMAing stats | ||
628 | * @stats: Vpath statistics | ||
629 | * | ||
630 | * Virtual path structure to encapsulate the data related to a virtual path. | ||
631 | * Virtual paths are allocated by the HW upon getting configuration from the | ||
632 | * driver and inserted into the list of virtual paths. | ||
633 | */ | ||
634 | struct __vxge_hw_virtualpath { | ||
635 | u32 vp_id; | ||
636 | |||
637 | u32 vp_open; | ||
638 | #define VXGE_HW_VP_NOT_OPEN 0 | ||
639 | #define VXGE_HW_VP_OPEN 1 | ||
640 | |||
641 | struct __vxge_hw_device *hldev; | ||
642 | struct vxge_hw_vp_config *vp_config; | ||
643 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
644 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | ||
645 | struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; | ||
646 | |||
647 | u32 max_mtu; | ||
648 | u32 vsport_number; | ||
649 | u32 max_kdfc_db; | ||
650 | u32 max_nofl_db; | ||
651 | |||
652 | struct __vxge_hw_ring *____cacheline_aligned ringh; | ||
653 | struct __vxge_hw_fifo *____cacheline_aligned fifoh; | ||
654 | struct list_head vpath_handles; | ||
655 | struct __vxge_hw_blockpool_entry *stats_block; | ||
656 | struct vxge_hw_vpath_stats_hw_info *hw_stats; | ||
657 | struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; | ||
658 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | ||
659 | }; | ||
660 | |||
661 | /* | ||
662 | * struct __vxge_hw_vpath_handle - List item to store callback information | ||
663 | * @item: List head to keep the item in linked list | ||
664 | * @vpath: Virtual path to which this item belongs | ||
665 | * | ||
666 | * This structure is used to store the callback information. | ||
667 | */ | ||
668 | struct __vxge_hw_vpath_handle { | ||
669 | struct list_head item; | ||
670 | struct __vxge_hw_virtualpath *vpath; | ||
671 | }; | ||
672 | |||
678 | /** | ||
679 | * struct __vxge_hw_device - Hal device object | ||
680 | * @magic: Magic Number | ||
681 | * @device_id: PCI Device Id of the adapter | ||
682 | * @major_revision: PCI Device major revision | ||
683 | * @minor_revision: PCI Device minor revision | ||
684 | * @bar0: BAR0 virtual address. | ||
685 | * @bar1: BAR1 virtual address. | ||
686 | * @bar2: BAR2 virtual address. | ||
687 | * @pdev: Physical device handle | ||
688 | * @config: Configuration passed by the LL driver at initialization | ||
689 | * @link_state: Link state | ||
690 | * | ||
691 | * HW device object. Represents Titan adapter | ||
692 | */ | ||
693 | struct __vxge_hw_device { | ||
694 | u32 magic; | ||
695 | #define VXGE_HW_DEVICE_MAGIC 0x12345678 | ||
696 | #define VXGE_HW_DEVICE_DEAD 0xDEADDEAD | ||
697 | u16 device_id; | ||
698 | u8 major_revision; | ||
699 | u8 minor_revision; | ||
700 | void __iomem *bar0; | ||
701 | void __iomem *bar1; | ||
702 | void __iomem *bar2; | ||
703 | struct pci_dev *pdev; | ||
704 | struct net_device *ndev; | ||
705 | struct vxge_hw_device_config config; | ||
706 | enum vxge_hw_device_link_state link_state; | ||
707 | |||
708 | struct vxge_hw_uld_cbs uld_callbacks; | ||
709 | |||
710 | u32 host_type; | ||
711 | u32 func_id; | ||
712 | u32 access_rights; | ||
713 | #define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1 | ||
714 | #define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2 | ||
715 | #define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4 | ||
716 | struct vxge_hw_legacy_reg __iomem *legacy_reg; | ||
717 | struct vxge_hw_toc_reg __iomem *toc_reg; | ||
718 | struct vxge_hw_common_reg __iomem *common_reg; | ||
719 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; | ||
720 | struct vxge_hw_srpcim_reg __iomem *srpcim_reg | ||
721 | [VXGE_HW_TITAN_SRPCIM_REG_SPACES]; | ||
722 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg | ||
723 | [VXGE_HW_TITAN_VPMGMT_REG_SPACES]; | ||
724 | struct vxge_hw_vpath_reg __iomem *vpath_reg | ||
725 | [VXGE_HW_TITAN_VPATH_REG_SPACES]; | ||
726 | u8 __iomem *kdfc; | ||
727 | u8 __iomem *usdc; | ||
728 | struct __vxge_hw_virtualpath virtual_paths | ||
729 | [VXGE_HW_MAX_VIRTUAL_PATHS]; | ||
730 | u64 vpath_assignments; | ||
731 | u64 vpaths_deployed; | ||
732 | u32 first_vp_id; | ||
733 | u64 tim_int_mask0[4]; | ||
734 | u32 tim_int_mask1[4]; | ||
735 | |||
736 | struct __vxge_hw_blockpool block_pool; | ||
737 | struct vxge_hw_device_stats stats; | ||
738 | u32 debug_module_mask; | ||
739 | u32 debug_level; | ||
740 | u32 level_err; | ||
741 | u32 level_trace; | ||
742 | }; | ||
743 | |||
744 | #define VXGE_HW_INFO_LEN 64 | ||
745 | /** | ||
746 | * struct vxge_hw_device_hw_info - Device information | ||
747 | * @host_type: Host Type | ||
748 | * @func_id: Function Id | ||
749 | * @vpath_mask: vpath bit mask | ||
750 | * @fw_version: Firmware version | ||
751 | * @fw_date: Firmware Date | ||
752 | * @flash_version: Flash version | ||
753 | * @flash_date: Flash date | ||
754 | * @mac_addrs: Mac addresses for each vpath | ||
755 | * @mac_addr_masks: Mac address masks for each vpath | ||
756 | * | ||
757 | * Returns the vpath mask that has the bits set for each vpath allocated | ||
758 | * for the driver and the first mac address for each vpath | ||
759 | */ | ||
760 | struct vxge_hw_device_hw_info { | ||
761 | u32 host_type; | ||
762 | #define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0 | ||
763 | #define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1 | ||
764 | #define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2 | ||
765 | #define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3 | ||
766 | #define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4 | ||
767 | #define VXGE_HW_SR_VH_FUNCTION0 5 | ||
768 | #define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 | ||
769 | #define VXGE_HW_VH_NORMAL_FUNCTION 7 | ||
770 | u64 function_mode; | ||
771 | #define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0 | ||
772 | #define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1 | ||
773 | #define VXGE_HW_FUNCTION_MODE_SRIOV 2 | ||
774 | #define VXGE_HW_FUNCTION_MODE_MRIOV 3 | ||
775 | u32 func_id; | ||
776 | u64 vpath_mask; | ||
777 | struct vxge_hw_device_version fw_version; | ||
778 | struct vxge_hw_device_date fw_date; | ||
779 | struct vxge_hw_device_version flash_version; | ||
780 | struct vxge_hw_device_date flash_date; | ||
781 | u8 serial_number[VXGE_HW_INFO_LEN]; | ||
782 | u8 part_number[VXGE_HW_INFO_LEN]; | ||
783 | u8 product_desc[VXGE_HW_INFO_LEN]; | ||
784 | u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | ||
785 | u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | ||
786 | }; | ||
787 | |||
788 | /** | ||
789 | * struct vxge_hw_device_attr - Device memory spaces. | ||
790 | * @bar0: BAR0 virtual address. | ||
791 | * @bar1: BAR1 virtual address. | ||
792 | * @bar2: BAR2 virtual address. | ||
793 | * @pdev: PCI device object. | ||
794 | * | ||
795 | * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device | ||
796 | * mapped memories. Also, includes a pointer to OS-specific PCI device object. | ||
797 | */ | ||
798 | struct vxge_hw_device_attr { | ||
799 | void __iomem *bar0; | ||
800 | void __iomem *bar1; | ||
801 | void __iomem *bar2; | ||
802 | struct pci_dev *pdev; | ||
803 | struct vxge_hw_uld_cbs uld_callbacks; | ||
804 | }; | ||
805 | |||
806 | #define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls) | ||
807 | |||
808 | #define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \ | ||
809 | if (i < 16) { \ | ||
810 | m0[0] |= vxge_vBIT(0x8, (i*4), 4); \ | ||
811 | m0[1] |= vxge_vBIT(0x4, (i*4), 4); \ | ||
812 | } else { \ | ||
814 | m1[0] = 0x80000000; \ | ||
815 | m1[1] = 0x40000000; \ | ||
816 | } \ | ||
817 | } | ||
818 | |||
819 | #define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \ | ||
820 | if (i < 16) { \ | ||
821 | m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \ | ||
822 | m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \ | ||
823 | } else { \ | ||
825 | m1[0] = 0; \ | ||
826 | m1[1] = 0; \ | ||
827 | } \ | ||
828 | } | ||
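/*
 * Illustrative sketch only: driving the mask macros above for one vpath.
 * The m0/m1 arrays mirror the tim_int_mask0/tim_int_mask1 members of
 * struct __vxge_hw_device; the helper name is hypothetical.  Note that
 * the macros expand to full if/else blocks, so they are used as
 * statements.
 */
static inline void __vxge_example_tim_int_toggle(u64 m0[4], u32 m1[4],
						 u32 vp_id)
{
	/* Mask the TIM interrupt bits of @vp_id ... */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, vp_id);
	/* ... and unmask them again. */
	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, vp_id);
}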
829 | |||
830 | #define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \ | ||
831 | status = vxge_hw_mrpcim_stats_access(hldev, \ | ||
832 | VXGE_HW_STATS_OP_READ, \ | ||
833 | loc, \ | ||
834 | offset, \ | ||
835 | &val64); \ | ||
836 | \ | ||
837 | if (status != VXGE_HW_OK) \ | ||
838 | return status; \ | ||
839 | } | ||
840 | |||
841 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
842 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
843 | VXGE_HW_STATS_OP_READ, \ | ||
844 | offset, \ | ||
845 | &val64); \ | ||
846 | if (status != VXGE_HW_OK) \ | ||
847 | return status; \ | ||
848 | } | ||
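/*
 * Illustrative sketch only: both PIO-read macros above expect local
 * variables named "status" and "val64" (and a "vpath" or "hldev" pointer)
 * in the calling function, and they return from that function on error.
 * The wrapper below makes that contract explicit; its name is
 * hypothetical, and VXGE_HW_STATS_OP_READ is assumed to be defined
 * earlier in this header.  The prototype repeats the declaration found
 * further down so the example is self-contained at this point.
 */
enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat);

static inline enum vxge_hw_status
__vxge_example_vpath_stat_read(struct __vxge_hw_virtualpath *vpath,
			       u32 offset, u64 *stat)
{
	u64 val64 = 0;
	enum vxge_hw_status status = VXGE_HW_OK;

	VXGE_HW_VPATH_STATS_PIO_READ(offset);	/* may return early */
	*stat = val64;
	return status;
}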
849 | |||
850 | /* | ||
851 | * struct __vxge_hw_ring - Ring channel. | ||
852 | * @channel: Channel "base" of this ring, the common part of all HW | ||
853 | * channels. | ||
854 | * @mempool: Memory pool, the pool from which descriptors get allocated. | ||
855 | * (See vxge_hw_mm.h). | ||
856 | * @config: Ring configuration, part of device configuration | ||
857 | * (see struct vxge_hw_device_config{}). | ||
858 | * @ring_length: Length of the ring | ||
859 | * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode, | ||
860 | * as per Titan User Guide. | ||
861 | * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec, | ||
862 | * a 1-buffer mode descriptor is 32 bytes long, etc. | ||
863 | * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep | ||
864 | * per-descriptor data (e.g., DMA handle for Solaris) | ||
865 | * @per_rxd_space: Per rxd space requested by driver | ||
866 | * @rxds_per_block: Number of descriptors per hardware-defined RxD | ||
867 | * block. Depends on the (1-, 3-, 5-) buffer mode. | ||
868 | * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal | ||
869 | * usage. Not to be confused with @rxd_priv_size. | ||
870 | * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR. | ||
871 | * @callback: Channel completion callback. HW invokes the callback when there | ||
872 | * are new completions on that channel. In many implementations | ||
873 | * the @callback executes in the hw interrupt context. | ||
874 | * @rxd_init: Channel's descriptor-initialize callback. | ||
875 | * See vxge_hw_ring_rxd_init_f{}. | ||
876 | * If not NULL, HW invokes the callback when opening | ||
877 | * the ring. | ||
878 | * @rxd_term: Channel's descriptor-terminate callback. If not NULL, | ||
879 | * HW invokes the callback when closing the corresponding channel. | ||
880 | * See also vxge_hw_channel_rxd_term_f{}. | ||
881 | * @stats: Statistics for ring | ||
882 | * | ||
883 | * Ring channel. | ||
884 | * Note: The structure is cache line aligned to better utilize | ||
885 | * CPU cache performance. | ||
886 | */ | ||
887 | struct __vxge_hw_ring { | ||
888 | struct __vxge_hw_channel channel; | ||
889 | struct vxge_hw_mempool *mempool; | ||
890 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
891 | struct vxge_hw_common_reg __iomem *common_reg; | ||
892 | u32 ring_length; | ||
893 | u32 buffer_mode; | ||
894 | u32 rxd_size; | ||
895 | u32 rxd_priv_size; | ||
896 | u32 per_rxd_space; | ||
897 | u32 rxds_per_block; | ||
898 | u32 rxdblock_priv_size; | ||
899 | u32 cmpl_cnt; | ||
900 | u32 vp_id; | ||
901 | u32 doorbell_cnt; | ||
902 | u32 total_db_cnt; | ||
903 | u64 rxds_limit; | ||
904 | |||
905 | enum vxge_hw_status (*callback)( | ||
906 | struct __vxge_hw_ring *ringh, | ||
907 | void *rxdh, | ||
908 | u8 t_code, | ||
909 | void *userdata); | ||
910 | |||
911 | enum vxge_hw_status (*rxd_init)( | ||
912 | void *rxdh, | ||
913 | void *userdata); | ||
914 | |||
915 | void (*rxd_term)( | ||
916 | void *rxdh, | ||
917 | enum vxge_hw_rxd_state state, | ||
918 | void *userdata); | ||
919 | |||
920 | struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned; | ||
921 | struct vxge_hw_ring_config *config; | ||
922 | } ____cacheline_aligned; | ||
923 | |||
924 | /** | ||
925 | * enum vxge_hw_txdl_state - Descriptor (TXDL) state. | ||
926 | * @VXGE_HW_TXDL_STATE_NONE: Invalid state. | ||
927 | * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation. | ||
928 | * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the | ||
929 | * device. | ||
930 | * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for | ||
931 | * filling-in and posting later. | ||
932 | * | ||
933 | * Titan/HW descriptor states. | ||
934 | * | ||
935 | */ | ||
936 | enum vxge_hw_txdl_state { | ||
937 | VXGE_HW_TXDL_STATE_NONE = 0, | ||
938 | VXGE_HW_TXDL_STATE_AVAIL = 1, | ||
939 | VXGE_HW_TXDL_STATE_POSTED = 2, | ||
940 | VXGE_HW_TXDL_STATE_FREED = 3 | ||
941 | }; | ||
942 | /* | ||
943 | * struct __vxge_hw_fifo - Fifo. | ||
944 | * @channel: Channel "base" of this fifo, the common part of all HW | ||
945 | * channels. | ||
946 | * @mempool: Memory pool, from which descriptors get allocated. | ||
947 | * @config: Fifo configuration, part of device configuration | ||
948 | * (see struct vxge_hw_device_config{}). | ||
949 | * @interrupt_type: Interrupt type to be used | ||
950 | * @no_snoop_bits: See struct vxge_hw_fifo_config{}. | ||
951 | * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. | ||
952 | * For more details on TxDLs please refer to the Titan UG. | ||
953 | * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus | ||
954 | * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv). | ||
955 | * @priv_size: Per-Tx descriptor space reserved for driver | ||
956 | * usage. | ||
957 | * @per_txdl_space: Per txdl private space for the driver | ||
958 | * @callback: Fifo completion callback. HW invokes the callback when there | ||
959 | * are new completions on that fifo. In many implementations | ||
960 | * the @callback executes in the hw interrupt context. | ||
961 | * @txdl_term: Fifo's descriptor-terminate callback. If not NULL, | ||
962 | * HW invokes the callback when closing the corresponding fifo. | ||
963 | * See also vxge_hw_fifo_txdl_term_f{}. | ||
964 | * @stats: Statistics of this fifo | ||
965 | * | ||
966 | * Fifo channel. | ||
967 | * Note: The structure is cache line aligned. | ||
968 | */ | ||
969 | struct __vxge_hw_fifo { | ||
970 | struct __vxge_hw_channel channel; | ||
971 | struct vxge_hw_mempool *mempool; | ||
972 | struct vxge_hw_fifo_config *config; | ||
973 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
974 | struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; | ||
975 | u64 interrupt_type; | ||
976 | u32 no_snoop_bits; | ||
977 | u32 txdl_per_memblock; | ||
978 | u32 txdl_size; | ||
979 | u32 priv_size; | ||
980 | u32 per_txdl_space; | ||
981 | u32 vp_id; | ||
982 | u32 tx_intr_num; | ||
983 | |||
984 | enum vxge_hw_status (*callback)( | ||
985 | struct __vxge_hw_fifo *fifo_handle, | ||
986 | void *txdlh, | ||
987 | enum vxge_hw_fifo_tcode t_code, | ||
988 | void *userdata, | ||
989 | void **skb_ptr); | ||
990 | |||
991 | void (*txdl_term)( | ||
992 | void *txdlh, | ||
993 | enum vxge_hw_txdl_state state, | ||
994 | void *userdata); | ||
995 | |||
996 | struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned; | ||
997 | } ____cacheline_aligned; | ||
998 | |||
999 | /* | ||
1000 | * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data. | ||
1001 | * @dma_addr: DMA (mapped) address of _this_ descriptor. | ||
1002 | * @dma_handle: DMA handle used to map the descriptor onto device. | ||
1003 | * @dma_offset: Descriptor's offset in the memory block. HW allocates | ||
1004 | * descriptors in memory blocks (see struct vxge_hw_fifo_config{}) | ||
1005 | * Each memblock is a contiguous block of DMA-able memory. | ||
1006 | * @frags: Total number of fragments (that is, contiguous data buffers) | ||
1007 | * carried by this TxDL. | ||
1008 | * @align_vaddr_start: Aligned virtual address start | ||
1009 | * @align_vaddr: Virtual address of the per-TxDL area in memory used for | ||
1010 | * alignment. Used to place one or more misaligned fragments | ||
1011 | * @align_dma_addr: DMA address translated from the @align_vaddr. | ||
1012 | * @align_dma_handle: DMA handle that corresponds to @align_dma_addr. | ||
1013 | * @align_dma_acch: DMA access handle corresponds to @align_dma_addr. | ||
1014 | * @align_dma_offset: The current offset into the @align_vaddr area. | ||
1015 | * Grows while filling the descriptor, gets reset. | ||
1016 | * @align_used_frags: Number of fragments used. | ||
1017 | * @alloc_frags: Total number of fragments allocated. | ||
1018 | * @unused: TODO | ||
1019 | * @next_txdl_priv: (TODO). | ||
1020 | * @first_txdp: (TODO). | ||
1021 | * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous | ||
1022 | * TxDL list. | ||
1023 | * @txdlh: Corresponding txdlh to this TxDL. | ||
1024 | * @memblock: Pointer to the TxDL memory block or memory page. | ||
1026 | * @dma_object: DMA address and handle of the memory block that contains | ||
1027 | * the descriptor. This member is used only in the "checked" | ||
1028 | * version of the HW (to enforce certain assertions); | ||
1029 | * otherwise it gets compiled out. | ||
1030 | * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage. | ||
1031 | * | ||
1032 | * Per-transmit descriptor HW-private data. HW uses the space to keep DMA | ||
1033 | * information associated with the descriptor. Note that driver can ask HW | ||
1034 | * to allocate additional per-descriptor space for its own (driver-specific) | ||
1035 | * purposes. | ||
1036 | * | ||
1037 | * See also: struct vxge_hw_ring_rxd_priv{}. | ||
1038 | */ | ||
1039 | struct __vxge_hw_fifo_txdl_priv { | ||
1040 | dma_addr_t dma_addr; | ||
1041 | struct pci_dev *dma_handle; | ||
1042 | ptrdiff_t dma_offset; | ||
1043 | u32 frags; | ||
1044 | u8 *align_vaddr_start; | ||
1045 | u8 *align_vaddr; | ||
1046 | dma_addr_t align_dma_addr; | ||
1047 | struct pci_dev *align_dma_handle; | ||
1048 | struct pci_dev *align_dma_acch; | ||
1049 | ptrdiff_t align_dma_offset; | ||
1050 | u32 align_used_frags; | ||
1051 | u32 alloc_frags; | ||
1052 | u32 unused; | ||
1053 | struct __vxge_hw_fifo_txdl_priv *next_txdl_priv; | ||
1054 | struct vxge_hw_fifo_txd *first_txdp; | ||
1055 | void *memblock; | ||
1056 | }; | ||
1057 | |||
1058 | /* | ||
1059 | * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper | ||
1060 | * @control_0: Bits 0 to 7 - Doorbell type. | ||
1061 | * Bits 8 to 31 - Reserved. | ||
1062 | * Bits 32 to 39 - The highest TxD in this TxDL. | ||
1063 | * Bits 40 to 47 - Reserved. | ||
1064 | * Bits 48 to 55 - Reserved. | ||
1065 | * Bits 56 to 63 - No snoop flags. | ||
1066 | * @txdl_ptr: The starting location of the TxDL in host memory. | ||
1067 | * | ||
1068 | * Created by the host and written to the adapter via PIO to a Kernel Doorbell | ||
1069 | * FIFO. All non-offload doorbell wrapper fields must be written by the host as | ||
1070 | * part of a doorbell write. Consumed by the adapter but is not written by the | ||
1071 | * adapter. | ||
1072 | */ | ||
1073 | struct __vxge_hw_non_offload_db_wrapper { | ||
1074 | u64 control_0; | ||
1075 | #define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8) | ||
1076 | #define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8) | ||
1077 | #define VXGE_HW_NODBW_TYPE_NODBW 0 | ||
1078 | |||
1079 | #define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8) | ||
1080 | #define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8) | ||
1081 | |||
1082 | #define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8) | ||
1083 | #define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8) | ||
1084 | #define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2 | ||
1085 | #define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1 | ||
1086 | |||
1087 | u64 txdl_ptr; | ||
1088 | }; | ||
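/*
 * Illustrative sketch only: composing a non-offload doorbell wrapper with
 * the field macros above.  The helper name and argument values are
 * hypothetical; the real doorbell write path lives in the traffic code.
 */
static inline void
__vxge_example_nodbw_fill(struct __vxge_hw_non_offload_db_wrapper *dbw,
			  u64 txdl_dma, u32 last_txd)
{
	dbw->control_0 =
		VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
		VXGE_HW_NODBW_LIST_NO_SNOOP(
			VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
			VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
	dbw->txdl_ptr = txdl_dma;
}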
1089 | |||
1090 | /* | ||
1091 | * TX Descriptor | ||
1092 | */ | ||
1093 | |||
1094 | /** | ||
1095 | * struct vxge_hw_fifo_txd - Transmit Descriptor | ||
1096 | * @control_0: Bits 0 to 6 - Reserved. | ||
1097 | * Bit 7 - List Ownership. This field should be initialized | ||
1098 | * to '1' by the driver before the transmit list pointer is | ||
1099 | * written to the adapter. This field will be set to '0' by the | ||
1100 | * adapter once it has completed transmitting the frame or frames in | ||
1101 | * the list. Note - This field is only valid in TxD0. Additionally, | ||
1102 | * for multi-list sequences, the driver should not release any | ||
1103 | * buffers until the ownership of the last list in the multi-list | ||
1104 | * sequence has been returned to the host. | ||
1105 | * Bits 8 to 11 - Reserved | ||
1106 | * Bits 12 to 15 - Transfer_Code. This field is only valid in | ||
1107 | * TxD0. It is used to describe the status of the transmit data | ||
1108 | * buffer transfer. This field is always overwritten by the | ||
1109 | * adapter, so this field may be initialized to any value. | ||
1110 | * Bits 16 to 17 - Host steering. This field allows the host to | ||
1111 | * override the selection of the physical transmit port. | ||
1112 | * Attention: | ||
1113 | * "Normal" sounds as if learned from the switch rather than | ||
1114 | * from the aggregation algorithms. | ||
1115 | * 00: Normal. Use Destination/MAC Address | ||
1116 | * lookup to determine the transmit port. | ||
1117 | * 01: Send on physical Port1. | ||
1118 | * 10: Send on physical Port0. | ||
1119 | * 11: Send on both ports. | ||
1120 | * Bits 18 to 21 - Reserved | ||
1121 | * Bits 22 to 23 - Gather_Code. This field is set by the host and | ||
1122 | * is used to describe how individual buffers comprise a frame. | ||
1123 | * 10: First descriptor of a frame. | ||
1124 | * 00: Middle of a multi-descriptor frame. | ||
1125 | * 01: Last descriptor of a frame. | ||
1126 | * 11: First and last descriptor of a frame (the entire frame | ||
1127 | * resides in a single buffer). | ||
1128 | * For multi-descriptor frames, the only valid gather code sequence | ||
1129 | * is {10, [00], 01}. In other words, the descriptors must be placed | ||
1130 | * in the list in the correct order. | ||
1131 | * Bits 24 to 27 - Reserved | ||
1132 | * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation | ||
1133 | * definition. Only valid in TxD0. This field allows the host to | ||
1134 | * indicate the Ethernet encapsulation of an outbound LSO packet. | ||
1135 | * 00 - classic mode (best guess) | ||
1136 | * 01 - LLC | ||
1137 | * 10 - SNAP | ||
1138 | * 11 - DIX | ||
1139 | * If "classic mode" is selected, the adapter will attempt to | ||
1140 | * decode the frame's Ethernet encapsulation by examining the L/T | ||
1141 | * field as follows: | ||
1142 | * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine | ||
1143 | * if packet is IPv4 or IPv6. | ||
1144 | * 0x8870 Jumbo-SNAP encoding. | ||
1145 | * 0x0800 IPv4 DIX encoding | ||
1146 | * 0x86DD IPv6 DIX encoding | ||
1147 | * others illegal encapsulation | ||
1148 | * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag. | ||
1149 | * Set to 1 to perform segmentation offload for TCP/UDP. | ||
1150 | * This field is valid only in TxD0. | ||
1151 | * Bits 31 to 33 - Reserved. | ||
1152 | * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size | ||
1153 | * This field is meaningful only when LSO_Control is non-zero. | ||
1154 | * When LSO_Control is set to TCP_LSO, the single (possibly large) | ||
1155 | * TCP segment described by this TxDL will be sent as a series of | ||
1156 | * TCP segments each of which contains no more than LSO_MSS | ||
1157 | * payload bytes. | ||
1158 | * When LSO_Control is set to UDP_LSO, the single (possibly large) | ||
1159 | * UDP datagram described by this TxDL will be sent as a series of | ||
1160 | * UDP datagrams each of which contains no more than LSO_MSS | ||
1161 | * payload bytes. | ||
1162 | * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP | ||
1163 | * or TCP payload, with the exception of the last, which will have | ||
1164 | * <= LSO_MSS bytes of payload. | ||
1165 | * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the | ||
1166 | * buffer to be read by the adapter. This field is written by the | ||
1167 | * host. A value of 0 is illegal. | ||
1168 | * Bits 32 to 63 - This value is written by the adapter upon | ||
1169 | * completion of a UDP or TCP LSO operation and indicates the number | ||
1170 | * of UDP or TCP payload bytes that were transmitted. 0x0000 will be | ||
1171 | * returned for any non-LSO operation. | ||
1172 | * @control_1: Bits 0 to 4 - Reserved. | ||
1173 | * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum | ||
1174 | * offload. This field is only valid in the first TxD of a frame. | ||
1175 | * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload. | ||
1176 | * This field is only valid in the first TxD of a frame (the TxD's | ||
1177 | * gather code must be 10 or 11). The driver should only set this | ||
1178 | * bit if it can guarantee that TCP is present. | ||
1179 | * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload. | ||
1180 | * This field is only valid in the first TxD of a frame (the TxD's | ||
1181 | * gather code must be 10 or 11). The driver should only set this | ||
1182 | * bit if it can guarantee that UDP is present. | ||
1183 | * Bits 8 to 14 - Reserved. | ||
1184 | * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to | ||
1185 | * instruct the adapter to insert the VLAN tag specified by the | ||
1186 | * Tx_VLAN_Tag field. This field is only valid in the first TxD of | ||
1187 | * a frame. | ||
1188 | * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag | ||
1189 | * to be inserted into the frame by the adapter (the first two bytes | ||
1190 | * of a VLAN tag are always 0x8100). This field is only valid if the | ||
1191 | * Tx_VLAN_Enable field is set to '1'. | ||
1192 | * Bits 32 to 33 - Reserved. | ||
1193 | * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt | ||
1194 | * number the frame associated with. This field is written by the | ||
1195 | * host. It is only valid in the first TxD of a frame. | ||
1196 | * Bits 40 to 42 - Reserved. | ||
1197 | * Bit 43 - Set to 1 to exclude the frame from bandwidth metering | ||
1198 | * functions. This field is valid only in the first TxD | ||
1199 | * of a frame. | ||
1200 | * Bits 44 to 45 - Reserved. | ||
1201 | * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to | ||
1202 | * generate an interrupt as soon as all of the frames in the list | ||
1203 | * have been transmitted. In order to have per-frame interrupts, | ||
1204 | * the driver should place a maximum of one frame per list. This | ||
1205 | * field is only valid in the first TxD of a frame. | ||
1206 | * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter | ||
1207 | * to count the frame toward the utilization interrupt specified in | ||
1208 | * the Tx_Int_Number field. This field is only valid in the first | ||
1209 | * TxD of a frame. | ||
1210 | * Bits 48 to 63 - Reserved. | ||
1211 | * @buffer_pointer: Buffer start address. | ||
1212 | * @host_control: Host_Control.Opaque 64bit data stored by driver inside the | ||
1213 | * Titan descriptor prior to posting the latter on the fifo | ||
1214 | * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is | ||
1215 | * to the driver with each completed descriptor. | ||
1216 | * | ||
1217 | * Transmit descriptor (TxD). Fifo descriptor contains a configured number | ||
1218 | * (list) of TxDs. For more details please refer to Titan User Guide, | ||
1219 | * Section 5.4.2 "Transmit Descriptor (TxD) Format". | ||
1220 | */ | ||
1221 | struct vxge_hw_fifo_txd { | ||
1222 | u64 control_0; | ||
1223 | #define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7) | ||
1224 | |||
1225 | #define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) | ||
1226 | #define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4) | ||
1227 | #define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED | ||
1228 | |||
1229 | |||
1230 | #define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2) | ||
1231 | #define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST | ||
1232 | #define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST | ||
1233 | |||
1234 | |||
1235 | #define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30) | ||
1236 | |||
1237 | #define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14) | ||
1238 | |||
1239 | #define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16) | ||
1240 | |||
1241 | u64 control_1; | ||
1242 | #define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5) | ||
1243 | #define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6) | ||
1244 | #define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7) | ||
1245 | #define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15) | ||
1246 | |||
1247 | #define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16) | ||
1248 | |||
1249 | #define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6) | ||
1250 | |||
1251 | #define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46) | ||
1252 | #define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47) | ||
1253 | |||
1254 | u64 buffer_pointer; | ||
1255 | |||
1256 | u64 host_control; | ||
1257 | }; | ||
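/*
 * Illustrative sketch only: filling one TxD that carries an entire frame
 * in a single buffer, with IPv4/TCP checksum offload requested.  The
 * helper name is hypothetical, and ORing the FIRST and LAST gather codes
 * to express "first and last descriptor" follows the control_0
 * documentation above (code 11b).  Ownership is handed to the adapter as
 * the final step, as the List Ownership description requires.
 */
static inline void
__vxge_example_txd_fill(struct vxge_hw_fifo_txd *txdp,
			dma_addr_t buf_dma, u32 buf_size)
{
	txdp->buffer_pointer = (u64)buf_dma;
	txdp->control_1 = VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			  VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN;
	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
			  VXGE_HW_FIFO_TXD_BUFFER_SIZE(buf_size) |
			  VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
}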
1258 | |||
1259 | /** | ||
1260 | * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring | ||
1261 | * @host_control: This field is exclusively for host use and is "readonly" | ||
1262 | * from the adapter's perspective. | ||
1263 | * @control_0:Bits 0 to 6 - RTH_Bucket get | ||
1264 | * Bit 7 - Own Descriptor ownership bit. This bit is set to 1 | ||
1265 | * by the host, and is set to 0 by the adapter. | ||
1266 | * 0 - Host owns RxD and buffer. | ||
1267 | * 1 - The adapter owns RxD and buffer. | ||
1268 | * Bit 8 - Fast_Path_Eligible When set, indicates that the | ||
1269 | * received frame meets all of the criteria for fast path processing. | ||
1270 | * The required criteria are as follows: | ||
1271 | * !SYN & | ||
1272 | * (Transfer_Code == "Transfer OK") & | ||
1273 | * (!Is_IP_Fragment) & | ||
1274 | * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) | | ||
1275 | * (Is_IPv6)) & | ||
1276 | * ((Is_TCP & computed_L4_checksum == 0xFFFF) | | ||
1277 | * (Is_UDP & (computed_L4_checksum == 0xFFFF | | ||
1278 | * computed_L4_checksum == 0x0000))) | ||
1279 | * (same meaning for all RxD buffer modes) | ||
1280 | * Bit 9 - L3 Checksum Correct | ||
1281 | * Bit 10 - L4 Checksum Correct | ||
1282 | * Bit 11 - Reserved | ||
1283 | * Bit 12 to 15 - This field is written by the adapter. It is | ||
1284 | * used to report the status of the frame transfer to the host. | ||
1285 | * 0x0 - Transfer OK | ||
1286 | * 0x4 - RDA Failure During Transfer | ||
1287 | * 0x5 - Unparseable Packet, such as unknown IPv6 header. | ||
1288 | * 0x6 - Frame integrity error (FCS or ECC). | ||
1289 | * 0x7 - Buffer Size Error. The provided buffer(s) were not | ||
1290 | * appropriately sized and data loss occurred. | ||
1291 | * 0x8 - Internal ECC Error. RxD corrupted. | ||
1292 | * 0x9 - IPv4 Checksum error | ||
1293 | * 0xA - TCP/UDP Checksum error | ||
1294 | * 0xF - Unknown Error or Multiple Error. Indicates an | ||
1295 | * unknown problem or that more than one of transfer codes is set. | ||
1296 | * Bit 16 - SYN The adapter sets this field to indicate that | ||
1297 | * the incoming frame contained a TCP segment with its SYN bit | ||
1298 | * set and its ACK bit NOT set. (same meaning for all RxD buffer | ||
1299 | * modes) | ||
1300 | * Bit 17 - Is ICMP | ||
1301 | * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the | ||
1302 | * Socket Pair Direct Match Table and the frame was steered based | ||
1303 | * on SPDM. | ||
1304 | * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the | ||
1305 | * Indirection Table and the frame was steered based on hash | ||
1306 | * indirection. | ||
1307 | * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash | ||
1308 | * type) that was used to calculate the hash. | ||
1309 | * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN | ||
1310 | * tagged. | ||
1311 | * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation | ||
1312 | * of the received frame. | ||
1313 | * 0x0 - Ethernet DIX | ||
1314 | * 0x1 - LLC | ||
1315 | * 0x2 - SNAP (includes Jumbo-SNAP) | ||
1316 | * 0x3 - IPX | ||
1317 | * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet. | ||
1318 | * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet. | ||
1319 | * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented | ||
1320 | * IP packet. | ||
1321 | * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment. | ||
1322 | * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message. | ||
1323 | * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that | ||
1324 | * arrived with the frame. If the resulting computed IPv4 header | ||
1325 | * checksum for the frame did not produce the expected 0xFFFF value, | ||
1326 | * then the transfer code would be set to 0x9. | ||
1327 | * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that | ||
1328 | * arrived with the frame. If the resulting computed TCP/UDP checksum | ||
1329 | * for the frame did not produce the expected 0xFFFF value, then the | ||
1330 | * transfer code would be set to 0xA. | ||
1331 | * @control_1:Bits 0 to 1 - Reserved | ||
1332 | * Bits 2 to 15 - Buffer0_Size.This field is set by the host and | ||
1333 | * eventually overwritten by the adapter. The host writes the | ||
1334 | * available buffer size in bytes when it passes the descriptor to | ||
1335 | * the adapter. When a frame is delivered to the host, the adapter | ||
1336 | * populates this field with the number of bytes written into the | ||
1337 | * buffer. The largest supported buffer is 16,383 bytes. | ||
1338 | * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if | ||
1339 | * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero. | ||
1340 | * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion | ||
1341 | * of the VLAN tag, if one was detected by the adapter. This field is | ||
1342 | * populated even if VLAN-tag stripping is enabled. | ||
1343 | * @buffer0_ptr: Pointer to buffer. This field is populated by the driver. | ||
1344 | * | ||
1345 | * One buffer mode RxD for ring structure | ||
1346 | */ | ||
1347 | struct vxge_hw_ring_rxd_1 { | ||
1348 | u64 host_control; | ||
1349 | u64 control_0; | ||
1350 | #define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7) | ||
1351 | |||
1352 | #define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7) | ||
1353 | |||
1354 | #define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1) | ||
1355 | |||
1356 | #define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1) | ||
1357 | |||
1358 | #define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1) | ||
1359 | |||
1360 | #define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) | ||
1361 | #define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4) | ||
1362 | |||
1363 | #define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED | ||
1364 | |||
1365 | #define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1) | ||
1366 | |||
1367 | #define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1) | ||
1368 | |||
1369 | #define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1) | ||
1370 | |||
1371 | #define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1) | ||
1372 | |||
1373 | #define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4) | ||
1374 | |||
1375 | #define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1) | ||
1376 | |||
1377 | #define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2) | ||
1378 | |||
1379 | #define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5) | ||
1380 | |||
1381 | #define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16) | ||
1382 | |||
1383 | #define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16) | ||
1384 | |||
1385 | u64 control_1; | ||
1386 | |||
1387 | #define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14) | ||
1388 | #define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14) | ||
1389 | #define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14) | ||
1390 | |||
1391 | #define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32) | ||
1392 | |||
1393 | #define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16) | ||
1394 | |||
1395 | u64 buffer0_ptr; | ||
1396 | }; | ||
1397 | |||
1398 | enum vxge_hw_rth_algoritms { | ||
1399 | RTH_ALG_JENKINS = 0, | ||
1400 | RTH_ALG_MS_RSS = 1, | ||
1401 | RTH_ALG_CRC32C = 2 | ||
1402 | }; | ||
1403 | |||
1404 | /** | ||
1405 | * struct vxge_hw_rth_hash_types - RTH hash types. | ||
1406 | * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4 | ||
1407 | * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4 | ||
1408 | * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6 | ||
1409 | * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6 | ||
1410 | * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex | ||
1411 | * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex | ||
1412 | * | ||
1413 | * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set(). | ||
1414 | * | ||
1415 | * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). | ||
1416 | */ | ||
1417 | struct vxge_hw_rth_hash_types { | ||
1418 | u8 hash_type_tcpipv4_en; | ||
1419 | u8 hash_type_ipv4_en; | ||
1420 | u8 hash_type_tcpipv6_en; | ||
1421 | u8 hash_type_ipv6_en; | ||
1422 | u8 hash_type_tcpipv6ex_en; | ||
1423 | u8 hash_type_ipv6ex_en; | ||
1424 | }; | ||
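/*
 * Illustrative sketch only: a typical initialization that enables IPv4
 * and TCP/IPv4 hashing and leaves the IPv6 variants off.  The helper name
 * is hypothetical; the filled structure is then passed to
 * vxge_hw_vpath_rts_rth_set(), whose remaining parameters are not shown.
 */
static inline void
__vxge_example_rth_types_init(struct vxge_hw_rth_hash_types *types)
{
	types->hash_type_tcpipv4_en = 1;
	types->hash_type_ipv4_en = 1;
	types->hash_type_tcpipv6_en = 0;
	types->hash_type_ipv6_en = 0;
	types->hash_type_tcpipv6ex_en = 0;
	types->hash_type_ipv6ex_en = 0;
}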
1425 | |||
1426 | u32 | ||
1427 | vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh); | ||
1428 | |||
1429 | void vxge_hw_device_debug_set( | ||
1430 | struct __vxge_hw_device *devh, | ||
1431 | enum vxge_debug_level level, | ||
1432 | u32 mask); | ||
1433 | |||
1434 | u32 | ||
1435 | vxge_hw_device_error_level_get(struct __vxge_hw_device *devh); | ||
1436 | |||
1437 | u32 | ||
1438 | vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); | ||
1439 | |||
1443 | /** | ||
1444 | * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. | ||
1445 | * @buf_mode: Buffer mode (1, 3 or 5) | ||
1446 | * | ||
1447 | * This function returns the size of an RxD for the given buffer mode. | ||
1448 | */ | ||
1449 | static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode) | ||
1450 | { | ||
1451 | return sizeof(struct vxge_hw_ring_rxd_1); | ||
1452 | } | ||
1453 | |||
1454 | /** | ||
1455 | * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block. | ||
1456 | * @buf_mode: Buffer mode (1 buffer mode only) | ||
1457 | * | ||
1458 | * This function returns the number of RxDs per RxD block for the given buffer mode. | ||
1459 | */ | ||
1460 | static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode) | ||
1461 | { | ||
1462 | return (u32)((VXGE_HW_BLOCK_SIZE-16) / | ||
1463 | sizeof(struct vxge_hw_ring_rxd_1)); | ||
1464 | } | ||
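/*
 * Worked example (assuming VXGE_HW_BLOCK_SIZE is one 4096-byte page): an
 * RxD block reserves 16 bytes of HW-internal space at its end, and a
 * 1-buffer mode RxD is 32 bytes (sizeof(struct vxge_hw_ring_rxd_1)), so
 * (4096 - 16) / 32 = 127 RxDs fit in each block.
 */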
1465 | |||
1466 | /** | ||
1467 | * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor. | ||
1468 | * @rxdh: Descriptor handle. | ||
1469 | * @dma_pointer: DMA address of a single receive buffer this descriptor | ||
1470 | * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called, | ||
1471 | * the receive buffer should be already mapped to the device | ||
1472 | * @size: Size of the receive @dma_pointer buffer. | ||
1473 | * | ||
1474 | * Prepare 1-buffer-mode Rx descriptor for posting | ||
1475 | * (via vxge_hw_ring_rxd_post()). | ||
1476 | * | ||
1477 | * This inline helper-function does not return any parameters and always | ||
1478 | * succeeds. | ||
1479 | * | ||
1480 | */ | ||
1481 | static inline | ||
1482 | void vxge_hw_ring_rxd_1b_set( | ||
1483 | void *rxdh, | ||
1484 | dma_addr_t dma_pointer, | ||
1485 | u32 size) | ||
1486 | { | ||
1487 | struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; | ||
1488 | rxdp->buffer0_ptr = dma_pointer; | ||
1489 | rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK; | ||
1490 | rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size); | ||
1491 | } | ||
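/*
 * Illustrative sketch only: typical use of vxge_hw_ring_rxd_1b_set() from
 * a ring rxd_init callback.  The buffer is assumed to be DMA-mapped
 * already; reserving and posting the descriptor is done by the traffic
 * path and is not shown.  The helper name is hypothetical.
 */
static inline enum vxge_hw_status
__vxge_example_rxd_prepare(void *rxdh, dma_addr_t buf_dma, u32 buf_size)
{
	vxge_hw_ring_rxd_1b_set(rxdh, buf_dma, buf_size);
	return VXGE_HW_OK;
}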
1492 | |||
1493 | /** | ||
1494 | * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf | ||
1495 | * descriptor. | ||
1496 | * @ring_handle: Ring handle. | ||
1497 | * @rxdh: Descriptor handle. | ||
1498 | * @pkt_length: Length (in bytes) of the data in the buffer carried by | ||
1499 | * this descriptor. Returned by HW. | ||
1500 | * | ||
1501 | * Retrieve the packet length from the completed 1-buffer-mode Rx | ||
1502 | * descriptor. This inline helper-function uses the completed descriptor | ||
1503 | * to populate the @pkt_length "out" parameter. The function always | ||
1504 | * succeeds. | ||
1505 | * | ||
1506 | */ | ||
1507 | static inline | ||
1508 | void vxge_hw_ring_rxd_1b_get( | ||
1509 | struct __vxge_hw_ring *ring_handle, | ||
1510 | void *rxdh, | ||
1511 | u32 *pkt_length) | ||
1512 | { | ||
1513 | struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; | ||
1514 | |||
1515 | *pkt_length = | ||
1516 | (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1); | ||
1517 | } | ||
1518 | |||
1519 | /** | ||
1520 | * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with | ||
1521 | * a completed receive descriptor for 1b mode. | ||
1522 | * @ring_handle: Ring handle. | ||
1523 | * @rxdh: Descriptor handle. | ||
1524 | * @rxd_info: Descriptor information | ||
1525 | * | ||
1526 | * Retrieve extended information associated with a completed receive descriptor. | ||
1527 | * | ||
1528 | */ | ||
1529 | static inline | ||
1530 | void vxge_hw_ring_rxd_1b_info_get( | ||
1531 | struct __vxge_hw_ring *ring_handle, | ||
1532 | void *rxdh, | ||
1533 | struct vxge_hw_ring_rxd_info *rxd_info) | ||
1534 | { | ||
1535 | |||
1536 | struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; | ||
1537 | rxd_info->syn_flag = | ||
1538 | (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0); | ||
1539 | rxd_info->is_icmp = | ||
1540 | (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0); | ||
1541 | rxd_info->fast_path_eligible = | ||
1542 | (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0); | ||
1543 | rxd_info->l3_cksum_valid = | ||
1544 | (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0); | ||
1545 | rxd_info->l3_cksum = | ||
1546 | (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0); | ||
1547 | rxd_info->l4_cksum_valid = | ||
1548 | (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0); | ||
1549 | rxd_info->l4_cksum = | ||
1550 | (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0); | ||
1551 | rxd_info->frame = | ||
1552 | (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0); | ||
1553 | rxd_info->proto = | ||
1554 | (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0); | ||
1555 | rxd_info->is_vlan = | ||
1556 | (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0); | ||
1557 | rxd_info->vlan = | ||
1558 | (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1); | ||
1559 | rxd_info->rth_bucket = | ||
1560 | (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0); | ||
1561 | rxd_info->rth_it_hit = | ||
1562 | (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0); | ||
1563 | rxd_info->rth_spdm_hit = | ||
1564 | (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0); | ||
1565 | rxd_info->rth_hash_type = | ||
1566 | (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0); | ||
1567 | rxd_info->rth_value = | ||
1568 | (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1); | ||
1569 | } | ||
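/*
 * Illustrative sketch only: a receive-completion fragment that pulls the
 * packet length and the extended metadata out of a completed 1-buffer
 * RxD.  What a driver does with the metadata (e.g. trusting the hardware
 * checksum verdicts) is an assumed example policy; the helper name is
 * hypothetical.
 */
static inline u32
__vxge_example_rx_complete(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_info rxd_info;
	u32 pkt_length;

	vxge_hw_ring_rxd_1b_get(ring, rxdh, &pkt_length);
	vxge_hw_ring_rxd_1b_info_get(ring, rxdh, &rxd_info);

	if (rxd_info.l3_cksum_valid && rxd_info.l4_cksum_valid) {
		/* e.g. skip software checksum verification */
	}
	return pkt_length;
}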
1570 | |||
1571 | /** | ||
1572 | * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data | ||
1573 | * of a 1-buffer mode ring. | ||
1574 | * @rxdh: Descriptor handle. | ||
1575 | * | ||
1576 | * Returns: private driver info associated with the descriptor. | ||
1577 | * The driver requests per-descriptor space via struct vxge_hw_ring_attr. | ||
1578 | * | ||
1579 | */ | ||
1580 | static inline void *vxge_hw_ring_rxd_private_get(void *rxdh) | ||
1581 | { | ||
1582 | struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; | ||
1583 | return (void *)(size_t)rxdp->host_control; | ||
1584 | } | ||
1585 | |||
1586 | /** | ||
1587 | * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum. | ||
1588 | * @txdlh: Descriptor handle. | ||
1589 | * @cksum_bits: Specifies which checksums are to be offloaded: IPv4, | ||
1590 | * and/or TCP and/or UDP. | ||
1591 | * | ||
1592 | * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit | ||
1593 | * descriptor. | ||
1594 | * This API is part of the preparation of the transmit descriptor for posting | ||
1595 | * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include | ||
1596 | * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(), | ||
1597 | * and vxge_hw_fifo_txdl_buffer_set(). | ||
1598 | * All these APIs fill in the fields of the fifo descriptor, | ||
1599 | * in accordance with the Titan specification. | ||
1600 | * | ||
1601 | */ | ||
1602 | static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits) | ||
1603 | { | ||
1604 | struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; | ||
1605 | txdp->control_1 |= cksum_bits; | ||
1606 | } | ||
1607 | |||
1608 | /** | ||
1609 | * vxge_hw_fifo_txdl_mss_set - Set MSS. | ||
1610 | * @txdlh: Descriptor handle. | ||
1611 | * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the | ||
1612 | * driver, which in turn inserts the MSS into the @txdlh. | ||
1613 | * | ||
1614 | * This API is part of the preparation of the transmit descriptor for posting | ||
1615 | * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include | ||
1616 | * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(), | ||
1617 | * and vxge_hw_fifo_txdl_cksum_set_bits(). | ||
1618 | * All these APIs fill in the fields of the fifo descriptor, | ||
1619 | * in accordance with the Titan specification. | ||
1620 | * | ||
1621 | */ | ||
1622 | static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss) | ||
1623 | { | ||
1624 | struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; | ||
1625 | |||
1626 | txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN; | ||
1627 | txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss); | ||
1628 | } | ||
1629 | |||
1630 | /** | ||
1631 | * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag. | ||
1632 | * @txdlh: Descriptor handle. | ||
1633 | * @vlan_tag: 16bit VLAN tag. | ||
1634 | * | ||
1635 | * Insert VLAN tag into specified transmit descriptor. | ||
1636 | * The actual insertion of the tag into outgoing frame is done by the hardware. | ||
1637 | */ | ||
1638 | static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag) | ||
1639 | { | ||
1640 | struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; | ||
1641 | |||
1642 | txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE; | ||
1643 | txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag); | ||
1644 | } | ||
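/*
 * Illustrative sketch only: the usual "prepare" sequence for an LSO frame
 * with checksum offload and VLAN insertion, combining the three helpers
 * above.  The MSS and VLAN tag are assumed example values; the descriptor
 * is posted afterwards by the traffic path.
 */
static inline void __vxge_example_txdl_prepare(void *txdlh)
{
	vxge_hw_fifo_txdl_mss_set(txdlh, 1448);
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
	vxge_hw_fifo_txdl_vlan_set(txdlh, 100);
}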
1645 | |||
1646 | /** | ||
1647 | * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data. | ||
1648 | * @txdlh: Descriptor handle. | ||
1649 | * | ||
1650 | * Retrieve per-descriptor private data. | ||
1651 | * Note that driver requests per-descriptor space via | ||
1652 | * struct vxge_hw_fifo_attr passed to | ||
1653 | * vxge_hw_vpath_open(). | ||
1654 | * | ||
1655 | * Returns: private driver data associated with the descriptor. | ||
1656 | */ | ||
1657 | static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh) | ||
1658 | { | ||
1659 | struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; | ||
1660 | |||
1661 | return (void *)(size_t)txdp->host_control; | ||
1662 | } | ||
1663 | |||
1664 | /** | ||
1665 | * struct vxge_hw_ring_attr - Ring open "template". | ||
1666 | * @callback: Ring completion callback. HW invokes the callback when there | ||
1667 | * are new completions on that ring. In many implementations | ||
1668 | * the @callback executes in the hw interrupt context. | ||
1669 | * @rxd_init: Ring's descriptor-initialize callback. | ||
1670 | * See vxge_hw_ring_rxd_init_f{}. | ||
1671 | * If not NULL, HW invokes the callback when opening | ||
1672 | * the ring. | ||
1673 | * @rxd_term: Ring's descriptor-terminate callback. If not NULL, | ||
1674 | * HW invokes the callback when closing the corresponding ring. | ||
1675 | * See also vxge_hw_ring_rxd_term_f{}. | ||
1676 | * @userdata: User-defined "context" of _that_ ring. Passed back to the | ||
1677 | * user as one of the @callback, @rxd_init, and @rxd_term arguments. | ||
1678 | * @per_rxd_space: If specified (i.e., greater than zero): extra space | ||
1679 | * reserved by HW per each receive descriptor. | ||
1680 | * Can be used to store | ||
1681 | * and retrieve on completion, information specific | ||
1682 | * to the driver. | ||
1683 | * | ||
1684 | * Ring open "template". User fills the structure with ring | ||
1685 | * attributes and passes it to vxge_hw_vpath_open(). | ||
1686 | */ | ||
1687 | struct vxge_hw_ring_attr { | ||
1688 | enum vxge_hw_status (*callback)( | ||
1689 | struct __vxge_hw_ring *ringh, | ||
1690 | void *rxdh, | ||
1691 | u8 t_code, | ||
1692 | void *userdata); | ||
1693 | |||
1694 | enum vxge_hw_status (*rxd_init)( | ||
1695 | void *rxdh, | ||
1696 | void *userdata); | ||
1697 | |||
1698 | void (*rxd_term)( | ||
1699 | void *rxdh, | ||
1700 | enum vxge_hw_rxd_state state, | ||
1701 | void *userdata); | ||
1702 | |||
1703 | void *userdata; | ||
1704 | u32 per_rxd_space; | ||
1705 | }; | ||
1706 | |||
1707 | /** | ||
1708 | * function vxge_hw_fifo_callback_f - FIFO callback. | ||
1709 | * @vpath_handle: Virtual path whose fifo contains one or more completed | ||
1710 | * descriptors. | ||
1711 | * @txdlh: First completed descriptor. | ||
1712 | * @txdl_priv: Pointer to per txdl space allocated | ||
1713 | * @t_code: Transfer code, as per Titan User Guide. | ||
1714 | * Returned by HW. | ||
1715 | * @host_control: Opaque 64bit data stored by driver inside the Titan | ||
1716 | * descriptor prior to posting the latter on the fifo | ||
1717 | * via vxge_hw_fifo_txdl_post(). The @host_control is returned | ||
1718 | * as is to the driver with each completed descriptor. | ||
1719 | * @userdata: Opaque per-fifo data specified at fifo open | ||
1720 | * time, via vxge_hw_vpath_open(). | ||
1721 | * | ||
1722 | * Fifo completion callback (type declaration). A single per-fifo | ||
1723 | * callback is specified at fifo open time, via | ||
1724 | * vxge_hw_vpath_open(). Typically gets called as part of the processing | ||
1725 | * of the Interrupt Service Routine. | ||
1726 | * | ||
1727 | * Fifo callback gets called by HW if, and only if, there is at least | ||
1728 | * one new completion on a given fifo. Upon processing the first @txdlh driver | ||
1729 | * is _supposed_ to continue consuming completions using: | ||
1730 | * - vxge_hw_fifo_txdl_next_completed() | ||
1731 | * | ||
1732 | * Note that failure to process new completions in a timely fashion | ||
1733 | * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition. | ||
1734 | * | ||
1735 | * Non-zero @t_code means failure to process transmit descriptor. | ||
1736 | * | ||
1737 | * In the "transmit" case the failure could happen, for instance, when the | ||
1738 | * link is down, in which case Titan completes the descriptor because it | ||
1739 | * is not able to send the data out. | ||
1740 | * | ||
1741 | * For details please refer to Titan User Guide. | ||
1742 | * | ||
1743 | * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}. | ||
1744 | */ | ||
1745 | /** | ||
1746 | * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback. | ||
1747 | * @txdlh: First completed descriptor. | ||
1748 | * @txdl_priv: Pointer to per txdl space allocated | ||
1749 | * @state: One of the enum vxge_hw_txdl_state{} enumerated states. | ||
1750 | * @userdata: Per-fifo user data (a.k.a. context) specified at | ||
1751 | * fifo open time, via vxge_hw_vpath_open(). | ||
1752 | * | ||
1753 | * Terminate descriptor callback. Unless NULL is specified in the | ||
1754 | * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(), | ||
1755 | * HW invokes the callback as part of closing fifo, prior to | ||
1756 | * de-allocating the ring and associated data structures | ||
1757 | * (including descriptors). | ||
1758 | * The driver should use the callback to (for instance) unmap | ||
1759 | * and free DMA data buffers associated with the posted (state = | ||
1760 | * VXGE_HW_TXDL_STATE_POSTED) descriptors, | ||
1761 | * as well as other relevant cleanup functions. | ||
1762 | * | ||
1763 | * See also: struct vxge_hw_fifo_attr{} | ||
1764 | */ | ||
1765 | /** | ||
1766 | * struct vxge_hw_fifo_attr - Fifo open "template". | ||
1767 | * @callback: Fifo completion callback. HW invokes the callback when there | ||
1768 | * are new completions on that fifo. In many implementations | ||
1769 | * the @callback executes in the hw interrupt context. | ||
1770 | * @txdl_term: Fifo's descriptor-terminate callback. If not NULL, | ||
1771 | * HW invokes the callback when closing the corresponding fifo. | ||
1772 | * See also vxge_hw_fifo_txdl_term_f{}. | ||
1773 | * @userdata: User-defined "context" of _that_ fifo. Passed back to the | ||
1774 | * user as one of the @callback, and @txdl_term arguments. | ||
1775 | * @per_txdl_space: If specified (i.e., greater than zero): extra space | ||
1776 | * reserved by HW per each transmit descriptor. Can be used to | ||
1777 | * store, and retrieve on completion, information specific | ||
1778 | * to the driver. | ||
1779 | * | ||
1780 | * Fifo open "template". User fills the structure with fifo | ||
1781 | * attributes and passes it to vxge_hw_vpath_open(). | ||
1782 | */ | ||
1783 | struct vxge_hw_fifo_attr { | ||
1784 | |||
1785 | enum vxge_hw_status (*callback)( | ||
1786 | struct __vxge_hw_fifo *fifo_handle, | ||
1787 | void *txdlh, | ||
1788 | enum vxge_hw_fifo_tcode t_code, | ||
1789 | void *userdata, | ||
1790 | void **skb_ptr); | ||
1791 | |||
1792 | void (*txdl_term)( | ||
1793 | void *txdlh, | ||
1794 | enum vxge_hw_txdl_state state, | ||
1795 | void *userdata); | ||
1796 | |||
1797 | void *userdata; | ||
1798 | u32 per_txdl_space; | ||
1799 | }; | ||
1800 | |||
1801 | /** | ||
1802 | * struct vxge_hw_vpath_attr - Attributes of virtual path | ||
1803 | * @vp_id: Identifier of Virtual Path | ||
1804 | * @ring_attr: Attributes of ring for non-offload receive | ||
1805 | * @fifo_attr: Attributes of fifo for non-offload transmit | ||
1806 | * | ||
1807 | * Attributes of virtual path. This structure is passed as parameter | ||
1808 | * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo. | ||
1809 | */ | ||
1810 | struct vxge_hw_vpath_attr { | ||
1811 | u32 vp_id; | ||
1812 | struct vxge_hw_ring_attr ring_attr; | ||
1813 | struct vxge_hw_fifo_attr fifo_attr; | ||
1814 | }; | ||
1815 | |||
1816 | enum vxge_hw_status | ||
1817 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1818 | struct __vxge_hw_blockpool *blockpool, | ||
1819 | u32 pool_size, | ||
1820 | u32 pool_max); | ||
1821 | |||
1822 | void | ||
1823 | __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); | ||
1824 | |||
1825 | struct __vxge_hw_blockpool_entry * | ||
1826 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, | ||
1827 | u32 size); | ||
1828 | |||
1829 | void | ||
1830 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, | ||
1831 | struct __vxge_hw_blockpool_entry *entry); | ||
1832 | |||
1833 | void * | ||
1834 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, | ||
1835 | u32 size, | ||
1836 | struct vxge_hw_mempool_dma *dma_object); | ||
1837 | |||
1838 | void | ||
1839 | __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, | ||
1840 | void *memblock, | ||
1841 | u32 size, | ||
1842 | struct vxge_hw_mempool_dma *dma_object); | ||
1843 | |||
1844 | enum vxge_hw_status | ||
1845 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); | ||
1846 | |||
1847 | enum vxge_hw_status | ||
1848 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); | ||
1849 | |||
1850 | enum vxge_hw_status | ||
1851 | vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh, | ||
1852 | struct vxge_hw_device_config *dev_config, int size); | ||
1853 | |||
1854 | enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( | ||
1855 | void __iomem *bar0, | ||
1856 | struct vxge_hw_device_hw_info *hw_info); | ||
1857 | |||
1858 | enum vxge_hw_status | ||
1859 | __vxge_hw_vpath_fw_ver_get( | ||
1860 | u32 vp_id, | ||
1861 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
1862 | struct vxge_hw_device_hw_info *hw_info); | ||
1863 | |||
1864 | enum vxge_hw_status | ||
1865 | __vxge_hw_vpath_card_info_get( | ||
1866 | u32 vp_id, | ||
1867 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
1868 | struct vxge_hw_device_hw_info *hw_info); | ||
1869 | |||
1870 | enum vxge_hw_status __devinit vxge_hw_device_config_default_get( | ||
1871 | struct vxge_hw_device_config *device_config); | ||
1872 | |||
1873 | /** | ||
1874 | * vxge_hw_device_link_state_get - Get link state. | ||
1875 | * @devh: HW device handle. | ||
1876 | * | ||
1877 | * Get link state. | ||
1878 | * Returns: link state. | ||
1879 | */ | ||
1880 | static inline | ||
1881 | enum vxge_hw_device_link_state vxge_hw_device_link_state_get( | ||
1882 | struct __vxge_hw_device *devh) | ||
1883 | { | ||
1884 | return devh->link_state; | ||
1885 | } | ||
1886 | |||
1887 | void vxge_hw_device_terminate(struct __vxge_hw_device *devh); | ||
1888 | |||
1889 | const u8 * | ||
1890 | vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh); | ||
1891 | |||
1892 | u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh); | ||
1893 | |||
1894 | const u8 * | ||
1895 | vxge_hw_device_product_name_get(struct __vxge_hw_device *devh); | ||
1896 | |||
1897 | enum vxge_hw_status __devinit vxge_hw_device_initialize( | ||
1898 | struct __vxge_hw_device **devh, | ||
1899 | struct vxge_hw_device_attr *attr, | ||
1900 | struct vxge_hw_device_config *device_config); | ||
1901 | |||
1902 | enum vxge_hw_status vxge_hw_device_getpause_data( | ||
1903 | struct __vxge_hw_device *devh, | ||
1904 | u32 port, | ||
1905 | u32 *tx, | ||
1906 | u32 *rx); | ||
1907 | |||
1908 | enum vxge_hw_status vxge_hw_device_setpause_data( | ||
1909 | struct __vxge_hw_device *devh, | ||
1910 | u32 port, | ||
1911 | u32 tx, | ||
1912 | u32 rx); | ||
1913 | |||
1914 | static inline void *vxge_os_dma_malloc(struct pci_dev *pdev, | ||
1915 | unsigned long size, | ||
1916 | struct pci_dev **p_dmah, | ||
1917 | struct pci_dev **p_dma_acch) | ||
1918 | { | ||
1919 | gfp_t flags; | ||
1920 | void *vaddr; | ||
1921 | unsigned long misaligned = 0; | ||
1922 | *p_dma_acch = *p_dmah = NULL; | ||
1923 | |||
1924 | if (in_interrupt()) | ||
1925 | flags = GFP_ATOMIC | GFP_DMA; | ||
1926 | else | ||
1927 | flags = GFP_KERNEL | GFP_DMA; | ||
1928 | |||
1929 | size += VXGE_CACHE_LINE_SIZE; | ||
1930 | |||
1931 | vaddr = kmalloc(size, flags); | ||
1932 | if (vaddr == NULL) | ||
1933 | return vaddr; | ||
1934 | misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr), | ||
1935 | VXGE_CACHE_LINE_SIZE); | ||
1936 | *(unsigned long *)p_dma_acch = misaligned; | ||
1937 | vaddr = (void *)((u8 *)vaddr + misaligned); | ||
1938 | return vaddr; | ||
1939 | } | ||
1940 | |||
1941 | extern void vxge_hw_blockpool_block_add( | ||
1942 | struct __vxge_hw_device *devh, | ||
1943 | void *block_addr, | ||
1944 | u32 length, | ||
1945 | struct pci_dev *dma_h, | ||
1946 | struct pci_dev *acc_handle); | ||
1947 | |||
1948 | static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, | ||
1949 | unsigned long size) | ||
1950 | { | ||
1951 | gfp_t flags; | ||
1952 | void *vaddr; | ||
1953 | |||
1954 | if (in_interrupt()) | ||
1955 | flags = GFP_ATOMIC | GFP_DMA; | ||
1956 | else | ||
1957 | flags = GFP_KERNEL | GFP_DMA; | ||
1958 | |||
1959 | vaddr = kmalloc(size, flags); | ||
1960 | if (vaddr == NULL) | ||
1961 | return; | ||
1962 | |||
1963 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); | ||
1962 | } | ||
1963 | |||
1964 | static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, | ||
1965 | struct pci_dev **p_dma_acch) | ||
1966 | { | ||
1967 | unsigned long misaligned = *(unsigned long *)p_dma_acch; | ||
1968 | u8 *tmp = (u8 *)vaddr; | ||
1969 | tmp -= misaligned; | ||
1970 | kfree((void *)tmp); | ||
1971 | } | ||
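/*
 * Illustrative sketch only: pairing the two helpers above.  The
 * p_dma_acch cookie filled in by vxge_os_dma_malloc() carries the
 * misalignment offset and must be handed back to vxge_os_dma_free(),
 * which is how the original kmalloc() pointer is recovered.  The helper
 * name and the 1024-byte size are hypothetical.
 */
static inline void __vxge_example_dma_roundtrip(struct pci_dev *pdev)
{
	struct pci_dev *dmah = NULL, *dma_acch = NULL;
	void *vaddr;

	vaddr = vxge_os_dma_malloc(pdev, 1024, &dmah, &dma_acch);
	if (vaddr != NULL)
		vxge_os_dma_free(pdev, vaddr, &dma_acch);
}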
1972 | |||
1973 | /* | ||
1974 | * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private space | ||
1975 | */ | ||
1976 | static inline void* | ||
1977 | __vxge_hw_mempool_item_priv( | ||
1978 | struct vxge_hw_mempool *mempool, | ||
1979 | u32 memblock_idx, | ||
1980 | void *item, | ||
1981 | u32 *memblock_item_idx) | ||
1982 | { | ||
1983 | ptrdiff_t offset; | ||
1984 | void *memblock = mempool->memblocks_arr[memblock_idx]; | ||
1985 | |||
1986 | offset = (u8 *)item - (u8 *)memblock; | ||
1988 | vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size); | ||
1989 | |||
1990 | (*memblock_item_idx) = (u32) offset / mempool->item_size; | ||
1991 | vxge_assert((*memblock_item_idx) < mempool->items_per_memblock); | ||
1992 | |||
1993 | return (u8 *)mempool->memblocks_priv_arr[memblock_idx] + | ||
1994 | (*memblock_item_idx) * mempool->items_priv_size; | ||
1995 | } | ||
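Per-item private space lives in a parallel slab, one per memory block: the item's byte offset within its block divided by item_size gives the item index, and the private area sits at that index times items_priv_size inside memblocks_priv_arr[memblock_idx]. For example, with item_size = 256 and items_priv_size = 64, an item at byte offset 1280 of block 3 is item index 5, so its private space starts 320 bytes into block 3's private slab. A caller sketch; the mempool, memblock_idx, and item values come from __vxge_hw_mempool_create() callbacks, and the private struct name is hypothetical:

	u32 item_idx;
	struct example_item_priv *priv;	/* hypothetical per-item bookkeeping */

	priv = __vxge_hw_mempool_item_priv(mempool, memblock_idx,
					   item, &item_idx);
	/* item_idx is now the item's index within its memory block;
	 * priv points at items_priv_size bytes reserved for that item. */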
1996 | |||
1997 | enum vxge_hw_status | ||
1998 | __vxge_hw_mempool_grow( | ||
1999 | struct vxge_hw_mempool *mempool, | ||
2000 | u32 num_allocate, | ||
2001 | u32 *num_allocated); | ||
2002 | |||
2003 | struct vxge_hw_mempool* | ||
2004 | __vxge_hw_mempool_create( | ||
2005 | struct __vxge_hw_device *devh, | ||
2006 | u32 memblock_size, | ||
2007 | u32 item_size, | ||
2008 | u32 private_size, | ||
2009 | u32 items_initial, | ||
2010 | u32 items_max, | ||
2011 | struct vxge_hw_mempool_cbs *mp_callback, | ||
2012 | void *userdata); | ||
2013 | |||
2014 | struct __vxge_hw_channel* | ||
2015 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
2016 | enum __vxge_hw_channel_type type, u32 length, | ||
2017 | u32 per_dtr_space, void *userdata); | ||
2018 | |||
2019 | void | ||
2020 | __vxge_hw_channel_free( | ||
2021 | struct __vxge_hw_channel *channel); | ||
2022 | |||
2023 | enum vxge_hw_status | ||
2024 | __vxge_hw_channel_initialize( | ||
2025 | struct __vxge_hw_channel *channel); | ||
2026 | |||
2027 | enum vxge_hw_status | ||
2028 | __vxge_hw_channel_reset( | ||
2029 | struct __vxge_hw_channel *channel); | ||
2030 | |||
2031 | /* | ||
2032 | * __vxge_hw_fifo_txdl_priv - Return a pointer to the per-TxDL | ||
2033 | * private data for the given fifo descriptor. | ||
2034 | * @fifo: Fifo | ||
2035 | * @txdp: Pointer to a TxD | ||
2036 | */ | ||
2037 | static inline struct __vxge_hw_fifo_txdl_priv * | ||
2038 | __vxge_hw_fifo_txdl_priv( | ||
2039 | struct __vxge_hw_fifo *fifo, | ||
2040 | struct vxge_hw_fifo_txd *txdp) | ||
2041 | { | ||
2042 | return (struct __vxge_hw_fifo_txdl_priv *) | ||
2043 | (((char *)((ulong)txdp->host_control)) + | ||
2044 | fifo->per_txdl_space); | ||
2045 | } | ||
2046 | |||
2047 | enum vxge_hw_status vxge_hw_vpath_open( | ||
2048 | struct __vxge_hw_device *devh, | ||
2049 | struct vxge_hw_vpath_attr *attr, | ||
2050 | struct __vxge_hw_vpath_handle **vpath_handle); | ||
2051 | |||
2052 | enum vxge_hw_status | ||
2053 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog); | ||
2054 | |||
2055 | enum vxge_hw_status vxge_hw_vpath_close( | ||
2056 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2057 | |||
2058 | enum vxge_hw_status | ||
2059 | vxge_hw_vpath_reset( | ||
2060 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2061 | |||
2062 | enum vxge_hw_status | ||
2063 | vxge_hw_vpath_recover_from_reset( | ||
2064 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2065 | |||
2066 | void | ||
2067 | vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp); | ||
2068 | |||
2069 | enum vxge_hw_status | ||
2070 | vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh); | ||
2071 | |||
2072 | enum vxge_hw_status vxge_hw_vpath_mtu_set( | ||
2073 | struct __vxge_hw_vpath_handle *vpath_handle, | ||
2074 | u32 new_mtu); | ||
2075 | |||
2076 | enum vxge_hw_status vxge_hw_vpath_stats_enable( | ||
2077 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
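The vpath prototypes above suggest the lifecycle: open a handle against the device, configure it (MTU, statistics), and close it; on trouble, reset, recover, and only then close. A sketch of that flow, with population of the attr structure omitted and the recovery policy an assumption rather than the driver's confirmed behavior:

	static enum vxge_hw_status
	example_vpath_bringup(struct __vxge_hw_device *devh,
			      struct vxge_hw_vpath_attr *attr, u32 mtu)
	{
		struct __vxge_hw_vpath_handle *vp;
		enum vxge_hw_status status;

		status = vxge_hw_vpath_open(devh, attr, &vp);
		if (status != VXGE_HW_OK)
			return status;

		status = vxge_hw_vpath_mtu_set(vp, mtu);
		if (status == VXGE_HW_OK)
			status = vxge_hw_vpath_stats_enable(vp);

		if (status != VXGE_HW_OK) {
			/* Assumed policy: reset, recover, then close. */
			if (vxge_hw_vpath_reset(vp) == VXGE_HW_OK)
				vxge_hw_vpath_recover_from_reset(vp);
			vxge_hw_vpath_close(vp);
		}
		return status;
	}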
2078 | |||
2079 | enum vxge_hw_status | ||
2080 | __vxge_hw_vpath_stats_access( | ||
2081 | struct __vxge_hw_virtualpath *vpath, | ||
2082 | u32 operation, | ||
2083 | u32 offset, | ||
2084 | u64 *stat); | ||
2085 | |||
2086 | enum vxge_hw_status | ||
2087 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
2088 | struct __vxge_hw_virtualpath *vpath, | ||
2089 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats); | ||
2090 | |||
2091 | enum vxge_hw_status | ||
2092 | __vxge_hw_vpath_xmac_rx_stats_get( | ||
2093 | struct __vxge_hw_virtualpath *vpath, | ||
2094 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); | ||
2095 | |||
2096 | enum vxge_hw_status | ||
2097 | __vxge_hw_vpath_stats_get( | ||
2098 | struct __vxge_hw_virtualpath *vpath, | ||
2099 | struct vxge_hw_vpath_stats_hw_info *hw_stats); | ||
2100 | |||
2101 | void | ||
2102 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); | ||
2103 | |||
2104 | enum vxge_hw_status | ||
2105 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config); | ||
2106 | |||
2107 | void | ||
2108 | __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev); | ||
2109 | |||
2110 | enum vxge_hw_status | ||
2111 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); | ||
2112 | |||
2113 | enum vxge_hw_status | ||
2114 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
2115 | |||
2116 | enum vxge_hw_status | ||
2117 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, | ||
2118 | struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
2119 | |||
2120 | enum vxge_hw_status | ||
2121 | __vxge_hw_device_register_poll( | ||
2122 | void __iomem *reg, | ||
2123 | u64 mask, u32 max_millis); | ||
2124 | |||
2125 | #ifndef readq | ||
2126 | static inline u64 readq(void __iomem *addr) | ||
2127 | { | ||
2128 | u64 ret; | ||
2129 | ret = readl(addr + 4); | ||
2130 | ret <<= 32; | ||
2131 | ret |= readl(addr); | ||
2132 | |||
2133 | return ret; | ||
2134 | } | ||
2135 | #endif | ||
2136 | |||
2137 | #ifndef writeq | ||
2138 | static inline void writeq(u64 val, void __iomem *addr) | ||
2139 | { | ||
2140 | writel((u32) (val), addr); | ||
2141 | writel((u32) (val >> 32), (addr + 4)); | ||
2142 | } | ||
2143 | #endif | ||
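On 32-bit builds without native 64-bit MMIO, these fallbacks split each access into two readl()/writel() cycles, with the low word at addr and the high word at addr + 4. The halves travel in separate bus transactions, so a register the hardware updates concurrently can be observed torn; callers must tolerate or prevent that. The reassembly, worked through (io_addr stands in for any ioremap()ed register address):

	/* If the device register holds 0x1122334455667788ULL, then
	 * readl(addr) returns 0x55667788, readl(addr + 4) returns
	 * 0x11223344, and readq() yields
	 * (0x11223344ULL << 32) | 0x55667788 -- the original value,
	 * provided neither half changed between the two reads. */
	u64 val = readq(io_addr);
	writeq(val | 0x1ULL, io_addr);	/* read-modify-write; also non-atomic */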
2144 | |||
2145 | static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr) | ||
2146 | { | ||
2147 | writel(val, addr + 4); | ||
2148 | } | ||
2149 | |||
2150 | static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr) | ||
2151 | { | ||
2152 | writel(val, addr); | ||
2153 | } | ||
2154 | |||
2155 | static inline enum vxge_hw_status | ||
2156 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, | ||
2157 | u64 mask, u32 max_millis) | ||
2158 | { | ||
2159 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2160 | |||
2161 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
2162 | wmb(); | ||
2163 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
2164 | wmb(); | ||
2165 | |||
2166 | status = __vxge_hw_device_register_poll(addr, mask, max_millis); | ||
2167 | return status; | ||
2168 | } | ||
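The helper writes the low 32 bits first, fences, writes the high 32 bits, fences again, then polls the same location via __vxge_hw_device_register_poll() until max_millis expires (the completion condition is assumed to be the masked bits clearing). Presumably a strobe bit lives in the upper word, so the command fires only after the lower word is already in place. A usage sketch; the register field and strobe mask below are invented for illustration, real offsets live in struct vxge_hw_vpath_reg:

	enum vxge_hw_status status;

	status = __vxge_hw_pio_mem_write64(cmd_val64,
					   &vp_reg->example_cmd_reg,
					   EXAMPLE_CMD_STROBE, 1000);
	if (status != VXGE_HW_OK)
		return status;	/* no completion within 1000 ms */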
2169 | |||
2170 | struct vxge_hw_toc_reg __iomem * | ||
2171 | __vxge_hw_device_toc_get(void __iomem *bar0); | ||
2172 | |||
2173 | enum vxge_hw_status | ||
2174 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev); | ||
2175 | |||
2176 | void | ||
2177 | __vxge_hw_device_id_get(struct __vxge_hw_device *hldev); | ||
2178 | |||
2179 | void | ||
2180 | __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev); | ||
2181 | |||
2182 | enum vxge_hw_status | ||
2183 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); | ||
2184 | |||
2185 | enum vxge_hw_status | ||
2186 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev); | ||
2187 | |||
2188 | enum vxge_hw_status | ||
2189 | __vxge_hw_vpath_pci_read( | ||
2190 | struct __vxge_hw_virtualpath *vpath, | ||
2191 | u32 phy_func_0, | ||
2192 | u32 offset, | ||
2193 | u32 *val); | ||
2194 | |||
2195 | enum vxge_hw_status | ||
2196 | __vxge_hw_vpath_addr_get( | ||
2197 | u32 vp_id, | ||
2198 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2199 | u8 (macaddr)[ETH_ALEN], | ||
2200 | u8 (macaddr_mask)[ETH_ALEN]); | ||
2201 | |||
2202 | u32 | ||
2203 | __vxge_hw_vpath_func_id_get( | ||
2204 | u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg); | ||
2205 | |||
2206 | enum vxge_hw_status | ||
2207 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); | ||
2208 | |||
2209 | /** | ||
2210 | * vxge_debug | ||
2211 | * @module: debug module mask, e.g. VXGE_COMPONENT_LL | ||
2212 | * @level: level of debug verbosity | ||
2213 | * @mask: mask for the debug category | ||
2214 | * @fmt: printf like format string | ||
2215 | * | ||
2216 | * Provides logging facilities. Can be customized on a per-module | ||
2217 | * basis and/or by debug level. Input parameters, except | ||
2218 | * module and level, are the same as for POSIX printf. This function | ||
2219 | * may be compiled out if the DEBUG macro was never defined. | ||
2220 | * See also: enum vxge_debug_level{}. | ||
2221 | */ | ||
2222 | |||
2223 | #define vxge_trace_aux(level, mask, fmt, ...) \ | ||
2224 | {\ | ||
2225 | vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\ | ||
2226 | } | ||
2227 | |||
2228 | #define vxge_debug(module, level, mask, fmt, ...) { \ | ||
2229 | if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \ | ||
2230 | (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\ | ||
2231 | if ((mask & VXGE_DEBUG_MASK) == mask)\ | ||
2232 | vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \ | ||
2233 | } \ | ||
2234 | } | ||
2235 | |||
2236 | #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) | ||
2237 | #define vxge_debug_ll(level, mask, fmt, ...) \ | ||
2238 | {\ | ||
2239 | vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ | ||
2240 | } | ||
2241 | |||
2242 | #else | ||
2243 | #define vxge_debug_ll(level, mask, fmt, ...) | ||
2244 | #endif | ||
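Usage of the logging macros: vxge_debug_ll() fixes the module to VXGE_COMPONENT_LL, the first two arguments pick severity and trace mask, and the rest are printf-style. When VXGE_COMPONENT_LL is excluded from VXGE_DEBUG_MODULE_MASK, the macro expands to nothing and the call compiles away. A sketch; the mask values, vp_id, and mtu are placeholder assumptions:

	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_TRACE_MASK,
		      "vpath %d: opened with mtu %u", vp_id, mtu);
	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_ERR_MASK,
		      "vpath %d: reset failed", vp_id);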
2245 | |||
2246 | enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( | ||
2247 | struct __vxge_hw_vpath_handle **vpath_handles, | ||
2248 | u32 vpath_count, | ||
2249 | u8 *mtable, | ||
2250 | u8 *itable, | ||
2251 | u32 itable_size); | ||
2252 | |||
2253 | enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | ||
2254 | struct __vxge_hw_vpath_handle *vpath_handle, | ||
2255 | enum vxge_hw_rth_algoritms algorithm, | ||
2256 | struct vxge_hw_rth_hash_types *hash_type, | ||
2257 | u16 bucket_size); | ||
2258 | |||
2259 | #endif | ||