Diffstat (limited to 'drivers/net/ethernet/neterion/vxge/vxge-config.c')
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c  5123
1 file changed, 5123 insertions, 0 deletions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
new file mode 100644
index 000000000000..1520c574cb20
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -0,0 +1,5123 @@
1 | /****************************************************************************** | ||
2 | * This software may be used and distributed according to the terms of | ||
3 | * the GNU General Public License (GPL), incorporated herein by reference. | ||
4 | * Drivers based on or derived from this code fall under the GPL and must | ||
5 | * retain the authorship, copyright and license notice. This file is not | ||
6 | * a complete program and may only be used when the entire operating | ||
7 | * system is licensed under the GPL. | ||
8 | * See the file COPYING in this distribution for more information. | ||
9 | * | ||
10 | * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O | ||
11 | * Virtualized Server Adapter. | ||
12 | * Copyright(c) 2002-2010 Exar Corp. | ||
13 | ******************************************************************************/ | ||
14 | #include <linux/vmalloc.h> | ||
15 | #include <linux/etherdevice.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/pci_hotplug.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include "vxge-traffic.h" | ||
21 | #include "vxge-config.h" | ||
22 | #include "vxge-main.h" | ||
23 | |||
24 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
25 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
26 | VXGE_HW_STATS_OP_READ, \ | ||
27 | offset, \ | ||
28 | &val64); \ | ||
29 | if (status != VXGE_HW_OK) \ | ||
30 | return status; \ | ||
31 | } | ||
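
/*
 * Editor's note: the macro above expands to a statement that relies on
 * the local variables 'vpath', 'val64' and 'status' of its caller and
 * can return early from that caller on error, so it is only usable in
 * functions that declare all three (see __vxge_hw_vpath_stats_get
 * later in this file).
 */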
32 | |||
33 | static void | ||
34 | vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) | ||
35 | { | ||
36 | u64 val64; | ||
37 | |||
38 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
39 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
40 | writeq(val64, &vp_reg->rxmac_vcfg0); | ||
41 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle | ||
46 | */ | ||
47 | int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id) | ||
48 | { | ||
49 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
50 | struct __vxge_hw_virtualpath *vpath; | ||
51 | u64 val64, rxd_count, rxd_spat; | ||
52 | int count = 0, total_count = 0; | ||
53 | |||
54 | vpath = &hldev->virtual_paths[vp_id]; | ||
55 | vp_reg = vpath->vp_reg; | ||
56 | |||
57 | vxge_hw_vpath_set_zero_rx_frm_len(vp_reg); | ||
58 | |||
59 | /* Check that the ring controller for this vpath has enough free RxDs | ||
60 | * to send frames to the host. This is done by reading the | ||
61 | * PRC_RXD_DOORBELL_VPn register and comparing the read value to the | ||
62 | * RXD_SPAT value for the vpath. | ||
63 | */ | ||
64 | val64 = readq(&vp_reg->prc_cfg6); | ||
65 | rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1; | ||
66 | /* Use a factor of 2 when comparing rxd_count against rxd_spat to | ||
67 | * allow some headroom. | ||
68 | */ | ||
69 | rxd_spat *= 2; | ||
70 | |||
71 | do { | ||
72 | mdelay(1); | ||
73 | |||
74 | rxd_count = readq(&vp_reg->prc_rxd_doorbell); | ||
75 | |||
76 | /* Check that the ring controller for this vpath does | ||
77 | * not have any frame in its pipeline. | ||
78 | */ | ||
79 | val64 = readq(&vp_reg->frm_in_progress_cnt); | ||
80 | if ((rxd_count <= rxd_spat) || (val64 > 0)) | ||
81 | count = 0; | ||
82 | else | ||
83 | count++; | ||
84 | total_count++; | ||
85 | } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) && | ||
86 | (total_count < VXGE_HW_MAX_POLLING_COUNT)); | ||
87 | |||
88 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
89 | printk(KERN_ALERT "%s: Still receiving traffic. Aborting wait\n", | ||
90 | __func__); | ||
91 | |||
92 | return total_count; | ||
93 | } | ||
94 | |||
95 | /* vxge_hw_device_wait_receive_idle - This function waits until all frames | ||
96 | * stored in the frame buffer for each vpath assigned to the given | ||
97 | * function (hldev) have been sent to the host. | ||
98 | */ | ||
99 | void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) | ||
100 | { | ||
101 | int i, total_count = 0; | ||
102 | |||
103 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
104 | if (!(hldev->vpaths_deployed & vxge_mBIT(i))) | ||
105 | continue; | ||
106 | |||
107 | total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); | ||
108 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * __vxge_hw_device_register_poll | ||
115 | * Poll the given register for up to the specified amount of time, | ||
116 | * returning success once the masked bits read as cleared. | ||
117 | */ | ||
118 | static enum vxge_hw_status | ||
119 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
120 | { | ||
121 | u64 val64; | ||
122 | u32 i = 0; | ||
123 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
124 | |||
125 | udelay(10); | ||
126 | |||
127 | do { | ||
128 | val64 = readq(reg); | ||
129 | if (!(val64 & mask)) | ||
130 | return VXGE_HW_OK; | ||
131 | udelay(100); | ||
132 | } while (++i <= 9); | ||
133 | |||
134 | i = 0; | ||
135 | do { | ||
136 | val64 = readq(reg); | ||
137 | if (!(val64 & mask)) | ||
138 | return VXGE_HW_OK; | ||
139 | mdelay(1); | ||
140 | } while (++i <= max_millis); | ||
141 | |||
142 | return ret; | ||
143 | } | ||
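
/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * two-phase polling pattern above in isolation. A short busy-wait phase
 * catches bits that clear within ~1ms; a second phase backs off to 1ms
 * delays up to the caller's budget. demo_poll_clear() is a hypothetical
 * name; the sketch assumes <linux/io.h>, <linux/delay.h> and
 * <linux/errno.h>.
 */
static int demo_poll_clear(void __iomem *reg, u64 mask, u32 max_millis)
{
	u32 i;

	for (i = 0; i < 10; i++) {		/* phase 1: 100us steps */
		if (!(readq(reg) & mask))
			return 0;
		udelay(100);
	}

	for (i = 0; i < max_millis; i++) {	/* phase 2: 1ms steps */
		if (!(readq(reg) & mask))
			return 0;
		mdelay(1);
	}

	return -ETIMEDOUT;
}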
144 | |||
145 | static inline enum vxge_hw_status | ||
146 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, | ||
147 | u64 mask, u32 max_millis) | ||
148 | { | ||
149 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
150 | wmb(); | ||
151 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
152 | wmb(); | ||
153 | |||
154 | return __vxge_hw_device_register_poll(addr, mask, max_millis); | ||
155 | } | ||
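
/*
 * Editor's note: a hypothetical stand-alone sketch of why the 64-bit
 * write above is split. Not every PCI bridge delivers an atomic 64-bit
 * PIO write, so the value goes out as two 32-bit halves with a write
 * barrier between them; as arranged here, the half carrying the strobe
 * (command) bits goes out last, so the device only acts once the whole
 * value is in place. The two target addresses are assumptions.
 */
static void demo_write64_split(u64 val, void __iomem *data_half,
			       void __iomem *strobe_half)
{
	writel(lower_32_bits(val), data_half);		/* plain data first */
	wmb();
	writel(upper_32_bits(val), strobe_half);	/* strobe bits last */
	wmb();
}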
156 | |||
157 | static enum vxge_hw_status | ||
158 | vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, | ||
159 | u32 fw_memo, u32 offset, u64 *data0, u64 *data1, | ||
160 | u64 *steer_ctrl) | ||
161 | { | ||
162 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | ||
163 | enum vxge_hw_status status; | ||
164 | u64 val64; | ||
165 | u32 retry = 0, max_retry = 3; | ||
166 | |||
167 | spin_lock(&vpath->lock); | ||
168 | if (!vpath->vp_open) { | ||
169 | spin_unlock(&vpath->lock); | ||
170 | max_retry = 100; | ||
171 | } | ||
172 | |||
173 | writeq(*data0, &vp_reg->rts_access_steer_data0); | ||
174 | writeq(*data1, &vp_reg->rts_access_steer_data1); | ||
175 | wmb(); | ||
176 | |||
177 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
178 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | | ||
179 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | | ||
180 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
181 | *steer_ctrl; | ||
182 | |||
183 | status = __vxge_hw_pio_mem_write64(val64, | ||
184 | &vp_reg->rts_access_steer_ctrl, | ||
185 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
186 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
187 | |||
188 | /* The __vxge_hw_device_register_poll can udelay for a significant | ||
189 | * amount of time, blocking other processes from the CPU. If it delays | ||
190 | * for ~5secs, an NMI error can occur. A way around this is to give up | ||
191 | * the processor via msleep, but msleep is not allowed while under lock. | ||
192 | * So, only allow it to delay for ~4secs if open. Otherwise, delay for | ||
193 | * 1sec and sleep for 10ms until the firmware operation has completed | ||
194 | * or timed out. | ||
195 | */ | ||
196 | while ((status != VXGE_HW_OK) && retry++ < max_retry) { | ||
197 | if (!vpath->vp_open) | ||
198 | msleep(20); | ||
199 | status = __vxge_hw_device_register_poll( | ||
200 | &vp_reg->rts_access_steer_ctrl, | ||
201 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
202 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
203 | } | ||
204 | |||
205 | if (status != VXGE_HW_OK) | ||
206 | goto out; | ||
207 | |||
208 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
209 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
210 | *data0 = readq(&vp_reg->rts_access_steer_data0); | ||
211 | *data1 = readq(&vp_reg->rts_access_steer_data1); | ||
212 | *steer_ctrl = val64; | ||
213 | } else | ||
214 | status = VXGE_HW_FAIL; | ||
215 | |||
216 | out: | ||
217 | if (vpath->vp_open) | ||
218 | spin_unlock(&vpath->lock); | ||
219 | return status; | ||
220 | } | ||
221 | |||
222 | enum vxge_hw_status | ||
223 | vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, | ||
224 | u32 *minor, u32 *build) | ||
225 | { | ||
226 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
227 | struct __vxge_hw_virtualpath *vpath; | ||
228 | enum vxge_hw_status status; | ||
229 | |||
230 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
231 | |||
232 | status = vxge_hw_vpath_fw_api(vpath, | ||
233 | VXGE_HW_FW_UPGRADE_ACTION, | ||
234 | VXGE_HW_FW_UPGRADE_MEMO, | ||
235 | VXGE_HW_FW_UPGRADE_OFFSET_READ, | ||
236 | &data0, &data1, &steer_ctrl); | ||
237 | if (status != VXGE_HW_OK) | ||
238 | return status; | ||
239 | |||
240 | *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
241 | *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
242 | *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
243 | |||
244 | return status; | ||
245 | } | ||
246 | |||
247 | enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) | ||
248 | { | ||
249 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
250 | struct __vxge_hw_virtualpath *vpath; | ||
251 | enum vxge_hw_status status; | ||
252 | u32 ret; | ||
253 | |||
254 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
255 | |||
256 | status = vxge_hw_vpath_fw_api(vpath, | ||
257 | VXGE_HW_FW_UPGRADE_ACTION, | ||
258 | VXGE_HW_FW_UPGRADE_MEMO, | ||
259 | VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, | ||
260 | &data0, &data1, &steer_ctrl); | ||
261 | if (status != VXGE_HW_OK) { | ||
262 | vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); | ||
263 | goto exit; | ||
264 | } | ||
265 | |||
266 | ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; | ||
267 | if (ret != 1) { | ||
268 | vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", | ||
269 | __func__, ret); | ||
270 | status = VXGE_HW_FAIL; | ||
271 | } | ||
272 | |||
273 | exit: | ||
274 | return status; | ||
275 | } | ||
276 | |||
277 | enum vxge_hw_status | ||
278 | vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) | ||
279 | { | ||
280 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
281 | struct __vxge_hw_virtualpath *vpath; | ||
282 | enum vxge_hw_status status; | ||
283 | int ret_code, sec_code; | ||
284 | |||
285 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
286 | |||
287 | /* send upgrade start command */ | ||
288 | status = vxge_hw_vpath_fw_api(vpath, | ||
289 | VXGE_HW_FW_UPGRADE_ACTION, | ||
290 | VXGE_HW_FW_UPGRADE_MEMO, | ||
291 | VXGE_HW_FW_UPGRADE_OFFSET_START, | ||
292 | &data0, &data1, &steer_ctrl); | ||
293 | if (status != VXGE_HW_OK) { | ||
294 | vxge_debug_init(VXGE_ERR, "%s: Upgrade start cmd failed", | ||
295 | __func__); | ||
296 | return status; | ||
297 | } | ||
298 | |||
299 | /* Transfer fw image to adapter 16 bytes at a time */ | ||
300 | for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { | ||
301 | steer_ctrl = 0; | ||
302 | |||
303 | /* The next 128 bits of fwdata to be loaded onto the adapter */ | ||
304 | data0 = *((u64 *)fwdata); | ||
305 | data1 = *((u64 *)fwdata + 1); | ||
306 | |||
307 | status = vxge_hw_vpath_fw_api(vpath, | ||
308 | VXGE_HW_FW_UPGRADE_ACTION, | ||
309 | VXGE_HW_FW_UPGRADE_MEMO, | ||
310 | VXGE_HW_FW_UPGRADE_OFFSET_SEND, | ||
311 | &data0, &data1, &steer_ctrl); | ||
312 | if (status != VXGE_HW_OK) { | ||
313 | vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", | ||
314 | __func__); | ||
315 | goto out; | ||
316 | } | ||
317 | |||
318 | ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); | ||
319 | switch (ret_code) { | ||
320 | case VXGE_HW_FW_UPGRADE_OK: | ||
321 | /* All OK, send next 16 bytes. */ | ||
322 | break; | ||
323 | case VXGE_FW_UPGRADE_BYTES2SKIP: | ||
324 | /* skip bytes in the stream */ | ||
325 | fwdata += (data0 >> 8) & 0xFFFFFFFF; | ||
326 | break; | ||
327 | case VXGE_HW_FW_UPGRADE_DONE: | ||
328 | goto out; | ||
329 | case VXGE_HW_FW_UPGRADE_ERR: | ||
330 | sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); | ||
331 | switch (sec_code) { | ||
332 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: | ||
333 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: | ||
334 | printk(KERN_ERR | ||
335 | "corrupted data from .ncf file\n"); | ||
336 | break; | ||
337 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: | ||
338 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: | ||
339 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: | ||
340 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: | ||
341 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: | ||
342 | printk(KERN_ERR "invalid .ncf file\n"); | ||
343 | break; | ||
344 | case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: | ||
345 | printk(KERN_ERR "buffer overflow\n"); | ||
346 | break; | ||
347 | case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: | ||
348 | printk(KERN_ERR "failed to flash the image\n"); | ||
349 | break; | ||
350 | case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: | ||
351 | printk(KERN_ERR | ||
352 | "generic error. Unknown error type\n"); | ||
353 | break; | ||
354 | default: | ||
355 | printk(KERN_ERR "Unknown error of type %d\n", | ||
356 | sec_code); | ||
357 | break; | ||
358 | } | ||
359 | status = VXGE_HW_FAIL; | ||
360 | goto out; | ||
361 | default: | ||
362 | printk(KERN_ERR "Unknown FW error: %d\n", ret_code); | ||
363 | status = VXGE_HW_FAIL; | ||
364 | goto out; | ||
365 | } | ||
366 | /* point to next 16 bytes */ | ||
367 | fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; | ||
368 | } | ||
369 | out: | ||
370 | return status; | ||
371 | } | ||
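
/*
 * Editor's note: the loop above loads each 16-byte chunk by casting
 * 'fwdata' to a u64 pointer, which assumes the firmware buffer is
 * 8-byte aligned. A hypothetical alignment-safe fetch of one chunk
 * (requires <asm/unaligned.h>) would look like this sketch:
 */
static void demo_fw_chunk(const u8 *fwdata, u64 *data0, u64 *data1)
{
	/* get_unaligned() tolerates any buffer alignment */
	*data0 = get_unaligned((const u64 *)fwdata);
	*data1 = get_unaligned((const u64 *)(fwdata + 8));
}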
372 | |||
373 | enum vxge_hw_status | ||
374 | vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, | ||
375 | struct eprom_image *img) | ||
376 | { | ||
377 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
378 | struct __vxge_hw_virtualpath *vpath; | ||
379 | enum vxge_hw_status status; | ||
380 | int i; | ||
381 | |||
382 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
383 | |||
384 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { | ||
385 | data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); | ||
386 | data1 = steer_ctrl = 0; | ||
387 | |||
388 | status = vxge_hw_vpath_fw_api(vpath, | ||
389 | VXGE_HW_FW_API_GET_EPROM_REV, | ||
390 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
391 | 0, &data0, &data1, &steer_ctrl); | ||
392 | if (status != VXGE_HW_OK) | ||
393 | break; | ||
394 | |||
395 | img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); | ||
396 | img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); | ||
397 | img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); | ||
398 | img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); | ||
399 | } | ||
400 | |||
401 | return status; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * __vxge_hw_channel_free - Free memory allocated for channel | ||
406 | * This function frees the channel structure and the various arrays | ||
407 | * allocated within it | ||
408 | */ | ||
409 | static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) | ||
410 | { | ||
411 | kfree(channel->work_arr); | ||
412 | kfree(channel->free_arr); | ||
413 | kfree(channel->reserve_arr); | ||
414 | kfree(channel->orig_arr); | ||
415 | kfree(channel); | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * __vxge_hw_channel_initialize - Initialize a channel | ||
420 | * This function initializes a channel by properly setting the | ||
421 | * various references | ||
422 | */ | ||
423 | static enum vxge_hw_status | ||
424 | __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) | ||
425 | { | ||
426 | u32 i; | ||
427 | struct __vxge_hw_virtualpath *vpath; | ||
428 | |||
429 | vpath = channel->vph->vpath; | ||
430 | |||
431 | if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) { | ||
432 | for (i = 0; i < channel->length; i++) | ||
433 | channel->orig_arr[i] = channel->reserve_arr[i]; | ||
434 | } | ||
435 | |||
436 | switch (channel->type) { | ||
437 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
438 | vpath->fifoh = (struct __vxge_hw_fifo *)channel; | ||
439 | channel->stats = &((struct __vxge_hw_fifo *) | ||
440 | channel)->stats->common_stats; | ||
441 | break; | ||
442 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
443 | vpath->ringh = (struct __vxge_hw_ring *)channel; | ||
444 | channel->stats = &((struct __vxge_hw_ring *) | ||
445 | channel)->stats->common_stats; | ||
446 | break; | ||
447 | default: | ||
448 | break; | ||
449 | } | ||
450 | |||
451 | return VXGE_HW_OK; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * __vxge_hw_channel_reset - Resets a channel | ||
456 | * This function resets a channel by properly setting the various references | ||
457 | */ | ||
458 | static enum vxge_hw_status | ||
459 | __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) | ||
460 | { | ||
461 | u32 i; | ||
462 | |||
463 | for (i = 0; i < channel->length; i++) { | ||
464 | if (channel->reserve_arr != NULL) | ||
465 | channel->reserve_arr[i] = channel->orig_arr[i]; | ||
466 | if (channel->free_arr != NULL) | ||
467 | channel->free_arr[i] = NULL; | ||
468 | if (channel->work_arr != NULL) | ||
469 | channel->work_arr[i] = NULL; | ||
470 | } | ||
471 | channel->free_ptr = channel->length; | ||
472 | channel->reserve_ptr = channel->length; | ||
473 | channel->reserve_top = 0; | ||
474 | channel->post_index = 0; | ||
475 | channel->compl_index = 0; | ||
476 | |||
477 | return VXGE_HW_OK; | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * __vxge_hw_device_pci_e_init | ||
482 | * Initialize certain PCI/PCI-X configuration registers | ||
483 | * with recommended values. Save config space for future hw resets. | ||
484 | */ | ||
485 | static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) | ||
486 | { | ||
487 | u16 cmd = 0; | ||
488 | |||
489 | /* Set the PErr Response bit and SERR in the PCI command register. */ | ||
490 | pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd); | ||
491 | cmd |= 0x140; | ||
492 | pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd); | ||
493 | |||
494 | pci_save_state(hldev->pdev); | ||
495 | } | ||
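
/*
 * For reference, the magic 0x140 above corresponds to two named bits in
 * <linux/pci_regs.h>; a hypothetical rewrite with the named constants:
 */
static void demo_enable_pci_err_reporting(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;	/* 0x040 | 0x100 */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
}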
496 | |||
497 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset | ||
498 | * in progress | ||
499 | * This routine polls until the vpath reset in progress register reads zero | ||
500 | */ | ||
501 | static enum vxge_hw_status | ||
502 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) | ||
503 | { | ||
504 | enum vxge_hw_status status; | ||
505 | status = __vxge_hw_device_register_poll(vpath_rst_in_prog, | ||
506 | VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff), | ||
507 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
508 | return status; | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | ||
513 | * Set the swapper bits appropriately for the legacy section. | ||
514 | */ | ||
515 | static enum vxge_hw_status | ||
516 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
517 | { | ||
518 | u64 val64; | ||
519 | enum vxge_hw_status status = VXGE_HW_OK; | ||
520 | |||
521 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
522 | |||
523 | wmb(); | ||
524 | |||
525 | switch (val64) { | ||
526 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
527 | return status; | ||
528 | |||
529 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
530 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
531 | &legacy_reg->pifm_rd_swap_en); | ||
532 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
533 | &legacy_reg->pifm_rd_flip_en); | ||
534 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
535 | &legacy_reg->pifm_wr_swap_en); | ||
536 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
537 | &legacy_reg->pifm_wr_flip_en); | ||
538 | break; | ||
539 | |||
540 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
541 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
542 | &legacy_reg->pifm_rd_swap_en); | ||
543 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
544 | &legacy_reg->pifm_wr_swap_en); | ||
545 | break; | ||
546 | |||
547 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
548 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
549 | &legacy_reg->pifm_rd_flip_en); | ||
550 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
551 | &legacy_reg->pifm_wr_flip_en); | ||
552 | break; | ||
553 | } | ||
554 | |||
555 | wmb(); | ||
556 | |||
557 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
558 | |||
559 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
560 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
561 | |||
562 | return status; | ||
563 | } | ||
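
/*
 * Editor's note: the sequence above works because toc_swapper_fb holds
 * a fixed 64-bit signature; comparing the raw readback against the
 * known byte-swapped and bit-flipped forms tells the driver which
 * transform the bus path applies. A minimal sketch of the same idea,
 * with a hypothetical signature value (requires <linux/swab.h>):
 */
static int demo_detect_byte_swap(u64 readback)
{
	const u64 sig = 0x0123456789abcdefULL;	/* hypothetical signature */

	if (readback == sig)
		return 0;		/* host sees the register natively */
	if (readback == swab64(sig))
		return 1;		/* bytes arrive swapped */
	return -1;			/* flipped or unknown transform */
}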
564 | |||
565 | /* | ||
566 | * __vxge_hw_device_toc_get | ||
567 | * This routine sets the swapper, reads the toc pointer and returns the | ||
568 | * memory-mapped address of the toc | ||
569 | */ | ||
570 | static struct vxge_hw_toc_reg __iomem * | ||
571 | __vxge_hw_device_toc_get(void __iomem *bar0) | ||
572 | { | ||
573 | u64 val64; | ||
574 | struct vxge_hw_toc_reg __iomem *toc = NULL; | ||
575 | enum vxge_hw_status status; | ||
576 | |||
577 | struct vxge_hw_legacy_reg __iomem *legacy_reg = | ||
578 | (struct vxge_hw_legacy_reg __iomem *)bar0; | ||
579 | |||
580 | status = __vxge_hw_legacy_swapper_set(legacy_reg); | ||
581 | if (status != VXGE_HW_OK) | ||
582 | goto exit; | ||
583 | |||
584 | val64 = readq(&legacy_reg->toc_first_pointer); | ||
585 | toc = bar0 + val64; | ||
586 | exit: | ||
587 | return toc; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * __vxge_hw_device_reg_addr_get | ||
592 | * This routine sets the swapper, reads the toc pointer and initializes the | ||
593 | * register location pointers in the device object. It waits until the ric | ||
594 | * has completed initializing registers. | ||
595 | */ | ||
596 | static enum vxge_hw_status | ||
597 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) | ||
598 | { | ||
599 | u64 val64; | ||
600 | u32 i; | ||
601 | enum vxge_hw_status status = VXGE_HW_OK; | ||
602 | |||
603 | hldev->legacy_reg = hldev->bar0; | ||
604 | |||
605 | hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0); | ||
606 | if (hldev->toc_reg == NULL) { | ||
607 | status = VXGE_HW_FAIL; | ||
608 | goto exit; | ||
609 | } | ||
610 | |||
611 | val64 = readq(&hldev->toc_reg->toc_common_pointer); | ||
612 | hldev->common_reg = hldev->bar0 + val64; | ||
613 | |||
614 | val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer); | ||
615 | hldev->mrpcim_reg = hldev->bar0 + val64; | ||
616 | |||
617 | for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) { | ||
618 | val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]); | ||
619 | hldev->srpcim_reg[i] = hldev->bar0 + val64; | ||
620 | } | ||
621 | |||
622 | for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) { | ||
623 | val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]); | ||
624 | hldev->vpmgmt_reg[i] = hldev->bar0 + val64; | ||
625 | } | ||
626 | |||
627 | for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) { | ||
628 | val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]); | ||
629 | hldev->vpath_reg[i] = hldev->bar0 + val64; | ||
630 | } | ||
631 | |||
632 | val64 = readq(&hldev->toc_reg->toc_kdfc); | ||
633 | |||
634 | switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) { | ||
635 | case 0: | ||
636 | hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64); | ||
637 | break; | ||
638 | default: | ||
639 | break; | ||
640 | } | ||
641 | |||
642 | status = __vxge_hw_device_vpath_reset_in_prog_check( | ||
643 | (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog); | ||
644 | exit: | ||
645 | return status; | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * __vxge_hw_device_access_rights_get: Get Access Rights of the driver | ||
650 | * This routine returns the Access Rights of the driver | ||
651 | */ | ||
652 | static u32 | ||
653 | __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id) | ||
654 | { | ||
655 | u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH; | ||
656 | |||
657 | switch (host_type) { | ||
658 | case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: | ||
659 | if (func_id == 0) { | ||
660 | access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | | ||
661 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; | ||
662 | } | ||
663 | break; | ||
664 | case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: | ||
665 | access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | | ||
666 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; | ||
667 | break; | ||
668 | case VXGE_HW_NO_MR_SR_VH0_FUNCTION0: | ||
669 | access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | | ||
670 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; | ||
671 | break; | ||
672 | case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION: | ||
673 | case VXGE_HW_SR_VH_VIRTUAL_FUNCTION: | ||
674 | case VXGE_HW_MR_SR_VH0_INVALID_CONFIG: | ||
675 | break; | ||
676 | case VXGE_HW_SR_VH_FUNCTION0: | ||
677 | case VXGE_HW_VH_NORMAL_FUNCTION: | ||
678 | access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; | ||
679 | break; | ||
680 | } | ||
681 | |||
682 | return access_rights; | ||
683 | } | ||
684 | /* | ||
685 | * __vxge_hw_device_is_privilaged | ||
686 | * This routine checks if the device function is privileged or not | ||
687 | */ | ||
688 | |||
689 | enum vxge_hw_status | ||
690 | __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) | ||
691 | { | ||
692 | if (__vxge_hw_device_access_rights_get(host_type, | ||
693 | func_id) & | ||
694 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) | ||
695 | return VXGE_HW_OK; | ||
696 | else | ||
697 | return VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
698 | } | ||
699 | |||
700 | /* | ||
701 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
702 | * Returns the function number of the vpath. | ||
703 | */ | ||
704 | static u32 | ||
705 | __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
706 | { | ||
707 | u64 val64; | ||
708 | |||
709 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
710 | |||
711 | return | ||
712 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
713 | } | ||
714 | |||
715 | /* | ||
716 | * __vxge_hw_device_host_info_get | ||
717 | * This routine returns the host type assignments | ||
718 | */ | ||
719 | static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) | ||
720 | { | ||
721 | u64 val64; | ||
722 | u32 i; | ||
723 | |||
724 | val64 = readq(&hldev->common_reg->host_type_assignments); | ||
725 | |||
726 | hldev->host_type = | ||
727 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | ||
728 | |||
729 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); | ||
730 | |||
731 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
732 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | ||
733 | continue; | ||
734 | |||
735 | hldev->func_id = | ||
736 | __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); | ||
737 | |||
738 | hldev->access_rights = __vxge_hw_device_access_rights_get( | ||
739 | hldev->host_type, hldev->func_id); | ||
740 | |||
741 | hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; | ||
742 | hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; | ||
743 | |||
744 | hldev->first_vp_id = i; | ||
745 | break; | ||
746 | } | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as | ||
751 | * link width and signalling rate. | ||
752 | */ | ||
753 | static enum vxge_hw_status | ||
754 | __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) | ||
755 | { | ||
756 | struct pci_dev *dev = hldev->pdev; | ||
757 | u16 lnk; | ||
758 | |||
759 | /* Get the negotiated link width and speed from PCI config space */ | ||
760 | pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); | ||
761 | |||
762 | if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) | ||
763 | return VXGE_HW_ERR_INVALID_PCI_INFO; | ||
764 | |||
765 | switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) { | ||
766 | case PCIE_LNK_WIDTH_RESRV: | ||
767 | case PCIE_LNK_X1: | ||
768 | case PCIE_LNK_X2: | ||
769 | case PCIE_LNK_X4: | ||
770 | case PCIE_LNK_X8: | ||
771 | break; | ||
772 | default: | ||
773 | return VXGE_HW_ERR_INVALID_PCI_INFO; | ||
774 | } | ||
775 | |||
776 | return VXGE_HW_OK; | ||
777 | } | ||
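
/*
 * Editor's note: a hypothetical stand-alone form of the link validation
 * above. The driver accepts only a 2.5 GT/s link (CLS encoding 1) and a
 * negotiated width of x1, x2, x4 or x8 (the reserved width encoding is
 * also let through by the original switch).
 */
static int demo_check_pcie_link(u16 lnksta)
{
	if ((lnksta & PCI_EXP_LNKSTA_CLS) != 1)		/* 1 == 2.5 GT/s */
		return -1;

	switch ((lnksta & PCI_EXP_LNKSTA_NLW) >> 4) {
	case 0: case 1: case 2: case 4: case 8:		/* resrv, x1..x8 */
		return 0;
	default:
		return -1;
	}
}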
778 | |||
779 | /* | ||
780 | * __vxge_hw_device_initialize | ||
781 | * Initialize Titan-V hardware. | ||
782 | */ | ||
783 | static enum vxge_hw_status | ||
784 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | ||
785 | { | ||
786 | enum vxge_hw_status status = VXGE_HW_OK; | ||
787 | |||
788 | if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type, | ||
789 | hldev->func_id)) { | ||
790 | /* Validate the pci-e link width and speed */ | ||
791 | status = __vxge_hw_verify_pci_e_info(hldev); | ||
792 | if (status != VXGE_HW_OK) | ||
793 | goto exit; | ||
794 | } | ||
795 | |||
796 | exit: | ||
797 | return status; | ||
798 | } | ||
799 | |||
800 | /* | ||
801 | * __vxge_hw_vpath_fw_ver_get - Get the fw version | ||
802 | * Returns FW Version | ||
803 | */ | ||
804 | static enum vxge_hw_status | ||
805 | __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, | ||
806 | struct vxge_hw_device_hw_info *hw_info) | ||
807 | { | ||
808 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
809 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
810 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
811 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
812 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
813 | enum vxge_hw_status status; | ||
814 | |||
815 | status = vxge_hw_vpath_fw_api(vpath, | ||
816 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | ||
817 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
818 | 0, &data0, &data1, &steer_ctrl); | ||
819 | if (status != VXGE_HW_OK) | ||
820 | goto exit; | ||
821 | |||
822 | fw_date->day = | ||
823 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); | ||
824 | fw_date->month = | ||
825 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); | ||
826 | fw_date->year = | ||
827 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); | ||
828 | |||
829 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
830 | fw_date->month, fw_date->day, fw_date->year); | ||
831 | |||
832 | fw_version->major = | ||
833 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
834 | fw_version->minor = | ||
835 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
836 | fw_version->build = | ||
837 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
838 | |||
839 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
840 | fw_version->major, fw_version->minor, fw_version->build); | ||
841 | |||
842 | flash_date->day = | ||
843 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); | ||
844 | flash_date->month = | ||
845 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); | ||
846 | flash_date->year = | ||
847 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); | ||
848 | |||
849 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
850 | flash_date->month, flash_date->day, flash_date->year); | ||
851 | |||
852 | flash_version->major = | ||
853 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); | ||
854 | flash_version->minor = | ||
855 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); | ||
856 | flash_version->build = | ||
857 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); | ||
858 | |||
859 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
860 | flash_version->major, flash_version->minor, | ||
861 | flash_version->build); | ||
862 | |||
863 | exit: | ||
864 | return status; | ||
865 | } | ||
866 | |||
867 | /* | ||
868 | * __vxge_hw_vpath_card_info_get - Get the serial numbers, | ||
869 | * part number and product description. | ||
870 | */ | ||
871 | static enum vxge_hw_status | ||
872 | __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, | ||
873 | struct vxge_hw_device_hw_info *hw_info) | ||
874 | { | ||
875 | enum vxge_hw_status status; | ||
876 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
877 | u8 *serial_number = hw_info->serial_number; | ||
878 | u8 *part_number = hw_info->part_number; | ||
879 | u8 *product_desc = hw_info->product_desc; | ||
880 | u32 i, j = 0; | ||
881 | |||
882 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; | ||
883 | |||
884 | status = vxge_hw_vpath_fw_api(vpath, | ||
885 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
886 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
887 | 0, &data0, &data1, &steer_ctrl); | ||
888 | if (status != VXGE_HW_OK) | ||
889 | return status; | ||
890 | |||
891 | ((u64 *)serial_number)[0] = be64_to_cpu(data0); | ||
892 | ((u64 *)serial_number)[1] = be64_to_cpu(data1); | ||
893 | |||
894 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; | ||
895 | data1 = steer_ctrl = 0; | ||
896 | |||
897 | status = vxge_hw_vpath_fw_api(vpath, | ||
898 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
899 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
900 | 0, &data0, &data1, &steer_ctrl); | ||
901 | if (status != VXGE_HW_OK) | ||
902 | return status; | ||
903 | |||
904 | ((u64 *)part_number)[0] = be64_to_cpu(data0); | ||
905 | ((u64 *)part_number)[1] = be64_to_cpu(data1); | ||
906 | |||
907 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
908 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
909 | data0 = i; | ||
910 | data1 = steer_ctrl = 0; | ||
911 | |||
912 | status = vxge_hw_vpath_fw_api(vpath, | ||
913 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
914 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
915 | 0, &data0, &data1, &steer_ctrl); | ||
916 | if (status != VXGE_HW_OK) | ||
917 | return status; | ||
918 | |||
919 | ((u64 *)product_desc)[j++] = be64_to_cpu(data0); | ||
920 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
921 | } | ||
922 | |||
923 | return status; | ||
924 | } | ||
925 | |||
926 | /* | ||
927 | * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode | ||
928 | * Returns pci function mode | ||
929 | */ | ||
930 | static enum vxge_hw_status | ||
931 | __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, | ||
932 | struct vxge_hw_device_hw_info *hw_info) | ||
933 | { | ||
934 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
935 | enum vxge_hw_status status; | ||
936 | |||
937 | data0 = 0; | ||
938 | |||
939 | status = vxge_hw_vpath_fw_api(vpath, | ||
940 | VXGE_HW_FW_API_GET_FUNC_MODE, | ||
941 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
942 | 0, &data0, &data1, &steer_ctrl); | ||
943 | if (status != VXGE_HW_OK) | ||
944 | return status; | ||
945 | |||
946 | hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); | ||
947 | return status; | ||
948 | } | ||
949 | |||
950 | /* | ||
951 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
952 | * from MAC address table. | ||
953 | */ | ||
954 | static enum vxge_hw_status | ||
955 | __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, | ||
956 | u8 *macaddr, u8 *macaddr_mask) | ||
957 | { | ||
958 | u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, | ||
959 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
960 | enum vxge_hw_status status; | ||
961 | int i; | ||
962 | |||
963 | do { | ||
964 | status = vxge_hw_vpath_fw_api(vpath, action, | ||
965 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | ||
966 | 0, &data0, &data1, &steer_ctrl); | ||
967 | if (status != VXGE_HW_OK) | ||
968 | goto exit; | ||
969 | |||
970 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); | ||
971 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
972 | data1); | ||
973 | |||
974 | for (i = ETH_ALEN; i > 0; i--) { | ||
975 | macaddr[i - 1] = (u8) (data0 & 0xFF); | ||
976 | data0 >>= 8; | ||
977 | |||
978 | macaddr_mask[i - 1] = (u8) (data1 & 0xFF); | ||
979 | data1 >>= 8; | ||
980 | } | ||
981 | |||
982 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; | ||
983 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
984 | |||
985 | } while (!is_valid_ether_addr(macaddr)); | ||
986 | exit: | ||
987 | return status; | ||
988 | } | ||
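
/*
 * Sketch of the unpacking used above: the firmware returns the MAC
 * address in the low 48 bits of data0, first octet in the most
 * significant of those bits, so the loop peels bytes off the low end
 * into the tail of the array. demo_u64_to_mac() is a hypothetical name.
 */
static void demo_u64_to_mac(u64 val, u8 *mac)	/* mac is u8[ETH_ALEN] */
{
	int i;

	for (i = ETH_ALEN; i > 0; i--) {
		mac[i - 1] = (u8)(val & 0xFF);
		val >>= 8;
	}
}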
989 | |||
990 | /** | ||
991 | * vxge_hw_device_hw_info_get - Get the hw information | ||
992 | * Returns the vpath mask that has the bits set for each vpath allocated | ||
993 | * for the driver, FW version information, and the first mac address for | ||
994 | * each vpath | ||
995 | */ | ||
996 | enum vxge_hw_status __devinit | ||
997 | vxge_hw_device_hw_info_get(void __iomem *bar0, | ||
998 | struct vxge_hw_device_hw_info *hw_info) | ||
999 | { | ||
1000 | u32 i; | ||
1001 | u64 val64; | ||
1002 | struct vxge_hw_toc_reg __iomem *toc; | ||
1003 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; | ||
1004 | struct vxge_hw_common_reg __iomem *common_reg; | ||
1005 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | ||
1006 | enum vxge_hw_status status; | ||
1007 | struct __vxge_hw_virtualpath vpath; | ||
1008 | |||
1009 | memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); | ||
1010 | |||
1011 | toc = __vxge_hw_device_toc_get(bar0); | ||
1012 | if (toc == NULL) { | ||
1013 | status = VXGE_HW_ERR_CRITICAL; | ||
1014 | goto exit; | ||
1015 | } | ||
1016 | |||
1017 | val64 = readq(&toc->toc_common_pointer); | ||
1018 | common_reg = bar0 + val64; | ||
1019 | |||
1020 | status = __vxge_hw_device_vpath_reset_in_prog_check( | ||
1021 | (u64 __iomem *)&common_reg->vpath_rst_in_prog); | ||
1022 | if (status != VXGE_HW_OK) | ||
1023 | goto exit; | ||
1024 | |||
1025 | hw_info->vpath_mask = readq(&common_reg->vpath_assignments); | ||
1026 | |||
1027 | val64 = readq(&common_reg->host_type_assignments); | ||
1028 | |||
1029 | hw_info->host_type = | ||
1030 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | ||
1031 | |||
1032 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1033 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | ||
1034 | continue; | ||
1035 | |||
1036 | val64 = readq(&toc->toc_vpmgmt_pointer[i]); | ||
1037 | |||
1038 | vpmgmt_reg = bar0 + val64; | ||
1039 | |||
1040 | hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); | ||
1041 | if (__vxge_hw_device_access_rights_get(hw_info->host_type, | ||
1042 | hw_info->func_id) & | ||
1043 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { | ||
1044 | |||
1045 | val64 = readq(&toc->toc_mrpcim_pointer); | ||
1046 | |||
1047 | mrpcim_reg = bar0 + val64; | ||
1048 | |||
1049 | writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask); | ||
1050 | wmb(); | ||
1051 | } | ||
1052 | |||
1053 | val64 = readq(&toc->toc_vpath_pointer[i]); | ||
1054 | |||
1055 | spin_lock_init(&vpath.lock); | ||
1056 | vpath.vp_reg = bar0 + val64; | ||
1057 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
1058 | |||
1059 | status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); | ||
1060 | if (status != VXGE_HW_OK) | ||
1061 | goto exit; | ||
1062 | |||
1063 | status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); | ||
1064 | if (status != VXGE_HW_OK) | ||
1065 | goto exit; | ||
1066 | |||
1067 | status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); | ||
1068 | if (status != VXGE_HW_OK) | ||
1069 | goto exit; | ||
1070 | |||
1071 | break; | ||
1072 | } | ||
1073 | |||
1074 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1075 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | ||
1076 | continue; | ||
1077 | |||
1078 | val64 = readq(&toc->toc_vpath_pointer[i]); | ||
1079 | vpath.vp_reg = bar0 + val64; | ||
1080 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
1081 | |||
1082 | status = __vxge_hw_vpath_addr_get(&vpath, | ||
1083 | hw_info->mac_addrs[i], | ||
1084 | hw_info->mac_addr_masks[i]); | ||
1085 | if (status != VXGE_HW_OK) | ||
1086 | goto exit; | ||
1087 | } | ||
1088 | exit: | ||
1089 | return status; | ||
1090 | } | ||
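
/*
 * Editor's note: a hypothetical probe-time usage of the call above
 * (error handling trimmed): read the hardware info before the full
 * device object exists, then log the firmware version reported by the
 * adapter.
 */
static void demo_log_fw_version(void __iomem *bar0)
{
	struct vxge_hw_device_hw_info hw_info;

	if (vxge_hw_device_hw_info_get(bar0, &hw_info) == VXGE_HW_OK)
		pr_info("vxge: firmware %s\n", hw_info.fw_version.version);
}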
1091 | |||
1092 | /* | ||
1093 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
1094 | */ | ||
1095 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
1096 | { | ||
1097 | struct __vxge_hw_device *hldev; | ||
1098 | struct list_head *p, *n; | ||
1099 | u16 ret; | ||
1100 | |||
1101 | if (blockpool == NULL) { | ||
1102 | ret = 1; | ||
1103 | goto exit; | ||
1104 | } | ||
1105 | |||
1106 | hldev = blockpool->hldev; | ||
1107 | |||
1108 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
1109 | pci_unmap_single(hldev->pdev, | ||
1110 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
1111 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
1112 | PCI_DMA_BIDIRECTIONAL); | ||
1113 | |||
1114 | vxge_os_dma_free(hldev->pdev, | ||
1115 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
1116 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1117 | |||
1118 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1119 | kfree(p); | ||
1120 | blockpool->pool_size--; | ||
1121 | } | ||
1122 | |||
1123 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
1124 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1125 | kfree((void *)p); | ||
1126 | } | ||
1127 | ret = 0; | ||
1128 | exit: | ||
1129 | return; | ||
1130 | } | ||
1131 | |||
1132 | /* | ||
1133 | * __vxge_hw_blockpool_create - Create block pool | ||
1134 | */ | ||
1135 | static enum vxge_hw_status | ||
1136 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1137 | struct __vxge_hw_blockpool *blockpool, | ||
1138 | u32 pool_size, | ||
1139 | u32 pool_max) | ||
1140 | { | ||
1141 | u32 i; | ||
1142 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
1143 | void *memblock; | ||
1144 | dma_addr_t dma_addr; | ||
1145 | struct pci_dev *dma_handle; | ||
1146 | struct pci_dev *acc_handle; | ||
1147 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1148 | |||
1149 | if (blockpool == NULL) { | ||
1150 | status = VXGE_HW_FAIL; | ||
1151 | goto blockpool_create_exit; | ||
1152 | } | ||
1153 | |||
1154 | blockpool->hldev = hldev; | ||
1155 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
1156 | blockpool->pool_size = 0; | ||
1157 | blockpool->pool_max = pool_max; | ||
1158 | blockpool->req_out = 0; | ||
1159 | |||
1160 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
1161 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
1162 | |||
1163 | for (i = 0; i < pool_size + pool_max; i++) { | ||
1164 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1165 | GFP_KERNEL); | ||
1166 | if (entry == NULL) { | ||
1167 | __vxge_hw_blockpool_destroy(blockpool); | ||
1168 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1169 | goto blockpool_create_exit; | ||
1170 | } | ||
1171 | list_add(&entry->item, &blockpool->free_entry_list); | ||
1172 | } | ||
1173 | |||
1174 | for (i = 0; i < pool_size; i++) { | ||
1175 | memblock = vxge_os_dma_malloc( | ||
1176 | hldev->pdev, | ||
1177 | VXGE_HW_BLOCK_SIZE, | ||
1178 | &dma_handle, | ||
1179 | &acc_handle); | ||
1180 | if (memblock == NULL) { | ||
1181 | __vxge_hw_blockpool_destroy(blockpool); | ||
1182 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1183 | goto blockpool_create_exit; | ||
1184 | } | ||
1185 | |||
1186 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
1187 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
1188 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
1189 | dma_addr))) { | ||
1190 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
1191 | __vxge_hw_blockpool_destroy(blockpool); | ||
1192 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1193 | goto blockpool_create_exit; | ||
1194 | } | ||
1195 | |||
1196 | if (!list_empty(&blockpool->free_entry_list)) | ||
1197 | entry = (struct __vxge_hw_blockpool_entry *) | ||
1198 | list_first_entry(&blockpool->free_entry_list, | ||
1199 | struct __vxge_hw_blockpool_entry, | ||
1200 | item); | ||
1201 | |||
1202 | if (entry == NULL) | ||
1203 | entry = | ||
1204 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1205 | GFP_KERNEL); | ||
1206 | if (entry != NULL) { | ||
1207 | list_del(&entry->item); | ||
1208 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
1209 | entry->memblock = memblock; | ||
1210 | entry->dma_addr = dma_addr; | ||
1211 | entry->acc_handle = acc_handle; | ||
1212 | entry->dma_handle = dma_handle; | ||
1213 | list_add(&entry->item, | ||
1214 | &blockpool->free_block_list); | ||
1215 | blockpool->pool_size++; | ||
1216 | } else { | ||
1217 | __vxge_hw_blockpool_destroy(blockpool); | ||
1218 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1219 | goto blockpool_create_exit; | ||
1220 | } | ||
1221 | } | ||
1222 | |||
1223 | blockpool_create_exit: | ||
1224 | return status; | ||
1225 | } | ||
1226 | |||
1227 | /* | ||
1228 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | ||
1229 | * Check the fifo configuration | ||
1230 | */ | ||
1231 | static enum vxge_hw_status | ||
1232 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | ||
1233 | { | ||
1234 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | ||
1235 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | ||
1236 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
1237 | |||
1238 | return VXGE_HW_OK; | ||
1239 | } | ||
1240 | |||
1241 | /* | ||
1242 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | ||
1243 | * Check the vpath configuration | ||
1244 | */ | ||
1245 | static enum vxge_hw_status | ||
1246 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | ||
1247 | { | ||
1248 | enum vxge_hw_status status; | ||
1249 | |||
1250 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | ||
1251 | (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) | ||
1252 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | ||
1253 | |||
1254 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | ||
1255 | if (status != VXGE_HW_OK) | ||
1256 | return status; | ||
1257 | |||
1258 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | ||
1259 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1260 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1261 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1262 | |||
1263 | if ((vp_config->rpa_strip_vlan_tag != | ||
1264 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | ||
1265 | (vp_config->rpa_strip_vlan_tag != | ||
1266 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1267 | (vp_config->rpa_strip_vlan_tag != | ||
1268 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1269 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1270 | |||
1271 | return VXGE_HW_OK; | ||
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * __vxge_hw_device_config_check - Check device configuration. | ||
1276 | * Check the device configuration | ||
1277 | */ | ||
1278 | static enum vxge_hw_status | ||
1279 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | ||
1280 | { | ||
1281 | u32 i; | ||
1282 | enum vxge_hw_status status; | ||
1283 | |||
1284 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | ||
1285 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | ||
1286 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | ||
1287 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | ||
1288 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1289 | |||
1290 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | ||
1291 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | ||
1292 | return VXGE_HW_BADCFG_RTS_MAC_EN; | ||
1293 | |||
1294 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1295 | status = __vxge_hw_device_vpath_config_check( | ||
1296 | &new_config->vp_config[i]); | ||
1297 | if (status != VXGE_HW_OK) | ||
1298 | return status; | ||
1299 | } | ||
1300 | |||
1301 | return VXGE_HW_OK; | ||
1302 | } | ||
1303 | |||
1304 | /* | ||
1305 | * vxge_hw_device_initialize - Initialize Titan device. | ||
1306 | * Initialize Titan device. Note that all the arguments of this public API | ||
1307 | * are 'IN', including @devh. The driver cooperates with the | ||
1308 | * OS to find a new Titan device and locate its PCI and memory spaces. | ||
1309 | * | ||
1310 | * When done, the function allocates sizeof(struct __vxge_hw_device) bytes of | ||
1311 | * HW state and uses it to perform the Titan hardware initialization. | ||
1312 | */ | ||
1313 | enum vxge_hw_status __devinit | ||
1314 | vxge_hw_device_initialize( | ||
1315 | struct __vxge_hw_device **devh, | ||
1316 | struct vxge_hw_device_attr *attr, | ||
1317 | struct vxge_hw_device_config *device_config) | ||
1318 | { | ||
1319 | u32 i; | ||
1320 | u32 nblocks = 0; | ||
1321 | struct __vxge_hw_device *hldev = NULL; | ||
1322 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1323 | |||
1324 | status = __vxge_hw_device_config_check(device_config); | ||
1325 | if (status != VXGE_HW_OK) | ||
1326 | goto exit; | ||
1327 | |||
1328 | hldev = vzalloc(sizeof(struct __vxge_hw_device)); | ||
1329 | if (hldev == NULL) { | ||
1330 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1331 | goto exit; | ||
1332 | } | ||
1333 | |||
1334 | hldev->magic = VXGE_HW_DEVICE_MAGIC; | ||
1335 | |||
1336 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); | ||
1337 | |||
1338 | /* apply config */ | ||
1339 | memcpy(&hldev->config, device_config, | ||
1340 | sizeof(struct vxge_hw_device_config)); | ||
1341 | |||
1342 | hldev->bar0 = attr->bar0; | ||
1343 | hldev->pdev = attr->pdev; | ||
1344 | |||
1345 | hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up; | ||
1346 | hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down; | ||
1347 | hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err; | ||
1348 | |||
1349 | __vxge_hw_device_pci_e_init(hldev); | ||
1350 | |||
1351 | status = __vxge_hw_device_reg_addr_get(hldev); | ||
1352 | if (status != VXGE_HW_OK) { | ||
1353 | vfree(hldev); | ||
1354 | goto exit; | ||
1355 | } | ||
1356 | |||
1357 | __vxge_hw_device_host_info_get(hldev); | ||
1358 | |||
1359 | /* Incrementing for stats blocks */ | ||
1360 | nblocks++; | ||
1361 | |||
1362 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1363 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | ||
1364 | continue; | ||
1365 | |||
1366 | if (device_config->vp_config[i].ring.enable == | ||
1367 | VXGE_HW_RING_ENABLE) | ||
1368 | nblocks += device_config->vp_config[i].ring.ring_blocks; | ||
1369 | |||
1370 | if (device_config->vp_config[i].fifo.enable == | ||
1371 | VXGE_HW_FIFO_ENABLE) | ||
1372 | nblocks += device_config->vp_config[i].fifo.fifo_blocks; | ||
1373 | nblocks++; | ||
1374 | } | ||
1375 | |||
1376 | if (__vxge_hw_blockpool_create(hldev, | ||
1377 | &hldev->block_pool, | ||
1378 | device_config->dma_blockpool_initial + nblocks, | ||
1379 | device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) { | ||
1380 | |||
1381 | vxge_hw_device_terminate(hldev); | ||
1382 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1383 | goto exit; | ||
1384 | } | ||
1385 | |||
1386 | status = __vxge_hw_device_initialize(hldev); | ||
1387 | if (status != VXGE_HW_OK) { | ||
1388 | vxge_hw_device_terminate(hldev); | ||
1389 | goto exit; | ||
1390 | } | ||
1391 | |||
1392 | *devh = hldev; | ||
1393 | exit: | ||
1394 | return status; | ||
1395 | } | ||
1396 | |||
1397 | /* | ||
1398 | * vxge_hw_device_terminate - Terminate Titan device. | ||
1399 | * Terminate HW device. | ||
1400 | */ | ||
1401 | void | ||
1402 | vxge_hw_device_terminate(struct __vxge_hw_device *hldev) | ||
1403 | { | ||
1404 | vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC); | ||
1405 | |||
1406 | hldev->magic = VXGE_HW_DEVICE_DEAD; | ||
1407 | __vxge_hw_blockpool_destroy(&hldev->block_pool); | ||
1408 | vfree(hldev); | ||
1409 | } | ||
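
/*
 * Editor's note: a hypothetical sketch of how the initialize/terminate
 * pair above is used by a caller (attr and config are assumed to be
 * filled in the way vxge_probe() does; error handling trimmed):
 */
static void demo_hw_lifecycle(struct vxge_hw_device_attr *attr,
			      struct vxge_hw_device_config *config)
{
	struct __vxge_hw_device *devh = NULL;

	if (vxge_hw_device_initialize(&devh, attr, config) != VXGE_HW_OK)
		return;
	/* ... device ready: open vpaths, run traffic ... */
	vxge_hw_device_terminate(devh);
}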
1410 | |||
1411 | /* | ||
1412 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
1413 | * and offset and perform an operation | ||
1414 | */ | ||
1415 | static enum vxge_hw_status | ||
1416 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
1417 | u32 operation, u32 offset, u64 *stat) | ||
1418 | { | ||
1419 | u64 val64; | ||
1420 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1421 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1422 | |||
1423 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1424 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1425 | goto vpath_stats_access_exit; | ||
1426 | } | ||
1427 | |||
1428 | vp_reg = vpath->vp_reg; | ||
1429 | |||
1430 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
1431 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
1432 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
1433 | |||
1434 | status = __vxge_hw_pio_mem_write64(val64, | ||
1435 | &vp_reg->xmac_stats_access_cmd, | ||
1436 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
1437 | vpath->hldev->config.device_poll_millis); | ||
1438 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
1439 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
1440 | else | ||
1441 | *stat = 0; | ||
1442 | |||
1443 | vpath_stats_access_exit: | ||
1444 | return status; | ||
1445 | } | ||
1446 | |||
1447 | /* | ||
1448 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
1449 | */ | ||
1450 | static enum vxge_hw_status | ||
1451 | __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1452 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
1453 | { | ||
1454 | u64 *val64; | ||
1455 | int i; | ||
1456 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
1457 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1458 | |||
1459 | val64 = (u64 *)vpath_tx_stats; | ||
1460 | |||
1461 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1462 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1463 | goto exit; | ||
1464 | } | ||
1465 | |||
1466 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
1467 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1468 | VXGE_HW_STATS_OP_READ, | ||
1469 | offset, val64); | ||
1470 | if (status != VXGE_HW_OK) | ||
1471 | goto exit; | ||
1472 | offset++; | ||
1473 | val64++; | ||
1474 | } | ||
1475 | exit: | ||
1476 | return status; | ||
1477 | } | ||
1478 | |||
1479 | /* | ||
1480 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
1481 | */ | ||
1482 | static enum vxge_hw_status | ||
1483 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1484 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
1485 | { | ||
1486 | u64 *val64; | ||
1487 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1488 | int i; | ||
1489 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
1490 | val64 = (u64 *) vpath_rx_stats; | ||
1491 | |||
1492 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1493 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1494 | goto exit; | ||
1495 | } | ||
1496 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
1497 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1498 | VXGE_HW_STATS_OP_READ, | ||
1499 | offset >> 3, val64); | ||
1500 | if (status != VXGE_HW_OK) | ||
1501 | goto exit; | ||
1502 | |||
1503 | offset += 8; | ||
1504 | val64++; | ||
1505 | } | ||
1506 | exit: | ||
1507 | return status; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
1512 | */ | ||
1513 | static enum vxge_hw_status | ||
1514 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1515 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
1516 | { | ||
1517 | u64 val64; | ||
1518 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1519 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1520 | |||
1521 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1522 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1523 | goto exit; | ||
1524 | } | ||
1525 | vp_reg = vpath->vp_reg; | ||
1526 | |||
1527 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
1528 | hw_stats->ini_num_mwr_sent = | ||
1529 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
1530 | |||
1531 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
1532 | hw_stats->ini_num_mrd_sent = | ||
1533 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
1534 | |||
1535 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
1536 | hw_stats->ini_num_cpl_rcvd = | ||
1537 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
1538 | |||
1539 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
1540 | hw_stats->ini_num_mwr_byte_sent = | ||
1541 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
1542 | |||
1543 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
1544 | hw_stats->ini_num_cpl_byte_rcvd = | ||
1545 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
1546 | |||
1547 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
1548 | hw_stats->wrcrdtarb_xoff = | ||
1549 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
1550 | |||
1551 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
1552 | hw_stats->rdcrdtarb_xoff = | ||
1553 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
1554 | |||
1555 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1556 | hw_stats->vpath_genstats_count0 = | ||
1557 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
1558 | val64); | ||
1559 | |||
1560 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1561 | hw_stats->vpath_genstats_count1 = | ||
1562 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
1563 | val64); | ||
1564 | |||
1565 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1566 | hw_stats->vpath_genstats_count2 = | ||
1567 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
1568 | val64); | ||
1569 | |||
1570 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1571 | hw_stats->vpath_genstats_count3 = | ||
1572 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
1573 | val64); | ||
1574 | |||
1575 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
1576 | hw_stats->vpath_genstats_count4 = | ||
1577 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
1578 | val64); | ||
1579 | |||
1580 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
1581 | hw_stats->vpath_genstats_count5 = | ||
1582 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
1583 | val64); | ||
1584 | |||
1585 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
1586 | if (status != VXGE_HW_OK) | ||
1587 | goto exit; | ||
1588 | |||
1589 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
1590 | if (status != VXGE_HW_OK) | ||
1591 | goto exit; | ||
1592 | |||
1593 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1594 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
1595 | |||
1596 | hw_stats->prog_event_vnum0 = | ||
1597 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
1598 | |||
1599 | hw_stats->prog_event_vnum1 = | ||
1600 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
1601 | |||
1602 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1603 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
1604 | |||
1605 | hw_stats->prog_event_vnum2 = | ||
1606 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
1607 | |||
1608 | hw_stats->prog_event_vnum3 = | ||
1609 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
1610 | |||
1611 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
1612 | hw_stats->rx_multi_cast_frame_discard = | ||
1613 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
1614 | |||
1615 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
1616 | hw_stats->rx_frm_transferred = | ||
1617 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
1618 | |||
1619 | val64 = readq(&vp_reg->rxd_returned); | ||
1620 | hw_stats->rxd_returned = | ||
1621 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
1622 | |||
1623 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
1624 | hw_stats->rx_mpa_len_fail_frms = | ||
1625 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
1626 | hw_stats->rx_mpa_mrk_fail_frms = | ||
1627 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
1628 | hw_stats->rx_mpa_crc_fail_frms = | ||
1629 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
1630 | |||
1631 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
1632 | hw_stats->rx_permitted_frms = | ||
1633 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
1634 | hw_stats->rx_vp_reset_discarded_frms = | ||
1635 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
1636 | hw_stats->rx_wol_frms = | ||
1637 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
1638 | |||
1639 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
1640 | hw_stats->tx_vp_reset_discarded_frms = | ||
1641 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
1642 | val64); | ||
1643 | exit: | ||
1644 | return status; | ||
1645 | } | ||
1646 | |||
1647 | /* | ||
1648 | * vxge_hw_device_stats_get - Get the device hw statistics. | ||
1649 | * Returns the vpath h/w stats for the device. | ||
1650 | */ | ||
1651 | enum vxge_hw_status | ||
1652 | vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, | ||
1653 | struct vxge_hw_device_stats_hw_info *hw_stats) | ||
1654 | { | ||
1655 | u32 i; | ||
1656 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1657 | |||
1658 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1659 | if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || | ||
1660 | (hldev->virtual_paths[i].vp_open == | ||
1661 | VXGE_HW_VP_NOT_OPEN)) | ||
1662 | continue; | ||
1663 | |||
1664 | memcpy(hldev->virtual_paths[i].hw_stats_sav, | ||
1665 | hldev->virtual_paths[i].hw_stats, | ||
1666 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
1667 | |||
1668 | status = __vxge_hw_vpath_stats_get( | ||
1669 | &hldev->virtual_paths[i], | ||
1670 | hldev->virtual_paths[i].hw_stats); | ||
1671 | } | ||
1672 | |||
1673 | memcpy(hw_stats, &hldev->stats.hw_dev_info_stats, | ||
1674 | sizeof(struct vxge_hw_device_stats_hw_info)); | ||
1675 | |||
1676 | return status; | ||
1677 | } | ||
1678 | |||
1679 | /* | ||
1680 | * vxge_hw_driver_stats_get - Get the device sw statistics. | ||
1681 | * Returns the vpath s/w stats for the device. | ||
1682 | */ | ||
1683 | enum vxge_hw_status vxge_hw_driver_stats_get( | ||
1684 | struct __vxge_hw_device *hldev, | ||
1685 | struct vxge_hw_device_stats_sw_info *sw_stats) | ||
1686 | { | ||
1687 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1688 | |||
1689 | memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, | ||
1690 | sizeof(struct vxge_hw_device_stats_sw_info)); | ||
1691 | |||
1692 | return status; | ||
1693 | } | ||
1694 | |||
1695 | /* | ||
1696 | * vxge_hw_mrpcim_stats_access - Access the statistics from the given location | ||
1697 | * and offset and perform an operation | ||
1698 | * Reads or writes a statistics value at the given location and offset. | ||
1699 | */ | ||
1700 | enum vxge_hw_status | ||
1701 | vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev, | ||
1702 | u32 operation, u32 location, u32 offset, u64 *stat) | ||
1703 | { | ||
1704 | u64 val64; | ||
1705 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1706 | |||
1707 | status = __vxge_hw_device_is_privilaged(hldev->host_type, | ||
1708 | hldev->func_id); | ||
1709 | if (status != VXGE_HW_OK) | ||
1710 | goto exit; | ||
1711 | |||
1712 | val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) | | ||
1713 | VXGE_HW_XMAC_STATS_SYS_CMD_STROBE | | ||
1714 | VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) | | ||
1715 | VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset); | ||
1716 | |||
1717 | status = __vxge_hw_pio_mem_write64(val64, | ||
1718 | &hldev->mrpcim_reg->xmac_stats_sys_cmd, | ||
1719 | VXGE_HW_XMAC_STATS_SYS_CMD_STROBE, | ||
1720 | hldev->config.device_poll_millis); | ||
1721 | |||
1722 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
1723 | *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data); | ||
1724 | else | ||
1725 | *stat = 0; | ||
1726 | exit: | ||
1727 | return status; | ||
1728 | } | ||
1729 | |||
1730 | /* | ||
1731 | * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port | ||
1732 | * Reads the statistics of the given aggregate port. | ||
1733 | */ | ||
1734 | static enum vxge_hw_status | ||
1735 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, | ||
1736 | struct vxge_hw_xmac_aggr_stats *aggr_stats) | ||
1737 | { | ||
1738 | u64 *val64; | ||
1739 | int i; | ||
1740 | u32 offset = VXGE_HW_STATS_AGGRn_OFFSET; | ||
1741 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1742 | |||
1743 | val64 = (u64 *)aggr_stats; | ||
1744 | |||
1745 | status = __vxge_hw_device_is_privilaged(hldev->host_type, | ||
1746 | hldev->func_id); | ||
1747 | if (status != VXGE_HW_OK) | ||
1748 | goto exit; | ||
1749 | |||
1750 | for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) { | ||
1751 | status = vxge_hw_mrpcim_stats_access(hldev, | ||
1752 | VXGE_HW_STATS_OP_READ, | ||
1753 | VXGE_HW_STATS_LOC_AGGR, | ||
1754 | ((offset + (104 * port)) >> 3), val64); | ||
1755 | if (status != VXGE_HW_OK) | ||
1756 | goto exit; | ||
1757 | |||
1758 | offset += 8; | ||
1759 | val64++; | ||
1760 | } | ||
1761 | exit: | ||
1762 | return status; | ||
1763 | } | ||
1764 | |||
1765 | /* | ||
1766 | * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port | ||
1767 | * Reads the statistics of the given port. | ||
1768 | */ | ||
1769 | static enum vxge_hw_status | ||
1770 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, | ||
1771 | struct vxge_hw_xmac_port_stats *port_stats) | ||
1772 | { | ||
1773 | u64 *val64; | ||
1774 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1775 | int i; | ||
1776 | u32 offset = 0x0; | ||
1777 | val64 = (u64 *) port_stats; | ||
1778 | |||
1779 | status = __vxge_hw_device_is_privilaged(hldev->host_type, | ||
1780 | hldev->func_id); | ||
1781 | if (status != VXGE_HW_OK) | ||
1782 | goto exit; | ||
1783 | |||
1784 | for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) { | ||
1785 | status = vxge_hw_mrpcim_stats_access(hldev, | ||
1786 | VXGE_HW_STATS_OP_READ, | ||
1787 | VXGE_HW_STATS_LOC_AGGR, | ||
1788 | ((offset + (608 * port)) >> 3), val64); | ||
1789 | if (status != VXGE_HW_OK) | ||
1790 | goto exit; | ||
1791 | |||
1792 | offset += 8; | ||
1793 | val64++; | ||
1794 | } | ||
1795 | |||
1796 | exit: | ||
1797 | return status; | ||
1798 | } | ||
1799 | |||
1800 | /* | ||
1801 | * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics | ||
1802 | * Reads the aggregate, port and per-vpath XMAC statistics. | ||
1803 | */ | ||
1804 | enum vxge_hw_status | ||
1805 | vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, | ||
1806 | struct vxge_hw_xmac_stats *xmac_stats) | ||
1807 | { | ||
1808 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1809 | u32 i; | ||
1810 | |||
1811 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, | ||
1812 | 0, &xmac_stats->aggr_stats[0]); | ||
1813 | if (status != VXGE_HW_OK) | ||
1814 | goto exit; | ||
1815 | |||
1816 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, | ||
1817 | 1, &xmac_stats->aggr_stats[1]); | ||
1818 | if (status != VXGE_HW_OK) | ||
1819 | goto exit; | ||
1820 | |||
1821 | for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { | ||
1822 | |||
1823 | status = vxge_hw_device_xmac_port_stats_get(hldev, | ||
1824 | i, &xmac_stats->port_stats[i]); | ||
1825 | if (status != VXGE_HW_OK) | ||
1826 | goto exit; | ||
1827 | } | ||
1828 | |||
1829 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1830 | |||
1831 | if (!(hldev->vpaths_deployed & vxge_mBIT(i))) | ||
1832 | continue; | ||
1833 | |||
1834 | status = __vxge_hw_vpath_xmac_tx_stats_get( | ||
1835 | &hldev->virtual_paths[i], | ||
1836 | &xmac_stats->vpath_tx_stats[i]); | ||
1837 | if (status != VXGE_HW_OK) | ||
1838 | goto exit; | ||
1839 | |||
1840 | status = __vxge_hw_vpath_xmac_rx_stats_get( | ||
1841 | &hldev->virtual_paths[i], | ||
1842 | &xmac_stats->vpath_rx_stats[i]); | ||
1843 | if (status != VXGE_HW_OK) | ||
1844 | goto exit; | ||
1845 | } | ||
1846 | exit: | ||
1847 | return status; | ||
1848 | } | ||
1849 | |||
1850 | /* | ||
1851 | * vxge_hw_device_debug_set - Set the debug module, level and timestamp | ||
1852 | * This routine is used to dynamically change the debug output | ||
1853 | */ | ||
1854 | void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev, | ||
1855 | enum vxge_debug_level level, u32 mask) | ||
1856 | { | ||
1857 | if (hldev == NULL) | ||
1858 | return; | ||
1859 | |||
1860 | #if defined(VXGE_DEBUG_TRACE_MASK) || \ | ||
1861 | defined(VXGE_DEBUG_ERR_MASK) | ||
1862 | hldev->debug_module_mask = mask; | ||
1863 | hldev->debug_level = level; | ||
1864 | #endif | ||
1865 | |||
1866 | #if defined(VXGE_DEBUG_ERR_MASK) | ||
1867 | hldev->level_err = level & VXGE_ERR; | ||
1868 | #endif | ||
1869 | |||
1870 | #if defined(VXGE_DEBUG_TRACE_MASK) | ||
1871 | hldev->level_trace = level & VXGE_TRACE; | ||
1872 | #endif | ||
1873 | } | ||
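/*
 * Example use of the setter above, assuming the VXGE_COMPONENT_ALL module
 * mask from vxge-debug.h (the arguments are illustrative): enable both
 * error and trace output for every component of a device:
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_ERR | VXGE_TRACE,
 *				 VXGE_COMPONENT_ALL);
 */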
1874 | |||
1875 | /* | ||
1876 | * vxge_hw_device_error_level_get - Get the error level | ||
1877 | * This routine returns the current error level set | ||
1878 | */ | ||
1879 | u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev) | ||
1880 | { | ||
1881 | #if defined(VXGE_DEBUG_ERR_MASK) | ||
1882 | if (hldev == NULL) | ||
1883 | return VXGE_ERR; | ||
1884 | else | ||
1885 | return hldev->level_err; | ||
1886 | #else | ||
1887 | return 0; | ||
1888 | #endif | ||
1889 | } | ||
1890 | |||
1891 | /* | ||
1892 | * vxge_hw_device_trace_level_get - Get the trace level | ||
1893 | * This routine returns the current trace level set | ||
1894 | */ | ||
1895 | u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev) | ||
1896 | { | ||
1897 | #if defined(VXGE_DEBUG_TRACE_MASK) | ||
1898 | if (hldev == NULL) | ||
1899 | return VXGE_TRACE; | ||
1900 | else | ||
1901 | return hldev->level_trace; | ||
1902 | #else | ||
1903 | return 0; | ||
1904 | #endif | ||
1905 | } | ||
1906 | |||
1907 | /* | ||
1908 | * vxge_hw_device_getpause_data - Get pause frame generation and reception. | ||
1909 | * Returns the Pause frame generation and reception capability of the NIC. | ||
1910 | */ | ||
1911 | enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev, | ||
1912 | u32 port, u32 *tx, u32 *rx) | ||
1913 | { | ||
1914 | u64 val64; | ||
1915 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1916 | |||
1917 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
1918 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
1919 | goto exit; | ||
1920 | } | ||
1921 | |||
1922 | if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { | ||
1923 | status = VXGE_HW_ERR_INVALID_PORT; | ||
1924 | goto exit; | ||
1925 | } | ||
1926 | |||
1927 | if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | ||
1928 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
1929 | goto exit; | ||
1930 | } | ||
1931 | |||
1932 | val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | ||
1933 | if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN) | ||
1934 | *tx = 1; | ||
1935 | if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN) | ||
1936 | *rx = 1; | ||
1937 | exit: | ||
1938 | return status; | ||
1939 | } | ||
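/*
 * Note that the getter above only ever sets *tx / *rx to 1; callers must
 * zero-initialize both outputs themselves. A minimal usage sketch
 * (port 0 chosen for illustration):
 *
 *	u32 tx = 0, rx = 0;
 *
 *	if (vxge_hw_device_getpause_data(hldev, 0, &tx, &rx) == VXGE_HW_OK)
 *		pr_info("pause: tx %u rx %u\n", tx, rx);
 */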
1940 | |||
1941 | /* | ||
1942 | * vxge_hw_device_setpause_data - set/reset pause frame generation. | ||
1943 | * It can be used to set or reset Pause frame generation or reception | ||
1944 | * support of the NIC. | ||
1945 | */ | ||
1946 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, | ||
1947 | u32 port, u32 tx, u32 rx) | ||
1948 | { | ||
1949 | u64 val64; | ||
1950 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1951 | |||
1952 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
1953 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
1954 | goto exit; | ||
1955 | } | ||
1956 | |||
1957 | if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { | ||
1958 | status = VXGE_HW_ERR_INVALID_PORT; | ||
1959 | goto exit; | ||
1960 | } | ||
1961 | |||
1962 | status = __vxge_hw_device_is_privilaged(hldev->host_type, | ||
1963 | hldev->func_id); | ||
1964 | if (status != VXGE_HW_OK) | ||
1965 | goto exit; | ||
1966 | |||
1967 | val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | ||
1968 | if (tx) | ||
1969 | val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; | ||
1970 | else | ||
1971 | val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; | ||
1972 | if (rx) | ||
1973 | val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; | ||
1974 | else | ||
1975 | val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; | ||
1976 | |||
1977 | writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | ||
1978 | exit: | ||
1979 | return status; | ||
1980 | } | ||
1981 | |||
1982 | u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev) | ||
1983 | { | ||
1984 | struct pci_dev *dev = hldev->pdev; | ||
1985 | u16 lnk; | ||
1986 | |||
1987 | pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); | ||
1988 | return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; | ||
1989 | } | ||
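/*
 * The routine above open-codes the Negotiated Link Width field of the PCIe
 * Link Status register. A hedged equivalent sketch using the generic
 * helper and field macros available in later kernels
 * (pcie_capability_read_word() and PCI_EXP_LNKSTA_NLW):
 */
static inline u16 example_pcie_link_width(struct pci_dev *pdev)
{
	u16 lnksta = 0;

	/* reads the Link Status register of the PCIe capability */
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	return (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}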
1990 | |||
1991 | /* | ||
1992 | * __vxge_hw_ring_block_memblock_idx - Return the memblock index | ||
1993 | * This function returns the index of memory block | ||
1994 | */ | ||
1995 | static inline u32 | ||
1996 | __vxge_hw_ring_block_memblock_idx(u8 *block) | ||
1997 | { | ||
1998 | return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)); | ||
1999 | } | ||
2000 | |||
2001 | /* | ||
2002 | * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index | ||
2003 | * This function sets index to a memory block | ||
2004 | */ | ||
2005 | static inline void | ||
2006 | __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx) | ||
2007 | { | ||
2008 | *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx; | ||
2009 | } | ||
2010 | |||
2011 | /* | ||
2012 | * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer | ||
2013 | * in RxD block | ||
2014 | * Stores the DMA address of the next RxD block in the given block. | ||
2015 | */ | ||
2016 | static inline void | ||
2017 | __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) | ||
2018 | { | ||
2019 | *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; | ||
2020 | } | ||
2021 | |||
2022 | /* | ||
2023 | * __vxge_hw_ring_first_block_address_get - Returns the dma address of the | ||
2024 | * first block | ||
2025 | * Returns the dma address of the first RxD block | ||
2026 | */ | ||
2027 | static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) | ||
2028 | { | ||
2029 | struct vxge_hw_mempool_dma *dma_object; | ||
2030 | |||
2031 | dma_object = ring->mempool->memblocks_dma_arr; | ||
2032 | vxge_assert(dma_object != NULL); | ||
2033 | |||
2034 | return dma_object->addr; | ||
2035 | } | ||
2036 | |||
2037 | /* | ||
2038 | * __vxge_hw_ring_item_dma_addr - Return the dma address of an item | ||
2039 | * This function returns the dma address of a given item | ||
2040 | */ | ||
2041 | static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh, | ||
2042 | void *item) | ||
2043 | { | ||
2044 | u32 memblock_idx; | ||
2045 | void *memblock; | ||
2046 | struct vxge_hw_mempool_dma *memblock_dma_object; | ||
2047 | ptrdiff_t dma_item_offset; | ||
2048 | |||
2049 | /* get owner memblock index */ | ||
2050 | memblock_idx = __vxge_hw_ring_block_memblock_idx(item); | ||
2051 | |||
2052 | /* get owner memblock by memblock index */ | ||
2053 | memblock = mempoolh->memblocks_arr[memblock_idx]; | ||
2054 | |||
2055 | /* get memblock DMA object by memblock index */ | ||
2056 | memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx; | ||
2057 | |||
2058 | /* calculate offset in the memblock of this item */ | ||
2059 | dma_item_offset = (u8 *)item - (u8 *)memblock; | ||
2060 | |||
2061 | return memblock_dma_object->addr + dma_item_offset; | ||
2062 | } | ||
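/*
 * A short worked example of the address math above (numbers are purely
 * illustrative): if the owning memblock's DMA base is 0x10000000 and the
 * item sits 0x200 bytes into that memblock, then dma_item_offset is 0x200
 * and the returned bus address is 0x10000200.
 */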
2063 | |||
2064 | /* | ||
2065 | * __vxge_hw_ring_rxdblock_link - Link the RxD blocks | ||
2066 | * This function links two RxD blocks by setting the next-block pointer | ||
2067 | */ | ||
2068 | static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh, | ||
2069 | struct __vxge_hw_ring *ring, u32 from, | ||
2070 | u32 to) | ||
2071 | { | ||
2072 | u8 *to_item, *from_item; | ||
2073 | dma_addr_t to_dma; | ||
2074 | |||
2075 | /* get "from" RxD block */ | ||
2076 | from_item = mempoolh->items_arr[from]; | ||
2077 | vxge_assert(from_item); | ||
2078 | |||
2079 | /* get "to" RxD block */ | ||
2080 | to_item = mempoolh->items_arr[to]; | ||
2081 | vxge_assert(to_item); | ||
2082 | |||
2083 | /* get the DMA address of the beginning of the "to" RxD block */ | ||
2084 | to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item); | ||
2085 | |||
2086 | /* point the "from" block's next pointer at the "to" block's | ||
2087 | * DMA start address */ | ||
2088 | __vxge_hw_ring_block_next_pointer_set(from_item, to_dma); | ||
2089 | } | ||
2090 | |||
2091 | /* | ||
2092 | * __vxge_hw_ring_mempool_item_alloc - Format an RxD block (mempool item | ||
2093 | * allocation callback) | ||
2094 | * This function is the callback passed to __vxge_hw_mempool_create to create | ||
2095 | * the memory pool for RxD blocks | ||
2096 | */ | ||
2097 | static void | ||
2098 | __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, | ||
2099 | u32 memblock_index, | ||
2100 | struct vxge_hw_mempool_dma *dma_object, | ||
2101 | u32 index, u32 is_last) | ||
2102 | { | ||
2103 | u32 i; | ||
2104 | void *item = mempoolh->items_arr[index]; | ||
2105 | struct __vxge_hw_ring *ring = | ||
2106 | (struct __vxge_hw_ring *)mempoolh->userdata; | ||
2107 | |||
2108 | /* format rxds array */ | ||
2109 | for (i = 0; i < ring->rxds_per_block; i++) { | ||
2110 | void *rxdblock_priv; | ||
2111 | void *uld_priv; | ||
2112 | struct vxge_hw_ring_rxd_1 *rxdp; | ||
2113 | |||
2114 | u32 reserve_index = ring->channel.reserve_ptr - | ||
2115 | (index * ring->rxds_per_block + i + 1); | ||
2116 | u32 memblock_item_idx; | ||
2117 | |||
2118 | ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + | ||
2119 | i * ring->rxd_size; | ||
2120 | |||
2121 | /* Note: memblock_item_idx is index of the item within | ||
2122 | * the memblock. For instance, in case of three RxD-blocks | ||
2123 | * per memblock this value can be 0, 1 or 2. */ | ||
2124 | rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh, | ||
2125 | memblock_index, item, | ||
2126 | &memblock_item_idx); | ||
2127 | |||
2128 | rxdp = ring->channel.reserve_arr[reserve_index]; | ||
2129 | |||
2130 | uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i); | ||
2131 | |||
2132 | /* pre-format Host_Control */ | ||
2133 | rxdp->host_control = (u64)(size_t)uld_priv; | ||
2134 | } | ||
2135 | |||
2136 | __vxge_hw_ring_block_memblock_idx_set(item, memblock_index); | ||
2137 | |||
2138 | if (is_last) { | ||
2139 | /* link last one with first one */ | ||
2140 | __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0); | ||
2141 | } | ||
2142 | |||
2143 | if (index > 0) { | ||
2144 | /* link this RxD block with previous one */ | ||
2145 | __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); | ||
2146 | } | ||
2147 | } | ||
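/*
 * Worked example of the reserve_index math above (illustrative numbers):
 * with channel.reserve_ptr == 512 and rxds_per_block == 4, block 0 fills
 * reserve_arr[511] down to [508] and block 1 fills [507] down to [504];
 * the reserve array is populated from the top down while the RxD blocks
 * are walked from the bottom up.
 */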
2148 | |||
2149 | /* | ||
2150 | * vxge_hw_ring_replenish - Initial replenish of RxDs | ||
2151 | * This function replenishes the RxDs from the reserve array to the work array | ||
2152 | */ | ||
2153 | enum vxge_hw_status | ||
2154 | vxge_hw_ring_replenish(struct __vxge_hw_ring *ring) | ||
2155 | { | ||
2156 | void *rxd; | ||
2157 | struct __vxge_hw_channel *channel; | ||
2158 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2159 | |||
2160 | channel = &ring->channel; | ||
2161 | |||
2162 | while (vxge_hw_channel_dtr_count(channel) > 0) { | ||
2163 | |||
2164 | status = vxge_hw_ring_rxd_reserve(ring, &rxd); | ||
2165 | |||
2166 | vxge_assert(status == VXGE_HW_OK); | ||
2167 | |||
2168 | if (ring->rxd_init) { | ||
2169 | status = ring->rxd_init(rxd, channel->userdata); | ||
2170 | if (status != VXGE_HW_OK) { | ||
2171 | vxge_hw_ring_rxd_free(ring, rxd); | ||
2172 | goto exit; | ||
2173 | } | ||
2174 | } | ||
2175 | |||
2176 | vxge_hw_ring_rxd_post(ring, rxd); | ||
2177 | } | ||
2178 | status = VXGE_HW_OK; | ||
2179 | exit: | ||
2180 | return status; | ||
2181 | } | ||
2182 | |||
2183 | /* | ||
2184 | * __vxge_hw_channel_allocate - Allocate memory for channel | ||
2185 | * This function allocates required memory for the channel and various arrays | ||
2186 | * in the channel | ||
2187 | */ | ||
2188 | static struct __vxge_hw_channel * | ||
2189 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
2190 | enum __vxge_hw_channel_type type, | ||
2191 | u32 length, u32 per_dtr_space, | ||
2192 | void *userdata) | ||
2193 | { | ||
2194 | struct __vxge_hw_channel *channel; | ||
2195 | struct __vxge_hw_device *hldev; | ||
2196 | int size = 0; | ||
2197 | u32 vp_id; | ||
2198 | |||
2199 | hldev = vph->vpath->hldev; | ||
2200 | vp_id = vph->vpath->vp_id; | ||
2201 | |||
2202 | switch (type) { | ||
2203 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
2204 | size = sizeof(struct __vxge_hw_fifo); | ||
2205 | break; | ||
2206 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
2207 | size = sizeof(struct __vxge_hw_ring); | ||
2208 | break; | ||
2209 | default: | ||
2210 | break; | ||
2211 | } | ||
2212 | |||
2213 | channel = kzalloc(size, GFP_KERNEL); | ||
2214 | if (channel == NULL) | ||
2215 | goto exit0; | ||
2216 | INIT_LIST_HEAD(&channel->item); | ||
2217 | |||
2218 | channel->common_reg = hldev->common_reg; | ||
2219 | channel->first_vp_id = hldev->first_vp_id; | ||
2220 | channel->type = type; | ||
2221 | channel->devh = hldev; | ||
2222 | channel->vph = vph; | ||
2223 | channel->userdata = userdata; | ||
2224 | channel->per_dtr_space = per_dtr_space; | ||
2225 | channel->length = length; | ||
2226 | channel->vp_id = vp_id; | ||
2227 | |||
2228 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2229 | if (channel->work_arr == NULL) | ||
2230 | goto exit1; | ||
2231 | |||
2232 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2233 | if (channel->free_arr == NULL) | ||
2234 | goto exit1; | ||
2235 | channel->free_ptr = length; | ||
2236 | |||
2237 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2238 | if (channel->reserve_arr == NULL) | ||
2239 | goto exit1; | ||
2240 | channel->reserve_ptr = length; | ||
2241 | channel->reserve_top = 0; | ||
2242 | |||
2243 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2244 | if (channel->orig_arr == NULL) | ||
2245 | goto exit1; | ||
2246 | |||
2247 | return channel; | ||
2248 | exit1: | ||
2249 | __vxge_hw_channel_free(channel); | ||
2250 | |||
2251 | exit0: | ||
2252 | return NULL; | ||
2253 | } | ||
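/*
 * A hedged note on the array allocations above: the sizeof(void *) * length
 * products are not checked for multiplication overflow. The equivalent
 * modern idiom is kcalloc(), which fails cleanly on overflow, e.g.:
 *
 *	channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
 */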
2254 | |||
2255 | /* | ||
2256 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
2257 | * Adds a block to block pool | ||
2258 | */ | ||
2259 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
2260 | void *block_addr, | ||
2261 | u32 length, | ||
2262 | struct pci_dev *dma_h, | ||
2263 | struct pci_dev *acc_handle) | ||
2264 | { | ||
2265 | struct __vxge_hw_blockpool *blockpool; | ||
2266 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2267 | dma_addr_t dma_addr; | ||
2268 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2270 | |||
2271 | blockpool = &devh->block_pool; | ||
2272 | |||
2273 | if (block_addr == NULL) { | ||
2274 | blockpool->req_out--; | ||
2275 | status = VXGE_HW_FAIL; | ||
2276 | goto exit; | ||
2277 | } | ||
2278 | |||
2279 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | ||
2280 | PCI_DMA_BIDIRECTIONAL); | ||
2281 | |||
2282 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | ||
2283 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
2284 | blockpool->req_out--; | ||
2285 | status = VXGE_HW_FAIL; | ||
2286 | goto exit; | ||
2287 | } | ||
2288 | |||
2289 | if (!list_empty(&blockpool->free_entry_list)) | ||
2290 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2291 | list_first_entry(&blockpool->free_entry_list, | ||
2292 | struct __vxge_hw_blockpool_entry, | ||
2293 | item); | ||
2294 | |||
2295 | if (entry == NULL) | ||
2296 | entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | ||
2297 | else | ||
2298 | list_del(&entry->item); | ||
2299 | |||
2300 | if (entry != NULL) { | ||
2301 | entry->length = length; | ||
2302 | entry->memblock = block_addr; | ||
2303 | entry->dma_addr = dma_addr; | ||
2304 | entry->acc_handle = acc_handle; | ||
2305 | entry->dma_handle = dma_h; | ||
2306 | list_add(&entry->item, &blockpool->free_block_list); | ||
2307 | blockpool->pool_size++; | ||
2308 | status = VXGE_HW_OK; | ||
2309 | } else | ||
2310 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2311 | |||
2312 | blockpool->req_out--; | ||
2313 | |||
2315 | exit: | ||
2316 | return; | ||
2317 | } | ||
2318 | |||
2319 | static inline void | ||
2320 | vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) | ||
2321 | { | ||
2322 | gfp_t flags; | ||
2323 | void *vaddr; | ||
2324 | |||
2325 | if (in_interrupt()) | ||
2326 | flags = GFP_ATOMIC | GFP_DMA; | ||
2327 | else | ||
2328 | flags = GFP_KERNEL | GFP_DMA; | ||
2329 | |||
2330 | vaddr = kmalloc((size), flags); | ||
2331 | |||
2332 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); | ||
2333 | } | ||
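/*
 * Note on the helper above: selecting GFP flags with in_interrupt() is a
 * heuristic - it cannot tell, for example, whether the caller holds a
 * spinlock. A sketch of the usual alternative (assuming callers know their
 * own context) threads a gfp_t through instead:
 *
 *	static void *example_dma_block_alloc(unsigned long size, gfp_t gfp)
 *	{
 *		return kmalloc(size, gfp | GFP_DMA);
 *	}
 */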
2334 | |||
2335 | /* | ||
2336 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | ||
2337 | */ | ||
2338 | static | ||
2339 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
2340 | { | ||
2341 | u32 nreq = 0, i; | ||
2342 | |||
2343 | if ((blockpool->pool_size + blockpool->req_out) < | ||
2344 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
2345 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
2346 | blockpool->req_out += nreq; | ||
2347 | } | ||
2348 | |||
2349 | for (i = 0; i < nreq; i++) | ||
2350 | vxge_os_dma_malloc_async( | ||
2351 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2352 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | ||
2353 | } | ||
2354 | |||
2355 | /* | ||
2356 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | ||
2357 | * Allocates a block of memory of given size, either from block pool | ||
2358 | * or by calling vxge_os_dma_malloc() | ||
2359 | */ | ||
2360 | static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
2361 | struct vxge_hw_mempool_dma *dma_object) | ||
2362 | { | ||
2363 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2364 | struct __vxge_hw_blockpool *blockpool; | ||
2365 | void *memblock = NULL; | ||
2366 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2367 | |||
2368 | blockpool = &devh->block_pool; | ||
2369 | |||
2370 | if (size != blockpool->block_size) { | ||
2371 | |||
2372 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
2373 | &dma_object->handle, | ||
2374 | &dma_object->acc_handle); | ||
2375 | |||
2376 | if (memblock == NULL) { | ||
2377 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2378 | goto exit; | ||
2379 | } | ||
2380 | |||
2381 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | ||
2382 | PCI_DMA_BIDIRECTIONAL); | ||
2383 | |||
2384 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
2385 | dma_object->addr))) { | ||
2386 | vxge_os_dma_free(devh->pdev, memblock, | ||
2387 | &dma_object->acc_handle); | ||
2388 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2389 | goto exit; | ||
2390 | } | ||
2391 | |||
2392 | } else { | ||
2393 | |||
2394 | if (!list_empty(&blockpool->free_block_list)) | ||
2395 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2396 | list_first_entry(&blockpool->free_block_list, | ||
2397 | struct __vxge_hw_blockpool_entry, | ||
2398 | item); | ||
2399 | |||
2400 | if (entry != NULL) { | ||
2401 | list_del(&entry->item); | ||
2402 | dma_object->addr = entry->dma_addr; | ||
2403 | dma_object->handle = entry->dma_handle; | ||
2404 | dma_object->acc_handle = entry->acc_handle; | ||
2405 | memblock = entry->memblock; | ||
2406 | |||
2407 | list_add(&entry->item, | ||
2408 | &blockpool->free_entry_list); | ||
2409 | blockpool->pool_size--; | ||
2410 | } | ||
2411 | |||
2412 | if (memblock != NULL) | ||
2413 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
2414 | } | ||
2415 | exit: | ||
2416 | return memblock; | ||
2417 | } | ||
2418 | |||
2419 | /* | ||
2420 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | ||
2421 | */ | ||
2422 | static void | ||
2423 | __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
2424 | { | ||
2425 | struct list_head *p, *n; | ||
2426 | |||
2427 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
2428 | |||
2429 | if (blockpool->pool_size < blockpool->pool_max) | ||
2430 | break; | ||
2431 | |||
2432 | pci_unmap_single( | ||
2433 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2434 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
2435 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
2436 | PCI_DMA_BIDIRECTIONAL); | ||
2437 | |||
2438 | vxge_os_dma_free( | ||
2439 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2440 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
2441 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
2442 | |||
2443 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
2444 | |||
2445 | list_add(p, &blockpool->free_entry_list); | ||
2446 | |||
2447 | blockpool->pool_size--; | ||
2448 | |||
2449 | } | ||
2450 | } | ||
2451 | |||
2452 | /* | ||
2453 | * __vxge_hw_blockpool_free - Frees the memory allocated with | ||
2454 | * __vxge_hw_blockpool_malloc | ||
2455 | */ | ||
2456 | static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, | ||
2457 | void *memblock, u32 size, | ||
2458 | struct vxge_hw_mempool_dma *dma_object) | ||
2459 | { | ||
2460 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2461 | struct __vxge_hw_blockpool *blockpool; | ||
2462 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2463 | |||
2464 | blockpool = &devh->block_pool; | ||
2465 | |||
2466 | if (size != blockpool->block_size) { | ||
2467 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
2468 | PCI_DMA_BIDIRECTIONAL); | ||
2469 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
2470 | } else { | ||
2471 | |||
2472 | if (!list_empty(&blockpool->free_entry_list)) | ||
2473 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2474 | list_first_entry(&blockpool->free_entry_list, | ||
2475 | struct __vxge_hw_blockpool_entry, | ||
2476 | item); | ||
2477 | |||
2478 | if (entry == NULL) | ||
2479 | entry = vmalloc(sizeof( | ||
2480 | struct __vxge_hw_blockpool_entry)); | ||
2481 | else | ||
2482 | list_del(&entry->item); | ||
2483 | |||
2484 | if (entry != NULL) { | ||
2485 | entry->length = size; | ||
2486 | entry->memblock = memblock; | ||
2487 | entry->dma_addr = dma_object->addr; | ||
2488 | entry->acc_handle = dma_object->acc_handle; | ||
2489 | entry->dma_handle = dma_object->handle; | ||
2490 | list_add(&entry->item, | ||
2491 | &blockpool->free_block_list); | ||
2492 | blockpool->pool_size++; | ||
2493 | status = VXGE_HW_OK; | ||
2494 | } else | ||
2495 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2496 | |||
2497 | if (status == VXGE_HW_OK) | ||
2498 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
2499 | } | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * __vxge_hw_mempool_destroy - Destroy the memory pool | ||
2504 | */ | ||
2505 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) | ||
2506 | { | ||
2507 | u32 i, j; | ||
2508 | struct __vxge_hw_device *devh = mempool->devh; | ||
2509 | |||
2510 | for (i = 0; i < mempool->memblocks_allocated; i++) { | ||
2511 | struct vxge_hw_mempool_dma *dma_object; | ||
2512 | |||
2513 | vxge_assert(mempool->memblocks_arr[i]); | ||
2514 | vxge_assert(mempool->memblocks_dma_arr + i); | ||
2515 | |||
2516 | dma_object = mempool->memblocks_dma_arr + i; | ||
2517 | |||
2518 | for (j = 0; j < mempool->items_per_memblock; j++) { | ||
2519 | u32 index = i * mempool->items_per_memblock + j; | ||
2520 | |||
2521 | /* to skip the last partially filled (if any) memblock */ | ||
2522 | if (index >= mempool->items_current) | ||
2523 | break; | ||
2524 | } | ||
2525 | |||
2526 | vfree(mempool->memblocks_priv_arr[i]); | ||
2527 | |||
2528 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | ||
2529 | mempool->memblock_size, dma_object); | ||
2530 | } | ||
2531 | |||
2532 | vfree(mempool->items_arr); | ||
2533 | vfree(mempool->memblocks_dma_arr); | ||
2534 | vfree(mempool->memblocks_priv_arr); | ||
2535 | vfree(mempool->memblocks_arr); | ||
2536 | vfree(mempool); | ||
2537 | } | ||
2538 | |||
2539 | /* | ||
2540 | * __vxge_hw_mempool_grow | ||
2541 | * Grows the mempool by up to %num_allocate memblocks. | ||
2542 | */ | ||
2543 | static enum vxge_hw_status | ||
2544 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | ||
2545 | u32 *num_allocated) | ||
2546 | { | ||
2547 | u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0; | ||
2548 | u32 n_items = mempool->items_per_memblock; | ||
2549 | u32 start_block_idx = mempool->memblocks_allocated; | ||
2550 | u32 end_block_idx = mempool->memblocks_allocated + num_allocate; | ||
2551 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2552 | |||
2553 | *num_allocated = 0; | ||
2554 | |||
2555 | if (end_block_idx > mempool->memblocks_max) { | ||
2556 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2557 | goto exit; | ||
2558 | } | ||
2559 | |||
2560 | for (i = start_block_idx; i < end_block_idx; i++) { | ||
2561 | u32 j; | ||
2562 | u32 is_last = ((end_block_idx - 1) == i); | ||
2563 | struct vxge_hw_mempool_dma *dma_object = | ||
2564 | mempool->memblocks_dma_arr + i; | ||
2565 | void *the_memblock; | ||
2566 | |||
2567 | /* allocate the memblock's private part. Each DMA memblock | ||
2568 | * has space allocated for the items' private usage upon | ||
2569 | * the mempool user's request. Each time the mempool grows, it | ||
2570 | * allocates the new memblock and its private part at once, | ||
2571 | * which helps to minimize memory usage. */ | ||
2572 | mempool->memblocks_priv_arr[i] = | ||
2573 | vzalloc(mempool->items_priv_size * n_items); | ||
2574 | if (mempool->memblocks_priv_arr[i] == NULL) { | ||
2575 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2576 | goto exit; | ||
2577 | } | ||
2578 | |||
2579 | /* allocate DMA-capable memblock */ | ||
2580 | mempool->memblocks_arr[i] = | ||
2581 | __vxge_hw_blockpool_malloc(mempool->devh, | ||
2582 | mempool->memblock_size, dma_object); | ||
2583 | if (mempool->memblocks_arr[i] == NULL) { | ||
2584 | vfree(mempool->memblocks_priv_arr[i]); | ||
2585 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2586 | goto exit; | ||
2587 | } | ||
2588 | |||
2589 | (*num_allocated)++; | ||
2590 | mempool->memblocks_allocated++; | ||
2591 | |||
2592 | memset(mempool->memblocks_arr[i], 0, mempool->memblock_size); | ||
2593 | |||
2594 | the_memblock = mempool->memblocks_arr[i]; | ||
2595 | |||
2596 | /* fill the items hash array */ | ||
2597 | for (j = 0; j < n_items; j++) { | ||
2598 | u32 index = i * n_items + j; | ||
2599 | |||
2600 | if (first_time && index >= mempool->items_initial) | ||
2601 | break; | ||
2602 | |||
2603 | mempool->items_arr[index] = | ||
2604 | ((char *)the_memblock + j*mempool->item_size); | ||
2605 | |||
2606 | /* let the caller do more work on each item */ | ||
2607 | if (mempool->item_func_alloc != NULL) | ||
2608 | mempool->item_func_alloc(mempool, i, | ||
2609 | dma_object, index, is_last); | ||
2610 | |||
2611 | mempool->items_current = index + 1; | ||
2612 | } | ||
2613 | |||
2614 | if (first_time && mempool->items_current == | ||
2615 | mempool->items_initial) | ||
2616 | break; | ||
2617 | } | ||
2618 | exit: | ||
2619 | return status; | ||
2620 | } | ||
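/*
 * Worked example of the grow path above (illustrative numbers): with
 * items_per_memblock == 4, items_initial == 10 and num_allocate == 3 on
 * the first grow, blocks 0 and 1 fill items_arr[0..7], block 2 stops at
 * items_arr[9] because of the items_initial cut-off, and the function
 * returns with items_current == 10 and *num_allocated == 3.
 */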
2621 | |||
2622 | /* | ||
2623 | * __vxge_hw_mempool_create | ||
2624 | * This function creates a memory pool object. The pool may grow but will | ||
2625 | * never shrink. It consists of a number of dynamically allocated blocks | ||
2626 | * large enough to hold %items_initial items. Memory is DMA-able, but the | ||
2627 | * client must map/unmap it before interoperating with the device. | ||
2628 | */ | ||
2629 | static struct vxge_hw_mempool * | ||
2630 | __vxge_hw_mempool_create(struct __vxge_hw_device *devh, | ||
2631 | u32 memblock_size, | ||
2632 | u32 item_size, | ||
2633 | u32 items_priv_size, | ||
2634 | u32 items_initial, | ||
2635 | u32 items_max, | ||
2636 | struct vxge_hw_mempool_cbs *mp_callback, | ||
2637 | void *userdata) | ||
2638 | { | ||
2639 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2640 | u32 memblocks_to_allocate; | ||
2641 | struct vxge_hw_mempool *mempool = NULL; | ||
2642 | u32 allocated; | ||
2643 | |||
2644 | if (memblock_size < item_size) { | ||
2645 | status = VXGE_HW_FAIL; | ||
2646 | goto exit; | ||
2647 | } | ||
2648 | |||
2649 | mempool = vzalloc(sizeof(struct vxge_hw_mempool)); | ||
2650 | if (mempool == NULL) { | ||
2651 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2652 | goto exit; | ||
2653 | } | ||
2654 | |||
2655 | mempool->devh = devh; | ||
2656 | mempool->memblock_size = memblock_size; | ||
2657 | mempool->items_max = items_max; | ||
2658 | mempool->items_initial = items_initial; | ||
2659 | mempool->item_size = item_size; | ||
2660 | mempool->items_priv_size = items_priv_size; | ||
2661 | mempool->item_func_alloc = mp_callback->item_func_alloc; | ||
2662 | mempool->userdata = userdata; | ||
2663 | |||
2664 | mempool->memblocks_allocated = 0; | ||
2665 | |||
2666 | mempool->items_per_memblock = memblock_size / item_size; | ||
2667 | |||
2668 | mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / | ||
2669 | mempool->items_per_memblock; | ||
2670 | |||
2671 | /* allocate array of memblocks */ | ||
2672 | mempool->memblocks_arr = | ||
2673 | vzalloc(sizeof(void *) * mempool->memblocks_max); | ||
2674 | if (mempool->memblocks_arr == NULL) { | ||
2675 | __vxge_hw_mempool_destroy(mempool); | ||
2676 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2677 | mempool = NULL; | ||
2678 | goto exit; | ||
2679 | } | ||
2680 | |||
2681 | /* allocate array of private parts of items per memblocks */ | ||
2682 | mempool->memblocks_priv_arr = | ||
2683 | vzalloc(sizeof(void *) * mempool->memblocks_max); | ||
2684 | if (mempool->memblocks_priv_arr == NULL) { | ||
2685 | __vxge_hw_mempool_destroy(mempool); | ||
2686 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2687 | mempool = NULL; | ||
2688 | goto exit; | ||
2689 | } | ||
2690 | |||
2691 | /* allocate array of memblocks DMA objects */ | ||
2692 | mempool->memblocks_dma_arr = | ||
2693 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * | ||
2694 | mempool->memblocks_max); | ||
2695 | if (mempool->memblocks_dma_arr == NULL) { | ||
2696 | __vxge_hw_mempool_destroy(mempool); | ||
2697 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2698 | mempool = NULL; | ||
2699 | goto exit; | ||
2700 | } | ||
2701 | |||
2702 | /* allocate hash array of items */ | ||
2703 | mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); | ||
2704 | if (mempool->items_arr == NULL) { | ||
2705 | __vxge_hw_mempool_destroy(mempool); | ||
2706 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2707 | mempool = NULL; | ||
2708 | goto exit; | ||
2709 | } | ||
2710 | |||
2711 | /* calculate initial number of memblocks */ | ||
2712 | memblocks_to_allocate = (mempool->items_initial + | ||
2713 | mempool->items_per_memblock - 1) / | ||
2714 | mempool->items_per_memblock; | ||
2715 | |||
2716 | /* pre-allocate the mempool */ | ||
2717 | status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, | ||
2718 | &allocated); | ||
2719 | if (status != VXGE_HW_OK) { | ||
2720 | __vxge_hw_mempool_destroy(mempool); | ||
2721 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2722 | mempool = NULL; | ||
2723 | goto exit; | ||
2724 | } | ||
2725 | |||
2726 | exit: | ||
2727 | return mempool; | ||
2728 | } | ||
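/*
 * The memblocks_max computation above is a standard round-up division:
 * with items_max == 10 and items_per_memblock == 4 it yields 3 memblocks.
 * In later kernels the same expression is usually spelled
 * DIV_ROUND_UP(items_max, mempool->items_per_memblock).
 */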
2729 | |||
2730 | /* | ||
2731 | * __vxge_hw_ring_abort - Returns outstanding RxDs | ||
2732 | * This function terminates the posted RxDs of the ring | ||
2733 | */ | ||
2734 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) | ||
2735 | { | ||
2736 | void *rxdh; | ||
2737 | struct __vxge_hw_channel *channel; | ||
2738 | |||
2739 | channel = &ring->channel; | ||
2740 | |||
2741 | for (;;) { | ||
2742 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
2743 | |||
2744 | if (rxdh == NULL) | ||
2745 | break; | ||
2746 | |||
2747 | vxge_hw_channel_dtr_complete(channel); | ||
2748 | |||
2749 | if (ring->rxd_term) | ||
2750 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | ||
2751 | channel->userdata); | ||
2752 | |||
2753 | vxge_hw_channel_dtr_free(channel, rxdh); | ||
2754 | } | ||
2755 | |||
2756 | return VXGE_HW_OK; | ||
2757 | } | ||
2758 | |||
2759 | /* | ||
2760 | * __vxge_hw_ring_reset - Resets the ring | ||
2761 | * This function resets the ring during vpath reset operation | ||
2762 | */ | ||
2763 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | ||
2764 | { | ||
2765 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2766 | struct __vxge_hw_channel *channel; | ||
2767 | |||
2768 | channel = &ring->channel; | ||
2769 | |||
2770 | __vxge_hw_ring_abort(ring); | ||
2771 | |||
2772 | status = __vxge_hw_channel_reset(channel); | ||
2773 | |||
2774 | if (status != VXGE_HW_OK) | ||
2775 | goto exit; | ||
2776 | |||
2777 | if (ring->rxd_init) { | ||
2778 | status = vxge_hw_ring_replenish(ring); | ||
2779 | if (status != VXGE_HW_OK) | ||
2780 | goto exit; | ||
2781 | } | ||
2782 | exit: | ||
2783 | return status; | ||
2784 | } | ||
2785 | |||
2786 | /* | ||
2787 | * __vxge_hw_ring_delete - Removes the ring | ||
2788 | * This function frees up the memory pool and removes the ring | ||
2789 | */ | ||
2790 | static enum vxge_hw_status | ||
2791 | __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) | ||
2792 | { | ||
2793 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | ||
2794 | |||
2795 | __vxge_hw_ring_abort(ring); | ||
2796 | |||
2797 | if (ring->mempool) | ||
2798 | __vxge_hw_mempool_destroy(ring->mempool); | ||
2799 | |||
2800 | vp->vpath->ringh = NULL; | ||
2801 | __vxge_hw_channel_free(&ring->channel); | ||
2802 | |||
2803 | return VXGE_HW_OK; | ||
2804 | } | ||
2805 | |||
2806 | /* | ||
2807 | * __vxge_hw_ring_create - Create a Ring | ||
2808 | * This function creates Ring and initializes it. | ||
2809 | */ | ||
2810 | static enum vxge_hw_status | ||
2811 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, | ||
2812 | struct vxge_hw_ring_attr *attr) | ||
2813 | { | ||
2814 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2815 | struct __vxge_hw_ring *ring; | ||
2816 | u32 ring_length; | ||
2817 | struct vxge_hw_ring_config *config; | ||
2818 | struct __vxge_hw_device *hldev; | ||
2819 | u32 vp_id; | ||
2820 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
2821 | |||
2822 | if ((vp == NULL) || (attr == NULL)) { | ||
2823 | status = VXGE_HW_FAIL; | ||
2824 | goto exit; | ||
2825 | } | ||
2826 | |||
2827 | hldev = vp->vpath->hldev; | ||
2828 | vp_id = vp->vpath->vp_id; | ||
2829 | |||
2830 | config = &hldev->config.vp_config[vp_id].ring; | ||
2831 | |||
2832 | ring_length = config->ring_blocks * | ||
2833 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | ||
2834 | |||
2835 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | ||
2836 | VXGE_HW_CHANNEL_TYPE_RING, | ||
2837 | ring_length, | ||
2838 | attr->per_rxd_space, | ||
2839 | attr->userdata); | ||
2840 | if (ring == NULL) { | ||
2841 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2842 | goto exit; | ||
2843 | } | ||
2844 | |||
2845 | vp->vpath->ringh = ring; | ||
2846 | ring->vp_id = vp_id; | ||
2847 | ring->vp_reg = vp->vpath->vp_reg; | ||
2848 | ring->common_reg = hldev->common_reg; | ||
2849 | ring->stats = &vp->vpath->sw_stats->ring_stats; | ||
2850 | ring->config = config; | ||
2851 | ring->callback = attr->callback; | ||
2852 | ring->rxd_init = attr->rxd_init; | ||
2853 | ring->rxd_term = attr->rxd_term; | ||
2854 | ring->buffer_mode = config->buffer_mode; | ||
2855 | ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; | ||
2856 | ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; | ||
2857 | ring->rxds_limit = config->rxds_limit; | ||
2858 | |||
2859 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | ||
2860 | ring->rxd_priv_size = | ||
2861 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | ||
2862 | ring->per_rxd_space = attr->per_rxd_space; | ||
2863 | |||
2864 | ring->rxd_priv_size = | ||
2865 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | ||
2866 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | ||
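/*
 * The rounding above is the standard round-up, i.e. equivalent to
 * roundup(rxd_priv_size, VXGE_CACHE_LINE_SIZE): e.g. a 136-byte private
 * area on a 64-byte cache line becomes 192 bytes, so per-RxD private
 * areas never share a cache line.
 */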
2867 | |||
2868 | /* how many RxDs can fit into one block. Depends on configured | ||
2869 | * buffer_mode. */ | ||
2870 | ring->rxds_per_block = | ||
2871 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | ||
2872 | |||
2873 | /* calculate actual RxD block private size */ | ||
2874 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
2875 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
2876 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
2877 | VXGE_HW_BLOCK_SIZE, | ||
2878 | VXGE_HW_BLOCK_SIZE, | ||
2879 | ring->rxdblock_priv_size, | ||
2880 | ring->config->ring_blocks, | ||
2881 | ring->config->ring_blocks, | ||
2882 | &ring_mp_callback, | ||
2883 | ring); | ||
2884 | if (ring->mempool == NULL) { | ||
2885 | __vxge_hw_ring_delete(vp); | ||
2886 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2887 | } | ||
2888 | |||
2889 | status = __vxge_hw_channel_initialize(&ring->channel); | ||
2890 | if (status != VXGE_HW_OK) { | ||
2891 | __vxge_hw_ring_delete(vp); | ||
2892 | goto exit; | ||
2893 | } | ||
2894 | |||
2895 | /* Note: | ||
2896 | * Specifying rxd_init callback means two things: | ||
2897 | * 1) rxds need to be initialized by driver at channel-open time; | ||
2898 | * 2) rxds need to be posted at channel-open time | ||
2899 | * (that's what the initial_replenish() below does) | ||
2900 | * Currently we don't have a case when the 1) is done without the 2). | ||
2901 | */ | ||
2902 | if (ring->rxd_init) { | ||
2903 | status = vxge_hw_ring_replenish(ring); | ||
2904 | if (status != VXGE_HW_OK) { | ||
2905 | __vxge_hw_ring_delete(vp); | ||
2906 | goto exit; | ||
2907 | } | ||
2908 | } | ||
2909 | |||
2910 | /* the initial replenish will increment the counter in its post() routine, | ||
2911 | * so we have to reset it */ | ||
2912 | ring->stats->common_stats.usage_cnt = 0; | ||
2913 | exit: | ||
2914 | return status; | ||
2915 | } | ||
2916 | |||
2917 | /* | ||
2918 | * vxge_hw_device_config_default_get - Initialize device config with defaults. | ||
2919 | * Initialize Titan device config with default values. | ||
2920 | */ | ||
2921 | enum vxge_hw_status __devinit | ||
2922 | vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | ||
2923 | { | ||
2924 | u32 i; | ||
2925 | |||
2926 | device_config->dma_blockpool_initial = | ||
2927 | VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; | ||
2928 | device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; | ||
2929 | device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; | ||
2930 | device_config->rth_en = VXGE_HW_RTH_DEFAULT; | ||
2931 | device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; | ||
2932 | device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; | ||
2933 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | ||
2934 | |||
2935 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
2936 | device_config->vp_config[i].vp_id = i; | ||
2937 | |||
2938 | device_config->vp_config[i].min_bandwidth = | ||
2939 | VXGE_HW_VPATH_BANDWIDTH_DEFAULT; | ||
2940 | |||
2941 | device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; | ||
2942 | |||
2943 | device_config->vp_config[i].ring.ring_blocks = | ||
2944 | VXGE_HW_DEF_RING_BLOCKS; | ||
2945 | |||
2946 | device_config->vp_config[i].ring.buffer_mode = | ||
2947 | VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; | ||
2948 | |||
2949 | device_config->vp_config[i].ring.scatter_mode = | ||
2950 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; | ||
2951 | |||
2952 | device_config->vp_config[i].ring.rxds_limit = | ||
2953 | VXGE_HW_DEF_RING_RXDS_LIMIT; | ||
2954 | |||
2955 | device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; | ||
2956 | |||
2957 | device_config->vp_config[i].fifo.fifo_blocks = | ||
2958 | VXGE_HW_MIN_FIFO_BLOCKS; | ||
2959 | |||
2960 | device_config->vp_config[i].fifo.max_frags = | ||
2961 | VXGE_HW_MAX_FIFO_FRAGS; | ||
2962 | |||
2963 | device_config->vp_config[i].fifo.memblock_size = | ||
2964 | VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; | ||
2965 | |||
2966 | device_config->vp_config[i].fifo.alignment_size = | ||
2967 | VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; | ||
2968 | |||
2969 | device_config->vp_config[i].fifo.intr = | ||
2970 | VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; | ||
2971 | |||
2972 | device_config->vp_config[i].fifo.no_snoop_bits = | ||
2973 | VXGE_HW_FIFO_NO_SNOOP_DEFAULT; | ||
2974 | device_config->vp_config[i].tti.intr_enable = | ||
2975 | VXGE_HW_TIM_INTR_DEFAULT; | ||
2976 | |||
2977 | device_config->vp_config[i].tti.btimer_val = | ||
2978 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2979 | |||
2980 | device_config->vp_config[i].tti.timer_ac_en = | ||
2981 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2982 | |||
2983 | device_config->vp_config[i].tti.timer_ci_en = | ||
2984 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2985 | |||
2986 | device_config->vp_config[i].tti.timer_ri_en = | ||
2987 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2988 | |||
2989 | device_config->vp_config[i].tti.rtimer_val = | ||
2990 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2991 | |||
2992 | device_config->vp_config[i].tti.util_sel = | ||
2993 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2994 | |||
2995 | device_config->vp_config[i].tti.ltimer_val = | ||
2996 | VXGE_HW_USE_FLASH_DEFAULT; | ||
2997 | |||
2998 | device_config->vp_config[i].tti.urange_a = | ||
2999 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3000 | |||
3001 | device_config->vp_config[i].tti.uec_a = | ||
3002 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3003 | |||
3004 | device_config->vp_config[i].tti.urange_b = | ||
3005 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3006 | |||
3007 | device_config->vp_config[i].tti.uec_b = | ||
3008 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3009 | |||
3010 | device_config->vp_config[i].tti.urange_c = | ||
3011 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3012 | |||
3013 | device_config->vp_config[i].tti.uec_c = | ||
3014 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3015 | |||
3016 | device_config->vp_config[i].tti.uec_d = | ||
3017 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3018 | |||
3019 | device_config->vp_config[i].rti.intr_enable = | ||
3020 | VXGE_HW_TIM_INTR_DEFAULT; | ||
3021 | |||
3022 | device_config->vp_config[i].rti.btimer_val = | ||
3023 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3024 | |||
3025 | device_config->vp_config[i].rti.timer_ac_en = | ||
3026 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3027 | |||
3028 | device_config->vp_config[i].rti.timer_ci_en = | ||
3029 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3030 | |||
3031 | device_config->vp_config[i].rti.timer_ri_en = | ||
3032 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3033 | |||
3034 | device_config->vp_config[i].rti.rtimer_val = | ||
3035 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3036 | |||
3037 | device_config->vp_config[i].rti.util_sel = | ||
3038 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3039 | |||
3040 | device_config->vp_config[i].rti.ltimer_val = | ||
3041 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3042 | |||
3043 | device_config->vp_config[i].rti.urange_a = | ||
3044 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3045 | |||
3046 | device_config->vp_config[i].rti.uec_a = | ||
3047 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3048 | |||
3049 | device_config->vp_config[i].rti.urange_b = | ||
3050 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3051 | |||
3052 | device_config->vp_config[i].rti.uec_b = | ||
3053 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3054 | |||
3055 | device_config->vp_config[i].rti.urange_c = | ||
3056 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3057 | |||
3058 | device_config->vp_config[i].rti.uec_c = | ||
3059 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3060 | |||
3061 | device_config->vp_config[i].rti.uec_d = | ||
3062 | VXGE_HW_USE_FLASH_DEFAULT; | ||
3063 | |||
3064 | device_config->vp_config[i].mtu = | ||
3065 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU; | ||
3066 | |||
3067 | device_config->vp_config[i].rpa_strip_vlan_tag = | ||
3068 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT; | ||
3069 | } | ||
3070 | |||
3071 | return VXGE_HW_OK; | ||
3072 | } | ||
3073 | |||
3074 | /* | ||
3075 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | ||
3076 | * Set the swapper bits appropriately for the vpath. | ||
3077 | */ | ||
3078 | static enum vxge_hw_status | ||
3079 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
3080 | { | ||
3081 | #ifndef __BIG_ENDIAN | ||
3082 | u64 val64; | ||
3083 | |||
3084 | val64 = readq(&vpath_reg->vpath_general_cfg1); | ||
3085 | wmb(); | ||
3086 | val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN; | ||
3087 | writeq(val64, &vpath_reg->vpath_general_cfg1); | ||
3088 | wmb(); | ||
3089 | #endif | ||
3090 | return VXGE_HW_OK; | ||
3091 | } | ||
3092 | |||
3093 | /* | ||
3094 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. | ||
3095 | * Set the swapper bits appropriately for the KDFC. | ||
3096 | */ | ||
3097 | static enum vxge_hw_status | ||
3098 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, | ||
3099 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
3100 | { | ||
3101 | u64 val64; | ||
3102 | |||
3103 | val64 = readq(&legacy_reg->pifm_wr_swap_en); | ||
3104 | |||
3105 | if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) { | ||
3106 | val64 = readq(&vpath_reg->kdfcctl_cfg0); | ||
3107 | wmb(); | ||
3108 | |||
3109 | val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 | | ||
3110 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 | | ||
3111 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2; | ||
3112 | |||
3113 | writeq(val64, &vpath_reg->kdfcctl_cfg0); | ||
3114 | wmb(); | ||
3115 | } | ||
3116 | |||
3117 | return VXGE_HW_OK; | ||
3118 | } | ||
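Both swapper routines address the same concern: the adapter's register and doorbell format is fixed, so little-endian hosts enable the hardware byte-swap while big-endian hosts need no conversion. A rough sketch of the equivalent software-side transformation, assuming the device's native ordering is big-endian (swab64() is the kernel's 64-bit byte swap; the helper name is hypothetical and not part of the driver):

	#include <linux/swab.h>

	/* Illustrative only: with the hardware swapper disabled, a
	 * little-endian host would have to swap each 64-bit value
	 * itself before it reaches the device.
	 */
	static inline u64 example_to_device_order(u64 host_val)
	{
	#ifndef __BIG_ENDIAN
		return swab64(host_val);
	#else
		return host_val;
	#endif
	}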
3119 | |||
3120 | /* | ||
3121 | * vxge_hw_mgmt_reg_read - Read Titan register. | ||
3122 | */ | ||
3123 | enum vxge_hw_status | ||
3124 | vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, | ||
3125 | enum vxge_hw_mgmt_reg_type type, | ||
3126 | u32 index, u32 offset, u64 *value) | ||
3127 | { | ||
3128 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3129 | |||
3130 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
3131 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
3132 | goto exit; | ||
3133 | } | ||
3134 | |||
3135 | switch (type) { | ||
3136 | case vxge_hw_mgmt_reg_type_legacy: | ||
3137 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | ||
3138 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3139 | break; | ||
3140 | } | ||
3141 | *value = readq((void __iomem *)hldev->legacy_reg + offset); | ||
3142 | break; | ||
3143 | case vxge_hw_mgmt_reg_type_toc: | ||
3144 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | ||
3145 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3146 | break; | ||
3147 | } | ||
3148 | *value = readq((void __iomem *)hldev->toc_reg + offset); | ||
3149 | break; | ||
3150 | case vxge_hw_mgmt_reg_type_common: | ||
3151 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | ||
3152 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3153 | break; | ||
3154 | } | ||
3155 | *value = readq((void __iomem *)hldev->common_reg + offset); | ||
3156 | break; | ||
3157 | case vxge_hw_mgmt_reg_type_mrpcim: | ||
3158 | if (!(hldev->access_rights & | ||
3159 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | ||
3160 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
3161 | break; | ||
3162 | } | ||
3163 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | ||
3164 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3165 | break; | ||
3166 | } | ||
3167 | *value = readq((void __iomem *)hldev->mrpcim_reg + offset); | ||
3168 | break; | ||
3169 | case vxge_hw_mgmt_reg_type_srpcim: | ||
3170 | if (!(hldev->access_rights & | ||
3171 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | ||
3172 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
3173 | break; | ||
3174 | } | ||
3175 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | ||
3176 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3177 | break; | ||
3178 | } | ||
3179 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | ||
3180 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3181 | break; | ||
3182 | } | ||
3183 | *value = readq((void __iomem *)hldev->srpcim_reg[index] + | ||
3184 | offset); | ||
3185 | break; | ||
3186 | case vxge_hw_mgmt_reg_type_vpmgmt: | ||
3187 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | ||
3188 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
3189 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3190 | break; | ||
3191 | } | ||
3192 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | ||
3193 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3194 | break; | ||
3195 | } | ||
3196 | *value = readq((void __iomem *)hldev->vpmgmt_reg[index] + | ||
3197 | offset); | ||
3198 | break; | ||
3199 | case vxge_hw_mgmt_reg_type_vpath: | ||
3200 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) || | ||
3201 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
3202 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3203 | break; | ||
3204 | } | ||
3209 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | ||
3210 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3211 | break; | ||
3212 | } | ||
3213 | *value = readq((void __iomem *)hldev->vpath_reg[index] + | ||
3214 | offset); | ||
3215 | break; | ||
3216 | default: | ||
3217 | status = VXGE_HW_ERR_INVALID_TYPE; | ||
3218 | break; | ||
3219 | } | ||
3220 | |||
3221 | exit: | ||
3222 | return status; | ||
3223 | } | ||
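Each case above bounds offset so the readq stays inside the selected register block (the "- 8" accounts for the width of the final 64-bit register). A minimal usage sketch, assuming a valid hldev; the choice of the common register space and offset 0 is purely illustrative:

	/* Hedged sketch: read the first 64-bit register of the common
	 * space.  The index argument is ignored for non-indexed
	 * register spaces.
	 */
	static enum vxge_hw_status example_read_common_reg(
			struct __vxge_hw_device *hldev, u64 *out)
	{
		return vxge_hw_mgmt_reg_read(hldev,
					     vxge_hw_mgmt_reg_type_common,
					     0, 0, out);
	}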
3224 | |||
3225 | /* | ||
3226 | * vxge_hw_vpath_strip_fcs_check - Check for FCS strip. | ||
3227 | */ | ||
3228 | enum vxge_hw_status | ||
3229 | vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) | ||
3230 | { | ||
3231 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | ||
3232 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3233 | int i = 0, j = 0; | ||
3234 | |||
3235 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
3236 | if (!((vpath_mask) & vxge_mBIT(i))) | ||
3237 | continue; | ||
3238 | vpmgmt_reg = hldev->vpmgmt_reg[i]; | ||
3239 | for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) { | ||
3240 | if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j]) | ||
3241 | & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS) | ||
3242 | return VXGE_HW_FAIL; | ||
3243 | } | ||
3244 | } | ||
3245 | return status; | ||
3246 | } | ||
3247 | /* | ||
3248 | * vxge_hw_mgmt_reg_write - Write Titan register. | ||
3249 | */ | ||
3250 | enum vxge_hw_status | ||
3251 | vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, | ||
3252 | enum vxge_hw_mgmt_reg_type type, | ||
3253 | u32 index, u32 offset, u64 value) | ||
3254 | { | ||
3255 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3256 | |||
3257 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | ||
3258 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
3259 | goto exit; | ||
3260 | } | ||
3261 | |||
3262 | switch (type) { | ||
3263 | case vxge_hw_mgmt_reg_type_legacy: | ||
3264 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | ||
3265 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3266 | break; | ||
3267 | } | ||
3268 | writeq(value, (void __iomem *)hldev->legacy_reg + offset); | ||
3269 | break; | ||
3270 | case vxge_hw_mgmt_reg_type_toc: | ||
3271 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | ||
3272 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3273 | break; | ||
3274 | } | ||
3275 | writeq(value, (void __iomem *)hldev->toc_reg + offset); | ||
3276 | break; | ||
3277 | case vxge_hw_mgmt_reg_type_common: | ||
3278 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | ||
3279 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3280 | break; | ||
3281 | } | ||
3282 | writeq(value, (void __iomem *)hldev->common_reg + offset); | ||
3283 | break; | ||
3284 | case vxge_hw_mgmt_reg_type_mrpcim: | ||
3285 | if (!(hldev->access_rights & | ||
3286 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | ||
3287 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
3288 | break; | ||
3289 | } | ||
3290 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | ||
3291 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3292 | break; | ||
3293 | } | ||
3294 | writeq(value, (void __iomem *)hldev->mrpcim_reg + offset); | ||
3295 | break; | ||
3296 | case vxge_hw_mgmt_reg_type_srpcim: | ||
3297 | if (!(hldev->access_rights & | ||
3298 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | ||
3299 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | ||
3300 | break; | ||
3301 | } | ||
3302 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | ||
3303 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3304 | break; | ||
3305 | } | ||
3306 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | ||
3307 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3308 | break; | ||
3309 | } | ||
3310 | writeq(value, (void __iomem *)hldev->srpcim_reg[index] + | ||
3311 | offset); | ||
3312 | |||
3313 | break; | ||
3314 | case vxge_hw_mgmt_reg_type_vpmgmt: | ||
3315 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | ||
3316 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
3317 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3318 | break; | ||
3319 | } | ||
3320 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | ||
3321 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3322 | break; | ||
3323 | } | ||
3324 | writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] + | ||
3325 | offset); | ||
3326 | break; | ||
3327 | case vxge_hw_mgmt_reg_type_vpath: | ||
3328 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) || | ||
3329 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | ||
3330 | status = VXGE_HW_ERR_INVALID_INDEX; | ||
3331 | break; | ||
3332 | } | ||
3333 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | ||
3334 | status = VXGE_HW_ERR_INVALID_OFFSET; | ||
3335 | break; | ||
3336 | } | ||
3337 | writeq(value, (void __iomem *)hldev->vpath_reg[index] + | ||
3338 | offset); | ||
3339 | break; | ||
3340 | default: | ||
3341 | status = VXGE_HW_ERR_INVALID_TYPE; | ||
3342 | break; | ||
3343 | } | ||
3344 | exit: | ||
3345 | return status; | ||
3346 | } | ||
3347 | |||
3348 | /* | ||
3349 | * __vxge_hw_fifo_abort - Terminate outstanding TxDs | ||
3350 | * This function terminates all the outstanding TxDs of the fifo | ||
3351 | */ | ||
3352 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
3353 | { | ||
3354 | void *txdlh; | ||
3355 | |||
3356 | for (;;) { | ||
3357 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
3358 | |||
3359 | if (txdlh == NULL) | ||
3360 | break; | ||
3361 | |||
3362 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
3363 | |||
3364 | if (fifo->txdl_term) { | ||
3365 | fifo->txdl_term(txdlh, | ||
3366 | VXGE_HW_TXDL_STATE_POSTED, | ||
3367 | fifo->channel.userdata); | ||
3368 | } | ||
3369 | |||
3370 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
3371 | } | ||
3372 | |||
3373 | return VXGE_HW_OK; | ||
3374 | } | ||
3375 | |||
3376 | /* | ||
3377 | * __vxge_hw_fifo_reset - Resets the fifo | ||
3378 | * This function resets the fifo during vpath reset operation | ||
3379 | */ | ||
3380 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
3381 | { | ||
3382 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3383 | |||
3384 | __vxge_hw_fifo_abort(fifo); | ||
3385 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
3386 | |||
3387 | return status; | ||
3388 | } | ||
3389 | |||
3390 | /* | ||
3391 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
3392 | * This function frees up the memory pool and removes the FIFO | ||
3393 | */ | ||
3394 | static enum vxge_hw_status | ||
3395 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
3396 | { | ||
3397 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
3398 | |||
3399 | __vxge_hw_fifo_abort(fifo); | ||
3400 | |||
3401 | if (fifo->mempool) | ||
3402 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
3403 | |||
3404 | vp->vpath->fifoh = NULL; | ||
3405 | |||
3406 | __vxge_hw_channel_free(&fifo->channel); | ||
3407 | |||
3408 | return VXGE_HW_OK; | ||
3409 | } | ||
3410 | |||
3411 | /* | ||
3412 | * __vxge_hw_fifo_mempool_item_alloc - TxD list block allocation | ||
3413 | * callback | ||
3414 | * This function is the callback passed to __vxge_hw_mempool_create to | ||
3415 | * create the memory pool for the TxD lists | ||
3416 | */ | ||
3417 | static void | ||
3418 | __vxge_hw_fifo_mempool_item_alloc( | ||
3419 | struct vxge_hw_mempool *mempoolh, | ||
3420 | u32 memblock_index, struct vxge_hw_mempool_dma *dma_object, | ||
3421 | u32 index, u32 is_last) | ||
3422 | { | ||
3423 | u32 memblock_item_idx; | ||
3424 | struct __vxge_hw_fifo_txdl_priv *txdl_priv; | ||
3425 | struct vxge_hw_fifo_txd *txdp = | ||
3426 | (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index]; | ||
3427 | struct __vxge_hw_fifo *fifo = | ||
3428 | (struct __vxge_hw_fifo *)mempoolh->userdata; | ||
3429 | void *memblock = mempoolh->memblocks_arr[memblock_index]; | ||
3430 | |||
3431 | vxge_assert(txdp); | ||
3432 | |||
3433 | txdp->host_control = (u64) (size_t) | ||
3434 | __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp, | ||
3435 | &memblock_item_idx); | ||
3436 | |||
3437 | txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); | ||
3438 | |||
3439 | vxge_assert(txdl_priv); | ||
3440 | |||
3441 | fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp; | ||
3442 | |||
3443 | /* pre-format HW's TxDL's private */ | ||
3444 | txdl_priv->dma_offset = (char *)txdp - (char *)memblock; | ||
3445 | txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; | ||
3446 | txdl_priv->dma_handle = dma_object->handle; | ||
3447 | txdl_priv->memblock = memblock; | ||
3448 | txdl_priv->first_txdp = txdp; | ||
3449 | txdl_priv->next_txdl_priv = NULL; | ||
3450 | txdl_priv->alloc_frags = 0; | ||
3451 | } | ||
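The interesting move above is the host_control field: the first TxD of each list stores the address of its private area, so the private data can later be recovered from the descriptor alone. A sketch of that back-pointer idea under the same layout assumption (the real lookup is done by __vxge_hw_fifo_txdl_priv(); this helper is hypothetical):

	/* Illustrative recovery of the per-TxDL private area from a
	 * TxD; mirrors the host_control assignment in the allocation
	 * callback above.
	 */
	static inline struct __vxge_hw_fifo_txdl_priv *
	example_priv_from_txd(struct vxge_hw_fifo_txd *txdp)
	{
		return (struct __vxge_hw_fifo_txdl_priv *)
			(size_t)txdp->host_control;
	}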
3452 | |||
3453 | /* | ||
3454 | * __vxge_hw_fifo_create - Create a FIFO | ||
3455 | * This function creates FIFO and initializes it. | ||
3456 | */ | ||
3457 | static enum vxge_hw_status | ||
3458 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | ||
3459 | struct vxge_hw_fifo_attr *attr) | ||
3460 | { | ||
3461 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3462 | struct __vxge_hw_fifo *fifo; | ||
3463 | struct vxge_hw_fifo_config *config; | ||
3464 | u32 txdl_size, txdl_per_memblock; | ||
3465 | struct vxge_hw_mempool_cbs fifo_mp_callback; | ||
3466 | struct __vxge_hw_virtualpath *vpath; | ||
3467 | |||
3468 | if ((vp == NULL) || (attr == NULL)) { | ||
3469 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3470 | goto exit; | ||
3471 | } | ||
3472 | vpath = vp->vpath; | ||
3473 | config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo; | ||
3474 | |||
3475 | txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd); | ||
3476 | |||
3477 | txdl_per_memblock = config->memblock_size / txdl_size; | ||
3478 | |||
3479 | fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp, | ||
3480 | VXGE_HW_CHANNEL_TYPE_FIFO, | ||
3481 | config->fifo_blocks * txdl_per_memblock, | ||
3482 | attr->per_txdl_space, attr->userdata); | ||
3483 | |||
3484 | if (fifo == NULL) { | ||
3485 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
3486 | goto exit; | ||
3487 | } | ||
3488 | |||
3489 | vpath->fifoh = fifo; | ||
3490 | fifo->nofl_db = vpath->nofl_db; | ||
3491 | |||
3492 | fifo->vp_id = vpath->vp_id; | ||
3493 | fifo->vp_reg = vpath->vp_reg; | ||
3494 | fifo->stats = &vpath->sw_stats->fifo_stats; | ||
3495 | |||
3496 | fifo->config = config; | ||
3497 | |||
3498 | /* apply "interrupts per txdl" attribute */ | ||
3499 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; | ||
3500 | fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; | ||
3501 | fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; | ||
3502 | |||
3503 | if (fifo->config->intr) | ||
3504 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; | ||
3505 | |||
3506 | fifo->no_snoop_bits = config->no_snoop_bits; | ||
3507 | |||
3508 | /* | ||
3509 | * FIFO memory management strategy: | ||
3510 | * | ||
3511 | * TxDL split into three independent parts: | ||
3512 | * - set of TxD's | ||
3513 | * - TxD HW private part | ||
3514 | * - driver private part | ||
3515 | * | ||
3516 | * Adaptive memory allocation is used, i.e. memory is allocated on | ||
3517 | * demand with a size that fits into one memory block. | ||
3518 | * One memory block may contain more than one TxDL. | ||
3519 | * | ||
3520 | * During "reserve" operations more memory can be allocated on demand | ||
3521 | * for example due to FIFO full condition. | ||
3522 | * | ||
3523 | * The pool of memory blocks never shrinks except in __vxge_hw_fifo_close | ||
3524 | * routine which will essentially stop the channel and free resources. | ||
3525 | */ | ||
3526 | |||
3527 | /* TxDL common private size == TxDL private + driver private */ | ||
3528 | fifo->priv_size = | ||
3529 | sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space; | ||
3530 | fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) / | ||
3531 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | ||
3532 | |||
3533 | fifo->per_txdl_space = attr->per_txdl_space; | ||
3534 | |||
3535 | /* store the txdl size and the number of TxDLs per memblock */ | ||
3536 | fifo->txdl_size = txdl_size; | ||
3537 | fifo->txdl_per_memblock = txdl_per_memblock; | ||
3538 | |||
3539 | fifo->txdl_term = attr->txdl_term; | ||
3540 | fifo->callback = attr->callback; | ||
3541 | |||
3542 | if (fifo->txdl_per_memblock == 0) { | ||
3543 | __vxge_hw_fifo_delete(vp); | ||
3544 | status = VXGE_HW_ERR_INVALID_BLOCK_SIZE; | ||
3545 | goto exit; | ||
3546 | } | ||
3547 | |||
3548 | fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; | ||
3549 | |||
3550 | fifo->mempool = | ||
3551 | __vxge_hw_mempool_create(vpath->hldev, | ||
3552 | fifo->config->memblock_size, | ||
3553 | fifo->txdl_size, | ||
3554 | fifo->priv_size, | ||
3555 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | ||
3556 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | ||
3557 | &fifo_mp_callback, | ||
3558 | fifo); | ||
3559 | |||
3560 | if (fifo->mempool == NULL) { | ||
3561 | __vxge_hw_fifo_delete(vp); | ||
3562 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
3563 | goto exit; | ||
3564 | } | ||
3565 | |||
3566 | status = __vxge_hw_channel_initialize(&fifo->channel); | ||
3567 | if (status != VXGE_HW_OK) { | ||
3568 | __vxge_hw_fifo_delete(vp); | ||
3569 | goto exit; | ||
3570 | } | ||
3571 | |||
3572 | vxge_assert(fifo->channel.reserve_ptr); | ||
3573 | exit: | ||
3574 | return status; | ||
3575 | } | ||
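The priv_size computation above uses the standard round-up-to-multiple idiom so each TxDL's private area starts on a cache-line boundary. Isolated, with illustrative numbers (a hypothetical helper, not part of the driver):

	/* Round size up to the next multiple of align (align > 0).
	 * E.g. example_round_up(100, 64) == 128.
	 */
	static inline u32 example_round_up(u32 size, u32 align)
	{
		return ((size + align - 1) / align) * align;
	}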
3576 | |||
3577 | /* | ||
3578 | * __vxge_hw_vpath_pci_read - Read the content of a given address | ||
3579 | * in the pci config space. | ||
3580 | * The read is performed through the vpath's PCI config access window. | ||
3581 | */ | ||
3582 | static enum vxge_hw_status | ||
3583 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, | ||
3584 | u32 phy_func_0, u32 offset, u32 *val) | ||
3585 | { | ||
3586 | u64 val64; | ||
3587 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3588 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | ||
3589 | |||
3590 | val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset); | ||
3591 | |||
3592 | if (phy_func_0) | ||
3593 | val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0; | ||
3594 | |||
3595 | writeq(val64, &vp_reg->pci_config_access_cfg1); | ||
3596 | wmb(); | ||
3597 | writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ, | ||
3598 | &vp_reg->pci_config_access_cfg2); | ||
3599 | wmb(); | ||
3600 | |||
3601 | status = __vxge_hw_device_register_poll( | ||
3602 | &vp_reg->pci_config_access_cfg2, | ||
3603 | VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3604 | |||
3605 | if (status != VXGE_HW_OK) | ||
3606 | goto exit; | ||
3607 | |||
3608 | val64 = readq(&vp_reg->pci_config_access_status); | ||
3609 | |||
3610 | if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) { | ||
3611 | status = VXGE_HW_FAIL; | ||
3612 | *val = 0; | ||
3613 | } else | ||
3614 | *val = (u32)vxge_bVALn(val64, 32, 32); | ||
3615 | exit: | ||
3616 | return status; | ||
3617 | } | ||
3618 | |||
3619 | /** | ||
3620 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. | ||
3621 | * @hldev: HW device. | ||
3622 | * @on_off: TRUE to turn flickering on, FALSE to turn it off | ||
3623 | * | ||
3624 | * Flicker the link LED. | ||
3625 | */ | ||
3626 | enum vxge_hw_status | ||
3627 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) | ||
3628 | { | ||
3629 | struct __vxge_hw_virtualpath *vpath; | ||
3630 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
3631 | enum vxge_hw_status status; | ||
3632 | |||
3633 | if (hldev == NULL) { | ||
3634 | status = VXGE_HW_ERR_INVALID_DEVICE; | ||
3635 | goto exit; | ||
3636 | } | ||
3637 | |||
3638 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
3639 | |||
3640 | data0 = on_off; | ||
3641 | status = vxge_hw_vpath_fw_api(vpath, | ||
3642 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, | ||
3643 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
3644 | 0, &data0, &data1, &steer_ctrl); | ||
3645 | exit: | ||
3646 | return status; | ||
3647 | } | ||
3648 | |||
3649 | /* | ||
3650 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables | ||
3651 | */ | ||
3652 | enum vxge_hw_status | ||
3653 | __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, | ||
3654 | u32 action, u32 rts_table, u32 offset, | ||
3655 | u64 *data0, u64 *data1) | ||
3656 | { | ||
3657 | enum vxge_hw_status status; | ||
3658 | u64 steer_ctrl = 0; | ||
3659 | |||
3660 | if (vp == NULL) { | ||
3661 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3662 | goto exit; | ||
3663 | } | ||
3664 | |||
3665 | if ((rts_table == | ||
3666 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || | ||
3667 | (rts_table == | ||
3668 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || | ||
3669 | (rts_table == | ||
3670 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || | ||
3671 | (rts_table == | ||
3672 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { | ||
3673 | steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; | ||
3674 | } | ||
3675 | |||
3676 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, | ||
3677 | data0, data1, &steer_ctrl); | ||
3678 | if (status != VXGE_HW_OK) | ||
3679 | goto exit; | ||
3680 | |||
3681 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && | ||
3682 | (rts_table != | ||
3683 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) | ||
3684 | *data1 = 0; | ||
3685 | exit: | ||
3686 | return status; | ||
3687 | } | ||
3688 | |||
3689 | /* | ||
3690 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables | ||
3691 | */ | ||
3692 | enum vxge_hw_status | ||
3693 | __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, | ||
3694 | u32 rts_table, u32 offset, u64 steer_data0, | ||
3695 | u64 steer_data1) | ||
3696 | { | ||
3697 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
3698 | enum vxge_hw_status status; | ||
3699 | |||
3700 | if (vp == NULL) { | ||
3701 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3702 | goto exit; | ||
3703 | } | ||
3704 | |||
3705 | data0 = steer_data0; | ||
3706 | |||
3707 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | ||
3708 | (rts_table == | ||
3709 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) | ||
3710 | data1 = steer_data1; | ||
3711 | |||
3712 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, | ||
3713 | &data0, &data1, &steer_ctrl); | ||
3714 | exit: | ||
3715 | return status; | ||
3716 | } | ||
3717 | |||
3718 | /* | ||
3719 | * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing. | ||
3720 | */ | ||
3721 | enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | ||
3722 | struct __vxge_hw_vpath_handle *vp, | ||
3723 | enum vxge_hw_rth_algoritms algorithm, | ||
3724 | struct vxge_hw_rth_hash_types *hash_type, | ||
3725 | u16 bucket_size) | ||
3726 | { | ||
3727 | u64 data0, data1; | ||
3728 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3729 | |||
3730 | if (vp == NULL) { | ||
3731 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3732 | goto exit; | ||
3733 | } | ||
3734 | |||
3735 | status = __vxge_hw_vpath_rts_table_get(vp, | ||
3736 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | ||
3737 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | ||
3738 | 0, &data0, &data1); | ||
3739 | if (status != VXGE_HW_OK) | ||
3740 | goto exit; | ||
3741 | |||
3742 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | | ||
3743 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); | ||
3744 | |||
3745 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN | | ||
3746 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) | | ||
3747 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm); | ||
3748 | |||
3749 | if (hash_type->hash_type_tcpipv4_en) | ||
3750 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN; | ||
3751 | |||
3752 | if (hash_type->hash_type_ipv4_en) | ||
3753 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN; | ||
3754 | |||
3755 | if (hash_type->hash_type_tcpipv6_en) | ||
3756 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN; | ||
3757 | |||
3758 | if (hash_type->hash_type_ipv6_en) | ||
3759 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN; | ||
3760 | |||
3761 | if (hash_type->hash_type_tcpipv6ex_en) | ||
3762 | data0 |= | ||
3763 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN; | ||
3764 | |||
3765 | if (hash_type->hash_type_ipv6ex_en) | ||
3766 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN; | ||
3767 | |||
3768 | if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0)) | ||
3769 | data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | ||
3770 | else | ||
3771 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | ||
3772 | |||
3773 | status = __vxge_hw_vpath_rts_table_set(vp, | ||
3774 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY, | ||
3775 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | ||
3776 | 0, data0, 0); | ||
3777 | exit: | ||
3778 | return status; | ||
3779 | } | ||
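A caller typically performs this read-modify-write once per vpath after open. A hedged sketch, assuming vp is a handle returned by vxge_hw_vpath_open() and that RTH_ALG_JENKINS is the desired algorithm; hashing only TCP/IPv4 flows and the bucket size of 8 (a 2^8-entry table) are illustrative choices:

	static enum vxge_hw_status
	example_enable_rth(struct __vxge_hw_vpath_handle *vp)
	{
		/* hash only TCP/IPv4 flows in this illustration */
		struct vxge_hw_rth_hash_types hash_types = {
			.hash_type_tcpipv4_en = 1,
		};

		return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
						 &hash_types, 8);
	}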
3780 | |||
3781 | static void | ||
3782 | vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, | ||
3783 | u16 flag, u8 *itable) | ||
3784 | { | ||
3785 | switch (flag) { | ||
3786 | case 1: | ||
3787 | *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)| | ||
3788 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | | ||
3789 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( | ||
3790 | itable[j]); | ||
| break; /* each call fills exactly one of the four slots */ | ||
3791 | case 2: | ||
3792 | *data0 |= | ||
3793 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| | ||
3794 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | | ||
3795 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( | ||
3796 | itable[j]); | ||
| break; | ||
3797 | case 3: | ||
3798 | *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| | ||
3799 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | | ||
3800 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( | ||
3801 | itable[j]); | ||
| break; | ||
3802 | case 4: | ||
3803 | *data1 |= | ||
3804 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| | ||
3805 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN | | ||
3806 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA( | ||
3807 | itable[j]); | ||
| break; | ||
3808 | default: | ||
3809 | return; | ||
3810 | } | ||
3811 | } | ||
3812 | /* | ||
3813 | * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT). | ||
3814 | */ | ||
3815 | enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( | ||
3816 | struct __vxge_hw_vpath_handle **vpath_handles, | ||
3817 | u32 vpath_count, | ||
3818 | u8 *mtable, | ||
3819 | u8 *itable, | ||
3820 | u32 itable_size) | ||
3821 | { | ||
3822 | u32 i, j, action, rts_table; | ||
3823 | u64 data0; | ||
3824 | u64 data1; | ||
3825 | u32 max_entries; | ||
3826 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3827 | struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; | ||
3828 | |||
3829 | if (vp == NULL) { | ||
3830 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
3831 | goto exit; | ||
3832 | } | ||
3833 | |||
3834 | max_entries = (((u32)1) << itable_size); | ||
3835 | |||
3836 | if (vp->vpath->hldev->config.rth_it_type | ||
3837 | == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { | ||
3838 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | ||
3839 | rts_table = | ||
3840 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; | ||
3841 | |||
3842 | for (j = 0; j < max_entries; j++) { | ||
3843 | |||
3844 | data1 = 0; | ||
3845 | |||
3846 | data0 = | ||
3847 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | ||
3848 | itable[j]); | ||
3849 | |||
3850 | status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], | ||
3851 | action, rts_table, j, data0, data1); | ||
3852 | |||
3853 | if (status != VXGE_HW_OK) | ||
3854 | goto exit; | ||
3855 | } | ||
3856 | |||
3857 | for (j = 0; j < max_entries; j++) { | ||
3858 | |||
3859 | data1 = 0; | ||
3860 | |||
3861 | data0 = | ||
3862 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | | ||
3863 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | ||
3864 | itable[j]); | ||
3865 | |||
3866 | status = __vxge_hw_vpath_rts_table_set( | ||
3867 | vpath_handles[mtable[itable[j]]], action, | ||
3868 | rts_table, j, data0, data1); | ||
3869 | |||
3870 | if (status != VXGE_HW_OK) | ||
3871 | goto exit; | ||
3872 | } | ||
3873 | } else { | ||
3874 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | ||
3875 | rts_table = | ||
3876 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; | ||
3877 | for (i = 0; i < vpath_count; i++) { | ||
3878 | |||
3879 | for (j = 0; j < max_entries;) { | ||
3880 | |||
3881 | data0 = 0; | ||
3882 | data1 = 0; | ||
3883 | |||
3884 | while (j < max_entries) { | ||
3885 | if (mtable[itable[j]] != i) { | ||
3886 | j++; | ||
3887 | continue; | ||
3888 | } | ||
3889 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3890 | &data0, &data1, 1, itable); | ||
3891 | j++; | ||
3892 | break; | ||
3893 | } | ||
3894 | |||
3895 | while (j < max_entries) { | ||
3896 | if (mtable[itable[j]] != i) { | ||
3897 | j++; | ||
3898 | continue; | ||
3899 | } | ||
3900 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3901 | &data0, &data1, 2, itable); | ||
3902 | j++; | ||
3903 | break; | ||
3904 | } | ||
3905 | |||
3906 | while (j < max_entries) { | ||
3907 | if (mtable[itable[j]] != i) { | ||
3908 | j++; | ||
3909 | continue; | ||
3910 | } | ||
3911 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3912 | &data0, &data1, 3, itable); | ||
3913 | j++; | ||
3914 | break; | ||
3915 | } | ||
3916 | |||
3917 | while (j < max_entries) { | ||
3918 | if (mtable[itable[j]] != i) { | ||
3919 | j++; | ||
3920 | continue; | ||
3921 | } | ||
3922 | vxge_hw_rts_rth_data0_data1_get(j, | ||
3923 | &data0, &data1, 4, itable); | ||
3924 | j++; | ||
3925 | break; | ||
3926 | } | ||
3927 | |||
3928 | if (data0 != 0) { | ||
3929 | status = __vxge_hw_vpath_rts_table_set( | ||
3930 | vpath_handles[i], | ||
3931 | action, rts_table, | ||
3932 | 0, data0, data1); | ||
3933 | |||
3934 | if (status != VXGE_HW_OK) | ||
3935 | goto exit; | ||
3936 | } | ||
3937 | } | ||
3938 | } | ||
3939 | } | ||
3940 | exit: | ||
3941 | return status; | ||
3942 | } | ||
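The four near-identical while loops exist because one 64-bit steering write can carry four indirection-table entries: ITEM0 and ITEM1 of data0 plus ITEM0 and ITEM1 of data1, selected by the flag argument. In isolation, packing four consecutive entries owned by the same vpath into one write would look like this (a hypothetical helper; buckets j0..j3 are assumed to all map to the target vpath):

	static void example_pack_four_buckets(u64 *data0, u64 *data1,
					      u32 j0, u32 j1, u32 j2,
					      u32 j3, u8 *itable)
	{
		*data0 = 0;
		*data1 = 0;
		vxge_hw_rts_rth_data0_data1_get(j0, data0, data1, 1, itable);
		vxge_hw_rts_rth_data0_data1_get(j1, data0, data1, 2, itable);
		vxge_hw_rts_rth_data0_data1_get(j2, data0, data1, 3, itable);
		vxge_hw_rts_rth_data0_data1_get(j3, data0, data1, 4, itable);
		/* the pair is then written via __vxge_hw_vpath_rts_table_set() */
	}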
3943 | |||
3944 | /** | ||
3945 | * vxge_hw_vpath_check_leak - Check for RxD leak | ||
3946 | * @ring: Handle to the ring object used for receive | ||
3947 | * | ||
3948 | * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to | ||
3949 | * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. | ||
3950 | * Returns: VXGE_HW_FAIL, if leak has occurred. | ||
3951 | * | ||
3952 | */ | ||
3953 | enum vxge_hw_status | ||
3954 | vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) | ||
3955 | { | ||
3956 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3957 | u64 rxd_new_count, rxd_spat; | ||
3958 | |||
3959 | if (ring == NULL) | ||
3960 | return status; | ||
3961 | |||
3962 | rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); | ||
3963 | rxd_spat = readq(&ring->vp_reg->prc_cfg6); | ||
3964 | rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(rxd_spat); | ||
3965 | |||
3966 | if (rxd_new_count >= rxd_spat) | ||
3967 | status = VXGE_HW_FAIL; | ||
3968 | |||
3969 | return status; | ||
3970 | } | ||
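A hedged sketch of how a caller might use this check from its ring service path; the warn-only recovery policy here is illustrative, not the driver's actual behaviour:

	static void example_ring_health_check(struct __vxge_hw_ring *ring)
	{
		/* doorbell count at or above the SPAT threshold
		 * means RxDs have leaked
		 */
		if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
			pr_warn("vxge: RxD leak detected on ring\n");
	}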
3971 | |||
3972 | /* | ||
3973 | * __vxge_hw_vpath_mgmt_read | ||
3974 | * This routine reads the vpath_mgmt registers | ||
3975 | */ | ||
3976 | static enum vxge_hw_status | ||
3977 | __vxge_hw_vpath_mgmt_read( | ||
3978 | struct __vxge_hw_device *hldev, | ||
3979 | struct __vxge_hw_virtualpath *vpath) | ||
3980 | { | ||
3981 | u32 i, mtu = 0, max_pyld = 0; | ||
3982 | u64 val64; | ||
3983 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3984 | |||
3985 | for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { | ||
3986 | |||
3987 | val64 = readq(&vpath->vpmgmt_reg-> | ||
3988 | rxmac_cfg0_port_vpmgmt_clone[i]); | ||
3989 | max_pyld = | ||
3990 | (u32) | ||
3991 | VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN | ||
3992 | (val64); | ||
3993 | if (mtu < max_pyld) | ||
3994 | mtu = max_pyld; | ||
3995 | } | ||
3996 | |||
3997 | vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
3998 | |||
3999 | val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); | ||
4000 | |||
4001 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
4002 | if (val64 & vxge_mBIT(i)) | ||
4003 | vpath->vsport_number = i; | ||
4004 | } | ||
4005 | |||
4006 | val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); | ||
4007 | |||
4008 | if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) | ||
4009 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); | ||
4010 | else | ||
4011 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); | ||
4012 | |||
4013 | return status; | ||
4014 | } | ||
4015 | |||
4016 | /* | ||
4017 | * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed | ||
4018 | * This routine checks the vpath_rst_in_prog register to see if the | ||
4019 | * adapter has completed the reset process for the vpath | ||
4020 | */ | ||
4021 | static enum vxge_hw_status | ||
4022 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | ||
4023 | { | ||
4024 | enum vxge_hw_status status; | ||
4025 | |||
4026 | status = __vxge_hw_device_register_poll( | ||
4027 | &vpath->hldev->common_reg->vpath_rst_in_prog, | ||
4028 | VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( | ||
4029 | 1 << (16 - vpath->vp_id)), | ||
4030 | vpath->hldev->config.device_poll_millis); | ||
4031 | |||
4032 | return status; | ||
4033 | } | ||
4034 | |||
4035 | /* | ||
4036 | * __vxge_hw_vpath_reset | ||
4037 | * This routine resets the vpath on the device | ||
4038 | */ | ||
4039 | static enum vxge_hw_status | ||
4040 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4041 | { | ||
4042 | u64 val64; | ||
4043 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4044 | |||
4045 | val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); | ||
4046 | |||
4047 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | ||
4048 | &hldev->common_reg->cmn_rsthdlr_cfg0); | ||
4049 | |||
4050 | return status; | ||
4051 | } | ||
4052 | |||
4053 | /* | ||
4054 | * __vxge_hw_vpath_sw_reset | ||
4055 | * This routine resets the vpath structures | ||
4056 | */ | ||
4057 | static enum vxge_hw_status | ||
4058 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4059 | { | ||
4060 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4061 | struct __vxge_hw_virtualpath *vpath; | ||
4062 | |||
4063 | vpath = &hldev->virtual_paths[vp_id]; | ||
4064 | |||
4065 | if (vpath->ringh) { | ||
4066 | status = __vxge_hw_ring_reset(vpath->ringh); | ||
4067 | if (status != VXGE_HW_OK) | ||
4068 | goto exit; | ||
4069 | } | ||
4070 | |||
4071 | if (vpath->fifoh) | ||
4072 | status = __vxge_hw_fifo_reset(vpath->fifoh); | ||
4073 | exit: | ||
4074 | return status; | ||
4075 | } | ||
4076 | |||
4077 | /* | ||
4078 | * __vxge_hw_vpath_prc_configure | ||
4079 | * This routine configures the prc registers of the virtual path using | ||
4080 | * the config passed | ||
4081 | */ | ||
4082 | static void | ||
4083 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4084 | { | ||
4085 | u64 val64; | ||
4086 | struct __vxge_hw_virtualpath *vpath; | ||
4087 | struct vxge_hw_vp_config *vp_config; | ||
4088 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4089 | |||
4090 | vpath = &hldev->virtual_paths[vp_id]; | ||
4091 | vp_reg = vpath->vp_reg; | ||
4092 | vp_config = vpath->vp_config; | ||
4093 | |||
4094 | if (vp_config->ring.enable == VXGE_HW_RING_DISABLE) | ||
4095 | return; | ||
4096 | |||
4097 | val64 = readq(&vp_reg->prc_cfg1); | ||
4098 | val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; | ||
4099 | writeq(val64, &vp_reg->prc_cfg1); | ||
4100 | |||
4101 | val64 = readq(&vpath->vp_reg->prc_cfg6); | ||
4102 | val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; | ||
4103 | writeq(val64, &vpath->vp_reg->prc_cfg6); | ||
4104 | |||
4105 | val64 = readq(&vp_reg->prc_cfg7); | ||
4106 | |||
4107 | if (vpath->vp_config->ring.scatter_mode != | ||
4108 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { | ||
4109 | |||
4110 | val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); | ||
4111 | |||
4112 | switch (vpath->vp_config->ring.scatter_mode) { | ||
4113 | case VXGE_HW_RING_SCATTER_MODE_A: | ||
4114 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
4115 | VXGE_HW_PRC_CFG7_SCATTER_MODE_A); | ||
4116 | break; | ||
4117 | case VXGE_HW_RING_SCATTER_MODE_B: | ||
4118 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
4119 | VXGE_HW_PRC_CFG7_SCATTER_MODE_B); | ||
4120 | break; | ||
4121 | case VXGE_HW_RING_SCATTER_MODE_C: | ||
4122 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | ||
4123 | VXGE_HW_PRC_CFG7_SCATTER_MODE_C); | ||
4124 | break; | ||
4125 | } | ||
4126 | } | ||
4127 | |||
4128 | writeq(val64, &vp_reg->prc_cfg7); | ||
4129 | |||
4130 | writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( | ||
4131 | __vxge_hw_ring_first_block_address_get( | ||
4132 | vpath->ringh) >> 3), &vp_reg->prc_cfg5); | ||
4133 | |||
4134 | val64 = readq(&vp_reg->prc_cfg4); | ||
4135 | val64 |= VXGE_HW_PRC_CFG4_IN_SVC; | ||
4136 | val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); | ||
4137 | |||
4138 | val64 |= VXGE_HW_PRC_CFG4_RING_MODE( | ||
4139 | VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); | ||
4140 | |||
4141 | if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) | ||
4142 | val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; | ||
4143 | else | ||
4144 | val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; | ||
4145 | |||
4146 | writeq(val64, &vp_reg->prc_cfg4); | ||
4147 | } | ||
4148 | |||
4149 | /* | ||
4150 | * __vxge_hw_vpath_kdfc_configure | ||
4151 | * This routine configures the kdfc registers of the virtual path using | ||
4152 | * the config passed | ||
4153 | */ | ||
4154 | static enum vxge_hw_status | ||
4155 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4156 | { | ||
4157 | u64 val64; | ||
4158 | u64 vpath_stride; | ||
4159 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4160 | struct __vxge_hw_virtualpath *vpath; | ||
4161 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4162 | |||
4163 | vpath = &hldev->virtual_paths[vp_id]; | ||
4164 | vp_reg = vpath->vp_reg; | ||
4165 | status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); | ||
4166 | |||
4167 | if (status != VXGE_HW_OK) | ||
4168 | goto exit; | ||
4169 | |||
4170 | val64 = readq(&vp_reg->kdfc_drbl_triplet_total); | ||
4171 | |||
4172 | vpath->max_kdfc_db = | ||
4173 | (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( | ||
4174 | val64+1)/2; | ||
4175 | |||
4176 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
4177 | |||
4178 | vpath->max_nofl_db = vpath->max_kdfc_db; | ||
4179 | |||
4180 | if (vpath->max_nofl_db < | ||
4181 | ((vpath->vp_config->fifo.memblock_size / | ||
4182 | (vpath->vp_config->fifo.max_frags * | ||
4183 | sizeof(struct vxge_hw_fifo_txd))) * | ||
4184 | vpath->vp_config->fifo.fifo_blocks)) { | ||
4185 | |||
4186 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
4187 | } | ||
4188 | val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( | ||
4189 | (vpath->max_nofl_db*2)-1); | ||
4190 | } | ||
4191 | |||
4192 | writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); | ||
4193 | |||
4194 | writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, | ||
4195 | &vp_reg->kdfc_fifo_trpl_ctrl); | ||
4196 | |||
4197 | val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); | ||
4198 | |||
4199 | val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | | ||
4200 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); | ||
4201 | |||
4202 | val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( | ||
4203 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | | ||
4204 | #ifndef __BIG_ENDIAN | ||
4205 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | | ||
4206 | #endif | ||
4207 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); | ||
4208 | |||
4209 | writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); | ||
4210 | writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); | ||
4211 | wmb(); | ||
4212 | vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); | ||
4213 | |||
4214 | vpath->nofl_db = | ||
4215 | (struct __vxge_hw_non_offload_db_wrapper __iomem *) | ||
4216 | (hldev->kdfc + (vp_id * | ||
4217 | VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( | ||
4218 | vpath_stride))); | ||
4219 | exit: | ||
4220 | return status; | ||
4221 | } | ||
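The fifo-blocks check above guards against configuring more TxDLs than the vpath has non-offload doorbells for. With illustrative numbers (4 KB memblocks, 16 frags per TxDL, and a TxD of four 64-bit words, i.e. 32 bytes): 4096 / (16 * 32) = 8 TxDLs per memblock, so 8 fifo blocks require 64 doorbells. A hypothetical helper making that arithmetic explicit:

	static u32 example_required_nofl_db(u32 memblock_size, u32 max_frags,
					    u32 fifo_blocks)
	{
		u32 txdls_per_memblock = memblock_size /
			(max_frags * sizeof(struct vxge_hw_fifo_txd));

		return txdls_per_memblock * fifo_blocks;
	}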
4222 | |||
4223 | /* | ||
4224 | * __vxge_hw_vpath_mac_configure | ||
4225 | * This routine configures the mac of the virtual path using the config passed | ||
4226 | */ | ||
4227 | static enum vxge_hw_status | ||
4228 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4229 | { | ||
4230 | u64 val64; | ||
4231 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4232 | struct __vxge_hw_virtualpath *vpath; | ||
4233 | struct vxge_hw_vp_config *vp_config; | ||
4234 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4235 | |||
4236 | vpath = &hldev->virtual_paths[vp_id]; | ||
4237 | vp_reg = vpath->vp_reg; | ||
4238 | vp_config = vpath->vp_config; | ||
4239 | |||
4240 | writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( | ||
4241 | vpath->vsport_number), &vp_reg->xmac_vsport_choice); | ||
4242 | |||
4243 | if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
4244 | |||
4245 | val64 = readq(&vp_reg->xmac_rpa_vcfg); | ||
4246 | |||
4247 | if (vp_config->rpa_strip_vlan_tag != | ||
4248 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { | ||
4249 | if (vp_config->rpa_strip_vlan_tag) | ||
4250 | val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | ||
4251 | else | ||
4252 | val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | ||
4253 | } | ||
4254 | |||
4255 | writeq(val64, &vp_reg->xmac_rpa_vcfg); | ||
4256 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
4257 | |||
4258 | if (vp_config->mtu != | ||
4259 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { | ||
4260 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
4261 | if ((vp_config->mtu + | ||
4262 | VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) | ||
4263 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | ||
4264 | vp_config->mtu + | ||
4265 | VXGE_HW_MAC_HEADER_MAX_SIZE); | ||
4266 | else | ||
4267 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | ||
4268 | vpath->max_mtu); | ||
4269 | } | ||
4270 | |||
4271 | writeq(val64, &vp_reg->rxmac_vcfg0); | ||
4272 | |||
4273 | val64 = readq(&vp_reg->rxmac_vcfg1); | ||
4274 | |||
4275 | val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | | ||
4276 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); | ||
4277 | |||
4278 | if (hldev->config.rth_it_type == | ||
4279 | VXGE_HW_RTH_IT_TYPE_MULTI_IT) { | ||
4280 | val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( | ||
4281 | 0x2) | | ||
4282 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; | ||
4283 | } | ||
4284 | |||
4285 | writeq(val64, &vp_reg->rxmac_vcfg1); | ||
4286 | } | ||
4287 | return status; | ||
4288 | } | ||
4289 | |||
4290 | /* | ||
4291 | * __vxge_hw_vpath_tim_configure | ||
4292 | * This routine configures the tim registers of the virtual path using | ||
4293 | * the config passed | ||
4294 | */ | ||
4295 | static enum vxge_hw_status | ||
4296 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4297 | { | ||
4298 | u64 val64; | ||
4299 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4300 | struct __vxge_hw_virtualpath *vpath; | ||
4301 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4302 | struct vxge_hw_vp_config *config; | ||
4303 | |||
4304 | vpath = &hldev->virtual_paths[vp_id]; | ||
4305 | vp_reg = vpath->vp_reg; | ||
4306 | config = vpath->vp_config; | ||
4307 | |||
4308 | writeq(0, &vp_reg->tim_dest_addr); | ||
4309 | writeq(0, &vp_reg->tim_vpath_map); | ||
4310 | writeq(0, &vp_reg->tim_bitmap); | ||
4311 | writeq(0, &vp_reg->tim_remap); | ||
4312 | |||
4313 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | ||
4314 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | ||
4315 | (vp_id * VXGE_HW_MAX_INTR_PER_VP) + | ||
4316 | VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); | ||
4317 | |||
4318 | val64 = readq(&vp_reg->tim_pci_cfg); | ||
4319 | val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; | ||
4320 | writeq(val64, &vp_reg->tim_pci_cfg); | ||
4321 | |||
4322 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
4323 | |||
4324 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4325 | |||
4326 | if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4327 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
4328 | 0x3ffffff); | ||
4329 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
4330 | config->tti.btimer_val); | ||
4331 | } | ||
4332 | |||
4333 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | ||
4334 | |||
4335 | if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4336 | if (config->tti.timer_ac_en) | ||
4337 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
4338 | else | ||
4339 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
4340 | } | ||
4341 | |||
4342 | if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4343 | if (config->tti.timer_ci_en) | ||
4344 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
4345 | else | ||
4346 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
4347 | } | ||
4348 | |||
4349 | if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4350 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | ||
4351 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | ||
4352 | config->tti.urange_a); | ||
4353 | } | ||
4354 | |||
4355 | if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4356 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | ||
4357 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | ||
4358 | config->tti.urange_b); | ||
4359 | } | ||
4360 | |||
4361 | if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4362 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | ||
4363 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | ||
4364 | config->tti.urange_c); | ||
4365 | } | ||
4366 | |||
4367 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4368 | vpath->tim_tti_cfg1_saved = val64; | ||
4369 | |||
4370 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4371 | |||
4372 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4373 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | ||
4374 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | ||
4375 | config->tti.uec_a); | ||
4376 | } | ||
4377 | |||
4378 | if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4379 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | ||
4380 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | ||
4381 | config->tti.uec_b); | ||
4382 | } | ||
4383 | |||
4384 | if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4385 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | ||
4386 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | ||
4387 | config->tti.uec_c); | ||
4388 | } | ||
4389 | |||
4390 | if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4391 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | ||
4392 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | ||
4393 | config->tti.uec_d); | ||
4394 | } | ||
4395 | |||
4396 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4397 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4398 | |||
4399 | if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4400 | if (config->tti.timer_ri_en) | ||
4401 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4402 | else | ||
4403 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4404 | } | ||
4405 | |||
4406 | if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4407 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4408 | 0x3ffffff); | ||
4409 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4410 | config->tti.rtimer_val); | ||
4411 | } | ||
4412 | |||
4413 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4414 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | ||
4415 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); | ||
4416 | } | ||
4417 | |||
4418 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4419 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4420 | 0x3ffffff); | ||
4421 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4422 | config->tti.ltimer_val); | ||
4423 | } | ||
4424 | |||
4425 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
4426 | vpath->tim_tti_cfg3_saved = val64; | ||
4427 | } | ||
4428 | |||
4429 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
4430 | |||
4431 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4432 | |||
4433 | if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4434 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
4435 | 0x3ffffff); | ||
4436 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | ||
4437 | config->rti.btimer_val); | ||
4438 | } | ||
4439 | |||
4440 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | ||
4441 | |||
4442 | if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4443 | if (config->rti.timer_ac_en) | ||
4444 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
4445 | else | ||
4446 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | ||
4447 | } | ||
4448 | |||
4449 | if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4450 | if (config->rti.timer_ci_en) | ||
4451 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
4452 | else | ||
4453 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
4454 | } | ||
4455 | |||
4456 | if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4457 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | ||
4458 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | ||
4459 | config->rti.urange_a); | ||
4460 | } | ||
4461 | |||
4462 | if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4463 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | ||
4464 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | ||
4465 | config->rti.urange_b); | ||
4466 | } | ||
4467 | |||
4468 | if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4469 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | ||
4470 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | ||
4471 | config->rti.urange_c); | ||
4472 | } | ||
4473 | |||
4474 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4475 | vpath->tim_rti_cfg1_saved = val64; | ||
4476 | |||
4477 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4478 | |||
4479 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4480 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | ||
4481 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | ||
4482 | config->rti.uec_a); | ||
4483 | } | ||
4484 | |||
4485 | if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4486 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | ||
4487 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | ||
4488 | config->rti.uec_b); | ||
4489 | } | ||
4490 | |||
4491 | if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4492 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | ||
4493 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | ||
4494 | config->rti.uec_c); | ||
4495 | } | ||
4496 | |||
4497 | if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4498 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | ||
4499 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | ||
4500 | config->rti.uec_d); | ||
4501 | } | ||
4502 | |||
4503 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4504 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4505 | |||
4506 | if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4507 | if (config->rti.timer_ri_en) | ||
4508 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4509 | else | ||
4510 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | ||
4511 | } | ||
4512 | |||
4513 | if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4514 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4515 | 0x3ffffff); | ||
4516 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | ||
4517 | config->rti.rtimer_val); | ||
4518 | } | ||
4519 | |||
4520 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4521 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | ||
4522 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); | ||
4523 | } | ||
4524 | |||
4525 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | ||
4526 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4527 | 0x3ffffff); | ||
4528 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | ||
4529 | config->rti.ltimer_val); | ||
4530 | } | ||
4531 | |||
4532 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
4533 | vpath->tim_rti_cfg3_saved = val64; | ||
4534 | } | ||
4535 | |||
4536 | val64 = 0; | ||
4537 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4538 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4539 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); | ||
4540 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4541 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4542 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); | ||
4543 | |||
4544 | val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); | ||
4545 | val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); | ||
4546 | val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); | ||
4547 | writeq(val64, &vp_reg->tim_wrkld_clc); | ||
4548 | |||
4549 | return status; | ||
4550 | } | ||
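Every field update above follows one pattern: leave the field alone if the config still holds VXGE_HW_USE_FLASH_DEFAULT, otherwise clear it by applying its encode macro to an all-ones mask and OR in the new value. A hypothetical macro capturing that pattern (not part of the driver):

	#define EXAMPLE_TIM_FIELD_SET(val64, encode, mask, cfg)		\
	do {								\
		if ((cfg) != VXGE_HW_USE_FLASH_DEFAULT) {		\
			(val64) &= ~encode(mask);			\
			(val64) |= encode(cfg);				\
		}							\
	} while (0)

	/* e.g.:
	 * EXAMPLE_TIM_FIELD_SET(val64, VXGE_HW_TIM_CFG1_INT_NUM_URNG_A,
	 *			 0x3f, config->tti.urange_a);
	 */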
4551 | |||
4552 | /* | ||
4553 | * __vxge_hw_vpath_initialize | ||
4554 | * This routine is the final phase of init which initializes the | ||
4555 | * registers of the vpath using the configuration passed. | ||
4556 | */ | ||
4557 | static enum vxge_hw_status | ||
4558 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4559 | { | ||
4560 | u64 val64; | ||
4561 | u32 val32; | ||
4562 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4563 | struct __vxge_hw_virtualpath *vpath; | ||
4564 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4565 | |||
4566 | vpath = &hldev->virtual_paths[vp_id]; | ||
4567 | |||
4568 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | ||
4569 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | ||
4570 | goto exit; | ||
4571 | } | ||
4572 | vp_reg = vpath->vp_reg; | ||
4573 | |||
4574 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | ||
4575 | if (status != VXGE_HW_OK) | ||
4576 | goto exit; | ||
4577 | |||
4578 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | ||
4579 | if (status != VXGE_HW_OK) | ||
4580 | goto exit; | ||
4581 | |||
4582 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | ||
4583 | if (status != VXGE_HW_OK) | ||
4584 | goto exit; | ||
4585 | |||
4586 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | ||
4587 | if (status != VXGE_HW_OK) | ||
4588 | goto exit; | ||
4589 | |||
4590 | val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); | ||
4591 | |||
4592 | /* Get MRRS value from device control */ | ||
4593 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | ||
4594 | if (status == VXGE_HW_OK) { | ||
4595 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | ||
4596 | val64 &= | ||
4597 | ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); | ||
4598 | val64 |= | ||
4599 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); | ||
4600 | |||
4601 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; | ||
4602 | } | ||
4603 | |||
4604 | val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); | ||
4605 | val64 |= | ||
4606 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( | ||
4607 | VXGE_HW_MAX_PAYLOAD_SIZE_512); | ||
4608 | |||
4609 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; | ||
4610 | writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); | ||
4611 | |||
4612 | exit: | ||
4613 | return status; | ||
4614 | } | ||
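The PCI read at offset 0x78 above fetches the Device Control register of this adapter's PCIe capability; bits 14:12 encode the Max_Read_Request_Size as 128 << n bytes, and that three-bit field is what gets programmed into FB_FILL_THRESH. A hedged decoding sketch (the helper name is hypothetical):

	static u32 example_mrrs_bytes(u32 devctl)
	{
		/* VXGE_HW_PCI_EXP_DEVCTL_READRQ masks bits 14:12 */
		u32 n = (devctl & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;

		return 128U << n;	/* n = 0 -> 128 bytes, n = 5 -> 4096 */
	}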
4615 | |||
4616 | /* | ||
4617 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4618 | * This routine closes all channels it opened and frees up memory. | ||
4619 | */ | ||
4620 | static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4621 | { | ||
4622 | struct __vxge_hw_virtualpath *vpath; | ||
4623 | |||
4624 | vpath = &hldev->virtual_paths[vp_id]; | ||
4625 | |||
4626 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4627 | goto exit; | ||
4628 | |||
4629 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4630 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4631 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4632 | |||
4633 | /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will | ||
4634 | * work after the interface is brought down. | ||
4635 | */ | ||
4636 | spin_lock(&vpath->lock); | ||
4637 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4638 | spin_unlock(&vpath->lock); | ||
4639 | |||
4640 | vpath->vpmgmt_reg = NULL; | ||
4641 | vpath->nofl_db = NULL; | ||
4642 | vpath->max_mtu = 0; | ||
4643 | vpath->vsport_number = 0; | ||
4644 | vpath->max_kdfc_db = 0; | ||
4645 | vpath->max_nofl_db = 0; | ||
4646 | vpath->ringh = NULL; | ||
4647 | vpath->fifoh = NULL; | ||
4648 | INIT_LIST_HEAD(&vpath->vpath_handles); | ||
4649 | vpath->stats_block = NULL; | ||
4650 | vpath->hw_stats = NULL; | ||
4651 | vpath->hw_stats_sav = NULL; | ||
4652 | vpath->sw_stats = NULL; | ||
4653 | |||
4654 | exit: | ||
4655 | return; | ||
4656 | } | ||
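[Editorial sketch] Clearing vp_open under vpath->lock is the synchronization point against concurrent users of the handle; a reader testing the flag would take the same lock. An illustrative reader-side check, not a function in this file:

int vp_is_open;

spin_lock(&vpath->lock);
vp_is_open = (vpath->vp_open == VXGE_HW_VP_OPEN);	/* flag read under lock */
spin_unlock(&vpath->lock);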
4657 | |||
4658 | /* | ||
4659 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | ||
4660 | * This routine is the initial phase of init which resets the vpath and | ||
4661 | * initializes the software support structures. | ||
4662 | */ | ||
4663 | static enum vxge_hw_status | ||
4664 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | ||
4665 | struct vxge_hw_vp_config *config) | ||
4666 | { | ||
4667 | struct __vxge_hw_virtualpath *vpath; | ||
4668 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4669 | |||
4670 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | ||
4671 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | ||
4672 | goto exit; | ||
4673 | } | ||
4674 | |||
4675 | vpath = &hldev->virtual_paths[vp_id]; | ||
4676 | |||
4677 | spin_lock_init(&vpath->lock); | ||
4678 | vpath->vp_id = vp_id; | ||
4679 | vpath->vp_open = VXGE_HW_VP_OPEN; | ||
4680 | vpath->hldev = hldev; | ||
4681 | vpath->vp_config = config; | ||
4682 | vpath->vp_reg = hldev->vpath_reg[vp_id]; | ||
4683 | vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id]; | ||
4684 | |||
4685 | __vxge_hw_vpath_reset(hldev, vp_id); | ||
4686 | |||
4687 | status = __vxge_hw_vpath_reset_check(vpath); | ||
4688 | if (status != VXGE_HW_OK) { | ||
4689 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4690 | goto exit; | ||
4691 | } | ||
4692 | |||
4693 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); | ||
4694 | if (status != VXGE_HW_OK) { | ||
4695 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4696 | goto exit; | ||
4697 | } | ||
4698 | |||
4699 | INIT_LIST_HEAD(&vpath->vpath_handles); | ||
4700 | |||
4701 | vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id]; | ||
4702 | |||
4703 | VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0, | ||
4704 | hldev->tim_int_mask1, vp_id); | ||
4705 | |||
4706 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | ||
4707 | if (status != VXGE_HW_OK) | ||
4708 | __vxge_hw_vp_terminate(hldev, vp_id); | ||
4709 | exit: | ||
4710 | return status; | ||
4711 | } | ||
4712 | |||
4713 | /* | ||
4714 | * vxge_hw_vpath_mtu_set - Set MTU. | ||
4715 | * Set a new MTU value. For example, to use jumbo frames: | ||
4716 | * vxge_hw_vpath_mtu_set(my_device, 9600); | ||
4717 | */ | ||
4718 | enum vxge_hw_status | ||
4719 | vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu) | ||
4720 | { | ||
4721 | u64 val64; | ||
4722 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4723 | struct __vxge_hw_virtualpath *vpath; | ||
4724 | |||
4725 | if (vp == NULL) { | ||
4726 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
4727 | goto exit; | ||
4728 | } | ||
4729 | vpath = vp->vpath; | ||
4730 | |||
4731 | new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
4732 | |||
4733 | if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) { | ||
4734 | status = VXGE_HW_ERR_INVALID_MTU_SIZE; | ||
4735 | goto exit; | ||
4736 | } | ||
4737 | |||
4736 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | ||
4737 | |||
4738 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
4739 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu); | ||
4740 | |||
4741 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | ||
4742 | |||
4743 | vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE; | ||
4744 | |||
4745 | exit: | ||
4746 | return status; | ||
4747 | } | ||
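[Editorial sketch] A hedged usage sketch for the routine above; vp is an open vpath handle and handle_mtu_error() is a hypothetical caller-side helper, not part of this driver:

enum vxge_hw_status status;

/* Illustrative caller: switch to a 9000-byte jumbo MTU */
status = vxge_hw_vpath_mtu_set(vp, 9000);
if (status != VXGE_HW_OK)
	handle_mtu_error(status);	/* e.g. VXGE_HW_ERR_INVALID_MTU_SIZE
					 * when 9000 exceeds vpath->max_mtu */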
4748 | |||
4749 | /* | ||
4750 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | ||
4751 | * Enable the DMA vpath statistics. This function must be called to | ||
4752 | * re-enable the adapter to update stats into host memory. | ||
4753 | */ | ||
4754 | static enum vxge_hw_status | ||
4755 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4756 | { | ||
4757 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4758 | struct __vxge_hw_virtualpath *vpath; | ||
4759 | |||
4760 | vpath = vp->vpath; | ||
4761 | |||
4762 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4763 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4764 | goto exit; | ||
4765 | } | ||
4766 | |||
4767 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4768 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4769 | |||
4770 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4771 | exit: | ||
4772 | return status; | ||
4773 | } | ||
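[Editorial sketch] The memcpy above snapshots the previous hardware counters into hw_stats_sav before the refresh, so a caller can compute per-interval deltas. A sketch, where rx_counter stands in for a real member of struct vxge_hw_vpath_stats_hw_info (the field name here is hypothetical):

/* Illustrative per-interval delta after a stats refresh */
u64 delta = vpath->hw_stats->rx_counter - vpath->hw_stats_sav->rx_counter;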
4774 | |||
4775 | /* | ||
4776 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
4777 | * This function takes a block from the pool's free list and then replenishes the pool from the system | ||
4778 | */ | ||
4779 | static struct __vxge_hw_blockpool_entry * | ||
4780 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
4781 | { | ||
4782 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4783 | struct __vxge_hw_blockpool *blockpool; | ||
4784 | |||
4785 | blockpool = &devh->block_pool; | ||
4786 | |||
4787 | if (size == blockpool->block_size) { | ||
4788 | |||
4789 | if (!list_empty(&blockpool->free_block_list)) | ||
4790 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4791 | list_first_entry(&blockpool->free_block_list, | ||
4792 | struct __vxge_hw_blockpool_entry, | ||
4793 | item); | ||
4794 | |||
4795 | if (entry != NULL) { | ||
4796 | list_del(&entry->item); | ||
4797 | blockpool->pool_size--; | ||
4798 | } | ||
4799 | } | ||
4800 | |||
4801 | if (entry != NULL) | ||
4802 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4803 | |||
4804 | return entry; | ||
4805 | } | ||
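[Editorial sketch] An illustrative call, mirroring the stats-block allocation in vxge_hw_vpath_open() below; only requests for the pool's native block size can be satisfied from the free list:

struct __vxge_hw_blockpool_entry *blk;

/* Sketch: take one DMA-able block of the pool's native size */
blk = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE);
if (blk == NULL)
	return VXGE_HW_ERR_OUT_OF_MEMORY;
/* blk->memblock is the CPU pointer, blk->dma_addr the device address */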
4806 | |||
4807 | /* | ||
4808 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | ||
4809 | * This function is used to open access to a virtual path of an | ||
4810 | * adapter for offload and GRO operations. It returns | ||
4811 | * synchronously. | ||
4812 | */ | ||
4813 | enum vxge_hw_status | ||
4814 | vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | ||
4815 | struct vxge_hw_vpath_attr *attr, | ||
4816 | struct __vxge_hw_vpath_handle **vpath_handle) | ||
4817 | { | ||
4818 | struct __vxge_hw_virtualpath *vpath; | ||
4819 | struct __vxge_hw_vpath_handle *vp; | ||
4820 | enum vxge_hw_status status; | ||
4821 | |||
4822 | vpath = &hldev->virtual_paths[attr->vp_id]; | ||
4823 | |||
4824 | if (vpath->vp_open == VXGE_HW_VP_OPEN) { | ||
4825 | status = VXGE_HW_ERR_INVALID_STATE; | ||
4826 | goto vpath_open_exit1; | ||
4827 | } | ||
4828 | |||
4829 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | ||
4830 | &hldev->config.vp_config[attr->vp_id]); | ||
4831 | if (status != VXGE_HW_OK) | ||
4832 | goto vpath_open_exit1; | ||
4833 | |||
4834 | vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); | ||
4835 | if (vp == NULL) { | ||
4836 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4837 | goto vpath_open_exit2; | ||
4838 | } | ||
4839 | |||
4840 | vp->vpath = vpath; | ||
4841 | |||
4842 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
4843 | status = __vxge_hw_fifo_create(vp, &attr->fifo_attr); | ||
4844 | if (status != VXGE_HW_OK) | ||
4845 | goto vpath_open_exit6; | ||
4846 | } | ||
4847 | |||
4848 | if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | ||
4849 | status = __vxge_hw_ring_create(vp, &attr->ring_attr); | ||
4850 | if (status != VXGE_HW_OK) | ||
4851 | goto vpath_open_exit7; | ||
4852 | |||
4853 | __vxge_hw_vpath_prc_configure(hldev, attr->vp_id); | ||
4854 | } | ||
4855 | |||
4856 | if (vpath->fifoh != NULL) | ||
4857 | vpath->fifoh->tx_intr_num = (attr->vp_id * | ||
4858 | VXGE_HW_MAX_INTR_PER_VP) + VXGE_HW_VPATH_INTR_TX; | ||
4859 | |||
4860 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | ||
4861 | VXGE_HW_BLOCK_SIZE); | ||
4862 | if (vpath->stats_block == NULL) { | ||
4863 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4864 | goto vpath_open_exit8; | ||
4865 | } | ||
4866 | |||
4867 | vpath->hw_stats = vpath->stats_block->memblock; | ||
4868 | memset(vpath->hw_stats, 0, | ||
4869 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4870 | |||
4871 | hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] = | ||
4872 | vpath->hw_stats; | ||
4873 | |||
4874 | vpath->hw_stats_sav = | ||
4875 | &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id]; | ||
4876 | memset(vpath->hw_stats_sav, 0, | ||
4877 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4878 | |||
4879 | writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); | ||
4880 | |||
4881 | status = vxge_hw_vpath_stats_enable(vp); | ||
4882 | if (status != VXGE_HW_OK) | ||
4883 | goto vpath_open_exit8; | ||
4884 | |||
4885 | list_add(&vp->item, &vpath->vpath_handles); | ||
4886 | |||
4887 | hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id); | ||
4888 | |||
4889 | *vpath_handle = vp; | ||
4890 | |||
4891 | attr->fifo_attr.userdata = vpath->fifoh; | ||
4892 | attr->ring_attr.userdata = vpath->ringh; | ||
4893 | |||
4894 | return VXGE_HW_OK; | ||
4895 | |||
4896 | vpath_open_exit8: | ||
4897 | if (vpath->ringh != NULL) | ||
4898 | __vxge_hw_ring_delete(vp); | ||
4899 | vpath_open_exit7: | ||
4900 | if (vpath->fifoh != NULL) | ||
4901 | __vxge_hw_fifo_delete(vp); | ||
4902 | vpath_open_exit6: | ||
4903 | vfree(vp); | ||
4904 | vpath_open_exit2: | ||
4905 | __vxge_hw_vp_terminate(hldev, attr->vp_id); | ||
4906 | vpath_open_exit1: | ||
4907 | |||
4908 | return status; | ||
4909 | } | ||
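[Editorial sketch] A minimal open sequence for the function above, from a hypothetical caller; the fifo and ring callback setup is elided:

struct vxge_hw_vpath_attr attr = { .vp_id = 0 };	/* open vpath 0 */
struct __vxge_hw_vpath_handle *vp = NULL;
enum vxge_hw_status status;

/* attr.fifo_attr and attr.ring_attr would be filled in here */
status = vxge_hw_vpath_open(hldev, &attr, &vp);
if (status != VXGE_HW_OK)
	return status;	/* on failure the vpath is already torn down */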
4910 | |||
4911 | /** | ||
4912 | * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the | ||
4913 | * vpath receive doorbell | ||
4914 | * @vp: Handle obtained from a previous vpath open | ||
4915 | * | ||
4916 | * This function posts the count of available RxD quadwords to the PRC | ||
4917 | * doorbell and derives the ring's rxds_limit from it. | ||
4918 | */ | ||
4919 | void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | ||
4920 | { | ||
4921 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | ||
4922 | struct __vxge_hw_ring *ring = vpath->ringh; | ||
4923 | struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); | ||
4924 | u64 new_count, val64, val164; | ||
4925 | |||
4926 | if (vdev->titan1) { | ||
4927 | new_count = readq(&vpath->vp_reg->rxdmem_size); | ||
4928 | new_count &= 0x1fff; | ||
4929 | } else | ||
4930 | new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; | ||
4931 | |||
4932 | val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); | ||
4933 | |||
4934 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), | ||
4935 | &vpath->vp_reg->prc_rxd_doorbell); | ||
4936 | readl(&vpath->vp_reg->prc_rxd_doorbell); | ||
4937 | |||
4938 | val164 /= 2; | ||
4939 | val64 = readq(&vpath->vp_reg->prc_cfg6); | ||
4940 | val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64); | ||
4941 | val64 &= 0x1ff; | ||
4942 | |||
4943 | /* | ||
4944 | * Each RxD occupies 4 qwords | ||
4945 | */ | ||
4946 | new_count -= (val64 + 1); | ||
4947 | val64 = min(val164, new_count) / 4; | ||
4948 | |||
4949 | ring->rxds_limit = min(ring->rxds_limit, val64); | ||
4950 | if (ring->rxds_limit < 4) | ||
4951 | ring->rxds_limit = 4; | ||
4952 | } | ||
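[Editorial sketch] A worked instance of the limit arithmetic above, under assumed Titan-1 values (an rxdmem_size of 0x1000 qwords and an RXD_SPAT of 0xff):

u64 qwords = 0x1000;			/* on-chip RxD memory, in qwords */
u64 half = qwords / 2;			/* val164 / 2 above -> 2048 */
u64 avail = qwords - (0xff + 1);	/* minus the SPAT threshold -> 3840 */
u64 limit = min(half, avail) / 4;	/* 4 qwords per RxD -> 512 RxDs */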
4953 | |||
4954 | /* | ||
4955 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
4956 | * @devh: HAL device | ||
4957 | * @entry: Entry of block to be freed | ||
4958 | * | ||
4959 | * This function returns a block to the block pool's free list | ||
4960 | */ | ||
4961 | static void | ||
4962 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
4963 | struct __vxge_hw_blockpool_entry *entry) | ||
4964 | { | ||
4965 | struct __vxge_hw_blockpool *blockpool; | ||
4966 | |||
4967 | blockpool = &devh->block_pool; | ||
4968 | |||
4969 | if (entry->length == blockpool->block_size) { | ||
4970 | list_add(&entry->item, &blockpool->free_block_list); | ||
4971 | blockpool->pool_size++; | ||
4972 | } | ||
4973 | |||
4974 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
4975 | } | ||
4976 | |||
4977 | /* | ||
4978 | * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open | ||
4979 | * This function is used to close access to a virtual path opened | ||
4980 | * earlier. | ||
4981 | */ | ||
4982 | enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) | ||
4983 | { | ||
4984 | struct __vxge_hw_virtualpath *vpath = NULL; | ||
4985 | struct __vxge_hw_device *devh = NULL; | ||
4986 | u32 vp_id = vp->vpath->vp_id; | ||
4987 | u32 is_empty = TRUE; | ||
4988 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4989 | |||
4990 | vpath = vp->vpath; | ||
4991 | devh = vpath->hldev; | ||
4992 | |||
4993 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4994 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4995 | goto vpath_close_exit; | ||
4996 | } | ||
4997 | |||
4998 | list_del(&vp->item); | ||
4999 | |||
5000 | if (!list_empty(&vpath->vpath_handles)) { | ||
5001 | list_add(&vp->item, &vpath->vpath_handles); | ||
5002 | is_empty = FALSE; | ||
5003 | } | ||
5004 | |||
5005 | if (!is_empty) { | ||
5006 | status = VXGE_HW_FAIL; | ||
5007 | goto vpath_close_exit; | ||
5008 | } | ||
5009 | |||
5010 | devh->vpaths_deployed &= ~vxge_mBIT(vp_id); | ||
5011 | |||
5012 | if (vpath->ringh != NULL) | ||
5013 | __vxge_hw_ring_delete(vp); | ||
5014 | |||
5015 | if (vpath->fifoh != NULL) | ||
5016 | __vxge_hw_fifo_delete(vp); | ||
5017 | |||
5018 | if (vpath->stats_block != NULL) | ||
5019 | __vxge_hw_blockpool_block_free(devh, vpath->stats_block); | ||
5020 | |||
5021 | vfree(vp); | ||
5022 | |||
5023 | __vxge_hw_vp_terminate(devh, vp_id); | ||
5024 | |||
5025 | vpath_close_exit: | ||
5026 | return status; | ||
5027 | } | ||
5028 | |||
5029 | /* | ||
5030 | * vxge_hw_vpath_reset - Resets vpath | ||
5031 | * This function is used to request a reset of the vpath | ||
5032 | */ | ||
5033 | enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp) | ||
5034 | { | ||
5035 | enum vxge_hw_status status; | ||
5036 | u32 vp_id; | ||
5037 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | ||
5038 | |||
5039 | vp_id = vpath->vp_id; | ||
5040 | |||
5041 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
5042 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
5043 | goto exit; | ||
5044 | } | ||
5045 | |||
5046 | status = __vxge_hw_vpath_reset(vpath->hldev, vp_id); | ||
5047 | if (status == VXGE_HW_OK) | ||
5048 | vpath->sw_stats->soft_reset_cnt++; | ||
5049 | exit: | ||
5050 | return status; | ||
5051 | } | ||
5052 | |||
5053 | /* | ||
5054 | * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize. | ||
5055 | * This function polls for vpath reset completion and re-initializes | ||
5056 | * the vpath. | ||
5057 | */ | ||
5058 | enum vxge_hw_status | ||
5059 | vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) | ||
5060 | { | ||
5061 | struct __vxge_hw_virtualpath *vpath = NULL; | ||
5062 | enum vxge_hw_status status; | ||
5063 | struct __vxge_hw_device *hldev; | ||
5064 | u32 vp_id; | ||
5065 | |||
5066 | vp_id = vp->vpath->vp_id; | ||
5067 | vpath = vp->vpath; | ||
5068 | hldev = vpath->hldev; | ||
5069 | |||
5070 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
5071 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
5072 | goto exit; | ||
5073 | } | ||
5074 | |||
5075 | status = __vxge_hw_vpath_reset_check(vpath); | ||
5076 | if (status != VXGE_HW_OK) | ||
5077 | goto exit; | ||
5078 | |||
5079 | status = __vxge_hw_vpath_sw_reset(hldev, vp_id); | ||
5080 | if (status != VXGE_HW_OK) | ||
5081 | goto exit; | ||
5082 | |||
5083 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | ||
5084 | if (status != VXGE_HW_OK) | ||
5085 | goto exit; | ||
5086 | |||
5087 | if (vpath->ringh != NULL) | ||
5088 | __vxge_hw_vpath_prc_configure(hldev, vp_id); | ||
5089 | |||
5090 | memset(vpath->hw_stats, 0, | ||
5091 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
5092 | |||
5093 | memset(vpath->hw_stats_sav, 0, | ||
5094 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
5095 | |||
5096 | writeq(vpath->stats_block->dma_addr, | ||
5097 | &vpath->vp_reg->stats_cfg); | ||
5098 | |||
5099 | status = vxge_hw_vpath_stats_enable(vp); | ||
5100 | |||
5101 | exit: | ||
5102 | return status; | ||
5103 | } | ||
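[Editorial sketch] The exported reset routines are designed to be used as a sequence; an illustrative cycle for an open handle vp:

/* Sketch: request the reset, poll and re-init, then resume traffic */
status = vxge_hw_vpath_reset(vp);
if (status == VXGE_HW_OK)
	status = vxge_hw_vpath_recover_from_reset(vp);
if (status == VXGE_HW_OK)
	vxge_hw_vpath_enable(vp);	/* clears the vpath reset bit */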
5104 | |||
5105 | /* | ||
5106 | * vxge_hw_vpath_enable - Enable vpath. | ||
5107 | * This routine clears the vpath reset, thereby enabling the vpath | ||
5108 | * to start forwarding frames and generating interrupts. | ||
5109 | */ | ||
5110 | void | ||
5111 | vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | ||
5112 | { | ||
5113 | struct __vxge_hw_device *hldev; | ||
5114 | u64 val64; | ||
5115 | |||
5116 | hldev = vp->vpath->hldev; | ||
5117 | |||
5118 | val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( | ||
5119 | 1 << (16 - vp->vpath->vp_id)); | ||
5120 | |||
5121 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | ||
5122 | &hldev->common_reg->cmn_rsthdlr_cfg1); | ||
5123 | } | ||
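[Editorial sketch] The shift in vxge_hw_vpath_enable() implies the bit layout sketched below; this is inferred from the code, assuming 17 vpaths numbered 0 through 16:

/* Sketch: vpath n owns bit (16 - n) of the CLR_VPATH_RESET field,
 * so vpath 0 maps to bit 16 and vpath 16 maps to bit 0. */
u64 field = 1ULL << (16 - vp_id);	/* vp_id in [0, 16] */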