diff options
Diffstat (limited to 'drivers/net/vxge/vxge-config.c')
-rw-r--r-- | drivers/net/vxge/vxge-config.c | 3610 |
1 files changed, 1834 insertions, 1776 deletions
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 0e6db5935609..32763b2dd73f 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c | |||
@@ -19,76 +19,386 @@ | |||
19 | 19 | ||
20 | #include "vxge-traffic.h" | 20 | #include "vxge-traffic.h" |
21 | #include "vxge-config.h" | 21 | #include "vxge-config.h" |
22 | #include "vxge-main.h" | ||
23 | |||
24 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
25 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
26 | VXGE_HW_STATS_OP_READ, \ | ||
27 | offset, \ | ||
28 | &val64); \ | ||
29 | if (status != VXGE_HW_OK) \ | ||
30 | return status; \ | ||
31 | } | ||
32 | |||
33 | static void | ||
34 | vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) | ||
35 | { | ||
36 | u64 val64; | ||
37 | |||
38 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
39 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
40 | writeq(val64, &vp_reg->rxmac_vcfg0); | ||
41 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
42 | } | ||
22 | 43 | ||
23 | /* | 44 | /* |
24 | * __vxge_hw_channel_allocate - Allocate memory for channel | 45 | * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle |
25 | * This function allocates required memory for the channel and various arrays | ||
26 | * in the channel | ||
27 | */ | 46 | */ |
28 | struct __vxge_hw_channel* | 47 | int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id) |
29 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
30 | enum __vxge_hw_channel_type type, | ||
31 | u32 length, u32 per_dtr_space, void *userdata) | ||
32 | { | 48 | { |
33 | struct __vxge_hw_channel *channel; | 49 | struct vxge_hw_vpath_reg __iomem *vp_reg; |
34 | struct __vxge_hw_device *hldev; | 50 | struct __vxge_hw_virtualpath *vpath; |
35 | int size = 0; | 51 | u64 val64, rxd_count, rxd_spat; |
36 | u32 vp_id; | 52 | int count = 0, total_count = 0; |
37 | 53 | ||
38 | hldev = vph->vpath->hldev; | 54 | vpath = &hldev->virtual_paths[vp_id]; |
39 | vp_id = vph->vpath->vp_id; | 55 | vp_reg = vpath->vp_reg; |
40 | 56 | ||
41 | switch (type) { | 57 | vxge_hw_vpath_set_zero_rx_frm_len(vp_reg); |
42 | case VXGE_HW_CHANNEL_TYPE_FIFO: | 58 | |
43 | size = sizeof(struct __vxge_hw_fifo); | 59 | /* Check that the ring controller for this vpath has enough free RxDs |
44 | break; | 60 | * to send frames to the host. This is done by reading the |
45 | case VXGE_HW_CHANNEL_TYPE_RING: | 61 | * PRC_RXD_DOORBELL_VPn register and comparing the read value to the |
46 | size = sizeof(struct __vxge_hw_ring); | 62 | * RXD_SPAT value for the vpath. |
47 | break; | 63 | */ |
48 | default: | 64 | val64 = readq(&vp_reg->prc_cfg6); |
49 | break; | 65 | rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1; |
66 | /* Use a factor of 2 when comparing rxd_count against rxd_spat for some | ||
67 | * leg room. | ||
68 | */ | ||
69 | rxd_spat *= 2; | ||
70 | |||
71 | do { | ||
72 | mdelay(1); | ||
73 | |||
74 | rxd_count = readq(&vp_reg->prc_rxd_doorbell); | ||
75 | |||
76 | /* Check that the ring controller for this vpath does | ||
77 | * not have any frame in its pipeline. | ||
78 | */ | ||
79 | val64 = readq(&vp_reg->frm_in_progress_cnt); | ||
80 | if ((rxd_count <= rxd_spat) || (val64 > 0)) | ||
81 | count = 0; | ||
82 | else | ||
83 | count++; | ||
84 | total_count++; | ||
85 | } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) && | ||
86 | (total_count < VXGE_HW_MAX_POLLING_COUNT)); | ||
87 | |||
88 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
89 | printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n", | ||
90 | __func__); | ||
91 | |||
92 | return total_count; | ||
93 | } | ||
94 | |||
95 | /* vxge_hw_device_wait_receive_idle - This function waits until all frames | ||
96 | * stored in the frame buffer for each vpath assigned to the given | ||
97 | * function (hldev) have been sent to the host. | ||
98 | */ | ||
99 | void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) | ||
100 | { | ||
101 | int i, total_count = 0; | ||
102 | |||
103 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
104 | if (!(hldev->vpaths_deployed & vxge_mBIT(i))) | ||
105 | continue; | ||
106 | |||
107 | total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); | ||
108 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
109 | break; | ||
50 | } | 110 | } |
111 | } | ||
51 | 112 | ||
52 | channel = kzalloc(size, GFP_KERNEL); | 113 | /* |
53 | if (channel == NULL) | 114 | * __vxge_hw_device_register_poll |
54 | goto exit0; | 115 | * Will poll certain register for specified amount of time. |
55 | INIT_LIST_HEAD(&channel->item); | 116 | * Will poll until masked bit is not cleared. |
117 | */ | ||
118 | static enum vxge_hw_status | ||
119 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
120 | { | ||
121 | u64 val64; | ||
122 | u32 i = 0; | ||
123 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
56 | 124 | ||
57 | channel->common_reg = hldev->common_reg; | 125 | udelay(10); |
58 | channel->first_vp_id = hldev->first_vp_id; | ||
59 | channel->type = type; | ||
60 | channel->devh = hldev; | ||
61 | channel->vph = vph; | ||
62 | channel->userdata = userdata; | ||
63 | channel->per_dtr_space = per_dtr_space; | ||
64 | channel->length = length; | ||
65 | channel->vp_id = vp_id; | ||
66 | 126 | ||
67 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 127 | do { |
68 | if (channel->work_arr == NULL) | 128 | val64 = readq(reg); |
69 | goto exit1; | 129 | if (!(val64 & mask)) |
130 | return VXGE_HW_OK; | ||
131 | udelay(100); | ||
132 | } while (++i <= 9); | ||
70 | 133 | ||
71 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 134 | i = 0; |
72 | if (channel->free_arr == NULL) | 135 | do { |
73 | goto exit1; | 136 | val64 = readq(reg); |
74 | channel->free_ptr = length; | 137 | if (!(val64 & mask)) |
138 | return VXGE_HW_OK; | ||
139 | mdelay(1); | ||
140 | } while (++i <= max_millis); | ||
75 | 141 | ||
76 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 142 | return ret; |
77 | if (channel->reserve_arr == NULL) | 143 | } |
78 | goto exit1; | ||
79 | channel->reserve_ptr = length; | ||
80 | channel->reserve_top = 0; | ||
81 | 144 | ||
82 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 145 | static inline enum vxge_hw_status |
83 | if (channel->orig_arr == NULL) | 146 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, |
84 | goto exit1; | 147 | u64 mask, u32 max_millis) |
148 | { | ||
149 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
150 | wmb(); | ||
151 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
152 | wmb(); | ||
85 | 153 | ||
86 | return channel; | 154 | return __vxge_hw_device_register_poll(addr, mask, max_millis); |
87 | exit1: | 155 | } |
88 | __vxge_hw_channel_free(channel); | ||
89 | 156 | ||
90 | exit0: | 157 | static enum vxge_hw_status |
91 | return NULL; | 158 | vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, |
159 | u32 fw_memo, u32 offset, u64 *data0, u64 *data1, | ||
160 | u64 *steer_ctrl) | ||
161 | { | ||
162 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | ||
163 | enum vxge_hw_status status; | ||
164 | u64 val64; | ||
165 | u32 retry = 0, max_retry = 3; | ||
166 | |||
167 | spin_lock(&vpath->lock); | ||
168 | if (!vpath->vp_open) { | ||
169 | spin_unlock(&vpath->lock); | ||
170 | max_retry = 100; | ||
171 | } | ||
172 | |||
173 | writeq(*data0, &vp_reg->rts_access_steer_data0); | ||
174 | writeq(*data1, &vp_reg->rts_access_steer_data1); | ||
175 | wmb(); | ||
176 | |||
177 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
178 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | | ||
179 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | | ||
180 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
181 | *steer_ctrl; | ||
182 | |||
183 | status = __vxge_hw_pio_mem_write64(val64, | ||
184 | &vp_reg->rts_access_steer_ctrl, | ||
185 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
186 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
187 | |||
188 | /* The __vxge_hw_device_register_poll can udelay for a significant | ||
189 | * amount of time, blocking other processes from the CPU. If it delays | ||
190 | * for ~5secs, an NMI error can occur. A way around this is to give up | ||
191 | * the processor via msleep, but this is not allowed while under lock. | ||
192 | * So, only allow it to sleep for ~4secs if open. Otherwise, delay for | ||
193 | * 1sec and sleep for 10ms until the firmware operation has completed | ||
194 | * or timed-out. | ||
195 | */ | ||
196 | while ((status != VXGE_HW_OK) && retry++ < max_retry) { | ||
197 | if (!vpath->vp_open) | ||
198 | msleep(20); | ||
199 | status = __vxge_hw_device_register_poll( | ||
200 | &vp_reg->rts_access_steer_ctrl, | ||
201 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
202 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
203 | } | ||
204 | |||
205 | if (status != VXGE_HW_OK) | ||
206 | goto out; | ||
207 | |||
208 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
209 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
210 | *data0 = readq(&vp_reg->rts_access_steer_data0); | ||
211 | *data1 = readq(&vp_reg->rts_access_steer_data1); | ||
212 | *steer_ctrl = val64; | ||
213 | } else | ||
214 | status = VXGE_HW_FAIL; | ||
215 | |||
216 | out: | ||
217 | if (vpath->vp_open) | ||
218 | spin_unlock(&vpath->lock); | ||
219 | return status; | ||
220 | } | ||
221 | |||
222 | enum vxge_hw_status | ||
223 | vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, | ||
224 | u32 *minor, u32 *build) | ||
225 | { | ||
226 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
227 | struct __vxge_hw_virtualpath *vpath; | ||
228 | enum vxge_hw_status status; | ||
229 | |||
230 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
231 | |||
232 | status = vxge_hw_vpath_fw_api(vpath, | ||
233 | VXGE_HW_FW_UPGRADE_ACTION, | ||
234 | VXGE_HW_FW_UPGRADE_MEMO, | ||
235 | VXGE_HW_FW_UPGRADE_OFFSET_READ, | ||
236 | &data0, &data1, &steer_ctrl); | ||
237 | if (status != VXGE_HW_OK) | ||
238 | return status; | ||
239 | |||
240 | *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
241 | *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
242 | *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
243 | |||
244 | return status; | ||
245 | } | ||
246 | |||
247 | enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) | ||
248 | { | ||
249 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
250 | struct __vxge_hw_virtualpath *vpath; | ||
251 | enum vxge_hw_status status; | ||
252 | u32 ret; | ||
253 | |||
254 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
255 | |||
256 | status = vxge_hw_vpath_fw_api(vpath, | ||
257 | VXGE_HW_FW_UPGRADE_ACTION, | ||
258 | VXGE_HW_FW_UPGRADE_MEMO, | ||
259 | VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, | ||
260 | &data0, &data1, &steer_ctrl); | ||
261 | if (status != VXGE_HW_OK) { | ||
262 | vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); | ||
263 | goto exit; | ||
264 | } | ||
265 | |||
266 | ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; | ||
267 | if (ret != 1) { | ||
268 | vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", | ||
269 | __func__, ret); | ||
270 | status = VXGE_HW_FAIL; | ||
271 | } | ||
272 | |||
273 | exit: | ||
274 | return status; | ||
275 | } | ||
276 | |||
277 | enum vxge_hw_status | ||
278 | vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) | ||
279 | { | ||
280 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
281 | struct __vxge_hw_virtualpath *vpath; | ||
282 | enum vxge_hw_status status; | ||
283 | int ret_code, sec_code; | ||
284 | |||
285 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
286 | |||
287 | /* send upgrade start command */ | ||
288 | status = vxge_hw_vpath_fw_api(vpath, | ||
289 | VXGE_HW_FW_UPGRADE_ACTION, | ||
290 | VXGE_HW_FW_UPGRADE_MEMO, | ||
291 | VXGE_HW_FW_UPGRADE_OFFSET_START, | ||
292 | &data0, &data1, &steer_ctrl); | ||
293 | if (status != VXGE_HW_OK) { | ||
294 | vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed", | ||
295 | __func__); | ||
296 | return status; | ||
297 | } | ||
298 | |||
299 | /* Transfer fw image to adapter 16 bytes at a time */ | ||
300 | for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { | ||
301 | steer_ctrl = 0; | ||
302 | |||
303 | /* The next 128bits of fwdata to be loaded onto the adapter */ | ||
304 | data0 = *((u64 *)fwdata); | ||
305 | data1 = *((u64 *)fwdata + 1); | ||
306 | |||
307 | status = vxge_hw_vpath_fw_api(vpath, | ||
308 | VXGE_HW_FW_UPGRADE_ACTION, | ||
309 | VXGE_HW_FW_UPGRADE_MEMO, | ||
310 | VXGE_HW_FW_UPGRADE_OFFSET_SEND, | ||
311 | &data0, &data1, &steer_ctrl); | ||
312 | if (status != VXGE_HW_OK) { | ||
313 | vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", | ||
314 | __func__); | ||
315 | goto out; | ||
316 | } | ||
317 | |||
318 | ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); | ||
319 | switch (ret_code) { | ||
320 | case VXGE_HW_FW_UPGRADE_OK: | ||
321 | /* All OK, send next 16 bytes. */ | ||
322 | break; | ||
323 | case VXGE_FW_UPGRADE_BYTES2SKIP: | ||
324 | /* skip bytes in the stream */ | ||
325 | fwdata += (data0 >> 8) & 0xFFFFFFFF; | ||
326 | break; | ||
327 | case VXGE_HW_FW_UPGRADE_DONE: | ||
328 | goto out; | ||
329 | case VXGE_HW_FW_UPGRADE_ERR: | ||
330 | sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); | ||
331 | switch (sec_code) { | ||
332 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: | ||
333 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: | ||
334 | printk(KERN_ERR | ||
335 | "corrupted data from .ncf file\n"); | ||
336 | break; | ||
337 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: | ||
338 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: | ||
339 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: | ||
340 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: | ||
341 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: | ||
342 | printk(KERN_ERR "invalid .ncf file\n"); | ||
343 | break; | ||
344 | case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: | ||
345 | printk(KERN_ERR "buffer overflow\n"); | ||
346 | break; | ||
347 | case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: | ||
348 | printk(KERN_ERR "failed to flash the image\n"); | ||
349 | break; | ||
350 | case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: | ||
351 | printk(KERN_ERR | ||
352 | "generic error. Unknown error type\n"); | ||
353 | break; | ||
354 | default: | ||
355 | printk(KERN_ERR "Unknown error of type %d\n", | ||
356 | sec_code); | ||
357 | break; | ||
358 | } | ||
359 | status = VXGE_HW_FAIL; | ||
360 | goto out; | ||
361 | default: | ||
362 | printk(KERN_ERR "Unknown FW error: %d\n", ret_code); | ||
363 | status = VXGE_HW_FAIL; | ||
364 | goto out; | ||
365 | } | ||
366 | /* point to next 16 bytes */ | ||
367 | fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; | ||
368 | } | ||
369 | out: | ||
370 | return status; | ||
371 | } | ||
372 | |||
373 | enum vxge_hw_status | ||
374 | vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, | ||
375 | struct eprom_image *img) | ||
376 | { | ||
377 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
378 | struct __vxge_hw_virtualpath *vpath; | ||
379 | enum vxge_hw_status status; | ||
380 | int i; | ||
381 | |||
382 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
383 | |||
384 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { | ||
385 | data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); | ||
386 | data1 = steer_ctrl = 0; | ||
387 | |||
388 | status = vxge_hw_vpath_fw_api(vpath, | ||
389 | VXGE_HW_FW_API_GET_EPROM_REV, | ||
390 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
391 | 0, &data0, &data1, &steer_ctrl); | ||
392 | if (status != VXGE_HW_OK) | ||
393 | break; | ||
394 | |||
395 | img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); | ||
396 | img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); | ||
397 | img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); | ||
398 | img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); | ||
399 | } | ||
400 | |||
401 | return status; | ||
92 | } | 402 | } |
93 | 403 | ||
94 | /* | 404 | /* |
@@ -96,7 +406,7 @@ exit0: | |||
96 | * This function deallocates memory from the channel and various arrays | 406 | * This function deallocates memory from the channel and various arrays |
97 | * in the channel | 407 | * in the channel |
98 | */ | 408 | */ |
99 | void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) | 409 | static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) |
100 | { | 410 | { |
101 | kfree(channel->work_arr); | 411 | kfree(channel->work_arr); |
102 | kfree(channel->free_arr); | 412 | kfree(channel->free_arr); |
@@ -110,7 +420,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) | |||
110 | * This function initializes a channel by properly setting the | 420 | * This function initializes a channel by properly setting the |
111 | * various references | 421 | * various references |
112 | */ | 422 | */ |
113 | enum vxge_hw_status | 423 | static enum vxge_hw_status |
114 | __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) | 424 | __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) |
115 | { | 425 | { |
116 | u32 i; | 426 | u32 i; |
@@ -145,7 +455,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) | |||
145 | * __vxge_hw_channel_reset - Resets a channel | 455 | * __vxge_hw_channel_reset - Resets a channel |
146 | * This function resets a channel by properly setting the various references | 456 | * This function resets a channel by properly setting the various references |
147 | */ | 457 | */ |
148 | enum vxge_hw_status | 458 | static enum vxge_hw_status |
149 | __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) | 459 | __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) |
150 | { | 460 | { |
151 | u32 i; | 461 | u32 i; |
@@ -172,8 +482,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) | |||
172 | * Initialize certain PCI/PCI-X configuration registers | 482 | * Initialize certain PCI/PCI-X configuration registers |
173 | * with recommended values. Save config space for future hw resets. | 483 | * with recommended values. Save config space for future hw resets. |
174 | */ | 484 | */ |
175 | void | 485 | static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) |
176 | __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) | ||
177 | { | 486 | { |
178 | u16 cmd = 0; | 487 | u16 cmd = 0; |
179 | 488 | ||
@@ -185,43 +494,11 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) | |||
185 | pci_save_state(hldev->pdev); | 494 | pci_save_state(hldev->pdev); |
186 | } | 495 | } |
187 | 496 | ||
188 | /* | 497 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset |
189 | * __vxge_hw_device_register_poll | ||
190 | * Will poll certain register for specified amount of time. | ||
191 | * Will poll until masked bit is not cleared. | ||
192 | */ | ||
193 | enum vxge_hw_status | ||
194 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
195 | { | ||
196 | u64 val64; | ||
197 | u32 i = 0; | ||
198 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
199 | |||
200 | udelay(10); | ||
201 | |||
202 | do { | ||
203 | val64 = readq(reg); | ||
204 | if (!(val64 & mask)) | ||
205 | return VXGE_HW_OK; | ||
206 | udelay(100); | ||
207 | } while (++i <= 9); | ||
208 | |||
209 | i = 0; | ||
210 | do { | ||
211 | val64 = readq(reg); | ||
212 | if (!(val64 & mask)) | ||
213 | return VXGE_HW_OK; | ||
214 | mdelay(1); | ||
215 | } while (++i <= max_millis); | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset | ||
221 | * in progress | 498 | * in progress |
222 | * This routine checks the vpath reset in progress register is turned zero | 499 | * This routine checks the vpath reset in progress register is turned zero |
223 | */ | 500 | */ |
224 | enum vxge_hw_status | 501 | static enum vxge_hw_status |
225 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) | 502 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) |
226 | { | 503 | { |
227 | enum vxge_hw_status status; | 504 | enum vxge_hw_status status; |
@@ -232,11 +509,65 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) | |||
232 | } | 509 | } |
233 | 510 | ||
234 | /* | 511 | /* |
512 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | ||
513 | * Set the swapper bits appropriately for the legacy section. | ||
514 | */ | ||
515 | static enum vxge_hw_status | ||
516 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
517 | { | ||
518 | u64 val64; | ||
519 | enum vxge_hw_status status = VXGE_HW_OK; | ||
520 | |||
521 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
522 | |||
523 | wmb(); | ||
524 | |||
525 | switch (val64) { | ||
526 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
527 | return status; | ||
528 | |||
529 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
530 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
531 | &legacy_reg->pifm_rd_swap_en); | ||
532 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
533 | &legacy_reg->pifm_rd_flip_en); | ||
534 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
535 | &legacy_reg->pifm_wr_swap_en); | ||
536 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
537 | &legacy_reg->pifm_wr_flip_en); | ||
538 | break; | ||
539 | |||
540 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
541 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
542 | &legacy_reg->pifm_rd_swap_en); | ||
543 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
544 | &legacy_reg->pifm_wr_swap_en); | ||
545 | break; | ||
546 | |||
547 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
548 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
549 | &legacy_reg->pifm_rd_flip_en); | ||
550 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
551 | &legacy_reg->pifm_wr_flip_en); | ||
552 | break; | ||
553 | } | ||
554 | |||
555 | wmb(); | ||
556 | |||
557 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
558 | |||
559 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
560 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
561 | |||
562 | return status; | ||
563 | } | ||
564 | |||
565 | /* | ||
235 | * __vxge_hw_device_toc_get | 566 | * __vxge_hw_device_toc_get |
236 | * This routine sets the swapper and reads the toc pointer and returns the | 567 | * This routine sets the swapper and reads the toc pointer and returns the |
237 | * memory mapped address of the toc | 568 | * memory mapped address of the toc |
238 | */ | 569 | */ |
239 | struct vxge_hw_toc_reg __iomem * | 570 | static struct vxge_hw_toc_reg __iomem * |
240 | __vxge_hw_device_toc_get(void __iomem *bar0) | 571 | __vxge_hw_device_toc_get(void __iomem *bar0) |
241 | { | 572 | { |
242 | u64 val64; | 573 | u64 val64; |
@@ -262,7 +593,7 @@ exit: | |||
262 | * register location pointers in the device object. It waits until the ric is | 593 | * register location pointers in the device object. It waits until the ric is |
263 | * completed initializing registers. | 594 | * completed initializing registers. |
264 | */ | 595 | */ |
265 | enum vxge_hw_status | 596 | static enum vxge_hw_status |
266 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) | 597 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) |
267 | { | 598 | { |
268 | u64 val64; | 599 | u64 val64; |
@@ -323,26 +654,6 @@ exit: | |||
323 | } | 654 | } |
324 | 655 | ||
325 | /* | 656 | /* |
326 | * __vxge_hw_device_id_get | ||
327 | * This routine sets the device id and revision numbers into the device | ||
328 | * structure | ||
329 | */ | ||
330 | void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev) | ||
331 | { | ||
332 | u64 val64; | ||
333 | |||
334 | val64 = readq(&hldev->common_reg->titan_asic_id); | ||
335 | hldev->device_id = | ||
336 | (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64); | ||
337 | |||
338 | hldev->major_revision = | ||
339 | (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64); | ||
340 | |||
341 | hldev->minor_revision = | ||
342 | (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * __vxge_hw_device_access_rights_get: Get Access Rights of the driver | 657 | * __vxge_hw_device_access_rights_get: Get Access Rights of the driver |
347 | * This routine returns the Access Rights of the driver | 658 | * This routine returns the Access Rights of the driver |
348 | */ | 659 | */ |
@@ -395,10 +706,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) | |||
395 | } | 706 | } |
396 | 707 | ||
397 | /* | 708 | /* |
709 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
710 | * Returns the function number of the vpath. | ||
711 | */ | ||
712 | static u32 | ||
713 | __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
714 | { | ||
715 | u64 val64; | ||
716 | |||
717 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
718 | |||
719 | return | ||
720 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
721 | } | ||
722 | |||
723 | /* | ||
398 | * __vxge_hw_device_host_info_get | 724 | * __vxge_hw_device_host_info_get |
399 | * This routine returns the host type assignments | 725 | * This routine returns the host type assignments |
400 | */ | 726 | */ |
401 | void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) | 727 | static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) |
402 | { | 728 | { |
403 | u64 val64; | 729 | u64 val64; |
404 | u32 i; | 730 | u32 i; |
@@ -411,16 +737,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) | |||
411 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); | 737 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); |
412 | 738 | ||
413 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 739 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
414 | |||
415 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | 740 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) |
416 | continue; | 741 | continue; |
417 | 742 | ||
418 | hldev->func_id = | 743 | hldev->func_id = |
419 | __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); | 744 | __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); |
420 | 745 | ||
421 | hldev->access_rights = __vxge_hw_device_access_rights_get( | 746 | hldev->access_rights = __vxge_hw_device_access_rights_get( |
422 | hldev->host_type, hldev->func_id); | 747 | hldev->host_type, hldev->func_id); |
423 | 748 | ||
749 | hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; | ||
750 | hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; | ||
751 | |||
424 | hldev->first_vp_id = i; | 752 | hldev->first_vp_id = i; |
425 | break; | 753 | break; |
426 | } | 754 | } |
@@ -461,7 +789,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) | |||
461 | * __vxge_hw_device_initialize | 789 | * __vxge_hw_device_initialize |
462 | * Initialize Titan-V hardware. | 790 | * Initialize Titan-V hardware. |
463 | */ | 791 | */ |
464 | enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | 792 | static enum vxge_hw_status |
793 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | ||
465 | { | 794 | { |
466 | enum vxge_hw_status status = VXGE_HW_OK; | 795 | enum vxge_hw_status status = VXGE_HW_OK; |
467 | 796 | ||
@@ -477,10 +806,200 @@ exit: | |||
477 | return status; | 806 | return status; |
478 | } | 807 | } |
479 | 808 | ||
809 | /* | ||
810 | * __vxge_hw_vpath_fw_ver_get - Get the fw version | ||
811 | * Returns FW Version | ||
812 | */ | ||
813 | static enum vxge_hw_status | ||
814 | __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, | ||
815 | struct vxge_hw_device_hw_info *hw_info) | ||
816 | { | ||
817 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
818 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
819 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
820 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
821 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
822 | enum vxge_hw_status status; | ||
823 | |||
824 | status = vxge_hw_vpath_fw_api(vpath, | ||
825 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | ||
826 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
827 | 0, &data0, &data1, &steer_ctrl); | ||
828 | if (status != VXGE_HW_OK) | ||
829 | goto exit; | ||
830 | |||
831 | fw_date->day = | ||
832 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); | ||
833 | fw_date->month = | ||
834 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); | ||
835 | fw_date->year = | ||
836 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); | ||
837 | |||
838 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
839 | fw_date->month, fw_date->day, fw_date->year); | ||
840 | |||
841 | fw_version->major = | ||
842 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
843 | fw_version->minor = | ||
844 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
845 | fw_version->build = | ||
846 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
847 | |||
848 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
849 | fw_version->major, fw_version->minor, fw_version->build); | ||
850 | |||
851 | flash_date->day = | ||
852 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); | ||
853 | flash_date->month = | ||
854 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); | ||
855 | flash_date->year = | ||
856 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); | ||
857 | |||
858 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
859 | flash_date->month, flash_date->day, flash_date->year); | ||
860 | |||
861 | flash_version->major = | ||
862 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); | ||
863 | flash_version->minor = | ||
864 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); | ||
865 | flash_version->build = | ||
866 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); | ||
867 | |||
868 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
869 | flash_version->major, flash_version->minor, | ||
870 | flash_version->build); | ||
871 | |||
872 | exit: | ||
873 | return status; | ||
874 | } | ||
875 | |||
876 | /* | ||
877 | * __vxge_hw_vpath_card_info_get - Get the serial numbers, | ||
878 | * part number and product description. | ||
879 | */ | ||
880 | static enum vxge_hw_status | ||
881 | __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, | ||
882 | struct vxge_hw_device_hw_info *hw_info) | ||
883 | { | ||
884 | enum vxge_hw_status status; | ||
885 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
886 | u8 *serial_number = hw_info->serial_number; | ||
887 | u8 *part_number = hw_info->part_number; | ||
888 | u8 *product_desc = hw_info->product_desc; | ||
889 | u32 i, j = 0; | ||
890 | |||
891 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; | ||
892 | |||
893 | status = vxge_hw_vpath_fw_api(vpath, | ||
894 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
895 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
896 | 0, &data0, &data1, &steer_ctrl); | ||
897 | if (status != VXGE_HW_OK) | ||
898 | return status; | ||
899 | |||
900 | ((u64 *)serial_number)[0] = be64_to_cpu(data0); | ||
901 | ((u64 *)serial_number)[1] = be64_to_cpu(data1); | ||
902 | |||
903 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; | ||
904 | data1 = steer_ctrl = 0; | ||
905 | |||
906 | status = vxge_hw_vpath_fw_api(vpath, | ||
907 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
908 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
909 | 0, &data0, &data1, &steer_ctrl); | ||
910 | if (status != VXGE_HW_OK) | ||
911 | return status; | ||
912 | |||
913 | ((u64 *)part_number)[0] = be64_to_cpu(data0); | ||
914 | ((u64 *)part_number)[1] = be64_to_cpu(data1); | ||
915 | |||
916 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
917 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
918 | data0 = i; | ||
919 | data1 = steer_ctrl = 0; | ||
920 | |||
921 | status = vxge_hw_vpath_fw_api(vpath, | ||
922 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
923 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
924 | 0, &data0, &data1, &steer_ctrl); | ||
925 | if (status != VXGE_HW_OK) | ||
926 | return status; | ||
927 | |||
928 | ((u64 *)product_desc)[j++] = be64_to_cpu(data0); | ||
929 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
930 | } | ||
931 | |||
932 | return status; | ||
933 | } | ||
934 | |||
935 | /* | ||
936 | * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode | ||
937 | * Returns pci function mode | ||
938 | */ | ||
939 | static enum vxge_hw_status | ||
940 | __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, | ||
941 | struct vxge_hw_device_hw_info *hw_info) | ||
942 | { | ||
943 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
944 | enum vxge_hw_status status; | ||
945 | |||
946 | data0 = 0; | ||
947 | |||
948 | status = vxge_hw_vpath_fw_api(vpath, | ||
949 | VXGE_HW_FW_API_GET_FUNC_MODE, | ||
950 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
951 | 0, &data0, &data1, &steer_ctrl); | ||
952 | if (status != VXGE_HW_OK) | ||
953 | return status; | ||
954 | |||
955 | hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); | ||
956 | return status; | ||
957 | } | ||
958 | |||
959 | /* | ||
960 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
961 | * from MAC address table. | ||
962 | */ | ||
963 | static enum vxge_hw_status | ||
964 | __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, | ||
965 | u8 *macaddr, u8 *macaddr_mask) | ||
966 | { | ||
967 | u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, | ||
968 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
969 | enum vxge_hw_status status; | ||
970 | int i; | ||
971 | |||
972 | do { | ||
973 | status = vxge_hw_vpath_fw_api(vpath, action, | ||
974 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | ||
975 | 0, &data0, &data1, &steer_ctrl); | ||
976 | if (status != VXGE_HW_OK) | ||
977 | goto exit; | ||
978 | |||
979 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); | ||
980 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
981 | data1); | ||
982 | |||
983 | for (i = ETH_ALEN; i > 0; i--) { | ||
984 | macaddr[i - 1] = (u8) (data0 & 0xFF); | ||
985 | data0 >>= 8; | ||
986 | |||
987 | macaddr_mask[i - 1] = (u8) (data1 & 0xFF); | ||
988 | data1 >>= 8; | ||
989 | } | ||
990 | |||
991 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; | ||
992 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
993 | |||
994 | } while (!is_valid_ether_addr(macaddr)); | ||
995 | exit: | ||
996 | return status; | ||
997 | } | ||
998 | |||
480 | /** | 999 | /** |
481 | * vxge_hw_device_hw_info_get - Get the hw information | 1000 | * vxge_hw_device_hw_info_get - Get the hw information |
482 | * Returns the vpath mask that has the bits set for each vpath allocated | 1001 | * Returns the vpath mask that has the bits set for each vpath allocated |
483 | * for the driver, FW version information and the first mac addresse for | 1002 | * for the driver, FW version information, and the first mac address for |
484 | * each vpath | 1003 | * each vpath |
485 | */ | 1004 | */ |
486 | enum vxge_hw_status __devinit | 1005 | enum vxge_hw_status __devinit |
@@ -492,9 +1011,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
492 | struct vxge_hw_toc_reg __iomem *toc; | 1011 | struct vxge_hw_toc_reg __iomem *toc; |
493 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; | 1012 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; |
494 | struct vxge_hw_common_reg __iomem *common_reg; | 1013 | struct vxge_hw_common_reg __iomem *common_reg; |
495 | struct vxge_hw_vpath_reg __iomem *vpath_reg; | ||
496 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | 1014 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; |
497 | enum vxge_hw_status status; | 1015 | enum vxge_hw_status status; |
1016 | struct __vxge_hw_virtualpath vpath; | ||
498 | 1017 | ||
499 | memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); | 1018 | memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); |
500 | 1019 | ||
@@ -520,7 +1039,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
520 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | 1039 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); |
521 | 1040 | ||
522 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1041 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
523 | |||
524 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | 1042 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) |
525 | continue; | 1043 | continue; |
526 | 1044 | ||
@@ -529,7 +1047,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
529 | vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) | 1047 | vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) |
530 | (bar0 + val64); | 1048 | (bar0 + val64); |
531 | 1049 | ||
532 | hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); | 1050 | hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); |
533 | if (__vxge_hw_device_access_rights_get(hw_info->host_type, | 1051 | if (__vxge_hw_device_access_rights_get(hw_info->host_type, |
534 | hw_info->func_id) & | 1052 | hw_info->func_id) & |
535 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { | 1053 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { |
@@ -545,16 +1063,20 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
545 | 1063 | ||
546 | val64 = readq(&toc->toc_vpath_pointer[i]); | 1064 | val64 = readq(&toc->toc_vpath_pointer[i]); |
547 | 1065 | ||
548 | vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); | 1066 | spin_lock_init(&vpath.lock); |
1067 | vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) | ||
1068 | (bar0 + val64); | ||
1069 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
549 | 1070 | ||
550 | hw_info->function_mode = | 1071 | status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); |
551 | __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); | 1072 | if (status != VXGE_HW_OK) |
1073 | goto exit; | ||
552 | 1074 | ||
553 | status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); | 1075 | status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); |
554 | if (status != VXGE_HW_OK) | 1076 | if (status != VXGE_HW_OK) |
555 | goto exit; | 1077 | goto exit; |
556 | 1078 | ||
557 | status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); | 1079 | status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); |
558 | if (status != VXGE_HW_OK) | 1080 | if (status != VXGE_HW_OK) |
559 | goto exit; | 1081 | goto exit; |
560 | 1082 | ||
@@ -562,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
562 | } | 1084 | } |
563 | 1085 | ||
564 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1086 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
565 | |||
566 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | 1087 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) |
567 | continue; | 1088 | continue; |
568 | 1089 | ||
569 | val64 = readq(&toc->toc_vpath_pointer[i]); | 1090 | val64 = readq(&toc->toc_vpath_pointer[i]); |
570 | vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); | 1091 | vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) |
1092 | (bar0 + val64); | ||
1093 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
571 | 1094 | ||
572 | status = __vxge_hw_vpath_addr_get(i, vpath_reg, | 1095 | status = __vxge_hw_vpath_addr_get(&vpath, |
573 | hw_info->mac_addrs[i], | 1096 | hw_info->mac_addrs[i], |
574 | hw_info->mac_addr_masks[i]); | 1097 | hw_info->mac_addr_masks[i]); |
575 | if (status != VXGE_HW_OK) | 1098 | if (status != VXGE_HW_OK) |
@@ -580,6 +1103,218 @@ exit: | |||
580 | } | 1103 | } |
581 | 1104 | ||
582 | /* | 1105 | /* |
1106 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
1107 | */ | ||
1108 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
1109 | { | ||
1110 | struct __vxge_hw_device *hldev; | ||
1111 | struct list_head *p, *n; | ||
1112 | u16 ret; | ||
1113 | |||
1114 | if (blockpool == NULL) { | ||
1115 | ret = 1; | ||
1116 | goto exit; | ||
1117 | } | ||
1118 | |||
1119 | hldev = blockpool->hldev; | ||
1120 | |||
1121 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
1122 | pci_unmap_single(hldev->pdev, | ||
1123 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
1124 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
1125 | PCI_DMA_BIDIRECTIONAL); | ||
1126 | |||
1127 | vxge_os_dma_free(hldev->pdev, | ||
1128 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
1129 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1130 | |||
1131 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1132 | kfree(p); | ||
1133 | blockpool->pool_size--; | ||
1134 | } | ||
1135 | |||
1136 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
1137 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1138 | kfree((void *)p); | ||
1139 | } | ||
1140 | ret = 0; | ||
1141 | exit: | ||
1142 | return; | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * __vxge_hw_blockpool_create - Create block pool | ||
1147 | */ | ||
1148 | static enum vxge_hw_status | ||
1149 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1150 | struct __vxge_hw_blockpool *blockpool, | ||
1151 | u32 pool_size, | ||
1152 | u32 pool_max) | ||
1153 | { | ||
1154 | u32 i; | ||
1155 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
1156 | void *memblock; | ||
1157 | dma_addr_t dma_addr; | ||
1158 | struct pci_dev *dma_handle; | ||
1159 | struct pci_dev *acc_handle; | ||
1160 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1161 | |||
1162 | if (blockpool == NULL) { | ||
1163 | status = VXGE_HW_FAIL; | ||
1164 | goto blockpool_create_exit; | ||
1165 | } | ||
1166 | |||
1167 | blockpool->hldev = hldev; | ||
1168 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
1169 | blockpool->pool_size = 0; | ||
1170 | blockpool->pool_max = pool_max; | ||
1171 | blockpool->req_out = 0; | ||
1172 | |||
1173 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
1174 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
1175 | |||
1176 | for (i = 0; i < pool_size + pool_max; i++) { | ||
1177 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1178 | GFP_KERNEL); | ||
1179 | if (entry == NULL) { | ||
1180 | __vxge_hw_blockpool_destroy(blockpool); | ||
1181 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1182 | goto blockpool_create_exit; | ||
1183 | } | ||
1184 | list_add(&entry->item, &blockpool->free_entry_list); | ||
1185 | } | ||
1186 | |||
1187 | for (i = 0; i < pool_size; i++) { | ||
1188 | memblock = vxge_os_dma_malloc( | ||
1189 | hldev->pdev, | ||
1190 | VXGE_HW_BLOCK_SIZE, | ||
1191 | &dma_handle, | ||
1192 | &acc_handle); | ||
1193 | if (memblock == NULL) { | ||
1194 | __vxge_hw_blockpool_destroy(blockpool); | ||
1195 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1196 | goto blockpool_create_exit; | ||
1197 | } | ||
1198 | |||
1199 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
1200 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
1201 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
1202 | dma_addr))) { | ||
1203 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
1204 | __vxge_hw_blockpool_destroy(blockpool); | ||
1205 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1206 | goto blockpool_create_exit; | ||
1207 | } | ||
1208 | |||
1209 | if (!list_empty(&blockpool->free_entry_list)) | ||
1210 | entry = (struct __vxge_hw_blockpool_entry *) | ||
1211 | list_first_entry(&blockpool->free_entry_list, | ||
1212 | struct __vxge_hw_blockpool_entry, | ||
1213 | item); | ||
1214 | |||
1215 | if (entry == NULL) | ||
1216 | entry = | ||
1217 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1218 | GFP_KERNEL); | ||
1219 | if (entry != NULL) { | ||
1220 | list_del(&entry->item); | ||
1221 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
1222 | entry->memblock = memblock; | ||
1223 | entry->dma_addr = dma_addr; | ||
1224 | entry->acc_handle = acc_handle; | ||
1225 | entry->dma_handle = dma_handle; | ||
1226 | list_add(&entry->item, | ||
1227 | &blockpool->free_block_list); | ||
1228 | blockpool->pool_size++; | ||
1229 | } else { | ||
1230 | __vxge_hw_blockpool_destroy(blockpool); | ||
1231 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1232 | goto blockpool_create_exit; | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | blockpool_create_exit: | ||
1237 | return status; | ||
1238 | } | ||
1239 | |||
1240 | /* | ||
1241 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | ||
1242 | * Check the fifo configuration | ||
1243 | */ | ||
1244 | static enum vxge_hw_status | ||
1245 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | ||
1246 | { | ||
1247 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | ||
1248 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | ||
1249 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
1250 | |||
1251 | return VXGE_HW_OK; | ||
1252 | } | ||
1253 | |||
1254 | /* | ||
1255 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | ||
1256 | * Check the vpath configuration | ||
1257 | */ | ||
1258 | static enum vxge_hw_status | ||
1259 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | ||
1260 | { | ||
1261 | enum vxge_hw_status status; | ||
1262 | |||
1263 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | ||
1264 | (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) | ||
1265 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | ||
1266 | |||
1267 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | ||
1268 | if (status != VXGE_HW_OK) | ||
1269 | return status; | ||
1270 | |||
1271 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | ||
1272 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1273 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1274 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1275 | |||
1276 | if ((vp_config->rpa_strip_vlan_tag != | ||
1277 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | ||
1278 | (vp_config->rpa_strip_vlan_tag != | ||
1279 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1280 | (vp_config->rpa_strip_vlan_tag != | ||
1281 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1282 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1283 | |||
1284 | return VXGE_HW_OK; | ||
1285 | } | ||
1286 | |||
1287 | /* | ||
1288 | * __vxge_hw_device_config_check - Check device configuration. | ||
1289 | * Check the device configuration | ||
1290 | */ | ||
1291 | static enum vxge_hw_status | ||
1292 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | ||
1293 | { | ||
1294 | u32 i; | ||
1295 | enum vxge_hw_status status; | ||
1296 | |||
1297 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | ||
1298 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | ||
1299 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | ||
1300 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | ||
1301 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1302 | |||
1303 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | ||
1304 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | ||
1305 | return VXGE_HW_BADCFG_RTS_MAC_EN; | ||
1306 | |||
1307 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1308 | status = __vxge_hw_device_vpath_config_check( | ||
1309 | &new_config->vp_config[i]); | ||
1310 | if (status != VXGE_HW_OK) | ||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | return VXGE_HW_OK; | ||
1315 | } | ||
1316 | |||
1317 | /* | ||
583 | * vxge_hw_device_initialize - Initialize Titan device. | 1318 | * vxge_hw_device_initialize - Initialize Titan device. |
584 | * Initialize Titan device. Note that all the arguments of this public API | 1319 | * Initialize Titan device. Note that all the arguments of this public API |
585 | * are 'IN', including @hldev. Driver cooperates with | 1320 | * are 'IN', including @hldev. Driver cooperates with |
@@ -603,14 +1338,12 @@ vxge_hw_device_initialize( | |||
603 | if (status != VXGE_HW_OK) | 1338 | if (status != VXGE_HW_OK) |
604 | goto exit; | 1339 | goto exit; |
605 | 1340 | ||
606 | hldev = (struct __vxge_hw_device *) | 1341 | hldev = vzalloc(sizeof(struct __vxge_hw_device)); |
607 | vmalloc(sizeof(struct __vxge_hw_device)); | ||
608 | if (hldev == NULL) { | 1342 | if (hldev == NULL) { |
609 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 1343 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
610 | goto exit; | 1344 | goto exit; |
611 | } | 1345 | } |
612 | 1346 | ||
613 | memset(hldev, 0, sizeof(struct __vxge_hw_device)); | ||
614 | hldev->magic = VXGE_HW_DEVICE_MAGIC; | 1347 | hldev->magic = VXGE_HW_DEVICE_MAGIC; |
615 | 1348 | ||
616 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); | 1349 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); |
@@ -633,7 +1366,6 @@ vxge_hw_device_initialize( | |||
633 | vfree(hldev); | 1366 | vfree(hldev); |
634 | goto exit; | 1367 | goto exit; |
635 | } | 1368 | } |
636 | __vxge_hw_device_id_get(hldev); | ||
637 | 1369 | ||
638 | __vxge_hw_device_host_info_get(hldev); | 1370 | __vxge_hw_device_host_info_get(hldev); |
639 | 1371 | ||
@@ -641,7 +1373,6 @@ vxge_hw_device_initialize( | |||
641 | nblocks++; | 1373 | nblocks++; |
642 | 1374 | ||
643 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1375 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
644 | |||
645 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | 1376 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) |
646 | continue; | 1377 | continue; |
647 | 1378 | ||
@@ -666,7 +1397,6 @@ vxge_hw_device_initialize( | |||
666 | } | 1397 | } |
667 | 1398 | ||
668 | status = __vxge_hw_device_initialize(hldev); | 1399 | status = __vxge_hw_device_initialize(hldev); |
669 | |||
670 | if (status != VXGE_HW_OK) { | 1400 | if (status != VXGE_HW_OK) { |
671 | vxge_hw_device_terminate(hldev); | 1401 | vxge_hw_device_terminate(hldev); |
672 | goto exit; | 1402 | goto exit; |
@@ -692,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev) | |||
692 | } | 1422 | } |
693 | 1423 | ||
694 | /* | 1424 | /* |
1425 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
1426 | * and offset and perform an operation | ||
1427 | */ | ||
1428 | static enum vxge_hw_status | ||
1429 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
1430 | u32 operation, u32 offset, u64 *stat) | ||
1431 | { | ||
1432 | u64 val64; | ||
1433 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1434 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1435 | |||
1436 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1437 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1438 | goto vpath_stats_access_exit; | ||
1439 | } | ||
1440 | |||
1441 | vp_reg = vpath->vp_reg; | ||
1442 | |||
1443 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
1444 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
1445 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
1446 | |||
1447 | status = __vxge_hw_pio_mem_write64(val64, | ||
1448 | &vp_reg->xmac_stats_access_cmd, | ||
1449 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
1450 | vpath->hldev->config.device_poll_millis); | ||
1451 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
1452 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
1453 | else | ||
1454 | *stat = 0; | ||
1455 | |||
1456 | vpath_stats_access_exit: | ||
1457 | return status; | ||
1458 | } | ||
1459 | |||
1460 | /* | ||
1461 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
1462 | */ | ||
1463 | static enum vxge_hw_status | ||
1464 | __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1465 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
1466 | { | ||
1467 | u64 *val64; | ||
1468 | int i; | ||
1469 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
1470 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1471 | |||
1472 | val64 = (u64 *)vpath_tx_stats; | ||
1473 | |||
1474 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1475 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1476 | goto exit; | ||
1477 | } | ||
1478 | |||
1479 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
1480 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1481 | VXGE_HW_STATS_OP_READ, | ||
1482 | offset, val64); | ||
1483 | if (status != VXGE_HW_OK) | ||
1484 | goto exit; | ||
1485 | offset++; | ||
1486 | val64++; | ||
1487 | } | ||
1488 | exit: | ||
1489 | return status; | ||
1490 | } | ||
1491 | |||
1492 | /* | ||
1493 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
1494 | */ | ||
1495 | static enum vxge_hw_status | ||
1496 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1497 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
1498 | { | ||
1499 | u64 *val64; | ||
1500 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1501 | int i; | ||
1502 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
1503 | val64 = (u64 *) vpath_rx_stats; | ||
1504 | |||
1505 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1506 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1507 | goto exit; | ||
1508 | } | ||
1509 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
1510 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1511 | VXGE_HW_STATS_OP_READ, | ||
1512 | offset >> 3, val64); | ||
1513 | if (status != VXGE_HW_OK) | ||
1514 | goto exit; | ||
1515 | |||
1516 | offset += 8; | ||
1517 | val64++; | ||
1518 | } | ||
1519 | exit: | ||
1520 | return status; | ||
1521 | } | ||
1522 | |||
1523 | /* | ||
1524 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
1525 | */ | ||
1526 | static enum vxge_hw_status | ||
1527 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1528 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
1529 | { | ||
1530 | u64 val64; | ||
1531 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1532 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1533 | |||
1534 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1535 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1536 | goto exit; | ||
1537 | } | ||
1538 | vp_reg = vpath->vp_reg; | ||
1539 | |||
1540 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
1541 | hw_stats->ini_num_mwr_sent = | ||
1542 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
1543 | |||
1544 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
1545 | hw_stats->ini_num_mrd_sent = | ||
1546 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
1547 | |||
1548 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
1549 | hw_stats->ini_num_cpl_rcvd = | ||
1550 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
1551 | |||
1552 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
1553 | hw_stats->ini_num_mwr_byte_sent = | ||
1554 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
1555 | |||
1556 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
1557 | hw_stats->ini_num_cpl_byte_rcvd = | ||
1558 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
1559 | |||
1560 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
1561 | hw_stats->wrcrdtarb_xoff = | ||
1562 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
1563 | |||
1564 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
1565 | hw_stats->rdcrdtarb_xoff = | ||
1566 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
1567 | |||
1568 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1569 | hw_stats->vpath_genstats_count0 = | ||
1570 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
1571 | val64); | ||
1572 | |||
1573 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1574 | hw_stats->vpath_genstats_count1 = | ||
1575 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
1576 | val64); | ||
1577 | |||
1578 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1579 | hw_stats->vpath_genstats_count2 = | ||
1580 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
1581 | val64); | ||
1582 | |||
1583 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1584 | hw_stats->vpath_genstats_count3 = | ||
1585 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
1586 | val64); | ||
1587 | |||
1588 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
1589 | hw_stats->vpath_genstats_count4 = | ||
1590 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
1591 | val64); | ||
1592 | |||
1593 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
1594 | hw_stats->vpath_genstats_count5 = | ||
1595 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
1596 | val64); | ||
1597 | |||
1598 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
1599 | if (status != VXGE_HW_OK) | ||
1600 | goto exit; | ||
1601 | |||
1602 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
1603 | if (status != VXGE_HW_OK) | ||
1604 | goto exit; | ||
1605 | |||
1606 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1607 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
1608 | |||
1609 | hw_stats->prog_event_vnum0 = | ||
1610 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
1611 | |||
1612 | hw_stats->prog_event_vnum1 = | ||
1613 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
1614 | |||
1615 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1616 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
1617 | |||
1618 | hw_stats->prog_event_vnum2 = | ||
1619 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
1620 | |||
1621 | hw_stats->prog_event_vnum3 = | ||
1622 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
1623 | |||
1624 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
1625 | hw_stats->rx_multi_cast_frame_discard = | ||
1626 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
1627 | |||
1628 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
1629 | hw_stats->rx_frm_transferred = | ||
1630 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
1631 | |||
1632 | val64 = readq(&vp_reg->rxd_returned); | ||
1633 | hw_stats->rxd_returned = | ||
1634 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
1635 | |||
1636 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
1637 | hw_stats->rx_mpa_len_fail_frms = | ||
1638 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
1639 | hw_stats->rx_mpa_mrk_fail_frms = | ||
1640 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
1641 | hw_stats->rx_mpa_crc_fail_frms = | ||
1642 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
1643 | |||
1644 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
1645 | hw_stats->rx_permitted_frms = | ||
1646 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
1647 | hw_stats->rx_vp_reset_discarded_frms = | ||
1648 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
1649 | hw_stats->rx_wol_frms = | ||
1650 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
1651 | |||
1652 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
1653 | hw_stats->tx_vp_reset_discarded_frms = | ||
1654 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
1655 | val64); | ||
1656 | exit: | ||
1657 | return status; | ||
1658 | } | ||
1659 | |||
1660 | /* | ||
695 | * vxge_hw_device_stats_get - Get the device hw statistics. | 1661 | * vxge_hw_device_stats_get - Get the device hw statistics. |
696 | * Returns the vpath h/w stats for the device. | 1662 | * Returns the vpath h/w stats for the device. |
697 | */ | 1663 | */ |
@@ -703,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, | |||
703 | enum vxge_hw_status status = VXGE_HW_OK; | 1669 | enum vxge_hw_status status = VXGE_HW_OK; |
704 | 1670 | ||
705 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1671 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
706 | |||
707 | if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || | 1672 | if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || |
708 | (hldev->virtual_paths[i].vp_open == | 1673 | (hldev->virtual_paths[i].vp_open == |
709 | VXGE_HW_VP_NOT_OPEN)) | 1674 | VXGE_HW_VP_NOT_OPEN)) |
@@ -779,7 +1744,7 @@ exit: | |||
779 | * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port | 1744 | * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port |
780 | * Get the Statistics on aggregate port | 1745 | * Get the Statistics on aggregate port |
781 | */ | 1746 | */ |
782 | enum vxge_hw_status | 1747 | static enum vxge_hw_status |
783 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, | 1748 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, |
784 | struct vxge_hw_xmac_aggr_stats *aggr_stats) | 1749 | struct vxge_hw_xmac_aggr_stats *aggr_stats) |
785 | { | 1750 | { |
@@ -814,7 +1779,7 @@ exit: | |||
814 | * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port | 1779 | * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port |
815 | * Get the Statistics on port | 1780 | * Get the Statistics on port |
816 | */ | 1781 | */ |
817 | enum vxge_hw_status | 1782 | static enum vxge_hw_status |
818 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, | 1783 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, |
819 | struct vxge_hw_xmac_port_stats *port_stats) | 1784 | struct vxge_hw_xmac_port_stats *port_stats) |
820 | { | 1785 | { |
@@ -858,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, | |||
858 | 1823 | ||
859 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, | 1824 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, |
860 | 0, &xmac_stats->aggr_stats[0]); | 1825 | 0, &xmac_stats->aggr_stats[0]); |
861 | |||
862 | if (status != VXGE_HW_OK) | 1826 | if (status != VXGE_HW_OK) |
863 | goto exit; | 1827 | goto exit; |
864 | 1828 | ||
@@ -952,20 +1916,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev) | |||
952 | return 0; | 1916 | return 0; |
953 | #endif | 1917 | #endif |
954 | } | 1918 | } |
955 | /* | ||
956 | * vxge_hw_device_debug_mask_get - Get the debug mask | ||
957 | * This routine returns the current debug mask set | ||
958 | */ | ||
959 | u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev) | ||
960 | { | ||
961 | #if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK) | ||
962 | if (hldev == NULL) | ||
963 | return 0; | ||
964 | return hldev->debug_module_mask; | ||
965 | #else | ||
966 | return 0; | ||
967 | #endif | ||
968 | } | ||
969 | 1919 | ||
970 | /* | 1920 | /* |
971 | * vxge_hw_getpause_data -Pause frame frame generation and reception. | 1921 | * vxge_hw_getpause_data -Pause frame frame generation and reception. |
@@ -1006,7 +1956,6 @@ exit: | |||
1006 | * It can be used to set or reset Pause frame generation or reception | 1956 | * It can be used to set or reset Pause frame generation or reception |
1007 | * support of the NIC. | 1957 | * support of the NIC. |
1008 | */ | 1958 | */ |
1009 | |||
1010 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, | 1959 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, |
1011 | u32 port, u32 tx, u32 rx) | 1960 | u32 port, u32 tx, u32 rx) |
1012 | { | 1961 | { |
@@ -1090,7 +2039,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) | |||
1090 | * first block | 2039 | * first block |
1091 | * Returns the dma address of the first RxD block | 2040 | * Returns the dma address of the first RxD block |
1092 | */ | 2041 | */ |
1093 | u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) | 2042 | static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) |
1094 | { | 2043 | { |
1095 | struct vxge_hw_mempool_dma *dma_object; | 2044 | struct vxge_hw_mempool_dma *dma_object; |
1096 | 2045 | ||
@@ -1248,197 +2197,366 @@ exit: | |||
1248 | } | 2197 | } |
1249 | 2198 | ||
1250 | /* | 2199 | /* |
1251 | * __vxge_hw_ring_create - Create a Ring | 2200 | * __vxge_hw_channel_allocate - Allocate memory for channel |
1252 | * This function creates Ring and initializes it. | 2201 | * This function allocates required memory for the channel and various arrays |
1253 | * | 2202 | * in the channel |
1254 | */ | 2203 | */ |
1255 | enum vxge_hw_status | 2204 | static struct __vxge_hw_channel * |
1256 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, | 2205 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, |
1257 | struct vxge_hw_ring_attr *attr) | 2206 | enum __vxge_hw_channel_type type, |
2207 | u32 length, u32 per_dtr_space, | ||
2208 | void *userdata) | ||
1258 | { | 2209 | { |
1259 | enum vxge_hw_status status = VXGE_HW_OK; | 2210 | struct __vxge_hw_channel *channel; |
1260 | struct __vxge_hw_ring *ring; | ||
1261 | u32 ring_length; | ||
1262 | struct vxge_hw_ring_config *config; | ||
1263 | struct __vxge_hw_device *hldev; | 2211 | struct __vxge_hw_device *hldev; |
2212 | int size = 0; | ||
1264 | u32 vp_id; | 2213 | u32 vp_id; |
1265 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1266 | 2214 | ||
1267 | if ((vp == NULL) || (attr == NULL)) { | 2215 | hldev = vph->vpath->hldev; |
2216 | vp_id = vph->vpath->vp_id; | ||
2217 | |||
2218 | switch (type) { | ||
2219 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
2220 | size = sizeof(struct __vxge_hw_fifo); | ||
2221 | break; | ||
2222 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
2223 | size = sizeof(struct __vxge_hw_ring); | ||
2224 | break; | ||
2225 | default: | ||
2226 | break; | ||
2227 | } | ||
2228 | |||
2229 | channel = kzalloc(size, GFP_KERNEL); | ||
2230 | if (channel == NULL) | ||
2231 | goto exit0; | ||
2232 | INIT_LIST_HEAD(&channel->item); | ||
2233 | |||
2234 | channel->common_reg = hldev->common_reg; | ||
2235 | channel->first_vp_id = hldev->first_vp_id; | ||
2236 | channel->type = type; | ||
2237 | channel->devh = hldev; | ||
2238 | channel->vph = vph; | ||
2239 | channel->userdata = userdata; | ||
2240 | channel->per_dtr_space = per_dtr_space; | ||
2241 | channel->length = length; | ||
2242 | channel->vp_id = vp_id; | ||
2243 | |||
2244 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2245 | if (channel->work_arr == NULL) | ||
2246 | goto exit1; | ||
2247 | |||
2248 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2249 | if (channel->free_arr == NULL) | ||
2250 | goto exit1; | ||
2251 | channel->free_ptr = length; | ||
2252 | |||
2253 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2254 | if (channel->reserve_arr == NULL) | ||
2255 | goto exit1; | ||
2256 | channel->reserve_ptr = length; | ||
2257 | channel->reserve_top = 0; | ||
2258 | |||
2259 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2260 | if (channel->orig_arr == NULL) | ||
2261 | goto exit1; | ||
2262 | |||
2263 | return channel; | ||
2264 | exit1: | ||
2265 | __vxge_hw_channel_free(channel); | ||
2266 | |||
2267 | exit0: | ||
2268 | return NULL; | ||
2269 | } | ||
2270 | |||
2271 | /* | ||
2272 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
2273 | * Adds a block to block pool | ||
2274 | */ | ||
2275 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
2276 | void *block_addr, | ||
2277 | u32 length, | ||
2278 | struct pci_dev *dma_h, | ||
2279 | struct pci_dev *acc_handle) | ||
2280 | { | ||
2281 | struct __vxge_hw_blockpool *blockpool; | ||
2282 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2283 | dma_addr_t dma_addr; | ||
2284 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2285 | u32 req_out; | ||
2286 | |||
2287 | blockpool = &devh->block_pool; | ||
2288 | |||
2289 | if (block_addr == NULL) { | ||
2290 | blockpool->req_out--; | ||
1268 | status = VXGE_HW_FAIL; | 2291 | status = VXGE_HW_FAIL; |
1269 | goto exit; | 2292 | goto exit; |
1270 | } | 2293 | } |
1271 | 2294 | ||
1272 | hldev = vp->vpath->hldev; | 2295 | dma_addr = pci_map_single(devh->pdev, block_addr, length, |
1273 | vp_id = vp->vpath->vp_id; | 2296 | PCI_DMA_BIDIRECTIONAL); |
1274 | 2297 | ||
1275 | config = &hldev->config.vp_config[vp_id].ring; | 2298 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { |
2299 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
2300 | blockpool->req_out--; | ||
2301 | status = VXGE_HW_FAIL; | ||
2302 | goto exit; | ||
2303 | } | ||
1276 | 2304 | ||
1277 | ring_length = config->ring_blocks * | 2305 | if (!list_empty(&blockpool->free_entry_list)) |
1278 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2306 | entry = (struct __vxge_hw_blockpool_entry *) |
2307 | list_first_entry(&blockpool->free_entry_list, | ||
2308 | struct __vxge_hw_blockpool_entry, | ||
2309 | item); | ||
1279 | 2310 | ||
1280 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | 2311 | if (entry == NULL) |
1281 | VXGE_HW_CHANNEL_TYPE_RING, | 2312 | entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); |
1282 | ring_length, | 2313 | else |
1283 | attr->per_rxd_space, | 2314 | list_del(&entry->item); |
1284 | attr->userdata); | ||
1285 | 2315 | ||
1286 | if (ring == NULL) { | 2316 | if (entry != NULL) { |
2317 | entry->length = length; | ||
2318 | entry->memblock = block_addr; | ||
2319 | entry->dma_addr = dma_addr; | ||
2320 | entry->acc_handle = acc_handle; | ||
2321 | entry->dma_handle = dma_h; | ||
2322 | list_add(&entry->item, &blockpool->free_block_list); | ||
2323 | blockpool->pool_size++; | ||
2324 | status = VXGE_HW_OK; | ||
2325 | } else | ||
1287 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2326 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1288 | goto exit; | ||
1289 | } | ||
1290 | 2327 | ||
1291 | vp->vpath->ringh = ring; | 2328 | blockpool->req_out--; |
1292 | ring->vp_id = vp_id; | ||
1293 | ring->vp_reg = vp->vpath->vp_reg; | ||
1294 | ring->common_reg = hldev->common_reg; | ||
1295 | ring->stats = &vp->vpath->sw_stats->ring_stats; | ||
1296 | ring->config = config; | ||
1297 | ring->callback = attr->callback; | ||
1298 | ring->rxd_init = attr->rxd_init; | ||
1299 | ring->rxd_term = attr->rxd_term; | ||
1300 | ring->buffer_mode = config->buffer_mode; | ||
1301 | ring->rxds_limit = config->rxds_limit; | ||
1302 | 2329 | ||
1303 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | 2330 | req_out = blockpool->req_out; |
1304 | ring->rxd_priv_size = | 2331 | exit: |
1305 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | 2332 | return; |
1306 | ring->per_rxd_space = attr->per_rxd_space; | 2333 | } |
1307 | 2334 | ||
1308 | ring->rxd_priv_size = | 2335 | static inline void |
1309 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | 2336 | vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) |
1310 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | 2337 | { |
2338 | gfp_t flags; | ||
2339 | void *vaddr; | ||
1311 | 2340 | ||
1312 | /* how many RxDs can fit into one block. Depends on configured | 2341 | if (in_interrupt()) |
1313 | * buffer_mode. */ | 2342 | flags = GFP_ATOMIC | GFP_DMA; |
1314 | ring->rxds_per_block = | 2343 | else |
1315 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2344 | flags = GFP_KERNEL | GFP_DMA; |
1316 | 2345 | ||
1317 | /* calculate actual RxD block private size */ | 2346 | vaddr = kmalloc((size), flags); |
1318 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
1319 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
1320 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
1321 | VXGE_HW_BLOCK_SIZE, | ||
1322 | VXGE_HW_BLOCK_SIZE, | ||
1323 | ring->rxdblock_priv_size, | ||
1324 | ring->config->ring_blocks, | ||
1325 | ring->config->ring_blocks, | ||
1326 | &ring_mp_callback, | ||
1327 | ring); | ||
1328 | 2347 | ||
1329 | if (ring->mempool == NULL) { | 2348 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); |
1330 | __vxge_hw_ring_delete(vp); | 2349 | } |
1331 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1332 | } | ||
1333 | 2350 | ||
1334 | status = __vxge_hw_channel_initialize(&ring->channel); | 2351 | /* |
1335 | if (status != VXGE_HW_OK) { | 2352 | * __vxge_hw_blockpool_blocks_add - Request additional blocks |
1336 | __vxge_hw_ring_delete(vp); | 2353 | */ |
1337 | goto exit; | 2354 | static |
2355 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
2356 | { | ||
2357 | u32 nreq = 0, i; | ||
2358 | |||
2359 | if ((blockpool->pool_size + blockpool->req_out) < | ||
2360 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
2361 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
2362 | blockpool->req_out += nreq; | ||
1338 | } | 2363 | } |
1339 | 2364 | ||
1340 | /* Note: | 2365 | for (i = 0; i < nreq; i++) |
1341 | * Specifying rxd_init callback means two things: | 2366 | vxge_os_dma_malloc_async( |
1342 | * 1) rxds need to be initialized by driver at channel-open time; | 2367 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1343 | * 2) rxds need to be posted at channel-open time | 2368 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); |
1344 | * (that's what the initial_replenish() below does) | 2369 | } |
1345 | * Currently we don't have a case when the 1) is done without the 2). | 2370 | |
1346 | */ | 2371 | /* |
1347 | if (ring->rxd_init) { | 2372 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool |
1348 | status = vxge_hw_ring_replenish(ring); | 2373 | * Allocates a block of memory of given size, either from block pool |
1349 | if (status != VXGE_HW_OK) { | 2374 | * or by calling vxge_os_dma_malloc() |
1350 | __vxge_hw_ring_delete(vp); | 2375 | */ |
2376 | static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
2377 | struct vxge_hw_mempool_dma *dma_object) | ||
2378 | { | ||
2379 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2380 | struct __vxge_hw_blockpool *blockpool; | ||
2381 | void *memblock = NULL; | ||
2382 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2383 | |||
2384 | blockpool = &devh->block_pool; | ||
2385 | |||
2386 | if (size != blockpool->block_size) { | ||
2387 | |||
2388 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
2389 | &dma_object->handle, | ||
2390 | &dma_object->acc_handle); | ||
2391 | |||
2392 | if (memblock == NULL) { | ||
2393 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1351 | goto exit; | 2394 | goto exit; |
1352 | } | 2395 | } |
1353 | } | ||
1354 | 2396 | ||
1355 | /* initial replenish will increment the counter in its post() routine, | 2397 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, |
1356 | * we have to reset it */ | 2398 | PCI_DMA_BIDIRECTIONAL); |
1357 | ring->stats->common_stats.usage_cnt = 0; | 2399 | |
2400 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
2401 | dma_object->addr))) { | ||
2402 | vxge_os_dma_free(devh->pdev, memblock, | ||
2403 | &dma_object->acc_handle); | ||
2404 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2405 | goto exit; | ||
2406 | } | ||
2407 | |||
2408 | } else { | ||
2409 | |||
2410 | if (!list_empty(&blockpool->free_block_list)) | ||
2411 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2412 | list_first_entry(&blockpool->free_block_list, | ||
2413 | struct __vxge_hw_blockpool_entry, | ||
2414 | item); | ||
2415 | |||
2416 | if (entry != NULL) { | ||
2417 | list_del(&entry->item); | ||
2418 | dma_object->addr = entry->dma_addr; | ||
2419 | dma_object->handle = entry->dma_handle; | ||
2420 | dma_object->acc_handle = entry->acc_handle; | ||
2421 | memblock = entry->memblock; | ||
2422 | |||
2423 | list_add(&entry->item, | ||
2424 | &blockpool->free_entry_list); | ||
2425 | blockpool->pool_size--; | ||
2426 | } | ||
2427 | |||
2428 | if (memblock != NULL) | ||
2429 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
2430 | } | ||
1358 | exit: | 2431 | exit: |
1359 | return status; | 2432 | return memblock; |
1360 | } | 2433 | } |
1361 | 2434 | ||
1362 | /* | 2435 | /* |
1363 | * __vxge_hw_ring_abort - Returns the RxD | 2436 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks |
1364 | * This function terminates the RxDs of ring | ||
1365 | */ | 2437 | */ |
1366 | enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) | 2438 | static void |
2439 | __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
1367 | { | 2440 | { |
1368 | void *rxdh; | 2441 | struct list_head *p, *n; |
1369 | struct __vxge_hw_channel *channel; | ||
1370 | |||
1371 | channel = &ring->channel; | ||
1372 | 2442 | ||
1373 | for (;;) { | 2443 | list_for_each_safe(p, n, &blockpool->free_block_list) { |
1374 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1375 | 2444 | ||
1376 | if (rxdh == NULL) | 2445 | if (blockpool->pool_size < blockpool->pool_max) |
1377 | break; | 2446 | break; |
1378 | 2447 | ||
1379 | vxge_hw_channel_dtr_complete(channel); | 2448 | pci_unmap_single( |
2449 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2450 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
2451 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
2452 | PCI_DMA_BIDIRECTIONAL); | ||
1380 | 2453 | ||
1381 | if (ring->rxd_term) | 2454 | vxge_os_dma_free( |
1382 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | 2455 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1383 | channel->userdata); | 2456 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, |
2457 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1384 | 2458 | ||
1385 | vxge_hw_channel_dtr_free(channel, rxdh); | 2459 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); |
1386 | } | ||
1387 | 2460 | ||
1388 | return VXGE_HW_OK; | 2461 | list_add(p, &blockpool->free_entry_list); |
2462 | |||
2463 | blockpool->pool_size--; | ||
2464 | |||
2465 | } | ||
1389 | } | 2466 | } |
1390 | 2467 | ||
1391 | /* | 2468 | /* |
1392 | * __vxge_hw_ring_reset - Resets the ring | 2469 | * __vxge_hw_blockpool_free - Frees the memory allcoated with |
1393 | * This function resets the ring during vpath reset operation | 2470 | * __vxge_hw_blockpool_malloc |
1394 | */ | 2471 | */ |
1395 | enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | 2472 | static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, |
2473 | void *memblock, u32 size, | ||
2474 | struct vxge_hw_mempool_dma *dma_object) | ||
1396 | { | 2475 | { |
2476 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2477 | struct __vxge_hw_blockpool *blockpool; | ||
1397 | enum vxge_hw_status status = VXGE_HW_OK; | 2478 | enum vxge_hw_status status = VXGE_HW_OK; |
1398 | struct __vxge_hw_channel *channel; | ||
1399 | 2479 | ||
1400 | channel = &ring->channel; | 2480 | blockpool = &devh->block_pool; |
1401 | 2481 | ||
1402 | __vxge_hw_ring_abort(ring); | 2482 | if (size != blockpool->block_size) { |
2483 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
2484 | PCI_DMA_BIDIRECTIONAL); | ||
2485 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
2486 | } else { | ||
1403 | 2487 | ||
1404 | status = __vxge_hw_channel_reset(channel); | 2488 | if (!list_empty(&blockpool->free_entry_list)) |
2489 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2490 | list_first_entry(&blockpool->free_entry_list, | ||
2491 | struct __vxge_hw_blockpool_entry, | ||
2492 | item); | ||
1405 | 2493 | ||
1406 | if (status != VXGE_HW_OK) | 2494 | if (entry == NULL) |
1407 | goto exit; | 2495 | entry = vmalloc(sizeof( |
2496 | struct __vxge_hw_blockpool_entry)); | ||
2497 | else | ||
2498 | list_del(&entry->item); | ||
1408 | 2499 | ||
1409 | if (ring->rxd_init) { | 2500 | if (entry != NULL) { |
1410 | status = vxge_hw_ring_replenish(ring); | 2501 | entry->length = size; |
1411 | if (status != VXGE_HW_OK) | 2502 | entry->memblock = memblock; |
1412 | goto exit; | 2503 | entry->dma_addr = dma_object->addr; |
2504 | entry->acc_handle = dma_object->acc_handle; | ||
2505 | entry->dma_handle = dma_object->handle; | ||
2506 | list_add(&entry->item, | ||
2507 | &blockpool->free_block_list); | ||
2508 | blockpool->pool_size++; | ||
2509 | status = VXGE_HW_OK; | ||
2510 | } else | ||
2511 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2512 | |||
2513 | if (status == VXGE_HW_OK) | ||
2514 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
1413 | } | 2515 | } |
1414 | exit: | ||
1415 | return status; | ||
1416 | } | 2516 | } |
1417 | 2517 | ||
1418 | /* | 2518 | /* |
1419 | * __vxge_hw_ring_delete - Removes the ring | 2519 | * vxge_hw_mempool_destroy |
1420 | * This function freeup the memory pool and removes the ring | ||
1421 | */ | 2520 | */ |
1422 | enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) | 2521 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) |
1423 | { | 2522 | { |
1424 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | 2523 | u32 i, j; |
2524 | struct __vxge_hw_device *devh = mempool->devh; | ||
1425 | 2525 | ||
1426 | __vxge_hw_ring_abort(ring); | 2526 | for (i = 0; i < mempool->memblocks_allocated; i++) { |
2527 | struct vxge_hw_mempool_dma *dma_object; | ||
1427 | 2528 | ||
1428 | if (ring->mempool) | 2529 | vxge_assert(mempool->memblocks_arr[i]); |
1429 | __vxge_hw_mempool_destroy(ring->mempool); | 2530 | vxge_assert(mempool->memblocks_dma_arr + i); |
1430 | 2531 | ||
1431 | vp->vpath->ringh = NULL; | 2532 | dma_object = mempool->memblocks_dma_arr + i; |
1432 | __vxge_hw_channel_free(&ring->channel); | ||
1433 | 2533 | ||
1434 | return VXGE_HW_OK; | 2534 | for (j = 0; j < mempool->items_per_memblock; j++) { |
2535 | u32 index = i * mempool->items_per_memblock + j; | ||
2536 | |||
2537 | /* to skip last partially filled(if any) memblock */ | ||
2538 | if (index >= mempool->items_current) | ||
2539 | break; | ||
2540 | } | ||
2541 | |||
2542 | vfree(mempool->memblocks_priv_arr[i]); | ||
2543 | |||
2544 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | ||
2545 | mempool->memblock_size, dma_object); | ||
2546 | } | ||
2547 | |||
2548 | vfree(mempool->items_arr); | ||
2549 | vfree(mempool->memblocks_dma_arr); | ||
2550 | vfree(mempool->memblocks_priv_arr); | ||
2551 | vfree(mempool->memblocks_arr); | ||
2552 | vfree(mempool); | ||
1435 | } | 2553 | } |
1436 | 2554 | ||
1437 | /* | 2555 | /* |
1438 | * __vxge_hw_mempool_grow | 2556 | * __vxge_hw_mempool_grow |
1439 | * Will resize mempool up to %num_allocate value. | 2557 | * Will resize mempool up to %num_allocate value. |
1440 | */ | 2558 | */ |
1441 | enum vxge_hw_status | 2559 | static enum vxge_hw_status |
1442 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | 2560 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, |
1443 | u32 *num_allocated) | 2561 | u32 *num_allocated) |
1444 | { | 2562 | { |
@@ -1468,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | |||
1468 | * allocate new memblock and its private part at once. | 2586 | * allocate new memblock and its private part at once. |
1469 | * This helps to minimize memory usage a lot. */ | 2587 | * This helps to minimize memory usage a lot. */ |
1470 | mempool->memblocks_priv_arr[i] = | 2588 | mempool->memblocks_priv_arr[i] = |
1471 | vmalloc(mempool->items_priv_size * n_items); | 2589 | vzalloc(mempool->items_priv_size * n_items); |
1472 | if (mempool->memblocks_priv_arr[i] == NULL) { | 2590 | if (mempool->memblocks_priv_arr[i] == NULL) { |
1473 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2591 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1474 | goto exit; | 2592 | goto exit; |
1475 | } | 2593 | } |
1476 | 2594 | ||
1477 | memset(mempool->memblocks_priv_arr[i], 0, | ||
1478 | mempool->items_priv_size * n_items); | ||
1479 | |||
1480 | /* allocate DMA-capable memblock */ | 2595 | /* allocate DMA-capable memblock */ |
1481 | mempool->memblocks_arr[i] = | 2596 | mempool->memblocks_arr[i] = |
1482 | __vxge_hw_blockpool_malloc(mempool->devh, | 2597 | __vxge_hw_blockpool_malloc(mempool->devh, |
@@ -1527,16 +2642,15 @@ exit: | |||
1527 | * with size enough to hold %items_initial number of items. Memory is | 2642 | * with size enough to hold %items_initial number of items. Memory is |
1528 | * DMA-able but client must map/unmap before interoperating with the device. | 2643 | * DMA-able but client must map/unmap before interoperating with the device. |
1529 | */ | 2644 | */ |
1530 | struct vxge_hw_mempool* | 2645 | static struct vxge_hw_mempool * |
1531 | __vxge_hw_mempool_create( | 2646 | __vxge_hw_mempool_create(struct __vxge_hw_device *devh, |
1532 | struct __vxge_hw_device *devh, | 2647 | u32 memblock_size, |
1533 | u32 memblock_size, | 2648 | u32 item_size, |
1534 | u32 item_size, | 2649 | u32 items_priv_size, |
1535 | u32 items_priv_size, | 2650 | u32 items_initial, |
1536 | u32 items_initial, | 2651 | u32 items_max, |
1537 | u32 items_max, | 2652 | struct vxge_hw_mempool_cbs *mp_callback, |
1538 | struct vxge_hw_mempool_cbs *mp_callback, | 2653 | void *userdata) |
1539 | void *userdata) | ||
1540 | { | 2654 | { |
1541 | enum vxge_hw_status status = VXGE_HW_OK; | 2655 | enum vxge_hw_status status = VXGE_HW_OK; |
1542 | u32 memblocks_to_allocate; | 2656 | u32 memblocks_to_allocate; |
@@ -1548,13 +2662,11 @@ __vxge_hw_mempool_create( | |||
1548 | goto exit; | 2662 | goto exit; |
1549 | } | 2663 | } |
1550 | 2664 | ||
1551 | mempool = (struct vxge_hw_mempool *) | 2665 | mempool = vzalloc(sizeof(struct vxge_hw_mempool)); |
1552 | vmalloc(sizeof(struct vxge_hw_mempool)); | ||
1553 | if (mempool == NULL) { | 2666 | if (mempool == NULL) { |
1554 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2667 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1555 | goto exit; | 2668 | goto exit; |
1556 | } | 2669 | } |
1557 | memset(mempool, 0, sizeof(struct vxge_hw_mempool)); | ||
1558 | 2670 | ||
1559 | mempool->devh = devh; | 2671 | mempool->devh = devh; |
1560 | mempool->memblock_size = memblock_size; | 2672 | mempool->memblock_size = memblock_size; |
@@ -1574,53 +2686,43 @@ __vxge_hw_mempool_create( | |||
1574 | 2686 | ||
1575 | /* allocate array of memblocks */ | 2687 | /* allocate array of memblocks */ |
1576 | mempool->memblocks_arr = | 2688 | mempool->memblocks_arr = |
1577 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | 2689 | vzalloc(sizeof(void *) * mempool->memblocks_max); |
1578 | if (mempool->memblocks_arr == NULL) { | 2690 | if (mempool->memblocks_arr == NULL) { |
1579 | __vxge_hw_mempool_destroy(mempool); | 2691 | __vxge_hw_mempool_destroy(mempool); |
1580 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2692 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1581 | mempool = NULL; | 2693 | mempool = NULL; |
1582 | goto exit; | 2694 | goto exit; |
1583 | } | 2695 | } |
1584 | memset(mempool->memblocks_arr, 0, | ||
1585 | sizeof(void *) * mempool->memblocks_max); | ||
1586 | 2696 | ||
1587 | /* allocate array of private parts of items per memblocks */ | 2697 | /* allocate array of private parts of items per memblocks */ |
1588 | mempool->memblocks_priv_arr = | 2698 | mempool->memblocks_priv_arr = |
1589 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | 2699 | vzalloc(sizeof(void *) * mempool->memblocks_max); |
1590 | if (mempool->memblocks_priv_arr == NULL) { | 2700 | if (mempool->memblocks_priv_arr == NULL) { |
1591 | __vxge_hw_mempool_destroy(mempool); | 2701 | __vxge_hw_mempool_destroy(mempool); |
1592 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2702 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1593 | mempool = NULL; | 2703 | mempool = NULL; |
1594 | goto exit; | 2704 | goto exit; |
1595 | } | 2705 | } |
1596 | memset(mempool->memblocks_priv_arr, 0, | ||
1597 | sizeof(void *) * mempool->memblocks_max); | ||
1598 | 2706 | ||
1599 | /* allocate array of memblocks DMA objects */ | 2707 | /* allocate array of memblocks DMA objects */ |
1600 | mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) | 2708 | mempool->memblocks_dma_arr = |
1601 | vmalloc(sizeof(struct vxge_hw_mempool_dma) * | 2709 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * |
1602 | mempool->memblocks_max); | 2710 | mempool->memblocks_max); |
1603 | |||
1604 | if (mempool->memblocks_dma_arr == NULL) { | 2711 | if (mempool->memblocks_dma_arr == NULL) { |
1605 | __vxge_hw_mempool_destroy(mempool); | 2712 | __vxge_hw_mempool_destroy(mempool); |
1606 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2713 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1607 | mempool = NULL; | 2714 | mempool = NULL; |
1608 | goto exit; | 2715 | goto exit; |
1609 | } | 2716 | } |
1610 | memset(mempool->memblocks_dma_arr, 0, | ||
1611 | sizeof(struct vxge_hw_mempool_dma) * | ||
1612 | mempool->memblocks_max); | ||
1613 | 2717 | ||
1614 | /* allocate hash array of items */ | 2718 | /* allocate hash array of items */ |
1615 | mempool->items_arr = | 2719 | mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); |
1616 | (void **) vmalloc(sizeof(void *) * mempool->items_max); | ||
1617 | if (mempool->items_arr == NULL) { | 2720 | if (mempool->items_arr == NULL) { |
1618 | __vxge_hw_mempool_destroy(mempool); | 2721 | __vxge_hw_mempool_destroy(mempool); |
1619 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2722 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1620 | mempool = NULL; | 2723 | mempool = NULL; |
1621 | goto exit; | 2724 | goto exit; |
1622 | } | 2725 | } |
1623 | memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); | ||
1624 | 2726 | ||
1625 | /* calculate initial number of memblocks */ | 2727 | /* calculate initial number of memblocks */ |
1626 | memblocks_to_allocate = (mempool->items_initial + | 2728 | memblocks_to_allocate = (mempool->items_initial + |
@@ -1642,122 +2744,190 @@ exit: | |||
1642 | } | 2744 | } |
1643 | 2745 | ||
1644 | /* | 2746 | /* |
1645 | * vxge_hw_mempool_destroy | 2747 | * __vxge_hw_ring_abort - Returns the RxD |
2748 | * This function terminates the RxDs of ring | ||
1646 | */ | 2749 | */ |
1647 | void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) | 2750 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) |
1648 | { | 2751 | { |
1649 | u32 i, j; | 2752 | void *rxdh; |
1650 | struct __vxge_hw_device *devh = mempool->devh; | 2753 | struct __vxge_hw_channel *channel; |
1651 | |||
1652 | for (i = 0; i < mempool->memblocks_allocated; i++) { | ||
1653 | struct vxge_hw_mempool_dma *dma_object; | ||
1654 | 2754 | ||
1655 | vxge_assert(mempool->memblocks_arr[i]); | 2755 | channel = &ring->channel; |
1656 | vxge_assert(mempool->memblocks_dma_arr + i); | ||
1657 | 2756 | ||
1658 | dma_object = mempool->memblocks_dma_arr + i; | 2757 | for (;;) { |
2758 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1659 | 2759 | ||
1660 | for (j = 0; j < mempool->items_per_memblock; j++) { | 2760 | if (rxdh == NULL) |
1661 | u32 index = i * mempool->items_per_memblock + j; | 2761 | break; |
1662 | 2762 | ||
1663 | /* to skip last partially filled(if any) memblock */ | 2763 | vxge_hw_channel_dtr_complete(channel); |
1664 | if (index >= mempool->items_current) | ||
1665 | break; | ||
1666 | } | ||
1667 | 2764 | ||
1668 | vfree(mempool->memblocks_priv_arr[i]); | 2765 | if (ring->rxd_term) |
2766 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | ||
2767 | channel->userdata); | ||
1669 | 2768 | ||
1670 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | 2769 | vxge_hw_channel_dtr_free(channel, rxdh); |
1671 | mempool->memblock_size, dma_object); | ||
1672 | } | 2770 | } |
1673 | 2771 | ||
1674 | vfree(mempool->items_arr); | 2772 | return VXGE_HW_OK; |
2773 | } | ||
1675 | 2774 | ||
1676 | vfree(mempool->memblocks_dma_arr); | 2775 | /* |
2776 | * __vxge_hw_ring_reset - Resets the ring | ||
2777 | * This function resets the ring during vpath reset operation | ||
2778 | */ | ||
2779 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | ||
2780 | { | ||
2781 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2782 | struct __vxge_hw_channel *channel; | ||
1677 | 2783 | ||
1678 | vfree(mempool->memblocks_priv_arr); | 2784 | channel = &ring->channel; |
1679 | 2785 | ||
1680 | vfree(mempool->memblocks_arr); | 2786 | __vxge_hw_ring_abort(ring); |
1681 | 2787 | ||
1682 | vfree(mempool); | 2788 | status = __vxge_hw_channel_reset(channel); |
2789 | |||
2790 | if (status != VXGE_HW_OK) | ||
2791 | goto exit; | ||
2792 | |||
2793 | if (ring->rxd_init) { | ||
2794 | status = vxge_hw_ring_replenish(ring); | ||
2795 | if (status != VXGE_HW_OK) | ||
2796 | goto exit; | ||
2797 | } | ||
2798 | exit: | ||
2799 | return status; | ||
1683 | } | 2800 | } |
1684 | 2801 | ||
1685 | /* | 2802 | /* |
1686 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | 2803 | * __vxge_hw_ring_delete - Removes the ring |
1687 | * Check the fifo configuration | 2804 | * This function freeup the memory pool and removes the ring |
1688 | */ | 2805 | */ |
1689 | enum vxge_hw_status | 2806 | static enum vxge_hw_status |
1690 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | 2807 | __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) |
1691 | { | 2808 | { |
1692 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | 2809 | struct __vxge_hw_ring *ring = vp->vpath->ringh; |
1693 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | 2810 | |
1694 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | 2811 | __vxge_hw_ring_abort(ring); |
2812 | |||
2813 | if (ring->mempool) | ||
2814 | __vxge_hw_mempool_destroy(ring->mempool); | ||
2815 | |||
2816 | vp->vpath->ringh = NULL; | ||
2817 | __vxge_hw_channel_free(&ring->channel); | ||
1695 | 2818 | ||
1696 | return VXGE_HW_OK; | 2819 | return VXGE_HW_OK; |
1697 | } | 2820 | } |
1698 | 2821 | ||
1699 | /* | 2822 | /* |
1700 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | 2823 | * __vxge_hw_ring_create - Create a Ring |
1701 | * Check the vpath configuration | 2824 | * This function creates Ring and initializes it. |
1702 | */ | 2825 | */ |
1703 | enum vxge_hw_status | 2826 | static enum vxge_hw_status |
1704 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | 2827 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, |
2828 | struct vxge_hw_ring_attr *attr) | ||
1705 | { | 2829 | { |
1706 | enum vxge_hw_status status; | 2830 | enum vxge_hw_status status = VXGE_HW_OK; |
2831 | struct __vxge_hw_ring *ring; | ||
2832 | u32 ring_length; | ||
2833 | struct vxge_hw_ring_config *config; | ||
2834 | struct __vxge_hw_device *hldev; | ||
2835 | u32 vp_id; | ||
2836 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1707 | 2837 | ||
1708 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | 2838 | if ((vp == NULL) || (attr == NULL)) { |
1709 | (vp_config->min_bandwidth > | 2839 | status = VXGE_HW_FAIL; |
1710 | VXGE_HW_VPATH_BANDWIDTH_MAX)) | 2840 | goto exit; |
1711 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | 2841 | } |
1712 | 2842 | ||
1713 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | 2843 | hldev = vp->vpath->hldev; |
1714 | if (status != VXGE_HW_OK) | 2844 | vp_id = vp->vpath->vp_id; |
1715 | return status; | ||
1716 | 2845 | ||
1717 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | 2846 | config = &hldev->config.vp_config[vp_id].ring; |
1718 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1719 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1720 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1721 | 2847 | ||
1722 | if ((vp_config->rpa_strip_vlan_tag != | 2848 | ring_length = config->ring_blocks * |
1723 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | 2849 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
1724 | (vp_config->rpa_strip_vlan_tag != | ||
1725 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1726 | (vp_config->rpa_strip_vlan_tag != | ||
1727 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1728 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1729 | 2850 | ||
1730 | return VXGE_HW_OK; | 2851 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, |
1731 | } | 2852 | VXGE_HW_CHANNEL_TYPE_RING, |
2853 | ring_length, | ||
2854 | attr->per_rxd_space, | ||
2855 | attr->userdata); | ||
2856 | if (ring == NULL) { | ||
2857 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2858 | goto exit; | ||
2859 | } | ||
1732 | 2860 | ||
1733 | /* | 2861 | vp->vpath->ringh = ring; |
1734 | * __vxge_hw_device_config_check - Check device configuration. | 2862 | ring->vp_id = vp_id; |
1735 | * Check the device configuration | 2863 | ring->vp_reg = vp->vpath->vp_reg; |
1736 | */ | 2864 | ring->common_reg = hldev->common_reg; |
1737 | enum vxge_hw_status | 2865 | ring->stats = &vp->vpath->sw_stats->ring_stats; |
1738 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | 2866 | ring->config = config; |
1739 | { | 2867 | ring->callback = attr->callback; |
1740 | u32 i; | 2868 | ring->rxd_init = attr->rxd_init; |
1741 | enum vxge_hw_status status; | 2869 | ring->rxd_term = attr->rxd_term; |
2870 | ring->buffer_mode = config->buffer_mode; | ||
2871 | ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; | ||
2872 | ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; | ||
2873 | ring->rxds_limit = config->rxds_limit; | ||
1742 | 2874 | ||
1743 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | 2875 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); |
1744 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | 2876 | ring->rxd_priv_size = |
1745 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | 2877 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; |
1746 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | 2878 | ring->per_rxd_space = attr->per_rxd_space; |
1747 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1748 | 2879 | ||
1749 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | 2880 | ring->rxd_priv_size = |
1750 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | 2881 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / |
1751 | return VXGE_HW_BADCFG_RTS_MAC_EN; | 2882 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; |
1752 | 2883 | ||
1753 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2884 | /* how many RxDs can fit into one block. Depends on configured |
1754 | status = __vxge_hw_device_vpath_config_check( | 2885 | * buffer_mode. */ |
1755 | &new_config->vp_config[i]); | 2886 | ring->rxds_per_block = |
1756 | if (status != VXGE_HW_OK) | 2887 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
1757 | return status; | 2888 | |
2889 | /* calculate actual RxD block private size */ | ||
2890 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
2891 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
2892 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
2893 | VXGE_HW_BLOCK_SIZE, | ||
2894 | VXGE_HW_BLOCK_SIZE, | ||
2895 | ring->rxdblock_priv_size, | ||
2896 | ring->config->ring_blocks, | ||
2897 | ring->config->ring_blocks, | ||
2898 | &ring_mp_callback, | ||
2899 | ring); | ||
2900 | if (ring->mempool == NULL) { | ||
2901 | __vxge_hw_ring_delete(vp); | ||
2902 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1758 | } | 2903 | } |
1759 | 2904 | ||
1760 | return VXGE_HW_OK; | 2905 | status = __vxge_hw_channel_initialize(&ring->channel); |
2906 | if (status != VXGE_HW_OK) { | ||
2907 | __vxge_hw_ring_delete(vp); | ||
2908 | goto exit; | ||
2909 | } | ||
2910 | |||
2911 | /* Note: | ||
2912 | * Specifying rxd_init callback means two things: | ||
2913 | * 1) rxds need to be initialized by driver at channel-open time; | ||
2914 | * 2) rxds need to be posted at channel-open time | ||
2915 | * (that's what the initial_replenish() below does) | ||
2916 | * Currently we don't have a case when the 1) is done without the 2). | ||
2917 | */ | ||
2918 | if (ring->rxd_init) { | ||
2919 | status = vxge_hw_ring_replenish(ring); | ||
2920 | if (status != VXGE_HW_OK) { | ||
2921 | __vxge_hw_ring_delete(vp); | ||
2922 | goto exit; | ||
2923 | } | ||
2924 | } | ||
2925 | |||
2926 | /* initial replenish will increment the counter in its post() routine, | ||
2927 | * we have to reset it */ | ||
2928 | ring->stats->common_stats.usage_cnt = 0; | ||
2929 | exit: | ||
2930 | return status; | ||
1761 | } | 2931 | } |
1762 | 2932 | ||
1763 | /* | 2933 | /* |
@@ -1779,7 +2949,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
1779 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | 2949 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; |
1780 | 2950 | ||
1781 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2951 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
1782 | |||
1783 | device_config->vp_config[i].vp_id = i; | 2952 | device_config->vp_config[i].vp_id = i; |
1784 | 2953 | ||
1785 | device_config->vp_config[i].min_bandwidth = | 2954 | device_config->vp_config[i].min_bandwidth = |
@@ -1919,65 +3088,10 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
1919 | } | 3088 | } |
1920 | 3089 | ||
1921 | /* | 3090 | /* |
1922 | * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion. | ||
1923 | * Set the swapper bits appropriately for the lagacy section. | ||
1924 | */ | ||
1925 | enum vxge_hw_status | ||
1926 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
1927 | { | ||
1928 | u64 val64; | ||
1929 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1930 | |||
1931 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
1932 | |||
1933 | wmb(); | ||
1934 | |||
1935 | switch (val64) { | ||
1936 | |||
1937 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
1938 | return status; | ||
1939 | |||
1940 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
1941 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
1942 | &legacy_reg->pifm_rd_swap_en); | ||
1943 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
1944 | &legacy_reg->pifm_rd_flip_en); | ||
1945 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
1946 | &legacy_reg->pifm_wr_swap_en); | ||
1947 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
1948 | &legacy_reg->pifm_wr_flip_en); | ||
1949 | break; | ||
1950 | |||
1951 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
1952 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
1953 | &legacy_reg->pifm_rd_swap_en); | ||
1954 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
1955 | &legacy_reg->pifm_wr_swap_en); | ||
1956 | break; | ||
1957 | |||
1958 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
1959 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
1960 | &legacy_reg->pifm_rd_flip_en); | ||
1961 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
1962 | &legacy_reg->pifm_wr_flip_en); | ||
1963 | break; | ||
1964 | } | ||
1965 | |||
1966 | wmb(); | ||
1967 | |||
1968 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
1969 | |||
1970 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
1971 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
1972 | |||
1973 | return status; | ||
1974 | } | ||
1975 | |||
1976 | /* | ||
1977 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | 3091 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. |
1978 | * Set the swapper bits appropriately for the vpath. | 3092 | * Set the swapper bits appropriately for the vpath. |
1979 | */ | 3093 | */ |
1980 | enum vxge_hw_status | 3094 | static enum vxge_hw_status |
1981 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | 3095 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) |
1982 | { | 3096 | { |
1983 | #ifndef __BIG_ENDIAN | 3097 | #ifndef __BIG_ENDIAN |
@@ -1996,10 +3110,9 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | |||
1996 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. | 3110 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. |
1997 | * Set the swapper bits appropriately for the vpath. | 3111 | * Set the swapper bits appropriately for the vpath. |
1998 | */ | 3112 | */ |
1999 | enum vxge_hw_status | 3113 | static enum vxge_hw_status |
2000 | __vxge_hw_kdfc_swapper_set( | 3114 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, |
2001 | struct vxge_hw_legacy_reg __iomem *legacy_reg, | 3115 | struct vxge_hw_vpath_reg __iomem *vpath_reg) |
2002 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2003 | { | 3116 | { |
2004 | u64 val64; | 3117 | u64 val64; |
2005 | 3118 | ||
@@ -2021,28 +3134,6 @@ __vxge_hw_kdfc_swapper_set( | |||
2021 | } | 3134 | } |
2022 | 3135 | ||
2023 | /* | 3136 | /* |
2024 | * vxge_hw_mgmt_device_config - Retrieve device configuration. | ||
2025 | * Get device configuration. Permits to retrieve at run-time configuration | ||
2026 | * values that were used to initialize and configure the device. | ||
2027 | */ | ||
2028 | enum vxge_hw_status | ||
2029 | vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev, | ||
2030 | struct vxge_hw_device_config *dev_config, int size) | ||
2031 | { | ||
2032 | |||
2033 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) | ||
2034 | return VXGE_HW_ERR_INVALID_DEVICE; | ||
2035 | |||
2036 | if (size != sizeof(struct vxge_hw_device_config)) | ||
2037 | return VXGE_HW_ERR_VERSION_CONFLICT; | ||
2038 | |||
2039 | memcpy(dev_config, &hldev->config, | ||
2040 | sizeof(struct vxge_hw_device_config)); | ||
2041 | |||
2042 | return VXGE_HW_OK; | ||
2043 | } | ||
2044 | |||
2045 | /* | ||
2046 | * vxge_hw_mgmt_reg_read - Read Titan register. | 3137 | * vxge_hw_mgmt_reg_read - Read Titan register. |
2047 | */ | 3138 | */ |
2048 | enum vxge_hw_status | 3139 | enum vxge_hw_status |
@@ -2271,6 +3362,69 @@ exit: | |||
2271 | } | 3362 | } |
2272 | 3363 | ||
2273 | /* | 3364 | /* |
3365 | * __vxge_hw_fifo_abort - Returns the TxD | ||
3366 | * This function terminates the TxDs of fifo | ||
3367 | */ | ||
3368 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
3369 | { | ||
3370 | void *txdlh; | ||
3371 | |||
3372 | for (;;) { | ||
3373 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
3374 | |||
3375 | if (txdlh == NULL) | ||
3376 | break; | ||
3377 | |||
3378 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
3379 | |||
3380 | if (fifo->txdl_term) { | ||
3381 | fifo->txdl_term(txdlh, | ||
3382 | VXGE_HW_TXDL_STATE_POSTED, | ||
3383 | fifo->channel.userdata); | ||
3384 | } | ||
3385 | |||
3386 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
3387 | } | ||
3388 | |||
3389 | return VXGE_HW_OK; | ||
3390 | } | ||
3391 | |||
3392 | /* | ||
3393 | * __vxge_hw_fifo_reset - Resets the fifo | ||
3394 | * This function resets the fifo during vpath reset operation | ||
3395 | */ | ||
3396 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
3397 | { | ||
3398 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3399 | |||
3400 | __vxge_hw_fifo_abort(fifo); | ||
3401 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
3402 | |||
3403 | return status; | ||
3404 | } | ||
3405 | |||
3406 | /* | ||
3407 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
3408 | * This function freeup the memory pool and removes the FIFO | ||
3409 | */ | ||
3410 | static enum vxge_hw_status | ||
3411 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
3412 | { | ||
3413 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
3414 | |||
3415 | __vxge_hw_fifo_abort(fifo); | ||
3416 | |||
3417 | if (fifo->mempool) | ||
3418 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
3419 | |||
3420 | vp->vpath->fifoh = NULL; | ||
3421 | |||
3422 | __vxge_hw_channel_free(&fifo->channel); | ||
3423 | |||
3424 | return VXGE_HW_OK; | ||
3425 | } | ||
3426 | |||
3427 | /* | ||
2274 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD | 3428 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD |
2275 | * list callback | 3429 | * list callback |
2276 | * This function is callback passed to __vxge_hw_mempool_create to create memory | 3430 | * This function is callback passed to __vxge_hw_mempool_create to create memory |
@@ -2316,7 +3470,7 @@ __vxge_hw_fifo_mempool_item_alloc( | |||
2316 | * __vxge_hw_fifo_create - Create a FIFO | 3470 | * __vxge_hw_fifo_create - Create a FIFO |
2317 | * This function creates FIFO and initializes it. | 3471 | * This function creates FIFO and initializes it. |
2318 | */ | 3472 | */ |
2319 | enum vxge_hw_status | 3473 | static enum vxge_hw_status |
2320 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | 3474 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, |
2321 | struct vxge_hw_fifo_attr *attr) | 3475 | struct vxge_hw_fifo_attr *attr) |
2322 | { | 3476 | { |
@@ -2359,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | |||
2359 | 3513 | ||
2360 | /* apply "interrupts per txdl" attribute */ | 3514 | /* apply "interrupts per txdl" attribute */ |
2361 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; | 3515 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; |
3516 | fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; | ||
3517 | fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; | ||
2362 | 3518 | ||
2363 | if (fifo->config->intr) | 3519 | if (fifo->config->intr) |
2364 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; | 3520 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; |
@@ -2435,73 +3591,11 @@ exit: | |||
2435 | } | 3591 | } |
2436 | 3592 | ||
2437 | /* | 3593 | /* |
2438 | * __vxge_hw_fifo_abort - Returns the TxD | ||
2439 | * This function terminates the TxDs of fifo | ||
2440 | */ | ||
2441 | enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
2442 | { | ||
2443 | void *txdlh; | ||
2444 | |||
2445 | for (;;) { | ||
2446 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
2447 | |||
2448 | if (txdlh == NULL) | ||
2449 | break; | ||
2450 | |||
2451 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
2452 | |||
2453 | if (fifo->txdl_term) { | ||
2454 | fifo->txdl_term(txdlh, | ||
2455 | VXGE_HW_TXDL_STATE_POSTED, | ||
2456 | fifo->channel.userdata); | ||
2457 | } | ||
2458 | |||
2459 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
2460 | } | ||
2461 | |||
2462 | return VXGE_HW_OK; | ||
2463 | } | ||
2464 | |||
2465 | /* | ||
2466 | * __vxge_hw_fifo_reset - Resets the fifo | ||
2467 | * This function resets the fifo during vpath reset operation | ||
2468 | */ | ||
2469 | enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
2470 | { | ||
2471 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2472 | |||
2473 | __vxge_hw_fifo_abort(fifo); | ||
2474 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
2475 | |||
2476 | return status; | ||
2477 | } | ||
2478 | |||
2479 | /* | ||
2480 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
2481 | * This function freeup the memory pool and removes the FIFO | ||
2482 | */ | ||
2483 | enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
2484 | { | ||
2485 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
2486 | |||
2487 | __vxge_hw_fifo_abort(fifo); | ||
2488 | |||
2489 | if (fifo->mempool) | ||
2490 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
2491 | |||
2492 | vp->vpath->fifoh = NULL; | ||
2493 | |||
2494 | __vxge_hw_channel_free(&fifo->channel); | ||
2495 | |||
2496 | return VXGE_HW_OK; | ||
2497 | } | ||
2498 | |||
2499 | /* | ||
2500 | * __vxge_hw_vpath_pci_read - Read the content of given address | 3594 | * __vxge_hw_vpath_pci_read - Read the content of given address |
2501 | * in pci config space. | 3595 | * in pci config space. |
2502 | * Read from the vpath pci config space. | 3596 | * Read from the vpath pci config space. |
2503 | */ | 3597 | */ |
2504 | enum vxge_hw_status | 3598 | static enum vxge_hw_status |
2505 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, | 3599 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, |
2506 | u32 phy_func_0, u32 offset, u32 *val) | 3600 | u32 phy_func_0, u32 offset, u32 *val) |
2507 | { | 3601 | { |
@@ -2538,297 +3632,6 @@ exit: | |||
2538 | return status; | 3632 | return status; |
2539 | } | 3633 | } |
2540 | 3634 | ||
2541 | /* | ||
2542 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
2543 | * Returns the function number of the vpath. | ||
2544 | */ | ||
2545 | u32 | ||
2546 | __vxge_hw_vpath_func_id_get(u32 vp_id, | ||
2547 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
2548 | { | ||
2549 | u64 val64; | ||
2550 | |||
2551 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
2552 | |||
2553 | return | ||
2554 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
2555 | } | ||
2556 | |||
2557 | /* | ||
2558 | * __vxge_hw_read_rts_ds - Program RTS steering critieria | ||
2559 | */ | ||
2560 | static inline void | ||
2561 | __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2562 | u64 dta_struct_sel) | ||
2563 | { | ||
2564 | writeq(0, &vpath_reg->rts_access_steer_ctrl); | ||
2565 | wmb(); | ||
2566 | writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0); | ||
2567 | writeq(0, &vpath_reg->rts_access_steer_data1); | ||
2568 | wmb(); | ||
2569 | } | ||
2570 | |||
2571 | |||
2572 | /* | ||
2573 | * __vxge_hw_vpath_card_info_get - Get the serial numbers, | ||
2574 | * part number and product description. | ||
2575 | */ | ||
2576 | enum vxge_hw_status | ||
2577 | __vxge_hw_vpath_card_info_get( | ||
2578 | u32 vp_id, | ||
2579 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2580 | struct vxge_hw_device_hw_info *hw_info) | ||
2581 | { | ||
2582 | u32 i, j; | ||
2583 | u64 val64; | ||
2584 | u64 data1 = 0ULL; | ||
2585 | u64 data2 = 0ULL; | ||
2586 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2587 | u8 *serial_number = hw_info->serial_number; | ||
2588 | u8 *part_number = hw_info->part_number; | ||
2589 | u8 *product_desc = hw_info->product_desc; | ||
2590 | |||
2591 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2592 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER); | ||
2593 | |||
2594 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2595 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2596 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2597 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2598 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2599 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2600 | |||
2601 | status = __vxge_hw_pio_mem_write64(val64, | ||
2602 | &vpath_reg->rts_access_steer_ctrl, | ||
2603 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2604 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2605 | |||
2606 | if (status != VXGE_HW_OK) | ||
2607 | return status; | ||
2608 | |||
2609 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2610 | |||
2611 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2612 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2613 | ((u64 *)serial_number)[0] = be64_to_cpu(data1); | ||
2614 | |||
2615 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2616 | ((u64 *)serial_number)[1] = be64_to_cpu(data2); | ||
2617 | status = VXGE_HW_OK; | ||
2618 | } else | ||
2619 | *serial_number = 0; | ||
2620 | |||
2621 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2622 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER); | ||
2623 | |||
2624 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2625 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2626 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2627 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2628 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2629 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2630 | |||
2631 | status = __vxge_hw_pio_mem_write64(val64, | ||
2632 | &vpath_reg->rts_access_steer_ctrl, | ||
2633 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2634 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2635 | |||
2636 | if (status != VXGE_HW_OK) | ||
2637 | return status; | ||
2638 | |||
2639 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2640 | |||
2641 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2642 | |||
2643 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2644 | ((u64 *)part_number)[0] = be64_to_cpu(data1); | ||
2645 | |||
2646 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2647 | ((u64 *)part_number)[1] = be64_to_cpu(data2); | ||
2648 | |||
2649 | status = VXGE_HW_OK; | ||
2650 | |||
2651 | } else | ||
2652 | *part_number = 0; | ||
2653 | |||
2654 | j = 0; | ||
2655 | |||
2656 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
2657 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
2658 | |||
2659 | __vxge_hw_read_rts_ds(vpath_reg, i); | ||
2660 | |||
2661 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2662 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2663 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2664 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2665 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2666 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2667 | |||
2668 | status = __vxge_hw_pio_mem_write64(val64, | ||
2669 | &vpath_reg->rts_access_steer_ctrl, | ||
2670 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2671 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2672 | |||
2673 | if (status != VXGE_HW_OK) | ||
2674 | return status; | ||
2675 | |||
2676 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2677 | |||
2678 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2679 | |||
2680 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2681 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
2682 | |||
2683 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2684 | ((u64 *)product_desc)[j++] = be64_to_cpu(data2); | ||
2685 | |||
2686 | status = VXGE_HW_OK; | ||
2687 | } else | ||
2688 | *product_desc = 0; | ||
2689 | } | ||
2690 | |||
2691 | return status; | ||
2692 | } | ||
2693 | |||
2694 | /* | ||
2695 | * __vxge_hw_vpath_fw_ver_get - Get the fw version | ||
2696 | * Returns FW Version | ||
2697 | */ | ||
2698 | enum vxge_hw_status | ||
2699 | __vxge_hw_vpath_fw_ver_get( | ||
2700 | u32 vp_id, | ||
2701 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2702 | struct vxge_hw_device_hw_info *hw_info) | ||
2703 | { | ||
2704 | u64 val64; | ||
2705 | u64 data1 = 0ULL; | ||
2706 | u64 data2 = 0ULL; | ||
2707 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
2708 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
2709 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
2710 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
2711 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2712 | |||
2713 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2714 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) | | ||
2715 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2716 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2717 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2718 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2719 | |||
2720 | status = __vxge_hw_pio_mem_write64(val64, | ||
2721 | &vpath_reg->rts_access_steer_ctrl, | ||
2722 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2723 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2724 | |||
2725 | if (status != VXGE_HW_OK) | ||
2726 | goto exit; | ||
2727 | |||
2728 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2729 | |||
2730 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2731 | |||
2732 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2733 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2734 | |||
2735 | fw_date->day = | ||
2736 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY( | ||
2737 | data1); | ||
2738 | fw_date->month = | ||
2739 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH( | ||
2740 | data1); | ||
2741 | fw_date->year = | ||
2742 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR( | ||
2743 | data1); | ||
2744 | |||
2745 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
2746 | fw_date->month, fw_date->day, fw_date->year); | ||
2747 | |||
2748 | fw_version->major = | ||
2749 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1); | ||
2750 | fw_version->minor = | ||
2751 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1); | ||
2752 | fw_version->build = | ||
2753 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1); | ||
2754 | |||
2755 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2756 | fw_version->major, fw_version->minor, fw_version->build); | ||
2757 | |||
2758 | flash_date->day = | ||
2759 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2); | ||
2760 | flash_date->month = | ||
2761 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2); | ||
2762 | flash_date->year = | ||
2763 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2); | ||
2764 | |||
2765 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, | ||
2766 | "%2.2d/%2.2d/%4.4d", | ||
2767 | flash_date->month, flash_date->day, flash_date->year); | ||
2768 | |||
2769 | flash_version->major = | ||
2770 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2); | ||
2771 | flash_version->minor = | ||
2772 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2); | ||
2773 | flash_version->build = | ||
2774 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2); | ||
2775 | |||
2776 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2777 | flash_version->major, flash_version->minor, | ||
2778 | flash_version->build); | ||
2779 | |||
2780 | status = VXGE_HW_OK; | ||
2781 | |||
2782 | } else | ||
2783 | status = VXGE_HW_FAIL; | ||
2784 | exit: | ||
2785 | return status; | ||
2786 | } | ||
2787 | |||
2788 | /* | ||
2789 | * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode | ||
2790 | * Returns pci function mode | ||
2791 | */ | ||
2792 | u64 | ||
2793 | __vxge_hw_vpath_pci_func_mode_get( | ||
2794 | u32 vp_id, | ||
2795 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2796 | { | ||
2797 | u64 val64; | ||
2798 | u64 data1 = 0ULL; | ||
2799 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2800 | |||
2801 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2802 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE); | ||
2803 | |||
2804 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2805 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2806 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2807 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2808 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2809 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2810 | |||
2811 | status = __vxge_hw_pio_mem_write64(val64, | ||
2812 | &vpath_reg->rts_access_steer_ctrl, | ||
2813 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2814 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2815 | |||
2816 | if (status != VXGE_HW_OK) | ||
2817 | goto exit; | ||
2818 | |||
2819 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2820 | |||
2821 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2822 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2823 | status = VXGE_HW_OK; | ||
2824 | } else { | ||
2825 | data1 = 0; | ||
2826 | status = VXGE_HW_FAIL; | ||
2827 | } | ||
2828 | exit: | ||
2829 | return data1; | ||
2830 | } | ||
2831 | |||
2832 | /** | 3635 | /** |
2833 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. | 3636 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. |
2834 | * @hldev: HW device. | 3637 | * @hldev: HW device. |
@@ -2837,37 +3640,24 @@ exit: | |||
2837 | * Flicker the link LED. | 3640 | * Flicker the link LED. |
2838 | */ | 3641 | */ |
2839 | enum vxge_hw_status | 3642 | enum vxge_hw_status |
2840 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, | 3643 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) |
2841 | u64 on_off) | ||
2842 | { | 3644 | { |
2843 | u64 val64; | 3645 | struct __vxge_hw_virtualpath *vpath; |
2844 | enum vxge_hw_status status = VXGE_HW_OK; | 3646 | u64 data0, data1 = 0, steer_ctrl = 0; |
2845 | struct vxge_hw_vpath_reg __iomem *vp_reg; | 3647 | enum vxge_hw_status status; |
2846 | 3648 | ||
2847 | if (hldev == NULL) { | 3649 | if (hldev == NULL) { |
2848 | status = VXGE_HW_ERR_INVALID_DEVICE; | 3650 | status = VXGE_HW_ERR_INVALID_DEVICE; |
2849 | goto exit; | 3651 | goto exit; |
2850 | } | 3652 | } |
2851 | 3653 | ||
2852 | vp_reg = hldev->vpath_reg[hldev->first_vp_id]; | 3654 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; |
2853 | |||
2854 | writeq(0, &vp_reg->rts_access_steer_ctrl); | ||
2855 | wmb(); | ||
2856 | writeq(on_off, &vp_reg->rts_access_steer_data0); | ||
2857 | writeq(0, &vp_reg->rts_access_steer_data1); | ||
2858 | wmb(); | ||
2859 | 3655 | ||
2860 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | 3656 | data0 = on_off; |
2861 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | | 3657 | status = vxge_hw_vpath_fw_api(vpath, |
2862 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | 3658 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, |
2863 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | 3659 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, |
2864 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | 3660 | 0, &data0, &data1, &steer_ctrl); |
2865 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2866 | |||
2867 | status = __vxge_hw_pio_mem_write64(val64, | ||
2868 | &vp_reg->rts_access_steer_ctrl, | ||
2869 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2870 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2871 | exit: | 3661 | exit: |
2872 | return status; | 3662 | return status; |
2873 | } | 3663 | } |
@@ -2876,63 +3666,38 @@ exit: | |||
2876 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables | 3666 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables |
2877 | */ | 3667 | */ |
2878 | enum vxge_hw_status | 3668 | enum vxge_hw_status |
2879 | __vxge_hw_vpath_rts_table_get( | 3669 | __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, |
2880 | struct __vxge_hw_vpath_handle *vp, | 3670 | u32 action, u32 rts_table, u32 offset, |
2881 | u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) | 3671 | u64 *data0, u64 *data1) |
2882 | { | 3672 | { |
2883 | u64 val64; | 3673 | enum vxge_hw_status status; |
2884 | struct __vxge_hw_virtualpath *vpath; | 3674 | u64 steer_ctrl = 0; |
2885 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
2886 | |||
2887 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2888 | 3675 | ||
2889 | if (vp == NULL) { | 3676 | if (vp == NULL) { |
2890 | status = VXGE_HW_ERR_INVALID_HANDLE; | 3677 | status = VXGE_HW_ERR_INVALID_HANDLE; |
2891 | goto exit; | 3678 | goto exit; |
2892 | } | 3679 | } |
2893 | 3680 | ||
2894 | vpath = vp->vpath; | ||
2895 | vp_reg = vpath->vp_reg; | ||
2896 | |||
2897 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
2898 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
2899 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2900 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
2901 | |||
2902 | if ((rts_table == | 3681 | if ((rts_table == |
2903 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || | 3682 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || |
2904 | (rts_table == | 3683 | (rts_table == |
2905 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || | 3684 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || |
2906 | (rts_table == | 3685 | (rts_table == |
2907 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || | 3686 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || |
2908 | (rts_table == | 3687 | (rts_table == |
2909 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { | 3688 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { |
2910 | val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; | 3689 | steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; |
2911 | } | 3690 | } |
2912 | 3691 | ||
2913 | status = __vxge_hw_pio_mem_write64(val64, | 3692 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
2914 | &vp_reg->rts_access_steer_ctrl, | 3693 | data0, data1, &steer_ctrl); |
2915 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2916 | vpath->hldev->config.device_poll_millis); | ||
2917 | |||
2918 | if (status != VXGE_HW_OK) | 3694 | if (status != VXGE_HW_OK) |
2919 | goto exit; | 3695 | goto exit; |
2920 | 3696 | ||
2921 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | 3697 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && |
2922 | 3698 | (rts_table != | |
2923 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | 3699 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
2924 | 3700 | *data1 = 0; | |
2925 | *data1 = readq(&vp_reg->rts_access_steer_data0); | ||
2926 | |||
2927 | if ((rts_table == | ||
2928 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | ||
2929 | (rts_table == | ||
2930 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | ||
2931 | *data2 = readq(&vp_reg->rts_access_steer_data1); | ||
2932 | } | ||
2933 | status = VXGE_HW_OK; | ||
2934 | } else | ||
2935 | status = VXGE_HW_FAIL; | ||
2936 | exit: | 3701 | exit: |
2937 | return status; | 3702 | return status; |
2938 | } | 3703 | } |
@@ -2941,107 +3706,27 @@ exit: | |||
2941 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables | 3706 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables |
2942 | */ | 3707 | */ |
2943 | enum vxge_hw_status | 3708 | enum vxge_hw_status |
2944 | __vxge_hw_vpath_rts_table_set( | 3709 | __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, |
2945 | struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, | 3710 | u32 rts_table, u32 offset, u64 steer_data0, |
2946 | u32 offset, u64 data1, u64 data2) | 3711 | u64 steer_data1) |
2947 | { | 3712 | { |
2948 | u64 val64; | 3713 | u64 data0, data1 = 0, steer_ctrl = 0; |
2949 | struct __vxge_hw_virtualpath *vpath; | 3714 | enum vxge_hw_status status; |
2950 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2951 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
2952 | 3715 | ||
2953 | if (vp == NULL) { | 3716 | if (vp == NULL) { |
2954 | status = VXGE_HW_ERR_INVALID_HANDLE; | 3717 | status = VXGE_HW_ERR_INVALID_HANDLE; |
2955 | goto exit; | 3718 | goto exit; |
2956 | } | 3719 | } |
2957 | 3720 | ||
2958 | vpath = vp->vpath; | 3721 | data0 = steer_data0; |
2959 | vp_reg = vpath->vp_reg; | ||
2960 | |||
2961 | writeq(data1, &vp_reg->rts_access_steer_data0); | ||
2962 | wmb(); | ||
2963 | 3722 | ||
2964 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | 3723 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || |
2965 | (rts_table == | 3724 | (rts_table == |
2966 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | 3725 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
2967 | writeq(data2, &vp_reg->rts_access_steer_data1); | 3726 | data1 = steer_data1; |
2968 | wmb(); | ||
2969 | } | ||
2970 | |||
2971 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
2972 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
2973 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2974 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
2975 | |||
2976 | status = __vxge_hw_pio_mem_write64(val64, | ||
2977 | &vp_reg->rts_access_steer_ctrl, | ||
2978 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2979 | vpath->hldev->config.device_poll_millis); | ||
2980 | |||
2981 | if (status != VXGE_HW_OK) | ||
2982 | goto exit; | ||
2983 | |||
2984 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
2985 | 3727 | ||
2986 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) | 3728 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
2987 | status = VXGE_HW_OK; | 3729 | &data0, &data1, &steer_ctrl); |
2988 | else | ||
2989 | status = VXGE_HW_FAIL; | ||
2990 | exit: | ||
2991 | return status; | ||
2992 | } | ||
2993 | |||
2994 | /* | ||
2995 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
2996 | * from MAC address table. | ||
2997 | */ | ||
2998 | enum vxge_hw_status | ||
2999 | __vxge_hw_vpath_addr_get( | ||
3000 | u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
3001 | u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) | ||
3002 | { | ||
3003 | u32 i; | ||
3004 | u64 val64; | ||
3005 | u64 data1 = 0ULL; | ||
3006 | u64 data2 = 0ULL; | ||
3007 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3008 | |||
3009 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
3010 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) | | ||
3011 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
3012 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) | | ||
3013 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3014 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
3015 | |||
3016 | status = __vxge_hw_pio_mem_write64(val64, | ||
3017 | &vpath_reg->rts_access_steer_ctrl, | ||
3018 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3019 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3020 | |||
3021 | if (status != VXGE_HW_OK) | ||
3022 | goto exit; | ||
3023 | |||
3024 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
3025 | |||
3026 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
3027 | |||
3028 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
3029 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
3030 | |||
3031 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); | ||
3032 | data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
3033 | data2); | ||
3034 | |||
3035 | for (i = ETH_ALEN; i > 0; i--) { | ||
3036 | macaddr[i-1] = (u8)(data1 & 0xFF); | ||
3037 | data1 >>= 8; | ||
3038 | |||
3039 | macaddr_mask[i-1] = (u8)(data2 & 0xFF); | ||
3040 | data2 >>= 8; | ||
3041 | } | ||
3042 | status = VXGE_HW_OK; | ||
3043 | } else | ||
3044 | status = VXGE_HW_FAIL; | ||
3045 | exit: | 3730 | exit: |
3046 | return status; | 3731 | return status; |
3047 | } | 3732 | } |
@@ -3067,6 +3752,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | |||
3067 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | 3752 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, |
3068 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | 3753 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, |
3069 | 0, &data0, &data1); | 3754 | 0, &data0, &data1); |
3755 | if (status != VXGE_HW_OK) | ||
3756 | goto exit; | ||
3070 | 3757 | ||
3071 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | | 3758 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | |
3072 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); | 3759 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); |
@@ -3347,7 +4034,7 @@ __vxge_hw_vpath_mgmt_read( | |||
3347 | * This routine checks the vpath_rst_in_prog register to see if | 4034 | * This routine checks the vpath_rst_in_prog register to see if |
3348 | * adapter completed the reset process for the vpath | 4035 | * adapter completed the reset process for the vpath |
3349 | */ | 4036 | */ |
3350 | enum vxge_hw_status | 4037 | static enum vxge_hw_status |
3351 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | 4038 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) |
3352 | { | 4039 | { |
3353 | enum vxge_hw_status status; | 4040 | enum vxge_hw_status status; |
@@ -3365,7 +4052,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | |||
3365 | * __vxge_hw_vpath_reset | 4052 | * __vxge_hw_vpath_reset |
3366 | * This routine resets the vpath on the device | 4053 | * This routine resets the vpath on the device |
3367 | */ | 4054 | */ |
3368 | enum vxge_hw_status | 4055 | static enum vxge_hw_status |
3369 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | 4056 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3370 | { | 4057 | { |
3371 | u64 val64; | 4058 | u64 val64; |
@@ -3383,7 +4070,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3383 | * __vxge_hw_vpath_sw_reset | 4070 | * __vxge_hw_vpath_sw_reset |
3384 | * This routine resets the vpath structures | 4071 | * This routine resets the vpath structures |
3385 | */ | 4072 | */ |
3386 | enum vxge_hw_status | 4073 | static enum vxge_hw_status |
3387 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) | 4074 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3388 | { | 4075 | { |
3389 | enum vxge_hw_status status = VXGE_HW_OK; | 4076 | enum vxge_hw_status status = VXGE_HW_OK; |
@@ -3408,7 +4095,7 @@ exit: | |||
3408 | * This routine configures the prc registers of virtual path using the config | 4095 | * This routine configures the prc registers of virtual path using the config |
3409 | * passed | 4096 | * passed |
3410 | */ | 4097 | */ |
3411 | void | 4098 | static void |
3412 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4099 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3413 | { | 4100 | { |
3414 | u64 val64; | 4101 | u64 val64; |
@@ -3480,7 +4167,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3480 | * This routine configures the kdfc registers of virtual path using the | 4167 | * This routine configures the kdfc registers of virtual path using the |
3481 | * config passed | 4168 | * config passed |
3482 | */ | 4169 | */ |
3483 | enum vxge_hw_status | 4170 | static enum vxge_hw_status |
3484 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4171 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3485 | { | 4172 | { |
3486 | u64 val64; | 4173 | u64 val64; |
@@ -3553,7 +4240,7 @@ exit: | |||
3553 | * __vxge_hw_vpath_mac_configure | 4240 | * __vxge_hw_vpath_mac_configure |
3554 | * This routine configures the mac of virtual path using the config passed | 4241 | * This routine configures the mac of virtual path using the config passed |
3555 | */ | 4242 | */ |
3556 | enum vxge_hw_status | 4243 | static enum vxge_hw_status |
3557 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4244 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3558 | { | 4245 | { |
3559 | u64 val64; | 4246 | u64 val64; |
@@ -3621,7 +4308,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3621 | * This routine configures the tim registers of virtual path using the config | 4308 | * This routine configures the tim registers of virtual path using the config |
3622 | * passed | 4309 | * passed |
3623 | */ | 4310 | */ |
3624 | enum vxge_hw_status | 4311 | static enum vxge_hw_status |
3625 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4312 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3626 | { | 4313 | { |
3627 | u64 val64; | 4314 | u64 val64; |
@@ -3634,10 +4321,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3634 | vp_reg = vpath->vp_reg; | 4321 | vp_reg = vpath->vp_reg; |
3635 | config = vpath->vp_config; | 4322 | config = vpath->vp_config; |
3636 | 4323 | ||
3637 | writeq((u64)0, &vp_reg->tim_dest_addr); | 4324 | writeq(0, &vp_reg->tim_dest_addr); |
3638 | writeq((u64)0, &vp_reg->tim_vpath_map); | 4325 | writeq(0, &vp_reg->tim_vpath_map); |
3639 | writeq((u64)0, &vp_reg->tim_bitmap); | 4326 | writeq(0, &vp_reg->tim_bitmap); |
3640 | writeq((u64)0, &vp_reg->tim_remap); | 4327 | writeq(0, &vp_reg->tim_remap); |
3641 | 4328 | ||
3642 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | 4329 | if (config->ring.enable == VXGE_HW_RING_ENABLE) |
3643 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | 4330 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( |
@@ -3694,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3694 | } | 4381 | } |
3695 | 4382 | ||
3696 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | 4383 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); |
4384 | vpath->tim_tti_cfg1_saved = val64; | ||
4385 | |||
3697 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | 4386 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); |
3698 | 4387 | ||
3699 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | 4388 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3739,8 +4428,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3739 | 4428 | ||
3740 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | 4429 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { |
3741 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | 4430 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); |
3742 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | 4431 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); |
3743 | config->tti.util_sel); | ||
3744 | } | 4432 | } |
3745 | 4433 | ||
3746 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | 4434 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3751,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3751 | } | 4439 | } |
3752 | 4440 | ||
3753 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | 4441 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); |
4442 | vpath->tim_tti_cfg3_saved = val64; | ||
3754 | } | 4443 | } |
3755 | 4444 | ||
3756 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { | 4445 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { |
@@ -3799,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3799 | } | 4488 | } |
3800 | 4489 | ||
3801 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | 4490 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); |
4491 | vpath->tim_rti_cfg1_saved = val64; | ||
4492 | |||
3802 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | 4493 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); |
3803 | 4494 | ||
3804 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | 4495 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3844,8 +4535,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3844 | 4535 | ||
3845 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | 4536 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { |
3846 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | 4537 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); |
3847 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | 4538 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); |
3848 | config->rti.util_sel); | ||
3849 | } | 4539 | } |
3850 | 4540 | ||
3851 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | 4541 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3856,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3856 | } | 4546 | } |
3857 | 4547 | ||
3858 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | 4548 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); |
4549 | vpath->tim_rti_cfg3_saved = val64; | ||
3859 | } | 4550 | } |
3860 | 4551 | ||
3861 | val64 = 0; | 4552 | val64 = 0; |
@@ -3866,38 +4557,20 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3866 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); | 4557 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); |
3867 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); | 4558 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); |
3868 | 4559 | ||
4560 | val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); | ||
4561 | val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); | ||
4562 | val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); | ||
4563 | writeq(val64, &vp_reg->tim_wrkld_clc); | ||
4564 | |||
3869 | return status; | 4565 | return status; |
3870 | } | 4566 | } |
3871 | 4567 | ||
3872 | void | ||
3873 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3874 | { | ||
3875 | struct __vxge_hw_virtualpath *vpath; | ||
3876 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3877 | struct vxge_hw_vp_config *config; | ||
3878 | u64 val64; | ||
3879 | |||
3880 | vpath = &hldev->virtual_paths[vp_id]; | ||
3881 | vp_reg = vpath->vp_reg; | ||
3882 | config = vpath->vp_config; | ||
3883 | |||
3884 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
3885 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3886 | |||
3887 | if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | ||
3888 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | ||
3889 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3890 | writeq(val64, | ||
3891 | &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3892 | } | ||
3893 | } | ||
3894 | } | ||
3895 | /* | 4568 | /* |
3896 | * __vxge_hw_vpath_initialize | 4569 | * __vxge_hw_vpath_initialize |
3897 | * This routine is the final phase of init which initializes the | 4570 | * This routine is the final phase of init which initializes the |
3898 | * registers of the vpath using the configuration passed. | 4571 | * registers of the vpath using the configuration passed. |
3899 | */ | 4572 | */ |
3900 | enum vxge_hw_status | 4573 | static enum vxge_hw_status |
3901 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | 4574 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) |
3902 | { | 4575 | { |
3903 | u64 val64; | 4576 | u64 val64; |
@@ -3915,22 +4588,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3915 | vp_reg = vpath->vp_reg; | 4588 | vp_reg = vpath->vp_reg; |
3916 | 4589 | ||
3917 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | 4590 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); |
3918 | |||
3919 | if (status != VXGE_HW_OK) | 4591 | if (status != VXGE_HW_OK) |
3920 | goto exit; | 4592 | goto exit; |
3921 | 4593 | ||
3922 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | 4594 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); |
3923 | |||
3924 | if (status != VXGE_HW_OK) | 4595 | if (status != VXGE_HW_OK) |
3925 | goto exit; | 4596 | goto exit; |
3926 | 4597 | ||
3927 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | 4598 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); |
3928 | |||
3929 | if (status != VXGE_HW_OK) | 4599 | if (status != VXGE_HW_OK) |
3930 | goto exit; | 4600 | goto exit; |
3931 | 4601 | ||
3932 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | 4602 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); |
3933 | |||
3934 | if (status != VXGE_HW_OK) | 4603 | if (status != VXGE_HW_OK) |
3935 | goto exit; | 4604 | goto exit; |
3936 | 4605 | ||
@@ -3938,7 +4607,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3938 | 4607 | ||
3939 | /* Get MRRS value from device control */ | 4608 | /* Get MRRS value from device control */ |
3940 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | 4609 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); |
3941 | |||
3942 | if (status == VXGE_HW_OK) { | 4610 | if (status == VXGE_HW_OK) { |
3943 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | 4611 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; |
3944 | val64 &= | 4612 | val64 &= |
@@ -3962,11 +4630,53 @@ exit: | |||
3962 | } | 4630 | } |
3963 | 4631 | ||
3964 | /* | 4632 | /* |
4633 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4634 | * This routine closes all channels it opened and freeup memory | ||
4635 | */ | ||
4636 | static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4637 | { | ||
4638 | struct __vxge_hw_virtualpath *vpath; | ||
4639 | |||
4640 | vpath = &hldev->virtual_paths[vp_id]; | ||
4641 | |||
4642 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4643 | goto exit; | ||
4644 | |||
4645 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4646 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4647 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4648 | |||
4649 | /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will | ||
4650 | * work after the interface is brought down. | ||
4651 | */ | ||
4652 | spin_lock(&vpath->lock); | ||
4653 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4654 | spin_unlock(&vpath->lock); | ||
4655 | |||
4656 | vpath->vpmgmt_reg = NULL; | ||
4657 | vpath->nofl_db = NULL; | ||
4658 | vpath->max_mtu = 0; | ||
4659 | vpath->vsport_number = 0; | ||
4660 | vpath->max_kdfc_db = 0; | ||
4661 | vpath->max_nofl_db = 0; | ||
4662 | vpath->ringh = NULL; | ||
4663 | vpath->fifoh = NULL; | ||
4664 | memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); | ||
4665 | vpath->stats_block = 0; | ||
4666 | vpath->hw_stats = NULL; | ||
4667 | vpath->hw_stats_sav = NULL; | ||
4668 | vpath->sw_stats = NULL; | ||
4669 | |||
4670 | exit: | ||
4671 | return; | ||
4672 | } | ||
4673 | |||
4674 | /* | ||
3965 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | 4675 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure |
3966 | * This routine is the initial phase of init which resets the vpath and | 4676 | * This routine is the initial phase of init which resets the vpath and |
3967 | * initializes the software support structures. | 4677 | * initializes the software support structures. |
3968 | */ | 4678 | */ |
3969 | enum vxge_hw_status | 4679 | static enum vxge_hw_status |
3970 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | 4680 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, |
3971 | struct vxge_hw_vp_config *config) | 4681 | struct vxge_hw_vp_config *config) |
3972 | { | 4682 | { |
@@ -3980,6 +4690,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
3980 | 4690 | ||
3981 | vpath = &hldev->virtual_paths[vp_id]; | 4691 | vpath = &hldev->virtual_paths[vp_id]; |
3982 | 4692 | ||
4693 | spin_lock_init(&vpath->lock); | ||
3983 | vpath->vp_id = vp_id; | 4694 | vpath->vp_id = vp_id; |
3984 | vpath->vp_open = VXGE_HW_VP_OPEN; | 4695 | vpath->vp_open = VXGE_HW_VP_OPEN; |
3985 | vpath->hldev = hldev; | 4696 | vpath->hldev = hldev; |
@@ -3990,14 +4701,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
3990 | __vxge_hw_vpath_reset(hldev, vp_id); | 4701 | __vxge_hw_vpath_reset(hldev, vp_id); |
3991 | 4702 | ||
3992 | status = __vxge_hw_vpath_reset_check(vpath); | 4703 | status = __vxge_hw_vpath_reset_check(vpath); |
3993 | |||
3994 | if (status != VXGE_HW_OK) { | 4704 | if (status != VXGE_HW_OK) { |
3995 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | 4705 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); |
3996 | goto exit; | 4706 | goto exit; |
3997 | } | 4707 | } |
3998 | 4708 | ||
3999 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); | 4709 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); |
4000 | |||
4001 | if (status != VXGE_HW_OK) { | 4710 | if (status != VXGE_HW_OK) { |
4002 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | 4711 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); |
4003 | goto exit; | 4712 | goto exit; |
@@ -4011,7 +4720,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
4011 | hldev->tim_int_mask1, vp_id); | 4720 | hldev->tim_int_mask1, vp_id); |
4012 | 4721 | ||
4013 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | 4722 | status = __vxge_hw_vpath_initialize(hldev, vp_id); |
4014 | |||
4015 | if (status != VXGE_HW_OK) | 4723 | if (status != VXGE_HW_OK) |
4016 | __vxge_hw_vp_terminate(hldev, vp_id); | 4724 | __vxge_hw_vp_terminate(hldev, vp_id); |
4017 | exit: | 4725 | exit: |
@@ -4019,29 +4727,6 @@ exit: | |||
4019 | } | 4727 | } |
4020 | 4728 | ||
4021 | /* | 4729 | /* |
4022 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4023 | * This routine closes all channels it opened and freeup memory | ||
4024 | */ | ||
4025 | void | ||
4026 | __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4027 | { | ||
4028 | struct __vxge_hw_virtualpath *vpath; | ||
4029 | |||
4030 | vpath = &hldev->virtual_paths[vp_id]; | ||
4031 | |||
4032 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4033 | goto exit; | ||
4034 | |||
4035 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4036 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4037 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4038 | |||
4039 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4040 | exit: | ||
4041 | return; | ||
4042 | } | ||
4043 | |||
4044 | /* | ||
4045 | * vxge_hw_vpath_mtu_set - Set MTU. | 4730 | * vxge_hw_vpath_mtu_set - Set MTU. |
4046 | * Set new MTU value. Example, to use jumbo frames: | 4731 | * Set new MTU value. Example, to use jumbo frames: |
4047 | * vxge_hw_vpath_mtu_set(my_device, 9600); | 4732 | * vxge_hw_vpath_mtu_set(my_device, 9600); |
@@ -4078,6 +4763,64 @@ exit: | |||
4078 | } | 4763 | } |
4079 | 4764 | ||
4080 | /* | 4765 | /* |
4766 | * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. | ||
4767 | * Enable the DMA vpath statistics. The function is to be called to re-enable | ||
4768 | * the adapter to update stats into the host memory | ||
4769 | */ | ||
4770 | static enum vxge_hw_status | ||
4771 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4772 | { | ||
4773 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4774 | struct __vxge_hw_virtualpath *vpath; | ||
4775 | |||
4776 | vpath = vp->vpath; | ||
4777 | |||
4778 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4779 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4780 | goto exit; | ||
4781 | } | ||
4782 | |||
4783 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4784 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4785 | |||
4786 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4787 | exit: | ||
4788 | return status; | ||
4789 | } | ||
4790 | |||
4791 | /* | ||
4792 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
4793 | * This function allocates a block from block pool or from the system | ||
4794 | */ | ||
4795 | static struct __vxge_hw_blockpool_entry * | ||
4796 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
4797 | { | ||
4798 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4799 | struct __vxge_hw_blockpool *blockpool; | ||
4800 | |||
4801 | blockpool = &devh->block_pool; | ||
4802 | |||
4803 | if (size == blockpool->block_size) { | ||
4804 | |||
4805 | if (!list_empty(&blockpool->free_block_list)) | ||
4806 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4807 | list_first_entry(&blockpool->free_block_list, | ||
4808 | struct __vxge_hw_blockpool_entry, | ||
4809 | item); | ||
4810 | |||
4811 | if (entry != NULL) { | ||
4812 | list_del(&entry->item); | ||
4813 | blockpool->pool_size--; | ||
4814 | } | ||
4815 | } | ||
4816 | |||
4817 | if (entry != NULL) | ||
4818 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4819 | |||
4820 | return entry; | ||
4821 | } | ||
4822 | |||
4823 | /* | ||
4081 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | 4824 | * vxge_hw_vpath_open - Open a virtual path on a given adapter |
4082 | * This function is used to open access to virtual path of an | 4825 | * This function is used to open access to virtual path of an |
4083 | * adapter for offload, GRO operations. This function returns | 4826 | * adapter for offload, GRO operations. This function returns |
@@ -4101,19 +4844,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4101 | 4844 | ||
4102 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | 4845 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, |
4103 | &hldev->config.vp_config[attr->vp_id]); | 4846 | &hldev->config.vp_config[attr->vp_id]); |
4104 | |||
4105 | if (status != VXGE_HW_OK) | 4847 | if (status != VXGE_HW_OK) |
4106 | goto vpath_open_exit1; | 4848 | goto vpath_open_exit1; |
4107 | 4849 | ||
4108 | vp = (struct __vxge_hw_vpath_handle *) | 4850 | vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); |
4109 | vmalloc(sizeof(struct __vxge_hw_vpath_handle)); | ||
4110 | if (vp == NULL) { | 4851 | if (vp == NULL) { |
4111 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 4852 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
4112 | goto vpath_open_exit2; | 4853 | goto vpath_open_exit2; |
4113 | } | 4854 | } |
4114 | 4855 | ||
4115 | memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle)); | ||
4116 | |||
4117 | vp->vpath = vpath; | 4856 | vp->vpath = vpath; |
4118 | 4857 | ||
4119 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | 4858 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { |
@@ -4136,7 +4875,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4136 | 4875 | ||
4137 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | 4876 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, |
4138 | VXGE_HW_BLOCK_SIZE); | 4877 | VXGE_HW_BLOCK_SIZE); |
4139 | |||
4140 | if (vpath->stats_block == NULL) { | 4878 | if (vpath->stats_block == NULL) { |
4141 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 4879 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
4142 | goto vpath_open_exit8; | 4880 | goto vpath_open_exit8; |
@@ -4195,19 +4933,20 @@ vpath_open_exit1: | |||
4195 | * This function is used to close access to virtual path opened | 4933 | * This function is used to close access to virtual path opened |
4196 | * earlier. | 4934 | * earlier. |
4197 | */ | 4935 | */ |
4198 | void | 4936 | void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) |
4199 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | ||
4200 | { | 4937 | { |
4201 | struct __vxge_hw_virtualpath *vpath = NULL; | 4938 | struct __vxge_hw_virtualpath *vpath = vp->vpath; |
4939 | struct __vxge_hw_ring *ring = vpath->ringh; | ||
4940 | struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); | ||
4202 | u64 new_count, val64, val164; | 4941 | u64 new_count, val64, val164; |
4203 | struct __vxge_hw_ring *ring; | ||
4204 | 4942 | ||
4205 | vpath = vp->vpath; | 4943 | if (vdev->titan1) { |
4206 | ring = vpath->ringh; | 4944 | new_count = readq(&vpath->vp_reg->rxdmem_size); |
4945 | new_count &= 0x1fff; | ||
4946 | } else | ||
4947 | new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; | ||
4207 | 4948 | ||
4208 | new_count = readq(&vpath->vp_reg->rxdmem_size); | 4949 | val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); |
4209 | new_count &= 0x1fff; | ||
4210 | val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count)); | ||
4211 | 4950 | ||
4212 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), | 4951 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), |
4213 | &vpath->vp_reg->prc_rxd_doorbell); | 4952 | &vpath->vp_reg->prc_rxd_doorbell); |
@@ -4230,6 +4969,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | |||
4230 | } | 4969 | } |
4231 | 4970 | ||
4232 | /* | 4971 | /* |
4972 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
4973 | * @devh: Hal device | ||
4974 | * @entry: Entry of block to be freed | ||
4975 | * | ||
4976 | * This function frees a block from block pool | ||
4977 | */ | ||
4978 | static void | ||
4979 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
4980 | struct __vxge_hw_blockpool_entry *entry) | ||
4981 | { | ||
4982 | struct __vxge_hw_blockpool *blockpool; | ||
4983 | |||
4984 | blockpool = &devh->block_pool; | ||
4985 | |||
4986 | if (entry->length == blockpool->block_size) { | ||
4987 | list_add(&entry->item, &blockpool->free_block_list); | ||
4988 | blockpool->pool_size++; | ||
4989 | } | ||
4990 | |||
4991 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
4992 | } | ||
4993 | |||
4994 | /* | ||
4233 | * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open | 4995 | * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open |
4234 | * This function is used to close access to virtual path opened | 4996 | * This function is used to close access to virtual path opened |
4235 | * earlier. | 4997 | * earlier. |
@@ -4277,8 +5039,6 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) | |||
4277 | 5039 | ||
4278 | __vxge_hw_vp_terminate(devh, vp_id); | 5040 | __vxge_hw_vp_terminate(devh, vp_id); |
4279 | 5041 | ||
4280 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4281 | |||
4282 | vpath_close_exit: | 5042 | vpath_close_exit: |
4283 | return status; | 5043 | return status; |
4284 | } | 5044 | } |
@@ -4378,705 +5138,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | |||
4378 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | 5138 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), |
4379 | &hldev->common_reg->cmn_rsthdlr_cfg1); | 5139 | &hldev->common_reg->cmn_rsthdlr_cfg1); |
4380 | } | 5140 | } |
4381 | |||
4382 | /* | ||
4383 | * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. | ||
4384 | * Enable the DMA vpath statistics. The function is to be called to re-enable | ||
4385 | * the adapter to update stats into the host memory | ||
4386 | */ | ||
4387 | enum vxge_hw_status | ||
4388 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4389 | { | ||
4390 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4391 | struct __vxge_hw_virtualpath *vpath; | ||
4392 | |||
4393 | vpath = vp->vpath; | ||
4394 | |||
4395 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4396 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4397 | goto exit; | ||
4398 | } | ||
4399 | |||
4400 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4401 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4402 | |||
4403 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4404 | exit: | ||
4405 | return status; | ||
4406 | } | ||
4407 | |||
4408 | /* | ||
4409 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
4410 | * and offset and perform an operation | ||
4411 | */ | ||
4412 | enum vxge_hw_status | ||
4413 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
4414 | u32 operation, u32 offset, u64 *stat) | ||
4415 | { | ||
4416 | u64 val64; | ||
4417 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4418 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4419 | |||
4420 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4421 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4422 | goto vpath_stats_access_exit; | ||
4423 | } | ||
4424 | |||
4425 | vp_reg = vpath->vp_reg; | ||
4426 | |||
4427 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
4428 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
4429 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
4430 | |||
4431 | status = __vxge_hw_pio_mem_write64(val64, | ||
4432 | &vp_reg->xmac_stats_access_cmd, | ||
4433 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
4434 | vpath->hldev->config.device_poll_millis); | ||
4435 | |||
4436 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
4437 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
4438 | else | ||
4439 | *stat = 0; | ||
4440 | |||
4441 | vpath_stats_access_exit: | ||
4442 | return status; | ||
4443 | } | ||
4444 | |||
4445 | /* | ||
4446 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
4447 | */ | ||
4448 | enum vxge_hw_status | ||
4449 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
4450 | struct __vxge_hw_virtualpath *vpath, | ||
4451 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
4452 | { | ||
4453 | u64 *val64; | ||
4454 | int i; | ||
4455 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
4456 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4457 | |||
4458 | val64 = (u64 *) vpath_tx_stats; | ||
4459 | |||
4460 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4461 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4462 | goto exit; | ||
4463 | } | ||
4464 | |||
4465 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
4466 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4467 | VXGE_HW_STATS_OP_READ, | ||
4468 | offset, val64); | ||
4469 | if (status != VXGE_HW_OK) | ||
4470 | goto exit; | ||
4471 | offset++; | ||
4472 | val64++; | ||
4473 | } | ||
4474 | exit: | ||
4475 | return status; | ||
4476 | } | ||
4477 | |||
4478 | /* | ||
4479 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
4480 | */ | ||
4481 | enum vxge_hw_status | ||
4482 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
4483 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
4484 | { | ||
4485 | u64 *val64; | ||
4486 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4487 | int i; | ||
4488 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
4489 | val64 = (u64 *) vpath_rx_stats; | ||
4490 | |||
4491 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4492 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4493 | goto exit; | ||
4494 | } | ||
4495 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
4496 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4497 | VXGE_HW_STATS_OP_READ, | ||
4498 | offset >> 3, val64); | ||
4499 | if (status != VXGE_HW_OK) | ||
4500 | goto exit; | ||
4501 | |||
4502 | offset += 8; | ||
4503 | val64++; | ||
4504 | } | ||
4505 | exit: | ||
4506 | return status; | ||
4507 | } | ||
4508 | |||
4509 | /* | ||
4510 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
4511 | */ | ||
4512 | enum vxge_hw_status __vxge_hw_vpath_stats_get( | ||
4513 | struct __vxge_hw_virtualpath *vpath, | ||
4514 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
4515 | { | ||
4516 | u64 val64; | ||
4517 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4518 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4519 | |||
4520 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4521 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4522 | goto exit; | ||
4523 | } | ||
4524 | vp_reg = vpath->vp_reg; | ||
4525 | |||
4526 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
4527 | hw_stats->ini_num_mwr_sent = | ||
4528 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
4529 | |||
4530 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
4531 | hw_stats->ini_num_mrd_sent = | ||
4532 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
4533 | |||
4534 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
4535 | hw_stats->ini_num_cpl_rcvd = | ||
4536 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
4537 | |||
4538 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
4539 | hw_stats->ini_num_mwr_byte_sent = | ||
4540 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
4541 | |||
4542 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
4543 | hw_stats->ini_num_cpl_byte_rcvd = | ||
4544 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
4545 | |||
4546 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
4547 | hw_stats->wrcrdtarb_xoff = | ||
4548 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
4549 | |||
4550 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
4551 | hw_stats->rdcrdtarb_xoff = | ||
4552 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
4553 | |||
4554 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4555 | hw_stats->vpath_genstats_count0 = | ||
4556 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
4557 | val64); | ||
4558 | |||
4559 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4560 | hw_stats->vpath_genstats_count1 = | ||
4561 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
4562 | val64); | ||
4563 | |||
4564 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4565 | hw_stats->vpath_genstats_count2 = | ||
4566 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
4567 | val64); | ||
4568 | |||
4569 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4570 | hw_stats->vpath_genstats_count3 = | ||
4571 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
4572 | val64); | ||
4573 | |||
4574 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
4575 | hw_stats->vpath_genstats_count4 = | ||
4576 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
4577 | val64); | ||
4578 | |||
4579 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
4580 | hw_stats->vpath_genstats_count5 = | ||
4581 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
4582 | val64); | ||
4583 | |||
4584 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
4585 | if (status != VXGE_HW_OK) | ||
4586 | goto exit; | ||
4587 | |||
4588 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
4589 | if (status != VXGE_HW_OK) | ||
4590 | goto exit; | ||
4591 | |||
4592 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4593 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
4594 | |||
4595 | hw_stats->prog_event_vnum0 = | ||
4596 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
4597 | |||
4598 | hw_stats->prog_event_vnum1 = | ||
4599 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
4600 | |||
4601 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4602 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
4603 | |||
4604 | hw_stats->prog_event_vnum2 = | ||
4605 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
4606 | |||
4607 | hw_stats->prog_event_vnum3 = | ||
4608 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
4609 | |||
4610 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
4611 | hw_stats->rx_multi_cast_frame_discard = | ||
4612 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
4613 | |||
4614 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
4615 | hw_stats->rx_frm_transferred = | ||
4616 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
4617 | |||
4618 | val64 = readq(&vp_reg->rxd_returned); | ||
4619 | hw_stats->rxd_returned = | ||
4620 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
4621 | |||
4622 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
4623 | hw_stats->rx_mpa_len_fail_frms = | ||
4624 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
4625 | hw_stats->rx_mpa_mrk_fail_frms = | ||
4626 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
4627 | hw_stats->rx_mpa_crc_fail_frms = | ||
4628 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
4629 | |||
4630 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
4631 | hw_stats->rx_permitted_frms = | ||
4632 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
4633 | hw_stats->rx_vp_reset_discarded_frms = | ||
4634 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
4635 | hw_stats->rx_wol_frms = | ||
4636 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
4637 | |||
4638 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
4639 | hw_stats->tx_vp_reset_discarded_frms = | ||
4640 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
4641 | val64); | ||
4642 | exit: | ||
4643 | return status; | ||
4644 | } | ||
4645 | |||
4646 | /* | ||
4647 | * __vxge_hw_blockpool_create - Create block pool | ||
4648 | */ | ||
4649 | |||
4650 | enum vxge_hw_status | ||
4651 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
4652 | struct __vxge_hw_blockpool *blockpool, | ||
4653 | u32 pool_size, | ||
4654 | u32 pool_max) | ||
4655 | { | ||
4656 | u32 i; | ||
4657 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4658 | void *memblock; | ||
4659 | dma_addr_t dma_addr; | ||
4660 | struct pci_dev *dma_handle; | ||
4661 | struct pci_dev *acc_handle; | ||
4662 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4663 | |||
4664 | if (blockpool == NULL) { | ||
4665 | status = VXGE_HW_FAIL; | ||
4666 | goto blockpool_create_exit; | ||
4667 | } | ||
4668 | |||
4669 | blockpool->hldev = hldev; | ||
4670 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
4671 | blockpool->pool_size = 0; | ||
4672 | blockpool->pool_max = pool_max; | ||
4673 | blockpool->req_out = 0; | ||
4674 | |||
4675 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
4676 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
4677 | |||
4678 | for (i = 0; i < pool_size + pool_max; i++) { | ||
4679 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4680 | GFP_KERNEL); | ||
4681 | if (entry == NULL) { | ||
4682 | __vxge_hw_blockpool_destroy(blockpool); | ||
4683 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4684 | goto blockpool_create_exit; | ||
4685 | } | ||
4686 | list_add(&entry->item, &blockpool->free_entry_list); | ||
4687 | } | ||
4688 | |||
4689 | for (i = 0; i < pool_size; i++) { | ||
4690 | |||
4691 | memblock = vxge_os_dma_malloc( | ||
4692 | hldev->pdev, | ||
4693 | VXGE_HW_BLOCK_SIZE, | ||
4694 | &dma_handle, | ||
4695 | &acc_handle); | ||
4696 | |||
4697 | if (memblock == NULL) { | ||
4698 | __vxge_hw_blockpool_destroy(blockpool); | ||
4699 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4700 | goto blockpool_create_exit; | ||
4701 | } | ||
4702 | |||
4703 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
4704 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
4705 | |||
4706 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
4707 | dma_addr))) { | ||
4708 | |||
4709 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
4710 | __vxge_hw_blockpool_destroy(blockpool); | ||
4711 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4712 | goto blockpool_create_exit; | ||
4713 | } | ||
4714 | |||
4715 | if (!list_empty(&blockpool->free_entry_list)) | ||
4716 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4717 | list_first_entry(&blockpool->free_entry_list, | ||
4718 | struct __vxge_hw_blockpool_entry, | ||
4719 | item); | ||
4720 | |||
4721 | if (entry == NULL) | ||
4722 | entry = | ||
4723 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4724 | GFP_KERNEL); | ||
4725 | if (entry != NULL) { | ||
4726 | list_del(&entry->item); | ||
4727 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
4728 | entry->memblock = memblock; | ||
4729 | entry->dma_addr = dma_addr; | ||
4730 | entry->acc_handle = acc_handle; | ||
4731 | entry->dma_handle = dma_handle; | ||
4732 | list_add(&entry->item, | ||
4733 | &blockpool->free_block_list); | ||
4734 | blockpool->pool_size++; | ||
4735 | } else { | ||
4736 | __vxge_hw_blockpool_destroy(blockpool); | ||
4737 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4738 | goto blockpool_create_exit; | ||
4739 | } | ||
4740 | } | ||
4741 | |||
4742 | blockpool_create_exit: | ||
4743 | return status; | ||
4744 | } | ||
4745 | |||
4746 | /* | ||
4747 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
4748 | */ | ||
4749 | |||
4750 | void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
4751 | { | ||
4752 | |||
4753 | struct __vxge_hw_device *hldev; | ||
4754 | struct list_head *p, *n; | ||
4755 | u16 ret; | ||
4756 | |||
4757 | if (blockpool == NULL) { | ||
4758 | ret = 1; | ||
4759 | goto exit; | ||
4760 | } | ||
4761 | |||
4762 | hldev = blockpool->hldev; | ||
4763 | |||
4764 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4765 | |||
4766 | pci_unmap_single(hldev->pdev, | ||
4767 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4768 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4769 | PCI_DMA_BIDIRECTIONAL); | ||
4770 | |||
4771 | vxge_os_dma_free(hldev->pdev, | ||
4772 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4773 | &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); | ||
4774 | |||
4775 | list_del( | ||
4776 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4777 | kfree(p); | ||
4778 | blockpool->pool_size--; | ||
4779 | } | ||
4780 | |||
4781 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
4782 | list_del( | ||
4783 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4784 | kfree((void *)p); | ||
4785 | } | ||
4786 | ret = 0; | ||
4787 | exit: | ||
4788 | return; | ||
4789 | } | ||
4790 | |||
4791 | /* | ||
4792 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | ||
4793 | */ | ||
4794 | static | ||
4795 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
4796 | { | ||
4797 | u32 nreq = 0, i; | ||
4798 | |||
4799 | if ((blockpool->pool_size + blockpool->req_out) < | ||
4800 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
4801 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
4802 | blockpool->req_out += nreq; | ||
4803 | } | ||
4804 | |||
4805 | for (i = 0; i < nreq; i++) | ||
4806 | vxge_os_dma_malloc_async( | ||
4807 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4808 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | ||
4809 | } | ||
4810 | |||
4811 | /* | ||
4812 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | ||
4813 | */ | ||
4814 | static | ||
4815 | void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
4816 | { | ||
4817 | struct list_head *p, *n; | ||
4818 | |||
4819 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4820 | |||
4821 | if (blockpool->pool_size < blockpool->pool_max) | ||
4822 | break; | ||
4823 | |||
4824 | pci_unmap_single( | ||
4825 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4826 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4827 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4828 | PCI_DMA_BIDIRECTIONAL); | ||
4829 | |||
4830 | vxge_os_dma_free( | ||
4831 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4832 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4833 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
4834 | |||
4835 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4836 | |||
4837 | list_add(p, &blockpool->free_entry_list); | ||
4838 | |||
4839 | blockpool->pool_size--; | ||
4840 | |||
4841 | } | ||
4842 | } | ||
4843 | |||
4844 | /* | ||
4845 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
4846 | * Adds a block to block pool | ||
4847 | */ | ||
4848 | void vxge_hw_blockpool_block_add( | ||
4849 | struct __vxge_hw_device *devh, | ||
4850 | void *block_addr, | ||
4851 | u32 length, | ||
4852 | struct pci_dev *dma_h, | ||
4853 | struct pci_dev *acc_handle) | ||
4854 | { | ||
4855 | struct __vxge_hw_blockpool *blockpool; | ||
4856 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4857 | dma_addr_t dma_addr; | ||
4858 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4859 | u32 req_out; | ||
4860 | |||
4861 | blockpool = &devh->block_pool; | ||
4862 | |||
4863 | if (block_addr == NULL) { | ||
4864 | blockpool->req_out--; | ||
4865 | status = VXGE_HW_FAIL; | ||
4866 | goto exit; | ||
4867 | } | ||
4868 | |||
4869 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | ||
4870 | PCI_DMA_BIDIRECTIONAL); | ||
4871 | |||
4872 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | ||
4873 | |||
4874 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
4875 | blockpool->req_out--; | ||
4876 | status = VXGE_HW_FAIL; | ||
4877 | goto exit; | ||
4878 | } | ||
4879 | |||
4880 | |||
4881 | if (!list_empty(&blockpool->free_entry_list)) | ||
4882 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4883 | list_first_entry(&blockpool->free_entry_list, | ||
4884 | struct __vxge_hw_blockpool_entry, | ||
4885 | item); | ||
4886 | |||
4887 | if (entry == NULL) | ||
4888 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4889 | vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | ||
4890 | else | ||
4891 | list_del(&entry->item); | ||
4892 | |||
4893 | if (entry != NULL) { | ||
4894 | entry->length = length; | ||
4895 | entry->memblock = block_addr; | ||
4896 | entry->dma_addr = dma_addr; | ||
4897 | entry->acc_handle = acc_handle; | ||
4898 | entry->dma_handle = dma_h; | ||
4899 | list_add(&entry->item, &blockpool->free_block_list); | ||
4900 | blockpool->pool_size++; | ||
4901 | status = VXGE_HW_OK; | ||
4902 | } else | ||
4903 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4904 | |||
4905 | blockpool->req_out--; | ||
4906 | |||
4907 | req_out = blockpool->req_out; | ||
4908 | exit: | ||
4909 | return; | ||
4910 | } | ||
4911 | |||
4912 | /* | ||
4913 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | ||
4914 | * Allocates a block of memory of given size, either from block pool | ||
4915 | * or by calling vxge_os_dma_malloc() | ||
4916 | */ | ||
4917 | void * | ||
4918 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
4919 | struct vxge_hw_mempool_dma *dma_object) | ||
4920 | { | ||
4921 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4922 | struct __vxge_hw_blockpool *blockpool; | ||
4923 | void *memblock = NULL; | ||
4924 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4925 | |||
4926 | blockpool = &devh->block_pool; | ||
4927 | |||
4928 | if (size != blockpool->block_size) { | ||
4929 | |||
4930 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
4931 | &dma_object->handle, | ||
4932 | &dma_object->acc_handle); | ||
4933 | |||
4934 | if (memblock == NULL) { | ||
4935 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4936 | goto exit; | ||
4937 | } | ||
4938 | |||
4939 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | ||
4940 | PCI_DMA_BIDIRECTIONAL); | ||
4941 | |||
4942 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
4943 | dma_object->addr))) { | ||
4944 | vxge_os_dma_free(devh->pdev, memblock, | ||
4945 | &dma_object->acc_handle); | ||
4946 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4947 | goto exit; | ||
4948 | } | ||
4949 | |||
4950 | } else { | ||
4951 | |||
4952 | if (!list_empty(&blockpool->free_block_list)) | ||
4953 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4954 | list_first_entry(&blockpool->free_block_list, | ||
4955 | struct __vxge_hw_blockpool_entry, | ||
4956 | item); | ||
4957 | |||
4958 | if (entry != NULL) { | ||
4959 | list_del(&entry->item); | ||
4960 | dma_object->addr = entry->dma_addr; | ||
4961 | dma_object->handle = entry->dma_handle; | ||
4962 | dma_object->acc_handle = entry->acc_handle; | ||
4963 | memblock = entry->memblock; | ||
4964 | |||
4965 | list_add(&entry->item, | ||
4966 | &blockpool->free_entry_list); | ||
4967 | blockpool->pool_size--; | ||
4968 | } | ||
4969 | |||
4970 | if (memblock != NULL) | ||
4971 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4972 | } | ||
4973 | exit: | ||
4974 | return memblock; | ||
4975 | } | ||
4976 | |||
4977 | /* | ||
4978 | * __vxge_hw_blockpool_free - Frees the memory allcoated with | ||
4979 | __vxge_hw_blockpool_malloc | ||
4980 | */ | ||
4981 | void | ||
4982 | __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, | ||
4983 | void *memblock, u32 size, | ||
4984 | struct vxge_hw_mempool_dma *dma_object) | ||
4985 | { | ||
4986 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4987 | struct __vxge_hw_blockpool *blockpool; | ||
4988 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4989 | |||
4990 | blockpool = &devh->block_pool; | ||
4991 | |||
4992 | if (size != blockpool->block_size) { | ||
4993 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
4994 | PCI_DMA_BIDIRECTIONAL); | ||
4995 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
4996 | } else { | ||
4997 | |||
4998 | if (!list_empty(&blockpool->free_entry_list)) | ||
4999 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5000 | list_first_entry(&blockpool->free_entry_list, | ||
5001 | struct __vxge_hw_blockpool_entry, | ||
5002 | item); | ||
5003 | |||
5004 | if (entry == NULL) | ||
5005 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5006 | vmalloc(sizeof( | ||
5007 | struct __vxge_hw_blockpool_entry)); | ||
5008 | else | ||
5009 | list_del(&entry->item); | ||
5010 | |||
5011 | if (entry != NULL) { | ||
5012 | entry->length = size; | ||
5013 | entry->memblock = memblock; | ||
5014 | entry->dma_addr = dma_object->addr; | ||
5015 | entry->acc_handle = dma_object->acc_handle; | ||
5016 | entry->dma_handle = dma_object->handle; | ||
5017 | list_add(&entry->item, | ||
5018 | &blockpool->free_block_list); | ||
5019 | blockpool->pool_size++; | ||
5020 | status = VXGE_HW_OK; | ||
5021 | } else | ||
5022 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5023 | |||
5024 | if (status == VXGE_HW_OK) | ||
5025 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5026 | } | ||
5027 | } | ||
5028 | |||
5029 | /* | ||
5030 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
5031 | * This function allocates a block from block pool or from the system | ||
5032 | */ | ||
5033 | struct __vxge_hw_blockpool_entry * | ||
5034 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
5035 | { | ||
5036 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5037 | struct __vxge_hw_blockpool *blockpool; | ||
5038 | |||
5039 | blockpool = &devh->block_pool; | ||
5040 | |||
5041 | if (size == blockpool->block_size) { | ||
5042 | |||
5043 | if (!list_empty(&blockpool->free_block_list)) | ||
5044 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5045 | list_first_entry(&blockpool->free_block_list, | ||
5046 | struct __vxge_hw_blockpool_entry, | ||
5047 | item); | ||
5048 | |||
5049 | if (entry != NULL) { | ||
5050 | list_del(&entry->item); | ||
5051 | blockpool->pool_size--; | ||
5052 | } | ||
5053 | } | ||
5054 | |||
5055 | if (entry != NULL) | ||
5056 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5057 | |||
5058 | return entry; | ||
5059 | } | ||
5060 | |||
5061 | /* | ||
5062 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
5063 | * @devh: Hal device | ||
5064 | * @entry: Entry of block to be freed | ||
5065 | * | ||
5066 | * This function frees a block from block pool | ||
5067 | */ | ||
5068 | void | ||
5069 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
5070 | struct __vxge_hw_blockpool_entry *entry) | ||
5071 | { | ||
5072 | struct __vxge_hw_blockpool *blockpool; | ||
5073 | |||
5074 | blockpool = &devh->block_pool; | ||
5075 | |||
5076 | if (entry->length == blockpool->block_size) { | ||
5077 | list_add(&entry->item, &blockpool->free_block_list); | ||
5078 | blockpool->pool_size++; | ||
5079 | } | ||
5080 | |||
5081 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5082 | } | ||