author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/vxge | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r-- | drivers/net/vxge/vxge-config.c | 3610 |
-rw-r--r-- | drivers/net/vxge/vxge-config.h | 458 |
-rw-r--r-- | drivers/net/vxge/vxge-ethtool.c | 142 |
-rw-r--r-- | drivers/net/vxge/vxge-main.c | 1449 |
-rw-r--r-- | drivers/net/vxge/vxge-main.h | 169 |
-rw-r--r-- | drivers/net/vxge/vxge-reg.h | 33 |
-rw-r--r-- | drivers/net/vxge/vxge-traffic.c | 931 |
-rw-r--r-- | drivers/net/vxge/vxge-traffic.h | 195 |
-rw-r--r-- | drivers/net/vxge/vxge-version.h | 33 |
9 files changed, 3611 insertions, 3409 deletions
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db5935609..32763b2dd73f 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,76 +19,386 @@ | |||
19 | 19 | ||
20 | #include "vxge-traffic.h" | 20 | #include "vxge-traffic.h" |
21 | #include "vxge-config.h" | 21 | #include "vxge-config.h" |
22 | #include "vxge-main.h" | ||
23 | |||
24 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
25 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
26 | VXGE_HW_STATS_OP_READ, \ | ||
27 | offset, \ | ||
28 | &val64); \ | ||
29 | if (status != VXGE_HW_OK) \ | ||
30 | return status; \ | ||
31 | } | ||
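
A note on the VXGE_HW_VPATH_STATS_PIO_READ macro added above: it is a statement macro that can return from whichever function expands it, and it silently relies on status, vpath, and val64 already existing in the caller's scope. A minimal standalone sketch of the same pattern, with a hypothetical read_reg() standing in for __vxge_hw_vpath_stats_access():

	#include <stdio.h>

	enum status { OK, FAIL };

	static enum status read_reg(int offset, unsigned long long *val)
	{
		*val = 0x1234;                     /* fake hardware read */
		return offset >= 0 ? OK : FAIL;
	}

	/* Like VXGE_HW_VPATH_STATS_PIO_READ: a statement macro that returns
	 * from the *enclosing* function on error, using names (status, val64)
	 * the caller must have declared. */
	#define STATS_PIO_READ(offset) {                 \
		status = read_reg(offset, &val64);       \
		if (status != OK)                        \
			return status;                   \
	}

	static enum status get_stats(void)
	{
		enum status status;
		unsigned long long val64;

		STATS_PIO_READ(8);         /* may return early on failure */
		printf("stat = %llx\n", val64);
		return OK;
	}

	int main(void) { return get_stats() == OK ? 0 : 1; }

The convenience comes at the cost of hidden control flow: a caller that needs cleanup on the error path cannot use such a macro safely.
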
32 | |||
33 | static void | ||
34 | vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg) | ||
35 | { | ||
36 | u64 val64; | ||
37 | |||
38 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
39 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | ||
40 | writeq(val64, &vp_reg->rxmac_vcfg0); | ||
41 | val64 = readq(&vp_reg->rxmac_vcfg0); | ||
42 | } | ||
22 | 43 | ||
23 | /* | 44 | /* |
24 | * __vxge_hw_channel_allocate - Allocate memory for channel | 45 | * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle |
25 | * This function allocates required memory for the channel and various arrays | ||
26 | * in the channel | ||
27 | */ | 46 | */ |
28 | struct __vxge_hw_channel* | 47 | int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id) |
29 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
30 | enum __vxge_hw_channel_type type, | ||
31 | u32 length, u32 per_dtr_space, void *userdata) | ||
32 | { | 48 | { |
33 | struct __vxge_hw_channel *channel; | 49 | struct vxge_hw_vpath_reg __iomem *vp_reg; |
34 | struct __vxge_hw_device *hldev; | 50 | struct __vxge_hw_virtualpath *vpath; |
35 | int size = 0; | 51 | u64 val64, rxd_count, rxd_spat; |
36 | u32 vp_id; | 52 | int count = 0, total_count = 0; |
37 | 53 | ||
38 | hldev = vph->vpath->hldev; | 54 | vpath = &hldev->virtual_paths[vp_id]; |
39 | vp_id = vph->vpath->vp_id; | 55 | vp_reg = vpath->vp_reg; |
40 | 56 | ||
41 | switch (type) { | 57 | vxge_hw_vpath_set_zero_rx_frm_len(vp_reg); |
42 | case VXGE_HW_CHANNEL_TYPE_FIFO: | 58 | |
43 | size = sizeof(struct __vxge_hw_fifo); | 59 | /* Check that the ring controller for this vpath has enough free RxDs |
44 | break; | 60 | * to send frames to the host. This is done by reading the |
45 | case VXGE_HW_CHANNEL_TYPE_RING: | 61 | * PRC_RXD_DOORBELL_VPn register and comparing the read value to the |
46 | size = sizeof(struct __vxge_hw_ring); | 62 | * RXD_SPAT value for the vpath. |
47 | break; | 63 | */ |
48 | default: | 64 | val64 = readq(&vp_reg->prc_cfg6); |
49 | break; | 65 | rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1; |
66 | /* Use a factor of 2 when comparing rxd_count against rxd_spat for some | ||
67 | * leg room. | ||
68 | */ | ||
69 | rxd_spat *= 2; | ||
70 | |||
71 | do { | ||
72 | mdelay(1); | ||
73 | |||
74 | rxd_count = readq(&vp_reg->prc_rxd_doorbell); | ||
75 | |||
76 | /* Check that the ring controller for this vpath does | ||
77 | * not have any frame in its pipeline. | ||
78 | */ | ||
79 | val64 = readq(&vp_reg->frm_in_progress_cnt); | ||
80 | if ((rxd_count <= rxd_spat) || (val64 > 0)) | ||
81 | count = 0; | ||
82 | else | ||
83 | count++; | ||
84 | total_count++; | ||
85 | } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) && | ||
86 | (total_count < VXGE_HW_MAX_POLLING_COUNT)); | ||
87 | |||
88 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
89 | printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n", | ||
90 | __func__); | ||
91 | |||
92 | return total_count; | ||
93 | } | ||
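
The loop above only declares the ring idle after VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive idle observations, and gives up after a hard cap of polls. A standalone sketch of that debounced-poll pattern — read_busy() is a fake stand-in for the PRC_RXD_DOORBELL and frm_in_progress_cnt reads, and the constants are illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	#define MIN_SUCCESSIVE_IDLE 10  /* cf. VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT */
	#define MAX_POLLS 100           /* cf. VXGE_HW_MAX_POLLING_COUNT */

	static bool read_busy(int t) { return t < 25; } /* fake hw: busy at first */

	int wait_idle(void)
	{
		int count = 0, total = 0;

		do {
			/* the driver inserts mdelay(1) here */
			if (read_busy(total))
				count = 0;  /* any activity restarts the idle streak */
			else
				count++;
			total++;
		} while (count < MIN_SUCCESSIVE_IDLE && total < MAX_POLLS);

		if (total >= MAX_POLLS)
			printf("still receiving traffic, aborting wait\n");
		return total;
	}

	int main(void) { return wait_idle() >= MAX_POLLS; }

Requiring a streak of idle reads, rather than a single one, keeps a momentary lull between frames from being mistaken for a drained pipeline.
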
94 | |||
95 | /* vxge_hw_device_wait_receive_idle - This function waits until all frames | ||
96 | * stored in the frame buffer for each vpath assigned to the given | ||
97 | * function (hldev) have been sent to the host. | ||
98 | */ | ||
99 | void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) | ||
100 | { | ||
101 | int i, total_count = 0; | ||
102 | |||
103 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
104 | if (!(hldev->vpaths_deployed & vxge_mBIT(i))) | ||
105 | continue; | ||
106 | |||
107 | total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); | ||
108 | if (total_count >= VXGE_HW_MAX_POLLING_COUNT) | ||
109 | break; | ||
50 | } | 110 | } |
111 | } | ||
51 | 112 | ||
52 | channel = kzalloc(size, GFP_KERNEL); | 113 | /* |
53 | if (channel == NULL) | 114 | * __vxge_hw_device_register_poll |
54 | goto exit0; | 115 | * Polls a register for up to the specified amount of time, |
55 | INIT_LIST_HEAD(&channel->item); | 116 | * returning once the masked bit clears or the timeout expires. |
117 | */ | ||
118 | static enum vxge_hw_status | ||
119 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
120 | { | ||
121 | u64 val64; | ||
122 | u32 i = 0; | ||
123 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
56 | 124 | ||
57 | channel->common_reg = hldev->common_reg; | 125 | udelay(10); |
58 | channel->first_vp_id = hldev->first_vp_id; | ||
59 | channel->type = type; | ||
60 | channel->devh = hldev; | ||
61 | channel->vph = vph; | ||
62 | channel->userdata = userdata; | ||
63 | channel->per_dtr_space = per_dtr_space; | ||
64 | channel->length = length; | ||
65 | channel->vp_id = vp_id; | ||
66 | 126 | ||
67 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 127 | do { |
68 | if (channel->work_arr == NULL) | 128 | val64 = readq(reg); |
69 | goto exit1; | 129 | if (!(val64 & mask)) |
130 | return VXGE_HW_OK; | ||
131 | udelay(100); | ||
132 | } while (++i <= 9); | ||
70 | 133 | ||
71 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 134 | i = 0; |
72 | if (channel->free_arr == NULL) | 135 | do { |
73 | goto exit1; | 136 | val64 = readq(reg); |
74 | channel->free_ptr = length; | 137 | if (!(val64 & mask)) |
138 | return VXGE_HW_OK; | ||
139 | mdelay(1); | ||
140 | } while (++i <= max_millis); | ||
75 | 141 | ||
76 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 142 | return ret; |
77 | if (channel->reserve_arr == NULL) | 143 | } |
78 | goto exit1; | ||
79 | channel->reserve_ptr = length; | ||
80 | channel->reserve_top = 0; | ||
81 | 144 | ||
82 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | 145 | static inline enum vxge_hw_status |
83 | if (channel->orig_arr == NULL) | 146 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, |
84 | goto exit1; | 147 | u64 mask, u32 max_millis) |
148 | { | ||
149 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
150 | wmb(); | ||
151 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
152 | wmb(); | ||
85 | 153 | ||
86 | return channel; | 154 | return __vxge_hw_device_register_poll(addr, mask, max_millis); |
87 | exit1: | 155 | } |
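
__vxge_hw_pio_mem_write64() above posts a 64-bit value as two 32-bit PIO writes with a write barrier between them, then polls the strobe bit until the adapter clears it. A sketch of the split-write idea — which half lands in which window is device-specific; the point is the barrier ordering the two stores. Plain volatile stores stand in for the __vxge_hw_pio_mem_write32_lower/upper helpers and __sync_synchronize() for wmb():

	#include <stdint.h>

	static volatile uint32_t mmio_lower, mmio_upper; /* fake register windows */

	static void write64_split(uint64_t val64)
	{
		mmio_lower = (uint32_t)val64;          /* one half first... */
		__sync_synchronize();                  /* wmb(): order the halves */
		mmio_upper = (uint32_t)(val64 >> 32);  /* ...then the other */
		__sync_synchronize();
		/* the real helper then calls __vxge_hw_device_register_poll()
		 * to wait for the strobe bit to clear */
	}

	int main(void) { write64_split(0xdeadbeefcafef00dULL); return 0; }
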
88 | __vxge_hw_channel_free(channel); | ||
89 | 156 | ||
90 | exit0: | 157 | static enum vxge_hw_status |
91 | return NULL; | 158 | vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, |
159 | u32 fw_memo, u32 offset, u64 *data0, u64 *data1, | ||
160 | u64 *steer_ctrl) | ||
161 | { | ||
162 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | ||
163 | enum vxge_hw_status status; | ||
164 | u64 val64; | ||
165 | u32 retry = 0, max_retry = 3; | ||
166 | |||
167 | spin_lock(&vpath->lock); | ||
168 | if (!vpath->vp_open) { | ||
169 | spin_unlock(&vpath->lock); | ||
170 | max_retry = 100; | ||
171 | } | ||
172 | |||
173 | writeq(*data0, &vp_reg->rts_access_steer_data0); | ||
174 | writeq(*data1, &vp_reg->rts_access_steer_data1); | ||
175 | wmb(); | ||
176 | |||
177 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
178 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | | ||
179 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | | ||
180 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
181 | *steer_ctrl; | ||
182 | |||
183 | status = __vxge_hw_pio_mem_write64(val64, | ||
184 | &vp_reg->rts_access_steer_ctrl, | ||
185 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
186 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
187 | |||
188 | /* The __vxge_hw_device_register_poll can udelay for a significant | ||
189 | * amount of time, blocking other processes from the CPU. If it delays | ||
190 | * for ~5secs, an NMI error can occur. A way around this is to give up | ||
191 | * the processor via msleep, but this is not allowed if under lock. | ||
192 | * So, only allow it to sleep for ~4secs if open. Otherwise, delay for | ||
193 | * 1sec and sleep for 10ms until the firmware operation has completed | ||
194 | * or timed-out. | ||
195 | */ | ||
196 | while ((status != VXGE_HW_OK) && retry++ < max_retry) { | ||
197 | if (!vpath->vp_open) | ||
198 | msleep(20); | ||
199 | status = __vxge_hw_device_register_poll( | ||
200 | &vp_reg->rts_access_steer_ctrl, | ||
201 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
202 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
203 | } | ||
204 | |||
205 | if (status != VXGE_HW_OK) | ||
206 | goto out; | ||
207 | |||
208 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
209 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
210 | *data0 = readq(&vp_reg->rts_access_steer_data0); | ||
211 | *data1 = readq(&vp_reg->rts_access_steer_data1); | ||
212 | *steer_ctrl = val64; | ||
213 | } else | ||
214 | status = VXGE_HW_FAIL; | ||
215 | |||
216 | out: | ||
217 | if (vpath->vp_open) | ||
218 | spin_unlock(&vpath->lock); | ||
219 | return status; | ||
220 | } | ||
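
The retry logic in vxge_hw_vpath_fw_api() above tolerates slow firmware operations by re-polling, and only sleeps between attempts when no spinlock is held (vp_open false means the lock was dropped, so msleep is legal and many more retries are allowed). A standalone sketch of that pattern, with poll_done() as a fake completion check:

	#include <stdbool.h>
	#include <unistd.h>

	static bool poll_done(int attempt) { return attempt >= 2; } /* fake fw */

	static bool wait_fw_op(bool may_sleep)
	{
		int max_retry = may_sleep ? 100 : 3;  /* mirrors the 100-vs-3 split */

		for (int retry = 0; retry < max_retry; retry++) {
			if (may_sleep)
				usleep(20 * 1000);    /* the msleep(20) equivalent */
			if (poll_done(retry))
				return true;
		}
		return false;
	}

	int main(void) { return wait_fw_op(true) ? 0 : 1; }
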
221 | |||
222 | enum vxge_hw_status | ||
223 | vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, | ||
224 | u32 *minor, u32 *build) | ||
225 | { | ||
226 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
227 | struct __vxge_hw_virtualpath *vpath; | ||
228 | enum vxge_hw_status status; | ||
229 | |||
230 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
231 | |||
232 | status = vxge_hw_vpath_fw_api(vpath, | ||
233 | VXGE_HW_FW_UPGRADE_ACTION, | ||
234 | VXGE_HW_FW_UPGRADE_MEMO, | ||
235 | VXGE_HW_FW_UPGRADE_OFFSET_READ, | ||
236 | &data0, &data1, &steer_ctrl); | ||
237 | if (status != VXGE_HW_OK) | ||
238 | return status; | ||
239 | |||
240 | *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
241 | *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
242 | *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
243 | |||
244 | return status; | ||
245 | } | ||
246 | |||
247 | enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) | ||
248 | { | ||
249 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
250 | struct __vxge_hw_virtualpath *vpath; | ||
251 | enum vxge_hw_status status; | ||
252 | u32 ret; | ||
253 | |||
254 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
255 | |||
256 | status = vxge_hw_vpath_fw_api(vpath, | ||
257 | VXGE_HW_FW_UPGRADE_ACTION, | ||
258 | VXGE_HW_FW_UPGRADE_MEMO, | ||
259 | VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, | ||
260 | &data0, &data1, &steer_ctrl); | ||
261 | if (status != VXGE_HW_OK) { | ||
262 | vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); | ||
263 | goto exit; | ||
264 | } | ||
265 | |||
266 | ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; | ||
267 | if (ret != 1) { | ||
268 | vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", | ||
269 | __func__, ret); | ||
270 | status = VXGE_HW_FAIL; | ||
271 | } | ||
272 | |||
273 | exit: | ||
274 | return status; | ||
275 | } | ||
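
The commit reply in vxge_hw_flash_fw() comes back packed in the steer-control word, and the code masks the extracted action field down to 7 bits before comparing it with 1. A hedged illustration of that kind of field check — the shift and width here are invented for the example, not the Titan register layout:

	#include <stdint.h>

	/* Assumed field position, for illustration only. */
	#define GET_ACTION(ctrl) (((ctrl) >> 8) & 0xFFu)

	static int fw_commit_ok(uint64_t steer_ctrl)
	{
		return (GET_ACTION(steer_ctrl) & 0x7F) == 1; /* 1 == accepted */
	}

	int main(void) { return fw_commit_ok((uint64_t)1 << 8) ? 0 : 1; }
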
276 | |||
277 | enum vxge_hw_status | ||
278 | vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) | ||
279 | { | ||
280 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
281 | struct __vxge_hw_virtualpath *vpath; | ||
282 | enum vxge_hw_status status; | ||
283 | int ret_code, sec_code; | ||
284 | |||
285 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
286 | |||
287 | /* send upgrade start command */ | ||
288 | status = vxge_hw_vpath_fw_api(vpath, | ||
289 | VXGE_HW_FW_UPGRADE_ACTION, | ||
290 | VXGE_HW_FW_UPGRADE_MEMO, | ||
291 | VXGE_HW_FW_UPGRADE_OFFSET_START, | ||
292 | &data0, &data1, &steer_ctrl); | ||
293 | if (status != VXGE_HW_OK) { | ||
294 | vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed", | ||
295 | __func__); | ||
296 | return status; | ||
297 | } | ||
298 | |||
299 | /* Transfer fw image to adapter 16 bytes at a time */ | ||
300 | for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { | ||
301 | steer_ctrl = 0; | ||
302 | |||
303 | /* The next 128 bits of fwdata to be loaded onto the adapter */ | ||
304 | data0 = *((u64 *)fwdata); | ||
305 | data1 = *((u64 *)fwdata + 1); | ||
306 | |||
307 | status = vxge_hw_vpath_fw_api(vpath, | ||
308 | VXGE_HW_FW_UPGRADE_ACTION, | ||
309 | VXGE_HW_FW_UPGRADE_MEMO, | ||
310 | VXGE_HW_FW_UPGRADE_OFFSET_SEND, | ||
311 | &data0, &data1, &steer_ctrl); | ||
312 | if (status != VXGE_HW_OK) { | ||
313 | vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", | ||
314 | __func__); | ||
315 | goto out; | ||
316 | } | ||
317 | |||
318 | ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); | ||
319 | switch (ret_code) { | ||
320 | case VXGE_HW_FW_UPGRADE_OK: | ||
321 | /* All OK, send next 16 bytes. */ | ||
322 | break; | ||
323 | case VXGE_FW_UPGRADE_BYTES2SKIP: | ||
324 | /* skip bytes in the stream */ | ||
325 | fwdata += (data0 >> 8) & 0xFFFFFFFF; | ||
326 | break; | ||
327 | case VXGE_HW_FW_UPGRADE_DONE: | ||
328 | goto out; | ||
329 | case VXGE_HW_FW_UPGRADE_ERR: | ||
330 | sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); | ||
331 | switch (sec_code) { | ||
332 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: | ||
333 | case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: | ||
334 | printk(KERN_ERR | ||
335 | "corrupted data from .ncf file\n"); | ||
336 | break; | ||
337 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: | ||
338 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: | ||
339 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: | ||
340 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: | ||
341 | case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: | ||
342 | printk(KERN_ERR "invalid .ncf file\n"); | ||
343 | break; | ||
344 | case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: | ||
345 | printk(KERN_ERR "buffer overflow\n"); | ||
346 | break; | ||
347 | case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: | ||
348 | printk(KERN_ERR "failed to flash the image\n"); | ||
349 | break; | ||
350 | case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: | ||
351 | printk(KERN_ERR | ||
352 | "generic error. Unknown error type\n"); | ||
353 | break; | ||
354 | default: | ||
355 | printk(KERN_ERR "Unknown error of type %d\n", | ||
356 | sec_code); | ||
357 | break; | ||
358 | } | ||
359 | status = VXGE_HW_FAIL; | ||
360 | goto out; | ||
361 | default: | ||
362 | printk(KERN_ERR "Unknown FW error: %d\n", ret_code); | ||
363 | status = VXGE_HW_FAIL; | ||
364 | goto out; | ||
365 | } | ||
366 | /* point to next 16 bytes */ | ||
367 | fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; | ||
368 | } | ||
369 | out: | ||
370 | return status; | ||
371 | } | ||
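
vxge_update_fw_image() above walks the image one 128-bit block per firmware call and honors BYTES2SKIP replies by moving the cursor forward by an amount encoded in the reply word. A sketch of that chunked-transfer loop; send_block() is a hypothetical stand-in for the OFFSET_SEND call and finishes immediately in this fake:

	#include <stdint.h>
	#include <stddef.h>

	#define BLK 16  /* one firmware call consumes 128 bits, as above */

	enum fw_ret { FW_OK, FW_SKIP, FW_DONE, FW_ERR };

	/* Hypothetical device call; on FW_SKIP, *skip reports how many extra
	 * bytes the device wants skipped. */
	static enum fw_ret send_block(const uint8_t *p, size_t *skip)
	{
		(void)p; (void)skip;
		return FW_DONE;            /* fake device: finishes immediately */
	}

	static int push_image(const uint8_t *fw, size_t size)
	{
		while (size > 0) {
			size_t skip = 0;

			switch (send_block(fw, &skip)) {
			case FW_OK:   break;             /* send the next block */
			case FW_SKIP: fw += skip; break; /* jump cursor forward */
			case FW_DONE: return 0;
			default:      return -1;
			}
			fw += BLK;
			size = size > BLK ? size - BLK : 0;
		}
		return 0;
	}

	int main(void)
	{
		uint8_t image[64] = { 0 };
		return push_image(image, sizeof(image));
	}
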
372 | |||
373 | enum vxge_hw_status | ||
374 | vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, | ||
375 | struct eprom_image *img) | ||
376 | { | ||
377 | u64 data0 = 0, data1 = 0, steer_ctrl = 0; | ||
378 | struct __vxge_hw_virtualpath *vpath; | ||
379 | enum vxge_hw_status status; | ||
380 | int i; | ||
381 | |||
382 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; | ||
383 | |||
384 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { | ||
385 | data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); | ||
386 | data1 = steer_ctrl = 0; | ||
387 | |||
388 | status = vxge_hw_vpath_fw_api(vpath, | ||
389 | VXGE_HW_FW_API_GET_EPROM_REV, | ||
390 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
391 | 0, &data0, &data1, &steer_ctrl); | ||
392 | if (status != VXGE_HW_OK) | ||
393 | break; | ||
394 | |||
395 | img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); | ||
396 | img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); | ||
397 | img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); | ||
398 | img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); | ||
399 | } | ||
400 | |||
401 | return status; | ||
92 | } | 402 | } |
93 | 403 | ||
94 | /* | 404 | /* |
@@ -96,7 +406,7 @@ exit0: | |||
96 | * This function deallocates memory from the channel and various arrays | 406 | * This function deallocates memory from the channel and various arrays |
97 | * in the channel | 407 | * in the channel |
98 | */ | 408 | */ |
99 | void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) | 409 | static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) |
100 | { | 410 | { |
101 | kfree(channel->work_arr); | 411 | kfree(channel->work_arr); |
102 | kfree(channel->free_arr); | 412 | kfree(channel->free_arr); |
@@ -110,7 +420,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) | |||
110 | * This function initializes a channel by properly setting the | 420 | * This function initializes a channel by properly setting the |
111 | * various references | 421 | * various references |
112 | */ | 422 | */ |
113 | enum vxge_hw_status | 423 | static enum vxge_hw_status |
114 | __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) | 424 | __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) |
115 | { | 425 | { |
116 | u32 i; | 426 | u32 i; |
@@ -145,7 +455,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) | |||
145 | * __vxge_hw_channel_reset - Resets a channel | 455 | * __vxge_hw_channel_reset - Resets a channel |
146 | * This function resets a channel by properly setting the various references | 456 | * This function resets a channel by properly setting the various references |
147 | */ | 457 | */ |
148 | enum vxge_hw_status | 458 | static enum vxge_hw_status |
149 | __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) | 459 | __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) |
150 | { | 460 | { |
151 | u32 i; | 461 | u32 i; |
@@ -172,8 +482,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel) | |||
172 | * Initialize certain PCI/PCI-X configuration registers | 482 | * Initialize certain PCI/PCI-X configuration registers |
173 | * with recommended values. Save config space for future hw resets. | 483 | * with recommended values. Save config space for future hw resets. |
174 | */ | 484 | */ |
175 | void | 485 | static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) |
176 | __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) | ||
177 | { | 486 | { |
178 | u16 cmd = 0; | 487 | u16 cmd = 0; |
179 | 488 | ||
@@ -185,43 +494,11 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) | |||
185 | pci_save_state(hldev->pdev); | 494 | pci_save_state(hldev->pdev); |
186 | } | 495 | } |
187 | 496 | ||
188 | /* | 497 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset |
189 | * __vxge_hw_device_register_poll | ||
190 | * Polls a register for up to the specified amount of time, | ||
191 | * returning once the masked bit clears or the timeout expires. | ||
192 | */ | ||
193 | enum vxge_hw_status | ||
194 | __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) | ||
195 | { | ||
196 | u64 val64; | ||
197 | u32 i = 0; | ||
198 | enum vxge_hw_status ret = VXGE_HW_FAIL; | ||
199 | |||
200 | udelay(10); | ||
201 | |||
202 | do { | ||
203 | val64 = readq(reg); | ||
204 | if (!(val64 & mask)) | ||
205 | return VXGE_HW_OK; | ||
206 | udelay(100); | ||
207 | } while (++i <= 9); | ||
208 | |||
209 | i = 0; | ||
210 | do { | ||
211 | val64 = readq(reg); | ||
212 | if (!(val64 & mask)) | ||
213 | return VXGE_HW_OK; | ||
214 | mdelay(1); | ||
215 | } while (++i <= max_millis); | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset | ||
221 | * in progress | 498 | * in progress |
222 | * This routine checks that the vpath reset-in-progress register has turned zero | 499 | * This routine checks that the vpath reset-in-progress register has turned zero |
223 | */ | 500 | */ |
224 | enum vxge_hw_status | 501 | static enum vxge_hw_status |
225 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) | 502 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) |
226 | { | 503 | { |
227 | enum vxge_hw_status status; | 504 | enum vxge_hw_status status; |
@@ -232,11 +509,65 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) | |||
232 | } | 509 | } |
233 | 510 | ||
234 | /* | 511 | /* |
512 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | ||
513 | * Set the swapper bits appropriately for the legacy section. | ||
514 | */ | ||
515 | static enum vxge_hw_status | ||
516 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
517 | { | ||
518 | u64 val64; | ||
519 | enum vxge_hw_status status = VXGE_HW_OK; | ||
520 | |||
521 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
522 | |||
523 | wmb(); | ||
524 | |||
525 | switch (val64) { | ||
526 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
527 | return status; | ||
528 | |||
529 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
530 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
531 | &legacy_reg->pifm_rd_swap_en); | ||
532 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
533 | &legacy_reg->pifm_rd_flip_en); | ||
534 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
535 | &legacy_reg->pifm_wr_swap_en); | ||
536 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
537 | &legacy_reg->pifm_wr_flip_en); | ||
538 | break; | ||
539 | |||
540 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
541 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
542 | &legacy_reg->pifm_rd_swap_en); | ||
543 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
544 | &legacy_reg->pifm_wr_swap_en); | ||
545 | break; | ||
546 | |||
547 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
548 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
549 | &legacy_reg->pifm_rd_flip_en); | ||
550 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
551 | &legacy_reg->pifm_wr_flip_en); | ||
552 | break; | ||
553 | } | ||
554 | |||
555 | wmb(); | ||
556 | |||
557 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
558 | |||
559 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
560 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
561 | |||
562 | return status; | ||
563 | } | ||
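
The switch in __vxge_hw_legacy_swapper_set() is essentially an endianness probe: reading back a known 64-bit signature tells the driver whether register accesses arrive intact, byte-swapped, bit-flipped, or both, and the PIFM swap/flip enables are programmed accordingly. A sketch of the probe idea — SIGNATURE is an illustrative constant, not VXGE_HW_SWAPPER_INITIAL_VALUE:

	#include <stdint.h>
	#include <stdio.h>

	#define SIGNATURE 0x0123456789ABCDEFULL  /* illustrative known value */

	static const char *classify(uint64_t readback)
	{
		if (readback == SIGNATURE)
			return "no swap needed";
		if (readback == __builtin_bswap64(SIGNATURE))
			return "byte-swapped: enable read/write byte swap";
		return "bit-flipped (or both): enable the flip paths too";
	}

	int main(void)
	{
		printf("%s\n", classify(__builtin_bswap64(SIGNATURE)));
		return 0;
	}
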
564 | |||
565 | /* | ||
235 | * __vxge_hw_device_toc_get | 566 | * __vxge_hw_device_toc_get |
236 | * This routine sets the swapper and reads the toc pointer and returns the | 567 | * This routine sets the swapper and reads the toc pointer and returns the |
237 | * memory mapped address of the toc | 568 | * memory mapped address of the toc |
238 | */ | 569 | */ |
239 | struct vxge_hw_toc_reg __iomem * | 570 | static struct vxge_hw_toc_reg __iomem * |
240 | __vxge_hw_device_toc_get(void __iomem *bar0) | 571 | __vxge_hw_device_toc_get(void __iomem *bar0) |
241 | { | 572 | { |
242 | u64 val64; | 573 | u64 val64; |
@@ -262,7 +593,7 @@ exit: | |||
262 | * register location pointers in the device object. It waits until the RIC | 593 | * register location pointers in the device object. It waits until the RIC |
263 | * has finished initializing registers. | 594 | * has finished initializing registers. |
264 | */ | 595 | */ |
265 | enum vxge_hw_status | 596 | static enum vxge_hw_status |
266 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) | 597 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) |
267 | { | 598 | { |
268 | u64 val64; | 599 | u64 val64; |
@@ -323,26 +654,6 @@ exit: | |||
323 | } | 654 | } |
324 | 655 | ||
325 | /* | 656 | /* |
326 | * __vxge_hw_device_id_get | ||
327 | * This routine sets the device id and revision numbers into the device | ||
328 | * structure | ||
329 | */ | ||
330 | void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev) | ||
331 | { | ||
332 | u64 val64; | ||
333 | |||
334 | val64 = readq(&hldev->common_reg->titan_asic_id); | ||
335 | hldev->device_id = | ||
336 | (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64); | ||
337 | |||
338 | hldev->major_revision = | ||
339 | (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64); | ||
340 | |||
341 | hldev->minor_revision = | ||
342 | (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * __vxge_hw_device_access_rights_get: Get Access Rights of the driver | 657 | * __vxge_hw_device_access_rights_get: Get Access Rights of the driver |
347 | * This routine returns the Access Rights of the driver | 658 | * This routine returns the Access Rights of the driver |
348 | */ | 659 | */ |
@@ -395,10 +706,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) | |||
395 | } | 706 | } |
396 | 707 | ||
397 | /* | 708 | /* |
709 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
710 | * Returns the function number of the vpath. | ||
711 | */ | ||
712 | static u32 | ||
713 | __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
714 | { | ||
715 | u64 val64; | ||
716 | |||
717 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
718 | |||
719 | return | ||
720 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
721 | } | ||
722 | |||
723 | /* | ||
398 | * __vxge_hw_device_host_info_get | 724 | * __vxge_hw_device_host_info_get |
399 | * This routine returns the host type assignments | 725 | * This routine returns the host type assignments |
400 | */ | 726 | */ |
401 | void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) | 727 | static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) |
402 | { | 728 | { |
403 | u64 val64; | 729 | u64 val64; |
404 | u32 i; | 730 | u32 i; |
@@ -411,16 +737,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) | |||
411 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); | 737 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); |
412 | 738 | ||
413 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 739 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
414 | |||
415 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | 740 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) |
416 | continue; | 741 | continue; |
417 | 742 | ||
418 | hldev->func_id = | 743 | hldev->func_id = |
419 | __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); | 744 | __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); |
420 | 745 | ||
421 | hldev->access_rights = __vxge_hw_device_access_rights_get( | 746 | hldev->access_rights = __vxge_hw_device_access_rights_get( |
422 | hldev->host_type, hldev->func_id); | 747 | hldev->host_type, hldev->func_id); |
423 | 748 | ||
749 | hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; | ||
750 | hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; | ||
751 | |||
424 | hldev->first_vp_id = i; | 752 | hldev->first_vp_id = i; |
425 | break; | 753 | break; |
426 | } | 754 | } |
@@ -461,7 +789,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) | |||
461 | * __vxge_hw_device_initialize | 789 | * __vxge_hw_device_initialize |
462 | * Initialize Titan-V hardware. | 790 | * Initialize Titan-V hardware. |
463 | */ | 791 | */ |
464 | enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | 792 | static enum vxge_hw_status |
793 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | ||
465 | { | 794 | { |
466 | enum vxge_hw_status status = VXGE_HW_OK; | 795 | enum vxge_hw_status status = VXGE_HW_OK; |
467 | 796 | ||
@@ -477,10 +806,200 @@ exit: | |||
477 | return status; | 806 | return status; |
478 | } | 807 | } |
479 | 808 | ||
809 | /* | ||
810 | * __vxge_hw_vpath_fw_ver_get - Get the fw version | ||
811 | * Returns FW Version | ||
812 | */ | ||
813 | static enum vxge_hw_status | ||
814 | __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, | ||
815 | struct vxge_hw_device_hw_info *hw_info) | ||
816 | { | ||
817 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
818 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
819 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
820 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
821 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
822 | enum vxge_hw_status status; | ||
823 | |||
824 | status = vxge_hw_vpath_fw_api(vpath, | ||
825 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | ||
826 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
827 | 0, &data0, &data1, &steer_ctrl); | ||
828 | if (status != VXGE_HW_OK) | ||
829 | goto exit; | ||
830 | |||
831 | fw_date->day = | ||
832 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); | ||
833 | fw_date->month = | ||
834 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); | ||
835 | fw_date->year = | ||
836 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); | ||
837 | |||
838 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
839 | fw_date->month, fw_date->day, fw_date->year); | ||
840 | |||
841 | fw_version->major = | ||
842 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); | ||
843 | fw_version->minor = | ||
844 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); | ||
845 | fw_version->build = | ||
846 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); | ||
847 | |||
848 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
849 | fw_version->major, fw_version->minor, fw_version->build); | ||
850 | |||
851 | flash_date->day = | ||
852 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); | ||
853 | flash_date->month = | ||
854 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); | ||
855 | flash_date->year = | ||
856 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); | ||
857 | |||
858 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
859 | flash_date->month, flash_date->day, flash_date->year); | ||
860 | |||
861 | flash_version->major = | ||
862 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); | ||
863 | flash_version->minor = | ||
864 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); | ||
865 | flash_version->build = | ||
866 | (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); | ||
867 | |||
868 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
869 | flash_version->major, flash_version->minor, | ||
870 | flash_version->build); | ||
871 | |||
872 | exit: | ||
873 | return status; | ||
874 | } | ||
875 | |||
876 | /* | ||
877 | * __vxge_hw_vpath_card_info_get - Get the serial numbers, | ||
878 | * part number and product description. | ||
879 | */ | ||
880 | static enum vxge_hw_status | ||
881 | __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, | ||
882 | struct vxge_hw_device_hw_info *hw_info) | ||
883 | { | ||
884 | enum vxge_hw_status status; | ||
885 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
886 | u8 *serial_number = hw_info->serial_number; | ||
887 | u8 *part_number = hw_info->part_number; | ||
888 | u8 *product_desc = hw_info->product_desc; | ||
889 | u32 i, j = 0; | ||
890 | |||
891 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; | ||
892 | |||
893 | status = vxge_hw_vpath_fw_api(vpath, | ||
894 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
895 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
896 | 0, &data0, &data1, &steer_ctrl); | ||
897 | if (status != VXGE_HW_OK) | ||
898 | return status; | ||
899 | |||
900 | ((u64 *)serial_number)[0] = be64_to_cpu(data0); | ||
901 | ((u64 *)serial_number)[1] = be64_to_cpu(data1); | ||
902 | |||
903 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; | ||
904 | data1 = steer_ctrl = 0; | ||
905 | |||
906 | status = vxge_hw_vpath_fw_api(vpath, | ||
907 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
908 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
909 | 0, &data0, &data1, &steer_ctrl); | ||
910 | if (status != VXGE_HW_OK) | ||
911 | return status; | ||
912 | |||
913 | ((u64 *)part_number)[0] = be64_to_cpu(data0); | ||
914 | ((u64 *)part_number)[1] = be64_to_cpu(data1); | ||
915 | |||
916 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
917 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
918 | data0 = i; | ||
919 | data1 = steer_ctrl = 0; | ||
920 | |||
921 | status = vxge_hw_vpath_fw_api(vpath, | ||
922 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, | ||
923 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
924 | 0, &data0, &data1, &steer_ctrl); | ||
925 | if (status != VXGE_HW_OK) | ||
926 | return status; | ||
927 | |||
928 | ((u64 *)product_desc)[j++] = be64_to_cpu(data0); | ||
929 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
930 | } | ||
931 | |||
932 | return status; | ||
933 | } | ||
934 | |||
935 | /* | ||
936 | * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode | ||
937 | * Returns pci function mode | ||
938 | */ | ||
939 | static enum vxge_hw_status | ||
940 | __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, | ||
941 | struct vxge_hw_device_hw_info *hw_info) | ||
942 | { | ||
943 | u64 data0, data1 = 0, steer_ctrl = 0; | ||
944 | enum vxge_hw_status status; | ||
945 | |||
946 | data0 = 0; | ||
947 | |||
948 | status = vxge_hw_vpath_fw_api(vpath, | ||
949 | VXGE_HW_FW_API_GET_FUNC_MODE, | ||
950 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | ||
951 | 0, &data0, &data1, &steer_ctrl); | ||
952 | if (status != VXGE_HW_OK) | ||
953 | return status; | ||
954 | |||
955 | hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); | ||
956 | return status; | ||
957 | } | ||
958 | |||
959 | /* | ||
960 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
961 | * from MAC address table. | ||
962 | */ | ||
963 | static enum vxge_hw_status | ||
964 | __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, | ||
965 | u8 *macaddr, u8 *macaddr_mask) | ||
966 | { | ||
967 | u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, | ||
968 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
969 | enum vxge_hw_status status; | ||
970 | int i; | ||
971 | |||
972 | do { | ||
973 | status = vxge_hw_vpath_fw_api(vpath, action, | ||
974 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, | ||
975 | 0, &data0, &data1, &steer_ctrl); | ||
976 | if (status != VXGE_HW_OK) | ||
977 | goto exit; | ||
978 | |||
979 | data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); | ||
980 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
981 | data1); | ||
982 | |||
983 | for (i = ETH_ALEN; i > 0; i--) { | ||
984 | macaddr[i - 1] = (u8) (data0 & 0xFF); | ||
985 | data0 >>= 8; | ||
986 | |||
987 | macaddr_mask[i - 1] = (u8) (data1 & 0xFF); | ||
988 | data1 >>= 8; | ||
989 | } | ||
990 | |||
991 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; | ||
992 | data0 = 0, data1 = 0, steer_ctrl = 0; | ||
993 | |||
994 | } while (!is_valid_ether_addr(macaddr)); | ||
995 | exit: | ||
996 | return status; | ||
997 | } | ||
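
The byte-peeling loop in __vxge_hw_vpath_addr_get() above unpacks a MAC address that the firmware returns packed into the low 48 bits of a 64-bit word, least-significant byte last. A standalone sketch of the same loop:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	static void unpack_mac(uint64_t data0, uint8_t mac[ETH_ALEN])
	{
		/* peel the LSB into mac[5] first, working backwards */
		for (int i = ETH_ALEN; i > 0; i--) {
			mac[i - 1] = (uint8_t)(data0 & 0xFF);
			data0 >>= 8;
		}
	}

	int main(void)
	{
		uint8_t mac[ETH_ALEN];

		unpack_mac(0x001122334455ULL, mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0; /* prints 00:11:22:33:44:55 */
	}
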
998 | |||
480 | /** | 999 | /** |
481 | * vxge_hw_device_hw_info_get - Get the hw information | 1000 | * vxge_hw_device_hw_info_get - Get the hw information |
482 | * Returns the vpath mask that has the bits set for each vpath allocated | 1001 | * Returns the vpath mask that has the bits set for each vpath allocated |
483 | * for the driver, FW version information and the first mac addresse for | 1002 | * for the driver, FW version information, and the first mac address for |
484 | * each vpath | 1003 | * each vpath |
485 | */ | 1004 | */ |
486 | enum vxge_hw_status __devinit | 1005 | enum vxge_hw_status __devinit |
@@ -492,9 +1011,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
492 | struct vxge_hw_toc_reg __iomem *toc; | 1011 | struct vxge_hw_toc_reg __iomem *toc; |
493 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; | 1012 | struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; |
494 | struct vxge_hw_common_reg __iomem *common_reg; | 1013 | struct vxge_hw_common_reg __iomem *common_reg; |
495 | struct vxge_hw_vpath_reg __iomem *vpath_reg; | ||
496 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | 1014 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; |
497 | enum vxge_hw_status status; | 1015 | enum vxge_hw_status status; |
1016 | struct __vxge_hw_virtualpath vpath; | ||
498 | 1017 | ||
499 | memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); | 1018 | memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); |
500 | 1019 | ||
@@ -520,7 +1039,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
520 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | 1039 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); |
521 | 1040 | ||
522 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1041 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
523 | |||
524 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | 1042 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) |
525 | continue; | 1043 | continue; |
526 | 1044 | ||
@@ -529,7 +1047,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
529 | vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) | 1047 | vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) |
530 | (bar0 + val64); | 1048 | (bar0 + val64); |
531 | 1049 | ||
532 | hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); | 1050 | hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); |
533 | if (__vxge_hw_device_access_rights_get(hw_info->host_type, | 1051 | if (__vxge_hw_device_access_rights_get(hw_info->host_type, |
534 | hw_info->func_id) & | 1052 | hw_info->func_id) & |
535 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { | 1053 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { |
@@ -545,16 +1063,20 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
545 | 1063 | ||
546 | val64 = readq(&toc->toc_vpath_pointer[i]); | 1064 | val64 = readq(&toc->toc_vpath_pointer[i]); |
547 | 1065 | ||
548 | vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); | 1066 | spin_lock_init(&vpath.lock); |
1067 | vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) | ||
1068 | (bar0 + val64); | ||
1069 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
549 | 1070 | ||
550 | hw_info->function_mode = | 1071 | status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); |
551 | __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); | 1072 | if (status != VXGE_HW_OK) |
1073 | goto exit; | ||
552 | 1074 | ||
553 | status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); | 1075 | status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); |
554 | if (status != VXGE_HW_OK) | 1076 | if (status != VXGE_HW_OK) |
555 | goto exit; | 1077 | goto exit; |
556 | 1078 | ||
557 | status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); | 1079 | status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); |
558 | if (status != VXGE_HW_OK) | 1080 | if (status != VXGE_HW_OK) |
559 | goto exit; | 1081 | goto exit; |
560 | 1082 | ||
@@ -562,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0, | |||
562 | } | 1084 | } |
563 | 1085 | ||
564 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1086 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
565 | |||
566 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) | 1087 | if (!((hw_info->vpath_mask) & vxge_mBIT(i))) |
567 | continue; | 1088 | continue; |
568 | 1089 | ||
569 | val64 = readq(&toc->toc_vpath_pointer[i]); | 1090 | val64 = readq(&toc->toc_vpath_pointer[i]); |
570 | vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); | 1091 | vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) |
1092 | (bar0 + val64); | ||
1093 | vpath.vp_open = VXGE_HW_VP_NOT_OPEN; | ||
571 | 1094 | ||
572 | status = __vxge_hw_vpath_addr_get(i, vpath_reg, | 1095 | status = __vxge_hw_vpath_addr_get(&vpath, |
573 | hw_info->mac_addrs[i], | 1096 | hw_info->mac_addrs[i], |
574 | hw_info->mac_addr_masks[i]); | 1097 | hw_info->mac_addr_masks[i]); |
575 | if (status != VXGE_HW_OK) | 1098 | if (status != VXGE_HW_OK) |
@@ -580,6 +1103,218 @@ exit: | |||
580 | } | 1103 | } |
581 | 1104 | ||
582 | /* | 1105 | /* |
1106 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
1107 | */ | ||
1108 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
1109 | { | ||
1110 | struct __vxge_hw_device *hldev; | ||
1111 | struct list_head *p, *n; | ||
1112 | u16 ret; | ||
1113 | |||
1114 | if (blockpool == NULL) { | ||
1115 | ret = 1; | ||
1116 | goto exit; | ||
1117 | } | ||
1118 | |||
1119 | hldev = blockpool->hldev; | ||
1120 | |||
1121 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
1122 | pci_unmap_single(hldev->pdev, | ||
1123 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
1124 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
1125 | PCI_DMA_BIDIRECTIONAL); | ||
1126 | |||
1127 | vxge_os_dma_free(hldev->pdev, | ||
1128 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
1129 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1130 | |||
1131 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1132 | kfree(p); | ||
1133 | blockpool->pool_size--; | ||
1134 | } | ||
1135 | |||
1136 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
1137 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
1138 | kfree((void *)p); | ||
1139 | } | ||
1140 | ret = 0; | ||
1141 | exit: | ||
1142 | return; | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * __vxge_hw_blockpool_create - Create block pool | ||
1147 | */ | ||
1148 | static enum vxge_hw_status | ||
1149 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1150 | struct __vxge_hw_blockpool *blockpool, | ||
1151 | u32 pool_size, | ||
1152 | u32 pool_max) | ||
1153 | { | ||
1154 | u32 i; | ||
1155 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
1156 | void *memblock; | ||
1157 | dma_addr_t dma_addr; | ||
1158 | struct pci_dev *dma_handle; | ||
1159 | struct pci_dev *acc_handle; | ||
1160 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1161 | |||
1162 | if (blockpool == NULL) { | ||
1163 | status = VXGE_HW_FAIL; | ||
1164 | goto blockpool_create_exit; | ||
1165 | } | ||
1166 | |||
1167 | blockpool->hldev = hldev; | ||
1168 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
1169 | blockpool->pool_size = 0; | ||
1170 | blockpool->pool_max = pool_max; | ||
1171 | blockpool->req_out = 0; | ||
1172 | |||
1173 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
1174 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
1175 | |||
1176 | for (i = 0; i < pool_size + pool_max; i++) { | ||
1177 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1178 | GFP_KERNEL); | ||
1179 | if (entry == NULL) { | ||
1180 | __vxge_hw_blockpool_destroy(blockpool); | ||
1181 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1182 | goto blockpool_create_exit; | ||
1183 | } | ||
1184 | list_add(&entry->item, &blockpool->free_entry_list); | ||
1185 | } | ||
1186 | |||
1187 | for (i = 0; i < pool_size; i++) { | ||
1188 | memblock = vxge_os_dma_malloc( | ||
1189 | hldev->pdev, | ||
1190 | VXGE_HW_BLOCK_SIZE, | ||
1191 | &dma_handle, | ||
1192 | &acc_handle); | ||
1193 | if (memblock == NULL) { | ||
1194 | __vxge_hw_blockpool_destroy(blockpool); | ||
1195 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1196 | goto blockpool_create_exit; | ||
1197 | } | ||
1198 | |||
1199 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
1200 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
1201 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
1202 | dma_addr))) { | ||
1203 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
1204 | __vxge_hw_blockpool_destroy(blockpool); | ||
1205 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1206 | goto blockpool_create_exit; | ||
1207 | } | ||
1208 | |||
1209 | if (!list_empty(&blockpool->free_entry_list)) | ||
1210 | entry = (struct __vxge_hw_blockpool_entry *) | ||
1211 | list_first_entry(&blockpool->free_entry_list, | ||
1212 | struct __vxge_hw_blockpool_entry, | ||
1213 | item); | ||
1214 | |||
1215 | if (entry == NULL) | ||
1216 | entry = | ||
1217 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
1218 | GFP_KERNEL); | ||
1219 | if (entry != NULL) { | ||
1220 | list_del(&entry->item); | ||
1221 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
1222 | entry->memblock = memblock; | ||
1223 | entry->dma_addr = dma_addr; | ||
1224 | entry->acc_handle = acc_handle; | ||
1225 | entry->dma_handle = dma_handle; | ||
1226 | list_add(&entry->item, | ||
1227 | &blockpool->free_block_list); | ||
1228 | blockpool->pool_size++; | ||
1229 | } else { | ||
1230 | __vxge_hw_blockpool_destroy(blockpool); | ||
1231 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1232 | goto blockpool_create_exit; | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | blockpool_create_exit: | ||
1237 | return status; | ||
1238 | } | ||
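
__vxge_hw_blockpool_create() pre-populates a free list of DMA-block descriptors and unwinds everything on the first allocation failure. A sketch of that all-or-nothing shape, with malloc() standing in for vxge_os_dma_malloc() plus pci_map_single() and a singly linked list in place of the kernel list heads:

	#include <stdlib.h>

	struct entry {
		struct entry *next;
		void *block;
	};

	static struct entry *pool_create(unsigned n, size_t block_size)
	{
		struct entry *head = NULL;

		for (unsigned i = 0; i < n; i++) {
			struct entry *e = calloc(1, sizeof(*e));

			if (e)
				e->block = malloc(block_size);
			if (!e || !e->block) {      /* unwind everything built */
				free(e);
				while (head) {
					struct entry *t = head->next;

					free(head->block);
					free(head);
					head = t;
				}
				return NULL;
			}
			e->next = head;
			head = e;
		}
		return head;
	}

	int main(void)
	{
		struct entry *pool = pool_create(8, 4096);

		return pool ? 0 : 1;  /* teardown at exit omitted for brevity */
	}
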
1239 | |||
1240 | /* | ||
1241 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | ||
1242 | * Check the fifo configuration | ||
1243 | */ | ||
1244 | static enum vxge_hw_status | ||
1245 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | ||
1246 | { | ||
1247 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | ||
1248 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | ||
1249 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | ||
1250 | |||
1251 | return VXGE_HW_OK; | ||
1252 | } | ||
1253 | |||
1254 | /* | ||
1255 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | ||
1256 | * Check the vpath configuration | ||
1257 | */ | ||
1258 | static enum vxge_hw_status | ||
1259 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | ||
1260 | { | ||
1261 | enum vxge_hw_status status; | ||
1262 | |||
1263 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | ||
1264 | (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) | ||
1265 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | ||
1266 | |||
1267 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | ||
1268 | if (status != VXGE_HW_OK) | ||
1269 | return status; | ||
1270 | |||
1271 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | ||
1272 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1273 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1274 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1275 | |||
1276 | if ((vp_config->rpa_strip_vlan_tag != | ||
1277 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | ||
1278 | (vp_config->rpa_strip_vlan_tag != | ||
1279 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1280 | (vp_config->rpa_strip_vlan_tag != | ||
1281 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1282 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1283 | |||
1284 | return VXGE_HW_OK; | ||
1285 | } | ||
1286 | |||
1287 | /* | ||
1288 | * __vxge_hw_device_config_check - Check device configuration. | ||
1289 | * Check the device configuration | ||
1290 | */ | ||
1291 | static enum vxge_hw_status | ||
1292 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | ||
1293 | { | ||
1294 | u32 i; | ||
1295 | enum vxge_hw_status status; | ||
1296 | |||
1297 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | ||
1298 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | ||
1299 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | ||
1300 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | ||
1301 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1302 | |||
1303 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | ||
1304 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | ||
1305 | return VXGE_HW_BADCFG_RTS_MAC_EN; | ||
1306 | |||
1307 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | ||
1308 | status = __vxge_hw_device_vpath_config_check( | ||
1309 | &new_config->vp_config[i]); | ||
1310 | if (status != VXGE_HW_OK) | ||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | return VXGE_HW_OK; | ||
1315 | } | ||
1316 | |||
1317 | /* | ||
583 | * vxge_hw_device_initialize - Initialize Titan device. | 1318 | * vxge_hw_device_initialize - Initialize Titan device. |
584 | * Initialize Titan device. Note that all the arguments of this public API | 1319 | * Initialize Titan device. Note that all the arguments of this public API |
585 | * are 'IN', including @hldev. Driver cooperates with | 1320 | * are 'IN', including @hldev. Driver cooperates with |
@@ -603,14 +1338,12 @@ vxge_hw_device_initialize( | |||
603 | if (status != VXGE_HW_OK) | 1338 | if (status != VXGE_HW_OK) |
604 | goto exit; | 1339 | goto exit; |
605 | 1340 | ||
606 | hldev = (struct __vxge_hw_device *) | 1341 | hldev = vzalloc(sizeof(struct __vxge_hw_device)); |
607 | vmalloc(sizeof(struct __vxge_hw_device)); | ||
608 | if (hldev == NULL) { | 1342 | if (hldev == NULL) { |
609 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 1343 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
610 | goto exit; | 1344 | goto exit; |
611 | } | 1345 | } |
612 | 1346 | ||
613 | memset(hldev, 0, sizeof(struct __vxge_hw_device)); | ||
614 | hldev->magic = VXGE_HW_DEVICE_MAGIC; | 1347 | hldev->magic = VXGE_HW_DEVICE_MAGIC; |
615 | 1348 | ||
616 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); | 1349 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); |
@@ -633,7 +1366,6 @@ vxge_hw_device_initialize( | |||
633 | vfree(hldev); | 1366 | vfree(hldev); |
634 | goto exit; | 1367 | goto exit; |
635 | } | 1368 | } |
636 | __vxge_hw_device_id_get(hldev); | ||
637 | 1369 | ||
638 | __vxge_hw_device_host_info_get(hldev); | 1370 | __vxge_hw_device_host_info_get(hldev); |
639 | 1371 | ||
@@ -641,7 +1373,6 @@ vxge_hw_device_initialize( | |||
641 | nblocks++; | 1373 | nblocks++; |
642 | 1374 | ||
643 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1375 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
644 | |||
645 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) | 1376 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) |
646 | continue; | 1377 | continue; |
647 | 1378 | ||
@@ -666,7 +1397,6 @@ vxge_hw_device_initialize( | |||
666 | } | 1397 | } |
667 | 1398 | ||
668 | status = __vxge_hw_device_initialize(hldev); | 1399 | status = __vxge_hw_device_initialize(hldev); |
669 | |||
670 | if (status != VXGE_HW_OK) { | 1400 | if (status != VXGE_HW_OK) { |
671 | vxge_hw_device_terminate(hldev); | 1401 | vxge_hw_device_terminate(hldev); |
672 | goto exit; | 1402 | goto exit; |
@@ -692,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev) | |||
692 | } | 1422 | } |
693 | 1423 | ||
694 | /* | 1424 | /* |
1425 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
1426 | * and offset and perform an operation | ||
1427 | */ | ||
1428 | static enum vxge_hw_status | ||
1429 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
1430 | u32 operation, u32 offset, u64 *stat) | ||
1431 | { | ||
1432 | u64 val64; | ||
1433 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1434 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1435 | |||
1436 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1437 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1438 | goto vpath_stats_access_exit; | ||
1439 | } | ||
1440 | |||
1441 | vp_reg = vpath->vp_reg; | ||
1442 | |||
1443 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
1444 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
1445 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
1446 | |||
1447 | status = __vxge_hw_pio_mem_write64(val64, | ||
1448 | &vp_reg->xmac_stats_access_cmd, | ||
1449 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
1450 | vpath->hldev->config.device_poll_millis); | ||
1451 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
1452 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
1453 | else | ||
1454 | *stat = 0; | ||
1455 | |||
1456 | vpath_stats_access_exit: | ||
1457 | return status; | ||
1458 | } | ||
1459 | |||
1460 | /* | ||
1461 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
1462 | */ | ||
1463 | static enum vxge_hw_status | ||
1464 | __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1465 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
1466 | { | ||
1467 | u64 *val64; | ||
1468 | int i; | ||
1469 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
1470 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1471 | |||
1472 | val64 = (u64 *)vpath_tx_stats; | ||
1473 | |||
1474 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1475 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1476 | goto exit; | ||
1477 | } | ||
1478 | |||
1479 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
1480 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1481 | VXGE_HW_STATS_OP_READ, | ||
1482 | offset, val64); | ||
1483 | if (status != VXGE_HW_OK) | ||
1484 | goto exit; | ||
1485 | offset++; | ||
1486 | val64++; | ||
1487 | } | ||
1488 | exit: | ||
1489 | return status; | ||
1490 | } | ||
1491 | |||
1492 | /* | ||
1493 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
1494 | */ | ||
1495 | static enum vxge_hw_status | ||
1496 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1497 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
1498 | { | ||
1499 | u64 *val64; | ||
1500 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1501 | int i; | ||
1502 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
1503 | val64 = (u64 *) vpath_rx_stats; | ||
1504 | |||
1505 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1506 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1507 | goto exit; | ||
1508 | } | ||
1509 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
1510 | status = __vxge_hw_vpath_stats_access(vpath, | ||
1511 | VXGE_HW_STATS_OP_READ, | ||
1512 | offset >> 3, val64); | ||
1513 | if (status != VXGE_HW_OK) | ||
1514 | goto exit; | ||
1515 | |||
1516 | offset += 8; | ||
1517 | val64++; | ||
1518 | } | ||
1519 | exit: | ||
1520 | return status; | ||
1521 | } | ||
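
Both xmac stats getters above exploit the fact that the stats structures are laid out as a flat run of u64 counters: a pointer to the struct is walked as a u64 array while the register offset advances in step. A sketch of the idiom — read_counter() is a fake register read and the struct fields are illustrative, not the real layout:

	#include <stdint.h>
	#include <stddef.h>

	struct rx_stats {             /* illustrative subset, all-u64 by design */
		uint64_t rx_ttl_frms;
		uint64_t rx_vld_frms;
		uint64_t rx_offload_frms;
	};

	static uint64_t read_counter(uint32_t offset) { return offset * 100ULL; }

	static void fill_stats(struct rx_stats *s)
	{
		uint64_t *p = (uint64_t *)s;

		for (size_t i = 0; i < sizeof(*s) / 8; i++)
			p[i] = read_counter((uint32_t)(i * 8)); /* 8 bytes each */
	}

	int main(void)
	{
		struct rx_stats s;

		fill_stats(&s);
		return s.rx_vld_frms == 800 ? 0 : 1;
	}

The trick is only safe while every member is a u64 and the compiler adds no padding, which is why the driver divides sizeof by 8 for the loop bound.
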
1522 | |||
1523 | /* | ||
1524 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
1525 | */ | ||
1526 | static enum vxge_hw_status | ||
1527 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
1528 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
1529 | { | ||
1530 | u64 val64; | ||
1531 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1532 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1533 | |||
1534 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
1535 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
1536 | goto exit; | ||
1537 | } | ||
1538 | vp_reg = vpath->vp_reg; | ||
1539 | |||
1540 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
1541 | hw_stats->ini_num_mwr_sent = | ||
1542 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
1543 | |||
1544 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
1545 | hw_stats->ini_num_mrd_sent = | ||
1546 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
1547 | |||
1548 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
1549 | hw_stats->ini_num_cpl_rcvd = | ||
1550 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
1551 | |||
1552 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
1553 | hw_stats->ini_num_mwr_byte_sent = | ||
1554 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
1555 | |||
1556 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
1557 | hw_stats->ini_num_cpl_byte_rcvd = | ||
1558 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
1559 | |||
1560 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
1561 | hw_stats->wrcrdtarb_xoff = | ||
1562 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
1563 | |||
1564 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
1565 | hw_stats->rdcrdtarb_xoff = | ||
1566 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
1567 | |||
1568 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1569 | hw_stats->vpath_genstats_count0 = | ||
1570 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
1571 | val64); | ||
1572 | |||
1573 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
1574 | hw_stats->vpath_genstats_count1 = | ||
1575 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
1576 | val64); | ||
1577 | |||
1578 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1579 | hw_stats->vpath_genstats_count2 = | ||
1580 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
1581 | val64); | ||
1582 | |||
1583 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
1584 | hw_stats->vpath_genstats_count3 = | ||
1585 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
1586 | val64); | ||
1587 | |||
1588 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
1589 | hw_stats->vpath_genstats_count4 = | ||
1590 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
1591 | val64); | ||
1592 | |||
1593 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
1594 | hw_stats->vpath_genstats_count5 = | ||
1595 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
1596 | val64); | ||
1597 | |||
1598 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
1599 | if (status != VXGE_HW_OK) | ||
1600 | goto exit; | ||
1601 | |||
1602 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
1603 | if (status != VXGE_HW_OK) | ||
1604 | goto exit; | ||
1605 | |||
1606 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1607 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
1608 | |||
1609 | hw_stats->prog_event_vnum0 = | ||
1610 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
1611 | |||
1612 | hw_stats->prog_event_vnum1 = | ||
1613 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
1614 | |||
1615 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
1616 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
1617 | |||
1618 | hw_stats->prog_event_vnum2 = | ||
1619 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
1620 | |||
1621 | hw_stats->prog_event_vnum3 = | ||
1622 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
1623 | |||
1624 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
1625 | hw_stats->rx_multi_cast_frame_discard = | ||
1626 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
1627 | |||
1628 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
1629 | hw_stats->rx_frm_transferred = | ||
1630 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
1631 | |||
1632 | val64 = readq(&vp_reg->rxd_returned); | ||
1633 | hw_stats->rxd_returned = | ||
1634 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
1635 | |||
1636 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
1637 | hw_stats->rx_mpa_len_fail_frms = | ||
1638 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
1639 | hw_stats->rx_mpa_mrk_fail_frms = | ||
1640 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
1641 | hw_stats->rx_mpa_crc_fail_frms = | ||
1642 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
1643 | |||
1644 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
1645 | hw_stats->rx_permitted_frms = | ||
1646 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
1647 | hw_stats->rx_vp_reset_discarded_frms = | ||
1648 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
1649 | hw_stats->rx_wol_frms = | ||
1650 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
1651 | |||
1652 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
1653 | hw_stats->tx_vp_reset_discarded_frms = | ||
1654 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
1655 | val64); | ||
1656 | exit: | ||
1657 | return status; | ||
1658 | } | ||
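Nearly every line of the body above is the same two-step move: readq() a 64-bit debug register, then mask one counter out with a GET macro and narrow it. A generic sketch of that extraction; the shift and width below are invented for the example, whereas the real VXGE_HW_*_GET_* macros hard-code the Titan register layout:

#include <stdint.h>

/* generic bitfield extract from a 64-bit register value */
#define GET_FIELD(val, shift, width) \
	(((val) >> (shift)) & ((1ULL << (width)) - 1))

static uint32_t decode_counter(uint64_t reg)
{
	/* assume, for the example, a 32-bit counter in bits 63:32 */
	return (uint32_t)GET_FIELD(reg, 32, 32);
}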
1659 | |||
1660 | /* | ||
695 | * vxge_hw_device_stats_get - Get the device hw statistics. | 1661 | * vxge_hw_device_stats_get - Get the device hw statistics. |
696 | * Returns the vpath h/w stats for the device. | 1662 | * Returns the vpath h/w stats for the device. |
697 | */ | 1663 | */ |
@@ -703,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, | |||
703 | enum vxge_hw_status status = VXGE_HW_OK; | 1669 | enum vxge_hw_status status = VXGE_HW_OK; |
704 | 1670 | ||
705 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 1671 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
706 | |||
707 | if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || | 1672 | if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || |
708 | (hldev->virtual_paths[i].vp_open == | 1673 | (hldev->virtual_paths[i].vp_open == |
709 | VXGE_HW_VP_NOT_OPEN)) | 1674 | VXGE_HW_VP_NOT_OPEN)) |
@@ -779,7 +1744,7 @@ exit: | |||
779 | * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port | 1744 | * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port |
780 | * Get the Statistics on aggregate port | 1745 | * Get the Statistics on aggregate port |
781 | */ | 1746 | */ |
782 | enum vxge_hw_status | 1747 | static enum vxge_hw_status |
783 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, | 1748 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, |
784 | struct vxge_hw_xmac_aggr_stats *aggr_stats) | 1749 | struct vxge_hw_xmac_aggr_stats *aggr_stats) |
785 | { | 1750 | { |
@@ -814,7 +1779,7 @@ exit: | |||
814 | * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port | 1779 | * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port |
815 | * Get the Statistics on port | 1780 | * Get the Statistics on port |
816 | */ | 1781 | */ |
817 | enum vxge_hw_status | 1782 | static enum vxge_hw_status |
818 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, | 1783 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, |
819 | struct vxge_hw_xmac_port_stats *port_stats) | 1784 | struct vxge_hw_xmac_port_stats *port_stats) |
820 | { | 1785 | { |
@@ -858,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, | |||
858 | 1823 | ||
859 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, | 1824 | status = vxge_hw_device_xmac_aggr_stats_get(hldev, |
860 | 0, &xmac_stats->aggr_stats[0]); | 1825 | 0, &xmac_stats->aggr_stats[0]); |
861 | |||
862 | if (status != VXGE_HW_OK) | 1826 | if (status != VXGE_HW_OK) |
863 | goto exit; | 1827 | goto exit; |
864 | 1828 | ||
@@ -952,20 +1916,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev) | |||
952 | return 0; | 1916 | return 0; |
953 | #endif | 1917 | #endif |
954 | } | 1918 | } |
955 | /* | ||
956 | * vxge_hw_device_debug_mask_get - Get the debug mask | ||
957 | * This routine returns the current debug mask set | ||
958 | */ | ||
959 | u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev) | ||
960 | { | ||
961 | #if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK) | ||
962 | if (hldev == NULL) | ||
963 | return 0; | ||
964 | return hldev->debug_module_mask; | ||
965 | #else | ||
966 | return 0; | ||
967 | #endif | ||
968 | } | ||
969 | 1919 | ||
970 | /* | 1920 | /* |
971 | * vxge_hw_getpause_data - Pause frame generation and reception. | 1921 | * vxge_hw_getpause_data - Pause frame generation and reception. |
@@ -1006,7 +1956,6 @@ exit: | |||
1006 | * It can be used to set or reset Pause frame generation or reception | 1956 | * It can be used to set or reset Pause frame generation or reception |
1007 | * support of the NIC. | 1957 | * support of the NIC. |
1008 | */ | 1958 | */ |
1009 | |||
1010 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, | 1959 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, |
1011 | u32 port, u32 tx, u32 rx) | 1960 | u32 port, u32 tx, u32 rx) |
1012 | { | 1961 | { |
@@ -1090,7 +2039,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) | |||
1090 | * first block | 2039 | * first block |
1091 | * Returns the dma address of the first RxD block | 2040 | * Returns the dma address of the first RxD block |
1092 | */ | 2041 | */ |
1093 | u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) | 2042 | static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) |
1094 | { | 2043 | { |
1095 | struct vxge_hw_mempool_dma *dma_object; | 2044 | struct vxge_hw_mempool_dma *dma_object; |
1096 | 2045 | ||
@@ -1248,197 +2197,366 @@ exit: | |||
1248 | } | 2197 | } |
1249 | 2198 | ||
1250 | /* | 2199 | /* |
1251 | * __vxge_hw_ring_create - Create a Ring | 2200 | * __vxge_hw_channel_allocate - Allocate memory for channel |
1252 | * This function creates Ring and initializes it. | 2201 | * This function allocates required memory for the channel and various arrays |
1253 | * | 2202 | * in the channel |
1254 | */ | 2203 | */ |
1255 | enum vxge_hw_status | 2204 | static struct __vxge_hw_channel * |
1256 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, | 2205 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, |
1257 | struct vxge_hw_ring_attr *attr) | 2206 | enum __vxge_hw_channel_type type, |
2207 | u32 length, u32 per_dtr_space, | ||
2208 | void *userdata) | ||
1258 | { | 2209 | { |
1259 | enum vxge_hw_status status = VXGE_HW_OK; | 2210 | struct __vxge_hw_channel *channel; |
1260 | struct __vxge_hw_ring *ring; | ||
1261 | u32 ring_length; | ||
1262 | struct vxge_hw_ring_config *config; | ||
1263 | struct __vxge_hw_device *hldev; | 2211 | struct __vxge_hw_device *hldev; |
2212 | int size = 0; | ||
1264 | u32 vp_id; | 2213 | u32 vp_id; |
1265 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1266 | 2214 | ||
1267 | if ((vp == NULL) || (attr == NULL)) { | 2215 | hldev = vph->vpath->hldev; |
2216 | vp_id = vph->vpath->vp_id; | ||
2217 | |||
2218 | switch (type) { | ||
2219 | case VXGE_HW_CHANNEL_TYPE_FIFO: | ||
2220 | size = sizeof(struct __vxge_hw_fifo); | ||
2221 | break; | ||
2222 | case VXGE_HW_CHANNEL_TYPE_RING: | ||
2223 | size = sizeof(struct __vxge_hw_ring); | ||
2224 | break; | ||
2225 | default: | ||
2226 | break; | ||
2227 | } | ||
2228 | |||
2229 | channel = kzalloc(size, GFP_KERNEL); | ||
2230 | if (channel == NULL) | ||
2231 | goto exit0; | ||
2232 | INIT_LIST_HEAD(&channel->item); | ||
2233 | |||
2234 | channel->common_reg = hldev->common_reg; | ||
2235 | channel->first_vp_id = hldev->first_vp_id; | ||
2236 | channel->type = type; | ||
2237 | channel->devh = hldev; | ||
2238 | channel->vph = vph; | ||
2239 | channel->userdata = userdata; | ||
2240 | channel->per_dtr_space = per_dtr_space; | ||
2241 | channel->length = length; | ||
2242 | channel->vp_id = vp_id; | ||
2243 | |||
2244 | channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2245 | if (channel->work_arr == NULL) | ||
2246 | goto exit1; | ||
2247 | |||
2248 | channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2249 | if (channel->free_arr == NULL) | ||
2250 | goto exit1; | ||
2251 | channel->free_ptr = length; | ||
2252 | |||
2253 | channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2254 | if (channel->reserve_arr == NULL) | ||
2255 | goto exit1; | ||
2256 | channel->reserve_ptr = length; | ||
2257 | channel->reserve_top = 0; | ||
2258 | |||
2259 | channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); | ||
2260 | if (channel->orig_arr == NULL) | ||
2261 | goto exit1; | ||
2262 | |||
2263 | return channel; | ||
2264 | exit1: | ||
2265 | __vxge_hw_channel_free(channel); | ||
2266 | |||
2267 | exit0: | ||
2268 | return NULL; | ||
2269 | } | ||
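The allocator above leans on a single-exit cleanup idiom: any kzalloc() failure jumps to one label, and __vxge_hw_channel_free() releases whichever of the four arrays were already allocated. A user-space sketch of the same shape, with illustrative names rather than the driver's:

#include <stdlib.h>

struct channel_sketch {
	void **work_arr, **free_arr, **reserve_arr, **orig_arr;
};

static struct channel_sketch *channel_alloc(size_t length)
{
	struct channel_sketch *ch = calloc(1, sizeof(*ch));
	if (!ch)
		return NULL;

	ch->work_arr    = calloc(length, sizeof(void *));
	ch->free_arr    = calloc(length, sizeof(void *));
	ch->reserve_arr = calloc(length, sizeof(void *));
	ch->orig_arr    = calloc(length, sizeof(void *));
	if (!ch->work_arr || !ch->free_arr || !ch->reserve_arr || !ch->orig_arr)
		goto fail;

	return ch;
fail:
	/* free(NULL) is a no-op, so a partial allocation unwinds safely */
	free(ch->orig_arr);
	free(ch->reserve_arr);
	free(ch->free_arr);
	free(ch->work_arr);
	free(ch);
	return NULL;
}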
2270 | |||
2271 | /* | ||
2272 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
2273 | * Adds a block to block pool | ||
2274 | */ | ||
2275 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, | ||
2276 | void *block_addr, | ||
2277 | u32 length, | ||
2278 | struct pci_dev *dma_h, | ||
2279 | struct pci_dev *acc_handle) | ||
2280 | { | ||
2281 | struct __vxge_hw_blockpool *blockpool; | ||
2282 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2283 | dma_addr_t dma_addr; | ||
2284 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2285 | u32 req_out; | ||
2286 | |||
2287 | blockpool = &devh->block_pool; | ||
2288 | |||
2289 | if (block_addr == NULL) { | ||
2290 | blockpool->req_out--; | ||
1268 | status = VXGE_HW_FAIL; | 2291 | status = VXGE_HW_FAIL; |
1269 | goto exit; | 2292 | goto exit; |
1270 | } | 2293 | } |
1271 | 2294 | ||
1272 | hldev = vp->vpath->hldev; | 2295 | dma_addr = pci_map_single(devh->pdev, block_addr, length, |
1273 | vp_id = vp->vpath->vp_id; | 2296 | PCI_DMA_BIDIRECTIONAL); |
1274 | 2297 | ||
1275 | config = &hldev->config.vp_config[vp_id].ring; | 2298 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { |
2299 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
2300 | blockpool->req_out--; | ||
2301 | status = VXGE_HW_FAIL; | ||
2302 | goto exit; | ||
2303 | } | ||
1276 | 2304 | ||
1277 | ring_length = config->ring_blocks * | 2305 | if (!list_empty(&blockpool->free_entry_list)) |
1278 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2306 | entry = (struct __vxge_hw_blockpool_entry *) |
2307 | list_first_entry(&blockpool->free_entry_list, | ||
2308 | struct __vxge_hw_blockpool_entry, | ||
2309 | item); | ||
1279 | 2310 | ||
1280 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | 2311 | if (entry == NULL) |
1281 | VXGE_HW_CHANNEL_TYPE_RING, | 2312 | entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); |
1282 | ring_length, | 2313 | else |
1283 | attr->per_rxd_space, | 2314 | list_del(&entry->item); |
1284 | attr->userdata); | ||
1285 | 2315 | ||
1286 | if (ring == NULL) { | 2316 | if (entry != NULL) { |
2317 | entry->length = length; | ||
2318 | entry->memblock = block_addr; | ||
2319 | entry->dma_addr = dma_addr; | ||
2320 | entry->acc_handle = acc_handle; | ||
2321 | entry->dma_handle = dma_h; | ||
2322 | list_add(&entry->item, &blockpool->free_block_list); | ||
2323 | blockpool->pool_size++; | ||
2324 | status = VXGE_HW_OK; | ||
2325 | } else | ||
1287 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2326 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1288 | goto exit; | ||
1289 | } | ||
1290 | 2327 | ||
1291 | vp->vpath->ringh = ring; | 2328 | blockpool->req_out--; |
1292 | ring->vp_id = vp_id; | ||
1293 | ring->vp_reg = vp->vpath->vp_reg; | ||
1294 | ring->common_reg = hldev->common_reg; | ||
1295 | ring->stats = &vp->vpath->sw_stats->ring_stats; | ||
1296 | ring->config = config; | ||
1297 | ring->callback = attr->callback; | ||
1298 | ring->rxd_init = attr->rxd_init; | ||
1299 | ring->rxd_term = attr->rxd_term; | ||
1300 | ring->buffer_mode = config->buffer_mode; | ||
1301 | ring->rxds_limit = config->rxds_limit; | ||
1302 | 2329 | ||
1303 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | 2330 | req_out = blockpool->req_out; |
1304 | ring->rxd_priv_size = | 2331 | exit: |
1305 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | 2332 | return; |
1306 | ring->per_rxd_space = attr->per_rxd_space; | 2333 | } |
1307 | 2334 | ||
1308 | ring->rxd_priv_size = | 2335 | static inline void |
1309 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | 2336 | vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) |
1310 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | 2337 | { |
2338 | gfp_t flags; | ||
2339 | void *vaddr; | ||
1311 | 2340 | ||
1312 | /* how many RxDs can fit into one block. Depends on configured | 2341 | if (in_interrupt()) |
1313 | * buffer_mode. */ | 2342 | flags = GFP_ATOMIC | GFP_DMA; |
1314 | ring->rxds_per_block = | 2343 | else |
1315 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | 2344 | flags = GFP_KERNEL | GFP_DMA; |
1316 | 2345 | ||
1317 | /* calculate actual RxD block private size */ | 2346 | vaddr = kmalloc((size), flags); |
1318 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
1319 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
1320 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
1321 | VXGE_HW_BLOCK_SIZE, | ||
1322 | VXGE_HW_BLOCK_SIZE, | ||
1323 | ring->rxdblock_priv_size, | ||
1324 | ring->config->ring_blocks, | ||
1325 | ring->config->ring_blocks, | ||
1326 | &ring_mp_callback, | ||
1327 | ring); | ||
1328 | 2347 | ||
1329 | if (ring->mempool == NULL) { | 2348 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); |
1330 | __vxge_hw_ring_delete(vp); | 2349 | } |
1331 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1332 | } | ||
1333 | 2350 | ||
1334 | status = __vxge_hw_channel_initialize(&ring->channel); | 2351 | /* |
1335 | if (status != VXGE_HW_OK) { | 2352 | * __vxge_hw_blockpool_blocks_add - Request additional blocks |
1336 | __vxge_hw_ring_delete(vp); | 2353 | */ |
1337 | goto exit; | 2354 | static |
2355 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
2356 | { | ||
2357 | u32 nreq = 0, i; | ||
2358 | |||
2359 | if ((blockpool->pool_size + blockpool->req_out) < | ||
2360 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
2361 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
2362 | blockpool->req_out += nreq; | ||
1338 | } | 2363 | } |
1339 | 2364 | ||
1340 | /* Note: | 2365 | for (i = 0; i < nreq; i++) |
1341 | * Specifying rxd_init callback means two things: | 2366 | vxge_os_dma_malloc_async( |
1342 | * 1) rxds need to be initialized by driver at channel-open time; | 2367 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1343 | * 2) rxds need to be posted at channel-open time | 2368 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); |
1344 | * (that's what the initial_replenish() below does) | 2369 | } |
1345 | * Currently we don't have a case when the 1) is done without the 2). | 2370 | |
1346 | */ | 2371 | /* |
1347 | if (ring->rxd_init) { | 2372 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool |
1348 | status = vxge_hw_ring_replenish(ring); | 2373 | * Allocates a block of memory of given size, either from block pool |
1349 | if (status != VXGE_HW_OK) { | 2374 | * or by calling vxge_os_dma_malloc() |
1350 | __vxge_hw_ring_delete(vp); | 2375 | */ |
2376 | static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
2377 | struct vxge_hw_mempool_dma *dma_object) | ||
2378 | { | ||
2379 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2380 | struct __vxge_hw_blockpool *blockpool; | ||
2381 | void *memblock = NULL; | ||
2382 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2383 | |||
2384 | blockpool = &devh->block_pool; | ||
2385 | |||
2386 | if (size != blockpool->block_size) { | ||
2387 | |||
2388 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
2389 | &dma_object->handle, | ||
2390 | &dma_object->acc_handle); | ||
2391 | |||
2392 | if (memblock == NULL) { | ||
2393 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1351 | goto exit; | 2394 | goto exit; |
1352 | } | 2395 | } |
1353 | } | ||
1354 | 2396 | ||
1355 | /* initial replenish will increment the counter in its post() routine, | 2397 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, |
1356 | * we have to reset it */ | 2398 | PCI_DMA_BIDIRECTIONAL); |
1357 | ring->stats->common_stats.usage_cnt = 0; | 2399 | |
2400 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
2401 | dma_object->addr))) { | ||
2402 | vxge_os_dma_free(devh->pdev, memblock, | ||
2403 | &dma_object->acc_handle); | ||
2404 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2405 | goto exit; | ||
2406 | } | ||
2407 | |||
2408 | } else { | ||
2409 | |||
2410 | if (!list_empty(&blockpool->free_block_list)) | ||
2411 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2412 | list_first_entry(&blockpool->free_block_list, | ||
2413 | struct __vxge_hw_blockpool_entry, | ||
2414 | item); | ||
2415 | |||
2416 | if (entry != NULL) { | ||
2417 | list_del(&entry->item); | ||
2418 | dma_object->addr = entry->dma_addr; | ||
2419 | dma_object->handle = entry->dma_handle; | ||
2420 | dma_object->acc_handle = entry->acc_handle; | ||
2421 | memblock = entry->memblock; | ||
2422 | |||
2423 | list_add(&entry->item, | ||
2424 | &blockpool->free_entry_list); | ||
2425 | blockpool->pool_size--; | ||
2426 | } | ||
2427 | |||
2428 | if (memblock != NULL) | ||
2429 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
2430 | } | ||
1358 | exit: | 2431 | exit: |
1359 | return status; | 2432 | return memblock; |
1360 | } | 2433 | } |
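__vxge_hw_blockpool_malloc() is a classic fixed-size pool: a request matching block_size pops a pre-mapped entry off free_block_list, anything else falls through to a fresh allocation. A simplified, non-DMA sketch of that fast path, using a singly linked list in place of the kernel's list_head and malloc() in place of vxge_os_dma_malloc():

#include <stdlib.h>

struct pool_entry { struct pool_entry *next; void *mem; };

struct pool {
	struct pool_entry *free_blocks;	/* ~ free_block_list */
	size_t block_size;
	size_t pool_size;
};

static void *pool_malloc(struct pool *p, size_t size)
{
	/* odd sizes bypass the pool entirely */
	if (size != p->block_size)
		return malloc(size);

	if (p->free_blocks) {
		struct pool_entry *e = p->free_blocks;
		void *mem = e->mem;

		p->free_blocks = e->next;	/* ~ list_del() */
		p->pool_size--;
		free(e);	/* the driver recycles entries on free_entry_list */
		return mem;
	}
	return malloc(size);	/* pool empty: allocate fresh */
}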
1361 | 2434 | ||
1362 | /* | 2435 | /* |
1363 | * __vxge_hw_ring_abort - Returns the RxD | 2436 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks |
1364 | * This function terminates the RxDs of the ring | ||
1365 | */ | 2437 | */ |
1366 | enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) | 2438 | static void |
2439 | __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
1367 | { | 2440 | { |
1368 | void *rxdh; | 2441 | struct list_head *p, *n; |
1369 | struct __vxge_hw_channel *channel; | ||
1370 | |||
1371 | channel = &ring->channel; | ||
1372 | 2442 | ||
1373 | for (;;) { | 2443 | list_for_each_safe(p, n, &blockpool->free_block_list) { |
1374 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1375 | 2444 | ||
1376 | if (rxdh == NULL) | 2445 | if (blockpool->pool_size < blockpool->pool_max) |
1377 | break; | 2446 | break; |
1378 | 2447 | ||
1379 | vxge_hw_channel_dtr_complete(channel); | 2448 | pci_unmap_single( |
2449 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
2450 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
2451 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
2452 | PCI_DMA_BIDIRECTIONAL); | ||
1380 | 2453 | ||
1381 | if (ring->rxd_term) | 2454 | vxge_os_dma_free( |
1382 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | 2455 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, |
1383 | channel->userdata); | 2456 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, |
2457 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
1384 | 2458 | ||
1385 | vxge_hw_channel_dtr_free(channel, rxdh); | 2459 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); |
1386 | } | ||
1387 | 2460 | ||
1388 | return VXGE_HW_OK; | 2461 | list_add(p, &blockpool->free_entry_list); |
2462 | |||
2463 | blockpool->pool_size--; | ||
2464 | |||
2465 | } | ||
1389 | } | 2466 | } |
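The trim loop above needs list_for_each_safe() because it unlinks nodes while walking; saving the next pointer before deletion is what keeps the iteration valid. The same idea expressed as a pointer-to-pointer walk over the sketch pool types from above:

static void pool_trim(struct pool *p, size_t pool_max)
{
	struct pool_entry **link = &p->free_blocks;

	/* unlink each surplus block before freeing it, so the walk
	 * never dereferences freed memory */
	while (*link && p->pool_size >= pool_max) {
		struct pool_entry *victim = *link;

		*link = victim->next;
		free(victim->mem);
		free(victim);
		p->pool_size--;
	}
}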
1390 | 2467 | ||
1391 | /* | 2468 | /* |
1392 | * __vxge_hw_ring_reset - Resets the ring | 2469 | * __vxge_hw_blockpool_free - Frees the memory allocated with | ||
1393 | * This function resets the ring during vpath reset operation | 2470 | * __vxge_hw_blockpool_malloc |
1394 | */ | 2471 | */ |
1395 | enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | 2472 | static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, |
2473 | void *memblock, u32 size, | ||
2474 | struct vxge_hw_mempool_dma *dma_object) | ||
1396 | { | 2475 | { |
2476 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
2477 | struct __vxge_hw_blockpool *blockpool; | ||
1397 | enum vxge_hw_status status = VXGE_HW_OK; | 2478 | enum vxge_hw_status status = VXGE_HW_OK; |
1398 | struct __vxge_hw_channel *channel; | ||
1399 | 2479 | ||
1400 | channel = &ring->channel; | 2480 | blockpool = &devh->block_pool; |
1401 | 2481 | ||
1402 | __vxge_hw_ring_abort(ring); | 2482 | if (size != blockpool->block_size) { |
2483 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
2484 | PCI_DMA_BIDIRECTIONAL); | ||
2485 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
2486 | } else { | ||
1403 | 2487 | ||
1404 | status = __vxge_hw_channel_reset(channel); | 2488 | if (!list_empty(&blockpool->free_entry_list)) |
2489 | entry = (struct __vxge_hw_blockpool_entry *) | ||
2490 | list_first_entry(&blockpool->free_entry_list, | ||
2491 | struct __vxge_hw_blockpool_entry, | ||
2492 | item); | ||
1405 | 2493 | ||
1406 | if (status != VXGE_HW_OK) | 2494 | if (entry == NULL) |
1407 | goto exit; | 2495 | entry = vmalloc(sizeof( |
2496 | struct __vxge_hw_blockpool_entry)); | ||
2497 | else | ||
2498 | list_del(&entry->item); | ||
1408 | 2499 | ||
1409 | if (ring->rxd_init) { | 2500 | if (entry != NULL) { |
1410 | status = vxge_hw_ring_replenish(ring); | 2501 | entry->length = size; |
1411 | if (status != VXGE_HW_OK) | 2502 | entry->memblock = memblock; |
1412 | goto exit; | 2503 | entry->dma_addr = dma_object->addr; |
2504 | entry->acc_handle = dma_object->acc_handle; | ||
2505 | entry->dma_handle = dma_object->handle; | ||
2506 | list_add(&entry->item, | ||
2507 | &blockpool->free_block_list); | ||
2508 | blockpool->pool_size++; | ||
2509 | status = VXGE_HW_OK; | ||
2510 | } else | ||
2511 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2512 | |||
2513 | if (status == VXGE_HW_OK) | ||
2514 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
1413 | } | 2515 | } |
1414 | exit: | ||
1415 | return status; | ||
1416 | } | 2516 | } |
1417 | 2517 | ||
1418 | /* | 2518 | /* |
1419 | * __vxge_hw_ring_delete - Removes the ring | 2519 | * __vxge_hw_mempool_destroy | ||
1420 | * This function frees up the memory pool and removes the ring | ||
1421 | */ | 2520 | */ |
1422 | enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) | 2521 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) |
1423 | { | 2522 | { |
1424 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | 2523 | u32 i, j; |
2524 | struct __vxge_hw_device *devh = mempool->devh; | ||
1425 | 2525 | ||
1426 | __vxge_hw_ring_abort(ring); | 2526 | for (i = 0; i < mempool->memblocks_allocated; i++) { |
2527 | struct vxge_hw_mempool_dma *dma_object; | ||
1427 | 2528 | ||
1428 | if (ring->mempool) | 2529 | vxge_assert(mempool->memblocks_arr[i]); |
1429 | __vxge_hw_mempool_destroy(ring->mempool); | 2530 | vxge_assert(mempool->memblocks_dma_arr + i); |
1430 | 2531 | ||
1431 | vp->vpath->ringh = NULL; | 2532 | dma_object = mempool->memblocks_dma_arr + i; |
1432 | __vxge_hw_channel_free(&ring->channel); | ||
1433 | 2533 | ||
1434 | return VXGE_HW_OK; | 2534 | for (j = 0; j < mempool->items_per_memblock; j++) { |
2535 | u32 index = i * mempool->items_per_memblock + j; | ||
2536 | |||
2537 | /* to skip last partially filled(if any) memblock */ | ||
2538 | if (index >= mempool->items_current) | ||
2539 | break; | ||
2540 | } | ||
2541 | |||
2542 | vfree(mempool->memblocks_priv_arr[i]); | ||
2543 | |||
2544 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | ||
2545 | mempool->memblock_size, dma_object); | ||
2546 | } | ||
2547 | |||
2548 | vfree(mempool->items_arr); | ||
2549 | vfree(mempool->memblocks_dma_arr); | ||
2550 | vfree(mempool->memblocks_priv_arr); | ||
2551 | vfree(mempool->memblocks_arr); | ||
2552 | vfree(mempool); | ||
1435 | } | 2553 | } |
1436 | 2554 | ||
1437 | /* | 2555 | /* |
1438 | * __vxge_hw_mempool_grow | 2556 | * __vxge_hw_mempool_grow |
1439 | * Will resize mempool up to %num_allocate value. | 2557 | * Will resize mempool up to %num_allocate value. |
1440 | */ | 2558 | */ |
1441 | enum vxge_hw_status | 2559 | static enum vxge_hw_status |
1442 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | 2560 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, |
1443 | u32 *num_allocated) | 2561 | u32 *num_allocated) |
1444 | { | 2562 | { |
@@ -1468,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, | |||
1468 | * allocate new memblock and its private part at once. | 2586 | * allocate new memblock and its private part at once. |
1469 | * This helps to minimize memory usage a lot. */ | 2587 | * This helps to minimize memory usage a lot. */ |
1470 | mempool->memblocks_priv_arr[i] = | 2588 | mempool->memblocks_priv_arr[i] = |
1471 | vmalloc(mempool->items_priv_size * n_items); | 2589 | vzalloc(mempool->items_priv_size * n_items); |
1472 | if (mempool->memblocks_priv_arr[i] == NULL) { | 2590 | if (mempool->memblocks_priv_arr[i] == NULL) { |
1473 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2591 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1474 | goto exit; | 2592 | goto exit; |
1475 | } | 2593 | } |
1476 | 2594 | ||
1477 | memset(mempool->memblocks_priv_arr[i], 0, | ||
1478 | mempool->items_priv_size * n_items); | ||
1479 | |||
1480 | /* allocate DMA-capable memblock */ | 2595 | /* allocate DMA-capable memblock */ |
1481 | mempool->memblocks_arr[i] = | 2596 | mempool->memblocks_arr[i] = |
1482 | __vxge_hw_blockpool_malloc(mempool->devh, | 2597 | __vxge_hw_blockpool_malloc(mempool->devh, |
@@ -1527,16 +2642,15 @@ exit: | |||
1527 | * with size enough to hold %items_initial number of items. Memory is | 2642 | * with size enough to hold %items_initial number of items. Memory is |
1528 | * DMA-able but client must map/unmap before interoperating with the device. | 2643 | * DMA-able but client must map/unmap before interoperating with the device. |
1529 | */ | 2644 | */ |
1530 | struct vxge_hw_mempool* | 2645 | static struct vxge_hw_mempool * |
1531 | __vxge_hw_mempool_create( | 2646 | __vxge_hw_mempool_create(struct __vxge_hw_device *devh, |
1532 | struct __vxge_hw_device *devh, | 2647 | u32 memblock_size, |
1533 | u32 memblock_size, | 2648 | u32 item_size, |
1534 | u32 item_size, | 2649 | u32 items_priv_size, |
1535 | u32 items_priv_size, | 2650 | u32 items_initial, |
1536 | u32 items_initial, | 2651 | u32 items_max, |
1537 | u32 items_max, | 2652 | struct vxge_hw_mempool_cbs *mp_callback, |
1538 | struct vxge_hw_mempool_cbs *mp_callback, | 2653 | void *userdata) |
1539 | void *userdata) | ||
1540 | { | 2654 | { |
1541 | enum vxge_hw_status status = VXGE_HW_OK; | 2655 | enum vxge_hw_status status = VXGE_HW_OK; |
1542 | u32 memblocks_to_allocate; | 2656 | u32 memblocks_to_allocate; |
@@ -1548,13 +2662,11 @@ __vxge_hw_mempool_create( | |||
1548 | goto exit; | 2662 | goto exit; |
1549 | } | 2663 | } |
1550 | 2664 | ||
1551 | mempool = (struct vxge_hw_mempool *) | 2665 | mempool = vzalloc(sizeof(struct vxge_hw_mempool)); |
1552 | vmalloc(sizeof(struct vxge_hw_mempool)); | ||
1553 | if (mempool == NULL) { | 2666 | if (mempool == NULL) { |
1554 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2667 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1555 | goto exit; | 2668 | goto exit; |
1556 | } | 2669 | } |
1557 | memset(mempool, 0, sizeof(struct vxge_hw_mempool)); | ||
1558 | 2670 | ||
1559 | mempool->devh = devh; | 2671 | mempool->devh = devh; |
1560 | mempool->memblock_size = memblock_size; | 2672 | mempool->memblock_size = memblock_size; |
@@ -1574,53 +2686,43 @@ __vxge_hw_mempool_create( | |||
1574 | 2686 | ||
1575 | /* allocate array of memblocks */ | 2687 | /* allocate array of memblocks */ |
1576 | mempool->memblocks_arr = | 2688 | mempool->memblocks_arr = |
1577 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | 2689 | vzalloc(sizeof(void *) * mempool->memblocks_max); |
1578 | if (mempool->memblocks_arr == NULL) { | 2690 | if (mempool->memblocks_arr == NULL) { |
1579 | __vxge_hw_mempool_destroy(mempool); | 2691 | __vxge_hw_mempool_destroy(mempool); |
1580 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2692 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1581 | mempool = NULL; | 2693 | mempool = NULL; |
1582 | goto exit; | 2694 | goto exit; |
1583 | } | 2695 | } |
1584 | memset(mempool->memblocks_arr, 0, | ||
1585 | sizeof(void *) * mempool->memblocks_max); | ||
1586 | 2696 | ||
1587 | /* allocate array of private parts of items per memblocks */ | 2697 | /* allocate array of private parts of items per memblocks */ |
1588 | mempool->memblocks_priv_arr = | 2698 | mempool->memblocks_priv_arr = |
1589 | (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); | 2699 | vzalloc(sizeof(void *) * mempool->memblocks_max); |
1590 | if (mempool->memblocks_priv_arr == NULL) { | 2700 | if (mempool->memblocks_priv_arr == NULL) { |
1591 | __vxge_hw_mempool_destroy(mempool); | 2701 | __vxge_hw_mempool_destroy(mempool); |
1592 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2702 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1593 | mempool = NULL; | 2703 | mempool = NULL; |
1594 | goto exit; | 2704 | goto exit; |
1595 | } | 2705 | } |
1596 | memset(mempool->memblocks_priv_arr, 0, | ||
1597 | sizeof(void *) * mempool->memblocks_max); | ||
1598 | 2706 | ||
1599 | /* allocate array of memblocks DMA objects */ | 2707 | /* allocate array of memblocks DMA objects */ |
1600 | mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) | 2708 | mempool->memblocks_dma_arr = |
1601 | vmalloc(sizeof(struct vxge_hw_mempool_dma) * | 2709 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * |
1602 | mempool->memblocks_max); | 2710 | mempool->memblocks_max); |
1603 | |||
1604 | if (mempool->memblocks_dma_arr == NULL) { | 2711 | if (mempool->memblocks_dma_arr == NULL) { |
1605 | __vxge_hw_mempool_destroy(mempool); | 2712 | __vxge_hw_mempool_destroy(mempool); |
1606 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2713 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1607 | mempool = NULL; | 2714 | mempool = NULL; |
1608 | goto exit; | 2715 | goto exit; |
1609 | } | 2716 | } |
1610 | memset(mempool->memblocks_dma_arr, 0, | ||
1611 | sizeof(struct vxge_hw_mempool_dma) * | ||
1612 | mempool->memblocks_max); | ||
1613 | 2717 | ||
1614 | /* allocate hash array of items */ | 2718 | /* allocate hash array of items */ |
1615 | mempool->items_arr = | 2719 | mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); |
1616 | (void **) vmalloc(sizeof(void *) * mempool->items_max); | ||
1617 | if (mempool->items_arr == NULL) { | 2720 | if (mempool->items_arr == NULL) { |
1618 | __vxge_hw_mempool_destroy(mempool); | 2721 | __vxge_hw_mempool_destroy(mempool); |
1619 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 2722 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
1620 | mempool = NULL; | 2723 | mempool = NULL; |
1621 | goto exit; | 2724 | goto exit; |
1622 | } | 2725 | } |
1623 | memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); | ||
1624 | 2726 | ||
1625 | /* calculate initial number of memblocks */ | 2727 | /* calculate initial number of memblocks */ |
1626 | memblocks_to_allocate = (mempool->items_initial + | 2728 | memblocks_to_allocate = (mempool->items_initial + |
@@ -1642,122 +2744,190 @@ exit: | |||
1642 | } | 2744 | } |
1643 | 2745 | ||
1644 | /* | 2746 | /* |
1645 | * __vxge_hw_mempool_destroy | 2747 | * __vxge_hw_ring_abort - Returns the RxD | ||
2748 | * This function terminates the RxDs of the ring | ||
1646 | */ | 2749 | */ |
1647 | void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) | 2750 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) |
1648 | { | 2751 | { |
1649 | u32 i, j; | 2752 | void *rxdh; |
1650 | struct __vxge_hw_device *devh = mempool->devh; | 2753 | struct __vxge_hw_channel *channel; |
1651 | |||
1652 | for (i = 0; i < mempool->memblocks_allocated; i++) { | ||
1653 | struct vxge_hw_mempool_dma *dma_object; | ||
1654 | 2754 | ||
1655 | vxge_assert(mempool->memblocks_arr[i]); | 2755 | channel = &ring->channel; |
1656 | vxge_assert(mempool->memblocks_dma_arr + i); | ||
1657 | 2756 | ||
1658 | dma_object = mempool->memblocks_dma_arr + i; | 2757 | for (;;) { |
2758 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | ||
1659 | 2759 | ||
1660 | for (j = 0; j < mempool->items_per_memblock; j++) { | 2760 | if (rxdh == NULL) |
1661 | u32 index = i * mempool->items_per_memblock + j; | 2761 | break; |
1662 | 2762 | ||
1663 | /* to skip last partially filled(if any) memblock */ | 2763 | vxge_hw_channel_dtr_complete(channel); |
1664 | if (index >= mempool->items_current) | ||
1665 | break; | ||
1666 | } | ||
1667 | 2764 | ||
1668 | vfree(mempool->memblocks_priv_arr[i]); | 2765 | if (ring->rxd_term) |
2766 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | ||
2767 | channel->userdata); | ||
1669 | 2768 | ||
1670 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | 2769 | vxge_hw_channel_dtr_free(channel, rxdh); |
1671 | mempool->memblock_size, dma_object); | ||
1672 | } | 2770 | } |
1673 | 2771 | ||
1674 | vfree(mempool->items_arr); | 2772 | return VXGE_HW_OK; |
2773 | } | ||
1675 | 2774 | ||
1676 | vfree(mempool->memblocks_dma_arr); | 2775 | /* |
2776 | * __vxge_hw_ring_reset - Resets the ring | ||
2777 | * This function resets the ring during vpath reset operation | ||
2778 | */ | ||
2779 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) | ||
2780 | { | ||
2781 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2782 | struct __vxge_hw_channel *channel; | ||
1677 | 2783 | ||
1678 | vfree(mempool->memblocks_priv_arr); | 2784 | channel = &ring->channel; |
1679 | 2785 | ||
1680 | vfree(mempool->memblocks_arr); | 2786 | __vxge_hw_ring_abort(ring); |
1681 | 2787 | ||
1682 | vfree(mempool); | 2788 | status = __vxge_hw_channel_reset(channel); |
2789 | |||
2790 | if (status != VXGE_HW_OK) | ||
2791 | goto exit; | ||
2792 | |||
2793 | if (ring->rxd_init) { | ||
2794 | status = vxge_hw_ring_replenish(ring); | ||
2795 | if (status != VXGE_HW_OK) | ||
2796 | goto exit; | ||
2797 | } | ||
2798 | exit: | ||
2799 | return status; | ||
1683 | } | 2800 | } |
1684 | 2801 | ||
1685 | /* | 2802 | /* |
1686 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | 2803 | * __vxge_hw_ring_delete - Removes the ring |
1687 | * Check the fifo configuration | 2804 | * This function frees up the memory pool and removes the ring | ||
1688 | */ | 2805 | */ |
1689 | enum vxge_hw_status | 2806 | static enum vxge_hw_status |
1690 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) | 2807 | __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) |
1691 | { | 2808 | { |
1692 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | 2809 | struct __vxge_hw_ring *ring = vp->vpath->ringh; |
1693 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | 2810 | |
1694 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | 2811 | __vxge_hw_ring_abort(ring); |
2812 | |||
2813 | if (ring->mempool) | ||
2814 | __vxge_hw_mempool_destroy(ring->mempool); | ||
2815 | |||
2816 | vp->vpath->ringh = NULL; | ||
2817 | __vxge_hw_channel_free(&ring->channel); | ||
1695 | 2818 | ||
1696 | return VXGE_HW_OK; | 2819 | return VXGE_HW_OK; |
1697 | } | 2820 | } |
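Deletion is the mirror image of creation, and the ordering matters: the abort quiesces posted RxDs (running rxd_term) before the backing mempool is destroyed and the channel freed. A sketch of that dependency, with stand-ins for the three driver calls:

struct ring_sketch;

void ring_abort(struct ring_sketch *r);		/* ~ __vxge_hw_ring_abort() */
void ring_pool_destroy(struct ring_sketch *r);	/* ~ __vxge_hw_mempool_destroy() */
void ring_channel_free(struct ring_sketch *r);	/* ~ __vxge_hw_channel_free() */

static void ring_teardown(struct ring_sketch *r)
{
	/* 1: terminate outstanding descriptors first; rxd_term may still
	 *    touch descriptor memory */
	ring_abort(r);
	/* 2: descriptor memory is now idle; release the pool */
	ring_pool_destroy(r);
	/* 3: finally drop the channel bookkeeping arrays */
	ring_channel_free(r);
}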
1698 | 2821 | ||
1699 | /* | 2822 | /* |
1700 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | 2823 | * __vxge_hw_ring_create - Create a Ring |
1701 | * Check the vpath configuration | 2824 | * This function creates the Ring and initializes it. | ||
1702 | */ | 2825 | */ |
1703 | enum vxge_hw_status | 2826 | static enum vxge_hw_status |
1704 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) | 2827 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, |
2828 | struct vxge_hw_ring_attr *attr) | ||
1705 | { | 2829 | { |
1706 | enum vxge_hw_status status; | 2830 | enum vxge_hw_status status = VXGE_HW_OK; |
2831 | struct __vxge_hw_ring *ring; | ||
2832 | u32 ring_length; | ||
2833 | struct vxge_hw_ring_config *config; | ||
2834 | struct __vxge_hw_device *hldev; | ||
2835 | u32 vp_id; | ||
2836 | struct vxge_hw_mempool_cbs ring_mp_callback; | ||
1707 | 2837 | ||
1708 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | 2838 | if ((vp == NULL) || (attr == NULL)) { |
1709 | (vp_config->min_bandwidth > | 2839 | status = VXGE_HW_FAIL; |
1710 | VXGE_HW_VPATH_BANDWIDTH_MAX)) | 2840 | goto exit; |
1711 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | 2841 | } |
1712 | 2842 | ||
1713 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | 2843 | hldev = vp->vpath->hldev; |
1714 | if (status != VXGE_HW_OK) | 2844 | vp_id = vp->vpath->vp_id; |
1715 | return status; | ||
1716 | 2845 | ||
1717 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | 2846 | config = &hldev->config.vp_config[vp_id].ring; |
1718 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | ||
1719 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | ||
1720 | return VXGE_HW_BADCFG_VPATH_MTU; | ||
1721 | 2847 | ||
1722 | if ((vp_config->rpa_strip_vlan_tag != | 2848 | ring_length = config->ring_blocks * |
1723 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | 2849 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
1724 | (vp_config->rpa_strip_vlan_tag != | ||
1725 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | ||
1726 | (vp_config->rpa_strip_vlan_tag != | ||
1727 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | ||
1728 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | ||
1729 | 2850 | ||
1730 | return VXGE_HW_OK; | 2851 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, |
1731 | } | 2852 | VXGE_HW_CHANNEL_TYPE_RING, |
2853 | ring_length, | ||
2854 | attr->per_rxd_space, | ||
2855 | attr->userdata); | ||
2856 | if (ring == NULL) { | ||
2857 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
2858 | goto exit; | ||
2859 | } | ||
1732 | 2860 | ||
1733 | /* | 2861 | vp->vpath->ringh = ring; |
1734 | * __vxge_hw_device_config_check - Check device configuration. | 2862 | ring->vp_id = vp_id; |
1735 | * Check the device configuration | 2863 | ring->vp_reg = vp->vpath->vp_reg; |
1736 | */ | 2864 | ring->common_reg = hldev->common_reg; |
1737 | enum vxge_hw_status | 2865 | ring->stats = &vp->vpath->sw_stats->ring_stats; |
1738 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) | 2866 | ring->config = config; |
1739 | { | 2867 | ring->callback = attr->callback; |
1740 | u32 i; | 2868 | ring->rxd_init = attr->rxd_init; |
1741 | enum vxge_hw_status status; | 2869 | ring->rxd_term = attr->rxd_term; |
2870 | ring->buffer_mode = config->buffer_mode; | ||
2871 | ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; | ||
2872 | ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; | ||
2873 | ring->rxds_limit = config->rxds_limit; | ||
1742 | 2874 | ||
1743 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | 2875 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); |
1744 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | 2876 | ring->rxd_priv_size = |
1745 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | 2877 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; |
1746 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | 2878 | ring->per_rxd_space = attr->per_rxd_space; |
1747 | return VXGE_HW_BADCFG_INTR_MODE; | ||
1748 | 2879 | ||
1749 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | 2880 | ring->rxd_priv_size = |
1750 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | 2881 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / |
1751 | return VXGE_HW_BADCFG_RTS_MAC_EN; | 2882 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; |
1752 | 2883 | ||
1753 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2884 | /* how many RxDs can fit into one block. Depends on configured |
1754 | status = __vxge_hw_device_vpath_config_check( | 2885 | * buffer_mode. */ |
1755 | &new_config->vp_config[i]); | 2886 | ring->rxds_per_block = |
1756 | if (status != VXGE_HW_OK) | 2887 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); |
1757 | return status; | 2888 | |
2889 | /* calculate actual RxD block private size */ | ||
2890 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | ||
2891 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | ||
2892 | ring->mempool = __vxge_hw_mempool_create(hldev, | ||
2893 | VXGE_HW_BLOCK_SIZE, | ||
2894 | VXGE_HW_BLOCK_SIZE, | ||
2895 | ring->rxdblock_priv_size, | ||
2896 | ring->config->ring_blocks, | ||
2897 | ring->config->ring_blocks, | ||
2898 | &ring_mp_callback, | ||
2899 | ring); | ||
2900 | if (ring->mempool == NULL) { | ||
2901 | __vxge_hw_ring_delete(vp); | ||
2902 | return VXGE_HW_ERR_OUT_OF_MEMORY; | ||
1758 | } | 2903 | } |
1759 | 2904 | ||
1760 | return VXGE_HW_OK; | 2905 | status = __vxge_hw_channel_initialize(&ring->channel); |
2906 | if (status != VXGE_HW_OK) { | ||
2907 | __vxge_hw_ring_delete(vp); | ||
2908 | goto exit; | ||
2909 | } | ||
2910 | |||
2911 | /* Note: | ||
2912 | * Specifying rxd_init callback means two things: | ||
2913 | * 1) rxds need to be initialized by driver at channel-open time; | ||
2914 | * 2) rxds need to be posted at channel-open time | ||
2915 | * (that's what the initial_replenish() below does) | ||
2916 | * Currently we don't have a case when the 1) is done without the 2). | ||
2917 | */ | ||
2918 | if (ring->rxd_init) { | ||
2919 | status = vxge_hw_ring_replenish(ring); | ||
2920 | if (status != VXGE_HW_OK) { | ||
2921 | __vxge_hw_ring_delete(vp); | ||
2922 | goto exit; | ||
2923 | } | ||
2924 | } | ||
2925 | |||
2926 | /* initial replenish will increment the counter in its post() routine, | ||
2927 | * we have to reset it */ | ||
2928 | ring->stats->common_stats.usage_cnt = 0; | ||
2929 | exit: | ||
2930 | return status; | ||
1761 | } | 2931 | } |
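One detail of __vxge_hw_ring_create() worth isolating is the rounding of rxd_priv_size up to a whole number of cache lines, so adjacent RxD private areas never share a line. The arithmetic on its own; 64 here is an illustrative stand-in for VXGE_CACHE_LINE_SIZE:

#include <stdint.h>

#define CACHE_LINE 64U	/* illustrative; the driver uses VXGE_CACHE_LINE_SIZE */

static uint32_t round_to_cache_line(uint32_t size)
{
	/* add (line - 1), then truncate: 1..64 -> 64, 65..128 -> 128, ... */
	return (size + CACHE_LINE - 1) / CACHE_LINE * CACHE_LINE;
}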
1762 | 2932 | ||
1763 | /* | 2933 | /* |
@@ -1779,7 +2949,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
1779 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | 2949 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; |
1780 | 2950 | ||
1781 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 2951 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
1782 | |||
1783 | device_config->vp_config[i].vp_id = i; | 2952 | device_config->vp_config[i].vp_id = i; |
1784 | 2953 | ||
1785 | device_config->vp_config[i].min_bandwidth = | 2954 | device_config->vp_config[i].min_bandwidth = |
@@ -1919,65 +3088,10 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |||
1919 | } | 3088 | } |
1920 | 3089 | ||
1921 | /* | 3090 | /* |
1922 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | ||
1923 | * Set the swapper bits appropriately for the legacy section. | ||
1924 | */ | ||
1925 | enum vxge_hw_status | ||
1926 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) | ||
1927 | { | ||
1928 | u64 val64; | ||
1929 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1930 | |||
1931 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
1932 | |||
1933 | wmb(); | ||
1934 | |||
1935 | switch (val64) { | ||
1936 | |||
1937 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | ||
1938 | return status; | ||
1939 | |||
1940 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | ||
1941 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
1942 | &legacy_reg->pifm_rd_swap_en); | ||
1943 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
1944 | &legacy_reg->pifm_rd_flip_en); | ||
1945 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
1946 | &legacy_reg->pifm_wr_swap_en); | ||
1947 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
1948 | &legacy_reg->pifm_wr_flip_en); | ||
1949 | break; | ||
1950 | |||
1951 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | ||
1952 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | ||
1953 | &legacy_reg->pifm_rd_swap_en); | ||
1954 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | ||
1955 | &legacy_reg->pifm_wr_swap_en); | ||
1956 | break; | ||
1957 | |||
1958 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | ||
1959 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | ||
1960 | &legacy_reg->pifm_rd_flip_en); | ||
1961 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | ||
1962 | &legacy_reg->pifm_wr_flip_en); | ||
1963 | break; | ||
1964 | } | ||
1965 | |||
1966 | wmb(); | ||
1967 | |||
1968 | val64 = readq(&legacy_reg->toc_swapper_fb); | ||
1969 | |||
1970 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | ||
1971 | status = VXGE_HW_ERR_SWAPPER_CTRL; | ||
1972 | |||
1973 | return status; | ||
1974 | } | ||
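Though deleted here, __vxge_hw_legacy_swapper_set() shows a neat trick: read a register whose reset value is known, and let the observed byte order dictate which swap/flip modes to enable before re-checking. A sketch of the detection idea; the constants below are invented for the example, not the Titan fixture values:

#include <stdint.h>

#define EXPECTED          0x0123456789abcdefULL	/* invented fixture value */
#define EXPECTED_BYTESWAP 0xefcdab8967452301ULL	/* same value, bytes reversed */

enum swap_mode { SWAP_NONE, SWAP_BYTES, SWAP_OTHER };

static enum swap_mode detect_swapper(uint64_t observed)
{
	if (observed == EXPECTED)
		return SWAP_NONE;	/* host and device already agree */
	if (observed == EXPECTED_BYTESWAP)
		return SWAP_BYTES;	/* enable read/write byte swap */
	return SWAP_OTHER;	/* bit-flipped and/or byte-swapped */
}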
1975 | |||
1976 | /* | ||
1977 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | 3091 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. |
1978 | * Set the swapper bits appropriately for the vpath. | 3092 | * Set the swapper bits appropriately for the vpath. |
1979 | */ | 3093 | */ |
1980 | enum vxge_hw_status | 3094 | static enum vxge_hw_status |
1981 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | 3095 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) |
1982 | { | 3096 | { |
1983 | #ifndef __BIG_ENDIAN | 3097 | #ifndef __BIG_ENDIAN |
@@ -1996,10 +3110,9 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) | |||
1996 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. | 3110 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. |
1997 | * Set the swapper bits appropriately for the vpath. | 3111 | * Set the swapper bits appropriately for the vpath. |
1998 | */ | 3112 | */ |
1999 | enum vxge_hw_status | 3113 | static enum vxge_hw_status |
2000 | __vxge_hw_kdfc_swapper_set( | 3114 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, |
2001 | struct vxge_hw_legacy_reg __iomem *legacy_reg, | 3115 | struct vxge_hw_vpath_reg __iomem *vpath_reg) |
2002 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2003 | { | 3116 | { |
2004 | u64 val64; | 3117 | u64 val64; |
2005 | 3118 | ||
@@ -2021,28 +3134,6 @@ __vxge_hw_kdfc_swapper_set( | |||
2021 | } | 3134 | } |
2022 | 3135 | ||
2023 | /* | 3136 | /* |
2024 | * vxge_hw_mgmt_device_config - Retrieve device configuration. | ||
2025 | * Get device configuration. Permits to retrieve at run-time configuration | ||
2026 | * values that were used to initialize and configure the device. | ||
2027 | */ | ||
2028 | enum vxge_hw_status | ||
2029 | vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev, | ||
2030 | struct vxge_hw_device_config *dev_config, int size) | ||
2031 | { | ||
2032 | |||
2033 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) | ||
2034 | return VXGE_HW_ERR_INVALID_DEVICE; | ||
2035 | |||
2036 | if (size != sizeof(struct vxge_hw_device_config)) | ||
2037 | return VXGE_HW_ERR_VERSION_CONFLICT; | ||
2038 | |||
2039 | memcpy(dev_config, &hldev->config, | ||
2040 | sizeof(struct vxge_hw_device_config)); | ||
2041 | |||
2042 | return VXGE_HW_OK; | ||
2043 | } | ||
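The removed vxge_hw_mgmt_device_config() also illustrates a common ABI guard: the caller passes sizeof(its struct), and any mismatch with the library's struct is reported as a version conflict rather than silently copying the wrong number of bytes. A sketch with illustrative types and return codes:

#include <string.h>
#include <stddef.h>

struct dev_config_sketch { int intr_mode; int rts_mac_en; /* ... */ };

static int config_get(const struct dev_config_sketch *live,
		      struct dev_config_sketch *out, size_t size)
{
	/* caller compiled against a different header revision? refuse */
	if (size != sizeof(struct dev_config_sketch))
		return -1;	/* ~ VXGE_HW_ERR_VERSION_CONFLICT */

	memcpy(out, live, sizeof(*out));
	return 0;	/* ~ VXGE_HW_OK */
}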
2044 | |||
2045 | /* | ||
2046 | * vxge_hw_mgmt_reg_read - Read Titan register. | 3137 | * vxge_hw_mgmt_reg_read - Read Titan register. |
2047 | */ | 3138 | */ |
2048 | enum vxge_hw_status | 3139 | enum vxge_hw_status |
@@ -2271,6 +3362,69 @@ exit: | |||
2271 | } | 3362 | } |
2272 | 3363 | ||
2273 | /* | 3364 | /* |
3365 | * __vxge_hw_fifo_abort - Returns the TxD | ||
3366 | * This function terminates the TxDs of fifo | ||
3367 | */ | ||
3368 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
3369 | { | ||
3370 | void *txdlh; | ||
3371 | |||
3372 | for (;;) { | ||
3373 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
3374 | |||
3375 | if (txdlh == NULL) | ||
3376 | break; | ||
3377 | |||
3378 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
3379 | |||
3380 | if (fifo->txdl_term) { | ||
3381 | fifo->txdl_term(txdlh, | ||
3382 | VXGE_HW_TXDL_STATE_POSTED, | ||
3383 | fifo->channel.userdata); | ||
3384 | } | ||
3385 | |||
3386 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
3387 | } | ||
3388 | |||
3389 | return VXGE_HW_OK; | ||
3390 | } | ||
3391 | |||
3392 | /* | ||
3393 | * __vxge_hw_fifo_reset - Resets the fifo | ||
3394 | * This function resets the fifo during vpath reset operation | ||
3395 | */ | ||
3396 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
3397 | { | ||
3398 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3399 | |||
3400 | __vxge_hw_fifo_abort(fifo); | ||
3401 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
3402 | |||
3403 | return status; | ||
3404 | } | ||
3405 | |||
3406 | /* | ||
3407 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
3408 | * This function freeup the memory pool and removes the FIFO | ||
3409 | */ | ||
3410 | static enum vxge_hw_status | ||
3411 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
3412 | { | ||
3413 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
3414 | |||
3415 | __vxge_hw_fifo_abort(fifo); | ||
3416 | |||
3417 | if (fifo->mempool) | ||
3418 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
3419 | |||
3420 | vp->vpath->fifoh = NULL; | ||
3421 | |||
3422 | __vxge_hw_channel_free(&fifo->channel); | ||
3423 | |||
3424 | return VXGE_HW_OK; | ||
3425 | } | ||
3426 | |||
3427 | /* | ||
2274 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD | 3428 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD |
2275 | * list callback | 3429 | * list callback |
2276 | * This function is callback passed to __vxge_hw_mempool_create to create memory | 3430 | * This function is callback passed to __vxge_hw_mempool_create to create memory |
@@ -2316,7 +3470,7 @@ __vxge_hw_fifo_mempool_item_alloc( | |||
2316 | * __vxge_hw_fifo_create - Create a FIFO | 3470 | * __vxge_hw_fifo_create - Create a FIFO |
2317 | * This function creates FIFO and initializes it. | 3471 | * This function creates FIFO and initializes it. |
2318 | */ | 3472 | */ |
2319 | enum vxge_hw_status | 3473 | static enum vxge_hw_status |
2320 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | 3474 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, |
2321 | struct vxge_hw_fifo_attr *attr) | 3475 | struct vxge_hw_fifo_attr *attr) |
2322 | { | 3476 | { |
@@ -2359,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, | |||
2359 | 3513 | ||
2360 | /* apply "interrupts per txdl" attribute */ | 3514 | /* apply "interrupts per txdl" attribute */ |
2361 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; | 3515 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; |
3516 | fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; | ||
3517 | fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; | ||
2362 | 3518 | ||
2363 | if (fifo->config->intr) | 3519 | if (fifo->config->intr) |
2364 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; | 3520 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; |
@@ -2435,73 +3591,11 @@ exit: | |||
2435 | } | 3591 | } |
2436 | 3592 | ||
2437 | /* | 3593 | /* |
2438 | * __vxge_hw_fifo_abort - Returns the TxD | ||
2439 | * This function terminates the TxDs of fifo | ||
2440 | */ | ||
2441 | enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) | ||
2442 | { | ||
2443 | void *txdlh; | ||
2444 | |||
2445 | for (;;) { | ||
2446 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | ||
2447 | |||
2448 | if (txdlh == NULL) | ||
2449 | break; | ||
2450 | |||
2451 | vxge_hw_channel_dtr_complete(&fifo->channel); | ||
2452 | |||
2453 | if (fifo->txdl_term) { | ||
2454 | fifo->txdl_term(txdlh, | ||
2455 | VXGE_HW_TXDL_STATE_POSTED, | ||
2456 | fifo->channel.userdata); | ||
2457 | } | ||
2458 | |||
2459 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | ||
2460 | } | ||
2461 | |||
2462 | return VXGE_HW_OK; | ||
2463 | } | ||
2464 | |||
2465 | /* | ||
2466 | * __vxge_hw_fifo_reset - Resets the fifo | ||
2467 | * This function resets the fifo during vpath reset operation | ||
2468 | */ | ||
2469 | enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) | ||
2470 | { | ||
2471 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2472 | |||
2473 | __vxge_hw_fifo_abort(fifo); | ||
2474 | status = __vxge_hw_channel_reset(&fifo->channel); | ||
2475 | |||
2476 | return status; | ||
2477 | } | ||
2478 | |||
2479 | /* | ||
2480 | * __vxge_hw_fifo_delete - Removes the FIFO | ||
2481 | * This function frees up the memory pool and removes the FIFO | ||
2482 | */ | ||
2483 | enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | ||
2484 | { | ||
2485 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | ||
2486 | |||
2487 | __vxge_hw_fifo_abort(fifo); | ||
2488 | |||
2489 | if (fifo->mempool) | ||
2490 | __vxge_hw_mempool_destroy(fifo->mempool); | ||
2491 | |||
2492 | vp->vpath->fifoh = NULL; | ||
2493 | |||
2494 | __vxge_hw_channel_free(&fifo->channel); | ||
2495 | |||
2496 | return VXGE_HW_OK; | ||
2497 | } | ||
2498 | |||
2499 | /* | ||
2500 | * __vxge_hw_vpath_pci_read - Read the content of given address | 3594 | * __vxge_hw_vpath_pci_read - Read the content of given address |
2501 | * in pci config space. | 3595 | * in pci config space. |
2502 | * Read from the vpath pci config space. | 3596 | * Read from the vpath pci config space. |
2503 | */ | 3597 | */ |
2504 | enum vxge_hw_status | 3598 | static enum vxge_hw_status |
2505 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, | 3599 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, |
2506 | u32 phy_func_0, u32 offset, u32 *val) | 3600 | u32 phy_func_0, u32 offset, u32 *val) |
2507 | { | 3601 | { |
@@ -2538,297 +3632,6 @@ exit: | |||
2538 | return status; | 3632 | return status; |
2539 | } | 3633 | } |
2540 | 3634 | ||
2541 | /* | ||
2542 | * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. | ||
2543 | * Returns the function number of the vpath. | ||
2544 | */ | ||
2545 | u32 | ||
2546 | __vxge_hw_vpath_func_id_get(u32 vp_id, | ||
2547 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) | ||
2548 | { | ||
2549 | u64 val64; | ||
2550 | |||
2551 | val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); | ||
2552 | |||
2553 | return | ||
2554 | (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); | ||
2555 | } | ||
2556 | |||
2557 | /* | ||
2558 | * __vxge_hw_read_rts_ds - Program RTS steering criteria | ||
2559 | */ | ||
2560 | static inline void | ||
2561 | __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2562 | u64 dta_struct_sel) | ||
2563 | { | ||
2564 | writeq(0, &vpath_reg->rts_access_steer_ctrl); | ||
2565 | wmb(); | ||
2566 | writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0); | ||
2567 | writeq(0, &vpath_reg->rts_access_steer_data1); | ||
2568 | wmb(); | ||
2569 | } | ||
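Note the ordering discipline in __vxge_hw_read_rts_ds: the data registers are written first, and a write barrier is issued before the control register is touched, so the device never samples stale arguments when the strobe fires. The fragment below restates the pattern as an illustration only; ctrl stands for the assembled command word and STROBE for VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE.

/* Illustrative fragment of the command-posting order used throughout
 * this file.  writeq()/wmb() are the kernel MMIO and barrier primitives. */
writeq(arg0, &vp_reg->rts_access_steer_data0);	/* command arguments ... */
writeq(arg1, &vp_reg->rts_access_steer_data1);
wmb();						/* ... land before the kick */
writeq(ctrl | STROBE, &vp_reg->rts_access_steer_ctrl);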
2570 | |||
2571 | |||
2572 | /* | ||
2573 | * __vxge_hw_vpath_card_info_get - Get the serial number, | ||
2574 | * part number and product description. | ||
2575 | */ | ||
2576 | enum vxge_hw_status | ||
2577 | __vxge_hw_vpath_card_info_get( | ||
2578 | u32 vp_id, | ||
2579 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2580 | struct vxge_hw_device_hw_info *hw_info) | ||
2581 | { | ||
2582 | u32 i, j; | ||
2583 | u64 val64; | ||
2584 | u64 data1 = 0ULL; | ||
2585 | u64 data2 = 0ULL; | ||
2586 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2587 | u8 *serial_number = hw_info->serial_number; | ||
2588 | u8 *part_number = hw_info->part_number; | ||
2589 | u8 *product_desc = hw_info->product_desc; | ||
2590 | |||
2591 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2592 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER); | ||
2593 | |||
2594 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2595 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2596 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2597 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2598 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2599 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2600 | |||
2601 | status = __vxge_hw_pio_mem_write64(val64, | ||
2602 | &vpath_reg->rts_access_steer_ctrl, | ||
2603 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2604 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2605 | |||
2606 | if (status != VXGE_HW_OK) | ||
2607 | return status; | ||
2608 | |||
2609 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2610 | |||
2611 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2612 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2613 | ((u64 *)serial_number)[0] = be64_to_cpu(data1); | ||
2614 | |||
2615 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2616 | ((u64 *)serial_number)[1] = be64_to_cpu(data2); | ||
2617 | status = VXGE_HW_OK; | ||
2618 | } else | ||
2619 | *serial_number = 0; | ||
2620 | |||
2621 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2622 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER); | ||
2623 | |||
2624 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2625 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2626 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2627 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2628 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2629 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2630 | |||
2631 | status = __vxge_hw_pio_mem_write64(val64, | ||
2632 | &vpath_reg->rts_access_steer_ctrl, | ||
2633 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2634 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2635 | |||
2636 | if (status != VXGE_HW_OK) | ||
2637 | return status; | ||
2638 | |||
2639 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2640 | |||
2641 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2642 | |||
2643 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2644 | ((u64 *)part_number)[0] = be64_to_cpu(data1); | ||
2645 | |||
2646 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2647 | ((u64 *)part_number)[1] = be64_to_cpu(data2); | ||
2648 | |||
2649 | status = VXGE_HW_OK; | ||
2650 | |||
2651 | } else | ||
2652 | *part_number = 0; | ||
2653 | |||
2654 | j = 0; | ||
2655 | |||
2656 | for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; | ||
2657 | i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { | ||
2658 | |||
2659 | __vxge_hw_read_rts_ds(vpath_reg, i); | ||
2660 | |||
2661 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2662 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2663 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2664 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2665 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2666 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2667 | |||
2668 | status = __vxge_hw_pio_mem_write64(val64, | ||
2669 | &vpath_reg->rts_access_steer_ctrl, | ||
2670 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2671 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2672 | |||
2673 | if (status != VXGE_HW_OK) | ||
2674 | return status; | ||
2675 | |||
2676 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2677 | |||
2678 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2679 | |||
2680 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2681 | ((u64 *)product_desc)[j++] = be64_to_cpu(data1); | ||
2682 | |||
2683 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2684 | ((u64 *)product_desc)[j++] = be64_to_cpu(data2); | ||
2685 | |||
2686 | status = VXGE_HW_OK; | ||
2687 | } else | ||
2688 | *product_desc = 0; | ||
2689 | } | ||
2690 | |||
2691 | return status; | ||
2692 | } | ||
2693 | |||
2694 | /* | ||
2695 | * __vxge_hw_vpath_fw_ver_get - Get the firmware version | ||
2696 | * Returns the firmware and flash versions and their build dates | ||
2697 | */ | ||
2698 | enum vxge_hw_status | ||
2699 | __vxge_hw_vpath_fw_ver_get( | ||
2700 | u32 vp_id, | ||
2701 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2702 | struct vxge_hw_device_hw_info *hw_info) | ||
2703 | { | ||
2704 | u64 val64; | ||
2705 | u64 data1 = 0ULL; | ||
2706 | u64 data2 = 0ULL; | ||
2707 | struct vxge_hw_device_version *fw_version = &hw_info->fw_version; | ||
2708 | struct vxge_hw_device_date *fw_date = &hw_info->fw_date; | ||
2709 | struct vxge_hw_device_version *flash_version = &hw_info->flash_version; | ||
2710 | struct vxge_hw_device_date *flash_date = &hw_info->flash_date; | ||
2711 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2712 | |||
2713 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2714 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) | | ||
2715 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2716 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2717 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2718 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2719 | |||
2720 | status = __vxge_hw_pio_mem_write64(val64, | ||
2721 | &vpath_reg->rts_access_steer_ctrl, | ||
2722 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2723 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2724 | |||
2725 | if (status != VXGE_HW_OK) | ||
2726 | goto exit; | ||
2727 | |||
2728 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2729 | |||
2730 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2731 | |||
2732 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2733 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
2734 | |||
2735 | fw_date->day = | ||
2736 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY( | ||
2737 | data1); | ||
2738 | fw_date->month = | ||
2739 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH( | ||
2740 | data1); | ||
2741 | fw_date->year = | ||
2742 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR( | ||
2743 | data1); | ||
2744 | |||
2745 | snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", | ||
2746 | fw_date->month, fw_date->day, fw_date->year); | ||
2747 | |||
2748 | fw_version->major = | ||
2749 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1); | ||
2750 | fw_version->minor = | ||
2751 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1); | ||
2752 | fw_version->build = | ||
2753 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1); | ||
2754 | |||
2755 | snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2756 | fw_version->major, fw_version->minor, fw_version->build); | ||
2757 | |||
2758 | flash_date->day = | ||
2759 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2); | ||
2760 | flash_date->month = | ||
2761 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2); | ||
2762 | flash_date->year = | ||
2763 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2); | ||
2764 | |||
2765 | snprintf(flash_date->date, VXGE_HW_FW_STRLEN, | ||
2766 | "%2.2d/%2.2d/%4.4d", | ||
2767 | flash_date->month, flash_date->day, flash_date->year); | ||
2768 | |||
2769 | flash_version->major = | ||
2770 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2); | ||
2771 | flash_version->minor = | ||
2772 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2); | ||
2773 | flash_version->build = | ||
2774 | (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2); | ||
2775 | |||
2776 | snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", | ||
2777 | flash_version->major, flash_version->minor, | ||
2778 | flash_version->build); | ||
2779 | |||
2780 | status = VXGE_HW_OK; | ||
2781 | |||
2782 | } else | ||
2783 | status = VXGE_HW_FAIL; | ||
2784 | exit: | ||
2785 | return status; | ||
2786 | } | ||
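The VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_* accessors used above reduce to a shift-and-mask over the 64-bit response word. A runnable illustration of that kind of extraction follows; the field positions and the sample value are invented for the example and are not the device's real layout.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_field(uint64_t word, unsigned int shift,
			  unsigned int width)
{
	return (uint32_t)((word >> shift) & ((1ULL << width) - 1));
}

int main(void)
{
	uint64_t data1 = 0x0102070c0e000000ULL;	/* made-up response word */

	printf("fw %u.%u.%u built %u/%u\n",
	       get_field(data1, 56, 8),		/* major */
	       get_field(data1, 48, 8),		/* minor */
	       get_field(data1, 40, 8),		/* build */
	       get_field(data1, 32, 8),		/* month */
	       get_field(data1, 24, 8));	/* day */
	return 0;
}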
2787 | |||
2788 | /* | ||
2789 | * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode | ||
2790 | * Returns the PCI function mode reported by firmware | ||
2791 | */ | ||
2792 | u64 | ||
2793 | __vxge_hw_vpath_pci_func_mode_get( | ||
2794 | u32 vp_id, | ||
2795 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | ||
2796 | { | ||
2797 | u64 val64; | ||
2798 | u64 data1 = 0ULL; | ||
2799 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2800 | |||
2801 | __vxge_hw_read_rts_ds(vpath_reg, | ||
2802 | VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE); | ||
2803 | |||
2804 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
2805 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | | ||
2806 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
2807 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | ||
2808 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2809 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2810 | |||
2811 | status = __vxge_hw_pio_mem_write64(val64, | ||
2812 | &vpath_reg->rts_access_steer_ctrl, | ||
2813 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2814 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2815 | |||
2816 | if (status != VXGE_HW_OK) | ||
2817 | goto exit; | ||
2818 | |||
2819 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
2820 | |||
2821 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
2822 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
2823 | status = VXGE_HW_OK; | ||
2824 | } else { | ||
2825 | data1 = 0; | ||
2826 | status = VXGE_HW_FAIL; | ||
2827 | } | ||
2828 | exit: | ||
2829 | return data1; | ||
2830 | } | ||
2831 | |||
2832 | /** | 3635 | /** |
2833 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. | 3636 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. |
2834 | * @hldev: HW device. | 3637 | * @hldev: HW device. |
@@ -2837,37 +3640,24 @@ exit: | |||
2837 | * Flicker the link LED. | 3640 | * Flicker the link LED. |
2838 | */ | 3641 | */ |
2839 | enum vxge_hw_status | 3642 | enum vxge_hw_status |
2840 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, | 3643 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) |
2841 | u64 on_off) | ||
2842 | { | 3644 | { |
2843 | u64 val64; | 3645 | struct __vxge_hw_virtualpath *vpath; |
2844 | enum vxge_hw_status status = VXGE_HW_OK; | 3646 | u64 data0, data1 = 0, steer_ctrl = 0; |
2845 | struct vxge_hw_vpath_reg __iomem *vp_reg; | 3647 | enum vxge_hw_status status; |
2846 | 3648 | ||
2847 | if (hldev == NULL) { | 3649 | if (hldev == NULL) { |
2848 | status = VXGE_HW_ERR_INVALID_DEVICE; | 3650 | status = VXGE_HW_ERR_INVALID_DEVICE; |
2849 | goto exit; | 3651 | goto exit; |
2850 | } | 3652 | } |
2851 | 3653 | ||
2852 | vp_reg = hldev->vpath_reg[hldev->first_vp_id]; | 3654 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; |
2853 | |||
2854 | writeq(0, &vp_reg->rts_access_steer_ctrl); | ||
2855 | wmb(); | ||
2856 | writeq(on_off, &vp_reg->rts_access_steer_data0); | ||
2857 | writeq(0, &vp_reg->rts_access_steer_data1); | ||
2858 | wmb(); | ||
2859 | 3655 | ||
2860 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | 3656 | data0 = on_off; |
2861 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | | 3657 | status = vxge_hw_vpath_fw_api(vpath, |
2862 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | 3658 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, |
2863 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | | 3659 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, |
2864 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | 3660 | 0, &data0, &data1, &steer_ctrl); |
2865 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
2866 | |||
2867 | status = __vxge_hw_pio_mem_write64(val64, | ||
2868 | &vp_reg->rts_access_steer_ctrl, | ||
2869 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2870 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
2871 | exit: | 3661 | exit: |
2872 | return status; | 3662 | return status; |
2873 | } | 3663 | } |
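The rewritten LED routine is the first caller in this hunk of vxge_hw_vpath_fw_api, which consolidates the write-data/strobe/poll/read sequence that the removed functions above open-coded. Its body lies outside this hunk; judging only by the call sites visible here, its contract is roughly the following.

/* Prototype inferred from the call sites in this diff, not copied from
 * the definition.  data0/data1 carry arguments in and results out;
 * *steer_ctrl returns the raw control word for status inspection. */
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 sel, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl);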
@@ -2876,63 +3666,38 @@ exit: | |||
2876 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables | 3666 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables |
2877 | */ | 3667 | */ |
2878 | enum vxge_hw_status | 3668 | enum vxge_hw_status |
2879 | __vxge_hw_vpath_rts_table_get( | 3669 | __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, |
2880 | struct __vxge_hw_vpath_handle *vp, | 3670 | u32 action, u32 rts_table, u32 offset, |
2881 | u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) | 3671 | u64 *data0, u64 *data1) |
2882 | { | 3672 | { |
2883 | u64 val64; | 3673 | enum vxge_hw_status status; |
2884 | struct __vxge_hw_virtualpath *vpath; | 3674 | u64 steer_ctrl = 0; |
2885 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
2886 | |||
2887 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2888 | 3675 | ||
2889 | if (vp == NULL) { | 3676 | if (vp == NULL) { |
2890 | status = VXGE_HW_ERR_INVALID_HANDLE; | 3677 | status = VXGE_HW_ERR_INVALID_HANDLE; |
2891 | goto exit; | 3678 | goto exit; |
2892 | } | 3679 | } |
2893 | 3680 | ||
2894 | vpath = vp->vpath; | ||
2895 | vp_reg = vpath->vp_reg; | ||
2896 | |||
2897 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
2898 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
2899 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2900 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
2901 | |||
2902 | if ((rts_table == | 3681 | if ((rts_table == |
2903 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || | 3682 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || |
2904 | (rts_table == | 3683 | (rts_table == |
2905 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || | 3684 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || |
2906 | (rts_table == | 3685 | (rts_table == |
2907 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || | 3686 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || |
2908 | (rts_table == | 3687 | (rts_table == |
2909 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { | 3688 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { |
2910 | val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; | 3689 | steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; |
2911 | } | 3690 | } |
2912 | 3691 | ||
2913 | status = __vxge_hw_pio_mem_write64(val64, | 3692 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
2914 | &vp_reg->rts_access_steer_ctrl, | 3693 | data0, data1, &steer_ctrl); |
2915 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2916 | vpath->hldev->config.device_poll_millis); | ||
2917 | |||
2918 | if (status != VXGE_HW_OK) | 3694 | if (status != VXGE_HW_OK) |
2919 | goto exit; | 3695 | goto exit; |
2920 | 3696 | ||
2921 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | 3697 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && |
2922 | 3698 | (rts_table != | |
2923 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | 3699 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
2924 | 3700 | *data1 = 0; | |
2925 | *data1 = readq(&vp_reg->rts_access_steer_data0); | ||
2926 | |||
2927 | if ((rts_table == | ||
2928 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | ||
2929 | (rts_table == | ||
2930 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | ||
2931 | *data2 = readq(&vp_reg->rts_access_steer_data1); | ||
2932 | } | ||
2933 | status = VXGE_HW_OK; | ||
2934 | } else | ||
2935 | status = VXGE_HW_FAIL; | ||
2936 | exit: | 3701 | exit: |
2937 | return status; | 3702 | return status; |
2938 | } | 3703 | } |
@@ -2941,107 +3706,27 @@ exit: | |||
2941 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables | 3706 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables |
2942 | */ | 3707 | */ |
2943 | enum vxge_hw_status | 3708 | enum vxge_hw_status |
2944 | __vxge_hw_vpath_rts_table_set( | 3709 | __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, |
2945 | struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, | 3710 | u32 rts_table, u32 offset, u64 steer_data0, |
2946 | u32 offset, u64 data1, u64 data2) | 3711 | u64 steer_data1) |
2947 | { | 3712 | { |
2948 | u64 val64; | 3713 | u64 data0, data1 = 0, steer_ctrl = 0; |
2949 | struct __vxge_hw_virtualpath *vpath; | 3714 | enum vxge_hw_status status; |
2950 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2951 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
2952 | 3715 | ||
2953 | if (vp == NULL) { | 3716 | if (vp == NULL) { |
2954 | status = VXGE_HW_ERR_INVALID_HANDLE; | 3717 | status = VXGE_HW_ERR_INVALID_HANDLE; |
2955 | goto exit; | 3718 | goto exit; |
2956 | } | 3719 | } |
2957 | 3720 | ||
2958 | vpath = vp->vpath; | 3721 | data0 = steer_data0; |
2959 | vp_reg = vpath->vp_reg; | ||
2960 | |||
2961 | writeq(data1, &vp_reg->rts_access_steer_data0); | ||
2962 | wmb(); | ||
2963 | 3722 | ||
2964 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | 3723 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || |
2965 | (rts_table == | 3724 | (rts_table == |
2966 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { | 3725 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
2967 | writeq(data2, &vp_reg->rts_access_steer_data1); | 3726 | data1 = steer_data1; |
2968 | wmb(); | ||
2969 | } | ||
2970 | |||
2971 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | | ||
2972 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | | ||
2973 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
2974 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); | ||
2975 | |||
2976 | status = __vxge_hw_pio_mem_write64(val64, | ||
2977 | &vp_reg->rts_access_steer_ctrl, | ||
2978 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
2979 | vpath->hldev->config.device_poll_millis); | ||
2980 | |||
2981 | if (status != VXGE_HW_OK) | ||
2982 | goto exit; | ||
2983 | |||
2984 | val64 = readq(&vp_reg->rts_access_steer_ctrl); | ||
2985 | 3727 | ||
2986 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) | 3728 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
2987 | status = VXGE_HW_OK; | 3729 | &data0, &data1, &steer_ctrl); |
2988 | else | ||
2989 | status = VXGE_HW_FAIL; | ||
2990 | exit: | ||
2991 | return status; | ||
2992 | } | ||
2993 | |||
2994 | /* | ||
2995 | * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath | ||
2996 | * from MAC address table. | ||
2997 | */ | ||
2998 | enum vxge_hw_status | ||
2999 | __vxge_hw_vpath_addr_get( | ||
3000 | u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
3001 | u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) | ||
3002 | { | ||
3003 | u32 i; | ||
3004 | u64 val64; | ||
3005 | u64 data1 = 0ULL; | ||
3006 | u64 data2 = 0ULL; | ||
3007 | enum vxge_hw_status status = VXGE_HW_OK; | ||
3008 | |||
3009 | val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( | ||
3010 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) | | ||
3011 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( | ||
3012 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) | | ||
3013 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | | ||
3014 | VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); | ||
3015 | |||
3016 | status = __vxge_hw_pio_mem_write64(val64, | ||
3017 | &vpath_reg->rts_access_steer_ctrl, | ||
3018 | VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, | ||
3019 | VXGE_HW_DEF_DEVICE_POLL_MILLIS); | ||
3020 | |||
3021 | if (status != VXGE_HW_OK) | ||
3022 | goto exit; | ||
3023 | |||
3024 | val64 = readq(&vpath_reg->rts_access_steer_ctrl); | ||
3025 | |||
3026 | if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { | ||
3027 | |||
3028 | data1 = readq(&vpath_reg->rts_access_steer_data0); | ||
3029 | data2 = readq(&vpath_reg->rts_access_steer_data1); | ||
3030 | |||
3031 | data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); | ||
3032 | data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( | ||
3033 | data2); | ||
3034 | |||
3035 | for (i = ETH_ALEN; i > 0; i--) { | ||
3036 | macaddr[i-1] = (u8)(data1 & 0xFF); | ||
3037 | data1 >>= 8; | ||
3038 | |||
3039 | macaddr_mask[i-1] = (u8)(data2 & 0xFF); | ||
3040 | data2 >>= 8; | ||
3041 | } | ||
3042 | status = VXGE_HW_OK; | ||
3043 | } else | ||
3044 | status = VXGE_HW_FAIL; | ||
3045 | exit: | 3730 | exit: |
3046 | return status; | 3731 | return status; |
3047 | } | 3732 | } |
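The byte loop in the removed __vxge_hw_vpath_addr_get is worth a worked example: after the DA field is extracted, the MAC sits in the low 48 bits of the word with the last octet in the least-significant byte, so the loop fills the array backwards. A standalone check, with an arbitrary sample value:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	uint64_t data1 = 0x0000001b21a0b1c2ULL;	/* example field value */
	uint8_t macaddr[ETH_ALEN];
	int i;

	for (i = ETH_ALEN; i > 0; i--) {	/* same loop as above */
		macaddr[i - 1] = (uint8_t)(data1 & 0xff);
		data1 >>= 8;
	}
	/* prints 00:1b:21:a0:b1:c2 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", macaddr[0], macaddr[1],
	       macaddr[2], macaddr[3], macaddr[4], macaddr[5]);
	return 0;
}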
@@ -3067,6 +3752,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | |||
3067 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | 3752 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, |
3068 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | 3753 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, |
3069 | 0, &data0, &data1); | 3754 | 0, &data0, &data1); |
3755 | if (status != VXGE_HW_OK) | ||
3756 | goto exit; | ||
3070 | 3757 | ||
3071 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | | 3758 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | |
3072 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); | 3759 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); |
@@ -3347,7 +4034,7 @@ __vxge_hw_vpath_mgmt_read( | |||
3347 | * This routine checks the vpath_rst_in_prog register to see if | 4034 | * This routine checks the vpath_rst_in_prog register to see if |
3348 | * adapter completed the reset process for the vpath | 4035 | * adapter completed the reset process for the vpath |
3349 | */ | 4036 | */ |
3350 | enum vxge_hw_status | 4037 | static enum vxge_hw_status |
3351 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | 4038 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) |
3352 | { | 4039 | { |
3353 | enum vxge_hw_status status; | 4040 | enum vxge_hw_status status; |
@@ -3365,7 +4052,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) | |||
3365 | * __vxge_hw_vpath_reset | 4052 | * __vxge_hw_vpath_reset |
3366 | * This routine resets the vpath on the device | 4053 | * This routine resets the vpath on the device |
3367 | */ | 4054 | */ |
3368 | enum vxge_hw_status | 4055 | static enum vxge_hw_status |
3369 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | 4056 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3370 | { | 4057 | { |
3371 | u64 val64; | 4058 | u64 val64; |
@@ -3383,7 +4070,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3383 | * __vxge_hw_vpath_sw_reset | 4070 | * __vxge_hw_vpath_sw_reset |
3384 | * This routine resets the vpath structures | 4071 | * This routine resets the vpath structures |
3385 | */ | 4072 | */ |
3386 | enum vxge_hw_status | 4073 | static enum vxge_hw_status |
3387 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) | 4074 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3388 | { | 4075 | { |
3389 | enum vxge_hw_status status = VXGE_HW_OK; | 4076 | enum vxge_hw_status status = VXGE_HW_OK; |
@@ -3408,7 +4095,7 @@ exit: | |||
3408 | * This routine configures the prc registers of virtual path using the config | 4095 | * This routine configures the prc registers of virtual path using the config |
3409 | * passed | 4096 | * passed |
3410 | */ | 4097 | */ |
3411 | void | 4098 | static void |
3412 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4099 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3413 | { | 4100 | { |
3414 | u64 val64; | 4101 | u64 val64; |
@@ -3480,7 +4167,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3480 | * This routine configures the kdfc registers of virtual path using the | 4167 | * This routine configures the kdfc registers of virtual path using the |
3481 | * config passed | 4168 | * config passed |
3482 | */ | 4169 | */ |
3483 | enum vxge_hw_status | 4170 | static enum vxge_hw_status |
3484 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4171 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3485 | { | 4172 | { |
3486 | u64 val64; | 4173 | u64 val64; |
@@ -3553,7 +4240,7 @@ exit: | |||
3553 | * __vxge_hw_vpath_mac_configure | 4240 | * __vxge_hw_vpath_mac_configure |
3554 | * This routine configures the mac of virtual path using the config passed | 4241 | * This routine configures the mac of virtual path using the config passed |
3555 | */ | 4242 | */ |
3556 | enum vxge_hw_status | 4243 | static enum vxge_hw_status |
3557 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4244 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3558 | { | 4245 | { |
3559 | u64 val64; | 4246 | u64 val64; |
@@ -3621,7 +4308,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3621 | * This routine configures the tim registers of virtual path using the config | 4308 | * This routine configures the tim registers of virtual path using the config |
3622 | * passed | 4309 | * passed |
3623 | */ | 4310 | */ |
3624 | enum vxge_hw_status | 4311 | static enum vxge_hw_status |
3625 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | 4312 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3626 | { | 4313 | { |
3627 | u64 val64; | 4314 | u64 val64; |
@@ -3634,10 +4321,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3634 | vp_reg = vpath->vp_reg; | 4321 | vp_reg = vpath->vp_reg; |
3635 | config = vpath->vp_config; | 4322 | config = vpath->vp_config; |
3636 | 4323 | ||
3637 | writeq((u64)0, &vp_reg->tim_dest_addr); | 4324 | writeq(0, &vp_reg->tim_dest_addr); |
3638 | writeq((u64)0, &vp_reg->tim_vpath_map); | 4325 | writeq(0, &vp_reg->tim_vpath_map); |
3639 | writeq((u64)0, &vp_reg->tim_bitmap); | 4326 | writeq(0, &vp_reg->tim_bitmap); |
3640 | writeq((u64)0, &vp_reg->tim_remap); | 4327 | writeq(0, &vp_reg->tim_remap); |
3641 | 4328 | ||
3642 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | 4329 | if (config->ring.enable == VXGE_HW_RING_ENABLE) |
3643 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | 4330 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( |
@@ -3694,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3694 | } | 4381 | } |
3695 | 4382 | ||
3696 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | 4383 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); |
4384 | vpath->tim_tti_cfg1_saved = val64; | ||
4385 | |||
3697 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | 4386 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); |
3698 | 4387 | ||
3699 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | 4388 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3739,8 +4428,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3739 | 4428 | ||
3740 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | 4429 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { |
3741 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | 4430 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); |
3742 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | 4431 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); |
3743 | config->tti.util_sel); | ||
3744 | } | 4432 | } |
3745 | 4433 | ||
3746 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | 4434 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3751,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3751 | } | 4439 | } |
3752 | 4440 | ||
3753 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | 4441 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); |
4442 | vpath->tim_tti_cfg3_saved = val64; | ||
3754 | } | 4443 | } |
3755 | 4444 | ||
3756 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { | 4445 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { |
@@ -3799,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3799 | } | 4488 | } |
3800 | 4489 | ||
3801 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | 4490 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); |
4491 | vpath->tim_rti_cfg1_saved = val64; | ||
4492 | |||
3802 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | 4493 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); |
3803 | 4494 | ||
3804 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | 4495 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3844,8 +4535,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3844 | 4535 | ||
3845 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | 4536 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { |
3846 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | 4537 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); |
3847 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | 4538 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); |
3848 | config->rti.util_sel); | ||
3849 | } | 4539 | } |
3850 | 4540 | ||
3851 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | 4541 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { |
@@ -3856,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3856 | } | 4546 | } |
3857 | 4547 | ||
3858 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | 4548 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); |
4549 | vpath->tim_rti_cfg3_saved = val64; | ||
3859 | } | 4550 | } |
3860 | 4551 | ||
3861 | val64 = 0; | 4552 | val64 = 0; |
@@ -3866,38 +4557,20 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3866 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); | 4557 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); |
3867 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); | 4558 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); |
3868 | 4559 | ||
4560 | val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); | ||
4561 | val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); | ||
4562 | val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); | ||
4563 | writeq(val64, &vp_reg->tim_wrkld_clc); | ||
4564 | |||
3869 | return status; | 4565 | return status; |
3870 | } | 4566 | } |
3871 | 4567 | ||
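The new tim_*_cfg*_saved fields cache the exact values written to the TIM registers, so later interrupt-moderation code can rebase its changes on what the hardware holds without an MMIO read-modify-write (which is what the removed vxge_hw_vpath_tti_ci_set below had to do). A hypothetical consumer, sketched under that assumption:

/* Hypothetical runtime tweak built on the saved copy: start from the
 * value programmed at configure time, set the bit of interest, write
 * it back, and keep the cache coherent. */
static void fifo_force_timer_ci(struct __vxge_hw_fifo *fifo,
				struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64 = fifo->tim_tti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	fifo->tim_tti_cfg1_saved = val64;
}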
3872 | void | ||
3873 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) | ||
3874 | { | ||
3875 | struct __vxge_hw_virtualpath *vpath; | ||
3876 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
3877 | struct vxge_hw_vp_config *config; | ||
3878 | u64 val64; | ||
3879 | |||
3880 | vpath = &hldev->virtual_paths[vp_id]; | ||
3881 | vp_reg = vpath->vp_reg; | ||
3882 | config = vpath->vp_config; | ||
3883 | |||
3884 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | ||
3885 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3886 | |||
3887 | if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | ||
3888 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | ||
3889 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
3890 | writeq(val64, | ||
3891 | &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
3892 | } | ||
3893 | } | ||
3894 | } | ||
3895 | /* | 4568 | /* |
3896 | * __vxge_hw_vpath_initialize | 4569 | * __vxge_hw_vpath_initialize |
3897 | * This routine is the final phase of init which initializes the | 4570 | * This routine is the final phase of init which initializes the |
3898 | * registers of the vpath using the configuration passed. | 4571 | * registers of the vpath using the configuration passed. |
3899 | */ | 4572 | */ |
3900 | enum vxge_hw_status | 4573 | static enum vxge_hw_status |
3901 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | 4574 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) |
3902 | { | 4575 | { |
3903 | u64 val64; | 4576 | u64 val64; |
@@ -3915,22 +4588,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3915 | vp_reg = vpath->vp_reg; | 4588 | vp_reg = vpath->vp_reg; |
3916 | 4589 | ||
3917 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | 4590 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); |
3918 | |||
3919 | if (status != VXGE_HW_OK) | 4591 | if (status != VXGE_HW_OK) |
3920 | goto exit; | 4592 | goto exit; |
3921 | 4593 | ||
3922 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | 4594 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); |
3923 | |||
3924 | if (status != VXGE_HW_OK) | 4595 | if (status != VXGE_HW_OK) |
3925 | goto exit; | 4596 | goto exit; |
3926 | 4597 | ||
3927 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | 4598 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); |
3928 | |||
3929 | if (status != VXGE_HW_OK) | 4599 | if (status != VXGE_HW_OK) |
3930 | goto exit; | 4600 | goto exit; |
3931 | 4601 | ||
3932 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | 4602 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); |
3933 | |||
3934 | if (status != VXGE_HW_OK) | 4603 | if (status != VXGE_HW_OK) |
3935 | goto exit; | 4604 | goto exit; |
3936 | 4605 | ||
@@ -3938,7 +4607,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) | |||
3938 | 4607 | ||
3939 | /* Get MRRS value from device control */ | 4608 | /* Get MRRS value from device control */ |
3940 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | 4609 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); |
3941 | |||
3942 | if (status == VXGE_HW_OK) { | 4610 | if (status == VXGE_HW_OK) { |
3943 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | 4611 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; |
3944 | val64 &= | 4612 | val64 &= |
@@ -3962,11 +4630,53 @@ exit: | |||
3962 | } | 4630 | } |
3963 | 4631 | ||
3964 | /* | 4632 | /* |
4633 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4634 | * This routine closes all channels it opened and frees up memory | ||
4635 | */ | ||
4636 | static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4637 | { | ||
4638 | struct __vxge_hw_virtualpath *vpath; | ||
4639 | |||
4640 | vpath = &hldev->virtual_paths[vp_id]; | ||
4641 | |||
4642 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4643 | goto exit; | ||
4644 | |||
4645 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4646 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4647 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4648 | |||
4649 | /* Zeroing the whole struct __vxge_hw_virtualpath here would wipe state | ||
4650 | * that must survive an interface down, so clear only selected fields. | ||
4651 | */ | ||
4652 | spin_lock(&vpath->lock); | ||
4653 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4654 | spin_unlock(&vpath->lock); | ||
4655 | |||
4656 | vpath->vpmgmt_reg = NULL; | ||
4657 | vpath->nofl_db = NULL; | ||
4658 | vpath->max_mtu = 0; | ||
4659 | vpath->vsport_number = 0; | ||
4660 | vpath->max_kdfc_db = 0; | ||
4661 | vpath->max_nofl_db = 0; | ||
4662 | vpath->ringh = NULL; | ||
4663 | vpath->fifoh = NULL; | ||
4664 | memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); | ||
4665 | vpath->stats_block = NULL; | ||
4666 | vpath->hw_stats = NULL; | ||
4667 | vpath->hw_stats_sav = NULL; | ||
4668 | vpath->sw_stats = NULL; | ||
4669 | |||
4670 | exit: | ||
4671 | return; | ||
4672 | } | ||
4673 | |||
4674 | /* | ||
3965 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | 4675 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure |
3966 | * This routine is the initial phase of init which resets the vpath and | 4676 | * This routine is the initial phase of init which resets the vpath and |
3967 | * initializes the software support structures. | 4677 | * initializes the software support structures. |
3968 | */ | 4678 | */ |
3969 | enum vxge_hw_status | 4679 | static enum vxge_hw_status |
3970 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | 4680 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, |
3971 | struct vxge_hw_vp_config *config) | 4681 | struct vxge_hw_vp_config *config) |
3972 | { | 4682 | { |
@@ -3980,6 +4690,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
3980 | 4690 | ||
3981 | vpath = &hldev->virtual_paths[vp_id]; | 4691 | vpath = &hldev->virtual_paths[vp_id]; |
3982 | 4692 | ||
4693 | spin_lock_init(&vpath->lock); | ||
3983 | vpath->vp_id = vp_id; | 4694 | vpath->vp_id = vp_id; |
3984 | vpath->vp_open = VXGE_HW_VP_OPEN; | 4695 | vpath->vp_open = VXGE_HW_VP_OPEN; |
3985 | vpath->hldev = hldev; | 4696 | vpath->hldev = hldev; |
@@ -3990,14 +4701,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
3990 | __vxge_hw_vpath_reset(hldev, vp_id); | 4701 | __vxge_hw_vpath_reset(hldev, vp_id); |
3991 | 4702 | ||
3992 | status = __vxge_hw_vpath_reset_check(vpath); | 4703 | status = __vxge_hw_vpath_reset_check(vpath); |
3993 | |||
3994 | if (status != VXGE_HW_OK) { | 4704 | if (status != VXGE_HW_OK) { |
3995 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | 4705 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); |
3996 | goto exit; | 4706 | goto exit; |
3997 | } | 4707 | } |
3998 | 4708 | ||
3999 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); | 4709 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); |
4000 | |||
4001 | if (status != VXGE_HW_OK) { | 4710 | if (status != VXGE_HW_OK) { |
4002 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | 4711 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); |
4003 | goto exit; | 4712 | goto exit; |
@@ -4011,7 +4720,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, | |||
4011 | hldev->tim_int_mask1, vp_id); | 4720 | hldev->tim_int_mask1, vp_id); |
4012 | 4721 | ||
4013 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | 4722 | status = __vxge_hw_vpath_initialize(hldev, vp_id); |
4014 | |||
4015 | if (status != VXGE_HW_OK) | 4723 | if (status != VXGE_HW_OK) |
4016 | __vxge_hw_vp_terminate(hldev, vp_id); | 4724 | __vxge_hw_vp_terminate(hldev, vp_id); |
4017 | exit: | 4725 | exit: |
@@ -4019,29 +4727,6 @@ exit: | |||
4019 | } | 4727 | } |
4020 | 4728 | ||
4021 | /* | 4729 | /* |
4022 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | ||
4023 | * This routine closes all channels it opened and frees up memory | ||
4024 | */ | ||
4025 | void | ||
4026 | __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) | ||
4027 | { | ||
4028 | struct __vxge_hw_virtualpath *vpath; | ||
4029 | |||
4030 | vpath = &hldev->virtual_paths[vp_id]; | ||
4031 | |||
4032 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | ||
4033 | goto exit; | ||
4034 | |||
4035 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | ||
4036 | vpath->hldev->tim_int_mask1, vpath->vp_id); | ||
4037 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | ||
4038 | |||
4039 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | ||
4040 | exit: | ||
4041 | return; | ||
4042 | } | ||
4043 | |||
4044 | /* | ||
4045 | * vxge_hw_vpath_mtu_set - Set MTU. | 4730 | * vxge_hw_vpath_mtu_set - Set MTU. |
4046 | * Set a new MTU value. For example, to use jumbo frames: | 4731 | * Set a new MTU value. For example, to use jumbo frames: |
4047 | * vxge_hw_vpath_mtu_set(my_device, 9600); | 4732 | * vxge_hw_vpath_mtu_set(my_device, 9600); |
@@ -4078,6 +4763,64 @@ exit: | |||
4078 | } | 4763 | } |
4079 | 4764 | ||
4080 | /* | 4765 | /* |
4766 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | ||
4767 | * Enable the DMA vpath statistics. The function is called to have the | ||
4768 | * adapter resume updating stats in the host memory | ||
4769 | */ | ||
4770 | static enum vxge_hw_status | ||
4771 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4772 | { | ||
4773 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4774 | struct __vxge_hw_virtualpath *vpath; | ||
4775 | |||
4776 | vpath = vp->vpath; | ||
4777 | |||
4778 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4779 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4780 | goto exit; | ||
4781 | } | ||
4782 | |||
4783 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4784 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4785 | |||
4786 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4787 | exit: | ||
4788 | return status; | ||
4789 | } | ||
4790 | |||
4791 | /* | ||
4792 | * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool | ||
4793 | * This function allocates a block from the block pool or from the system | ||
4794 | */ | ||
4795 | static struct __vxge_hw_blockpool_entry * | ||
4796 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
4797 | { | ||
4798 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4799 | struct __vxge_hw_blockpool *blockpool; | ||
4800 | |||
4801 | blockpool = &devh->block_pool; | ||
4802 | |||
4803 | if (size == blockpool->block_size) { | ||
4804 | |||
4805 | if (!list_empty(&blockpool->free_block_list)) | ||
4806 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4807 | list_first_entry(&blockpool->free_block_list, | ||
4808 | struct __vxge_hw_blockpool_entry, | ||
4809 | item); | ||
4810 | |||
4811 | if (entry != NULL) { | ||
4812 | list_del(&entry->item); | ||
4813 | blockpool->pool_size--; | ||
4814 | } | ||
4815 | } | ||
4816 | |||
4817 | if (entry != NULL) | ||
4818 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4819 | |||
4820 | return entry; | ||
4821 | } | ||
4822 | |||
4823 | /* | ||
4081 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | 4824 | * vxge_hw_vpath_open - Open a virtual path on a given adapter |
4082 | * This function is used to open access to the virtual path of an | 4825 | * This function is used to open access to the virtual path of an |
4083 | * adapter for offload and GRO operations. This function returns | 4826 | * adapter for offload and GRO operations. This function returns |
@@ -4101,19 +4844,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4101 | 4844 | ||
4102 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | 4845 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, |
4103 | &hldev->config.vp_config[attr->vp_id]); | 4846 | &hldev->config.vp_config[attr->vp_id]); |
4104 | |||
4105 | if (status != VXGE_HW_OK) | 4847 | if (status != VXGE_HW_OK) |
4106 | goto vpath_open_exit1; | 4848 | goto vpath_open_exit1; |
4107 | 4849 | ||
4108 | vp = (struct __vxge_hw_vpath_handle *) | 4850 | vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); |
4109 | vmalloc(sizeof(struct __vxge_hw_vpath_handle)); | ||
4110 | if (vp == NULL) { | 4851 | if (vp == NULL) { |
4111 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 4852 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
4112 | goto vpath_open_exit2; | 4853 | goto vpath_open_exit2; |
4113 | } | 4854 | } |
4114 | 4855 | ||
4115 | memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle)); | ||
4116 | |||
4117 | vp->vpath = vpath; | 4856 | vp->vpath = vpath; |
4118 | 4857 | ||
4119 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | 4858 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { |
@@ -4136,7 +4875,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |||
4136 | 4875 | ||
4137 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | 4876 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, |
4138 | VXGE_HW_BLOCK_SIZE); | 4877 | VXGE_HW_BLOCK_SIZE); |
4139 | |||
4140 | if (vpath->stats_block == NULL) { | 4878 | if (vpath->stats_block == NULL) { |
4141 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | 4879 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
4142 | goto vpath_open_exit8; | 4880 | goto vpath_open_exit8; |
@@ -4195,19 +4933,20 @@ vpath_open_exit1: | |||
4195 | * This function replenishes the RxD doorbell after a vpath reset so the | 4933 | * This function replenishes the RxD doorbell after a vpath reset so the |
4196 | * PRC knows how many receive descriptors are posted. | 4934 | * PRC knows how many receive descriptors are posted. |
4197 | */ | 4935 | */ |
4198 | void | 4936 | void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) |
4199 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | ||
4200 | { | 4937 | { |
4201 | struct __vxge_hw_virtualpath *vpath = NULL; | 4938 | struct __vxge_hw_virtualpath *vpath = vp->vpath; |
4939 | struct __vxge_hw_ring *ring = vpath->ringh; | ||
4940 | struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); | ||
4202 | u64 new_count, val64, val164; | 4941 | u64 new_count, val64, val164; |
4203 | struct __vxge_hw_ring *ring; | ||
4204 | 4942 | ||
4205 | vpath = vp->vpath; | 4943 | if (vdev->titan1) { |
4206 | ring = vpath->ringh; | 4944 | new_count = readq(&vpath->vp_reg->rxdmem_size); |
4945 | new_count &= 0x1fff; | ||
4946 | } else | ||
4947 | new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; | ||
4207 | 4948 | ||
4208 | new_count = readq(&vpath->vp_reg->rxdmem_size); | 4949 | val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); |
4209 | new_count &= 0x1fff; | ||
4210 | val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count)); | ||
4211 | 4950 | ||
4212 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), | 4951 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), |
4213 | &vpath->vp_reg->prc_rxd_doorbell); | 4952 | &vpath->vp_reg->prc_rxd_doorbell); |
@@ -4230,6 +4969,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | |||
4230 | } | 4969 | } |
4231 | 4970 | ||
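On non-Titan1 hardware the doorbell count is computed rather than read back from rxdmem_size: each ring block contributes VXGE_HW_BLOCK_SIZE / 8 qword-sized RxD slots. Assuming the usual 4 KB block size (an assumption; the constant lives in the headers), the arithmetic works out as follows.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t block_size = 4096;	/* assumed VXGE_HW_BLOCK_SIZE */
	const uint64_t ring_blocks = 8;		/* example configuration */

	/* qword count handed to the PRC doorbell on non-Titan1 parts */
	uint64_t new_count = ring_blocks * block_size / 8;

	printf("doorbell qwords: %llu\n", (unsigned long long)new_count);
	return 0;
}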
4232 | /* | 4971 | /* |
4972 | * __vxge_hw_blockpool_block_free - Frees a block back to the block pool | ||
4973 | * @devh: HAL device | ||
4974 | * @entry: Entry of block to be freed | ||
4975 | * | ||
4976 | * This function returns a block to the block pool | ||
4977 | */ | ||
4978 | static void | ||
4979 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
4980 | struct __vxge_hw_blockpool_entry *entry) | ||
4981 | { | ||
4982 | struct __vxge_hw_blockpool *blockpool; | ||
4983 | |||
4984 | blockpool = &devh->block_pool; | ||
4985 | |||
4986 | if (entry->length == blockpool->block_size) { | ||
4987 | list_add(&entry->item, &blockpool->free_block_list); | ||
4988 | blockpool->pool_size++; | ||
4989 | } | ||
4990 | |||
4991 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
4992 | } | ||
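Allocate and free are meant to be used as a pair around a block's lifetime, as the vpath open/close paths above and below do for the statistics block. Abbreviated from the visible call sites, with error handling trimmed:

/* Grab one DMA block for the vpath's statistics area on open ... */
vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
						VXGE_HW_BLOCK_SIZE);
if (vpath->stats_block == NULL)
	return VXGE_HW_ERR_OUT_OF_MEMORY;

/* ... and hand it back to the pool on close/teardown. */
__vxge_hw_blockpool_block_free(hldev, vpath->stats_block);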
4993 | |||
4994 | /* | ||
4233 | * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open | 4995 | * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open |
4234 | * This function is used to close access to virtual path opened | 4996 | * This function is used to close access to virtual path opened |
4235 | * earlier. | 4997 | * earlier. |
@@ -4277,8 +5039,6 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) | |||
4277 | 5039 | ||
4278 | __vxge_hw_vp_terminate(devh, vp_id); | 5040 | __vxge_hw_vp_terminate(devh, vp_id); |
4279 | 5041 | ||
4280 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; | ||
4281 | |||
4282 | vpath_close_exit: | 5042 | vpath_close_exit: |
4283 | return status; | 5043 | return status; |
4284 | } | 5044 | } |
@@ -4378,705 +5138,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | |||
4378 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | 5138 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), |
4379 | &hldev->common_reg->cmn_rsthdlr_cfg1); | 5139 | &hldev->common_reg->cmn_rsthdlr_cfg1); |
4380 | } | 5140 | } |
4381 | |||
4382 | /* | ||
4383 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | ||
4384 | * Enable the DMA vpath statistics. The function is called to have the | ||
4385 | * adapter resume updating stats in the host memory | ||
4386 | */ | ||
4387 | enum vxge_hw_status | ||
4388 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) | ||
4389 | { | ||
4390 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4391 | struct __vxge_hw_virtualpath *vpath; | ||
4392 | |||
4393 | vpath = vp->vpath; | ||
4394 | |||
4395 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4396 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4397 | goto exit; | ||
4398 | } | ||
4399 | |||
4400 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | ||
4401 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | ||
4402 | |||
4403 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | ||
4404 | exit: | ||
4405 | return status; | ||
4406 | } | ||
4407 | |||
4408 | /* | ||
4409 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | ||
4410 | * and offset and perform an operation | ||
4411 | */ | ||
4412 | enum vxge_hw_status | ||
4413 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, | ||
4414 | u32 operation, u32 offset, u64 *stat) | ||
4415 | { | ||
4416 | u64 val64; | ||
4417 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4418 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4419 | |||
4420 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4421 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4422 | goto vpath_stats_access_exit; | ||
4423 | } | ||
4424 | |||
4425 | vp_reg = vpath->vp_reg; | ||
4426 | |||
4427 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | ||
4428 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | ||
4429 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | ||
4430 | |||
4431 | status = __vxge_hw_pio_mem_write64(val64, | ||
4432 | &vp_reg->xmac_stats_access_cmd, | ||
4433 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | ||
4434 | vpath->hldev->config.device_poll_millis); | ||
4435 | |||
4436 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | ||
4437 | *stat = readq(&vp_reg->xmac_stats_access_data); | ||
4438 | else | ||
4439 | *stat = 0; | ||
4440 | |||
4441 | vpath_stats_access_exit: | ||
4442 | return status; | ||
4443 | } | ||
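Reading one hardware counter through the helper above means issuing a READ op at the counter's offset and, on success, taking the value from *stat. A sketch of a caller; EXAMPLE_STATS_OFFSET is a hypothetical placeholder, not a real register index.

u64 val64;
enum vxge_hw_status status;

status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
				      EXAMPLE_STATS_OFFSET /* hypothetical */,
				      &val64);
if (status == VXGE_HW_OK)
	pr_info("counter = %llu\n", (unsigned long long)val64);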
4444 | |||
4445 | /* | ||
4446 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | ||
4447 | */ | ||
4448 | enum vxge_hw_status | ||
4449 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
4450 | struct __vxge_hw_virtualpath *vpath, | ||
4451 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | ||
4452 | { | ||
4453 | u64 *val64; | ||
4454 | int i; | ||
4455 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | ||
4456 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4457 | |||
4458 | val64 = (u64 *) vpath_tx_stats; | ||
4459 | |||
4460 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4461 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4462 | goto exit; | ||
4463 | } | ||
4464 | |||
4465 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | ||
4466 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4467 | VXGE_HW_STATS_OP_READ, | ||
4468 | offset, val64); | ||
4469 | if (status != VXGE_HW_OK) | ||
4470 | goto exit; | ||
4471 | offset++; | ||
4472 | val64++; | ||
4473 | } | ||
4474 | exit: | ||
4475 | return status; | ||
4476 | } | ||
4477 | |||
4478 | /* | ||
4479 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | ||
4480 | */ | ||
4481 | enum vxge_hw_status | ||
4482 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, | ||
4483 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) | ||
4484 | { | ||
4485 | u64 *val64; | ||
4486 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4487 | int i; | ||
4488 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | ||
4489 | val64 = (u64 *) vpath_rx_stats; | ||
4490 | |||
4491 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4492 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4493 | goto exit; | ||
4494 | } | ||
4495 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | ||
4496 | status = __vxge_hw_vpath_stats_access(vpath, | ||
4497 | VXGE_HW_STATS_OP_READ, | ||
4498 | offset >> 3, val64); | ||
4499 | if (status != VXGE_HW_OK) | ||
4500 | goto exit; | ||
4501 | |||
4502 | offset += 8; | ||
4503 | val64++; | ||
4504 | } | ||
4505 | exit: | ||
4506 | return status; | ||
4507 | } | ||
4508 | |||
4509 | /* | ||
4510 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | ||
4511 | */ | ||
4512 | enum vxge_hw_status __vxge_hw_vpath_stats_get( | ||
4513 | struct __vxge_hw_virtualpath *vpath, | ||
4514 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | ||
4515 | { | ||
4516 | u64 val64; | ||
4517 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4518 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
4519 | |||
4520 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | ||
4521 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | ||
4522 | goto exit; | ||
4523 | } | ||
4524 | vp_reg = vpath->vp_reg; | ||
4525 | |||
4526 | val64 = readq(&vp_reg->vpath_debug_stats0); | ||
4527 | hw_stats->ini_num_mwr_sent = | ||
4528 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | ||
4529 | |||
4530 | val64 = readq(&vp_reg->vpath_debug_stats1); | ||
4531 | hw_stats->ini_num_mrd_sent = | ||
4532 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | ||
4533 | |||
4534 | val64 = readq(&vp_reg->vpath_debug_stats2); | ||
4535 | hw_stats->ini_num_cpl_rcvd = | ||
4536 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | ||
4537 | |||
4538 | val64 = readq(&vp_reg->vpath_debug_stats3); | ||
4539 | hw_stats->ini_num_mwr_byte_sent = | ||
4540 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | ||
4541 | |||
4542 | val64 = readq(&vp_reg->vpath_debug_stats4); | ||
4543 | hw_stats->ini_num_cpl_byte_rcvd = | ||
4544 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | ||
4545 | |||
4546 | val64 = readq(&vp_reg->vpath_debug_stats5); | ||
4547 | hw_stats->wrcrdtarb_xoff = | ||
4548 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | ||
4549 | |||
4550 | val64 = readq(&vp_reg->vpath_debug_stats6); | ||
4551 | hw_stats->rdcrdtarb_xoff = | ||
4552 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | ||
4553 | |||
4554 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4555 | hw_stats->vpath_genstats_count0 = | ||
4556 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | ||
4557 | val64); | ||
4558 | |||
4559 | val64 = readq(&vp_reg->vpath_genstats_count01); | ||
4560 | hw_stats->vpath_genstats_count1 = | ||
4561 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | ||
4562 | val64); | ||
4563 | |||
4564 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4565 | hw_stats->vpath_genstats_count2 = | ||
4566 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | ||
4567 | val64); | ||
4568 | |||
4569 | val64 = readq(&vp_reg->vpath_genstats_count23); | ||
4570 | hw_stats->vpath_genstats_count3 = | ||
4571 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | ||
4572 | val64); | ||
4573 | |||
4574 | val64 = readq(&vp_reg->vpath_genstats_count4); | ||
4575 | hw_stats->vpath_genstats_count4 = | ||
4576 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | ||
4577 | val64); | ||
4578 | |||
4579 | val64 = readq(&vp_reg->vpath_genstats_count5); | ||
4580 | hw_stats->vpath_genstats_count5 = | ||
4581 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | ||
4582 | val64); | ||
4583 | |||
4584 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | ||
4585 | if (status != VXGE_HW_OK) | ||
4586 | goto exit; | ||
4587 | |||
4588 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | ||
4589 | if (status != VXGE_HW_OK) | ||
4590 | goto exit; | ||
4591 | |||
4592 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4593 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | ||
4594 | |||
4595 | hw_stats->prog_event_vnum0 = | ||
4596 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | ||
4597 | |||
4598 | hw_stats->prog_event_vnum1 = | ||
4599 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | ||
4600 | |||
4601 | VXGE_HW_VPATH_STATS_PIO_READ( | ||
4602 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | ||
4603 | |||
4604 | hw_stats->prog_event_vnum2 = | ||
4605 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | ||
4606 | |||
4607 | hw_stats->prog_event_vnum3 = | ||
4608 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | ||
4609 | |||
4610 | val64 = readq(&vp_reg->rx_multi_cast_stats); | ||
4611 | hw_stats->rx_multi_cast_frame_discard = | ||
4612 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | ||
4613 | |||
4614 | val64 = readq(&vp_reg->rx_frm_transferred); | ||
4615 | hw_stats->rx_frm_transferred = | ||
4616 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | ||
4617 | |||
4618 | val64 = readq(&vp_reg->rxd_returned); | ||
4619 | hw_stats->rxd_returned = | ||
4620 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | ||
4621 | |||
4622 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | ||
4623 | hw_stats->rx_mpa_len_fail_frms = | ||
4624 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | ||
4625 | hw_stats->rx_mpa_mrk_fail_frms = | ||
4626 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | ||
4627 | hw_stats->rx_mpa_crc_fail_frms = | ||
4628 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | ||
4629 | |||
4630 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | ||
4631 | hw_stats->rx_permitted_frms = | ||
4632 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | ||
4633 | hw_stats->rx_vp_reset_discarded_frms = | ||
4634 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | ||
4635 | hw_stats->rx_wol_frms = | ||
4636 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | ||
4637 | |||
4638 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | ||
4639 | hw_stats->tx_vp_reset_discarded_frms = | ||
4640 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | ||
4641 | val64); | ||
4642 | exit: | ||
4643 | return status; | ||
4644 | } | ||
4645 | |||
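Most of the reads above unpack two or more counters from a single 64-bit register through the VXGE_HW_..._GET_...() shift-and-mask macros. A sketch of that idiom with made-up bit positions (the device's real field layout lives in vxge-reg.h):

	#include <linux/types.h>

	/* Illustrative only: upper/lower 32-bit halves as two counters. */
	#define DEMO_GET_COUNT0(v)	((u32)(((v) >> 32) & 0xffffffff))
	#define DEMO_GET_COUNT1(v)	((u32)((v) & 0xffffffff))

	static void demo_unpack_genstats(u64 reg, u32 *count0, u32 *count1)
	{
		*count0 = DEMO_GET_COUNT0(reg);	/* e.g. genstats_count0 */
		*count1 = DEMO_GET_COUNT1(reg);	/* e.g. genstats_count1 */
	}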
4646 | /* | ||
4647 | * __vxge_hw_blockpool_create - Create block pool | ||
4648 | */ | ||
4649 | |||
4650 | enum vxge_hw_status | ||
4651 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
4652 | struct __vxge_hw_blockpool *blockpool, | ||
4653 | u32 pool_size, | ||
4654 | u32 pool_max) | ||
4655 | { | ||
4656 | u32 i; | ||
4657 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4658 | void *memblock; | ||
4659 | dma_addr_t dma_addr; | ||
4660 | struct pci_dev *dma_handle; | ||
4661 | struct pci_dev *acc_handle; | ||
4662 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4663 | |||
4664 | if (blockpool == NULL) { | ||
4665 | status = VXGE_HW_FAIL; | ||
4666 | goto blockpool_create_exit; | ||
4667 | } | ||
4668 | |||
4669 | blockpool->hldev = hldev; | ||
4670 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | ||
4671 | blockpool->pool_size = 0; | ||
4672 | blockpool->pool_max = pool_max; | ||
4673 | blockpool->req_out = 0; | ||
4674 | |||
4675 | INIT_LIST_HEAD(&blockpool->free_block_list); | ||
4676 | INIT_LIST_HEAD(&blockpool->free_entry_list); | ||
4677 | |||
4678 | for (i = 0; i < pool_size + pool_max; i++) { | ||
4679 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4680 | GFP_KERNEL); | ||
4681 | if (entry == NULL) { | ||
4682 | __vxge_hw_blockpool_destroy(blockpool); | ||
4683 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4684 | goto blockpool_create_exit; | ||
4685 | } | ||
4686 | list_add(&entry->item, &blockpool->free_entry_list); | ||
4687 | } | ||
4688 | |||
4689 | for (i = 0; i < pool_size; i++) { | ||
4690 | |||
4691 | memblock = vxge_os_dma_malloc( | ||
4692 | hldev->pdev, | ||
4693 | VXGE_HW_BLOCK_SIZE, | ||
4694 | &dma_handle, | ||
4695 | &acc_handle); | ||
4696 | |||
4697 | if (memblock == NULL) { | ||
4698 | __vxge_hw_blockpool_destroy(blockpool); | ||
4699 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4700 | goto blockpool_create_exit; | ||
4701 | } | ||
4702 | |||
4703 | dma_addr = pci_map_single(hldev->pdev, memblock, | ||
4704 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
4705 | |||
4706 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | ||
4707 | dma_addr))) { | ||
4708 | |||
4709 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | ||
4710 | __vxge_hw_blockpool_destroy(blockpool); | ||
4711 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4712 | goto blockpool_create_exit; | ||
4713 | } | ||
4714 | |||
4715 | if (!list_empty(&blockpool->free_entry_list)) | ||
4716 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4717 | list_first_entry(&blockpool->free_entry_list, | ||
4718 | struct __vxge_hw_blockpool_entry, | ||
4719 | item); | ||
4720 | |||
4721 | if (entry == NULL) | ||
4722 | entry = | ||
4723 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | ||
4724 | GFP_KERNEL); | ||
4725 | if (entry != NULL) { | ||
4726 | list_del(&entry->item); | ||
4727 | entry->length = VXGE_HW_BLOCK_SIZE; | ||
4728 | entry->memblock = memblock; | ||
4729 | entry->dma_addr = dma_addr; | ||
4730 | entry->acc_handle = acc_handle; | ||
4731 | entry->dma_handle = dma_handle; | ||
4732 | list_add(&entry->item, | ||
4733 | &blockpool->free_block_list); | ||
4734 | blockpool->pool_size++; | ||
4735 | } else { | ||
4736 | __vxge_hw_blockpool_destroy(blockpool); | ||
4737 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4738 | goto blockpool_create_exit; | ||
4739 | } | ||
4740 | } | ||
4741 | |||
4742 | blockpool_create_exit: | ||
4743 | return status; | ||
4744 | } | ||
4745 | |||
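A hedged usage sketch: the pool is created once per device, sized by the dma_blockpool_initial/dma_blockpool_max knobs from struct vxge_hw_device_config. The call site below is illustrative, not lifted from the driver:

	static enum vxge_hw_status demo_pool_setup(struct __vxge_hw_device *hldev,
						   struct vxge_hw_device_config *cfg)
	{
		/* hldev->block_pool is the per-device pool embedded in the
		 * HAL device object. */
		return __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
						  cfg->dma_blockpool_initial,
						  cfg->dma_blockpool_max);
	}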
4746 | /* | ||
4747 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | ||
4748 | */ | ||
4749 | |||
4750 | void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) | ||
4751 | { | ||
4752 | |||
4753 | struct __vxge_hw_device *hldev; | ||
4754 | struct list_head *p, *n; | ||
4755 | u16 ret; | ||
4756 | |||
4757 | if (blockpool == NULL) { | ||
4758 | ret = 1; | ||
4759 | goto exit; | ||
4760 | } | ||
4761 | |||
4762 | hldev = blockpool->hldev; | ||
4763 | |||
4764 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4765 | |||
4766 | pci_unmap_single(hldev->pdev, | ||
4767 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4768 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4769 | PCI_DMA_BIDIRECTIONAL); | ||
4770 | |||
4771 | vxge_os_dma_free(hldev->pdev, | ||
4772 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4773 | &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); | ||
4774 | |||
4775 | list_del( | ||
4776 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4777 | kfree(p); | ||
4778 | blockpool->pool_size--; | ||
4779 | } | ||
4780 | |||
4781 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | ||
4782 | list_del( | ||
4783 | &((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4784 | kfree((void *)p); | ||
4785 | } | ||
4786 | ret = 0; | ||
4787 | exit: | ||
4788 | return; | ||
4789 | } | ||
4790 | |||
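Teardown leans on list_for_each_safe(), which caches the next pointer so the current node can be unlinked and freed mid-walk. A self-contained sketch of the idiom (types illustrative):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_node {
		struct list_head item;
	};

	static void demo_drain(struct list_head *head)
	{
		struct list_head *p, *n;

		list_for_each_safe(p, n, head) {	/* n survives the kfree */
			list_del(p);
			kfree(list_entry(p, struct demo_node, item));
		}
	}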
4791 | /* | ||
4792 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | ||
4793 | */ | ||
4794 | static | ||
4795 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | ||
4796 | { | ||
4797 | u32 nreq = 0, i; | ||
4798 | |||
4799 | if ((blockpool->pool_size + blockpool->req_out) < | ||
4800 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | ||
4801 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | ||
4802 | blockpool->req_out += nreq; | ||
4803 | } | ||
4804 | |||
4805 | for (i = 0; i < nreq; i++) | ||
4806 | vxge_os_dma_malloc_async( | ||
4807 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4808 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | ||
4809 | } | ||
4810 | |||
4811 | /* | ||
4812 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | ||
4813 | */ | ||
4814 | static | ||
4815 | void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | ||
4816 | { | ||
4817 | struct list_head *p, *n; | ||
4818 | |||
4819 | list_for_each_safe(p, n, &blockpool->free_block_list) { | ||
4820 | |||
4821 | if (blockpool->pool_size < blockpool->pool_max) | ||
4822 | break; | ||
4823 | |||
4824 | pci_unmap_single( | ||
4825 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4826 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | ||
4827 | ((struct __vxge_hw_blockpool_entry *)p)->length, | ||
4828 | PCI_DMA_BIDIRECTIONAL); | ||
4829 | |||
4830 | vxge_os_dma_free( | ||
4831 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | ||
4832 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | ||
4833 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | ||
4834 | |||
4835 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | ||
4836 | |||
4837 | list_add(p, &blockpool->free_entry_list); | ||
4838 | |||
4839 | blockpool->pool_size--; | ||
4840 | |||
4841 | } | ||
4842 | } | ||
4843 | |||
4844 | /* | ||
4845 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | ||
4846 | * Adds a block to block pool | ||
4847 | */ | ||
4848 | void vxge_hw_blockpool_block_add( | ||
4849 | struct __vxge_hw_device *devh, | ||
4850 | void *block_addr, | ||
4851 | u32 length, | ||
4852 | struct pci_dev *dma_h, | ||
4853 | struct pci_dev *acc_handle) | ||
4854 | { | ||
4855 | struct __vxge_hw_blockpool *blockpool; | ||
4856 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4857 | dma_addr_t dma_addr; | ||
4858 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4859 | u32 req_out; | ||
4860 | |||
4861 | blockpool = &devh->block_pool; | ||
4862 | |||
4863 | if (block_addr == NULL) { | ||
4864 | blockpool->req_out--; | ||
4865 | status = VXGE_HW_FAIL; | ||
4866 | goto exit; | ||
4867 | } | ||
4868 | |||
4869 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | ||
4870 | PCI_DMA_BIDIRECTIONAL); | ||
4871 | |||
4872 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | ||
4873 | |||
4874 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | ||
4875 | blockpool->req_out--; | ||
4876 | status = VXGE_HW_FAIL; | ||
4877 | goto exit; | ||
4878 | } | ||
4879 | |||
4880 | |||
4881 | if (!list_empty(&blockpool->free_entry_list)) | ||
4882 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4883 | list_first_entry(&blockpool->free_entry_list, | ||
4884 | struct __vxge_hw_blockpool_entry, | ||
4885 | item); | ||
4886 | |||
4887 | if (entry == NULL) | ||
4888 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4889 | vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | ||
4890 | else | ||
4891 | list_del(&entry->item); | ||
4892 | |||
4893 | if (entry != NULL) { | ||
4894 | entry->length = length; | ||
4895 | entry->memblock = block_addr; | ||
4896 | entry->dma_addr = dma_addr; | ||
4897 | entry->acc_handle = acc_handle; | ||
4898 | entry->dma_handle = dma_h; | ||
4899 | list_add(&entry->item, &blockpool->free_block_list); | ||
4900 | blockpool->pool_size++; | ||
4901 | status = VXGE_HW_OK; | ||
4902 | } else | ||
4903 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4904 | |||
4905 | blockpool->req_out--; | ||
4906 | |||
4907 | req_out = blockpool->req_out; | ||
4908 | exit: | ||
4909 | return; | ||
4910 | } | ||
4911 | |||
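Every streaming mapping in this file follows the same map-then-verify idiom: a pci_map_single() result must be checked with pci_dma_mapping_error() before the address is used, and undone with matching size and direction. A minimal sketch against this era's PCI DMA API:

	#include <linux/pci.h>

	static int demo_map(struct pci_dev *pdev, void *buf, size_t len,
			    dma_addr_t *dma)
	{
		*dma = pci_map_single(pdev, buf, len, PCI_DMA_BIDIRECTIONAL);
		if (unlikely(pci_dma_mapping_error(pdev, *dma)))
			return -EIO;	/* caller still owns and must free buf */
		return 0;
	}

	static void demo_unmap(struct pci_dev *pdev, dma_addr_t dma, size_t len)
	{
		pci_unmap_single(pdev, dma, len, PCI_DMA_BIDIRECTIONAL);
	}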
4912 | /* | ||
4913 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | ||
4914 | * Allocates a block of memory of given size, either from block pool | ||
4915 | * or by calling vxge_os_dma_malloc() | ||
4916 | */ | ||
4917 | void * | ||
4918 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | ||
4919 | struct vxge_hw_mempool_dma *dma_object) | ||
4920 | { | ||
4921 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4922 | struct __vxge_hw_blockpool *blockpool; | ||
4923 | void *memblock = NULL; | ||
4924 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4925 | |||
4926 | blockpool = &devh->block_pool; | ||
4927 | |||
4928 | if (size != blockpool->block_size) { | ||
4929 | |||
4930 | memblock = vxge_os_dma_malloc(devh->pdev, size, | ||
4931 | &dma_object->handle, | ||
4932 | &dma_object->acc_handle); | ||
4933 | |||
4934 | if (memblock == NULL) { | ||
4935 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4936 | goto exit; | ||
4937 | } | ||
4938 | |||
4939 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | ||
4940 | PCI_DMA_BIDIRECTIONAL); | ||
4941 | |||
4942 | if (unlikely(pci_dma_mapping_error(devh->pdev, | ||
4943 | dma_object->addr))) { | ||
4944 | vxge_os_dma_free(devh->pdev, memblock, | ||
4945 | &dma_object->acc_handle); | ||
4946 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
4947 | goto exit; | ||
4948 | } | ||
4949 | |||
4950 | } else { | ||
4951 | |||
4952 | if (!list_empty(&blockpool->free_block_list)) | ||
4953 | entry = (struct __vxge_hw_blockpool_entry *) | ||
4954 | list_first_entry(&blockpool->free_block_list, | ||
4955 | struct __vxge_hw_blockpool_entry, | ||
4956 | item); | ||
4957 | |||
4958 | if (entry != NULL) { | ||
4959 | list_del(&entry->item); | ||
4960 | dma_object->addr = entry->dma_addr; | ||
4961 | dma_object->handle = entry->dma_handle; | ||
4962 | dma_object->acc_handle = entry->acc_handle; | ||
4963 | memblock = entry->memblock; | ||
4964 | |||
4965 | list_add(&entry->item, | ||
4966 | &blockpool->free_entry_list); | ||
4967 | blockpool->pool_size--; | ||
4968 | } | ||
4969 | |||
4970 | if (memblock != NULL) | ||
4971 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
4972 | } | ||
4973 | exit: | ||
4974 | return memblock; | ||
4975 | } | ||
4976 | |||
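A hedged round-trip sketch: block-sized requests are served from the pool (and trigger an async refill), while odd sizes fall through to a fresh vxge_os_dma_malloc(); the same free routine takes both back. The caller shown is illustrative:

	static void demo_roundtrip(struct __vxge_hw_device *devh)
	{
		struct vxge_hw_mempool_dma dma_object;
		void *block;

		block = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
						   &dma_object);
		if (block == NULL)
			return;

		/* ... program dma_object.addr into the device ... */

		__vxge_hw_blockpool_free(devh, block, VXGE_HW_BLOCK_SIZE,
					 &dma_object);
	}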
4977 | /* | ||
4978 | * __vxge_hw_blockpool_free - Frees the memory allocated with | ||
4979 | __vxge_hw_blockpool_malloc | ||
4980 | */ | ||
4981 | void | ||
4982 | __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, | ||
4983 | void *memblock, u32 size, | ||
4984 | struct vxge_hw_mempool_dma *dma_object) | ||
4985 | { | ||
4986 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
4987 | struct __vxge_hw_blockpool *blockpool; | ||
4988 | enum vxge_hw_status status = VXGE_HW_OK; | ||
4989 | |||
4990 | blockpool = &devh->block_pool; | ||
4991 | |||
4992 | if (size != blockpool->block_size) { | ||
4993 | pci_unmap_single(devh->pdev, dma_object->addr, size, | ||
4994 | PCI_DMA_BIDIRECTIONAL); | ||
4995 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | ||
4996 | } else { | ||
4997 | |||
4998 | if (!list_empty(&blockpool->free_entry_list)) | ||
4999 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5000 | list_first_entry(&blockpool->free_entry_list, | ||
5001 | struct __vxge_hw_blockpool_entry, | ||
5002 | item); | ||
5003 | |||
5004 | if (entry == NULL) | ||
5005 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5006 | vmalloc(sizeof( | ||
5007 | struct __vxge_hw_blockpool_entry)); | ||
5008 | else | ||
5009 | list_del(&entry->item); | ||
5010 | |||
5011 | if (entry != NULL) { | ||
5012 | entry->length = size; | ||
5013 | entry->memblock = memblock; | ||
5014 | entry->dma_addr = dma_object->addr; | ||
5015 | entry->acc_handle = dma_object->acc_handle; | ||
5016 | entry->dma_handle = dma_object->handle; | ||
5017 | list_add(&entry->item, | ||
5018 | &blockpool->free_block_list); | ||
5019 | blockpool->pool_size++; | ||
5020 | status = VXGE_HW_OK; | ||
5021 | } else | ||
5022 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | ||
5023 | |||
5024 | if (status == VXGE_HW_OK) | ||
5025 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5026 | } | ||
5027 | } | ||
5028 | |||
5029 | /* | ||
5030 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | ||
5031 | * This function allocates a block from the block pool and triggers a refill when the pool runs low | ||
5032 | */ | ||
5033 | struct __vxge_hw_blockpool_entry * | ||
5034 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) | ||
5035 | { | ||
5036 | struct __vxge_hw_blockpool_entry *entry = NULL; | ||
5037 | struct __vxge_hw_blockpool *blockpool; | ||
5038 | |||
5039 | blockpool = &devh->block_pool; | ||
5040 | |||
5041 | if (size == blockpool->block_size) { | ||
5042 | |||
5043 | if (!list_empty(&blockpool->free_block_list)) | ||
5044 | entry = (struct __vxge_hw_blockpool_entry *) | ||
5045 | list_first_entry(&blockpool->free_block_list, | ||
5046 | struct __vxge_hw_blockpool_entry, | ||
5047 | item); | ||
5048 | |||
5049 | if (entry != NULL) { | ||
5050 | list_del(&entry->item); | ||
5051 | blockpool->pool_size--; | ||
5052 | } | ||
5053 | } | ||
5054 | |||
5055 | if (entry != NULL) | ||
5056 | __vxge_hw_blockpool_blocks_add(blockpool); | ||
5057 | |||
5058 | return entry; | ||
5059 | } | ||
5060 | |||
5061 | /* | ||
5062 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | ||
5063 | * @devh: Hal device | ||
5064 | * @entry: Entry of block to be freed | ||
5065 | * | ||
5066 | * This function frees a block from block pool | ||
5067 | */ | ||
5068 | void | ||
5069 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, | ||
5070 | struct __vxge_hw_blockpool_entry *entry) | ||
5071 | { | ||
5072 | struct __vxge_hw_blockpool *blockpool; | ||
5073 | |||
5074 | blockpool = &devh->block_pool; | ||
5075 | |||
5076 | if (entry->length == blockpool->block_size) { | ||
5077 | list_add(&entry->item, &blockpool->free_block_list); | ||
5078 | blockpool->pool_size++; | ||
5079 | } | ||
5080 | |||
5081 | __vxge_hw_blockpool_blocks_remove(blockpool); | ||
5082 | } | ||
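The entry-based pair above keeps the DMA mapping alive across allocate/free, so freeing only parks the entry back on the free list with no unmap. An illustrative caller:

	static void demo_entry_cycle(struct __vxge_hw_device *devh)
	{
		struct __vxge_hw_blockpool_entry *entry;

		entry = __vxge_hw_blockpool_block_allocate(devh,
							   VXGE_HW_BLOCK_SIZE);
		if (entry == NULL)
			return;

		/* ... use entry->memblock / entry->dma_addr directly ... */

		__vxge_hw_blockpool_block_free(devh, entry);
	}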
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index 1a94343023cb..359b9b9f8041 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h | |||
@@ -20,13 +20,6 @@ | |||
20 | #define VXGE_CACHE_LINE_SIZE 128 | 20 | #define VXGE_CACHE_LINE_SIZE 128 |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define vxge_os_vaprintf(level, mask, fmt, ...) { \ | ||
24 | char buff[255]; \ | ||
25 | snprintf(buff, 255, fmt, __VA_ARGS__); \ | ||
26 | printk(buff); \ | ||
27 | printk("\n"); \ | ||
28 | } | ||
29 | |||
30 | #ifndef VXGE_ALIGN | 23 | #ifndef VXGE_ALIGN |
31 | #define VXGE_ALIGN(adrs, size) \ | 24 | #define VXGE_ALIGN(adrs, size) \ |
32 | (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) | 25 | (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) |
@@ -36,8 +29,16 @@ | |||
36 | #define VXGE_HW_MAX_MTU 9600 | 29 | #define VXGE_HW_MAX_MTU 9600 |
37 | #define VXGE_HW_DEFAULT_MTU 1500 | 30 | #define VXGE_HW_DEFAULT_MTU 1500 |
38 | 31 | ||
39 | #ifdef VXGE_DEBUG_ASSERT | 32 | #define VXGE_HW_MAX_ROM_IMAGES 8 |
33 | |||
34 | struct eprom_image { | ||
35 | u8 is_valid:1; | ||
36 | u8 index; | ||
37 | u8 type; | ||
38 | u16 version; | ||
39 | }; | ||
40 | 40 | ||
41 | #ifdef VXGE_DEBUG_ASSERT | ||
41 | /** | 42 | /** |
42 | * vxge_assert | 43 | * vxge_assert |
43 | * @test: C-condition to check | 44 | * @test: C-condition to check |
@@ -48,16 +49,13 @@ | |||
48 | * compilation | 49 | * compilation |
49 | * time. | 50 | * time. |
50 | */ | 51 | */ |
51 | #define vxge_assert(test) { \ | 52 | #define vxge_assert(test) BUG_ON(!(test)) |
52 | if (!(test)) \ | ||
53 | vxge_os_bug("bad cond: "#test" at %s:%d\n", \ | ||
54 | __FILE__, __LINE__); } | ||
55 | #else | 53 | #else |
56 | #define vxge_assert(test) | 54 | #define vxge_assert(test) |
57 | #endif /* end of VXGE_DEBUG_ASSERT */ | 55 | #endif /* end of VXGE_DEBUG_ASSERT */ |
58 | 56 | ||
59 | /** | 57 | /** |
60 | * enum enum vxge_debug_level | 58 | * enum vxge_debug_level |
61 | * @VXGE_NONE: debug disabled | 59 | * @VXGE_NONE: debug disabled |
62 | * @VXGE_ERR: all errors going to be logged out | 60 | * @VXGE_ERR: all errors going to be logged out |
63 | * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs | 61 | * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs |
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state { | |||
159 | }; | 157 | }; |
160 | 158 | ||
161 | /** | 159 | /** |
160 | * enum vxge_hw_fw_upgrade_code - FW upgrade return codes. | ||
161 | * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes | ||
162 | * @VXGE_HW_FW_UPGRADE_DONE: upload completed | ||
163 | * @VXGE_HW_FW_UPGRADE_ERR: upload error | ||
164 | * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream | ||
165 | * | ||
166 | */ | ||
167 | enum vxge_hw_fw_upgrade_code { | ||
168 | VXGE_HW_FW_UPGRADE_OK = 0, | ||
169 | VXGE_HW_FW_UPGRADE_DONE = 1, | ||
170 | VXGE_HW_FW_UPGRADE_ERR = 2, | ||
171 | VXGE_FW_UPGRADE_BYTES2SKIP = 3 | ||
172 | }; | ||
173 | |||
174 | /** | ||
175 | * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes. | ||
176 | * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data | ||
177 | * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow | ||
178 | * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file | ||
179 | * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file | ||
180 | * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file | ||
181 | * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file | ||
182 | * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data | ||
183 | * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file | ||
184 | * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type | ||
185 | * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image | ||
186 | */ | ||
187 | enum vxge_hw_fw_upgrade_err_code { | ||
188 | VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1, | ||
189 | VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2, | ||
190 | VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3, | ||
191 | VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4, | ||
192 | VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5, | ||
193 | VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6, | ||
194 | VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7, | ||
195 | VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8, | ||
196 | VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9, | ||
197 | VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10 | ||
198 | }; | ||
199 | |||
200 | /** | ||
162 | * struct vxge_hw_device_date - Date Format | 201 | * struct vxge_hw_device_date - Date Format |
163 | * @day: Day | 202 | * @day: Day |
164 | * @month: Month | 203 | * @month: Month |
@@ -183,11 +222,6 @@ struct vxge_hw_device_version { | |||
183 | char version[VXGE_HW_FW_STRLEN]; | 222 | char version[VXGE_HW_FW_STRLEN]; |
184 | }; | 223 | }; |
185 | 224 | ||
186 | u64 | ||
187 | __vxge_hw_vpath_pci_func_mode_get( | ||
188 | u32 vp_id, | ||
189 | struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
190 | |||
191 | /** | 225 | /** |
192 | * struct vxge_hw_fifo_config - Configuration of fifo. | 226 | * struct vxge_hw_fifo_config - Configuration of fifo. |
193 | * @enable: Is this fifo to be commissioned | 227 | * @enable: Is this fifo to be commissioned |
@@ -280,9 +314,9 @@ struct vxge_hw_ring_config { | |||
280 | #define VXGE_HW_RING_DEFAULT 1 | 314 | #define VXGE_HW_RING_DEFAULT 1 |
281 | 315 | ||
282 | u32 ring_blocks; | 316 | u32 ring_blocks; |
283 | #define VXGE_HW_MIN_RING_BLOCKS 1 | 317 | #define VXGE_HW_MIN_RING_BLOCKS 1 |
284 | #define VXGE_HW_MAX_RING_BLOCKS 128 | 318 | #define VXGE_HW_MAX_RING_BLOCKS 128 |
285 | #define VXGE_HW_DEF_RING_BLOCKS 2 | 319 | #define VXGE_HW_DEF_RING_BLOCKS 2 |
286 | 320 | ||
287 | u32 buffer_mode; | 321 | u32 buffer_mode; |
288 | #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 | 322 | #define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 |
@@ -378,44 +412,48 @@ struct vxge_hw_vp_config { | |||
378 | * See also: struct vxge_hw_tim_intr_config{}. | 412 | * See also: struct vxge_hw_tim_intr_config{}. |
379 | */ | 413 | */ |
380 | struct vxge_hw_device_config { | 414 | struct vxge_hw_device_config { |
381 | u32 dma_blockpool_initial; | 415 | u32 device_poll_millis; |
382 | u32 dma_blockpool_max; | 416 | #define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1 |
383 | #define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0 | 417 | #define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000 |
384 | #define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0 | 418 | #define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000 |
385 | #define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4 | 419 | |
386 | #define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096 | 420 | u32 dma_blockpool_initial; |
387 | 421 | u32 dma_blockpool_max; | |
388 | #define VXGE_HW_MAX_PAYLOAD_SIZE_512 2 | 422 | #define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0 |
389 | 423 | #define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0 | |
390 | u32 intr_mode; | 424 | #define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4 |
391 | #define VXGE_HW_INTR_MODE_IRQLINE 0 | 425 | #define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096 |
392 | #define VXGE_HW_INTR_MODE_MSIX 1 | 426 | |
393 | #define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2 | 427 | #define VXGE_HW_MAX_PAYLOAD_SIZE_512 2 |
394 | 428 | ||
395 | #define VXGE_HW_INTR_MODE_DEF 0 | 429 | u32 intr_mode:2, |
396 | 430 | #define VXGE_HW_INTR_MODE_IRQLINE 0 | |
397 | u32 rth_en; | 431 | #define VXGE_HW_INTR_MODE_MSIX 1 |
398 | #define VXGE_HW_RTH_DISABLE 0 | 432 | #define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2 |
399 | #define VXGE_HW_RTH_ENABLE 1 | 433 | |
400 | #define VXGE_HW_RTH_DEFAULT 0 | 434 | #define VXGE_HW_INTR_MODE_DEF 0 |
401 | 435 | ||
402 | u32 rth_it_type; | 436 | rth_en:1, |
403 | #define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0 | 437 | #define VXGE_HW_RTH_DISABLE 0 |
404 | #define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1 | 438 | #define VXGE_HW_RTH_ENABLE 1 |
405 | #define VXGE_HW_RTH_IT_TYPE_DEFAULT 0 | 439 | #define VXGE_HW_RTH_DEFAULT 0 |
406 | 440 | ||
407 | u32 rts_mac_en; | 441 | rth_it_type:1, |
442 | #define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0 | ||
443 | #define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1 | ||
444 | #define VXGE_HW_RTH_IT_TYPE_DEFAULT 0 | ||
445 | |||
446 | rts_mac_en:1, | ||
408 | #define VXGE_HW_RTS_MAC_DISABLE 0 | 447 | #define VXGE_HW_RTS_MAC_DISABLE 0 |
409 | #define VXGE_HW_RTS_MAC_ENABLE 1 | 448 | #define VXGE_HW_RTS_MAC_ENABLE 1 |
410 | #define VXGE_HW_RTS_MAC_DEFAULT 0 | 449 | #define VXGE_HW_RTS_MAC_DEFAULT 0 |
411 | 450 | ||
412 | struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS]; | 451 | hwts_en:1; |
413 | 452 | #define VXGE_HW_HWTS_DISABLE 0 | |
414 | u32 device_poll_millis; | 453 | #define VXGE_HW_HWTS_ENABLE 1 |
415 | #define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1 | 454 | #define VXGE_HW_HWTS_DEFAULT 1 |
416 | #define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000 | ||
417 | #define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000 | ||
418 | 455 | ||
456 | struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS]; | ||
419 | }; | 457 | }; |
420 | 458 | ||
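With intr_mode, rth_en, rth_it_type, rts_mac_en, and hwts_en packed into one bitfield word, configuration code is unchanged at the source level; only the storage shrinks. An illustrative initialization, assuming the reworked struct above:

	static void demo_config(struct vxge_hw_device_config *cfg)
	{
		cfg->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
		cfg->intr_mode = VXGE_HW_INTR_MODE_MSIX;	/* 2-bit field */
		cfg->rth_en = VXGE_HW_RTH_ENABLE;		/* 1-bit fields */
		cfg->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
		cfg->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
		cfg->hwts_en = VXGE_HW_HWTS_DEFAULT;
	}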
421 | /** | 459 | /** |
@@ -470,7 +508,6 @@ struct vxge_hw_device_config { | |||
470 | * See also: vxge_hw_driver_initialize(). | 508 | * See also: vxge_hw_driver_initialize(). |
471 | */ | 509 | */ |
472 | struct vxge_hw_uld_cbs { | 510 | struct vxge_hw_uld_cbs { |
473 | |||
474 | void (*link_up)(struct __vxge_hw_device *devh); | 511 | void (*link_up)(struct __vxge_hw_device *devh); |
475 | void (*link_down)(struct __vxge_hw_device *devh); | 512 | void (*link_down)(struct __vxge_hw_device *devh); |
476 | void (*crit_err)(struct __vxge_hw_device *devh, | 513 | void (*crit_err)(struct __vxge_hw_device *devh, |
@@ -649,6 +686,10 @@ struct __vxge_hw_virtualpath { | |||
649 | u32 vsport_number; | 686 | u32 vsport_number; |
650 | u32 max_kdfc_db; | 687 | u32 max_kdfc_db; |
651 | u32 max_nofl_db; | 688 | u32 max_nofl_db; |
689 | u64 tim_tti_cfg1_saved; | ||
690 | u64 tim_tti_cfg3_saved; | ||
691 | u64 tim_rti_cfg1_saved; | ||
692 | u64 tim_rti_cfg3_saved; | ||
652 | 693 | ||
653 | struct __vxge_hw_ring *____cacheline_aligned ringh; | 694 | struct __vxge_hw_ring *____cacheline_aligned ringh; |
654 | struct __vxge_hw_fifo *____cacheline_aligned fifoh; | 695 | struct __vxge_hw_fifo *____cacheline_aligned fifoh; |
@@ -657,6 +698,7 @@ struct __vxge_hw_virtualpath { | |||
657 | struct vxge_hw_vpath_stats_hw_info *hw_stats; | 698 | struct vxge_hw_vpath_stats_hw_info *hw_stats; |
658 | struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; | 699 | struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; |
659 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | 700 | struct vxge_hw_vpath_stats_sw_info *sw_stats; |
701 | spinlock_t lock; | ||
660 | }; | 702 | }; |
661 | 703 | ||
662 | /* | 704 | /* |
@@ -666,7 +708,7 @@ struct __vxge_hw_virtualpath { | |||
666 | * | 708 | * |
667 | * This structure is used to store the callback information. | 709 | * This structure is used to store the callback information. |
668 | */ | 710 | */ |
669 | struct __vxge_hw_vpath_handle{ | 711 | struct __vxge_hw_vpath_handle { |
670 | struct list_head item; | 712 | struct list_head item; |
671 | struct __vxge_hw_virtualpath *vpath; | 713 | struct __vxge_hw_virtualpath *vpath; |
672 | }; | 714 | }; |
@@ -679,9 +721,6 @@ struct __vxge_hw_vpath_handle{ | |||
679 | /** | 721 | /** |
680 | * struct __vxge_hw_device - Hal device object | 722 | * struct __vxge_hw_device - Hal device object |
681 | * @magic: Magic Number | 723 | * @magic: Magic Number |
682 | * @device_id: PCI Device Id of the adapter | ||
683 | * @major_revision: PCI Device major revision | ||
684 | * @minor_revision: PCI Device minor revision | ||
685 | * @bar0: BAR0 virtual address. | 724 | * @bar0: BAR0 virtual address. |
686 | * @pdev: Physical device handle | 725 | * @pdev: Physical device handle |
687 | * @config: Confguration passed by the LL driver at initialization | 726 | * @config: Confguration passed by the LL driver at initialization |
@@ -693,9 +732,6 @@ struct __vxge_hw_device { | |||
693 | u32 magic; | 732 | u32 magic; |
694 | #define VXGE_HW_DEVICE_MAGIC 0x12345678 | 733 | #define VXGE_HW_DEVICE_MAGIC 0x12345678 |
695 | #define VXGE_HW_DEVICE_DEAD 0xDEADDEAD | 734 | #define VXGE_HW_DEVICE_DEAD 0xDEADDEAD |
696 | u16 device_id; | ||
697 | u8 major_revision; | ||
698 | u8 minor_revision; | ||
699 | void __iomem *bar0; | 735 | void __iomem *bar0; |
700 | struct pci_dev *pdev; | 736 | struct pci_dev *pdev; |
701 | struct net_device *ndev; | 737 | struct net_device *ndev; |
@@ -736,6 +772,7 @@ struct __vxge_hw_device { | |||
736 | u32 debug_level; | 772 | u32 debug_level; |
737 | u32 level_err; | 773 | u32 level_err; |
738 | u32 level_trace; | 774 | u32 level_trace; |
775 | u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES]; | ||
739 | }; | 776 | }; |
740 | 777 | ||
741 | #define VXGE_HW_INFO_LEN 64 | 778 | #define VXGE_HW_INFO_LEN 64 |
@@ -786,8 +823,8 @@ struct vxge_hw_device_hw_info { | |||
786 | u8 serial_number[VXGE_HW_INFO_LEN]; | 823 | u8 serial_number[VXGE_HW_INFO_LEN]; |
787 | u8 part_number[VXGE_HW_INFO_LEN]; | 824 | u8 part_number[VXGE_HW_INFO_LEN]; |
788 | u8 product_desc[VXGE_HW_INFO_LEN]; | 825 | u8 product_desc[VXGE_HW_INFO_LEN]; |
789 | u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | 826 | u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; |
790 | u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; | 827 | u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; |
791 | }; | 828 | }; |
792 | 829 | ||
793 | /** | 830 | /** |
@@ -834,20 +871,10 @@ struct vxge_hw_device_attr { | |||
834 | loc, \ | 871 | loc, \ |
835 | offset, \ | 872 | offset, \ |
836 | &val64); \ | 873 | &val64); \ |
837 | \ | ||
838 | if (status != VXGE_HW_OK) \ | 874 | if (status != VXGE_HW_OK) \ |
839 | return status; \ | 875 | return status; \ |
840 | } | 876 | } |
841 | 877 | ||
842 | #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \ | ||
843 | status = __vxge_hw_vpath_stats_access(vpath, \ | ||
844 | VXGE_HW_STATS_OP_READ, \ | ||
845 | offset, \ | ||
846 | &val64); \ | ||
847 | if (status != VXGE_HW_OK) \ | ||
848 | return status; \ | ||
849 | } | ||
850 | |||
851 | /* | 878 | /* |
852 | * struct __vxge_hw_ring - Ring channel. | 879 | * struct __vxge_hw_ring - Ring channel. |
853 | * @channel: Channel "base" of this ring, the common part of all HW | 880 | * @channel: Channel "base" of this ring, the common part of all HW |
@@ -902,6 +929,9 @@ struct __vxge_hw_ring { | |||
902 | u32 doorbell_cnt; | 929 | u32 doorbell_cnt; |
903 | u32 total_db_cnt; | 930 | u32 total_db_cnt; |
904 | u64 rxds_limit; | 931 | u64 rxds_limit; |
932 | u32 rtimer; | ||
933 | u64 tim_rti_cfg1_saved; | ||
934 | u64 tim_rti_cfg3_saved; | ||
905 | 935 | ||
906 | enum vxge_hw_status (*callback)( | 936 | enum vxge_hw_status (*callback)( |
907 | struct __vxge_hw_ring *ringh, | 937 | struct __vxge_hw_ring *ringh, |
@@ -981,6 +1011,9 @@ struct __vxge_hw_fifo { | |||
981 | u32 per_txdl_space; | 1011 | u32 per_txdl_space; |
982 | u32 vp_id; | 1012 | u32 vp_id; |
983 | u32 tx_intr_num; | 1013 | u32 tx_intr_num; |
1014 | u32 rtimer; | ||
1015 | u64 tim_tti_cfg1_saved; | ||
1016 | u64 tim_tti_cfg3_saved; | ||
984 | 1017 | ||
985 | enum vxge_hw_status (*callback)( | 1018 | enum vxge_hw_status (*callback)( |
986 | struct __vxge_hw_fifo *fifo_handle, | 1019 | struct __vxge_hw_fifo *fifo_handle, |
@@ -1119,7 +1152,7 @@ struct __vxge_hw_non_offload_db_wrapper { | |||
1119 | * lookup to determine the transmit port. | 1152 | * lookup to determine the transmit port. |
1120 | * 01: Send on physical Port1. | 1153 | * 01: Send on physical Port1. |
1121 | * 10: Send on physical Port0. | 1154 | * 10: Send on physical Port0. |
1122 | * 11: Send on both ports. | 1155 | * 11: Send on both ports. |
1123 | * Bits 18 to 21 - Reserved | 1156 | * Bits 18 to 21 - Reserved |
1124 | * Bits 22 to 23 - Gather_Code. This field is set by the host and | 1157 | * Bits 22 to 23 - Gather_Code. This field is set by the host and |
1125 | * is used to describe how individual buffers comprise a frame. | 1158 | * is used to describe how individual buffers comprise a frame. |
@@ -1418,17 +1451,14 @@ enum vxge_hw_rth_algoritms { | |||
1418 | * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). | 1451 | * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). |
1419 | */ | 1452 | */ |
1420 | struct vxge_hw_rth_hash_types { | 1453 | struct vxge_hw_rth_hash_types { |
1421 | u8 hash_type_tcpipv4_en; | 1454 | u8 hash_type_tcpipv4_en:1, |
1422 | u8 hash_type_ipv4_en; | 1455 | hash_type_ipv4_en:1, |
1423 | u8 hash_type_tcpipv6_en; | 1456 | hash_type_tcpipv6_en:1, |
1424 | u8 hash_type_ipv6_en; | 1457 | hash_type_ipv6_en:1, |
1425 | u8 hash_type_tcpipv6ex_en; | 1458 | hash_type_tcpipv6ex_en:1, |
1426 | u8 hash_type_ipv6ex_en; | 1459 | hash_type_ipv6ex_en:1; |
1427 | }; | 1460 | }; |
1428 | 1461 | ||
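The same bitfield treatment applies to the RSS hash selectors. A typical TCP/IPv4-only setup might look like the sketch below (values illustrative) before being handed to vxge_hw_vpath_rts_rth_set():

	static void demo_rth_hash(struct vxge_hw_rth_hash_types *ht)
	{
		ht->hash_type_tcpipv4_en = 1;	/* hash TCP/IPv4 flows */
		ht->hash_type_ipv4_en = 1;	/* and bare IPv4 */
		ht->hash_type_tcpipv6_en = 0;
		ht->hash_type_ipv6_en = 0;
		ht->hash_type_tcpipv6ex_en = 0;
		ht->hash_type_ipv6ex_en = 0;
	}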
1429 | u32 | ||
1430 | vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh); | ||
1431 | |||
1432 | void vxge_hw_device_debug_set( | 1462 | void vxge_hw_device_debug_set( |
1433 | struct __vxge_hw_device *devh, | 1463 | struct __vxge_hw_device *devh, |
1434 | enum vxge_debug_level level, | 1464 | enum vxge_debug_level level, |
@@ -1440,9 +1470,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh); | |||
1440 | u32 | 1470 | u32 |
1441 | vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); | 1471 | vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); |
1442 | 1472 | ||
1443 | u32 | ||
1444 | vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh); | ||
1445 | |||
1446 | /** | 1473 | /** |
1447 | * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. | 1474 | * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. |
1448 | * @buf_mode: Buffer mode (1, 3 or 5) | 1475 | * @buf_mode: Buffer mode (1, 3 or 5) |
@@ -1817,60 +1844,10 @@ struct vxge_hw_vpath_attr { | |||
1817 | struct vxge_hw_fifo_attr fifo_attr; | 1844 | struct vxge_hw_fifo_attr fifo_attr; |
1818 | }; | 1845 | }; |
1819 | 1846 | ||
1820 | enum vxge_hw_status | ||
1821 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, | ||
1822 | struct __vxge_hw_blockpool *blockpool, | ||
1823 | u32 pool_size, | ||
1824 | u32 pool_max); | ||
1825 | |||
1826 | void | ||
1827 | __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); | ||
1828 | |||
1829 | struct __vxge_hw_blockpool_entry * | ||
1830 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, | ||
1831 | u32 size); | ||
1832 | |||
1833 | void | ||
1834 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, | ||
1835 | struct __vxge_hw_blockpool_entry *entry); | ||
1836 | |||
1837 | void * | ||
1838 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, | ||
1839 | u32 size, | ||
1840 | struct vxge_hw_mempool_dma *dma_object); | ||
1841 | |||
1842 | void | ||
1843 | __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, | ||
1844 | void *memblock, | ||
1845 | u32 size, | ||
1846 | struct vxge_hw_mempool_dma *dma_object); | ||
1847 | |||
1848 | enum vxge_hw_status | ||
1849 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); | ||
1850 | |||
1851 | enum vxge_hw_status | ||
1852 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); | ||
1853 | |||
1854 | enum vxge_hw_status | ||
1855 | vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh, | ||
1856 | struct vxge_hw_device_config *dev_config, int size); | ||
1857 | |||
1858 | enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( | 1847 | enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( |
1859 | void __iomem *bar0, | 1848 | void __iomem *bar0, |
1860 | struct vxge_hw_device_hw_info *hw_info); | 1849 | struct vxge_hw_device_hw_info *hw_info); |
1861 | 1850 | ||
1862 | enum vxge_hw_status | ||
1863 | __vxge_hw_vpath_fw_ver_get( | ||
1864 | u32 vp_id, | ||
1865 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
1866 | struct vxge_hw_device_hw_info *hw_info); | ||
1867 | |||
1868 | enum vxge_hw_status | ||
1869 | __vxge_hw_vpath_card_info_get( | ||
1870 | u32 vp_id, | ||
1871 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
1872 | struct vxge_hw_device_hw_info *hw_info); | ||
1873 | |||
1874 | enum vxge_hw_status __devinit vxge_hw_device_config_default_get( | 1851 | enum vxge_hw_status __devinit vxge_hw_device_config_default_get( |
1875 | struct vxge_hw_device_config *device_config); | 1852 | struct vxge_hw_device_config *device_config); |
1876 | 1853 | ||
@@ -1954,29 +1931,6 @@ out: | |||
1954 | return vaddr; | 1931 | return vaddr; |
1955 | } | 1932 | } |
1956 | 1933 | ||
1957 | extern void vxge_hw_blockpool_block_add( | ||
1958 | struct __vxge_hw_device *devh, | ||
1959 | void *block_addr, | ||
1960 | u32 length, | ||
1961 | struct pci_dev *dma_h, | ||
1962 | struct pci_dev *acc_handle); | ||
1963 | |||
1964 | static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, | ||
1965 | unsigned long size) | ||
1966 | { | ||
1967 | gfp_t flags; | ||
1968 | void *vaddr; | ||
1969 | |||
1970 | if (in_interrupt()) | ||
1971 | flags = GFP_ATOMIC | GFP_DMA; | ||
1972 | else | ||
1973 | flags = GFP_KERNEL | GFP_DMA; | ||
1974 | |||
1975 | vaddr = kmalloc((size), flags); | ||
1976 | |||
1977 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); | ||
1978 | } | ||
1979 | |||
1980 | static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, | 1934 | static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, |
1981 | struct pci_dev **p_dma_acch) | 1935 | struct pci_dev **p_dma_acch) |
1982 | { | 1936 | { |
@@ -2010,40 +1964,6 @@ __vxge_hw_mempool_item_priv( | |||
2010 | (*memblock_item_idx) * mempool->items_priv_size; | 1964 | (*memblock_item_idx) * mempool->items_priv_size; |
2011 | } | 1965 | } |
2012 | 1966 | ||
2013 | enum vxge_hw_status | ||
2014 | __vxge_hw_mempool_grow( | ||
2015 | struct vxge_hw_mempool *mempool, | ||
2016 | u32 num_allocate, | ||
2017 | u32 *num_allocated); | ||
2018 | |||
2019 | struct vxge_hw_mempool* | ||
2020 | __vxge_hw_mempool_create( | ||
2021 | struct __vxge_hw_device *devh, | ||
2022 | u32 memblock_size, | ||
2023 | u32 item_size, | ||
2024 | u32 private_size, | ||
2025 | u32 items_initial, | ||
2026 | u32 items_max, | ||
2027 | struct vxge_hw_mempool_cbs *mp_callback, | ||
2028 | void *userdata); | ||
2029 | |||
2030 | struct __vxge_hw_channel* | ||
2031 | __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, | ||
2032 | enum __vxge_hw_channel_type type, u32 length, | ||
2033 | u32 per_dtr_space, void *userdata); | ||
2034 | |||
2035 | void | ||
2036 | __vxge_hw_channel_free( | ||
2037 | struct __vxge_hw_channel *channel); | ||
2038 | |||
2039 | enum vxge_hw_status | ||
2040 | __vxge_hw_channel_initialize( | ||
2041 | struct __vxge_hw_channel *channel); | ||
2042 | |||
2043 | enum vxge_hw_status | ||
2044 | __vxge_hw_channel_reset( | ||
2045 | struct __vxge_hw_channel *channel); | ||
2046 | |||
2047 | /* | 1967 | /* |
2048 | * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated | 1968 | * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated |
2049 | * for the fifo. | 1969 | * for the fifo. |
@@ -2065,9 +1985,6 @@ enum vxge_hw_status vxge_hw_vpath_open( | |||
2065 | struct vxge_hw_vpath_attr *attr, | 1985 | struct vxge_hw_vpath_attr *attr, |
2066 | struct __vxge_hw_vpath_handle **vpath_handle); | 1986 | struct __vxge_hw_vpath_handle **vpath_handle); |
2067 | 1987 | ||
2068 | enum vxge_hw_status | ||
2069 | __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog); | ||
2070 | |||
2071 | enum vxge_hw_status vxge_hw_vpath_close( | 1988 | enum vxge_hw_status vxge_hw_vpath_close( |
2072 | struct __vxge_hw_vpath_handle *vpath_handle); | 1989 | struct __vxge_hw_vpath_handle *vpath_handle); |
2073 | 1990 | ||
@@ -2089,55 +2006,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set( | |||
2089 | struct __vxge_hw_vpath_handle *vpath_handle, | 2006 | struct __vxge_hw_vpath_handle *vpath_handle, |
2090 | u32 new_mtu); | 2007 | u32 new_mtu); |
2091 | 2008 | ||
2092 | enum vxge_hw_status vxge_hw_vpath_stats_enable( | ||
2093 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2094 | |||
2095 | enum vxge_hw_status | ||
2096 | __vxge_hw_vpath_stats_access( | ||
2097 | struct __vxge_hw_virtualpath *vpath, | ||
2098 | u32 operation, | ||
2099 | u32 offset, | ||
2100 | u64 *stat); | ||
2101 | |||
2102 | enum vxge_hw_status | ||
2103 | __vxge_hw_vpath_xmac_tx_stats_get( | ||
2104 | struct __vxge_hw_virtualpath *vpath, | ||
2105 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats); | ||
2106 | |||
2107 | enum vxge_hw_status | ||
2108 | __vxge_hw_vpath_xmac_rx_stats_get( | ||
2109 | struct __vxge_hw_virtualpath *vpath, | ||
2110 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); | ||
2111 | |||
2112 | enum vxge_hw_status | ||
2113 | __vxge_hw_vpath_stats_get( | ||
2114 | struct __vxge_hw_virtualpath *vpath, | ||
2115 | struct vxge_hw_vpath_stats_hw_info *hw_stats); | ||
2116 | |||
2117 | void | 2009 | void |
2118 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); | 2010 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); |
2119 | 2011 | ||
2120 | enum vxge_hw_status | ||
2121 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config); | ||
2122 | |||
2123 | void | ||
2124 | __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev); | ||
2125 | |||
2126 | enum vxge_hw_status | ||
2127 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); | ||
2128 | |||
2129 | enum vxge_hw_status | ||
2130 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
2131 | |||
2132 | enum vxge_hw_status | ||
2133 | __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, | ||
2134 | struct vxge_hw_vpath_reg __iomem *vpath_reg); | ||
2135 | |||
2136 | enum vxge_hw_status | ||
2137 | __vxge_hw_device_register_poll( | ||
2138 | void __iomem *reg, | ||
2139 | u64 mask, u32 max_millis); | ||
2140 | |||
2141 | #ifndef readq | 2012 | #ifndef readq |
2142 | static inline u64 readq(void __iomem *addr) | 2013 | static inline u64 readq(void __iomem *addr) |
2143 | { | 2014 | { |
@@ -2168,64 +2039,14 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr) | |||
2168 | writel(val, addr); | 2039 | writel(val, addr); |
2169 | } | 2040 | } |
2170 | 2041 | ||
2171 | static inline enum vxge_hw_status | ||
2172 | __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, | ||
2173 | u64 mask, u32 max_millis) | ||
2174 | { | ||
2175 | enum vxge_hw_status status = VXGE_HW_OK; | ||
2176 | |||
2177 | __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); | ||
2178 | wmb(); | ||
2179 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); | ||
2180 | wmb(); | ||
2181 | |||
2182 | status = __vxge_hw_device_register_poll(addr, mask, max_millis); | ||
2183 | return status; | ||
2184 | } | ||
2185 | |||
2186 | struct vxge_hw_toc_reg __iomem * | ||
2187 | __vxge_hw_device_toc_get(void __iomem *bar0); | ||
2188 | |||
2189 | enum vxge_hw_status | ||
2190 | __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev); | ||
2191 | |||
2192 | void | ||
2193 | __vxge_hw_device_id_get(struct __vxge_hw_device *hldev); | ||
2194 | |||
2195 | void | ||
2196 | __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev); | ||
2197 | |||
2198 | enum vxge_hw_status | 2042 | enum vxge_hw_status |
2199 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); | 2043 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); |
2200 | 2044 | ||
2201 | enum vxge_hw_status | 2045 | enum vxge_hw_status |
2202 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev); | ||
2203 | |||
2204 | enum vxge_hw_status | ||
2205 | __vxge_hw_vpath_pci_read( | ||
2206 | struct __vxge_hw_virtualpath *vpath, | ||
2207 | u32 phy_func_0, | ||
2208 | u32 offset, | ||
2209 | u32 *val); | ||
2210 | |||
2211 | enum vxge_hw_status | ||
2212 | __vxge_hw_vpath_addr_get( | ||
2213 | u32 vp_id, | ||
2214 | struct vxge_hw_vpath_reg __iomem *vpath_reg, | ||
2215 | u8 (macaddr)[ETH_ALEN], | ||
2216 | u8 (macaddr_mask)[ETH_ALEN]); | ||
2217 | |||
2218 | u32 | ||
2219 | __vxge_hw_vpath_func_id_get( | ||
2220 | u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg); | ||
2221 | |||
2222 | enum vxge_hw_status | ||
2223 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); | ||
2224 | |||
2225 | enum vxge_hw_status | ||
2226 | vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); | 2046 | vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); |
2047 | |||
2227 | /** | 2048 | /** |
2228 | * vxge_debug | 2049 | * vxge_debug_ll |
2229 | * @level: level of debug verbosity. | 2050 | * @level: level of debug verbosity. |
2230 | * @mask: mask for the debug | 2051 | * @mask: mask for the debug |
2231 | * @buf: Circular buffer for tracing | 2052 | * @buf: Circular buffer for tracing |
@@ -2237,26 +2058,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); | |||
2237 | * may be compiled out if DEBUG macro was never defined. | 2058 | * may be compiled out if DEBUG macro was never defined. |
2238 | * See also: enum vxge_debug_level{}. | 2059 | * See also: enum vxge_debug_level{}. |
2239 | */ | 2060 | */ |
2240 | |||
2241 | #define vxge_trace_aux(level, mask, fmt, ...) \ | ||
2242 | {\ | ||
2243 | vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\ | ||
2244 | } | ||
2245 | |||
2246 | #define vxge_debug(module, level, mask, fmt, ...) { \ | ||
2247 | if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \ | ||
2248 | (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\ | ||
2249 | if ((mask & VXGE_DEBUG_MASK) == mask)\ | ||
2250 | vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \ | ||
2251 | } \ | ||
2252 | } | ||
2253 | |||
2254 | #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) | 2061 | #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) |
2255 | #define vxge_debug_ll(level, mask, fmt, ...) \ | 2062 | #define vxge_debug_ll(level, mask, fmt, ...) do { \ |
2256 | {\ | 2063 | if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ |
2257 | vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ | 2064 | (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ |
2258 | } | 2065 | if ((mask & VXGE_DEBUG_MASK) == mask) \ |
2259 | 2066 | printk(fmt "\n", __VA_ARGS__); \ | |
2067 | } while (0) | ||
2260 | #else | 2068 | #else |
2261 | #define vxge_debug_ll(level, mask, fmt, ...) | 2069 | #define vxge_debug_ll(level, mask, fmt, ...) |
2262 | #endif | 2070 | #endif |
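The do { } while (0) body lets the rewritten macro sit safely under a bare if/else without swallowing the else branch. A usage sketch; level and mask choices are illustrative:

	static void demo_log(int vp_id)
	{
		if (vp_id >= 0)
			vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_MASK,
				      "vpath %d opened", vp_id);
		else
			vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
				      "bad vpath id %d", vp_id);
	}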
@@ -2276,4 +2084,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | |||
2276 | 2084 | ||
2277 | enum vxge_hw_status | 2085 | enum vxge_hw_status |
2278 | __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); | 2086 | __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); |
2087 | |||
2088 | #define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5 | ||
2089 | #define VXGE_HW_MAX_POLLING_COUNT 100 | ||
2090 | |||
2091 | void | ||
2092 | vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev); | ||
2093 | |||
2094 | enum vxge_hw_status | ||
2095 | vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, | ||
2096 | u32 *minor, u32 *build); | ||
2097 | |||
2098 | enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev); | ||
2099 | |||
2100 | enum vxge_hw_status | ||
2101 | vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf, | ||
2102 | int size); | ||
2103 | |||
2104 | enum vxge_hw_status | ||
2105 | vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, | ||
2106 | struct eprom_image *eprom_image_data); | ||
2107 | |||
2108 | int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id); | ||
2279 | #endif | 2109 | #endif |
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index 05679e306fdd..92dd72d3f9de 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * Virtualized Server Adapter. | 11 | * Virtualized Server Adapter. |
12 | * Copyright(c) 2002-2010 Exar Corp. | 12 | * Copyright(c) 2002-2010 Exar Corp. |
13 | ******************************************************************************/ | 13 | ******************************************************************************/ |
14 | #include<linux/ethtool.h> | 14 | #include <linux/ethtool.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
@@ -29,12 +29,12 @@ | |||
29 | * Return value: | 29 | * Return value: |
30 | * 0 on success. | 30 | * 0 on success. |
31 | */ | 31 | */ |
32 | |||
33 | static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) | 32 | static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) |
34 | { | 33 | { |
35 | /* We currently only support 10Gb/FULL */ | 34 | /* We currently only support 10Gb/FULL */ |
36 | if ((info->autoneg == AUTONEG_ENABLE) || | 35 | if ((info->autoneg == AUTONEG_ENABLE) || |
37 | (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) | 36 | (ethtool_cmd_speed(info) != SPEED_10000) || |
37 | (info->duplex != DUPLEX_FULL)) | ||
38 | return -EINVAL; | 38 | return -EINVAL; |
39 | 39 | ||
40 | return 0; | 40 | return 0; |
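The switch to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessor pair is what lets wide speed values round-trip: the helpers split the value across the legacy 16-bit speed field and speed_hi. A minimal sketch:

	#include <linux/ethtool.h>

	static bool demo_speed_roundtrip(struct ethtool_cmd *cmd)
	{
		ethtool_cmd_speed_set(cmd, SPEED_10000); /* fills speed + speed_hi */
		return ethtool_cmd_speed(cmd) == SPEED_10000;
	}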
@@ -59,10 +59,10 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) | |||
59 | info->transceiver = XCVR_EXTERNAL; | 59 | info->transceiver = XCVR_EXTERNAL; |
60 | 60 | ||
61 | if (netif_carrier_ok(dev)) { | 61 | if (netif_carrier_ok(dev)) { |
62 | info->speed = SPEED_10000; | 62 | ethtool_cmd_speed_set(info, SPEED_10000); |
63 | info->duplex = DUPLEX_FULL; | 63 | info->duplex = DUPLEX_FULL; |
64 | } else { | 64 | } else { |
65 | info->speed = -1; | 65 | ethtool_cmd_speed_set(info, -1); |
66 | info->duplex = -1; | 66 | info->duplex = -1; |
67 | } | 67 | } |
68 | 68 | ||
@@ -79,10 +79,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) | |||
79 | * Returns driver specific information like name, version etc. to ethtool. | 79 |
80 | */ | 80 | */ |
81 | static void vxge_ethtool_gdrvinfo(struct net_device *dev, | 81 | static void vxge_ethtool_gdrvinfo(struct net_device *dev, |
82 | struct ethtool_drvinfo *info) | 82 | struct ethtool_drvinfo *info) |
83 | { | 83 | { |
84 | struct vxgedev *vdev; | 84 | struct vxgedev *vdev = netdev_priv(dev); |
85 | vdev = (struct vxgedev *)netdev_priv(dev); | ||
86 | strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); | 85 | strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); |
87 | strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); | 86 | strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); |
88 | strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); | 87 | strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); |
@@ -104,15 +103,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev, | |||
104 | * buffer area. | 103 | * buffer area. |
105 | */ | 104 | */ |
106 | static void vxge_ethtool_gregs(struct net_device *dev, | 105 | static void vxge_ethtool_gregs(struct net_device *dev, |
107 | struct ethtool_regs *regs, void *space) | 106 | struct ethtool_regs *regs, void *space) |
108 | { | 107 | { |
109 | int index, offset; | 108 | int index, offset; |
110 | enum vxge_hw_status status; | 109 | enum vxge_hw_status status; |
111 | u64 reg; | 110 | u64 reg; |
112 | u64 *reg_space = (u64 *) space; | 111 | u64 *reg_space = (u64 *)space; |
113 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 112 | struct vxgedev *vdev = netdev_priv(dev); |
114 | struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) | 113 | struct __vxge_hw_device *hldev = vdev->devh; |
115 | pci_get_drvdata(vdev->pdev); | ||
116 | 114 | ||
117 | regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; | 115 | regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; |
118 | regs->version = vdev->pdev->subsystem_device; | 116 | regs->version = vdev->pdev->subsystem_device; |
@@ -137,23 +135,29 @@ static void vxge_ethtool_gregs(struct net_device *dev, | |||
137 | /** | 135 | /** |
138 | * vxge_ethtool_idnic - To physically identify the nic on the system. | 136 | * vxge_ethtool_idnic - To physically identify the nic on the system. |
139 | * @dev : device pointer. | 137 | * @dev : device pointer. |
140 | * @id : pointer to the structure with identification parameters given by | 138 | * @state : requested LED state |
141 | * ethtool. | ||
142 | * | 139 | * |
143 | * Used to physically identify the NIC on the system. | 140 | * Used to physically identify the NIC on the system. |
144 | * The Link LED will blink for a time specified by the user. | ||
145 | * Return value: | ||
146 | * 0 on success | 141 | * 0 on success |
147 | */ | 142 | */ |
148 | static int vxge_ethtool_idnic(struct net_device *dev, u32 data) | 143 | static int vxge_ethtool_idnic(struct net_device *dev, |
144 | enum ethtool_phys_id_state state) | ||
149 | { | 145 | { |
150 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 146 | struct vxgedev *vdev = netdev_priv(dev); |
151 | struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) | 147 | struct __vxge_hw_device *hldev = vdev->devh; |
152 | pci_get_drvdata(vdev->pdev); | 148 | |
149 | switch (state) { | ||
150 | case ETHTOOL_ID_ACTIVE: | ||
151 | vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); | ||
152 | break; | ||
153 | |||
154 | case ETHTOOL_ID_INACTIVE: | ||
155 | vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF); | ||
156 | break; | ||
153 | 157 | ||
154 | vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); | 158 | default: |
155 | msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); | 159 | return -EINVAL; |
156 | vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF); | 160 | } |
157 | 161 | ||
158 | return 0; | 162 | return 0; |
159 | } | 163 | } |
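With the (dev, state) signature the handler no longer sleeps or loops; the ethtool core drives the blink cadence and calls back with ACTIVE/INACTIVE. A hedged sketch of how such a handler is wired up (the ops struct here is illustrative, not the driver's full vxge_ethtool_ops):

	static const struct ethtool_ops demo_ethtool_ops = {
		.set_phys_id = vxge_ethtool_idnic,
	};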
@@ -168,11 +172,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data) | |||
168 | * void | 172 | * void |
169 | */ | 173 | */ |
170 | static void vxge_ethtool_getpause_data(struct net_device *dev, | 174 | static void vxge_ethtool_getpause_data(struct net_device *dev, |
171 | struct ethtool_pauseparam *ep) | 175 | struct ethtool_pauseparam *ep) |
172 | { | 176 | { |
173 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 177 | struct vxgedev *vdev = netdev_priv(dev); |
174 | struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) | 178 | struct __vxge_hw_device *hldev = vdev->devh; |
175 | pci_get_drvdata(vdev->pdev); | ||
176 | 179 | ||
177 | vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); | 180 | vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); |
178 | } | 181 | } |
@@ -188,11 +191,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev, | |||
188 | * int, returns 0 on Success | 191 | * int, returns 0 on Success |
189 | */ | 192 | */ |
190 | static int vxge_ethtool_setpause_data(struct net_device *dev, | 193 | static int vxge_ethtool_setpause_data(struct net_device *dev, |
191 | struct ethtool_pauseparam *ep) | 194 | struct ethtool_pauseparam *ep) |
192 | { | 195 | { |
193 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 196 | struct vxgedev *vdev = netdev_priv(dev); |
194 | struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) | 197 | struct __vxge_hw_device *hldev = vdev->devh; |
195 | pci_get_drvdata(vdev->pdev); | ||
196 | 198 | ||
197 | vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); | 199 | vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); |
198 | 200 | ||
@@ -209,9 +211,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev, | |||
209 | enum vxge_hw_status status; | 211 | enum vxge_hw_status status; |
210 | enum vxge_hw_status swstatus; | 212 | enum vxge_hw_status swstatus; |
211 | struct vxge_vpath *vpath = NULL; | 213 | struct vxge_vpath *vpath = NULL; |
212 | 214 | struct vxgedev *vdev = netdev_priv(dev); | |
213 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 215 | struct __vxge_hw_device *hldev = vdev->devh; |
214 | struct __vxge_hw_device *hldev = vdev->devh; | ||
215 | struct vxge_hw_xmac_stats *xmac_stats; | 216 | struct vxge_hw_xmac_stats *xmac_stats; |
216 | struct vxge_hw_device_stats_sw_info *sw_stats; | 217 | struct vxge_hw_device_stats_sw_info *sw_stats; |
217 | struct vxge_hw_device_stats_hw_info *hw_stats; | 218 | struct vxge_hw_device_stats_hw_info *hw_stats; |
@@ -574,12 +575,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev, | |||
574 | kfree(hw_stats); | 575 | kfree(hw_stats); |
575 | } | 576 | } |
576 | 577 | ||
577 | static void vxge_ethtool_get_strings(struct net_device *dev, | 578 | static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset, |
578 | u32 stringset, u8 *data) | 579 | u8 *data) |
579 | { | 580 | { |
580 | int stat_size = 0; | 581 | int stat_size = 0; |
581 | int i, j; | 582 | int i, j; |
582 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 583 | struct vxgedev *vdev = netdev_priv(dev); |
583 | switch (stringset) { | 584 | switch (stringset) { |
584 | case ETH_SS_STATS: | 585 | case ETH_SS_STATS: |
585 | vxge_add_string("VPATH STATISTICS%s\t\t\t", | 586 | vxge_add_string("VPATH STATISTICS%s\t\t\t", |
@@ -1066,43 +1067,14 @@ static void vxge_ethtool_get_strings(struct net_device *dev, | |||
1066 | 1067 | ||
1067 | static int vxge_ethtool_get_regs_len(struct net_device *dev) | 1068 | static int vxge_ethtool_get_regs_len(struct net_device *dev) |
1068 | { | 1069 | { |
1069 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 1070 | struct vxgedev *vdev = netdev_priv(dev); |
1070 | 1071 | ||
1071 | return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; | 1072 | return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; |
1072 | } | 1073 | } |
1073 | 1074 | ||
1074 | static u32 vxge_get_rx_csum(struct net_device *dev) | ||
1075 | { | ||
1076 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | ||
1077 | |||
1078 | return vdev->rx_csum; | ||
1079 | } | ||
1080 | |||
1081 | static int vxge_set_rx_csum(struct net_device *dev, u32 data) | ||
1082 | { | ||
1083 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | ||
1084 | |||
1085 | if (data) | ||
1086 | vdev->rx_csum = 1; | ||
1087 | else | ||
1088 | vdev->rx_csum = 0; | ||
1089 | |||
1090 | return 0; | ||
1091 | } | ||
1092 | |||
1093 | static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data) | ||
1094 | { | ||
1095 | if (data) | ||
1096 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); | ||
1097 | else | ||
1098 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
1099 | |||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
1103 | static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) | 1075 | static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) |
1104 | { | 1076 | { |
1105 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 1077 | struct vxgedev *vdev = netdev_priv(dev); |
1106 | 1078 | ||
1107 | switch (sset) { | 1079 | switch (sset) { |
1108 | case ETH_SS_STATS: | 1080 | case ETH_SS_STATS: |
@@ -1119,6 +1091,25 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) | |||
1119 | } | 1091 | } |
1120 | } | 1092 | } |
1121 | 1093 | ||
1094 | static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) | ||
1095 | { | ||
1096 | struct vxgedev *vdev = netdev_priv(dev); | ||
1097 | |||
1098 | if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) { | ||
1099 | printk(KERN_INFO "Single Function Mode is required to flash the" | ||
1100 | " firmware\n"); | ||
1101 | return -EINVAL; | ||
1102 | } | ||
1103 | |||
1104 | if (netif_running(dev)) { | ||
1105 | printk(KERN_INFO "Interface %s must be down to flash the " | ||
1106 | "firmware\n", dev->name); | ||
1107 | return -EBUSY; | ||
1108 | } | ||
1109 | |||
1110 | return vxge_fw_upgrade(vdev, parms->data, 1); | ||
1111 | } | ||
1112 | |||
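The new flash_device hook is reached through the ETHTOOL_FLASHDEV ioctl. A rough user-space sketch of how it could be driven, assuming the struct ethtool_flash layout of this era's linux/ethtool.h; the interface and firmware file names are placeholders, and the driver side typically resolves the name via request_firmware(), so the file is looked up under the firmware search path:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int flash_firmware(int sock, const char *ifname, const char *fw_file)
{
        struct ethtool_flash efl;
        struct ifreq ifr;

        memset(&efl, 0, sizeof(efl));
        efl.cmd = ETHTOOL_FLASHDEV;
        efl.region = 0;                         /* whole device */
        strncpy(efl.data, fw_file, ETHTOOL_FLASH_MAX_FILENAME - 1);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&efl;

        return ioctl(sock, SIOCETHTOOL, &ifr);  /* 0 on success */
}

/* usage sketch:
 *   int s = socket(AF_INET, SOCK_DGRAM, 0);
 *   flash_firmware(s, "eth0", "vxge_fw.bin");  -- placeholder names
 */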
1122 | static const struct ethtool_ops vxge_ethtool_ops = { | 1113 | static const struct ethtool_ops vxge_ethtool_ops = { |
1123 | .get_settings = vxge_ethtool_gset, | 1114 | .get_settings = vxge_ethtool_gset, |
1124 | .set_settings = vxge_ethtool_sset, | 1115 | .set_settings = vxge_ethtool_sset, |
@@ -1128,21 +1119,14 @@ static const struct ethtool_ops vxge_ethtool_ops = { | |||
1128 | .get_link = ethtool_op_get_link, | 1119 | .get_link = ethtool_op_get_link, |
1129 | .get_pauseparam = vxge_ethtool_getpause_data, | 1120 | .get_pauseparam = vxge_ethtool_getpause_data, |
1130 | .set_pauseparam = vxge_ethtool_setpause_data, | 1121 | .set_pauseparam = vxge_ethtool_setpause_data, |
1131 | .get_rx_csum = vxge_get_rx_csum, | ||
1132 | .set_rx_csum = vxge_set_rx_csum, | ||
1133 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1134 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
1135 | .get_sg = ethtool_op_get_sg, | ||
1136 | .set_sg = ethtool_op_set_sg, | ||
1137 | .get_tso = ethtool_op_get_tso, | ||
1138 | .set_tso = vxge_ethtool_op_set_tso, | ||
1139 | .get_strings = vxge_ethtool_get_strings, | 1122 | .get_strings = vxge_ethtool_get_strings, |
1140 | .phys_id = vxge_ethtool_idnic, | 1123 | .set_phys_id = vxge_ethtool_idnic, |
1141 | .get_sset_count = vxge_ethtool_get_sset_count, | 1124 | .get_sset_count = vxge_ethtool_get_sset_count, |
1142 | .get_ethtool_stats = vxge_get_ethtool_stats, | 1125 | .get_ethtool_stats = vxge_get_ethtool_stats, |
1126 | .flash_device = vxge_fw_flash, | ||
1143 | }; | 1127 | }; |
1144 | 1128 | ||
1145 | void initialize_ethtool_ops(struct net_device *ndev) | 1129 | void vxge_initialize_ethtool_ops(struct net_device *ndev) |
1146 | { | 1130 | { |
1147 | SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); | 1131 | SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); |
1148 | } | 1132 | } |
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index c7c5605b3728..8ab870a2ad02 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #include <net/ip.h> | 50 | #include <net/ip.h> |
51 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
52 | #include <linux/etherdevice.h> | 52 | #include <linux/etherdevice.h> |
53 | #include <linux/firmware.h> | ||
54 | #include <linux/net_tstamp.h> | ||
55 | #include <linux/prefetch.h> | ||
53 | #include "vxge-main.h" | 56 | #include "vxge-main.h" |
54 | #include "vxge-reg.h" | 57 | #include "vxge-reg.h" |
55 | 58 | ||
@@ -138,11 +141,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) | |||
138 | * This function is called during interrupt context to notify link up state | 141 | * This function is called during interrupt context to notify link up state |
139 | * change. | 142 | * change. |
140 | */ | 143 | */ |
141 | void | 144 | static void vxge_callback_link_up(struct __vxge_hw_device *hldev) |
142 | vxge_callback_link_up(struct __vxge_hw_device *hldev) | ||
143 | { | 145 | { |
144 | struct net_device *dev = hldev->ndev; | 146 | struct net_device *dev = hldev->ndev; |
145 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 147 | struct vxgedev *vdev = netdev_priv(dev); |
146 | 148 | ||
147 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", | 149 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", |
148 | vdev->ndev->name, __func__, __LINE__); | 150 | vdev->ndev->name, __func__, __LINE__); |
@@ -162,11 +164,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev) | |||
162 | * This function is called during interrupt context to notify link down state | 164 | * This function is called during interrupt context to notify link down state |
163 | * change. | 165 | * change. |
164 | */ | 166 | */ |
165 | void | 167 | static void vxge_callback_link_down(struct __vxge_hw_device *hldev) |
166 | vxge_callback_link_down(struct __vxge_hw_device *hldev) | ||
167 | { | 168 | { |
168 | struct net_device *dev = hldev->ndev; | 169 | struct net_device *dev = hldev->ndev; |
169 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 170 | struct vxgedev *vdev = netdev_priv(dev); |
170 | 171 | ||
171 | vxge_debug_entryexit(VXGE_TRACE, | 172 | vxge_debug_entryexit(VXGE_TRACE, |
172 | "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); | 173 | "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); |
@@ -185,7 +186,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev) | |||
185 | * | 186 | * |
186 | * Allocate SKB. | 187 | * Allocate SKB. |
187 | */ | 188 | */ |
188 | static struct sk_buff* | 189 | static struct sk_buff * |
189 | vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) | 190 | vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) |
190 | { | 191 | { |
191 | struct net_device *dev; | 192 | struct net_device *dev; |
@@ -304,22 +305,14 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, | |||
304 | "%s: %s:%d skb protocol = %d", | 305 | "%s: %s:%d skb protocol = %d", |
305 | ring->ndev->name, __func__, __LINE__, skb->protocol); | 306 | ring->ndev->name, __func__, __LINE__, skb->protocol); |
306 | 307 | ||
307 | if (ring->gro_enable) { | 308 | if (ring->vlgrp && ext_info->vlan && |
308 | if (ring->vlgrp && ext_info->vlan && | 309 | (ring->vlan_tag_strip == |
309 | (ring->vlan_tag_strip == | 310 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) |
310 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) | 311 | vlan_gro_receive(ring->napi_p, ring->vlgrp, |
311 | vlan_gro_receive(ring->napi_p, ring->vlgrp, | 312 | ext_info->vlan, skb); |
312 | ext_info->vlan, skb); | 313 | else |
313 | else | 314 | napi_gro_receive(ring->napi_p, skb); |
314 | napi_gro_receive(ring->napi_p, skb); | 315 | |
315 | } else { | ||
316 | if (ring->vlgrp && vlan && | ||
317 | (ring->vlan_tag_strip == | ||
318 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) | ||
319 | vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan); | ||
320 | else | ||
321 | netif_receive_skb(skb); | ||
322 | } | ||
323 | vxge_debug_entryexit(VXGE_TRACE, | 316 | vxge_debug_entryexit(VXGE_TRACE, |
324 | "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); | 317 | "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); |
325 | } | 318 | } |
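With the gro_enable knob removed above, every completed frame now goes through the GRO entry points unconditionally; napi_gro_receive() itself falls back to the plain receive path when NETIF_F_GRO is off, so the per-ring flag became redundant. The hand-off reduced to its core, with hypothetical names:

/* Sketch of the receive hand-off after this change: always call the
 * GRO entry point and let the core decide whether to aggregate.
 */
static void demo_rx_complete(struct napi_struct *napi,
                             struct vlan_group *vlgrp,
                             struct sk_buff *skb, u16 vlan_tag,
                             bool tag_stripped)
{
        if (vlgrp && vlan_tag && tag_stripped)
                vlan_gro_receive(napi, vlgrp, vlan_tag, skb);
        else
                napi_gro_receive(napi, skb);
}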
@@ -354,12 +347,12 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr, | |||
354 | * If the interrupt is because of a received frame or if the receive ring | 347 | * If the interrupt is because of a received frame or if the receive ring |
355 | * contains fresh as yet un-processed frames, this function is called. | 348 | * contains fresh as yet un-processed frames, this function is called. |
356 | */ | 349 | */ |
357 | enum vxge_hw_status | 350 | static enum vxge_hw_status |
358 | vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | 351 | vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, |
359 | u8 t_code, void *userdata) | 352 | u8 t_code, void *userdata) |
360 | { | 353 | { |
361 | struct vxge_ring *ring = (struct vxge_ring *)userdata; | 354 | struct vxge_ring *ring = (struct vxge_ring *)userdata; |
362 | struct net_device *dev = ring->ndev; | 355 | struct net_device *dev = ring->ndev; |
363 | unsigned int dma_sizes; | 356 | unsigned int dma_sizes; |
364 | void *first_dtr = NULL; | 357 | void *first_dtr = NULL; |
365 | int dtr_cnt = 0; | 358 | int dtr_cnt = 0; |
@@ -371,9 +364,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
371 | struct vxge_hw_ring_rxd_info ext_info; | 364 | struct vxge_hw_ring_rxd_info ext_info; |
372 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", | 365 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", |
373 | ring->ndev->name, __func__, __LINE__); | 366 | ring->ndev->name, __func__, __LINE__); |
374 | ring->pkts_processed = 0; | ||
375 | |||
376 | vxge_hw_ring_replenish(ringh); | ||
377 | 367 | ||
378 | do { | 368 | do { |
379 | prefetch((char *)dtr + L1_CACHE_BYTES); | 369 | prefetch((char *)dtr + L1_CACHE_BYTES); |
@@ -403,7 +393,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
403 | 393 | ||
404 | prefetch((char *)skb + L1_CACHE_BYTES); | 394 | prefetch((char *)skb + L1_CACHE_BYTES); |
405 | if (unlikely(t_code)) { | 395 | if (unlikely(t_code)) { |
406 | |||
407 | if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != | 396 | if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != |
408 | VXGE_HW_OK) { | 397 | VXGE_HW_OK) { |
409 | 398 | ||
@@ -426,9 +415,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
426 | } | 415 | } |
427 | 416 | ||
428 | if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { | 417 | if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { |
429 | |||
430 | if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { | 418 | if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { |
431 | |||
432 | if (!vxge_rx_map(dtr, ring)) { | 419 | if (!vxge_rx_map(dtr, ring)) { |
433 | skb_put(skb, pkt_length); | 420 | skb_put(skb, pkt_length); |
434 | 421 | ||
@@ -496,12 +483,29 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
496 | 483 | ||
497 | if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && | 484 | if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && |
498 | !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && | 485 | !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && |
499 | ring->rx_csum && /* Offload Rx side CSUM */ | 486 | (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */ |
500 | ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && | 487 | ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && |
501 | ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) | 488 | ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) |
502 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 489 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
503 | else | 490 | else |
504 | skb->ip_summed = CHECKSUM_NONE; | 491 | skb_checksum_none_assert(skb); |
492 | |||
493 | |||
494 | if (ring->rx_hwts) { | ||
495 | struct skb_shared_hwtstamps *skb_hwts; | ||
496 | u32 ns = *(u32 *)(skb->head + pkt_length); | ||
497 | |||
498 | skb_hwts = skb_hwtstamps(skb); | ||
499 | skb_hwts->hwtstamp = ns_to_ktime(ns); | ||
500 | skb_hwts->syststamp.tv64 = 0; | ||
501 | } | ||
502 | |||
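The hardware appends a raw 32-bit nanosecond stamp past the payload, and the block above converts it into the shared hwtstamp that SO_TIMESTAMPING readers consume. The same conversion in isolation, assuming the caller already extracted the raw value:

/* Sketch: attach a hardware RX timestamp to an skb. ns is the raw
 * counter the NIC appended after the frame; ns_to_ktime() produces
 * the ktime_t expected in skb_shared_hwtstamps.
 */
static void demo_stamp_rx_skb(struct sk_buff *skb, u32 ns)
{
        struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

        memset(hwts, 0, sizeof(*hwts));         /* clears syststamp too */
        hwts->hwtstamp = ns_to_ktime(ns);
}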
503 | /* rth_hash_type and rth_it_hit are non-zero regardless of | ||
504 | * whether rss is enabled. Only the rth_value is zero/non-zero | ||
505 | * if rss is disabled/enabled, so key off of that. | ||
506 | */ | ||
507 | if (ext_info.rth_value) | ||
508 | skb->rxhash = ext_info.rth_value; | ||
505 | 509 | ||
506 | vxge_rx_complete(ring, skb, ext_info.vlan, | 510 | vxge_rx_complete(ring, skb, ext_info.vlan, |
507 | pkt_length, &ext_info); | 511 | pkt_length, &ext_info); |
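Publishing the RTH value as skb->rxhash spares the core a software hash computation for RPS/RFS flow steering. A paraphrase of the consumer-side logic, not the actual net/core implementation:

/* Simplified view of why a driver-supplied rxhash helps: the core
 * only computes a software flow hash when skb->rxhash is still zero,
 * so the non-zero RTH value set above short-circuits that work.
 */
static u32 demo_get_rxhash(struct sk_buff *skb)
{
        if (skb->rxhash)                        /* set by the NIC */
                return skb->rxhash;
        return demo_sw_flow_hash(skb);          /* hypothetical fallback */
}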
@@ -531,7 +535,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
531 | * freed and frees all skbs whose data have already DMA'ed into the NICs | 535 | * freed and frees all skbs whose data have already DMA'ed into the NICs |
532 | * internal memory. | 536 | * internal memory. |
533 | */ | 537 | */ |
534 | enum vxge_hw_status | 538 | static enum vxge_hw_status |
535 | vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, | 539 | vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, |
536 | enum vxge_hw_fifo_tcode t_code, void *userdata, | 540 | enum vxge_hw_fifo_tcode t_code, void *userdata, |
537 | struct sk_buff ***skb_ptr, int nr_skb, int *more) | 541 | struct sk_buff ***skb_ptr, int nr_skb, int *more) |
@@ -650,6 +654,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list( | |||
650 | return FALSE; | 654 | return FALSE; |
651 | } | 655 | } |
652 | 656 | ||
657 | static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) | ||
658 | { | ||
659 | struct vxge_mac_addrs *new_mac_entry; | ||
660 | u8 *mac_address = NULL; | ||
661 | |||
662 | if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) | ||
663 | return TRUE; | ||
664 | |||
665 | new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); | ||
666 | if (!new_mac_entry) { | ||
667 | vxge_debug_mem(VXGE_ERR, | ||
668 | "%s: memory allocation failed", | ||
669 | VXGE_DRIVER_NAME); | ||
670 | return FALSE; | ||
671 | } | ||
672 | |||
673 | list_add(&new_mac_entry->item, &vpath->mac_addr_list); | ||
674 | |||
675 | /* Copy the new mac address to the list */ | ||
676 | mac_address = (u8 *)&new_mac_entry->macaddr; | ||
677 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
678 | |||
679 | new_mac_entry->state = mac->state; | ||
680 | vpath->mac_addr_cnt++; | ||
681 | |||
682 | /* Is this a multicast address */ | ||
683 | if (0x01 & mac->macaddr[0]) | ||
684 | vpath->mcast_addr_cnt++; | ||
685 | |||
686 | return TRUE; | ||
687 | } | ||
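The 0x01 & mac->macaddr[0] test reads the I/G (multicast) bit of the first address octet. A small worked example, plus the equivalent helper from <linux/etherdevice.h>:

/* The low bit of the first octet is the multicast (I/G) bit:
 *   01:00:5e:00:00:01 -> 0x01 & 0x01 == 1, multicast
 *   00:1b:21:aa:bb:cc -> 0x01 & 0x00 == 0, unicast
 */
static inline bool demo_is_mcast(const u8 *addr)
{
        return is_multicast_ether_addr(addr);   /* same test as 0x01 & addr[0] */
}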
688 | |||
689 | /* Add a mac address to DA table */ | ||
690 | static enum vxge_hw_status | ||
691 | vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
692 | { | ||
693 | enum vxge_hw_status status = VXGE_HW_OK; | ||
694 | struct vxge_vpath *vpath; | ||
695 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; | ||
696 | |||
697 | if (0x01 & mac->macaddr[0]) /* multicast address */ | ||
698 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; | ||
699 | else | ||
700 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; | ||
701 | |||
702 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
703 | status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, | ||
704 | mac->macmask, duplicate_mode); | ||
705 | if (status != VXGE_HW_OK) { | ||
706 | vxge_debug_init(VXGE_ERR, | ||
707 | "DA config add entry failed for vpath:%d", | ||
708 | vpath->device_id); | ||
709 | } else | ||
710 | if (FALSE == vxge_mac_list_add(vpath, mac)) | ||
711 | status = -EPERM; | ||
712 | |||
713 | return status; | ||
714 | } | ||
715 | |||
653 | static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) | 716 | static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) |
654 | { | 717 | { |
655 | struct macInfo mac_info; | 718 | struct macInfo mac_info; |
@@ -660,7 +723,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) | |||
660 | struct vxge_vpath *vpath = NULL; | 723 | struct vxge_vpath *vpath = NULL; |
661 | struct __vxge_hw_device *hldev; | 724 | struct __vxge_hw_device *hldev; |
662 | 725 | ||
663 | hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); | 726 | hldev = pci_get_drvdata(vdev->pdev); |
664 | 727 | ||
665 | mac_address = (u8 *)&mac_addr; | 728 | mac_address = (u8 *)&mac_addr; |
666 | memcpy(mac_address, mac_header, ETH_ALEN); | 729 | memcpy(mac_address, mac_header, ETH_ALEN); |
@@ -759,7 +822,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) | |||
759 | return NETDEV_TX_OK; | 822 | return NETDEV_TX_OK; |
760 | } | 823 | } |
761 | 824 | ||
762 | vdev = (struct vxgedev *)netdev_priv(dev); | 825 | vdev = netdev_priv(dev); |
763 | 826 | ||
764 | if (unlikely(!is_vxge_card_up(vdev))) { | 827 | if (unlikely(!is_vxge_card_up(vdev))) { |
765 | vxge_debug_tx(VXGE_ERR, | 828 | vxge_debug_tx(VXGE_ERR, |
@@ -822,7 +885,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) | |||
822 | dev->name, __func__, __LINE__, | 885 | dev->name, __func__, __LINE__, |
823 | fifo_hw, dtr, dtr_priv); | 886 | fifo_hw, dtr, dtr_priv); |
824 | 887 | ||
825 | if (vdev->vlgrp && vlan_tx_tag_present(skb)) { | 888 | if (vlan_tx_tag_present(skb)) { |
826 | u16 vlan_tag = vlan_tx_tag_get(skb); | 889 | u16 vlan_tag = vlan_tx_tag_get(skb); |
827 | vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); | 890 | vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); |
828 | } | 891 | } |
@@ -995,6 +1058,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) | |||
995 | "%s:%d Exiting...", __func__, __LINE__); | 1058 | "%s:%d Exiting...", __func__, __LINE__); |
996 | } | 1059 | } |
997 | 1060 | ||
1061 | static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1062 | { | ||
1063 | struct list_head *entry, *next; | ||
1064 | u64 del_mac = 0; | ||
1065 | u8 *mac_address = (u8 *) (&del_mac); | ||
1066 | |||
1067 | /* Copy the mac address to delete from the list */ | ||
1068 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1069 | |||
1070 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1071 | if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { | ||
1072 | list_del(entry); | ||
1073 | kfree((struct vxge_mac_addrs *)entry); | ||
1074 | vpath->mac_addr_cnt--; | ||
1075 | |||
1076 | /* Is this a multicast address */ | ||
1077 | if (0x01 & mac->macaddr[0]) | ||
1078 | vpath->mcast_addr_cnt--; | ||
1079 | return TRUE; | ||
1080 | } | ||
1081 | } | ||
1082 | |||
1083 | return FALSE; | ||
1084 | } | ||
1085 | |||
1086 | /* delete a mac address from DA table */ | ||
1087 | static enum vxge_hw_status | ||
1088 | vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
1089 | { | ||
1090 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1091 | struct vxge_vpath *vpath; | ||
1092 | |||
1093 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1094 | status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, | ||
1095 | mac->macmask); | ||
1096 | if (status != VXGE_HW_OK) { | ||
1097 | vxge_debug_init(VXGE_ERR, | ||
1098 | "DA config delete entry failed for vpath:%d", | ||
1099 | vpath->device_id); | ||
1100 | } else | ||
1101 | vxge_mac_list_del(vpath, mac); | ||
1102 | return status; | ||
1103 | } | ||
1104 | |||
998 | /** | 1105 | /** |
999 | * vxge_set_multicast | 1106 | * vxge_set_multicast |
1000 | * @dev: pointer to the device structure | 1107 | * @dev: pointer to the device structure |
@@ -1024,7 +1131,7 @@ static void vxge_set_multicast(struct net_device *dev) | |||
1024 | vxge_debug_entryexit(VXGE_TRACE, | 1131 | vxge_debug_entryexit(VXGE_TRACE, |
1025 | "%s:%d", __func__, __LINE__); | 1132 | "%s:%d", __func__, __LINE__); |
1026 | 1133 | ||
1027 | vdev = (struct vxgedev *)netdev_priv(dev); | 1134 | vdev = netdev_priv(dev); |
1028 | hldev = (struct __vxge_hw_device *)vdev->devh; | 1135 | hldev = (struct __vxge_hw_device *)vdev->devh; |
1029 | 1136 | ||
1030 | if (unlikely(!is_vxge_card_up(vdev))) | 1137 | if (unlikely(!is_vxge_card_up(vdev))) |
@@ -1084,7 +1191,7 @@ static void vxge_set_multicast(struct net_device *dev) | |||
1084 | /* Delete previous MC's */ | 1191 | /* Delete previous MC's */ |
1085 | for (i = 0; i < mcast_cnt; i++) { | 1192 | for (i = 0; i < mcast_cnt; i++) { |
1086 | list_for_each_safe(entry, next, list_head) { | 1193 | list_for_each_safe(entry, next, list_head) { |
1087 | mac_entry = (struct vxge_mac_addrs *) entry; | 1194 | mac_entry = (struct vxge_mac_addrs *)entry; |
1088 | /* Copy the mac address to delete */ | 1195 | /* Copy the mac address to delete */ |
1089 | mac_address = (u8 *)&mac_entry->macaddr; | 1196 | mac_address = (u8 *)&mac_entry->macaddr; |
1090 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | 1197 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); |
@@ -1127,7 +1234,7 @@ _set_all_mcast: | |||
1127 | /* Delete previous MC's */ | 1234 | /* Delete previous MC's */ |
1128 | for (i = 0; i < mcast_cnt; i++) { | 1235 | for (i = 0; i < mcast_cnt; i++) { |
1129 | list_for_each_safe(entry, next, list_head) { | 1236 | list_for_each_safe(entry, next, list_head) { |
1130 | mac_entry = (struct vxge_mac_addrs *) entry; | 1237 | mac_entry = (struct vxge_mac_addrs *)entry; |
1131 | /* Copy the mac address to delete */ | 1238 | /* Copy the mac address to delete */ |
1132 | mac_address = (u8 *)&mac_entry->macaddr; | 1239 | mac_address = (u8 *)&mac_entry->macaddr; |
1133 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | 1240 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); |
@@ -1174,14 +1281,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) | |||
1174 | { | 1281 | { |
1175 | struct sockaddr *addr = p; | 1282 | struct sockaddr *addr = p; |
1176 | struct vxgedev *vdev; | 1283 | struct vxgedev *vdev; |
1177 | struct __vxge_hw_device *hldev; | 1284 | struct __vxge_hw_device *hldev; |
1178 | enum vxge_hw_status status = VXGE_HW_OK; | 1285 | enum vxge_hw_status status = VXGE_HW_OK; |
1179 | struct macInfo mac_info_new, mac_info_old; | 1286 | struct macInfo mac_info_new, mac_info_old; |
1180 | int vpath_idx = 0; | 1287 | int vpath_idx = 0; |
1181 | 1288 | ||
1182 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 1289 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
1183 | 1290 | ||
1184 | vdev = (struct vxgedev *)netdev_priv(dev); | 1291 | vdev = netdev_priv(dev); |
1185 | hldev = vdev->devh; | 1292 | hldev = vdev->devh; |
1186 | 1293 | ||
1187 | if (!is_valid_ether_addr(addr->sa_data)) | 1294 | if (!is_valid_ether_addr(addr->sa_data)) |
@@ -1246,7 +1353,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) | |||
1246 | * | 1353 | * |
1247 | * Enables the interrupts for the vpath | 1354 | * Enables the interrupts for the vpath |
1248 | */ | 1355 | */ |
1249 | void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) | 1356 | static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) |
1250 | { | 1357 | { |
1251 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; | 1358 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; |
1252 | int msix_id = 0; | 1359 | int msix_id = 0; |
@@ -1279,11 +1386,16 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) | |||
1279 | * | 1386 | * |
1280 | * Disables the interrupts for the vpath | 1387 | * Disables the interrupts for the vpath |
1281 | */ | 1388 | */ |
1282 | void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) | 1389 | static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) |
1283 | { | 1390 | { |
1284 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; | 1391 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; |
1392 | struct __vxge_hw_device *hldev; | ||
1285 | int msix_id; | 1393 | int msix_id; |
1286 | 1394 | ||
1395 | hldev = pci_get_drvdata(vdev->pdev); | ||
1396 | |||
1397 | vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id); | ||
1398 | |||
1287 | vxge_hw_vpath_intr_disable(vpath->handle); | 1399 | vxge_hw_vpath_intr_disable(vpath->handle); |
1288 | 1400 | ||
1289 | if (vdev->config.intr_type == INTA) | 1401 | if (vdev->config.intr_type == INTA) |
@@ -1300,6 +1412,95 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) | |||
1300 | } | 1412 | } |
1301 | } | 1413 | } |
1302 | 1414 | ||
1415 | /* list all mac addresses from DA table */ | ||
1416 | static enum vxge_hw_status | ||
1417 | vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1418 | { | ||
1419 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1420 | unsigned char macmask[ETH_ALEN]; | ||
1421 | unsigned char macaddr[ETH_ALEN]; | ||
1422 | |||
1423 | status = vxge_hw_vpath_mac_addr_get(vpath->handle, | ||
1424 | macaddr, macmask); | ||
1425 | if (status != VXGE_HW_OK) { | ||
1426 | vxge_debug_init(VXGE_ERR, | ||
1427 | "DA config list entry failed for vpath:%d", | ||
1428 | vpath->device_id); | ||
1429 | return status; | ||
1430 | } | ||
1431 | |||
1432 | while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { | ||
1433 | status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, | ||
1434 | macaddr, macmask); | ||
1435 | if (status != VXGE_HW_OK) | ||
1436 | break; | ||
1437 | } | ||
1438 | |||
1439 | return status; | ||
1440 | } | ||
1441 | |||
1442 | /* Store all mac addresses from the list to the DA table */ | ||
1443 | static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) | ||
1444 | { | ||
1445 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1446 | struct macInfo mac_info; | ||
1447 | u8 *mac_address = NULL; | ||
1448 | struct list_head *entry, *next; | ||
1449 | |||
1450 | memset(&mac_info, 0, sizeof(struct macInfo)); | ||
1451 | |||
1452 | if (vpath->is_open) { | ||
1453 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1454 | mac_address = | ||
1455 | (u8 *)& | ||
1456 | ((struct vxge_mac_addrs *)entry)->macaddr; | ||
1457 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | ||
1458 | ((struct vxge_mac_addrs *)entry)->state = | ||
1459 | VXGE_LL_MAC_ADDR_IN_DA_TABLE; | ||
1460 | /* does this mac address already exist in da table? */ | ||
1461 | status = vxge_search_mac_addr_in_da_table(vpath, | ||
1462 | &mac_info); | ||
1463 | if (status != VXGE_HW_OK) { | ||
1464 | /* Add this mac address to the DA table */ | ||
1465 | status = vxge_hw_vpath_mac_addr_add( | ||
1466 | vpath->handle, mac_info.macaddr, | ||
1467 | mac_info.macmask, | ||
1468 | VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); | ||
1469 | if (status != VXGE_HW_OK) { | ||
1470 | vxge_debug_init(VXGE_ERR, | ||
1471 | "DA add entry failed for vpath:%d", | ||
1472 | vpath->device_id); | ||
1473 | ((struct vxge_mac_addrs *)entry)->state | ||
1474 | = VXGE_LL_MAC_ADDR_IN_LIST; | ||
1475 | } | ||
1476 | } | ||
1477 | } | ||
1478 | } | ||
1479 | |||
1480 | return status; | ||
1481 | } | ||
1482 | |||
1483 | /* Store all vlan ids from the list to the vid table */ | ||
1484 | static enum vxge_hw_status | ||
1485 | vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) | ||
1486 | { | ||
1487 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1488 | struct vxgedev *vdev = vpath->vdev; | ||
1489 | u16 vid; | ||
1490 | |||
1491 | if (vdev->vlgrp && vpath->is_open) { | ||
1492 | |||
1493 | for (vid = 0; vid < VLAN_N_VID; vid++) { | ||
1494 | if (!vlan_group_get_device(vdev->vlgrp, vid)) | ||
1495 | continue; | ||
1496 | /* Add these vlan to the vid table */ | ||
1497 | status = vxge_hw_vpath_vid_add(vpath->handle, vid); | ||
1498 | } | ||
1499 | } | ||
1500 | |||
1501 | return status; | ||
1502 | } | ||
1503 | |||
1303 | /* | 1504 | /* |
1304 | * vxge_reset_vpath | 1505 | * vxge_reset_vpath |
1305 | * @vdev: pointer to vdev | 1506 | * @vdev: pointer to vdev |
@@ -1377,6 +1578,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) | |||
1377 | return ret; | 1578 | return ret; |
1378 | } | 1579 | } |
1379 | 1580 | ||
1581 | /* Configure CI */ | ||
1582 | static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) | ||
1583 | { | ||
1584 | int i = 0; | ||
1585 | |||
1586 | /* Enable CI for RTI */ | ||
1587 | if (vdev->config.intr_type == MSI_X) { | ||
1588 | for (i = 0; i < vdev->no_of_vpath; i++) { | ||
1589 | struct __vxge_hw_ring *hw_ring; | ||
1590 | |||
1591 | hw_ring = vdev->vpaths[i].ring.handle; | ||
1592 | vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); | ||
1593 | } | ||
1594 | } | ||
1595 | |||
1596 | /* Enable CI for TTI */ | ||
1597 | for (i = 0; i < vdev->no_of_vpath; i++) { | ||
1598 | struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; | ||
1599 | vxge_hw_vpath_tti_ci_set(hw_fifo); | ||
1600 | /* | ||
1601 | * For INTA (with or without NAPI), set CI on for only one | ||
1602 | * vpath (we have only one free-running timer). | ||
1603 | */ | ||
1604 | if ((vdev->config.intr_type == INTA) && (i == 0)) | ||
1605 | break; | ||
1606 | } | ||
1607 | |||
1608 | return; | ||
1609 | } | ||
1610 | |||
1380 | static int do_vxge_reset(struct vxgedev *vdev, int event) | 1611 | static int do_vxge_reset(struct vxgedev *vdev, int event) |
1381 | { | 1612 | { |
1382 | enum vxge_hw_status status; | 1613 | enum vxge_hw_status status; |
@@ -1395,12 +1626,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) | |||
1395 | } | 1626 | } |
1396 | 1627 | ||
1397 | if (event == VXGE_LL_FULL_RESET) { | 1628 | if (event == VXGE_LL_FULL_RESET) { |
1629 | netif_carrier_off(vdev->ndev); | ||
1630 | |||
1398 | /* wait for all the vpath reset to complete */ | 1631 | /* wait for all the vpath reset to complete */ |
1399 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | 1632 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { |
1400 | while (test_bit(vp_id, &vdev->vp_reset)) | 1633 | while (test_bit(vp_id, &vdev->vp_reset)) |
1401 | msleep(50); | 1634 | msleep(50); |
1402 | } | 1635 | } |
1403 | 1636 | ||
1637 | netif_carrier_on(vdev->ndev); | ||
1638 | |||
1404 | /* if execution mode is set to debug, don't reset the adapter */ | 1639 | /* if execution mode is set to debug, don't reset the adapter */ |
1405 | if (unlikely(vdev->exec_mode)) { | 1640 | if (unlikely(vdev->exec_mode)) { |
1406 | vxge_debug_init(VXGE_ERR, | 1641 | vxge_debug_init(VXGE_ERR, |
@@ -1413,6 +1648,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) | |||
1413 | } | 1648 | } |
1414 | 1649 | ||
1415 | if (event == VXGE_LL_FULL_RESET) { | 1650 | if (event == VXGE_LL_FULL_RESET) { |
1651 | vxge_hw_device_wait_receive_idle(vdev->devh); | ||
1416 | vxge_hw_device_intr_disable(vdev->devh); | 1652 | vxge_hw_device_intr_disable(vdev->devh); |
1417 | 1653 | ||
1418 | switch (vdev->cric_err_event) { | 1654 | switch (vdev->cric_err_event) { |
@@ -1537,6 +1773,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) | |||
1537 | netif_tx_wake_all_queues(vdev->ndev); | 1773 | netif_tx_wake_all_queues(vdev->ndev); |
1538 | } | 1774 | } |
1539 | 1775 | ||
1776 | /* configure CI */ | ||
1777 | vxge_config_ci_for_tti_rti(vdev); | ||
1778 | |||
1540 | out: | 1779 | out: |
1541 | vxge_debug_entryexit(VXGE_TRACE, | 1780 | vxge_debug_entryexit(VXGE_TRACE, |
1542 | "%s:%d Exiting...", __func__, __LINE__); | 1781 | "%s:%d Exiting...", __func__, __LINE__); |
@@ -1553,9 +1792,14 @@ out: | |||
1553 | * | 1792 | * |
1554 | * driver may reset the chip on events of serr, eccerr, etc | 1793 | * driver may reset the chip on events of serr, eccerr, etc |
1555 | */ | 1794 | */ |
1556 | int vxge_reset(struct vxgedev *vdev) | 1795 | static void vxge_reset(struct work_struct *work) |
1557 | { | 1796 | { |
1558 | return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); | 1797 | struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task); |
1798 | |||
1799 | if (!netif_running(vdev->ndev)) | ||
1800 | return; | ||
1801 | |||
1802 | do_vxge_reset(vdev, VXGE_LL_FULL_RESET); | ||
1559 | } | 1803 | } |
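vxge_reset now runs from a work item instead of being called directly, so error paths only have to queue it. The queuing side is outside this hunk; a minimal sketch of the usual pairing, with the init assumed to happen once at probe time:

/* Assumed setup/trigger for the reset_task above: INIT_WORK() once
 * during probe, then schedule_work() from any context (e.g. an error
 * interrupt) that decides a full reset is needed. container_of() in
 * vxge_reset() recovers the vxgedev from the work pointer.
 */
static void demo_probe_init(struct vxgedev *vdev)
{
        INIT_WORK(&vdev->reset_task, vxge_reset);
}

static void demo_request_reset(struct vxgedev *vdev)
{
        schedule_work(&vdev->reset_task);       /* runs vxge_reset() later */
}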
1560 | 1804 | ||
1561 | /** | 1805 | /** |
@@ -1572,22 +1816,29 @@ int vxge_reset(struct vxgedev *vdev) | |||
1572 | */ | 1816 | */ |
1573 | static int vxge_poll_msix(struct napi_struct *napi, int budget) | 1817 | static int vxge_poll_msix(struct napi_struct *napi, int budget) |
1574 | { | 1818 | { |
1575 | struct vxge_ring *ring = | 1819 | struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); |
1576 | container_of(napi, struct vxge_ring, napi); | 1820 | int pkts_processed; |
1577 | int budget_org = budget; | 1821 | int budget_org = budget; |
1578 | ring->budget = budget; | ||
1579 | 1822 | ||
1823 | ring->budget = budget; | ||
1824 | ring->pkts_processed = 0; | ||
1580 | vxge_hw_vpath_poll_rx(ring->handle); | 1825 | vxge_hw_vpath_poll_rx(ring->handle); |
1826 | pkts_processed = ring->pkts_processed; | ||
1581 | 1827 | ||
1582 | if (ring->pkts_processed < budget_org) { | 1828 | if (ring->pkts_processed < budget_org) { |
1583 | napi_complete(napi); | 1829 | napi_complete(napi); |
1830 | |||
1584 | /* Re enable the Rx interrupts for the vpath */ | 1831 | /* Re enable the Rx interrupts for the vpath */ |
1585 | vxge_hw_channel_msix_unmask( | 1832 | vxge_hw_channel_msix_unmask( |
1586 | (struct __vxge_hw_channel *)ring->handle, | 1833 | (struct __vxge_hw_channel *)ring->handle, |
1587 | ring->rx_vector_no); | 1834 | ring->rx_vector_no); |
1835 | mmiowb(); | ||
1588 | } | 1836 | } |
1589 | 1837 | ||
1590 | return ring->pkts_processed; | 1838 | /* Return the local copy: after clearing the msix interrupt |
1839 | * above, the interrupt can fire right away, preempt this NAPI | ||
1840 | * thread, and reset ring->pkts_processed under us. */ | ||
1841 | return pkts_processed; | ||
1591 | } | 1842 | } |
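The snapshot-then-return pattern above matters because napi_complete() plus the unmask can let a fresh interrupt reschedule this poll, and the re-run zeroes ring->pkts_processed before the first invocation returns. The skeleton, with demo_* names standing in:

/* Sketch of the safe NAPI poll shape: snapshot the work counter
 * before re-arming the interrupt, and return the snapshot.
 */
static int demo_poll(struct napi_struct *napi, int budget)
{
        struct demo_ring *ring = container_of(napi, struct demo_ring, napi);
        int done;

        ring->pkts_processed = 0;
        demo_process_rx(ring, budget);          /* hypothetical; bumps the counter */
        done = ring->pkts_processed;            /* snapshot before re-arming */

        if (done < budget) {
                napi_complete(napi);
                demo_unmask_rx_irq(ring);       /* hypothetical */
        }
        return done;                            /* the snapshot, never the live field */
}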
1592 | 1843 | ||
1593 | static int vxge_poll_inta(struct napi_struct *napi, int budget) | 1844 | static int vxge_poll_inta(struct napi_struct *napi, int budget) |
@@ -1598,12 +1849,12 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) | |||
1598 | int budget_org = budget; | 1849 | int budget_org = budget; |
1599 | struct vxge_ring *ring; | 1850 | struct vxge_ring *ring; |
1600 | 1851 | ||
1601 | struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) | 1852 | struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev); |
1602 | pci_get_drvdata(vdev->pdev); | ||
1603 | 1853 | ||
1604 | for (i = 0; i < vdev->no_of_vpath; i++) { | 1854 | for (i = 0; i < vdev->no_of_vpath; i++) { |
1605 | ring = &vdev->vpaths[i].ring; | 1855 | ring = &vdev->vpaths[i].ring; |
1606 | ring->budget = budget; | 1856 | ring->budget = budget; |
1857 | ring->pkts_processed = 0; | ||
1607 | vxge_hw_vpath_poll_rx(ring->handle); | 1858 | vxge_hw_vpath_poll_rx(ring->handle); |
1608 | pkts_processed += ring->pkts_processed; | 1859 | pkts_processed += ring->pkts_processed; |
1609 | budget -= ring->pkts_processed; | 1860 | budget -= ring->pkts_processed; |
@@ -1635,11 +1886,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) | |||
1635 | */ | 1886 | */ |
1636 | static void vxge_netpoll(struct net_device *dev) | 1887 | static void vxge_netpoll(struct net_device *dev) |
1637 | { | 1888 | { |
1638 | struct __vxge_hw_device *hldev; | 1889 | struct __vxge_hw_device *hldev; |
1639 | struct vxgedev *vdev; | 1890 | struct vxgedev *vdev; |
1640 | 1891 | ||
1641 | vdev = (struct vxgedev *)netdev_priv(dev); | 1892 | vdev = netdev_priv(dev); |
1642 | hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); | 1893 | hldev = pci_get_drvdata(vdev->pdev); |
1643 | 1894 | ||
1644 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 1895 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
1645 | 1896 | ||
@@ -1679,15 +1930,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |||
1679 | mtable[index] = index % vdev->no_of_vpath; | 1930 | mtable[index] = index % vdev->no_of_vpath; |
1680 | } | 1931 | } |
1681 | 1932 | ||
1682 | /* Fill RTH hash types */ | ||
1683 | hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; | ||
1684 | hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; | ||
1685 | hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; | ||
1686 | hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; | ||
1687 | hash_types.hash_type_tcpipv6ex_en = | ||
1688 | vdev->config.rth_hash_type_tcpipv6ex; | ||
1689 | hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; | ||
1690 | |||
1691 | /* set indirection table, bucket-to-vpath mapping */ | 1933 | /* set indirection table, bucket-to-vpath mapping */ |
1692 | status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, | 1934 | status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, |
1693 | vdev->no_of_vpath, | 1935 | vdev->no_of_vpath, |
@@ -1700,19 +1942,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |||
1700 | return status; | 1942 | return status; |
1701 | } | 1943 | } |
1702 | 1944 | ||
1945 | /* Fill RTH hash types */ | ||
1946 | hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; | ||
1947 | hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; | ||
1948 | hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; | ||
1949 | hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; | ||
1950 | hash_types.hash_type_tcpipv6ex_en = | ||
1951 | vdev->config.rth_hash_type_tcpipv6ex; | ||
1952 | hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; | ||
1953 | |||
1703 | /* | 1954 | /* |
1704 | * Because the itable_set() method uses the active_table field | 1955 | * Because the itable_set() method uses the active_table field |
1705 | * for the target virtual path the RTH config should be updated | 1956 | * for the target virtual path the RTH config should be updated |
1706 | * for all VPATHs. The h/w only uses the lowest numbered VPATH | 1957 | * for all VPATHs. The h/w only uses the lowest numbered VPATH |
1707 | * when steering frames. | 1958 | * when steering frames. |
1708 | */ | 1959 | */ |
1709 | for (index = 0; index < vdev->no_of_vpath; index++) { | 1960 | for (index = 0; index < vdev->no_of_vpath; index++) { |
1710 | status = vxge_hw_vpath_rts_rth_set( | 1961 | status = vxge_hw_vpath_rts_rth_set( |
1711 | vdev->vpaths[index].handle, | 1962 | vdev->vpaths[index].handle, |
1712 | vdev->config.rth_algorithm, | 1963 | vdev->config.rth_algorithm, |
1713 | &hash_types, | 1964 | &hash_types, |
1714 | vdev->config.rth_bkt_sz); | 1965 | vdev->config.rth_bkt_sz); |
1715 | |||
1716 | if (status != VXGE_HW_OK) { | 1966 | if (status != VXGE_HW_OK) { |
1717 | vxge_debug_init(VXGE_ERR, | 1967 | vxge_debug_init(VXGE_ERR, |
1718 | "RTH configuration failed for vpath:%d", | 1968 | "RTH configuration failed for vpath:%d", |
@@ -1724,197 +1974,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |||
1724 | return status; | 1974 | return status; |
1725 | } | 1975 | } |
1726 | 1976 | ||
1727 | int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1728 | { | ||
1729 | struct vxge_mac_addrs *new_mac_entry; | ||
1730 | u8 *mac_address = NULL; | ||
1731 | |||
1732 | if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) | ||
1733 | return TRUE; | ||
1734 | |||
1735 | new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); | ||
1736 | if (!new_mac_entry) { | ||
1737 | vxge_debug_mem(VXGE_ERR, | ||
1738 | "%s: memory allocation failed", | ||
1739 | VXGE_DRIVER_NAME); | ||
1740 | return FALSE; | ||
1741 | } | ||
1742 | |||
1743 | list_add(&new_mac_entry->item, &vpath->mac_addr_list); | ||
1744 | |||
1745 | /* Copy the new mac address to the list */ | ||
1746 | mac_address = (u8 *)&new_mac_entry->macaddr; | ||
1747 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1748 | |||
1749 | new_mac_entry->state = mac->state; | ||
1750 | vpath->mac_addr_cnt++; | ||
1751 | |||
1752 | /* Is this a multicast address */ | ||
1753 | if (0x01 & mac->macaddr[0]) | ||
1754 | vpath->mcast_addr_cnt++; | ||
1755 | |||
1756 | return TRUE; | ||
1757 | } | ||
1758 | |||
1759 | /* Add a mac address to DA table */ | ||
1760 | enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
1761 | { | ||
1762 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1763 | struct vxge_vpath *vpath; | ||
1764 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; | ||
1765 | |||
1766 | if (0x01 & mac->macaddr[0]) /* multicast address */ | ||
1767 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; | ||
1768 | else | ||
1769 | duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; | ||
1770 | |||
1771 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1772 | status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, | ||
1773 | mac->macmask, duplicate_mode); | ||
1774 | if (status != VXGE_HW_OK) { | ||
1775 | vxge_debug_init(VXGE_ERR, | ||
1776 | "DA config add entry failed for vpath:%d", | ||
1777 | vpath->device_id); | ||
1778 | } else | ||
1779 | if (FALSE == vxge_mac_list_add(vpath, mac)) | ||
1780 | status = -EPERM; | ||
1781 | |||
1782 | return status; | ||
1783 | } | ||
1784 | |||
1785 | int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) | ||
1786 | { | ||
1787 | struct list_head *entry, *next; | ||
1788 | u64 del_mac = 0; | ||
1789 | u8 *mac_address = (u8 *) (&del_mac); | ||
1790 | |||
1791 | /* Copy the mac address to delete from the list */ | ||
1792 | memcpy(mac_address, mac->macaddr, ETH_ALEN); | ||
1793 | |||
1794 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1795 | if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { | ||
1796 | list_del(entry); | ||
1797 | kfree((struct vxge_mac_addrs *)entry); | ||
1798 | vpath->mac_addr_cnt--; | ||
1799 | |||
1800 | /* Is this a multicast address */ | ||
1801 | if (0x01 & mac->macaddr[0]) | ||
1802 | vpath->mcast_addr_cnt--; | ||
1803 | return TRUE; | ||
1804 | } | ||
1805 | } | ||
1806 | |||
1807 | return FALSE; | ||
1808 | } | ||
1809 | /* delete a mac address from DA table */ | ||
1810 | enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) | ||
1811 | { | ||
1812 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1813 | struct vxge_vpath *vpath; | ||
1814 | |||
1815 | vpath = &vdev->vpaths[mac->vpath_no]; | ||
1816 | status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, | ||
1817 | mac->macmask); | ||
1818 | if (status != VXGE_HW_OK) { | ||
1819 | vxge_debug_init(VXGE_ERR, | ||
1820 | "DA config delete entry failed for vpath:%d", | ||
1821 | vpath->device_id); | ||
1822 | } else | ||
1823 | vxge_mac_list_del(vpath, mac); | ||
1824 | return status; | ||
1825 | } | ||
1826 | |||
1827 | /* list all mac addresses from DA table */ | ||
1828 | enum vxge_hw_status | ||
1829 | static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, | ||
1830 | struct macInfo *mac) | ||
1831 | { | ||
1832 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1833 | unsigned char macmask[ETH_ALEN]; | ||
1834 | unsigned char macaddr[ETH_ALEN]; | ||
1835 | |||
1836 | status = vxge_hw_vpath_mac_addr_get(vpath->handle, | ||
1837 | macaddr, macmask); | ||
1838 | if (status != VXGE_HW_OK) { | ||
1839 | vxge_debug_init(VXGE_ERR, | ||
1840 | "DA config list entry failed for vpath:%d", | ||
1841 | vpath->device_id); | ||
1842 | return status; | ||
1843 | } | ||
1844 | |||
1845 | while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { | ||
1846 | |||
1847 | status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, | ||
1848 | macaddr, macmask); | ||
1849 | if (status != VXGE_HW_OK) | ||
1850 | break; | ||
1851 | } | ||
1852 | |||
1853 | return status; | ||
1854 | } | ||
1855 | |||
1856 | /* Store all vlan ids from the list to the vid table */ | ||
1857 | enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) | ||
1858 | { | ||
1859 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1860 | struct vxgedev *vdev = vpath->vdev; | ||
1861 | u16 vid; | ||
1862 | |||
1863 | if (vdev->vlgrp && vpath->is_open) { | ||
1864 | |||
1865 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | ||
1866 | if (!vlan_group_get_device(vdev->vlgrp, vid)) | ||
1867 | continue; | ||
1868 | /* Add these vlan to the vid table */ | ||
1869 | status = vxge_hw_vpath_vid_add(vpath->handle, vid); | ||
1870 | } | ||
1871 | } | ||
1872 | |||
1873 | return status; | ||
1874 | } | ||
1875 | |||
1876 | /* Store all mac addresses from the list to the DA table */ | ||
1877 | enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) | ||
1878 | { | ||
1879 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1880 | struct macInfo mac_info; | ||
1881 | u8 *mac_address = NULL; | ||
1882 | struct list_head *entry, *next; | ||
1883 | |||
1884 | memset(&mac_info, 0, sizeof(struct macInfo)); | ||
1885 | |||
1886 | if (vpath->is_open) { | ||
1887 | |||
1888 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | ||
1889 | mac_address = | ||
1890 | (u8 *)& | ||
1891 | ((struct vxge_mac_addrs *)entry)->macaddr; | ||
1892 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | ||
1893 | ((struct vxge_mac_addrs *)entry)->state = | ||
1894 | VXGE_LL_MAC_ADDR_IN_DA_TABLE; | ||
1895 | /* does this mac address already exist in da table? */ | ||
1896 | status = vxge_search_mac_addr_in_da_table(vpath, | ||
1897 | &mac_info); | ||
1898 | if (status != VXGE_HW_OK) { | ||
1899 | /* Add this mac address to the DA table */ | ||
1900 | status = vxge_hw_vpath_mac_addr_add( | ||
1901 | vpath->handle, mac_info.macaddr, | ||
1902 | mac_info.macmask, | ||
1903 | VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); | ||
1904 | if (status != VXGE_HW_OK) { | ||
1905 | vxge_debug_init(VXGE_ERR, | ||
1906 | "DA add entry failed for vpath:%d", | ||
1907 | vpath->device_id); | ||
1908 | ((struct vxge_mac_addrs *)entry)->state | ||
1909 | = VXGE_LL_MAC_ADDR_IN_LIST; | ||
1910 | } | ||
1911 | } | ||
1912 | } | ||
1913 | } | ||
1914 | |||
1915 | return status; | ||
1916 | } | ||
1917 | |||
1918 | /* reset vpaths */ | 1977 | /* reset vpaths */ |
1919 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) | 1978 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) |
1920 | { | 1979 | { |
@@ -1948,7 +2007,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) | |||
1948 | } | 2007 | } |
1949 | 2008 | ||
1950 | /* close vpaths */ | 2009 | /* close vpaths */ |
1951 | void vxge_close_vpaths(struct vxgedev *vdev, int index) | 2010 | static void vxge_close_vpaths(struct vxgedev *vdev, int index) |
1952 | { | 2011 | { |
1953 | struct vxge_vpath *vpath; | 2012 | struct vxge_vpath *vpath; |
1954 | int i; | 2013 | int i; |
@@ -1966,7 +2025,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index) | |||
1966 | } | 2025 | } |
1967 | 2026 | ||
1968 | /* open vpaths */ | 2027 | /* open vpaths */ |
1969 | int vxge_open_vpaths(struct vxgedev *vdev) | 2028 | static int vxge_open_vpaths(struct vxgedev *vdev) |
1970 | { | 2029 | { |
1971 | struct vxge_hw_vpath_attr attr; | 2030 | struct vxge_hw_vpath_attr attr; |
1972 | enum vxge_hw_status status; | 2031 | enum vxge_hw_status status; |
@@ -1976,8 +2035,23 @@ int vxge_open_vpaths(struct vxgedev *vdev) | |||
1976 | 2035 | ||
1977 | for (i = 0; i < vdev->no_of_vpath; i++) { | 2036 | for (i = 0; i < vdev->no_of_vpath; i++) { |
1978 | vpath = &vdev->vpaths[i]; | 2037 | vpath = &vdev->vpaths[i]; |
1979 | |||
1980 | vxge_assert(vpath->is_configured); | 2038 | vxge_assert(vpath->is_configured); |
2039 | |||
2040 | if (!vdev->titan1) { | ||
2041 | struct vxge_hw_vp_config *vcfg; | ||
2042 | vcfg = &vdev->devh->config.vp_config[vpath->device_id]; | ||
2043 | |||
2044 | vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A; | ||
2045 | vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B; | ||
2046 | vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C; | ||
2047 | vcfg->tti.uec_a = TTI_T1A_TX_UFC_A; | ||
2048 | vcfg->tti.uec_b = TTI_T1A_TX_UFC_B; | ||
2049 | vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu); | ||
2050 | vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu); | ||
2051 | vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL; | ||
2052 | vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL; | ||
2053 | } | ||
2054 | |||
1981 | attr.vp_id = vpath->device_id; | 2055 | attr.vp_id = vpath->device_id; |
1982 | attr.fifo_attr.callback = vxge_xmit_compl; | 2056 | attr.fifo_attr.callback = vxge_xmit_compl; |
1983 | attr.fifo_attr.txdl_term = vxge_tx_term; | 2057 | attr.fifo_attr.txdl_term = vxge_tx_term; |
@@ -1992,6 +2066,7 @@ int vxge_open_vpaths(struct vxgedev *vdev) | |||
1992 | 2066 | ||
1993 | vpath->ring.ndev = vdev->ndev; | 2067 | vpath->ring.ndev = vdev->ndev; |
1994 | vpath->ring.pdev = vdev->pdev; | 2068 | vpath->ring.pdev = vdev->pdev; |
2069 | |||
1995 | status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); | 2070 | status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); |
1996 | if (status == VXGE_HW_OK) { | 2071 | if (status == VXGE_HW_OK) { |
1997 | vpath->fifo.handle = | 2072 | vpath->fifo.handle = |
@@ -2010,20 +2085,19 @@ int vxge_open_vpaths(struct vxgedev *vdev) | |||
2010 | netdev_get_tx_queue(vdev->ndev, 0); | 2085 | netdev_get_tx_queue(vdev->ndev, 0); |
2011 | vpath->fifo.indicate_max_pkts = | 2086 | vpath->fifo.indicate_max_pkts = |
2012 | vdev->config.fifo_indicate_max_pkts; | 2087 | vdev->config.fifo_indicate_max_pkts; |
2088 | vpath->fifo.tx_vector_no = 0; | ||
2013 | vpath->ring.rx_vector_no = 0; | 2089 | vpath->ring.rx_vector_no = 0; |
2014 | vpath->ring.rx_csum = vdev->rx_csum; | 2090 | vpath->ring.rx_hwts = vdev->rx_hwts; |
2015 | vpath->is_open = 1; | 2091 | vpath->is_open = 1; |
2016 | vdev->vp_handles[i] = vpath->handle; | 2092 | vdev->vp_handles[i] = vpath->handle; |
2017 | vpath->ring.gro_enable = vdev->config.gro_enable; | ||
2018 | vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; | 2093 | vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; |
2019 | vdev->stats.vpaths_open++; | 2094 | vdev->stats.vpaths_open++; |
2020 | } else { | 2095 | } else { |
2021 | vdev->stats.vpath_open_fail++; | 2096 | vdev->stats.vpath_open_fail++; |
2022 | vxge_debug_init(VXGE_ERR, | 2097 | vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " |
2023 | "%s: vpath: %d failed to open " | 2098 | "open with status: %d", |
2024 | "with status: %d", | 2099 | vdev->ndev->name, vpath->device_id, |
2025 | vdev->ndev->name, vpath->device_id, | 2100 | status); |
2026 | status); | ||
2027 | vxge_close_vpaths(vdev, 0); | 2101 | vxge_close_vpaths(vdev, 0); |
2028 | return -EPERM; | 2102 | return -EPERM; |
2029 | } | 2103 | } |
@@ -2031,9 +2105,65 @@ int vxge_open_vpaths(struct vxgedev *vdev) | |||
2031 | vp_id = vpath->handle->vpath->vp_id; | 2105 | vp_id = vpath->handle->vpath->vp_id; |
2032 | vdev->vpaths_deployed |= vxge_mBIT(vp_id); | 2106 | vdev->vpaths_deployed |= vxge_mBIT(vp_id); |
2033 | } | 2107 | } |
2108 | |||
2034 | return VXGE_HW_OK; | 2109 | return VXGE_HW_OK; |
2035 | } | 2110 | } |
2036 | 2111 | ||
2112 | /** | ||
2113 | * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing | ||
2114 | * if the interrupts are not within a range | ||
2115 | * @fifo: pointer to transmit fifo structure | ||
2116 | * Description: The function changes the boundary timer and restriction | ||
2117 | * timer values depending on the traffic. | ||
2118 | * Return Value: None | ||
2119 | */ | ||
2120 | static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) | ||
2121 | { | ||
2122 | fifo->interrupt_count++; | ||
2123 | if (jiffies > fifo->jiffies + HZ / 100) { | ||
2124 | struct __vxge_hw_fifo *hw_fifo = fifo->handle; | ||
2125 | |||
2126 | fifo->jiffies = jiffies; | ||
2127 | if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && | ||
2128 | hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { | ||
2129 | hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; | ||
2130 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | ||
2131 | } else if (hw_fifo->rtimer != 0) { | ||
2132 | hw_fifo->rtimer = 0; | ||
2133 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | ||
2134 | } | ||
2135 | fifo->interrupt_count = 0; | ||
2136 | } | ||
2137 | } | ||
2138 | |||
2139 | /** | ||
2140 | * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing | ||
2141 | * if the interrupts are not within a range | ||
2142 | * @ring: pointer to receive ring structure | ||
2143 | * Description: The function increases or decreases the packet counts within | ||
2144 | * the ranges of traffic utilization, if the interrupts due to this ring are | ||
2145 | * not within a fixed range. | ||
2146 | * Return Value: Nothing | ||
2147 | */ | ||
2148 | static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) | ||
2149 | { | ||
2150 | ring->interrupt_count++; | ||
2151 | if (jiffies > ring->jiffies + HZ / 100) { | ||
2152 | struct __vxge_hw_ring *hw_ring = ring->handle; | ||
2153 | |||
2154 | ring->jiffies = jiffies; | ||
2155 | if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && | ||
2156 | hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { | ||
2157 | hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; | ||
2158 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | ||
2159 | } else if (hw_ring->rtimer != 0) { | ||
2160 | hw_ring->rtimer = 0; | ||
2161 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | ||
2162 | } | ||
2163 | ring->interrupt_count = 0; | ||
2164 | } | ||
2165 | } | ||
2166 | |||
2037 | /* | 2167 | /* |
2038 | * vxge_isr_napi | 2168 | * vxge_isr_napi |
2039 | * @irq: the irq of the device. | 2169 | * @irq: the irq of the device. |
@@ -2050,21 +2180,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) | |||
2050 | struct __vxge_hw_device *hldev; | 2180 | struct __vxge_hw_device *hldev; |
2051 | u64 reason; | 2181 | u64 reason; |
2052 | enum vxge_hw_status status; | 2182 | enum vxge_hw_status status; |
2053 | struct vxgedev *vdev = (struct vxgedev *) dev_id;; | 2183 | struct vxgedev *vdev = (struct vxgedev *)dev_id; |
2054 | 2184 | ||
2055 | vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 2185 | vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
2056 | 2186 | ||
2057 | dev = vdev->ndev; | 2187 | dev = vdev->ndev; |
2058 | hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); | 2188 | hldev = pci_get_drvdata(vdev->pdev); |
2059 | 2189 | ||
2060 | if (pci_channel_offline(vdev->pdev)) | 2190 | if (pci_channel_offline(vdev->pdev)) |
2061 | return IRQ_NONE; | 2191 | return IRQ_NONE; |
2062 | 2192 | ||
2063 | if (unlikely(!is_vxge_card_up(vdev))) | 2193 | if (unlikely(!is_vxge_card_up(vdev))) |
2064 | return IRQ_NONE; | 2194 | return IRQ_HANDLED; |
2065 | 2195 | ||
2066 | status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, | 2196 | status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); |
2067 | &reason); | ||
2068 | if (status == VXGE_HW_OK) { | 2197 | if (status == VXGE_HW_OK) { |
2069 | vxge_hw_device_mask_all(hldev); | 2198 | vxge_hw_device_mask_all(hldev); |
2070 | 2199 | ||
@@ -2095,24 +2224,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) | |||
2095 | 2224 | ||
2096 | #ifdef CONFIG_PCI_MSI | 2225 | #ifdef CONFIG_PCI_MSI |
2097 | 2226 | ||
2098 | static irqreturn_t | 2227 | static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) |
2099 | vxge_tx_msix_handle(int irq, void *dev_id) | ||
2100 | { | 2228 | { |
2101 | struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; | 2229 | struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; |
2102 | 2230 | ||
2231 | adaptive_coalesce_tx_interrupts(fifo); | ||
2232 | |||
2233 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, | ||
2234 | fifo->tx_vector_no); | ||
2235 | |||
2236 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, | ||
2237 | fifo->tx_vector_no); | ||
2238 | |||
2103 | VXGE_COMPLETE_VPATH_TX(fifo); | 2239 | VXGE_COMPLETE_VPATH_TX(fifo); |
2104 | 2240 | ||
2241 | vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, | ||
2242 | fifo->tx_vector_no); | ||
2243 | |||
2244 | mmiowb(); | ||
2245 | |||
2105 | return IRQ_HANDLED; | 2246 | return IRQ_HANDLED; |
2106 | } | 2247 | } |
2107 | 2248 | ||
2108 | static irqreturn_t | 2249 | static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) |
2109 | vxge_rx_msix_napi_handle(int irq, void *dev_id) | ||
2110 | { | 2250 | { |
2111 | struct vxge_ring *ring = (struct vxge_ring *)dev_id; | 2251 | struct vxge_ring *ring = (struct vxge_ring *)dev_id; |
2112 | 2252 | ||
2113 | /* MSIX_IDX for Rx is 1 */ | 2253 | adaptive_coalesce_rx_interrupts(ring); |
2254 | |||
2114 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, | 2255 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, |
2115 | ring->rx_vector_no); | 2256 | ring->rx_vector_no); |
2257 | |||
2258 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, | ||
2259 | ring->rx_vector_no); | ||
2116 | 2260 | ||
2117 | napi_schedule(&ring->napi); | 2261 | napi_schedule(&ring->napi); |
2118 | return IRQ_HANDLED; | 2262 | return IRQ_HANDLED; |
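Both handlers now follow the one-shot MSI-X discipline that the later switch to VXGE_HW_INTR_MODE_MSIX_ONE_SHOT depends on: mask the vector so it cannot re-fire, clear the pending bit, do (or schedule) the work, then unmask to re-arm. The Tx handler condensed, using only calls that appear in this patch:

static irqreturn_t tx_msix_oneshot_sketch(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = dev_id;
	struct __vxge_hw_channel *ch = (struct __vxge_hw_channel *)fifo->handle;

	adaptive_coalesce_tx_interrupts(fifo);			/* tune rtimer */
	vxge_hw_channel_msix_mask(ch, fifo->tx_vector_no);	/* block re-entry */
	vxge_hw_channel_msix_clear(ch, fifo->tx_vector_no);	/* ack pending bit */
	VXGE_COMPLETE_VPATH_TX(fifo);				/* reap completions */
	vxge_hw_channel_msix_unmask(ch, fifo->tx_vector_no);	/* re-arm vector */
	mmiowb();						/* order MMIO writes */
	return IRQ_HANDLED;
}

The Rx handler stops after napi_schedule(); its vector is presumably re-armed once the NAPI poll completes, elsewhere in the driver.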
@@ -2129,14 +2273,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id) | |||
2129 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; | 2273 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; |
2130 | 2274 | ||
2131 | for (i = 0; i < vdev->no_of_vpath; i++) { | 2275 | for (i = 0; i < vdev->no_of_vpath; i++) { |
2276 | /* Reduce the chance of losing alarm interrupts by masking | ||
2277 | * the vector. A pending bit will be set if an alarm is | ||
2278 | * generated, and the interrupt will fire on unmask. | ||
2279 | */ | ||
2132 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); | 2280 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); |
2281 | vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); | ||
2282 | mmiowb(); | ||
2133 | 2283 | ||
2134 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, | 2284 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, |
2135 | vdev->exec_mode); | 2285 | vdev->exec_mode); |
2136 | if (status == VXGE_HW_OK) { | 2286 | if (status == VXGE_HW_OK) { |
2137 | |||
2138 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, | 2287 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, |
2139 | msix_id); | 2288 | msix_id); |
2289 | mmiowb(); | ||
2140 | continue; | 2290 | continue; |
2141 | } | 2291 | } |
2142 | vxge_debug_intr(VXGE_ERR, | 2292 | vxge_debug_intr(VXGE_ERR, |
@@ -2159,8 +2309,8 @@ start: | |||
2159 | /* Alarm MSIX Vectors count */ | 2309 | /* Alarm MSIX Vectors count */ |
2160 | vdev->intr_cnt++; | 2310 | vdev->intr_cnt++; |
2161 | 2311 | ||
2162 | vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry), | 2312 | vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry), |
2163 | GFP_KERNEL); | 2313 | GFP_KERNEL); |
2164 | if (!vdev->entries) { | 2314 | if (!vdev->entries) { |
2165 | vxge_debug_init(VXGE_ERR, | 2315 | vxge_debug_init(VXGE_ERR, |
2166 | "%s: memory allocation failed", | 2316 | "%s: memory allocation failed", |
@@ -2169,9 +2319,9 @@ start: | |||
2169 | goto alloc_entries_failed; | 2319 | goto alloc_entries_failed; |
2170 | } | 2320 | } |
2171 | 2321 | ||
2172 | vdev->vxge_entries = | 2322 | vdev->vxge_entries = kcalloc(vdev->intr_cnt, |
2173 | kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry), | 2323 | sizeof(struct vxge_msix_entry), |
2174 | GFP_KERNEL); | 2324 | GFP_KERNEL); |
2175 | if (!vdev->vxge_entries) { | 2325 | if (!vdev->vxge_entries) { |
2176 | vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", | 2326 | vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", |
2177 | VXGE_DRIVER_NAME); | 2327 | VXGE_DRIVER_NAME); |
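The two allocation changes in this hunk are more than style: with kzalloc(n * size, ...) the multiplication can silently wrap, handing back a short (but zeroed) buffer, whereas kcalloc(n, size, ...) fails cleanly on overflow. Illustrative contrast:

/* before: 'intr_cnt * sizeof(...)' can overflow and under-allocate */
vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
			GFP_KERNEL);

/* after: kcalloc() returns NULL if the product would overflow */
vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
			GFP_KERNEL);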
@@ -2255,6 +2405,9 @@ static int vxge_enable_msix(struct vxgedev *vdev) | |||
2255 | vpath->ring.rx_vector_no = (vpath->device_id * | 2405 | vpath->ring.rx_vector_no = (vpath->device_id * |
2256 | VXGE_HW_VPATH_MSIX_ACTIVE) + 1; | 2406 | VXGE_HW_VPATH_MSIX_ACTIVE) + 1; |
2257 | 2407 | ||
2408 | vpath->fifo.tx_vector_no = (vpath->device_id * | ||
2409 | VXGE_HW_VPATH_MSIX_ACTIVE); | ||
2410 | |||
2258 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, | 2411 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, |
2259 | VXGE_ALARM_MSIX_ID); | 2412 | VXGE_ALARM_MSIX_ID); |
2260 | } | 2413 | } |
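With the Tx vector assignment added here, every vpath owns a contiguous MSI-X block: offset 0 carries fifo (Tx) completions, offset 1 carries ring (Rx) completions, and VXGE_ALARM_MSIX_ID carries alarms. A tiny helper restating the mapping the driver computes inline (illustrative only):

static inline int vxge_msix_vector_sketch(int device_id, int offset)
{
	/* offset 0 = Tx fifo, 1 = Rx ring, VXGE_ALARM_MSIX_ID = alarms */
	return device_id * VXGE_HW_VPATH_MSIX_ACTIVE + offset;
}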
@@ -2289,8 +2442,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) | |||
2289 | 2442 | ||
2290 | static void vxge_rem_isr(struct vxgedev *vdev) | 2443 | static void vxge_rem_isr(struct vxgedev *vdev) |
2291 | { | 2444 | { |
2292 | struct __vxge_hw_device *hldev; | 2445 | struct __vxge_hw_device *hldev; |
2293 | hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); | 2446 | hldev = pci_get_drvdata(vdev->pdev); |
2294 | 2447 | ||
2295 | #ifdef CONFIG_PCI_MSI | 2448 | #ifdef CONFIG_PCI_MSI |
2296 | if (vdev->config.intr_type == MSI_X) { | 2449 | if (vdev->config.intr_type == MSI_X) { |
@@ -2430,8 +2583,9 @@ INTA_MODE: | |||
2430 | "%s:vxge:INTA", vdev->ndev->name); | 2583 | "%s:vxge:INTA", vdev->ndev->name); |
2431 | vxge_hw_device_set_intr_type(vdev->devh, | 2584 | vxge_hw_device_set_intr_type(vdev->devh, |
2432 | VXGE_HW_INTR_MODE_IRQLINE); | 2585 | VXGE_HW_INTR_MODE_IRQLINE); |
2433 | vxge_hw_vpath_tti_ci_set(vdev->devh, | 2586 | |
2434 | vdev->vpaths[0].device_id); | 2587 | vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); |
2588 | |||
2435 | ret = request_irq((int) vdev->pdev->irq, | 2589 | ret = request_irq((int) vdev->pdev->irq, |
2436 | vxge_isr_napi, | 2590 | vxge_isr_napi, |
2437 | IRQF_SHARED, vdev->desc[0], vdev); | 2591 | IRQF_SHARED, vdev->desc[0], vdev); |
@@ -2507,6 +2661,40 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2507 | mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); | 2661 | mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); |
2508 | } | 2662 | } |
2509 | 2663 | ||
2664 | static u32 vxge_fix_features(struct net_device *dev, u32 features) | ||
2665 | { | ||
2666 | u32 changed = dev->features ^ features; | ||
2667 | |||
2668 | /* Enabling RTH requires some of the logic in vxge_device_register and a | ||
2669 | * vpath reset. Due to these restrictions, only allow modification | ||
2670 | * while the interface is down. | ||
2671 | */ | ||
2672 | if ((changed & NETIF_F_RXHASH) && netif_running(dev)) | ||
2673 | features ^= NETIF_F_RXHASH; | ||
2674 | |||
2675 | return features; | ||
2676 | } | ||
2677 | |||
2678 | static int vxge_set_features(struct net_device *dev, u32 features) | ||
2679 | { | ||
2680 | struct vxgedev *vdev = netdev_priv(dev); | ||
2681 | u32 changed = dev->features ^ features; | ||
2682 | |||
2683 | if (!(changed & NETIF_F_RXHASH)) | ||
2684 | return 0; | ||
2685 | |||
2686 | /* !netif_running() ensured by vxge_fix_features() */ | ||
2687 | |||
2688 | vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH); | ||
2689 | if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) { | ||
2690 | dev->features = features ^ NETIF_F_RXHASH; | ||
2691 | vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH); | ||
2692 | return -EIO; | ||
2693 | } | ||
2694 | |||
2695 | return 0; | ||
2696 | } | ||
2697 | |||
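The two callbacks split the job the way the ndo_fix_features/ndo_set_features pair intends: the first silently refuses an RXHASH flip while the interface is running, the second commits the flip with a full vpath reset and rolls both dev->features and rth_en back if that reset fails. A compressed sketch of how the core drives them for "ethtool -K ethX rxhash on|off" (illustrative, not the actual netdev core):

static int rxhash_toggle_sketch(struct net_device *dev, u32 wanted)
{
	u32 features = vxge_fix_features(dev, wanted);	/* vetoed while up */

	if (!((features ^ dev->features) & NETIF_F_RXHASH))
		return 0;		/* refused, or nothing to change */

	return vxge_set_features(dev, features);	/* -EIO rolls back */
}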
2510 | /** | 2698 | /** |
2511 | * vxge_open | 2699 | * vxge_open |
2512 | * @dev: pointer to the device structure. | 2700 | * @dev: pointer to the device structure. |
@@ -2517,8 +2705,7 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2517 | * Return value: '0' on success and an appropriate (-)ve integer as | 2705 | * Return value: '0' on success and an appropriate (-)ve integer as |
2518 | * defined in errno.h file on failure. | 2706 | * defined in errno.h file on failure. |
2519 | */ | 2707 | */ |
2520 | int | 2708 | static int vxge_open(struct net_device *dev) |
2521 | vxge_open(struct net_device *dev) | ||
2522 | { | 2709 | { |
2523 | enum vxge_hw_status status; | 2710 | enum vxge_hw_status status; |
2524 | struct vxgedev *vdev; | 2711 | struct vxgedev *vdev; |
@@ -2527,11 +2714,12 @@ vxge_open(struct net_device *dev) | |||
2527 | int ret = 0; | 2714 | int ret = 0; |
2528 | int i; | 2715 | int i; |
2529 | u64 val64, function_mode; | 2716 | u64 val64, function_mode; |
2717 | |||
2530 | vxge_debug_entryexit(VXGE_TRACE, | 2718 | vxge_debug_entryexit(VXGE_TRACE, |
2531 | "%s: %s:%d", dev->name, __func__, __LINE__); | 2719 | "%s: %s:%d", dev->name, __func__, __LINE__); |
2532 | 2720 | ||
2533 | vdev = (struct vxgedev *)netdev_priv(dev); | 2721 | vdev = netdev_priv(dev); |
2534 | hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); | 2722 | hldev = pci_get_drvdata(vdev->pdev); |
2535 | function_mode = vdev->config.device_hw_info.function_mode; | 2723 | function_mode = vdev->config.device_hw_info.function_mode; |
2536 | 2724 | ||
2537 | /* make sure you have link off by default every time Nic is | 2725 | /* make sure you have link off by default every time Nic is |
@@ -2586,6 +2774,8 @@ vxge_open(struct net_device *dev) | |||
2586 | goto out2; | 2774 | goto out2; |
2587 | } | 2775 | } |
2588 | } | 2776 | } |
2777 | printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name, | ||
2778 | hldev->config.rth_en ? "enabled" : "disabled"); | ||
2589 | 2779 | ||
2590 | for (i = 0; i < vdev->no_of_vpath; i++) { | 2780 | for (i = 0; i < vdev->no_of_vpath; i++) { |
2591 | vpath = &vdev->vpaths[i]; | 2781 | vpath = &vdev->vpaths[i]; |
@@ -2623,7 +2813,7 @@ vxge_open(struct net_device *dev) | |||
2623 | } | 2813 | } |
2624 | 2814 | ||
2625 | /* Enable vpath to sniff all unicast/multicast traffic that not | 2815 | /* Enable vpath to sniff all unicast/multicast traffic that not |
2626 | * addressed to them. We allow promiscous mode for PF only | 2816 | * addressed to them. We allow promiscuous mode for PF only |
2627 | */ | 2817 | */ |
2628 | 2818 | ||
2629 | val64 = 0; | 2819 | val64 = 0; |
@@ -2671,9 +2861,10 @@ vxge_open(struct net_device *dev) | |||
2671 | vxge_os_timer(vdev->vp_reset_timer, | 2861 | vxge_os_timer(vdev->vp_reset_timer, |
2672 | vxge_poll_vp_reset, vdev, (HZ/2)); | 2862 | vxge_poll_vp_reset, vdev, (HZ/2)); |
2673 | 2863 | ||
2674 | if (vdev->vp_lockup_timer.function == NULL) | 2864 | /* There is no need to check for RxD leak and RxD lookup on Titan1A */ |
2675 | vxge_os_timer(vdev->vp_lockup_timer, | 2865 | if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) |
2676 | vxge_poll_vp_lockup, vdev, (HZ/2)); | 2866 | vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, |
2867 | HZ / 2); | ||
2677 | 2868 | ||
2678 | set_bit(__VXGE_STATE_CARD_UP, &vdev->state); | 2869 | set_bit(__VXGE_STATE_CARD_UP, &vdev->state); |
2679 | 2870 | ||
@@ -2698,6 +2889,10 @@ vxge_open(struct net_device *dev) | |||
2698 | } | 2889 | } |
2699 | 2890 | ||
2700 | netif_tx_start_all_queues(vdev->ndev); | 2891 | netif_tx_start_all_queues(vdev->ndev); |
2892 | |||
2893 | /* configure CI */ | ||
2894 | vxge_config_ci_for_tti_rti(vdev); | ||
2895 | |||
2701 | goto out0; | 2896 | goto out0; |
2702 | 2897 | ||
2703 | out2: | 2898 | out2: |
@@ -2720,8 +2915,8 @@ out0: | |||
2720 | return ret; | 2915 | return ret; |
2721 | } | 2916 | } |
2722 | 2917 | ||
2723 | /* Loop throught the mac address list and delete all the entries */ | 2918 | /* Loop through the mac address list and delete all the entries */ |
2724 | void vxge_free_mac_add_list(struct vxge_vpath *vpath) | 2919 | static void vxge_free_mac_add_list(struct vxge_vpath *vpath) |
2725 | { | 2920 | { |
2726 | 2921 | ||
2727 | struct list_head *entry, *next; | 2922 | struct list_head *entry, *next; |
@@ -2745,7 +2940,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev) | |||
2745 | } | 2940 | } |
2746 | } | 2941 | } |
2747 | 2942 | ||
2748 | int do_vxge_close(struct net_device *dev, int do_io) | 2943 | static int do_vxge_close(struct net_device *dev, int do_io) |
2749 | { | 2944 | { |
2750 | enum vxge_hw_status status; | 2945 | enum vxge_hw_status status; |
2751 | struct vxgedev *vdev; | 2946 | struct vxgedev *vdev; |
@@ -2755,8 +2950,8 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2755 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", | 2950 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", |
2756 | dev->name, __func__, __LINE__); | 2951 | dev->name, __func__, __LINE__); |
2757 | 2952 | ||
2758 | vdev = (struct vxgedev *)netdev_priv(dev); | 2953 | vdev = netdev_priv(dev); |
2759 | hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); | 2954 | hldev = pci_get_drvdata(vdev->pdev); |
2760 | 2955 | ||
2761 | if (unlikely(!is_vxge_card_up(vdev))) | 2956 | if (unlikely(!is_vxge_card_up(vdev))) |
2762 | return 0; | 2957 | return 0; |
@@ -2766,7 +2961,6 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2766 | while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) | 2961 | while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) |
2767 | msleep(50); | 2962 | msleep(50); |
2768 | 2963 | ||
2769 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | ||
2770 | if (do_io) { | 2964 | if (do_io) { |
2771 | /* Put the vpath back in normal mode */ | 2965 | /* Put the vpath back in normal mode */ |
2772 | vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); | 2966 | vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); |
@@ -2777,7 +2971,6 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2777 | struct vxge_hw_mrpcim_reg, | 2971 | struct vxge_hw_mrpcim_reg, |
2778 | rts_mgr_cbasin_cfg), | 2972 | rts_mgr_cbasin_cfg), |
2779 | &val64); | 2973 | &val64); |
2780 | |||
2781 | if (status == VXGE_HW_OK) { | 2974 | if (status == VXGE_HW_OK) { |
2782 | val64 &= ~vpath_vector; | 2975 | val64 &= ~vpath_vector; |
2783 | status = vxge_hw_mgmt_reg_write(vdev->devh, | 2976 | status = vxge_hw_mgmt_reg_write(vdev->devh, |
@@ -2789,7 +2982,7 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2789 | val64); | 2982 | val64); |
2790 | } | 2983 | } |
2791 | 2984 | ||
2792 | /* Remove the function 0 from promiscous mode */ | 2985 | /* Remove the function 0 from promiscuous mode */ |
2793 | vxge_hw_mgmt_reg_write(vdev->devh, | 2986 | vxge_hw_mgmt_reg_write(vdev->devh, |
2794 | vxge_hw_mgmt_reg_type_mrpcim, | 2987 | vxge_hw_mgmt_reg_type_mrpcim, |
2795 | 0, | 2988 | 0, |
@@ -2806,10 +2999,17 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2806 | 2999 | ||
2807 | smp_wmb(); | 3000 | smp_wmb(); |
2808 | } | 3001 | } |
2809 | del_timer_sync(&vdev->vp_lockup_timer); | 3002 | |
3003 | if (vdev->titan1) | ||
3004 | del_timer_sync(&vdev->vp_lockup_timer); | ||
2810 | 3005 | ||
2811 | del_timer_sync(&vdev->vp_reset_timer); | 3006 | del_timer_sync(&vdev->vp_reset_timer); |
2812 | 3007 | ||
3008 | if (do_io) | ||
3009 | vxge_hw_device_wait_receive_idle(hldev); | ||
3010 | |||
3011 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | ||
3012 | |||
2813 | /* Disable napi */ | 3013 | /* Disable napi */ |
2814 | if (vdev->config.intr_type != MSI_X) | 3014 | if (vdev->config.intr_type != MSI_X) |
2815 | napi_disable(&vdev->napi); | 3015 | napi_disable(&vdev->napi); |
@@ -2826,8 +3026,6 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2826 | if (do_io) | 3026 | if (do_io) |
2827 | vxge_hw_device_intr_disable(vdev->devh); | 3027 | vxge_hw_device_intr_disable(vdev->devh); |
2828 | 3028 | ||
2829 | mdelay(1000); | ||
2830 | |||
2831 | vxge_rem_isr(vdev); | 3029 | vxge_rem_isr(vdev); |
2832 | 3030 | ||
2833 | vxge_napi_del_all(vdev); | 3031 | vxge_napi_del_all(vdev); |
@@ -2856,8 +3054,7 @@ int do_vxge_close(struct net_device *dev, int do_io) | |||
2856 | * Return value: '0' on success and an appropriate (-)ve integer as | 3054 | * Return value: '0' on success and an appropriate (-)ve integer as |
2857 | * defined in errno.h file on failure. | 3055 | * defined in errno.h file on failure. |
2858 | */ | 3056 | */ |
2859 | int | 3057 | static int vxge_close(struct net_device *dev) |
2860 | vxge_close(struct net_device *dev) | ||
2861 | { | 3058 | { |
2862 | do_vxge_close(dev, 1); | 3059 | do_vxge_close(dev, 1); |
2863 | return 0; | 3060 | return 0; |
@@ -2914,34 +3111,24 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu) | |||
2914 | } | 3111 | } |
2915 | 3112 | ||
2916 | /** | 3113 | /** |
2917 | * vxge_get_stats | 3114 | * vxge_get_stats64 |
2918 | * @dev: pointer to the device structure | 3115 | * @dev: pointer to the device structure |
3116 | * @stats: pointer to struct rtnl_link_stats64 | ||
2919 | * | 3117 | * |
2920 | * Updates the device statistics structure. This function updates the device | ||
2921 | * statistics structure in the net_device structure and returns a pointer | ||
2922 | * to the same. | ||
2923 | */ | 3118 | */ |
2924 | static struct net_device_stats * | 3119 | static struct rtnl_link_stats64 * |
2925 | vxge_get_stats(struct net_device *dev) | 3120 | vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) |
2926 | { | 3121 | { |
2927 | struct vxgedev *vdev; | 3122 | struct vxgedev *vdev = netdev_priv(dev); |
2928 | struct net_device_stats *net_stats; | ||
2929 | int k; | 3123 | int k; |
2930 | 3124 | ||
2931 | vdev = netdev_priv(dev); | 3125 | /* net_stats already zeroed by caller */ |
2932 | |||
2933 | net_stats = &vdev->stats.net_stats; | ||
2934 | |||
2935 | memset(net_stats, 0, sizeof(struct net_device_stats)); | ||
2936 | |||
2937 | for (k = 0; k < vdev->no_of_vpath; k++) { | 3126 | for (k = 0; k < vdev->no_of_vpath; k++) { |
2938 | net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; | 3127 | net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; |
2939 | net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; | 3128 | net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; |
2940 | net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; | 3129 | net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; |
2941 | net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; | 3130 | net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; |
2942 | net_stats->rx_dropped += | 3131 | net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped; |
2943 | vdev->vpaths[k].ring.stats.rx_dropped; | ||
2944 | |||
2945 | net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; | 3132 | net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; |
2946 | net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; | 3133 | net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; |
2947 | net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; | 3134 | net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; |
@@ -2950,6 +3137,92 @@ vxge_get_stats(struct net_device *dev) | |||
2950 | return net_stats; | 3137 | return net_stats; |
2951 | } | 3138 | } |
2952 | 3139 | ||
3140 | static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) | ||
3141 | { | ||
3142 | enum vxge_hw_status status; | ||
3143 | u64 val64; | ||
3144 | |||
3145 | /* Timestamp is passed to the driver via the FCS, therefore we | ||
3146 | * must disable the FCS stripping by the adapter. Since this is | ||
3147 | * required for the driver to load (due to a hardware bug), | ||
3148 | * there is no need to do anything special here. | ||
3149 | */ | ||
3150 | val64 = VXGE_HW_XMAC_TIMESTAMP_EN | | ||
3151 | VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) | | ||
3152 | VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0); | ||
3153 | |||
3154 | status = vxge_hw_mgmt_reg_write(devh, | ||
3155 | vxge_hw_mgmt_reg_type_mrpcim, | ||
3156 | 0, | ||
3157 | offsetof(struct vxge_hw_mrpcim_reg, | ||
3158 | xmac_timestamp), | ||
3159 | val64); | ||
3160 | vxge_hw_device_flush_io(devh); | ||
3161 | devh->config.hwts_en = VXGE_HW_HWTS_ENABLE; | ||
3162 | return status; | ||
3163 | } | ||
3164 | |||
3165 | static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data) | ||
3166 | { | ||
3167 | struct hwtstamp_config config; | ||
3168 | int i; | ||
3169 | |||
3170 | if (copy_from_user(&config, data, sizeof(config))) | ||
3171 | return -EFAULT; | ||
3172 | |||
3173 | /* reserved for future extensions */ | ||
3174 | if (config.flags) | ||
3175 | return -EINVAL; | ||
3176 | |||
3177 | /* Transmit HW Timestamp not supported */ | ||
3178 | switch (config.tx_type) { | ||
3179 | case HWTSTAMP_TX_OFF: | ||
3180 | break; | ||
3181 | case HWTSTAMP_TX_ON: | ||
3182 | default: | ||
3183 | return -ERANGE; | ||
3184 | } | ||
3185 | |||
3186 | switch (config.rx_filter) { | ||
3187 | case HWTSTAMP_FILTER_NONE: | ||
3188 | vdev->rx_hwts = 0; | ||
3189 | config.rx_filter = HWTSTAMP_FILTER_NONE; | ||
3190 | break; | ||
3191 | |||
3192 | case HWTSTAMP_FILTER_ALL: | ||
3193 | case HWTSTAMP_FILTER_SOME: | ||
3194 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | ||
3195 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | ||
3196 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | ||
3197 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | ||
3198 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | ||
3199 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | ||
3200 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | ||
3201 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | ||
3202 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | ||
3203 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | ||
3204 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | ||
3205 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | ||
3206 | if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE) | ||
3207 | return -EFAULT; | ||
3208 | |||
3209 | vdev->rx_hwts = 1; | ||
3210 | config.rx_filter = HWTSTAMP_FILTER_ALL; | ||
3211 | break; | ||
3212 | |||
3213 | default: | ||
3214 | return -ERANGE; | ||
3215 | } | ||
3216 | |||
3217 | for (i = 0; i < vdev->no_of_vpath; i++) | ||
3218 | vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts; | ||
3219 | |||
3220 | if (copy_to_user(data, &config, sizeof(config))) | ||
3221 | return -EFAULT; | ||
3222 | |||
3223 | return 0; | ||
3224 | } | ||
3225 | |||
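From userspace, the new ioctl path is exercised with the stock SIOCSHWTSTAMP request; nothing here is vxge-specific beyond what the handler above accepts (Tx timestamping is rejected, and every PTP Rx filter is coerced to HWTSTAMP_FILTER_ALL). A minimal caller:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int enable_rx_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;		/* HWTSTAMP_TX_ON gets -ERANGE */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* what the driver supports */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}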
2953 | /** | 3226 | /** |
2954 | * vxge_ioctl | 3227 | * vxge_ioctl |
2955 | * @dev: Device pointer. | 3228 | * @dev: Device pointer. |
@@ -2962,7 +3235,20 @@ vxge_get_stats(struct net_device *dev) | |||
2962 | */ | 3235 | */ |
2963 | static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 3236 | static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2964 | { | 3237 | { |
2965 | return -EOPNOTSUPP; | 3238 | struct vxgedev *vdev = netdev_priv(dev); |
3239 | int ret; | ||
3240 | |||
3241 | switch (cmd) { | ||
3242 | case SIOCSHWTSTAMP: | ||
3243 | ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data); | ||
3244 | if (ret) | ||
3245 | return ret; | ||
3246 | break; | ||
3247 | default: | ||
3248 | return -EOPNOTSUPP; | ||
3249 | } | ||
3250 | |||
3251 | return 0; | ||
2966 | } | 3252 | } |
2967 | 3253 | ||
2968 | /** | 3254 | /** |
@@ -2973,18 +3259,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2973 | * This function is triggered if the Tx Queue is stopped | 3259 | * This function is triggered if the Tx Queue is stopped |
2974 | * for a pre-defined amount of time when the Interface is still up. | 3260 | * for a pre-defined amount of time when the Interface is still up. |
2975 | */ | 3261 | */ |
2976 | static void | 3262 | static void vxge_tx_watchdog(struct net_device *dev) |
2977 | vxge_tx_watchdog(struct net_device *dev) | ||
2978 | { | 3263 | { |
2979 | struct vxgedev *vdev; | 3264 | struct vxgedev *vdev; |
2980 | 3265 | ||
2981 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 3266 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
2982 | 3267 | ||
2983 | vdev = (struct vxgedev *)netdev_priv(dev); | 3268 | vdev = netdev_priv(dev); |
2984 | 3269 | ||
2985 | vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; | 3270 | vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; |
2986 | 3271 | ||
2987 | vxge_reset(vdev); | 3272 | schedule_work(&vdev->reset_task); |
2988 | vxge_debug_entryexit(VXGE_TRACE, | 3273 | vxge_debug_entryexit(VXGE_TRACE, |
2989 | "%s:%d Exiting...", __func__, __LINE__); | 3274 | "%s:%d Exiting...", __func__, __LINE__); |
2990 | } | 3275 | } |
@@ -3008,7 +3293,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | |||
3008 | 3293 | ||
3009 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 3294 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
3010 | 3295 | ||
3011 | vdev = (struct vxgedev *)netdev_priv(dev); | 3296 | vdev = netdev_priv(dev); |
3012 | 3297 | ||
3013 | vpath = &vdev->vpaths[0]; | 3298 | vpath = &vdev->vpaths[0]; |
3014 | if ((NULL == grp) && (vpath->is_open)) { | 3299 | if ((NULL == grp) && (vpath->is_open)) { |
@@ -3057,7 +3342,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
3057 | struct vxge_vpath *vpath; | 3342 | struct vxge_vpath *vpath; |
3058 | int vp_id; | 3343 | int vp_id; |
3059 | 3344 | ||
3060 | vdev = (struct vxgedev *)netdev_priv(dev); | 3345 | vdev = netdev_priv(dev); |
3061 | 3346 | ||
3062 | /* Add these vlan to the vid table */ | 3347 | /* Add these vlan to the vid table */ |
3063 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | 3348 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { |
@@ -3084,7 +3369,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
3084 | 3369 | ||
3085 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | 3370 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); |
3086 | 3371 | ||
3087 | vdev = (struct vxgedev *)netdev_priv(dev); | 3372 | vdev = netdev_priv(dev); |
3088 | 3373 | ||
3089 | vlan_group_set_device(vdev->vlgrp, vid, NULL); | 3374 | vlan_group_set_device(vdev->vlgrp, vid, NULL); |
3090 | 3375 | ||
@@ -3102,29 +3387,28 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
3102 | static const struct net_device_ops vxge_netdev_ops = { | 3387 | static const struct net_device_ops vxge_netdev_ops = { |
3103 | .ndo_open = vxge_open, | 3388 | .ndo_open = vxge_open, |
3104 | .ndo_stop = vxge_close, | 3389 | .ndo_stop = vxge_close, |
3105 | .ndo_get_stats = vxge_get_stats, | 3390 | .ndo_get_stats64 = vxge_get_stats64, |
3106 | .ndo_start_xmit = vxge_xmit, | 3391 | .ndo_start_xmit = vxge_xmit, |
3107 | .ndo_validate_addr = eth_validate_addr, | 3392 | .ndo_validate_addr = eth_validate_addr, |
3108 | .ndo_set_multicast_list = vxge_set_multicast, | 3393 | .ndo_set_multicast_list = vxge_set_multicast, |
3109 | |||
3110 | .ndo_do_ioctl = vxge_ioctl, | 3394 | .ndo_do_ioctl = vxge_ioctl, |
3111 | |||
3112 | .ndo_set_mac_address = vxge_set_mac_addr, | 3395 | .ndo_set_mac_address = vxge_set_mac_addr, |
3113 | .ndo_change_mtu = vxge_change_mtu, | 3396 | .ndo_change_mtu = vxge_change_mtu, |
3397 | .ndo_fix_features = vxge_fix_features, | ||
3398 | .ndo_set_features = vxge_set_features, | ||
3114 | .ndo_vlan_rx_register = vxge_vlan_rx_register, | 3399 | .ndo_vlan_rx_register = vxge_vlan_rx_register, |
3115 | .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, | 3400 | .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, |
3116 | .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, | 3401 | .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, |
3117 | |||
3118 | .ndo_tx_timeout = vxge_tx_watchdog, | 3402 | .ndo_tx_timeout = vxge_tx_watchdog, |
3119 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3403 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3120 | .ndo_poll_controller = vxge_netpoll, | 3404 | .ndo_poll_controller = vxge_netpoll, |
3121 | #endif | 3405 | #endif |
3122 | }; | 3406 | }; |
3123 | 3407 | ||
3124 | int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | 3408 | static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, |
3125 | struct vxge_config *config, | 3409 | struct vxge_config *config, |
3126 | int high_dma, int no_of_vpath, | 3410 | int high_dma, int no_of_vpath, |
3127 | struct vxgedev **vdev_out) | 3411 | struct vxgedev **vdev_out) |
3128 | { | 3412 | { |
3129 | struct net_device *ndev; | 3413 | struct net_device *ndev; |
3130 | enum vxge_hw_status status = VXGE_HW_OK; | 3414 | enum vxge_hw_status status = VXGE_HW_OK; |
@@ -3158,12 +3442,21 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | |||
3158 | vdev->devh = hldev; | 3442 | vdev->devh = hldev; |
3159 | vdev->pdev = hldev->pdev; | 3443 | vdev->pdev = hldev->pdev; |
3160 | memcpy(&vdev->config, config, sizeof(struct vxge_config)); | 3444 | memcpy(&vdev->config, config, sizeof(struct vxge_config)); |
3161 | vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ | 3445 | vdev->rx_hwts = 0; |
3446 | vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); | ||
3162 | 3447 | ||
3163 | SET_NETDEV_DEV(ndev, &vdev->pdev->dev); | 3448 | SET_NETDEV_DEV(ndev, &vdev->pdev->dev); |
3164 | 3449 | ||
3165 | ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | 3450 | ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | |
3166 | NETIF_F_HW_VLAN_FILTER; | 3451 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
3452 | NETIF_F_TSO | NETIF_F_TSO6 | | ||
3453 | NETIF_F_HW_VLAN_TX; | ||
3454 | if (vdev->config.rth_steering != NO_STEERING) | ||
3455 | ndev->hw_features |= NETIF_F_RXHASH; | ||
3456 | |||
3457 | ndev->features |= ndev->hw_features | | ||
3458 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | ||
3459 | |||
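The register path now fills in the then-new hw_features field: bits placed there are the ones ethtool may toggle at runtime (routed through vxge_fix_features/vxge_set_features), while bits the driver cannot turn off, VLAN RX and filtering here, are added to ndev->features only. The shape of the split, with placeholder macros standing in for the real bit sets:

ndev->hw_features = USER_TOGGLEABLE_BITS;		/* ethtool -K may flip */
ndev->features = ndev->hw_features | ALWAYS_ON_BITS;	/* active set today */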
3167 | /* Driver entry points */ | 3460 | /* Driver entry points */ |
3168 | ndev->irq = vdev->pdev->irq; | 3461 | ndev->irq = vdev->pdev->irq; |
3169 | ndev->base_addr = (unsigned long) hldev->bar0; | 3462 | ndev->base_addr = (unsigned long) hldev->bar0; |
@@ -3171,8 +3464,9 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | |||
3171 | ndev->netdev_ops = &vxge_netdev_ops; | 3464 | ndev->netdev_ops = &vxge_netdev_ops; |
3172 | 3465 | ||
3173 | ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; | 3466 | ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; |
3467 | INIT_WORK(&vdev->reset_task, vxge_reset); | ||
3174 | 3468 | ||
3175 | initialize_ethtool_ops(ndev); | 3469 | vxge_initialize_ethtool_ops(ndev); |
3176 | 3470 | ||
3177 | /* Allocate memory for vpath */ | 3471 | /* Allocate memory for vpath */ |
3178 | vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * | 3472 | vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * |
@@ -3181,13 +3475,10 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | |||
3181 | vxge_debug_init(VXGE_ERR, | 3475 | vxge_debug_init(VXGE_ERR, |
3182 | "%s: vpath memory allocation failed", | 3476 | "%s: vpath memory allocation failed", |
3183 | vdev->ndev->name); | 3477 | vdev->ndev->name); |
3184 | ret = -ENODEV; | 3478 | ret = -ENOMEM; |
3185 | goto _out1; | 3479 | goto _out1; |
3186 | } | 3480 | } |
3187 | 3481 | ||
3188 | ndev->features |= NETIF_F_SG; | ||
3189 | |||
3190 | ndev->features |= NETIF_F_HW_CSUM; | ||
3191 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), | 3482 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), |
3192 | "%s : checksuming enabled", __func__); | 3483 | "%s : checksuming enabled", __func__); |
3193 | 3484 | ||
@@ -3197,16 +3488,11 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | |||
3197 | "%s : using High DMA", __func__); | 3488 | "%s : using High DMA", __func__); |
3198 | } | 3489 | } |
3199 | 3490 | ||
3200 | ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; | 3491 | ret = register_netdev(ndev); |
3201 | 3492 | if (ret) { | |
3202 | if (vdev->config.gro_enable) | ||
3203 | ndev->features |= NETIF_F_GRO; | ||
3204 | |||
3205 | if (register_netdev(ndev)) { | ||
3206 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), | 3493 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), |
3207 | "%s: %s : device registration failed!", | 3494 | "%s: %s : device registration failed!", |
3208 | ndev->name, __func__); | 3495 | ndev->name, __func__); |
3209 | ret = -ENODEV; | ||
3210 | goto _out2; | 3496 | goto _out2; |
3211 | } | 3497 | } |
3212 | 3498 | ||
@@ -3223,6 +3509,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, | |||
3223 | "%s: Ethernet device registered", | 3509 | "%s: Ethernet device registered", |
3224 | ndev->name); | 3510 | ndev->name); |
3225 | 3511 | ||
3512 | hldev->ndev = ndev; | ||
3226 | *vdev_out = vdev; | 3513 | *vdev_out = vdev; |
3227 | 3514 | ||
3228 | /* Resetting the Device stats */ | 3515 | /* Resetting the Device stats */ |
@@ -3257,36 +3544,34 @@ _out0: | |||
3257 | * | 3544 | * |
3258 | * This function will unregister and free network device | 3545 | * This function will unregister and free network device |
3259 | */ | 3546 | */ |
3260 | void | 3547 | static void vxge_device_unregister(struct __vxge_hw_device *hldev) |
3261 | vxge_device_unregister(struct __vxge_hw_device *hldev) | ||
3262 | { | 3548 | { |
3263 | struct vxgedev *vdev; | 3549 | struct vxgedev *vdev; |
3264 | struct net_device *dev; | 3550 | struct net_device *dev; |
3265 | char buf[IFNAMSIZ]; | 3551 | char buf[IFNAMSIZ]; |
3266 | #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ | ||
3267 | (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) | ||
3268 | u32 level_trace; | ||
3269 | #endif | ||
3270 | 3552 | ||
3271 | dev = hldev->ndev; | 3553 | dev = hldev->ndev; |
3272 | vdev = netdev_priv(dev); | 3554 | vdev = netdev_priv(dev); |
3273 | #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ | ||
3274 | (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) | ||
3275 | level_trace = vdev->level_trace; | ||
3276 | #endif | ||
3277 | vxge_debug_entryexit(level_trace, | ||
3278 | "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); | ||
3279 | 3555 | ||
3280 | memcpy(buf, vdev->ndev->name, IFNAMSIZ); | 3556 | vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, |
3557 | __func__, __LINE__); | ||
3558 | |||
3559 | strncpy(buf, dev->name, IFNAMSIZ); | ||
3560 | |||
3561 | flush_work_sync(&vdev->reset_task); | ||
3281 | 3562 | ||
3282 | /* in 2.6 will call stop() if device is up */ | 3563 | /* in 2.6 will call stop() if device is up */ |
3283 | unregister_netdev(dev); | 3564 | unregister_netdev(dev); |
3284 | 3565 | ||
3285 | flush_scheduled_work(); | 3566 | kfree(vdev->vpaths); |
3567 | |||
3568 | /* we are safe to free it now */ | ||
3569 | free_netdev(dev); | ||
3286 | 3570 | ||
3287 | vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); | 3571 | vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", |
3288 | vxge_debug_entryexit(level_trace, | 3572 | buf); |
3289 | "%s: %s:%d Exiting...", buf, __func__, __LINE__); | 3573 | vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, |
3574 | __func__, __LINE__); | ||
3290 | } | 3575 | } |
3291 | 3576 | ||
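The unregister path above also swaps the deprecated global flush_scheduled_work() for a targeted flush of the one work item this driver owns. The pairing it relies on, using only calls from this patch (placement comments are illustrative):

/* vxge_device_register(): set up the deferred reset handler */
INIT_WORK(&vdev->reset_task, vxge_reset);

/* vxge_device_unregister(): make sure no reset is still in flight
 * before unregister_netdev(dev) tears the interface down */
flush_work_sync(&vdev->reset_task);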
3292 | /* | 3577 | /* |
@@ -3300,7 +3585,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, | |||
3300 | enum vxge_hw_event type, u64 vp_id) | 3585 | enum vxge_hw_event type, u64 vp_id) |
3301 | { | 3586 | { |
3302 | struct net_device *dev = hldev->ndev; | 3587 | struct net_device *dev = hldev->ndev; |
3303 | struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); | 3588 | struct vxgedev *vdev = netdev_priv(dev); |
3304 | struct vxge_vpath *vpath = NULL; | 3589 | struct vxge_vpath *vpath = NULL; |
3305 | int vpath_idx; | 3590 | int vpath_idx; |
3306 | 3591 | ||
@@ -3523,9 +3808,9 @@ static int __devinit vxge_config_vpaths( | |||
3523 | device_config->vp_config[i].tti.timer_ac_en = | 3808 | device_config->vp_config[i].tti.timer_ac_en = |
3524 | VXGE_HW_TIM_TIMER_AC_ENABLE; | 3809 | VXGE_HW_TIM_TIMER_AC_ENABLE; |
3525 | 3810 | ||
3526 | /* For msi-x with napi (each vector | 3811 | /* For msi-x with napi (each vector has a handler of its own) - |
3527 | has a handler of its own) - | 3812 | * Set CI to OFF for all vpaths |
3528 | Set CI to OFF for all vpaths */ | 3813 | */ |
3529 | device_config->vp_config[i].tti.timer_ci_en = | 3814 | device_config->vp_config[i].tti.timer_ci_en = |
3530 | VXGE_HW_TIM_TIMER_CI_DISABLE; | 3815 | VXGE_HW_TIM_TIMER_CI_DISABLE; |
3531 | 3816 | ||
@@ -3555,10 +3840,13 @@ static int __devinit vxge_config_vpaths( | |||
3555 | 3840 | ||
3556 | device_config->vp_config[i].ring.ring_blocks = | 3841 | device_config->vp_config[i].ring.ring_blocks = |
3557 | VXGE_HW_DEF_RING_BLOCKS; | 3842 | VXGE_HW_DEF_RING_BLOCKS; |
3843 | |||
3558 | device_config->vp_config[i].ring.buffer_mode = | 3844 | device_config->vp_config[i].ring.buffer_mode = |
3559 | VXGE_HW_RING_RXD_BUFFER_MODE_1; | 3845 | VXGE_HW_RING_RXD_BUFFER_MODE_1; |
3846 | |||
3560 | device_config->vp_config[i].ring.rxds_limit = | 3847 | device_config->vp_config[i].ring.rxds_limit = |
3561 | VXGE_HW_DEF_RING_RXDS_LIMIT; | 3848 | VXGE_HW_DEF_RING_RXDS_LIMIT; |
3849 | |||
3562 | device_config->vp_config[i].ring.scatter_mode = | 3850 | device_config->vp_config[i].ring.scatter_mode = |
3563 | VXGE_HW_RING_SCATTER_MODE_A; | 3851 | VXGE_HW_RING_SCATTER_MODE_A; |
3564 | 3852 | ||
@@ -3635,9 +3923,10 @@ static void __devinit vxge_device_config_init( | |||
3635 | break; | 3923 | break; |
3636 | 3924 | ||
3637 | case MSI_X: | 3925 | case MSI_X: |
3638 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; | 3926 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; |
3639 | break; | 3927 | break; |
3640 | } | 3928 | } |
3929 | |||
3641 | /* Timer period between device poll */ | 3930 | /* Timer period between device poll */ |
3642 | device_config->device_poll_millis = VXGE_TIMER_DELAY; | 3931 | device_config->device_poll_millis = VXGE_TIMER_DELAY; |
3643 | 3932 | ||
@@ -3649,16 +3938,10 @@ static void __devinit vxge_device_config_init( | |||
3649 | 3938 | ||
3650 | vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", | 3939 | vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", |
3651 | __func__); | 3940 | __func__); |
3652 | vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", | ||
3653 | device_config->dma_blockpool_initial); | ||
3654 | vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", | ||
3655 | device_config->dma_blockpool_max); | ||
3656 | vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", | 3941 | vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", |
3657 | device_config->intr_mode); | 3942 | device_config->intr_mode); |
3658 | vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", | 3943 | vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", |
3659 | device_config->device_poll_millis); | 3944 | device_config->device_poll_millis); |
3660 | vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", | ||
3661 | device_config->rts_mac_en); | ||
3662 | vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", | 3945 | vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", |
3663 | device_config->rth_en); | 3946 | device_config->rth_en); |
3664 | vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", | 3947 | vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", |
@@ -3734,22 +4017,10 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) | |||
3734 | vdev->config.tx_steering_type = 0; | 4017 | vdev->config.tx_steering_type = 0; |
3735 | } | 4018 | } |
3736 | 4019 | ||
3737 | if (vdev->config.gro_enable) { | ||
3738 | vxge_debug_init(VXGE_ERR, | ||
3739 | "%s: Generic receive offload enabled", | ||
3740 | vdev->ndev->name); | ||
3741 | } else | ||
3742 | vxge_debug_init(VXGE_TRACE, | ||
3743 | "%s: Generic receive offload disabled", | ||
3744 | vdev->ndev->name); | ||
3745 | |||
3746 | if (vdev->config.addr_learn_en) | 4020 | if (vdev->config.addr_learn_en) |
3747 | vxge_debug_init(VXGE_TRACE, | 4021 | vxge_debug_init(VXGE_TRACE, |
3748 | "%s: MAC Address learning enabled", vdev->ndev->name); | 4022 | "%s: MAC Address learning enabled", vdev->ndev->name); |
3749 | 4023 | ||
3750 | vxge_debug_init(VXGE_TRACE, | ||
3751 | "%s: Rx doorbell mode enabled", vdev->ndev->name); | ||
3752 | |||
3753 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | 4024 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
3754 | if (!vxge_bVALn(vpath_mask, i, 1)) | 4025 | if (!vxge_bVALn(vpath_mask, i, 1)) |
3755 | continue; | 4026 | continue; |
@@ -3762,14 +4033,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) | |||
3762 | ((struct __vxge_hw_device *)(vdev->devh))-> | 4033 | ((struct __vxge_hw_device *)(vdev->devh))-> |
3763 | config.vp_config[i].rpa_strip_vlan_tag | 4034 | config.vp_config[i].rpa_strip_vlan_tag |
3764 | ? "Enabled" : "Disabled"); | 4035 | ? "Enabled" : "Disabled"); |
3765 | vxge_debug_init(VXGE_TRACE, | ||
3766 | "%s: Ring blocks : %d", vdev->ndev->name, | ||
3767 | ((struct __vxge_hw_device *)(vdev->devh))-> | ||
3768 | config.vp_config[i].ring.ring_blocks); | ||
3769 | vxge_debug_init(VXGE_TRACE, | ||
3770 | "%s: Fifo blocks : %d", vdev->ndev->name, | ||
3771 | ((struct __vxge_hw_device *)(vdev->devh))-> | ||
3772 | config.vp_config[i].fifo.fifo_blocks); | ||
3773 | vxge_debug_ll_config(VXGE_TRACE, | 4036 | vxge_debug_ll_config(VXGE_TRACE, |
3774 | "%s: Max frags : %d", vdev->ndev->name, | 4037 | "%s: Max frags : %d", vdev->ndev->name, |
3775 | ((struct __vxge_hw_device *)(vdev->devh))-> | 4038 | ((struct __vxge_hw_device *)(vdev->devh))-> |
@@ -3809,8 +4072,7 @@ static int vxge_pm_resume(struct pci_dev *pdev) | |||
3809 | static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, | 4072 | static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, |
3810 | pci_channel_state_t state) | 4073 | pci_channel_state_t state) |
3811 | { | 4074 | { |
3812 | struct __vxge_hw_device *hldev = | 4075 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
3813 | (struct __vxge_hw_device *) pci_get_drvdata(pdev); | ||
3814 | struct net_device *netdev = hldev->ndev; | 4076 | struct net_device *netdev = hldev->ndev; |
3815 | 4077 | ||
3816 | netif_device_detach(netdev); | 4078 | netif_device_detach(netdev); |
@@ -3839,8 +4101,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, | |||
3839 | */ | 4101 | */ |
3840 | static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) | 4102 | static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) |
3841 | { | 4103 | { |
3842 | struct __vxge_hw_device *hldev = | 4104 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
3843 | (struct __vxge_hw_device *) pci_get_drvdata(pdev); | ||
3844 | struct net_device *netdev = hldev->ndev; | 4105 | struct net_device *netdev = hldev->ndev; |
3845 | 4106 | ||
3846 | struct vxgedev *vdev = netdev_priv(netdev); | 4107 | struct vxgedev *vdev = netdev_priv(netdev); |
@@ -3851,7 +4112,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) | |||
3851 | } | 4112 | } |
3852 | 4113 | ||
3853 | pci_set_master(pdev); | 4114 | pci_set_master(pdev); |
3854 | vxge_reset(vdev); | 4115 | do_vxge_reset(vdev, VXGE_LL_FULL_RESET); |
3855 | 4116 | ||
3856 | return PCI_ERS_RESULT_RECOVERED; | 4117 | return PCI_ERS_RESULT_RECOVERED; |
3857 | } | 4118 | } |
@@ -3865,8 +4126,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) | |||
3865 | */ | 4126 | */ |
3866 | static void vxge_io_resume(struct pci_dev *pdev) | 4127 | static void vxge_io_resume(struct pci_dev *pdev) |
3867 | { | 4128 | { |
3868 | struct __vxge_hw_device *hldev = | 4129 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
3869 | (struct __vxge_hw_device *) pci_get_drvdata(pdev); | ||
3870 | struct net_device *netdev = hldev->ndev; | 4130 | struct net_device *netdev = hldev->ndev; |
3871 | 4131 | ||
3872 | if (netif_running(netdev)) { | 4132 | if (netif_running(netdev)) { |
@@ -3910,6 +4170,157 @@ static inline u32 vxge_get_num_vfs(u64 function_mode) | |||
3910 | return num_functions; | 4170 | return num_functions; |
3911 | } | 4171 | } |
3912 | 4172 | ||
4173 | int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) | ||
4174 | { | ||
4175 | struct __vxge_hw_device *hldev = vdev->devh; | ||
4176 | u32 maj, min, bld, cmaj, cmin, cbld; | ||
4177 | enum vxge_hw_status status; | ||
4178 | const struct firmware *fw; | ||
4179 | int ret; | ||
4180 | |||
4181 | ret = request_firmware(&fw, fw_name, &vdev->pdev->dev); | ||
4182 | if (ret) { | ||
4183 | vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found", | ||
4184 | VXGE_DRIVER_NAME, fw_name); | ||
4185 | goto out; | ||
4186 | } | ||
4187 | |||
4188 | /* Load the new firmware onto the adapter */ | ||
4189 | status = vxge_update_fw_image(hldev, fw->data, fw->size); | ||
4190 | if (status != VXGE_HW_OK) { | ||
4191 | vxge_debug_init(VXGE_ERR, | ||
4192 | "%s: FW image download to adapter failed '%s'.", | ||
4193 | VXGE_DRIVER_NAME, fw_name); | ||
4194 | ret = -EIO; | ||
4195 | goto out; | ||
4196 | } | ||
4197 | |||
4198 | /* Read the version of the new firmware */ | ||
4199 | status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld); | ||
4200 | if (status != VXGE_HW_OK) { | ||
4201 | vxge_debug_init(VXGE_ERR, | ||
4202 | "%s: Upgrade read version failed '%s'.", | ||
4203 | VXGE_DRIVER_NAME, fw_name); | ||
4204 | ret = -EIO; | ||
4205 | goto out; | ||
4206 | } | ||
4207 | |||
4208 | cmaj = vdev->config.device_hw_info.fw_version.major; | ||
4209 | cmin = vdev->config.device_hw_info.fw_version.minor; | ||
4210 | cbld = vdev->config.device_hw_info.fw_version.build; | ||
4211 | /* It's possible the version in /lib/firmware is not the latest version. | ||
4212 | * If so, we could get into a loop of trying to upgrade to the latest | ||
4213 | * and flashing the older version. | ||
4214 | */ | ||
4215 | if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) && | ||
4216 | !override) { | ||
4217 | ret = -EINVAL; | ||
4218 | goto out; | ||
4219 | } | ||
4220 | |||
4221 | printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n", | ||
4222 | maj, min, bld); | ||
4223 | |||
4224 | /* Flash the adapter with the new firmware */ | ||
4225 | status = vxge_hw_flash_fw(hldev); | ||
4226 | if (status != VXGE_HW_OK) { | ||
4227 | vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.", | ||
4228 | VXGE_DRIVER_NAME, fw_name); | ||
4229 | ret = -EIO; | ||
4230 | goto out; | ||
4231 | } | ||
4232 | |||
4233 | printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be " | ||
4234 | "hard reset before using, thus requiring a system reboot or a " | ||
4235 | "hotplug event.\n"); | ||
4236 | |||
4237 | out: | ||
4238 | release_firmware(fw); | ||
4239 | return ret; | ||
4240 | } | ||
4241 | |||
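vxge_fw_upgrade() above leans on the standard request_firmware() contract: the blob is looked up under /lib/firmware by name, and on failure the pointer is left NULL, which release_firmware() tolerates, so a single out: label covers every exit. The skeleton, stripped of the vxge-specific steps:

static int fw_flash_skeleton(struct vxgedev *vdev, const char *fw_name)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
	if (ret)
		goto out;	/* fw stays NULL; release below is a no-op */

	/* ... push fw->data / fw->size to the adapter, verify, flash ... */

out:
	release_firmware(fw);	/* safe on both paths */
	return ret;
}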
4242 | static int vxge_probe_fw_update(struct vxgedev *vdev) | ||
4243 | { | ||
4244 | u32 maj, min, bld; | ||
4245 | int ret, gpxe = 0; | ||
4246 | char *fw_name; | ||
4247 | |||
4248 | maj = vdev->config.device_hw_info.fw_version.major; | ||
4249 | min = vdev->config.device_hw_info.fw_version.minor; | ||
4250 | bld = vdev->config.device_hw_info.fw_version.build; | ||
4251 | |||
4252 | if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER) | ||
4253 | return 0; | ||
4254 | |||
4255 | /* Ignore the build number when determining if the current firmware is | ||
4256 | * "too new" to load the driver | ||
4257 | */ | ||
4258 | if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) { | ||
4259 | vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known " | ||
4260 | "version, unable to load driver\n", | ||
4261 | VXGE_DRIVER_NAME); | ||
4262 | return -EINVAL; | ||
4263 | } | ||
4264 | |||
4265 | /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to | ||
4266 | * work with this driver. | ||
4267 | */ | ||
4268 | if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) { | ||
4269 | vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be " | ||
4270 | "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld); | ||
4271 | return -EINVAL; | ||
4272 | } | ||
4273 | |||
4274 | /* No firmware file was specified, so detect whether a gPXE image is present */ | ||
4275 | if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) { | ||
4276 | int i; | ||
4277 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) | ||
4278 | if (vdev->devh->eprom_versions[i]) { | ||
4279 | gpxe = 1; | ||
4280 | break; | ||
4281 | } | ||
4282 | } | ||
4283 | if (gpxe) | ||
4284 | fw_name = "vxge/X3fw-pxe.ncf"; | ||
4285 | else | ||
4286 | fw_name = "vxge/X3fw.ncf"; | ||
4287 | |||
4288 | ret = vxge_fw_upgrade(vdev, fw_name, 0); | ||
4289 | /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on | ||
4290 | * probe, so ignore them | ||
4291 | */ | ||
4292 | if (ret != -EINVAL && ret != -ENOENT) | ||
4293 | return -EIO; | ||
4294 | else | ||
4295 | ret = 0; | ||
4296 | |||
4297 | if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > | ||
4298 | VXGE_FW_VER(maj, min, 0)) { | ||
4299 | vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" | ||
4300 | " be used with this driver.\n" | ||
4301 | "Please get the latest version from " | ||
4302 | "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE", | ||
4303 | VXGE_DRIVER_NAME, maj, min, bld); | ||
4304 | return -EINVAL; | ||
4305 | } | ||
4306 | |||
4307 | return ret; | ||
4308 | } | ||
4309 | |||
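All of the gating above works because VXGE_FW_VER() collapses a (major, minor, build) triple into a single integer that orders the same way releases do, so plain comparison operators apply. The real macro lives in the driver headers; a plausible stand-in (field widths assumed, name hypothetical):

#define FW_VER_SKETCH(maj, min, bld) \
	(((u64)(maj) << 32) | ((u64)(min) << 16) | (u64)(bld))

/* FW_VER_SKETCH(1, 8, 0) > FW_VER_SKETCH(1, 4, 4), as the checks expect */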
4310 | static int __devinit is_sriov_initialized(struct pci_dev *pdev) | ||
4311 | { | ||
4312 | int pos; | ||
4313 | u16 ctrl; | ||
4314 | |||
4315 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | ||
4316 | if (pos) { | ||
4317 | pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl); | ||
4318 | if (ctrl & PCI_SRIOV_CTRL_VFE) | ||
4319 | return 1; | ||
4320 | } | ||
4321 | return 0; | ||
4322 | } | ||
4323 | |||
3913 | /** | 4324 | /** |
3914 | * vxge_probe | 4325 | * vxge_probe |
3915 | * @pdev : structure containing the PCI related information of the device. | 4326 | * @pdev : structure containing the PCI related information of the device. |
@@ -3924,7 +4335,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode) | |||
3924 | static int __devinit | 4335 | static int __devinit |
3925 | vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | 4336 | vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) |
3926 | { | 4337 | { |
3927 | struct __vxge_hw_device *hldev; | 4338 | struct __vxge_hw_device *hldev; |
3928 | enum vxge_hw_status status; | 4339 | enum vxge_hw_status status; |
3929 | int ret; | 4340 | int ret; |
3930 | int high_dma = 0; | 4341 | int high_dma = 0; |
@@ -3947,9 +4358,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
3947 | attr.pdev = pdev; | 4358 | attr.pdev = pdev; |
3948 | 4359 | ||
3949 | /* In SRIOV-17 mode, functions of the same adapter | 4360 | /* In SRIOV-17 mode, functions of the same adapter |
3950 | * can be deployed on different buses */ | 4361 | * can be deployed on different buses |
3951 | if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || | 4362 | */ |
3952 | (device != PCI_SLOT(pdev->devfn)))) | 4363 | if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && |
4364 | !pdev->is_virtfn) | ||
3953 | new_device = 1; | 4365 | new_device = 1; |
3954 | 4366 | ||
3955 | bus = pdev->bus->number; | 4367 | bus = pdev->bus->number; |
@@ -3967,6 +4379,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
3967 | driver_config->config_dev_cnt = 0; | 4379 | driver_config->config_dev_cnt = 0; |
3968 | driver_config->total_dev_cnt = 0; | 4380 | driver_config->total_dev_cnt = 0; |
3969 | } | 4381 | } |
4382 | |||
3970 | /* Now making the CPU based no of vpath calculation | 4383 | /* Now making the CPU based no of vpath calculation |
3971 | * applicable for individual functions as well. | 4384 | * applicable for individual functions as well. |
3972 | */ | 4385 | */ |
@@ -3989,11 +4402,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
3989 | goto _exit0; | 4402 | goto _exit0; |
3990 | } | 4403 | } |
3991 | 4404 | ||
3992 | ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); | 4405 | ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL); |
3993 | if (!ll_config) { | 4406 | if (!ll_config) { |
3994 | ret = -ENOMEM; | 4407 | ret = -ENOMEM; |
3995 | vxge_debug_init(VXGE_ERR, | 4408 | vxge_debug_init(VXGE_ERR, |
3996 | "ll_config : malloc failed %s %d", | 4409 | "device_config : malloc failed %s %d", |
3997 | __FILE__, __LINE__); | 4410 | __FILE__, __LINE__); |
3998 | goto _exit0; | 4411 | goto _exit0; |
3999 | } | 4412 | } |
@@ -4037,10 +4450,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4037 | goto _exit1; | 4450 | goto _exit1; |
4038 | } | 4451 | } |
4039 | 4452 | ||
4040 | if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { | 4453 | ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); |
4454 | if (ret) { | ||
4041 | vxge_debug_init(VXGE_ERR, | 4455 | vxge_debug_init(VXGE_ERR, |
4042 | "%s : request regions failed", __func__); | 4456 | "%s : request regions failed", __func__); |
4043 | ret = -ENODEV; | ||
4044 | goto _exit1; | 4457 | goto _exit1; |
4045 | } | 4458 | } |
4046 | 4459 | ||
@@ -4068,16 +4481,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4068 | goto _exit3; | 4481 | goto _exit3; |
4069 | } | 4482 | } |
4070 | 4483 | ||
4071 | if (ll_config->device_hw_info.fw_version.major != | ||
4072 | VXGE_DRIVER_FW_VERSION_MAJOR) { | ||
4073 | vxge_debug_init(VXGE_ERR, | ||
4074 | "%s: Incorrect firmware version." | ||
4075 | "Please upgrade the firmware to version 1.x.x", | ||
4076 | VXGE_DRIVER_NAME); | ||
4077 | ret = -EINVAL; | ||
4078 | goto _exit3; | ||
4079 | } | ||
4080 | |||
4081 | vpath_mask = ll_config->device_hw_info.vpath_mask; | 4484 | vpath_mask = ll_config->device_hw_info.vpath_mask; |
4082 | if (vpath_mask == 0) { | 4485 | if (vpath_mask == 0) { |
4083 | vxge_debug_ll_config(VXGE_TRACE, | 4486 | vxge_debug_ll_config(VXGE_TRACE, |
@@ -4106,14 +4509,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4106 | num_vfs = vxge_get_num_vfs(function_mode) - 1; | 4509 | num_vfs = vxge_get_num_vfs(function_mode) - 1; |
4107 | 4510 | ||
4108 | /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ | 4511 | /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ |
4109 | if (is_sriov(function_mode) && (max_config_dev > 1) && | 4512 | if (is_sriov(function_mode) && !is_sriov_initialized(pdev) && |
4110 | (ll_config->intr_type != INTA) && | 4513 | (ll_config->intr_type != INTA)) { |
4111 | (is_privileged == VXGE_HW_OK)) { | 4514 | ret = pci_enable_sriov(pdev, num_vfs); |
4112 | ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) | ||
4113 | ? (max_config_dev - 1) : num_vfs); | ||
4114 | if (ret) | 4515 | if (ret) |
4115 | vxge_debug_ll_config(VXGE_ERR, | 4516 | vxge_debug_ll_config(VXGE_ERR, |
4116 | "Failed in enabling SRIOV mode: %d\n", ret); | 4517 | "Failed in enabling SRIOV mode: %d\n", ret); |
4518 | /* No need to fail out, as an error here is non-fatal */ | ||
4117 | } | 4519 | } |
4118 | 4520 | ||
4119 | /* | 4521 | /* |
@@ -4141,46 +4543,93 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4141 | goto _exit3; | 4543 | goto _exit3; |
4142 | } | 4544 | } |
4143 | 4545 | ||
4546 | if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, | ||
4547 | ll_config->device_hw_info.fw_version.minor, | ||
4548 | ll_config->device_hw_info.fw_version.build) >= | ||
4549 | VXGE_EPROM_FW_VER) { | ||
4550 | struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES]; | ||
4551 | |||
4552 | status = vxge_hw_vpath_eprom_img_ver_get(hldev, img); | ||
4553 | if (status != VXGE_HW_OK) { | ||
4554 | vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed", | ||
4555 | VXGE_DRIVER_NAME); | ||
4556 | /* This is a non-fatal error, continue */ | ||
4557 | } | ||
4558 | |||
4559 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { | ||
4560 | hldev->eprom_versions[i] = img[i].version; | ||
4561 | if (!img[i].is_valid) | ||
4562 | break; | ||
4563 | vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " | ||
4564 | "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, | ||
4565 | VXGE_EPROM_IMG_MAJOR(img[i].version), | ||
4566 | VXGE_EPROM_IMG_MINOR(img[i].version), | ||
4567 | VXGE_EPROM_IMG_FIX(img[i].version), | ||
4568 | VXGE_EPROM_IMG_BUILD(img[i].version)); | ||
4569 | } | ||
4570 | } | ||
4571 | |||
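The version gate above works because VXGE_FW_VER() folds major/minor/build into one monotonically comparable integer. A standalone check of that property, assuming the usual shift-based packing (the macro body is not shown in this hunk):

#include <stdio.h>

/* Assumed packing for VXGE_FW_VER; the real macro body is not in this hunk. */
#define VXGE_FW_VER(maj, min, bld)	(((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
	printf("1.6.0 -> 0x%x, 1.5.3 -> 0x%x\n",
	       VXGE_FW_VER(1, 6, 0), VXGE_FW_VER(1, 5, 3));
	return 0;	/* 0x10600 > 0x10503, so the >= gate orders versions correctly */
}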
4144 | /* If FCS stripping is not disabled in the MAC, fail the driver load */ | 4572 | /* If FCS stripping is not disabled in the MAC, fail the driver load */ |
4145 | if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { | 4573 | status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask); |
4146 | vxge_debug_init(VXGE_ERR, | 4574 | if (status != VXGE_HW_OK) { |
4147 | "%s: FCS stripping is not disabled in MAC" | 4575 | vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC" |
4148 | " failing driver load", VXGE_DRIVER_NAME); | 4576 | " failing driver load", VXGE_DRIVER_NAME); |
4149 | ret = -EINVAL; | 4577 | ret = -EINVAL; |
4150 | goto _exit4; | 4578 | goto _exit4; |
4151 | } | 4579 | } |
4152 | 4580 | ||
4581 | /* Always enable HWTS. This always invalidates the FCS, because | ||
4582 | * HWTS stores the timestamp in the bytes the FCS would occupy. | ||
4583 | * The HW FCS check still correctly determines whether the checksum | ||
4584 | * is valid, and the driver strips the FCS anyway, so no | ||
4585 | * functionality is lost. Since HWTS is always enabled, the ioctl | ||
4586 | * call now simply selects whether or not the driver pays | ||
4587 | * attention to it. | ||
4588 | */ | ||
4589 | if (is_privileged == VXGE_HW_OK) { | ||
4590 | status = vxge_timestamp_config(hldev); | ||
4591 | if (status != VXGE_HW_OK) { | ||
4592 | vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed", | ||
4593 | VXGE_DRIVER_NAME); | ||
4594 | ret = -EFAULT; | ||
4595 | goto _exit4; | ||
4596 | } | ||
4597 | } | ||
4598 | |||
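The comment above has a concrete consequence for the receive path: with HWTS always on, the last four bytes of every frame carry the timestamp instead of a valid FCS, so the driver must consume and strip that trailer before handing the skb up. A minimal sketch of the idea, assuming a 32-bit big-endian trailer in nanoseconds (neither detail is confirmed by this patch); get_unaligned_be32() and skb_hwtstamps() come from <asm/unaligned.h> and <linux/skbuff.h>:

/* Hypothetical helper (name and trailer layout assumed): pull the
 * 32-bit stamp out of the bytes where the FCS would normally sit,
 * then trim them so the stack never sees the bogus FCS.
 */
static void vxge_rx_strip_hwts(struct sk_buff *skb, u32 rx_hwts)
{
	u32 ts;

	if (!rx_hwts || skb->len < 4)
		return;

	ts = get_unaligned_be32(skb->data + skb->len - 4);  /* assumed layout */
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts);     /* assumed units */
	__skb_trim(skb, skb->len - 4);	/* hide the invalid FCS bytes */
}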
4153 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); | 4599 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); |
4154 | 4600 | ||
4155 | /* set private device info */ | 4601 | /* set private device info */ |
4156 | pci_set_drvdata(pdev, hldev); | 4602 | pci_set_drvdata(pdev, hldev); |
4157 | 4603 | ||
4158 | ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; | ||
4159 | ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; | 4604 | ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; |
4160 | ll_config->addr_learn_en = addr_learn_en; | 4605 | ll_config->addr_learn_en = addr_learn_en; |
4161 | ll_config->rth_algorithm = RTH_ALG_JENKINS; | 4606 | ll_config->rth_algorithm = RTH_ALG_JENKINS; |
4162 | ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; | 4607 | ll_config->rth_hash_type_tcpipv4 = 1; |
4163 | ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; | 4608 | ll_config->rth_hash_type_ipv4 = 0; |
4164 | ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; | 4609 | ll_config->rth_hash_type_tcpipv6 = 0; |
4165 | ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; | 4610 | ll_config->rth_hash_type_ipv6 = 0; |
4166 | ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; | 4611 | ll_config->rth_hash_type_tcpipv6ex = 0; |
4167 | ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; | 4612 | ll_config->rth_hash_type_ipv6ex = 0; |
4168 | ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; | 4613 | ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; |
4169 | ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | 4614 | ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; |
4170 | ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | 4615 | ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; |
4171 | 4616 | ||
4172 | if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, | 4617 | ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, |
4173 | &vdev)) { | 4618 | &vdev); |
4619 | if (ret) { | ||
4174 | ret = -EINVAL; | 4620 | ret = -EINVAL; |
4175 | goto _exit4; | 4621 | goto _exit4; |
4176 | } | 4622 | } |
4177 | 4623 | ||
4624 | ret = vxge_probe_fw_update(vdev); | ||
4625 | if (ret) | ||
4626 | goto _exit5; | ||
4627 | |||
4178 | vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); | 4628 | vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); |
4179 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), | 4629 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), |
4180 | vxge_hw_device_trace_level_get(hldev)); | 4630 | vxge_hw_device_trace_level_get(hldev)); |
4181 | 4631 | ||
4182 | /* set private HW device info */ | 4632 | /* set private HW device info */ |
4183 | hldev->ndev = vdev->ndev; | ||
4184 | vdev->mtu = VXGE_HW_DEFAULT_MTU; | 4633 | vdev->mtu = VXGE_HW_DEFAULT_MTU; |
4185 | vdev->bar0 = attr.bar0; | 4634 | vdev->bar0 = attr.bar0; |
4186 | vdev->max_vpath_supported = max_vpath_supported; | 4635 | vdev->max_vpath_supported = max_vpath_supported; |
@@ -4274,15 +4723,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4274 | 4723 | ||
4275 | /* Copy the station mac address to the list */ | 4724 | /* Copy the station mac address to the list */ |
4276 | for (i = 0; i < vdev->no_of_vpath; i++) { | 4725 | for (i = 0; i < vdev->no_of_vpath; i++) { |
4277 | entry = (struct vxge_mac_addrs *) | 4726 | entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL); |
4278 | kzalloc(sizeof(struct vxge_mac_addrs), | ||
4279 | GFP_KERNEL); | ||
4280 | if (NULL == entry) { | 4727 | if (NULL == entry) { |
4281 | vxge_debug_init(VXGE_ERR, | 4728 | vxge_debug_init(VXGE_ERR, |
4282 | "%s: mac_addr_list : memory allocation failed", | 4729 | "%s: mac_addr_list : memory allocation failed", |
4283 | vdev->ndev->name); | 4730 | vdev->ndev->name); |
4284 | ret = -EPERM; | 4731 | ret = -EPERM; |
4285 | goto _exit5; | 4732 | goto _exit6; |
4286 | } | 4733 | } |
4287 | macaddr = (u8 *)&entry->macaddr; | 4734 | macaddr = (u8 *)&entry->macaddr; |
4288 | memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); | 4735 | memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); |
@@ -4322,25 +4769,26 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
4322 | kfree(ll_config); | 4769 | kfree(ll_config); |
4323 | return 0; | 4770 | return 0; |
4324 | 4771 | ||
4325 | _exit5: | 4772 | _exit6: |
4326 | for (i = 0; i < vdev->no_of_vpath; i++) | 4773 | for (i = 0; i < vdev->no_of_vpath; i++) |
4327 | vxge_free_mac_add_list(&vdev->vpaths[i]); | 4774 | vxge_free_mac_add_list(&vdev->vpaths[i]); |
4328 | 4775 | _exit5: | |
4329 | vxge_device_unregister(hldev); | 4776 | vxge_device_unregister(hldev); |
4330 | _exit4: | 4777 | _exit4: |
4331 | pci_disable_sriov(pdev); | 4778 | pci_set_drvdata(pdev, NULL); |
4332 | vxge_hw_device_terminate(hldev); | 4779 | vxge_hw_device_terminate(hldev); |
4780 | pci_disable_sriov(pdev); | ||
4333 | _exit3: | 4781 | _exit3: |
4334 | iounmap(attr.bar0); | 4782 | iounmap(attr.bar0); |
4335 | _exit2: | 4783 | _exit2: |
4336 | pci_release_regions(pdev); | 4784 | pci_release_region(pdev, 0); |
4337 | _exit1: | 4785 | _exit1: |
4338 | pci_disable_device(pdev); | 4786 | pci_disable_device(pdev); |
4339 | _exit0: | 4787 | _exit0: |
4340 | kfree(ll_config); | 4788 | kfree(ll_config); |
4341 | kfree(device_config); | 4789 | kfree(device_config); |
4342 | driver_config->config_dev_cnt--; | 4790 | driver_config->config_dev_cnt--; |
4343 | pci_set_drvdata(pdev, NULL); | 4791 | driver_config->total_dev_cnt--; |
4344 | return ret; | 4792 | return ret; |
4345 | } | 4793 | } |
4346 | 4794 | ||
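The relabeled _exit4/_exit5/_exit6 targets follow the standard kernel unwind idiom: each label releases exactly what was acquired before the failing step, falling through in reverse acquisition order, which is why inserting the vxge_probe_fw_update() step shifted every label below it. A self-contained illustration with generic names (not driver code):

#include <stdlib.h>

/* Generic sketch of the stacked-goto unwind vxge_probe() uses: each
 * failure jumps to the label that releases everything acquired so
 * far, and the labels fall through in reverse acquisition order.
 */
static int probe_like(void)
{
	char *a, *b;
	int ret;

	a = malloc(16);
	if (!a) {
		ret = -1;
		goto exit0;		/* nothing to undo yet */
	}

	b = malloc(16);
	if (!b) {
		ret = -2;
		goto exit1;		/* undo only 'a' */
	}

	free(b);			/* success path for the demo */
	free(a);
	return 0;

exit1:
	free(a);
exit0:
	return ret;
}

int main(void)
{
	return probe_like() ? EXIT_FAILURE : EXIT_SUCCESS;
}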
@@ -4350,61 +4798,39 @@ _exit0: | |||
4350 | * Description: This function is called by the Pci subsystem to release a | 4798 | * Description: This function is called by the Pci subsystem to release a |
4351 | * PCI device and free up all resource held up by the device. | 4799 | * PCI device and free up all resource held up by the device. |
4352 | */ | 4800 | */ |
4353 | static void __devexit | 4801 | static void __devexit vxge_remove(struct pci_dev *pdev) |
4354 | vxge_remove(struct pci_dev *pdev) | ||
4355 | { | 4802 | { |
4356 | struct __vxge_hw_device *hldev; | 4803 | struct __vxge_hw_device *hldev; |
4357 | struct vxgedev *vdev = NULL; | 4804 | struct vxgedev *vdev; |
4358 | struct net_device *dev; | 4805 | int i; |
4359 | int i = 0; | ||
4360 | #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ | ||
4361 | (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) | ||
4362 | u32 level_trace; | ||
4363 | #endif | ||
4364 | |||
4365 | hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); | ||
4366 | 4806 | ||
4807 | hldev = pci_get_drvdata(pdev); | ||
4367 | if (hldev == NULL) | 4808 | if (hldev == NULL) |
4368 | return; | 4809 | return; |
4369 | dev = hldev->ndev; | ||
4370 | vdev = netdev_priv(dev); | ||
4371 | 4810 | ||
4372 | #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ | 4811 | vdev = netdev_priv(hldev->ndev); |
4373 | (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) | ||
4374 | level_trace = vdev->level_trace; | ||
4375 | #endif | ||
4376 | vxge_debug_entryexit(level_trace, | ||
4377 | "%s:%d", __func__, __LINE__); | ||
4378 | 4812 | ||
4379 | vxge_debug_init(level_trace, | 4813 | vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); |
4380 | "%s : removing PCI device...", __func__); | 4814 | vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", |
4381 | vxge_device_unregister(hldev); | 4815 | __func__); |
4382 | 4816 | ||
4383 | for (i = 0; i < vdev->no_of_vpath; i++) { | 4817 | for (i = 0; i < vdev->no_of_vpath; i++) |
4384 | vxge_free_mac_add_list(&vdev->vpaths[i]); | 4818 | vxge_free_mac_add_list(&vdev->vpaths[i]); |
4385 | vdev->vpaths[i].mcast_addr_cnt = 0; | ||
4386 | vdev->vpaths[i].mac_addr_cnt = 0; | ||
4387 | } | ||
4388 | |||
4389 | kfree(vdev->vpaths); | ||
4390 | |||
4391 | iounmap(vdev->bar0); | ||
4392 | |||
4393 | pci_disable_sriov(pdev); | ||
4394 | |||
4395 | /* we are safe to free it now */ | ||
4396 | free_netdev(dev); | ||
4397 | |||
4398 | vxge_debug_init(level_trace, | ||
4399 | "%s:%d Device unregistered", __func__, __LINE__); | ||
4400 | 4819 | ||
4820 | vxge_device_unregister(hldev); | ||
4821 | pci_set_drvdata(pdev, NULL); | ||
4822 | /* Do not call pci_disable_sriov here, as it will break child devices */ | ||
4401 | vxge_hw_device_terminate(hldev); | 4823 | vxge_hw_device_terminate(hldev); |
4402 | 4824 | iounmap(vdev->bar0); | |
4825 | pci_release_region(pdev, 0); | ||
4403 | pci_disable_device(pdev); | 4826 | pci_disable_device(pdev); |
4404 | pci_release_regions(pdev); | 4827 | driver_config->config_dev_cnt--; |
4405 | pci_set_drvdata(pdev, NULL); | 4828 | driver_config->total_dev_cnt--; |
4406 | vxge_debug_entryexit(level_trace, | 4829 | |
4407 | "%s:%d Exiting...", __func__, __LINE__); | 4830 | vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", |
4831 | __func__, __LINE__); | ||
4832 | vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, | ||
4833 | __LINE__); | ||
4408 | } | 4834 | } |
4409 | 4835 | ||
4410 | static struct pci_error_handlers vxge_err_handler = { | 4836 | static struct pci_error_handlers vxge_err_handler = { |
@@ -4440,6 +4866,10 @@ vxge_starter(void) | |||
4440 | return -ENOMEM; | 4866 | return -ENOMEM; |
4441 | 4867 | ||
4442 | ret = pci_register_driver(&vxge_driver); | 4868 | ret = pci_register_driver(&vxge_driver); |
4869 | if (ret) { | ||
4870 | kfree(driver_config); | ||
4871 | goto err; | ||
4872 | } | ||
4443 | 4873 | ||
4444 | if (driver_config->config_dev_cnt && | 4874 | if (driver_config->config_dev_cnt && |
4445 | (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) | 4875 | (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) |
@@ -4447,10 +4877,7 @@ vxge_starter(void) | |||
4447 | "%s: Configured %d of %d devices", | 4877 | "%s: Configured %d of %d devices", |
4448 | VXGE_DRIVER_NAME, driver_config->config_dev_cnt, | 4878 | VXGE_DRIVER_NAME, driver_config->config_dev_cnt, |
4449 | driver_config->total_dev_cnt); | 4879 | driver_config->total_dev_cnt); |
4450 | 4880 | err: | |
4451 | if (ret) | ||
4452 | kfree(driver_config); | ||
4453 | |||
4454 | return ret; | 4881 | return ret; |
4455 | } | 4882 | } |
4456 | 4883 | ||
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index 2e3b064b8e4b..ed120aba443d 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h | |||
@@ -29,6 +29,9 @@ | |||
29 | 29 | ||
30 | #define PCI_DEVICE_ID_TITAN_WIN 0x5733 | 30 | #define PCI_DEVICE_ID_TITAN_WIN 0x5733 |
31 | #define PCI_DEVICE_ID_TITAN_UNI 0x5833 | 31 | #define PCI_DEVICE_ID_TITAN_UNI 0x5833 |
32 | #define VXGE_HW_TITAN1_PCI_REVISION 1 | ||
33 | #define VXGE_HW_TITAN1A_PCI_REVISION 2 | ||
34 | |||
32 | #define VXGE_USE_DEFAULT 0xffffffff | 35 | #define VXGE_USE_DEFAULT 0xffffffff |
33 | #define VXGE_HW_VPATH_MSIX_ACTIVE 4 | 36 | #define VXGE_HW_VPATH_MSIX_ACTIVE 4 |
34 | #define VXGE_ALARM_MSIX_ID 2 | 37 | #define VXGE_ALARM_MSIX_ID 2 |
@@ -53,12 +56,16 @@ | |||
53 | 56 | ||
54 | #define VXGE_TTI_BTIMER_VAL 250000 | 57 | #define VXGE_TTI_BTIMER_VAL 250000 |
55 | 58 | ||
56 | #define VXGE_TTI_LTIMER_VAL 1000 | 59 | #define VXGE_TTI_LTIMER_VAL 1000 |
57 | #define VXGE_TTI_RTIMER_VAL 0 | 60 | #define VXGE_T1A_TTI_LTIMER_VAL 80 |
58 | #define VXGE_RTI_BTIMER_VAL 250 | 61 | #define VXGE_TTI_RTIMER_VAL 0 |
59 | #define VXGE_RTI_LTIMER_VAL 100 | 62 | #define VXGE_TTI_RTIMER_ADAPT_VAL 10 |
60 | #define VXGE_RTI_RTIMER_VAL 0 | 63 | #define VXGE_T1A_TTI_RTIMER_VAL 400 |
61 | #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH | 64 | #define VXGE_RTI_BTIMER_VAL 250 |
65 | #define VXGE_RTI_LTIMER_VAL 100 | ||
66 | #define VXGE_RTI_RTIMER_VAL 0 | ||
67 | #define VXGE_RTI_RTIMER_ADAPT_VAL 15 | ||
68 | #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH | ||
62 | #define VXGE_ISR_POLLING_CNT 8 | 69 | #define VXGE_ISR_POLLING_CNT 8 |
63 | #define VXGE_MAX_CONFIG_DEV 0xFF | 70 | #define VXGE_MAX_CONFIG_DEV 0xFF |
64 | #define VXGE_EXEC_MODE_DISABLE 0 | 71 | #define VXGE_EXEC_MODE_DISABLE 0 |
@@ -76,14 +83,40 @@ | |||
76 | #define TTI_TX_UFC_B 40 | 83 | #define TTI_TX_UFC_B 40 |
77 | #define TTI_TX_UFC_C 60 | 84 | #define TTI_TX_UFC_C 60 |
78 | #define TTI_TX_UFC_D 100 | 85 | #define TTI_TX_UFC_D 100 |
79 | 86 | #define TTI_T1A_TX_UFC_A 30 | |
80 | #define RTI_RX_URANGE_A 5 | 87 | #define TTI_T1A_TX_UFC_B 80 |
81 | #define RTI_RX_URANGE_B 15 | 88 | /* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */ |
82 | #define RTI_RX_URANGE_C 40 | 89 | /* Slope - 93 */ |
83 | #define RTI_RX_UFC_A 1 | 90 | /* 60 - 9k Mtu, 140 - 1.5k mtu */ |
84 | #define RTI_RX_UFC_B 5 | 91 | #define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93)) |
85 | #define RTI_RX_UFC_C 10 | 92 | |
86 | #define RTI_RX_UFC_D 15 | 93 | /* Slope - 37 */ |
94 | /* 100 - 9k Mtu, 300 - 1.5k mtu */ | ||
95 | #define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37)) | ||
96 | |||
97 | |||
98 | #define RTI_RX_URANGE_A 5 | ||
99 | #define RTI_RX_URANGE_B 15 | ||
100 | #define RTI_RX_URANGE_C 40 | ||
101 | #define RTI_T1A_RX_URANGE_A 1 | ||
102 | #define RTI_T1A_RX_URANGE_B 20 | ||
103 | #define RTI_T1A_RX_URANGE_C 50 | ||
104 | #define RTI_RX_UFC_A 1 | ||
105 | #define RTI_RX_UFC_B 5 | ||
106 | #define RTI_RX_UFC_C 10 | ||
107 | #define RTI_RX_UFC_D 15 | ||
108 | #define RTI_T1A_RX_UFC_B 20 | ||
109 | #define RTI_T1A_RX_UFC_C 50 | ||
110 | #define RTI_T1A_RX_UFC_D 60 | ||
111 | |||
112 | /* | ||
113 | * The interrupt rate is maintained at 3k per second with the moderation | ||
114 | * parameters for most traffic but not all. This is the maximum interrupt | ||
115 | * count allowed per function with INTA or per vector in the case of | ||
116 | * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A. | ||
117 | */ | ||
118 | #define VXGE_T1A_MAX_INTERRUPT_COUNT 100 | ||
119 | #define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200 | ||
87 | 120 | ||
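The budget described above implies a simple control loop: count interrupts per ring and, once per 10 ms window, either impose the adaptive restriction timer or lift it. A hedged sketch of how the pieces this patch introduces (the per-ring interrupt_count/jiffies fields, VXGE_RTI_RTIMER_ADAPT_VAL, and the dynamic-rtimer hook in vxge-traffic.c) could fit together; the thresholds and the call site are assumptions, not the driver's exact algorithm:

/* Hedged sketch: once per 10 ms window, compare the per-ring
 * interrupt count against the budget above and program the
 * restriction timer through the dynamic-rtimer hook this patch adds.
 * Needs <linux/jiffies.h> for jiffies/HZ/time_before().
 */
static void vxge_rx_adapt_moderation(struct vxge_ring *ring,
				     struct __vxge_hw_ring *hw_ring)
{
	ring->interrupt_count++;
	if (time_before(jiffies, ring->jiffies + HZ / 100))
		return;				/* 10 ms window still open */

	ring->jiffies = jiffies;
	if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT)
		hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
	else
		hw_ring->rtimer = 0;		/* traffic calmed down: lift it */

	vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
	ring->interrupt_count = 0;
}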
88 | /* Milli secs timer period */ | 121 | /* Milli secs timer period */ |
89 | #define VXGE_TIMER_DELAY 10000 | 122 | #define VXGE_TIMER_DELAY 10000 |
@@ -135,9 +168,6 @@ struct vxge_config { | |||
135 | 168 | ||
136 | #define NEW_NAPI_WEIGHT 64 | 169 | #define NEW_NAPI_WEIGHT 64 |
137 | int napi_weight; | 170 | int napi_weight; |
138 | #define VXGE_GRO_DONOT_AGGREGATE 0 | ||
139 | #define VXGE_GRO_ALWAYS_AGGREGATE 1 | ||
140 | int gro_enable; | ||
141 | int intr_type; | 171 | int intr_type; |
142 | #define INTA 0 | 172 | #define INTA 0 |
143 | #define MSI 1 | 173 | #define MSI 1 |
@@ -145,15 +175,15 @@ struct vxge_config { | |||
145 | 175 | ||
146 | int addr_learn_en; | 176 | int addr_learn_en; |
147 | 177 | ||
148 | int rth_steering; | 178 | u32 rth_steering:2, |
149 | int rth_algorithm; | 179 | rth_algorithm:2, |
150 | int rth_hash_type_tcpipv4; | 180 | rth_hash_type_tcpipv4:1, |
151 | int rth_hash_type_ipv4; | 181 | rth_hash_type_ipv4:1, |
152 | int rth_hash_type_tcpipv6; | 182 | rth_hash_type_tcpipv6:1, |
153 | int rth_hash_type_ipv6; | 183 | rth_hash_type_ipv6:1, |
154 | int rth_hash_type_tcpipv6ex; | 184 | rth_hash_type_tcpipv6ex:1, |
155 | int rth_hash_type_ipv6ex; | 185 | rth_hash_type_ipv6ex:1, |
156 | int rth_bkt_sz; | 186 | rth_bkt_sz:8; |
157 | int rth_jhash_golden_ratio; | 187 | int rth_jhash_golden_ratio; |
158 | int tx_steering_type; | 188 | int tx_steering_type; |
159 | int fifo_indicate_max_pkts; | 189 | int fifo_indicate_max_pkts; |
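Packing the nine RTH ints into one u32 bitfield above loses nothing, since the stored values fit the declared widths (the hash-type flags are 0/1 and rth_bkt_sz needs at most 8 bits), while shrinking struct vxge_config. A standalone comparison of the two layouts (field names reused purely for illustration):

#include <stdio.h>

struct cfg_ints {	/* old layout: one int per knob */
	int rth_steering, rth_algorithm,
	    rth_hash_type_tcpipv4, rth_hash_type_ipv4,
	    rth_hash_type_tcpipv6, rth_hash_type_ipv6,
	    rth_hash_type_tcpipv6ex, rth_hash_type_ipv6ex,
	    rth_bkt_sz;
};

struct cfg_bits {	/* new layout: everything packed into one u32 */
	unsigned int rth_steering:2, rth_algorithm:2,
		     rth_hash_type_tcpipv4:1, rth_hash_type_ipv4:1,
		     rth_hash_type_tcpipv6:1, rth_hash_type_ipv6:1,
		     rth_hash_type_tcpipv6ex:1, rth_hash_type_ipv6ex:1,
		     rth_bkt_sz:8;
};

int main(void)
{
	printf("ints: %zu bytes, bitfields: %zu bytes\n",
	       sizeof(struct cfg_ints), sizeof(struct cfg_bits));
	return 0;	/* typically prints "ints: 36 bytes, bitfields: 4 bytes" */
}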
@@ -172,7 +202,6 @@ struct vxge_msix_entry { | |||
172 | 202 | ||
173 | struct vxge_sw_stats { | 203 | struct vxge_sw_stats { |
174 | /* Network Stats (interface stats) */ | 204 | /* Network Stats (interface stats) */ |
175 | struct net_device_stats net_stats; | ||
176 | 205 | ||
177 | /* Tx */ | 206 | /* Tx */ |
178 | u64 tx_frms; | 207 | u64 tx_frms; |
@@ -225,6 +254,11 @@ struct vxge_fifo { | |||
225 | int tx_steering_type; | 254 | int tx_steering_type; |
226 | int indicate_max_pkts; | 255 | int indicate_max_pkts; |
227 | 256 | ||
257 | /* Adaptive interrupt moderation parameters used in T1A */ | ||
258 | unsigned long interrupt_count; | ||
259 | unsigned long jiffies; | ||
260 | |||
261 | u32 tx_vector_no; | ||
228 | /* Tx stats */ | 262 | /* Tx stats */ |
229 | struct vxge_fifo_stats stats; | 263 | struct vxge_fifo_stats stats; |
230 | } ____cacheline_aligned; | 264 | } ____cacheline_aligned; |
@@ -249,12 +283,15 @@ struct vxge_ring { | |||
249 | */ | 283 | */ |
250 | int driver_id; | 284 | int driver_id; |
251 | 285 | ||
252 | /* copy of the flag indicating whether rx_csum is to be used */ | 286 | /* Adaptive interrupt moderation parameters used in T1A */ |
253 | u32 rx_csum; | 287 | unsigned long interrupt_count; |
288 | unsigned long jiffies; | ||
289 | |||
290 | /* copy of the flag indicating whether rx_hwts is to be used */ | ||
291 | u32 rx_hwts:1; | ||
254 | 292 | ||
255 | int pkts_processed; | 293 | int pkts_processed; |
256 | int budget; | 294 | int budget; |
257 | int gro_enable; | ||
258 | 295 | ||
259 | struct napi_struct napi; | 296 | struct napi_struct napi; |
260 | struct napi_struct *napi_p; | 297 | struct napi_struct *napi_p; |
@@ -263,7 +300,7 @@ struct vxge_ring { | |||
263 | 300 | ||
264 | int vlan_tag_strip; | 301 | int vlan_tag_strip; |
265 | struct vlan_group *vlgrp; | 302 | struct vlan_group *vlgrp; |
266 | int rx_vector_no; | 303 | u32 rx_vector_no; |
267 | enum vxge_hw_status last_status; | 304 | enum vxge_hw_status last_status; |
268 | 305 | ||
269 | /* Rx stats */ | 306 | /* Rx stats */ |
@@ -282,8 +319,8 @@ struct vxge_vpath { | |||
282 | int is_configured; | 319 | int is_configured; |
283 | int is_open; | 320 | int is_open; |
284 | struct vxgedev *vdev; | 321 | struct vxgedev *vdev; |
285 | u8 (macaddr)[ETH_ALEN]; | 322 | u8 macaddr[ETH_ALEN]; |
286 | u8 (macmask)[ETH_ALEN]; | 323 | u8 macmask[ETH_ALEN]; |
287 | 324 | ||
288 | #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 | 325 | #define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 |
289 | /* mac addresses currently programmed into NIC */ | 326 | /* mac addresses currently programmed into NIC */ |
@@ -327,8 +364,9 @@ struct vxgedev { | |||
327 | */ | 364 | */ |
328 | u16 all_multi_flg; | 365 | u16 all_multi_flg; |
329 | 366 | ||
330 | /* A flag indicating whether rx_csum is to be used or not. */ | 367 | /* A flag indicating whether rx_hwts is to be used or not. */ |
331 | u32 rx_csum; | 368 | u32 rx_hwts:1, |
369 | titan1:1; | ||
332 | 370 | ||
333 | struct vxge_msix_entry *vxge_entries; | 371 | struct vxge_msix_entry *vxge_entries; |
334 | struct msix_entry *entries; | 372 | struct msix_entry *entries; |
@@ -370,6 +408,7 @@ struct vxgedev { | |||
370 | u32 level_err; | 408 | u32 level_err; |
371 | u32 level_trace; | 409 | u32 level_trace; |
372 | char fw_version[VXGE_HW_FW_STRLEN]; | 410 | char fw_version[VXGE_HW_FW_STRLEN]; |
411 | struct work_struct reset_task; | ||
373 | }; | 412 | }; |
374 | 413 | ||
375 | struct vxge_rx_priv { | 414 | struct vxge_rx_priv { |
@@ -388,8 +427,6 @@ struct vxge_tx_priv { | |||
388 | static int p = val; \ | 427 | static int p = val; \ |
389 | module_param(p, int, 0) | 428 | module_param(p, int, 0) |
390 | 429 | ||
391 | #define vxge_os_bug(fmt...) { printk(fmt); BUG(); } | ||
392 | |||
393 | #define vxge_os_timer(timer, handle, arg, exp) do { \ | 430 | #define vxge_os_timer(timer, handle, arg, exp) do { \ |
394 | init_timer(&timer); \ | 431 | init_timer(&timer); \ |
395 | timer.function = handle; \ | 432 | timer.function = handle; \ |
@@ -397,64 +434,10 @@ struct vxge_tx_priv { | |||
397 | mod_timer(&timer, (jiffies + exp)); \ | 434 | mod_timer(&timer, (jiffies + exp)); \ |
398 | } while (0); | 435 | } while (0); |
399 | 436 | ||
400 | int __devinit vxge_device_register(struct __vxge_hw_device *devh, | 437 | void vxge_initialize_ethtool_ops(struct net_device *ndev); |
401 | struct vxge_config *config, | ||
402 | int high_dma, int no_of_vpath, | ||
403 | struct vxgedev **vdev); | ||
404 | |||
405 | void vxge_device_unregister(struct __vxge_hw_device *devh); | ||
406 | |||
407 | void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id); | ||
408 | |||
409 | void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id); | ||
410 | |||
411 | void vxge_callback_link_up(struct __vxge_hw_device *devh); | ||
412 | |||
413 | void vxge_callback_link_down(struct __vxge_hw_device *devh); | ||
414 | |||
415 | enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, | ||
416 | struct macInfo *mac); | ||
417 | |||
418 | int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac); | ||
419 | |||
420 | int vxge_reset(struct vxgedev *vdev); | ||
421 | |||
422 | enum vxge_hw_status | ||
423 | vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | ||
424 | u8 t_code, void *userdata); | ||
425 | |||
426 | enum vxge_hw_status | ||
427 | vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, | ||
428 | enum vxge_hw_fifo_tcode t_code, void *userdata, | ||
429 | struct sk_buff ***skb_ptr, int nr_skbs, int *more); | ||
430 | |||
431 | int vxge_close(struct net_device *dev); | ||
432 | |||
433 | int vxge_open(struct net_device *dev); | ||
434 | |||
435 | void vxge_close_vpaths(struct vxgedev *vdev, int index); | ||
436 | |||
437 | int vxge_open_vpaths(struct vxgedev *vdev); | ||
438 | |||
439 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); | 438 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); |
439 | int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); | ||
440 | 440 | ||
441 | enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, | ||
442 | struct macInfo *mac); | ||
443 | |||
444 | enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, | ||
445 | struct macInfo *mac); | ||
446 | |||
447 | int vxge_mac_list_add(struct vxge_vpath *vpath, | ||
448 | struct macInfo *mac); | ||
449 | |||
450 | void vxge_free_mac_add_list(struct vxge_vpath *vpath); | ||
451 | |||
452 | enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath); | ||
453 | |||
454 | enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath); | ||
455 | |||
456 | int do_vxge_close(struct net_device *dev, int do_io); | ||
457 | extern void initialize_ethtool_ops(struct net_device *ndev); | ||
458 | /** | 441 | /** |
459 | * #define VXGE_DEBUG_INIT: debug for initialization functions | 442 | * #define VXGE_DEBUG_INIT: debug for initialization functions |
460 | * #define VXGE_DEBUG_TX : debug transmit related functions | 443 | * #define VXGE_DEBUG_TX : debug transmit related functions |
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h index 3dd5c9615ef9..3e658b175947 100644 --- a/drivers/net/vxge/vxge-reg.h +++ b/drivers/net/vxge/vxge-reg.h | |||
@@ -49,6 +49,33 @@ | |||
49 | #define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 | 49 | #define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 |
50 | #define VXGE_HW_TITAN_VPATH_REG_SPACES 17 | 50 | #define VXGE_HW_TITAN_VPATH_REG_SPACES 17 |
51 | 51 | ||
52 | #define VXGE_HW_FW_API_GET_EPROM_REV 31 | ||
53 | |||
54 | #define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4) | ||
55 | #define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4) | ||
56 | #define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4) | ||
57 | #define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4) | ||
58 | |||
59 | #define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8) | ||
60 | #define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1) | ||
61 | #define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8) | ||
62 | #define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16) | ||
63 | #define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8) | ||
64 | |||
65 | #define VXGE_HW_FW_API_GET_FUNC_MODE 29 | ||
66 | #define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF) | ||
67 | |||
68 | #define VXGE_HW_FW_UPGRADE_MEMO 13 | ||
69 | #define VXGE_HW_FW_UPGRADE_ACTION 16 | ||
70 | #define VXGE_HW_FW_UPGRADE_OFFSET_START 2 | ||
71 | #define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3 | ||
72 | #define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4 | ||
73 | #define VXGE_HW_FW_UPGRADE_OFFSET_READ 5 | ||
74 | |||
75 | #define VXGE_HW_FW_UPGRADE_BLK_SIZE 16 | ||
76 | #define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff) | ||
77 | #define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff) | ||
78 | |||
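These macros rely on the driver's vxge_bVALn()/vxge_vBIT() convention, where bit 0 is the most significant bit of the 64-bit word and a field is addressed by (location, width). A standalone decode check, with the extraction helper re-derived from that convention (an inference, not a copy of the header):

#include <stdio.h>
#include <stdint.h>

/* Bit extraction re-derived from the driver's convention (bit 0 is
 * the MSB of the 64-bit word): an inference, not a copy of the header.
 */
static uint32_t bVALn(uint64_t bits, int loc, int n)
{
	return (uint32_t)((bits >> (64 - (loc + n))) & ((1ULL << n) - 1));
}

int main(void)
{
	uint64_t val = 0x1234;	/* version nibbles live in bits 48..63 */

	printf("EPROM image version %u.%u.%u.%u\n",
	       bVALn(val, 48, 4), bVALn(val, 52, 4),
	       bVALn(val, 56, 4), bVALn(val, 60, 4));
	return 0;		/* prints 1.2.3.4 */
}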
52 | #define VXGE_HW_ASIC_MODE_RESERVED 0 | 79 | #define VXGE_HW_ASIC_MODE_RESERVED 0 |
53 | #define VXGE_HW_ASIC_MODE_NO_IOV 1 | 80 | #define VXGE_HW_ASIC_MODE_NO_IOV 1 |
54 | #define VXGE_HW_ASIC_MODE_SR_IOV 2 | 81 | #define VXGE_HW_ASIC_MODE_SR_IOV 2 |
@@ -165,13 +192,13 @@ | |||
165 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 | 192 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 |
166 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 | 193 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 |
167 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 | 194 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 |
168 | #define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 | 195 | #define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 |
169 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 | 196 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 |
170 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 | 197 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 |
171 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 | 198 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 |
172 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 | 199 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 |
173 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 | 200 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 |
174 | #define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 | 201 | #define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 |
175 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 | 202 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 |
176 | 203 | ||
177 | #define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ | 204 | #define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ |
@@ -437,6 +464,7 @@ | |||
437 | #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ | 464 | #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ |
438 | vxge_bVALn(bits, 48, 16) | 465 | vxge_bVALn(bits, 48, 16) |
439 | #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) | 466 | #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) |
467 | #define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8) | ||
440 | 468 | ||
441 | #define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ | 469 | #define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ |
442 | vxge_bVALn(bits, 0, 18) | 470 | vxge_bVALn(bits, 0, 18) |
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg { | |||
3998 | #define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) | 4026 | #define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) |
3999 | #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) | 4027 | #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) |
4000 | #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) | 4028 | #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) |
4029 | #define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9) | ||
4001 | /*0x00a78*/ u64 prc_cfg7; | 4030 | /*0x00a78*/ u64 prc_cfg7; |
4002 | #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) | 4031 | #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) |
4003 | #define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) | 4032 | #define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) |
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index cedf08f99cb3..f93517055162 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Copyright(c) 2002-2010 Exar Corp. | 12 | * Copyright(c) 2002-2010 Exar Corp. |
13 | ******************************************************************************/ | 13 | ******************************************************************************/ |
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | #include <linux/prefetch.h> | ||
15 | 16 | ||
16 | #include "vxge-traffic.h" | 17 | #include "vxge-traffic.h" |
17 | #include "vxge-config.h" | 18 | #include "vxge-config.h" |
@@ -218,6 +219,68 @@ exit: | |||
218 | return status; | 219 | return status; |
219 | } | 220 | } |
220 | 221 | ||
222 | void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo) | ||
223 | { | ||
224 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
225 | struct vxge_hw_vp_config *config; | ||
226 | u64 val64; | ||
227 | |||
228 | if (fifo->config->enable != VXGE_HW_FIFO_ENABLE) | ||
229 | return; | ||
230 | |||
231 | vp_reg = fifo->vp_reg; | ||
232 | config = container_of(fifo->config, struct vxge_hw_vp_config, fifo); | ||
233 | |||
234 | if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | ||
235 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | ||
236 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
237 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
238 | fifo->tim_tti_cfg1_saved = val64; | ||
239 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) | ||
244 | { | ||
245 | u64 val64 = ring->tim_rti_cfg1_saved; | ||
246 | |||
247 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | ||
248 | ring->tim_rti_cfg1_saved = val64; | ||
249 | writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
250 | } | ||
251 | |||
252 | void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo) | ||
253 | { | ||
254 | u64 val64 = fifo->tim_tti_cfg3_saved; | ||
255 | u64 timer = (fifo->rtimer * 1000) / 272; | ||
256 | |||
257 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); | ||
258 | if (timer) | ||
259 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | | ||
260 | VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5); | ||
261 | |||
262 | writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | ||
263 | /* tti_cfg3_saved is not updated again because it is | ||
264 | * initialized at one place only - init time. | ||
265 | */ | ||
266 | } | ||
267 | |||
268 | void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) | ||
269 | { | ||
270 | u64 val64 = ring->tim_rti_cfg3_saved; | ||
271 | u64 timer = (ring->rtimer * 1000) / 272; | ||
272 | |||
273 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); | ||
274 | if (timer) | ||
275 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | | ||
276 | VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4); | ||
277 | |||
278 | writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | ||
279 | /* rti_cfg3_saved is not updated again because it is | ||
280 | * initialized at one place only - init time. | ||
281 | */ | ||
282 | } | ||
283 | |||
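Both rtimer helpers above scale a microsecond value by 1000/272 before masking to the 26-bit RTIMER_VAL field, which implies the TIM block counts in roughly 272 ns ticks. A tiny standalone check of the conversion (the tick size is inferred from the constant, not from a datasheet):

#include <stdio.h>
#include <stdint.h>

/* Reproduce the scaling above: microseconds to ~272 ns hardware
 * ticks, masked to the 26-bit RTIMER_VAL field.
 */
static uint64_t usec_to_tim_ticks(uint64_t usec)
{
	return ((usec * 1000) / 272) & 0x3ffffff;
}

int main(void)
{
	/* VXGE_RTI_RTIMER_ADAPT_VAL in this patch is 15 (usec). */
	printf("15 usec -> %llu ticks\n",
	       (unsigned long long)usec_to_tim_ticks(15));
	return 0;	/* prints 55 */
}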
221 | /** | 284 | /** |
222 | * vxge_hw_channel_msix_mask - Mask MSIX Vector. | 285 | * vxge_hw_channel_msix_mask - Mask MSIX Vector. |
223 | * @channel: Channel for rx or tx handle | 286 | * @channel: Channel for rx or tx handle |
@@ -254,6 +317,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) | |||
254 | } | 317 | } |
255 | 318 | ||
256 | /** | 319 | /** |
320 | * vxge_hw_channel_msix_clear - Clear the MSIX Vector. | ||
321 | * @channel: Channel for rx or tx handle | ||
322 | * @msix_id: MSI ID | ||
323 | * | ||
324 | * The function clears the msix interrupt for the given msix_id | ||
325 | * if configured in MSIX oneshot mode | ||
326 | * | ||
327 | * Returns: none | ||
328 | */ | ||
329 | void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id) | ||
330 | { | ||
331 | __vxge_hw_pio_mem_write32_upper( | ||
332 | (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), | ||
333 | &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]); | ||
334 | } | ||
335 | |||
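In one-shot mode the hardware masks a vector automatically when it fires, so an interrupt handler finishes by re-arming through this clear call rather than the usual unmask. A sketch of that handler shape, reusing the tx_vector_no field this patch adds to struct vxge_fifo (the handler name is assumed, fifo->handle is assumed to point at the HW fifo, and VXGE_COMPLETE_VPATH_TX is the driver's Tx-drain macro):

/* Illustrative ISR for one-shot mode: do the work, then re-arm by
 * clearing the one-shot vector instead of unmasking it.
 */
static irqreturn_t vxge_tx_msix_oneshot_handle(int irq, void *data)
{
	struct vxge_fifo *fifo = data;

	VXGE_COMPLETE_VPATH_TX(fifo);	/* drain completed TxDs */
	vxge_hw_channel_msix_clear(&fifo->handle->channel,
				   fifo->tx_vector_no);
	return IRQ_HANDLED;
}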
336 | /** | ||
257 | * vxge_hw_device_set_intr_type - Updates the configuration | 337 | * vxge_hw_device_set_intr_type - Updates the configuration |
258 | * with new interrupt type. | 338 | * with new interrupt type. |
259 | * @hldev: HW device handle. | 339 | * @hldev: HW device handle. |
@@ -412,6 +492,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) | |||
412 | } | 492 | } |
413 | 493 | ||
414 | /** | 494 | /** |
495 | * __vxge_hw_device_handle_error - Handle error | ||
496 | * @hldev: HW device | ||
497 | * @vp_id: Vpath Id | ||
498 | * @type: Error type. Please see enum vxge_hw_event{} | ||
499 | * | ||
500 | * Handle error. | ||
501 | */ | ||
502 | static enum vxge_hw_status | ||
503 | __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, | ||
504 | enum vxge_hw_event type) | ||
505 | { | ||
506 | switch (type) { | ||
507 | case VXGE_HW_EVENT_UNKNOWN: | ||
508 | break; | ||
509 | case VXGE_HW_EVENT_RESET_START: | ||
510 | case VXGE_HW_EVENT_RESET_COMPLETE: | ||
511 | case VXGE_HW_EVENT_LINK_DOWN: | ||
512 | case VXGE_HW_EVENT_LINK_UP: | ||
513 | goto out; | ||
514 | case VXGE_HW_EVENT_ALARM_CLEARED: | ||
515 | goto out; | ||
516 | case VXGE_HW_EVENT_ECCERR: | ||
517 | case VXGE_HW_EVENT_MRPCIM_ECCERR: | ||
518 | goto out; | ||
519 | case VXGE_HW_EVENT_FIFO_ERR: | ||
520 | case VXGE_HW_EVENT_VPATH_ERR: | ||
521 | case VXGE_HW_EVENT_CRITICAL_ERR: | ||
522 | case VXGE_HW_EVENT_SERR: | ||
523 | break; | ||
524 | case VXGE_HW_EVENT_SRPCIM_SERR: | ||
525 | case VXGE_HW_EVENT_MRPCIM_SERR: | ||
526 | goto out; | ||
527 | case VXGE_HW_EVENT_SLOT_FREEZE: | ||
528 | break; | ||
529 | default: | ||
530 | vxge_assert(0); | ||
531 | goto out; | ||
532 | } | ||
533 | |||
534 | /* notify driver */ | ||
535 | if (hldev->uld_callbacks.crit_err) | ||
536 | hldev->uld_callbacks.crit_err( | ||
537 | (struct __vxge_hw_device *)hldev, | ||
538 | type, vp_id); | ||
539 | out: | ||
540 | |||
541 | return VXGE_HW_OK; | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * __vxge_hw_device_handle_link_down_ind | ||
546 | * @hldev: HW device handle. | ||
547 | * | ||
548 | * Link down indication handler. The function is invoked by HW when | ||
549 | * Titan indicates that the link is down. | ||
550 | */ | ||
551 | static enum vxge_hw_status | ||
552 | __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) | ||
553 | { | ||
554 | /* | ||
555 | * If the link state is already down, return. | ||
556 | */ | ||
557 | if (hldev->link_state == VXGE_HW_LINK_DOWN) | ||
558 | goto exit; | ||
559 | |||
560 | hldev->link_state = VXGE_HW_LINK_DOWN; | ||
561 | |||
562 | /* notify driver */ | ||
563 | if (hldev->uld_callbacks.link_down) | ||
564 | hldev->uld_callbacks.link_down(hldev); | ||
565 | exit: | ||
566 | return VXGE_HW_OK; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * __vxge_hw_device_handle_link_up_ind | ||
571 | * @hldev: HW device handle. | ||
572 | * | ||
573 | * Link up indication handler. The function is invoked by HW when | ||
574 | * Titan indicates that the link is up for a programmable amount of time. | ||
575 | */ | ||
576 | static enum vxge_hw_status | ||
577 | __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) | ||
578 | { | ||
579 | /* | ||
580 | * If the link state is already up, return. | ||
581 | */ | ||
582 | if (hldev->link_state == VXGE_HW_LINK_UP) | ||
583 | goto exit; | ||
584 | |||
585 | hldev->link_state = VXGE_HW_LINK_UP; | ||
586 | |||
587 | /* notify driver */ | ||
588 | if (hldev->uld_callbacks.link_up) | ||
589 | hldev->uld_callbacks.link_up(hldev); | ||
590 | exit: | ||
591 | return VXGE_HW_OK; | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * __vxge_hw_vpath_alarm_process - Process Alarms. | ||
596 | * @vpath: Virtual Path. | ||
597 | * @skip_alarms: Do not clear the alarms | ||
598 | * | ||
599 | * Process vpath alarms. | ||
600 | * | ||
601 | */ | ||
602 | static enum vxge_hw_status | ||
603 | __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, | ||
604 | u32 skip_alarms) | ||
605 | { | ||
606 | u64 val64; | ||
607 | u64 alarm_status; | ||
608 | u64 pic_status; | ||
609 | struct __vxge_hw_device *hldev = NULL; | ||
610 | enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; | ||
611 | u64 mask64; | ||
612 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | ||
613 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
614 | |||
615 | if (vpath == NULL) { | ||
616 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
617 | alarm_event); | ||
618 | goto out2; | ||
619 | } | ||
620 | |||
621 | hldev = vpath->hldev; | ||
622 | vp_reg = vpath->vp_reg; | ||
623 | alarm_status = readq(&vp_reg->vpath_general_int_status); | ||
624 | |||
625 | if (alarm_status == VXGE_HW_ALL_FOXES) { | ||
626 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, | ||
627 | alarm_event); | ||
628 | goto out; | ||
629 | } | ||
630 | |||
631 | sw_stats = vpath->sw_stats; | ||
632 | |||
633 | if (alarm_status & ~( | ||
634 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | | ||
635 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | | ||
636 | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | | ||
637 | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { | ||
638 | sw_stats->error_stats.unknown_alarms++; | ||
639 | |||
640 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
641 | alarm_event); | ||
642 | goto out; | ||
643 | } | ||
644 | |||
645 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { | ||
646 | |||
647 | val64 = readq(&vp_reg->xgmac_vp_int_status); | ||
648 | |||
649 | if (val64 & | ||
650 | VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { | ||
651 | |||
652 | val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); | ||
653 | |||
654 | if (((val64 & | ||
655 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && | ||
656 | (!(val64 & | ||
657 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || | ||
658 | ((val64 & | ||
659 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && | ||
660 | (!(val64 & | ||
661 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) | ||
662 | ))) { | ||
663 | sw_stats->error_stats.network_sustained_fault++; | ||
664 | |||
665 | writeq( | ||
666 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, | ||
667 | &vp_reg->asic_ntwk_vp_err_mask); | ||
668 | |||
669 | __vxge_hw_device_handle_link_down_ind(hldev); | ||
670 | alarm_event = VXGE_HW_SET_LEVEL( | ||
671 | VXGE_HW_EVENT_LINK_DOWN, alarm_event); | ||
672 | } | ||
673 | |||
674 | if (((val64 & | ||
675 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && | ||
676 | (!(val64 & | ||
677 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || | ||
678 | ((val64 & | ||
679 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && | ||
680 | (!(val64 & | ||
681 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) | ||
682 | ))) { | ||
683 | |||
684 | sw_stats->error_stats.network_sustained_ok++; | ||
685 | |||
686 | writeq( | ||
687 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, | ||
688 | &vp_reg->asic_ntwk_vp_err_mask); | ||
689 | |||
690 | __vxge_hw_device_handle_link_up_ind(hldev); | ||
691 | alarm_event = VXGE_HW_SET_LEVEL( | ||
692 | VXGE_HW_EVENT_LINK_UP, alarm_event); | ||
693 | } | ||
694 | |||
695 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
696 | &vp_reg->asic_ntwk_vp_err_reg); | ||
697 | |||
698 | alarm_event = VXGE_HW_SET_LEVEL( | ||
699 | VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); | ||
700 | |||
701 | if (skip_alarms) | ||
702 | return VXGE_HW_OK; | ||
703 | } | ||
704 | } | ||
705 | |||
706 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { | ||
707 | |||
708 | pic_status = readq(&vp_reg->vpath_ppif_int_status); | ||
709 | |||
710 | if (pic_status & | ||
711 | VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { | ||
712 | |||
713 | val64 = readq(&vp_reg->general_errors_reg); | ||
714 | mask64 = readq(&vp_reg->general_errors_mask); | ||
715 | |||
716 | if ((val64 & | ||
717 | VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & | ||
718 | ~mask64) { | ||
719 | sw_stats->error_stats.ini_serr_det++; | ||
720 | |||
721 | alarm_event = VXGE_HW_SET_LEVEL( | ||
722 | VXGE_HW_EVENT_SERR, alarm_event); | ||
723 | } | ||
724 | |||
725 | if ((val64 & | ||
726 | VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & | ||
727 | ~mask64) { | ||
728 | sw_stats->error_stats.dblgen_fifo0_overflow++; | ||
729 | |||
730 | alarm_event = VXGE_HW_SET_LEVEL( | ||
731 | VXGE_HW_EVENT_FIFO_ERR, alarm_event); | ||
732 | } | ||
733 | |||
734 | if ((val64 & | ||
735 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & | ||
736 | ~mask64) | ||
737 | sw_stats->error_stats.statsb_pif_chain_error++; | ||
738 | |||
739 | if ((val64 & | ||
740 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & | ||
741 | ~mask64) | ||
742 | sw_stats->error_stats.statsb_drop_timeout++; | ||
743 | |||
744 | if ((val64 & | ||
745 | VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & | ||
746 | ~mask64) | ||
747 | sw_stats->error_stats.target_illegal_access++; | ||
748 | |||
749 | if (!skip_alarms) { | ||
750 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
751 | &vp_reg->general_errors_reg); | ||
752 | alarm_event = VXGE_HW_SET_LEVEL( | ||
753 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
754 | alarm_event); | ||
755 | } | ||
756 | } | ||
757 | |||
758 | if (pic_status & | ||
759 | VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { | ||
760 | |||
761 | val64 = readq(&vp_reg->kdfcctl_errors_reg); | ||
762 | mask64 = readq(&vp_reg->kdfcctl_errors_mask); | ||
763 | |||
764 | if ((val64 & | ||
765 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & | ||
766 | ~mask64) { | ||
767 | sw_stats->error_stats.kdfcctl_fifo0_overwrite++; | ||
768 | |||
769 | alarm_event = VXGE_HW_SET_LEVEL( | ||
770 | VXGE_HW_EVENT_FIFO_ERR, | ||
771 | alarm_event); | ||
772 | } | ||
773 | |||
774 | if ((val64 & | ||
775 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & | ||
776 | ~mask64) { | ||
777 | sw_stats->error_stats.kdfcctl_fifo0_poison++; | ||
778 | |||
779 | alarm_event = VXGE_HW_SET_LEVEL( | ||
780 | VXGE_HW_EVENT_FIFO_ERR, | ||
781 | alarm_event); | ||
782 | } | ||
783 | |||
784 | if ((val64 & | ||
785 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & | ||
786 | ~mask64) { | ||
787 | sw_stats->error_stats.kdfcctl_fifo0_dma_error++; | ||
788 | |||
789 | alarm_event = VXGE_HW_SET_LEVEL( | ||
790 | VXGE_HW_EVENT_FIFO_ERR, | ||
791 | alarm_event); | ||
792 | } | ||
793 | |||
794 | if (!skip_alarms) { | ||
795 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
796 | &vp_reg->kdfcctl_errors_reg); | ||
797 | alarm_event = VXGE_HW_SET_LEVEL( | ||
798 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
799 | alarm_event); | ||
800 | } | ||
801 | } | ||
802 | |||
803 | } | ||
804 | |||
805 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { | ||
806 | |||
807 | val64 = readq(&vp_reg->wrdma_alarm_status); | ||
808 | |||
809 | if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { | ||
810 | |||
811 | val64 = readq(&vp_reg->prc_alarm_reg); | ||
812 | mask64 = readq(&vp_reg->prc_alarm_mask); | ||
813 | |||
814 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& | ||
815 | ~mask64) | ||
816 | sw_stats->error_stats.prc_ring_bumps++; | ||
817 | |||
818 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & | ||
819 | ~mask64) { | ||
820 | sw_stats->error_stats.prc_rxdcm_sc_err++; | ||
821 | |||
822 | alarm_event = VXGE_HW_SET_LEVEL( | ||
823 | VXGE_HW_EVENT_VPATH_ERR, | ||
824 | alarm_event); | ||
825 | } | ||
826 | |||
827 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) | ||
828 | & ~mask64) { | ||
829 | sw_stats->error_stats.prc_rxdcm_sc_abort++; | ||
830 | |||
831 | alarm_event = VXGE_HW_SET_LEVEL( | ||
832 | VXGE_HW_EVENT_VPATH_ERR, | ||
833 | alarm_event); | ||
834 | } | ||
835 | |||
836 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) | ||
837 | & ~mask64) { | ||
838 | sw_stats->error_stats.prc_quanta_size_err++; | ||
839 | |||
840 | alarm_event = VXGE_HW_SET_LEVEL( | ||
841 | VXGE_HW_EVENT_VPATH_ERR, | ||
842 | alarm_event); | ||
843 | } | ||
844 | |||
845 | if (!skip_alarms) { | ||
846 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
847 | &vp_reg->prc_alarm_reg); | ||
848 | alarm_event = VXGE_HW_SET_LEVEL( | ||
849 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
850 | alarm_event); | ||
851 | } | ||
852 | } | ||
853 | } | ||
854 | out: | ||
855 | hldev->stats.sw_dev_err_stats.vpath_alarms++; | ||
856 | out2: | ||
857 | if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || | ||
858 | (alarm_event == VXGE_HW_EVENT_UNKNOWN)) | ||
859 | return VXGE_HW_OK; | ||
860 | |||
861 | __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); | ||
862 | |||
863 | if (alarm_event == VXGE_HW_EVENT_SERR) | ||
864 | return VXGE_HW_ERR_CRITICAL; | ||
865 | |||
866 | return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? | ||
867 | VXGE_HW_ERR_SLOT_FREEZE : | ||
868 | (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : | ||
869 | VXGE_HW_ERR_VPATH; | ||
870 | } | ||
871 | |||
872 | /** | ||
415 | * vxge_hw_device_begin_irq - Begin IRQ processing. | 873 | * vxge_hw_device_begin_irq - Begin IRQ processing. |
416 | * @hldev: HW device handle. | 874 | * @hldev: HW device handle. |
417 | * @skip_alarms: Do not clear the alarms | 875 | * @skip_alarms: Do not clear the alarms |
@@ -506,108 +964,6 @@ exit: | |||
506 | return ret; | 964 | return ret; |
507 | } | 965 | } |
508 | 966 | ||
509 | /* | ||
510 | * __vxge_hw_device_handle_link_up_ind | ||
511 | * @hldev: HW device handle. | ||
512 | * | ||
513 | * Link up indication handler. The function is invoked by HW when | ||
514 | * Titan indicates that the link is up for programmable amount of time. | ||
515 | */ | ||
516 | enum vxge_hw_status | ||
517 | __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) | ||
518 | { | ||
519 | /* | ||
520 | * If the previous link state is not down, return. | ||
521 | */ | ||
522 | if (hldev->link_state == VXGE_HW_LINK_UP) | ||
523 | goto exit; | ||
524 | |||
525 | hldev->link_state = VXGE_HW_LINK_UP; | ||
526 | |||
527 | /* notify driver */ | ||
528 | if (hldev->uld_callbacks.link_up) | ||
529 | hldev->uld_callbacks.link_up(hldev); | ||
530 | exit: | ||
531 | return VXGE_HW_OK; | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * __vxge_hw_device_handle_link_down_ind | ||
536 | * @hldev: HW device handle. | ||
537 | * | ||
538 | * Link down indication handler. The function is invoked by HW when | ||
539 | * Titan indicates that the link is down. | ||
540 | */ | ||
541 | enum vxge_hw_status | ||
542 | __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) | ||
543 | { | ||
544 | /* | ||
545 | * If the previous link state is not down, return. | ||
546 | */ | ||
547 | if (hldev->link_state == VXGE_HW_LINK_DOWN) | ||
548 | goto exit; | ||
549 | |||
550 | hldev->link_state = VXGE_HW_LINK_DOWN; | ||
551 | |||
552 | /* notify driver */ | ||
553 | if (hldev->uld_callbacks.link_down) | ||
554 | hldev->uld_callbacks.link_down(hldev); | ||
555 | exit: | ||
556 | return VXGE_HW_OK; | ||
557 | } | ||
558 | |||
559 | /** | ||
560 | * __vxge_hw_device_handle_error - Handle error | ||
561 | * @hldev: HW device | ||
562 | * @vp_id: Vpath Id | ||
563 | * @type: Error type. Please see enum vxge_hw_event{} | ||
564 | * | ||
565 | * Handle error. | ||
566 | */ | ||
567 | enum vxge_hw_status | ||
568 | __vxge_hw_device_handle_error( | ||
569 | struct __vxge_hw_device *hldev, | ||
570 | u32 vp_id, | ||
571 | enum vxge_hw_event type) | ||
572 | { | ||
573 | switch (type) { | ||
574 | case VXGE_HW_EVENT_UNKNOWN: | ||
575 | break; | ||
576 | case VXGE_HW_EVENT_RESET_START: | ||
577 | case VXGE_HW_EVENT_RESET_COMPLETE: | ||
578 | case VXGE_HW_EVENT_LINK_DOWN: | ||
579 | case VXGE_HW_EVENT_LINK_UP: | ||
580 | goto out; | ||
581 | case VXGE_HW_EVENT_ALARM_CLEARED: | ||
582 | goto out; | ||
583 | case VXGE_HW_EVENT_ECCERR: | ||
584 | case VXGE_HW_EVENT_MRPCIM_ECCERR: | ||
585 | goto out; | ||
586 | case VXGE_HW_EVENT_FIFO_ERR: | ||
587 | case VXGE_HW_EVENT_VPATH_ERR: | ||
588 | case VXGE_HW_EVENT_CRITICAL_ERR: | ||
589 | case VXGE_HW_EVENT_SERR: | ||
590 | break; | ||
591 | case VXGE_HW_EVENT_SRPCIM_SERR: | ||
592 | case VXGE_HW_EVENT_MRPCIM_SERR: | ||
593 | goto out; | ||
594 | case VXGE_HW_EVENT_SLOT_FREEZE: | ||
595 | break; | ||
596 | default: | ||
597 | vxge_assert(0); | ||
598 | goto out; | ||
599 | } | ||
600 | |||
601 | /* notify driver */ | ||
602 | if (hldev->uld_callbacks.crit_err) | ||
603 | hldev->uld_callbacks.crit_err( | ||
604 | (struct __vxge_hw_device *)hldev, | ||
605 | type, vp_id); | ||
606 | out: | ||
607 | |||
608 | return VXGE_HW_OK; | ||
609 | } | ||
610 | |||
611 | /** | 967 | /** |
612 | * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the | 968 | * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the |
613 | * condition that has caused the Tx and RX interrupt. | 969 | * condition that has caused the Tx and RX interrupt. |
@@ -646,7 +1002,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev) | |||
646 | * it swaps the reserve and free arrays. | 1002 | * it swaps the reserve and free arrays. |
647 | * | 1003 | * |
648 | */ | 1004 | */ |
649 | enum vxge_hw_status | 1005 | static enum vxge_hw_status |
650 | vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) | 1006 | vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) |
651 | { | 1007 | { |
652 | void **tmp_arr; | 1008 | void **tmp_arr; |
@@ -692,7 +1048,8 @@ _alloc_after_swap: | |||
692 | * Posts a dtr to work array. | 1048 | * Posts a dtr to work array. |
693 | * | 1049 | * |
694 | */ | 1050 | */ |
695 | void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) | 1051 | static void |
1052 | vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) | ||
696 | { | 1053 | { |
697 | vxge_assert(channel->work_arr[channel->post_index] == NULL); | 1054 | vxge_assert(channel->work_arr[channel->post_index] == NULL); |
698 | 1055 | ||
@@ -755,7 +1112,7 @@ void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh) | |||
755 | * vxge_hw_channel_dtr_count | 1112 | * vxge_hw_channel_dtr_count |
756 | * @channel: Channel handle. Obtained via vxge_hw_channel_open(). | 1113 | * @channel: Channel handle. Obtained via vxge_hw_channel_open(). |
757 | * | 1114 | * |
758 | * Retreive number of DTRs available. This function can not be called | 1115 | * Retrieve number of DTRs available. This function can not be called |
759 | * from the data path. ring_initial_replenish() is the only user. | 1116 | * from the data path. ring_initial_replenish() is the only user. |
760 | */ | 1117 | */ |
761 | int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel) | 1118 | int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel) |
@@ -903,10 +1260,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) | |||
903 | */ | 1260 | */ |
904 | void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) | 1261 | void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) |
905 | { | 1262 | { |
906 | struct __vxge_hw_channel *channel; | ||
907 | |||
908 | channel = &ring->channel; | ||
909 | |||
910 | wmb(); | 1263 | wmb(); |
911 | vxge_hw_ring_rxd_post_post(ring, rxdh); | 1264 | vxge_hw_ring_rxd_post_post(ring, rxdh); |
912 | } | 1265 | } |
@@ -967,7 +1320,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( | |||
967 | *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); | 1320 | *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); |
968 | 1321 | ||
969 | /* check whether it is not the end */ | 1322 | /* check whether it is not the end */ |
970 | if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) { | 1323 | if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { |
971 | 1324 | ||
972 | vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != | 1325 | vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != |
973 | 0); | 1326 | 0); |
@@ -1658,37 +2011,6 @@ exit: | |||
1658 | } | 2011 | } |
1659 | 2012 | ||
1660 | /** | 2013 | /** |
1661 | * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath | ||
1662 | * from vlan id table. | ||
1663 | * @vp: Vpath handle. | ||
1664 | * @vid: Buffer to return vlan id | ||
1665 | * | ||
1666 | * Returns the next vlan id in the list for this vpath. | ||
1667 | * see also: vxge_hw_vpath_vid_get | ||
1668 | * | ||
1669 | */ | ||
1670 | enum vxge_hw_status | ||
1671 | vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid) | ||
1672 | { | ||
1673 | u64 data; | ||
1674 | enum vxge_hw_status status = VXGE_HW_OK; | ||
1675 | |||
1676 | if (vp == NULL) { | ||
1677 | status = VXGE_HW_ERR_INVALID_HANDLE; | ||
1678 | goto exit; | ||
1679 | } | ||
1680 | |||
1681 | status = __vxge_hw_vpath_rts_table_get(vp, | ||
1682 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY, | ||
1683 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, | ||
1684 | 0, vid, &data); | ||
1685 | |||
1686 | *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid); | ||
1687 | exit: | ||
1688 | return status; | ||
1689 | } | ||
1690 | |||
1691 | /** | ||
1692 | * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath | 2014 | * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath |
1693 | * to vlan id table. | 2015 | * to vlan id table. |
1694 | * @vp: Vpath handle. | 2016 | * @vp: Vpath handle. |
@@ -1739,7 +2061,7 @@ enum vxge_hw_status vxge_hw_vpath_promisc_enable( | |||
1739 | 2061 | ||
1740 | vpath = vp->vpath; | 2062 | vpath = vp->vpath; |
1741 | 2063 | ||
1742 | /* Enable promiscous mode for function 0 only */ | 2064 | /* Enable promiscuous mode for function 0 only */ |
1743 | if (!(vpath->hldev->access_rights & | 2065 | if (!(vpath->hldev->access_rights & |
1744 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) | 2066 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) |
1745 | return VXGE_HW_OK; | 2067 | return VXGE_HW_OK; |
@@ -1891,284 +2213,6 @@ exit: | |||
1891 | } | 2213 | } |
1892 | 2214 | ||
1893 | /* | 2215 | /* |
1894 | * __vxge_hw_vpath_alarm_process - Process Alarms. | ||
1895 | * @vpath: Virtual Path. | ||
1896 | * @skip_alarms: Do not clear the alarms | ||
1897 | * | ||
1898 | * Process vpath alarms. | ||
1899 | * | ||
1900 | */ | ||
1901 | enum vxge_hw_status __vxge_hw_vpath_alarm_process( | ||
1902 | struct __vxge_hw_virtualpath *vpath, | ||
1903 | u32 skip_alarms) | ||
1904 | { | ||
1905 | u64 val64; | ||
1906 | u64 alarm_status; | ||
1907 | u64 pic_status; | ||
1908 | struct __vxge_hw_device *hldev = NULL; | ||
1909 | enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; | ||
1910 | u64 mask64; | ||
1911 | struct vxge_hw_vpath_stats_sw_info *sw_stats; | ||
1912 | struct vxge_hw_vpath_reg __iomem *vp_reg; | ||
1913 | |||
1914 | if (vpath == NULL) { | ||
1915 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
1916 | alarm_event); | ||
1917 | goto out2; | ||
1918 | } | ||
1919 | |||
1920 | hldev = vpath->hldev; | ||
1921 | vp_reg = vpath->vp_reg; | ||
1922 | alarm_status = readq(&vp_reg->vpath_general_int_status); | ||
1923 | |||
1924 | if (alarm_status == VXGE_HW_ALL_FOXES) { | ||
1925 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, | ||
1926 | alarm_event); | ||
1927 | goto out; | ||
1928 | } | ||
1929 | |||
1930 | sw_stats = vpath->sw_stats; | ||
1931 | |||
1932 | if (alarm_status & ~( | ||
1933 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | | ||
1934 | VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | | ||
1935 | VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | | ||
1936 | VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { | ||
1937 | sw_stats->error_stats.unknown_alarms++; | ||
1938 | |||
1939 | alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, | ||
1940 | alarm_event); | ||
1941 | goto out; | ||
1942 | } | ||
1943 | |||
1944 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { | ||
1945 | |||
1946 | val64 = readq(&vp_reg->xgmac_vp_int_status); | ||
1947 | |||
1948 | if (val64 & | ||
1949 | VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { | ||
1950 | |||
1951 | val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); | ||
1952 | |||
1953 | if (((val64 & | ||
1954 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && | ||
1955 | (!(val64 & | ||
1956 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || | ||
1957 | ((val64 & | ||
1958 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && | ||
1959 | (!(val64 & | ||
1960 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) | ||
1961 | ))) { | ||
1962 | sw_stats->error_stats.network_sustained_fault++; | ||
1963 | |||
1964 | writeq( | ||
1965 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, | ||
1966 | &vp_reg->asic_ntwk_vp_err_mask); | ||
1967 | |||
1968 | __vxge_hw_device_handle_link_down_ind(hldev); | ||
1969 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1970 | VXGE_HW_EVENT_LINK_DOWN, alarm_event); | ||
1971 | } | ||
1972 | |||
1973 | if (((val64 & | ||
1974 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && | ||
1975 | (!(val64 & | ||
1976 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || | ||
1977 | ((val64 & | ||
1978 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && | ||
1979 | (!(val64 & | ||
1980 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) | ||
1981 | ))) { | ||
1982 | |||
1983 | sw_stats->error_stats.network_sustained_ok++; | ||
1984 | |||
1985 | writeq( | ||
1986 | VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, | ||
1987 | &vp_reg->asic_ntwk_vp_err_mask); | ||
1988 | |||
1989 | __vxge_hw_device_handle_link_up_ind(hldev); | ||
1990 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1991 | VXGE_HW_EVENT_LINK_UP, alarm_event); | ||
1992 | } | ||
1993 | |||
1994 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
1995 | &vp_reg->asic_ntwk_vp_err_reg); | ||
1996 | |||
1997 | alarm_event = VXGE_HW_SET_LEVEL( | ||
1998 | VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); | ||
1999 | |||
2000 | if (skip_alarms) | ||
2001 | return VXGE_HW_OK; | ||
2002 | } | ||
2003 | } | ||
2004 | |||
2005 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { | ||
2006 | |||
2007 | pic_status = readq(&vp_reg->vpath_ppif_int_status); | ||
2008 | |||
2009 | if (pic_status & | ||
2010 | VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { | ||
2011 | |||
2012 | val64 = readq(&vp_reg->general_errors_reg); | ||
2013 | mask64 = readq(&vp_reg->general_errors_mask); | ||
2014 | |||
2015 | if ((val64 & | ||
2016 | VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & | ||
2017 | ~mask64) { | ||
2018 | sw_stats->error_stats.ini_serr_det++; | ||
2019 | |||
2020 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2021 | VXGE_HW_EVENT_SERR, alarm_event); | ||
2022 | } | ||
2023 | |||
2024 | if ((val64 & | ||
2025 | VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & | ||
2026 | ~mask64) { | ||
2027 | sw_stats->error_stats.dblgen_fifo0_overflow++; | ||
2028 | |||
2029 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2030 | VXGE_HW_EVENT_FIFO_ERR, alarm_event); | ||
2031 | } | ||
2032 | |||
2033 | if ((val64 & | ||
2034 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & | ||
2035 | ~mask64) | ||
2036 | sw_stats->error_stats.statsb_pif_chain_error++; | ||
2037 | |||
2038 | if ((val64 & | ||
2039 | VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & | ||
2040 | ~mask64) | ||
2041 | sw_stats->error_stats.statsb_drop_timeout++; | ||
2042 | |||
2043 | if ((val64 & | ||
2044 | VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & | ||
2045 | ~mask64) | ||
2046 | sw_stats->error_stats.target_illegal_access++; | ||
2047 | |||
2048 | if (!skip_alarms) { | ||
2049 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2050 | &vp_reg->general_errors_reg); | ||
2051 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2052 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2053 | alarm_event); | ||
2054 | } | ||
2055 | } | ||
2056 | |||
2057 | if (pic_status & | ||
2058 | VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { | ||
2059 | |||
2060 | val64 = readq(&vp_reg->kdfcctl_errors_reg); | ||
2061 | mask64 = readq(&vp_reg->kdfcctl_errors_mask); | ||
2062 | |||
2063 | if ((val64 & | ||
2064 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & | ||
2065 | ~mask64) { | ||
2066 | sw_stats->error_stats.kdfcctl_fifo0_overwrite++; | ||
2067 | |||
2068 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2069 | VXGE_HW_EVENT_FIFO_ERR, | ||
2070 | alarm_event); | ||
2071 | } | ||
2072 | |||
2073 | if ((val64 & | ||
2074 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & | ||
2075 | ~mask64) { | ||
2076 | sw_stats->error_stats.kdfcctl_fifo0_poison++; | ||
2077 | |||
2078 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2079 | VXGE_HW_EVENT_FIFO_ERR, | ||
2080 | alarm_event); | ||
2081 | } | ||
2082 | |||
2083 | if ((val64 & | ||
2084 | VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & | ||
2085 | ~mask64) { | ||
2086 | sw_stats->error_stats.kdfcctl_fifo0_dma_error++; | ||
2087 | |||
2088 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2089 | VXGE_HW_EVENT_FIFO_ERR, | ||
2090 | alarm_event); | ||
2091 | } | ||
2092 | |||
2093 | if (!skip_alarms) { | ||
2094 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2095 | &vp_reg->kdfcctl_errors_reg); | ||
2096 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2097 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2098 | alarm_event); | ||
2099 | } | ||
2100 | } | ||
2101 | |||
2102 | } | ||
2103 | |||
2104 | if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { | ||
2105 | |||
2106 | val64 = readq(&vp_reg->wrdma_alarm_status); | ||
2107 | |||
2108 | if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { | ||
2109 | |||
2110 | val64 = readq(&vp_reg->prc_alarm_reg); | ||
2111 | mask64 = readq(&vp_reg->prc_alarm_mask); | ||
2112 | |||
2113 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& | ||
2114 | ~mask64) | ||
2115 | sw_stats->error_stats.prc_ring_bumps++; | ||
2116 | |||
2117 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & | ||
2118 | ~mask64) { | ||
2119 | sw_stats->error_stats.prc_rxdcm_sc_err++; | ||
2120 | |||
2121 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2122 | VXGE_HW_EVENT_VPATH_ERR, | ||
2123 | alarm_event); | ||
2124 | } | ||
2125 | |||
2126 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) | ||
2127 | & ~mask64) { | ||
2128 | sw_stats->error_stats.prc_rxdcm_sc_abort++; | ||
2129 | |||
2130 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2131 | VXGE_HW_EVENT_VPATH_ERR, | ||
2132 | alarm_event); | ||
2133 | } | ||
2134 | |||
2135 | if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) | ||
2136 | & ~mask64) { | ||
2137 | sw_stats->error_stats.prc_quanta_size_err++; | ||
2138 | |||
2139 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2140 | VXGE_HW_EVENT_VPATH_ERR, | ||
2141 | alarm_event); | ||
2142 | } | ||
2143 | |||
2144 | if (!skip_alarms) { | ||
2145 | writeq(VXGE_HW_INTR_MASK_ALL, | ||
2146 | &vp_reg->prc_alarm_reg); | ||
2147 | alarm_event = VXGE_HW_SET_LEVEL( | ||
2148 | VXGE_HW_EVENT_ALARM_CLEARED, | ||
2149 | alarm_event); | ||
2150 | } | ||
2151 | } | ||
2152 | } | ||
2153 | out: | ||
2154 | hldev->stats.sw_dev_err_stats.vpath_alarms++; | ||
2155 | out2: | ||
2156 | if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || | ||
2157 | (alarm_event == VXGE_HW_EVENT_UNKNOWN)) | ||
2158 | return VXGE_HW_OK; | ||
2159 | |||
2160 | __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); | ||
2161 | |||
2162 | if (alarm_event == VXGE_HW_EVENT_SERR) | ||
2163 | return VXGE_HW_ERR_CRITICAL; | ||
2164 | |||
2165 | return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? | ||
2166 | VXGE_HW_ERR_SLOT_FREEZE : | ||
2167 | (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : | ||
2168 | VXGE_HW_ERR_VPATH; | ||
2169 | } | ||
2170 | |||
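For reference while reading the removed handler above: every branch gates its counter bump on the same unmasked-bit idiom, taken directly from the removed body:

	/* "error bit set AND not masked" gates every statistic update: */
	if ((val64 & VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & ~mask64)
		sw_stats->error_stats.ini_serr_det++;

and the tail maps the worst accumulated alarm_event to a return code: SERR to VXGE_HW_ERR_CRITICAL, SLOT_FREEZE to VXGE_HW_ERR_SLOT_FREEZE, FIFO_ERR to VXGE_HW_ERR_FIFO, anything else to VXGE_HW_ERR_VPATH.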
2171 | /* | ||
2172 | * vxge_hw_vpath_alarm_process - Process Alarms. | 2216 | * vxge_hw_vpath_alarm_process - Process Alarms. |
2173 | * @vpath: Virtual Path. | 2217 | * @vpath: Virtual Path. |
2174 | * @skip_alarms: Do not clear the alarms | 2218 | * @skip_alarms: Do not clear the alarms |
@@ -2227,19 +2271,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, | |||
2227 | if (vpath->hldev->config.intr_mode == | 2271 | if (vpath->hldev->config.intr_mode == |
2228 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { | 2272 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { |
2229 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | 2273 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( |
2274 | VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN, | ||
2275 | 0, 32), &vp_reg->one_shot_vect0_en); | ||
2276 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | ||
2230 | VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, | 2277 | VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, |
2231 | 0, 32), &vp_reg->one_shot_vect1_en); | 2278 | 0, 32), &vp_reg->one_shot_vect1_en); |
2232 | } | ||
2233 | |||
2234 | if (vpath->hldev->config.intr_mode == | ||
2235 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { | ||
2236 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | 2279 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( |
2237 | VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, | 2280 | VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, |
2238 | 0, 32), &vp_reg->one_shot_vect2_en); | 2281 | 0, 32), &vp_reg->one_shot_vect2_en); |
2239 | |||
2240 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( | ||
2241 | VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN, | ||
2242 | 0, 32), &vp_reg->one_shot_vect3_en); | ||
2243 | } | 2282 | } |
2244 | } | 2283 | } |
2245 | 2284 | ||
@@ -2276,22 +2315,18 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) | |||
2276 | * status. | 2315 | * status. |
2277 | * See also: | 2316 | * See also: |
2278 | */ | 2317 | */ |
2279 | void | 2318 | void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) |
2280 | vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) | ||
2281 | { | 2319 | { |
2282 | struct __vxge_hw_device *hldev = vp->vpath->hldev; | 2320 | struct __vxge_hw_device *hldev = vp->vpath->hldev; |
2283 | if (hldev->config.intr_mode == | 2321 | |
2284 | VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { | 2322 | if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) |
2285 | __vxge_hw_pio_mem_write32_upper( | 2323 | __vxge_hw_pio_mem_write32_upper( |
2286 | (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), | 2324 | (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), |
2287 | &hldev->common_reg-> | 2325 | &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); |
2288 | clr_msix_one_shot_vec[msix_id%4]); | 2326 | else |
2289 | } else { | ||
2290 | __vxge_hw_pio_mem_write32_upper( | 2327 | __vxge_hw_pio_mem_write32_upper( |
2291 | (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), | 2328 | (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), |
2292 | &hldev->common_reg-> | 2329 | &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]); |
2293 | clear_msix_mask_vect[msix_id%4]); | ||
2294 | } | ||
2295 | } | 2330 | } |
2296 | 2331 | ||
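The shift and modulus in the rewritten function encode the vector layout implied by the four-entry register arrays: four MSI-X vectors per vpath, so msix_id >> 2 recovers the vpath (used as a bit position through vxge_mBIT) and msix_id % 4 selects the register slot. A worked example under that assumption:

	/* e.g. msix_id = 17:
	 *   17 >> 2 = 4 -> vxge_mBIT(4), i.e. the bit for vpath 4
	 *   17 %  4 = 1 -> clr_msix_one_shot_vec[1] in one-shot mode,
	 *                  clear_msix_mask_vect[1] otherwise           */
	int vp_bit = msix_id >> 2;	/* which vpath owns this vector */
	int slot   = msix_id % 4;	/* which of its four vectors    */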
2297 | /** | 2332 | /** |
@@ -2316,22 +2351,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id) | |||
2316 | } | 2351 | } |
2317 | 2352 | ||
2318 | /** | 2353 | /** |
2319 | * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath. | ||
2320 | * @vp: Virtual Path handle. | ||
2321 | * | ||
2322 | * The function masks all msix interrupt for the given vpath | ||
2323 | * | ||
2324 | */ | ||
2325 | void | ||
2326 | vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp) | ||
2327 | { | ||
2328 | |||
2329 | __vxge_hw_pio_mem_write32_upper( | ||
2330 | (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32), | ||
2331 | &vp->vpath->hldev->common_reg->set_msix_mask_all_vect); | ||
2332 | } | ||
2333 | |||
2334 | /** | ||
2335 | * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. | 2354 | * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. |
2336 | * @vp: Virtual Path handle. | 2355 | * @vp: Virtual Path handle. |
2337 | * | 2356 | * |
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d13798e..4a518a3b131c 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -240,7 +240,7 @@ struct vxge_hw_tim_intr_config { | |||
240 | u32 btimer_val; | 240 | u32 btimer_val; |
241 | #define VXGE_HW_MIN_TIM_BTIMER_VAL 0 | 241 | #define VXGE_HW_MIN_TIM_BTIMER_VAL 0 |
242 | #define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864 | 242 | #define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864 |
243 | #define VXGE_HW_USE_FLASH_DEFAULT 0xffffffff | 243 | #define VXGE_HW_USE_FLASH_DEFAULT (~0) |
244 | 244 | ||
245 | u32 timer_ac_en; | 245 | u32 timer_ac_en; |
246 | #define VXGE_HW_TIM_TIMER_AC_ENABLE 1 | 246 | #define VXGE_HW_TIM_TIMER_AC_ENABLE 1 |
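The 0xffffffff to (~0) change keeps the same sentinel for these u32 fields while no longer hard-coding the width: the all-ones pattern truncates or converts to the field's width on assignment and comparison. A small illustration (the field use is a sketch, not driver code):

	u32 btimer_val = VXGE_HW_USE_FLASH_DEFAULT;  /* stores 0xffffffff */

	/* comparison still matches: the int (~0) converts to 0xffffffff
	 * when compared against a u32, exactly as the old literal did   */
	if (btimer_val == VXGE_HW_USE_FLASH_DEFAULT)
		/* ...fall back to the flash-programmed timer value... */ ;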
@@ -681,7 +681,7 @@ struct vxge_hw_xmac_aggr_stats { | |||
681 | * @rx_red_discard: Count of received frames that are discarded because of RED | 681 | * @rx_red_discard: Count of received frames that are discarded because of RED |
682 | * (Random Early Discard). | 682 | * (Random Early Discard). |
683 | * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control | 683 | * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control |
684 | * characters occuring between times of normal data transmission | 684 | * characters occurring between times of normal data transmission |
685 | * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is | 685 | * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is |
686 | * incremented when either - | 686 | * incremented when either - |
687 | * 1) The Reconciliation Sublayer (RS) is expecting one control | 687 | * 1) The Reconciliation Sublayer (RS) is expecting one control |
@@ -1695,7 +1695,7 @@ struct vxge_hw_device_stats_sw_err { | |||
1695 | * struct vxge_hw_device_stats - Contains HW per-device statistics, | 1695 | * struct vxge_hw_device_stats - Contains HW per-device statistics, |
1696 | * including hw. | 1696 | * including hw. |
1697 | * @devh: HW device handle. | 1697 | * @devh: HW device handle. |
1698 | * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats. | 1698 | * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats. |
1699 | * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory | 1699 | * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory |
1700 | * space. | 1700 | * space. |
1701 | * @hw_info_dma_acch: One more DMA handle used subsequently to free the | 1701 | * @hw_info_dma_acch: One more DMA handle used subsequently to free the |
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access( | |||
1749 | u64 *stat); | 1749 | u64 *stat); |
1750 | 1750 | ||
1751 | enum vxge_hw_status | 1751 | enum vxge_hw_status |
1752 | vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port, | ||
1753 | struct vxge_hw_xmac_aggr_stats *aggr_stats); | ||
1754 | |||
1755 | enum vxge_hw_status | ||
1756 | vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port, | ||
1757 | struct vxge_hw_xmac_port_stats *port_stats); | ||
1758 | |||
1759 | enum vxge_hw_status | ||
1760 | vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh, | 1752 | vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh, |
1761 | struct vxge_hw_xmac_stats *xmac_stats); | 1753 | struct vxge_hw_xmac_stats *xmac_stats); |
1762 | 1754 | ||
@@ -1912,34 +1904,6 @@ enum vxge_hw_ring_tcode { | |||
1912 | VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF | 1904 | VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF |
1913 | }; | 1905 | }; |
1914 | 1906 | ||
1915 | /** | ||
1916 | * enum enum vxge_hw_ring_hash_type - RTH hash types | ||
1917 | * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash | ||
1918 | * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4 | ||
1919 | * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4 | ||
1920 | * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4 | ||
1921 | * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6 | ||
1922 | * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6 | ||
1923 | * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6 | ||
1924 | * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension | ||
1925 | * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension | ||
1926 | * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension | ||
1927 | * | ||
1928 | * RTH hash types | ||
1929 | */ | ||
1930 | enum vxge_hw_ring_hash_type { | ||
1931 | VXGE_HW_RING_HASH_TYPE_NONE = 0x0, | ||
1932 | VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1, | ||
1933 | VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2, | ||
1934 | VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3, | ||
1935 | VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4, | ||
1936 | VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5, | ||
1937 | VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6, | ||
1938 | VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7, | ||
1939 | VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8, | ||
1940 | VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9 | ||
1941 | }; | ||
1942 | |||
1943 | enum vxge_hw_status vxge_hw_ring_rxd_reserve( | 1907 | enum vxge_hw_status vxge_hw_ring_rxd_reserve( |
1944 | struct __vxge_hw_ring *ring_handle, | 1908 | struct __vxge_hw_ring *ring_handle, |
1945 | void **rxdh); | 1909 | void **rxdh); |
@@ -2117,49 +2081,6 @@ struct __vxge_hw_ring_rxd_priv { | |||
2117 | #endif | 2081 | #endif |
2118 | }; | 2082 | }; |
2119 | 2083 | ||
2120 | /* ========================= RING PRIVATE API ============================= */ | ||
2121 | u64 | ||
2122 | __vxge_hw_ring_first_block_address_get( | ||
2123 | struct __vxge_hw_ring *ringh); | ||
2124 | |||
2125 | enum vxge_hw_status | ||
2126 | __vxge_hw_ring_create( | ||
2127 | struct __vxge_hw_vpath_handle *vpath_handle, | ||
2128 | struct vxge_hw_ring_attr *attr); | ||
2129 | |||
2130 | enum vxge_hw_status | ||
2131 | __vxge_hw_ring_abort( | ||
2132 | struct __vxge_hw_ring *ringh); | ||
2133 | |||
2134 | enum vxge_hw_status | ||
2135 | __vxge_hw_ring_reset( | ||
2136 | struct __vxge_hw_ring *ringh); | ||
2137 | |||
2138 | enum vxge_hw_status | ||
2139 | __vxge_hw_ring_delete( | ||
2140 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2141 | |||
2142 | /* ========================= FIFO PRIVATE API ============================= */ | ||
2143 | |||
2144 | struct vxge_hw_fifo_attr; | ||
2145 | |||
2146 | enum vxge_hw_status | ||
2147 | __vxge_hw_fifo_create( | ||
2148 | struct __vxge_hw_vpath_handle *vpath_handle, | ||
2149 | struct vxge_hw_fifo_attr *attr); | ||
2150 | |||
2151 | enum vxge_hw_status | ||
2152 | __vxge_hw_fifo_abort( | ||
2153 | struct __vxge_hw_fifo *fifoh); | ||
2154 | |||
2155 | enum vxge_hw_status | ||
2156 | __vxge_hw_fifo_reset( | ||
2157 | struct __vxge_hw_fifo *ringh); | ||
2158 | |||
2159 | enum vxge_hw_status | ||
2160 | __vxge_hw_fifo_delete( | ||
2161 | struct __vxge_hw_vpath_handle *vpath_handle); | ||
2162 | |||
2163 | struct vxge_hw_mempool_cbs { | 2084 | struct vxge_hw_mempool_cbs { |
2164 | void (*item_func_alloc)( | 2085 | void (*item_func_alloc)( |
2165 | struct vxge_hw_mempool *mempoolh, | 2086 | struct vxge_hw_mempool *mempoolh, |
@@ -2169,10 +2090,6 @@ struct vxge_hw_mempool_cbs { | |||
2169 | u32 is_last); | 2090 | u32 is_last); |
2170 | }; | 2091 | }; |
2171 | 2092 | ||
2172 | void | ||
2173 | __vxge_hw_mempool_destroy( | ||
2174 | struct vxge_hw_mempool *mempool); | ||
2175 | |||
2176 | #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ | 2093 | #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ |
2177 | ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) | 2094 | ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) |
2178 | 2095 | ||
@@ -2195,61 +2112,10 @@ __vxge_hw_vpath_rts_table_set( | |||
2195 | u64 data2); | 2112 | u64 data2); |
2196 | 2113 | ||
2197 | enum vxge_hw_status | 2114 | enum vxge_hw_status |
2198 | __vxge_hw_vpath_reset( | ||
2199 | struct __vxge_hw_device *devh, | ||
2200 | u32 vp_id); | ||
2201 | |||
2202 | enum vxge_hw_status | ||
2203 | __vxge_hw_vpath_sw_reset( | ||
2204 | struct __vxge_hw_device *devh, | ||
2205 | u32 vp_id); | ||
2206 | |||
2207 | enum vxge_hw_status | ||
2208 | __vxge_hw_vpath_enable( | 2115 | __vxge_hw_vpath_enable( |
2209 | struct __vxge_hw_device *devh, | 2116 | struct __vxge_hw_device *devh, |
2210 | u32 vp_id); | 2117 | u32 vp_id); |
2211 | 2118 | ||
2212 | void | ||
2213 | __vxge_hw_vpath_prc_configure( | ||
2214 | struct __vxge_hw_device *devh, | ||
2215 | u32 vp_id); | ||
2216 | |||
2217 | enum vxge_hw_status | ||
2218 | __vxge_hw_vpath_kdfc_configure( | ||
2219 | struct __vxge_hw_device *devh, | ||
2220 | u32 vp_id); | ||
2221 | |||
2222 | enum vxge_hw_status | ||
2223 | __vxge_hw_vpath_mac_configure( | ||
2224 | struct __vxge_hw_device *devh, | ||
2225 | u32 vp_id); | ||
2226 | |||
2227 | enum vxge_hw_status | ||
2228 | __vxge_hw_vpath_tim_configure( | ||
2229 | struct __vxge_hw_device *devh, | ||
2230 | u32 vp_id); | ||
2231 | |||
2232 | enum vxge_hw_status | ||
2233 | __vxge_hw_vpath_initialize( | ||
2234 | struct __vxge_hw_device *devh, | ||
2235 | u32 vp_id); | ||
2236 | |||
2237 | enum vxge_hw_status | ||
2238 | __vxge_hw_vp_initialize( | ||
2239 | struct __vxge_hw_device *devh, | ||
2240 | u32 vp_id, | ||
2241 | struct vxge_hw_vp_config *config); | ||
2242 | |||
2243 | void | ||
2244 | __vxge_hw_vp_terminate( | ||
2245 | struct __vxge_hw_device *devh, | ||
2246 | u32 vp_id); | ||
2247 | |||
2248 | enum vxge_hw_status | ||
2249 | __vxge_hw_vpath_alarm_process( | ||
2250 | struct __vxge_hw_virtualpath *vpath, | ||
2251 | u32 skip_alarms); | ||
2252 | |||
2253 | void vxge_hw_device_intr_enable( | 2119 | void vxge_hw_device_intr_enable( |
2254 | struct __vxge_hw_device *devh); | 2120 | struct __vxge_hw_device *devh); |
2255 | 2121 | ||
@@ -2276,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx( | |||
2276 | * Virtual Paths | 2142 | * Virtual Paths |
2277 | */ | 2143 | */ |
2278 | 2144 | ||
2145 | void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring); | ||
2146 | |||
2147 | void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo); | ||
2148 | |||
2279 | u32 vxge_hw_vpath_id( | 2149 | u32 vxge_hw_vpath_id( |
2280 | struct __vxge_hw_vpath_handle *vpath_handle); | 2150 | struct __vxge_hw_vpath_handle *vpath_handle); |
2281 | 2151 | ||
@@ -2288,27 +2158,27 @@ enum vxge_hw_vpath_mac_addr_add_mode { | |||
2288 | enum vxge_hw_status | 2158 | enum vxge_hw_status |
2289 | vxge_hw_vpath_mac_addr_add( | 2159 | vxge_hw_vpath_mac_addr_add( |
2290 | struct __vxge_hw_vpath_handle *vpath_handle, | 2160 | struct __vxge_hw_vpath_handle *vpath_handle, |
2291 | u8 (macaddr)[ETH_ALEN], | 2161 | u8 *macaddr, |
2292 | u8 (macaddr_mask)[ETH_ALEN], | 2162 | u8 *macaddr_mask, |
2293 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); | 2163 | enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); |
2294 | 2164 | ||
2295 | enum vxge_hw_status | 2165 | enum vxge_hw_status |
2296 | vxge_hw_vpath_mac_addr_get( | 2166 | vxge_hw_vpath_mac_addr_get( |
2297 | struct __vxge_hw_vpath_handle *vpath_handle, | 2167 | struct __vxge_hw_vpath_handle *vpath_handle, |
2298 | u8 (macaddr)[ETH_ALEN], | 2168 | u8 *macaddr, |
2299 | u8 (macaddr_mask)[ETH_ALEN]); | 2169 | u8 *macaddr_mask); |
2300 | 2170 | ||
2301 | enum vxge_hw_status | 2171 | enum vxge_hw_status |
2302 | vxge_hw_vpath_mac_addr_get_next( | 2172 | vxge_hw_vpath_mac_addr_get_next( |
2303 | struct __vxge_hw_vpath_handle *vpath_handle, | 2173 | struct __vxge_hw_vpath_handle *vpath_handle, |
2304 | u8 (macaddr)[ETH_ALEN], | 2174 | u8 *macaddr, |
2305 | u8 (macaddr_mask)[ETH_ALEN]); | 2175 | u8 *macaddr_mask); |
2306 | 2176 | ||
2307 | enum vxge_hw_status | 2177 | enum vxge_hw_status |
2308 | vxge_hw_vpath_mac_addr_delete( | 2178 | vxge_hw_vpath_mac_addr_delete( |
2309 | struct __vxge_hw_vpath_handle *vpath_handle, | 2179 | struct __vxge_hw_vpath_handle *vpath_handle, |
2310 | u8 (macaddr)[ETH_ALEN], | 2180 | u8 *macaddr, |
2311 | u8 (macaddr_mask)[ETH_ALEN]); | 2181 | u8 *macaddr_mask); |
2312 | 2182 | ||
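The prototype change from u8 (macaddr)[ETH_ALEN] to u8 *macaddr in the hunk above is type-neutral in C: an array declarator in a parameter list adjusts ("decays") to a pointer, so callers and the ABI are unchanged and only the documented length is lost. A minimal demonstration (sketch_fn is illustrative):

	/* both declare the same function type; the array form merely
	 * documents the expected length                              */
	enum vxge_hw_status sketch_fn(u8 macaddr[ETH_ALEN]);	/* adjusts to u8 * */
	enum vxge_hw_status sketch_fn(u8 *macaddr);		/* identical type  */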
2313 | enum vxge_hw_status | 2183 | enum vxge_hw_status |
2314 | vxge_hw_vpath_vid_add( | 2184 | vxge_hw_vpath_vid_add( |
@@ -2321,11 +2191,6 @@ vxge_hw_vpath_vid_get( | |||
2321 | u64 *vid); | 2191 | u64 *vid); |
2322 | 2192 | ||
2323 | enum vxge_hw_status | 2193 | enum vxge_hw_status |
2324 | vxge_hw_vpath_vid_get_next( | ||
2325 | struct __vxge_hw_vpath_handle *vpath_handle, | ||
2326 | u64 *vid); | ||
2327 | |||
2328 | enum vxge_hw_status | ||
2329 | vxge_hw_vpath_vid_delete( | 2194 | vxge_hw_vpath_vid_delete( |
2330 | struct __vxge_hw_vpath_handle *vpath_handle, | 2195 | struct __vxge_hw_vpath_handle *vpath_handle, |
2331 | u64 vid); | 2196 | u64 vid); |
@@ -2384,19 +2249,14 @@ void | |||
2384 | vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, | 2249 | vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, |
2385 | int msix_id); | 2250 | int msix_id); |
2386 | 2251 | ||
2387 | void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); | 2252 | void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id); |
2388 | 2253 | ||
2389 | void | 2254 | void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); |
2390 | vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle, | ||
2391 | int msix_id); | ||
2392 | 2255 | ||
2393 | void | 2256 | void |
2394 | vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle, | 2257 | vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle, |
2395 | int msix_id); | 2258 | int msix_id); |
2396 | 2259 | ||
2397 | void | ||
2398 | vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle); | ||
2399 | |||
2400 | enum vxge_hw_status vxge_hw_vpath_intr_enable( | 2260 | enum vxge_hw_status vxge_hw_vpath_intr_enable( |
2401 | struct __vxge_hw_vpath_handle *vpath_handle); | 2261 | struct __vxge_hw_vpath_handle *vpath_handle); |
2402 | 2262 | ||
@@ -2415,11 +2275,8 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id); | |||
2415 | void | 2275 | void |
2416 | vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); | 2276 | vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); |
2417 | 2277 | ||
2418 | enum vxge_hw_status | ||
2419 | vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh); | ||
2420 | |||
2421 | void | 2278 | void |
2422 | vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh); | 2279 | vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id); |
2423 | 2280 | ||
2424 | void | 2281 | void |
2425 | vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, | 2282 | vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, |
@@ -2433,21 +2290,9 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); | |||
2433 | 2290 | ||
2434 | int | 2291 | int |
2435 | vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); | 2292 | vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); |
2436 | void | ||
2437 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); | ||
2438 | 2293 | ||
2439 | /* ========================== PRIVATE API ================================= */ | 2294 | void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo); |
2440 | 2295 | ||
2441 | enum vxge_hw_status | 2296 | void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring); |
2442 | __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev); | ||
2443 | |||
2444 | enum vxge_hw_status | ||
2445 | __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev); | ||
2446 | |||
2447 | enum vxge_hw_status | ||
2448 | __vxge_hw_device_handle_error( | ||
2449 | struct __vxge_hw_device *hldev, | ||
2450 | u32 vp_id, | ||
2451 | enum vxge_hw_event type); | ||
2452 | 2297 | ||
2453 | #endif | 2298 | #endif |
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..b9efa28bab3e 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@ | |||
15 | #define VXGE_VERSION_H | 15 | #define VXGE_VERSION_H |
16 | 16 | ||
17 | #define VXGE_VERSION_MAJOR "2" | 17 | #define VXGE_VERSION_MAJOR "2" |
18 | #define VXGE_VERSION_MINOR "0" | 18 | #define VXGE_VERSION_MINOR "5" |
19 | #define VXGE_VERSION_FIX "9" | 19 | #define VXGE_VERSION_FIX "3" |
20 | #define VXGE_VERSION_BUILD "20840" | 20 | #define VXGE_VERSION_BUILD "22640" |
21 | #define VXGE_VERSION_FOR "k" | 21 | #define VXGE_VERSION_FOR "k" |
22 | |||
23 | #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) | ||
24 | |||
25 | #define VXGE_DEAD_FW_VER_MAJOR 1 | ||
26 | #define VXGE_DEAD_FW_VER_MINOR 4 | ||
27 | #define VXGE_DEAD_FW_VER_BUILD 4 | ||
28 | |||
29 | #define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \ | ||
30 | VXGE_DEAD_FW_VER_MINOR, \ | ||
31 | VXGE_DEAD_FW_VER_BUILD) | ||
32 | |||
33 | #define VXGE_EPROM_FW_VER_MAJOR 1 | ||
34 | #define VXGE_EPROM_FW_VER_MINOR 6 | ||
35 | #define VXGE_EPROM_FW_VER_BUILD 1 | ||
36 | |||
37 | #define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \ | ||
38 | VXGE_EPROM_FW_VER_MINOR, \ | ||
39 | VXGE_EPROM_FW_VER_BUILD) | ||
40 | |||
41 | #define VXGE_CERT_FW_VER_MAJOR 1 | ||
42 | #define VXGE_CERT_FW_VER_MINOR 8 | ||
43 | #define VXGE_CERT_FW_VER_BUILD 1 | ||
44 | |||
45 | #define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \ | ||
46 | VXGE_CERT_FW_VER_MINOR, \ | ||
47 | VXGE_CERT_FW_VER_BUILD) | ||
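The new VXGE_FW_VER macro packs a version triple into one integer so firmware versions compare with plain relational operators (valid while minor and build each fit in 8 bits, as they do for the versions defined here). Worked example for the certified version above:

	/* VXGE_CERT_FW_VER = VXGE_FW_VER(1, 8, 1)
	 *                  = (1 << 16) + (8 << 8) + 1
	 *                  = 65536 + 2048 + 1 = 0x10801
	 * so e.g. VXGE_CERT_FW_VER > VXGE_DEAD_FW_VER (0x10404) holds. */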
48 | |||
22 | #endif | 49 | #endif |