Diffstat (limited to 'drivers/net/vxge/vxge-traffic.c')
-rw-r--r--  drivers/net/vxge/vxge-traffic.c |  79
1 file changed, 22 insertions(+), 57 deletions(-)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..6cc1dd79b40b 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
 {
 
         __vxge_hw_pio_mem_write32_upper(
-                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-                        0, 32),
+                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
-
-        return;
 }
 
 /**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 {
 
         __vxge_hw_pio_mem_write32_upper(
-                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-                        0, 32),
+                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-
-        return;
 }
 
 /**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
         val64 = readq(&hldev->common_reg->titan_general_int_status);
 
         vxge_hw_device_unmask_all(hldev);
-
-        return;
 }
 
 /**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
                 vxge_hw_vpath_intr_disable(
                         VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
         }
-
-        return;
 }
 
 /**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
 
         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                 &hldev->common_reg->titan_mask_all_int);
-
-        return;
 }
 
 /**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
 
         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                 &hldev->common_reg->titan_mask_all_int);
-
-        return;
 }
 
 /**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
                                 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                         &hldev->common_reg->tim_int_status1);
         }
-
-        return;
 }
 
 /*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
 
         channel = &ring->channel;
 
-        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
 
         if (ring->stats->common_stats.usage_cnt > 0)
                 ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
         channel = &ring->channel;
 
         wmb();
-        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
 
         vxge_hw_channel_dtr_post(channel, rxdh);
 
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
         struct __vxge_hw_channel *channel;
         struct vxge_hw_ring_rxd_1 *rxdp;
         enum vxge_hw_status status = VXGE_HW_OK;
+        u64 control_0, own;
 
         channel = &ring->channel;
 
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
                 goto exit;
         }
 
+        control_0 = rxdp->control_0;
+        own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+        *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
         /* check whether it is not the end */
-        if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+        if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
 
                 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
                         0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
                 ++ring->cmpl_cnt;
                 vxge_hw_channel_dtr_complete(channel);
 
-                *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
                 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
 
                 ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
          * such as unknown UPV6 header), Drop it !!!
          */
 
-        if (t_code == 0 || t_code == 5) {
+        if (t_code == VXGE_HW_RING_T_CODE_OK ||
+                t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
                 status = VXGE_HW_OK;
                 goto exit;
         }
 
-        if (t_code > 0xF) {
+        if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
                 status = VXGE_HW_ERR_INVALID_TCODE;
                 goto exit;
         }
@@ -2216,29 +2204,24 @@ exit:
  * This API will associate a given MSIX vector numbers with the four TIM
  * interrupts and alarm interrupt.
  */
-enum vxge_hw_status
+void
 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
                         int alarm_msix_id)
 {
         u64 val64;
         struct __vxge_hw_virtualpath *vpath = vp->vpath;
         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-        u32 first_vp_id = vpath->hldev->first_vp_id;
+        u32 vp_id = vp->vpath->vp_id;
 
         val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
-                (first_vp_id * 4) + tim_msix_id[0]) |
+                (vp_id * 4) + tim_msix_id[0]) |
                 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
-                (first_vp_id * 4) + tim_msix_id[1]) |
-                VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
-                (first_vp_id * 4) + tim_msix_id[2]);
-
-        val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
-                (first_vp_id * 4) + tim_msix_id[3]);
+                (vp_id * 4) + tim_msix_id[1]);
 
         writeq(val64, &vp_reg->interrupt_cfg0);
 
         writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
-                (first_vp_id * 4) + alarm_msix_id),
+                (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
                 &vp_reg->interrupt_cfg2);
 
         if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
                         VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
                         0, 32), &vp_reg->one_shot_vect3_en);
         }
-
-        return VXGE_HW_OK;
 }
 
 /**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
         struct __vxge_hw_device *hldev = vp->vpath->hldev;
         __vxge_hw_pio_mem_write32_upper(
-                (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-                        (msix_id / 4)), 0, 32),
+                (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-
-        return;
 }
 
 /**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
         if (hldev->config.intr_mode ==
                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
                 __vxge_hw_pio_mem_write32_upper(
-                        (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-                                (msix_id/4)), 0, 32),
+                        (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                         &hldev->common_reg->
                                 clr_msix_one_shot_vec[msix_id%4]);
         } else {
                 __vxge_hw_pio_mem_write32_upper(
-                        (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-                                (msix_id/4)), 0, 32),
+                        (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                         &hldev->common_reg->
                                 clear_msix_mask_vect[msix_id%4]);
         }
-
-        return;
 }
 
 /**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
         struct __vxge_hw_device *hldev = vp->vpath->hldev;
         __vxge_hw_pio_mem_write32_upper(
-                (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-                        (msix_id/4)), 0, 32),
+                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-
-        return;
 }
 
 /**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
         __vxge_hw_pio_mem_write32_upper(
                 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
                 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-
-        return;
 }
 
 /**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
                         &hldev->common_reg->tim_int_mask1);
         }
-
-        return;
 }
 
 /**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
                         &hldev->common_reg->tim_int_mask1);
         }
-
-        return;
 }
 
 /**