author     Grant Likely <grant.likely@secretlab.ca>   2010-05-22 02:36:56 -0400
committer  Grant Likely <grant.likely@secretlab.ca>   2010-05-22 02:36:56 -0400
commit     cf9b59e9d3e008591d1f54830f570982bb307a0d
tree       113478ce8fd8c832ba726ffdf59b82cb46356476   /drivers/net/vxge
parent     44504b2bebf8b5823c59484e73096a7d6574471d
parent     f4b87dee923342505e1ddba8d34ce9de33e75050
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.
Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c
Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/net/vxge')
 drivers/net/vxge/vxge-config.c   |  41
 drivers/net/vxge/vxge-config.h   |  34
 drivers/net/vxge/vxge-ethtool.c  |   5
 drivers/net/vxge/vxge-main.c     | 245
 drivers/net/vxge/vxge-main.h     |   6
 drivers/net/vxge/vxge-traffic.c  |  79
 drivers/net/vxge/vxge-traffic.h  |  50
 drivers/net/vxge/vxge-version.h  |   4
 8 files changed, 247 insertions(+), 217 deletions(-)
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b6..297f0d202073 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -183,8 +183,6 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
 	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
 
 	pci_save_state(hldev->pdev);
-
-	return;
 }
 
 /*
@@ -342,8 +340,6 @@ void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
 
 	hldev->minor_revision =
 		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-
-	return;
 }
 
 /*
@@ -357,8 +353,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
 
 	switch (host_type) {
 	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
-		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
-				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+		if (func_id == 0) {
+			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+		}
 		break;
 	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
 		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -426,8 +424,6 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
 		hldev->first_vp_id = i;
 		break;
 	}
-
-	return;
 }
 
 /*
@@ -633,8 +629,10 @@ vxge_hw_device_initialize(
 	__vxge_hw_device_pci_e_init(hldev);
 
 	status = __vxge_hw_device_reg_addr_get(hldev);
-	if (status != VXGE_HW_OK)
+	if (status != VXGE_HW_OK) {
+		vfree(hldev);
 		goto exit;
+	}
 	__vxge_hw_device_id_get(hldev);
 
 	__vxge_hw_device_host_info_get(hldev);
@@ -1213,19 +1211,16 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
 		/* link this RxD block with previous one */
 		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
 	}
-
-	return;
 }
 
 /*
- * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
+ * __vxge_hw_ring_replenish - Initial replenish of RxDs
  * This function replenishes the RxDs from reserve array to work array
  */
 enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
 {
 	void *rxd;
-	int i = 0;
 	struct __vxge_hw_channel *channel;
 	enum vxge_hw_status status = VXGE_HW_OK;
 
@@ -1246,11 +1241,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
 		}
 
 		vxge_hw_ring_rxd_post(ring, rxd);
-		if (min_flag) {
-			i++;
-			if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
-				break;
-		}
 	}
 	status = VXGE_HW_OK;
 exit:
@@ -1355,7 +1345,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
 	 * Currently we don't have a case when the 1) is done without the 2).
 	 */
 	if (ring->rxd_init) {
-		status = vxge_hw_ring_replenish(ring, 1);
+		status = vxge_hw_ring_replenish(ring);
 		if (status != VXGE_HW_OK) {
 			__vxge_hw_ring_delete(vp);
 			goto exit;
@@ -1417,7 +1407,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
 		goto exit;
 
 	if (ring->rxd_init) {
-		status = vxge_hw_ring_replenish(ring, 1);
+		status = vxge_hw_ring_replenish(ring);
 		if (status != VXGE_HW_OK)
 			goto exit;
 	}
@@ -2320,8 +2310,6 @@ __vxge_hw_fifo_mempool_item_alloc(
 	txdl_priv->first_txdp = txdp;
 	txdl_priv->next_txdl_priv = NULL;
 	txdl_priv->alloc_frags = 0;
-
-	return;
 }
 
 /*
@@ -2578,7 +2566,6 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
 	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
 	writeq(0, &vpath_reg->rts_access_steer_data1);
 	wmb();
-	return;
 }
 
 
@@ -3486,7 +3473,6 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 	val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
 
 	writeq(val64, &vp_reg->prc_cfg4);
-	return;
 }
 
 /*
@@ -3905,7 +3891,6 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
 				&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
 		}
 	}
-	return;
 }
 /*
  * __vxge_hw_vpath_initialize
@@ -5039,8 +5024,6 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
 		if (status == VXGE_HW_OK)
 			__vxge_hw_blockpool_blocks_remove(blockpool);
 	}
-
-	return;
 }
 
 /*
@@ -5096,6 +5079,4 @@ __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
 	}
 
 	__vxge_hw_blockpool_blocks_remove(blockpool);
-
-	return;
 }
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f8..4ae2625d4d8f 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
 #define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
 #define VXGE_HW_VH_NORMAL_FUNCTION 7
 	u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
 #define VXGE_HW_FUNCTION_MODE_SRIOV 2
 #define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
 	u32 func_id;
 	u64 vpath_mask;
 	struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
 	gfp_t flags;
 	void *vaddr;
 	unsigned long misaligned = 0;
+	int realloc_flag = 0;
 	*p_dma_acch = *p_dmah = NULL;
 
 	if (in_interrupt())
 		flags = GFP_ATOMIC | GFP_DMA;
 	else
 		flags = GFP_KERNEL | GFP_DMA;
-
-	size += VXGE_CACHE_LINE_SIZE;
-
+realloc:
 	vaddr = kmalloc((size), flags);
 	if (vaddr == NULL)
 		return vaddr;
-	misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
+	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
 				VXGE_CACHE_LINE_SIZE);
+	if (realloc_flag)
+		goto out;
+
+	if (misaligned) {
+		/* misaligned, free current one and try allocating
+		 * size + VXGE_CACHE_LINE_SIZE memory
+		 */
+		kfree((void *) vaddr);
+		size += VXGE_CACHE_LINE_SIZE;
+		realloc_flag = 1;
+		goto realloc;
+	}
+out:
 	*(unsigned long *)p_dma_acch = misaligned;
 	vaddr = (void *)((u8 *)vaddr + misaligned);
 	return vaddr;
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
 	struct vxge_hw_rth_hash_types *hash_type,
 	u16 bucket_size);
 
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
 #endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd322..cadef8549c06 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
 	int index, offset;
 	enum vxge_hw_status status;
 	u64 reg;
-	u8 *reg_space = (u8 *) space;
+	u64 *reg_space = (u64 *) space;
 	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
 	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
 					pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
 					__func__, __LINE__);
 				return;
 			}
-
-			memcpy((reg_space + offset), &reg, 8);
+			*reg_space++ = reg;
 		}
 	}
 }
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..b504bd561362 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		ring->ndev->name, __func__, __LINE__);
 	ring->pkts_processed = 0;
 
-	vxge_hw_ring_replenish(ringh, 0);
+	vxge_hw_ring_replenish(ringh);
 
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
  */
 static void vxge_set_multicast(struct net_device *dev)
 {
-	struct dev_mc_list *mclist;
+	struct netdev_hw_addr *ha;
 	struct vxgedev *vdev;
 	int i, mcast_cnt = 0;
 	struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
 		}
 
 		/* Add new ones */
-		netdev_for_each_mc_addr(mclist, dev) {
-			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+		netdev_for_each_mc_addr(ha, dev) {
+			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
 			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
 				vpath_idx++) {
 				mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
-	int msix_id, alarm_msix_id;
-	int tim_msix_id[4] = {[0 ...3] = 0};
+	int msix_id = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
+	int alarm_msix_id = VXGE_ALARM_MSIX_ID;
 
 	vxge_hw_vpath_intr_enable(vpath->handle);
 
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
-		tim_msix_id[0] = msix_id;
-		tim_msix_id[1] = msix_id + 1;
 		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 			alarm_msix_id);
 
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
 
 		/* enable the alarm vector */
-		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
 	}
 }
 
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 	if (vdev->config.intr_type == INTA)
 		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
 	else {
-		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
 
 		/* disable the alarm vector */
-		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
 	}
 }
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d Exiting...", __func__, __LINE__);
-	return;
 }
 #endif
 
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 	enum vxge_hw_status status;
 	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
 	struct vxgedev *vdev = vpath->vdev;
-	int alarm_msix_id =
-		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+	int msix_id = (vpath->handle->vpath->vp_id *
+		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
-			alarm_msix_id);
+		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
 
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-				alarm_msix_id);
+				msix_id);
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 static int vxge_alloc_msix(struct vxgedev *vdev)
 {
 	int j, i, ret = 0;
-	int intr_cnt = 0;
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int msix_intr_vect = 0, temp;
 	vdev->intr_cnt = 0;
 
+start:
 	/* Tx/Rx MSIX Vectors count */
 	vdev->intr_cnt = vdev->no_of_vpath * 2;
 
 	/* Alarm MSIX Vectors count */
 	vdev->intr_cnt++;
 
-	intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-	vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
 						GFP_KERNEL);
 	if (!vdev->entries) {
 		vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
 
-	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
-							GFP_KERNEL);
+	vdev->vxge_entries =
+		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+			GFP_KERNEL);
 	if (!vdev->vxge_entries) {
 		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
 			VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		return -ENOMEM;
 	}
 
-	/* Last vector in the list is used for alarm */
-	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
 
 		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
 
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 	}
 
 	/* Initialize the alarm vector */
-	vdev->entries[j].entry = alarm_msix_id;
-	vdev->vxge_entries[j].entry = alarm_msix_id;
+	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
 	vdev->vxge_entries[j].in_use = 0;
 
-	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-	/* if driver request exceeeds available irq's, request with a small
-	 * number.
-	 */
-	if (ret > 0) {
-		vxge_debug_init(VXGE_ERR,
-			"%s: MSI-X enable failed for %d vectors, available: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
-		vdev->max_vpath_supported = vdev->no_of_vpath;
-		intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
-		/* Reset the alarm vector setting */
-		vdev->entries[j].entry = 0;
-		vdev->vxge_entries[j].entry = 0;
-
-		/* Initialize the alarm vector with new setting */
-		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
-		vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
-		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
-		if (!ret)
-			vxge_debug_init(VXGE_ERR,
-				"%s: MSI-X enabled for %d vectors",
-				VXGE_DRIVER_NAME, intr_cnt);
-	}
+	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
 
-	if (ret) {
+	if (ret > 0) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: MSI-X enable failed for %d vectors, ret: %d",
-			VXGE_DRIVER_NAME, intr_cnt, ret);
+			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
 		kfree(vdev->entries);
 		kfree(vdev->vxge_entries);
 		vdev->entries = NULL;
 		vdev->vxge_entries = NULL;
+
+		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+			return -ENODEV;
+		/* Try with less no of vector by reducing no of vpaths count */
+		temp = (ret - 1)/2;
+		vxge_close_vpaths(vdev, temp);
+		vdev->no_of_vpath = temp;
+		goto start;
+	} else if (ret < 0)
 		return -ENODEV;
-	}
+
 	return 0;
 }
 
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 {
 
 	int i, ret = 0;
-	enum vxge_hw_status status;
 	/* 0 - Tx, 1 - Rx */
-	int tim_msix_id[4];
-	int alarm_msix_id = 0, msix_intr_vect = 0;
+	int tim_msix_id[4] = {0, 1, 0, 0};
+
 	vdev->intr_cnt = 0;
 
 	/* allocate msix vectors */
 	ret = vxge_alloc_msix(vdev);
 	if (!ret) {
-		/* Last vector in the list is used for alarm */
-		alarm_msix_id =
-			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
 		for (i = 0; i < vdev->no_of_vpath; i++) {
 
 			/* If fifo or ring are not enabled
 			   the MSIX vector for that should be set to 0
 			   Hence initializeing this array to all 0s.
 			*/
-			memset(tim_msix_id, 0, sizeof(tim_msix_id));
-			msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-			tim_msix_id[0] = msix_intr_vect;
-
-			tim_msix_id[1] = msix_intr_vect + 1;
-			vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
+			vdev->vpaths[i].ring.rx_vector_no =
+				(vdev->vpaths[i].device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
-			status = vxge_hw_vpath_msix_set(
-					vdev->vpaths[i].handle,
-					tim_msix_id, alarm_msix_id);
-			if (status != VXGE_HW_OK) {
-				vxge_debug_init(VXGE_ERR,
-					"vxge_hw_vpath_msix_set "
-					"failed with status : %x", status);
-				kfree(vdev->entries);
-				kfree(vdev->vxge_entries);
-				pci_disable_msix(vdev->pdev);
-				return -ENODEV;
-			}
+			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+					tim_msix_id, VXGE_ALARM_MSIX_ID);
 		}
 	}
 
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 {
 	int intr_cnt;
 
-	for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
 		intr_cnt++) {
 		if (vdev->vxge_entries[intr_cnt].in_use) {
 			synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			switch (msix_idx) {
 			case 0:
 				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-					"%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
-					vdev->ndev->name, pci_fun, vp_idx,
-					vdev->entries[intr_cnt].entry);
+					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+					vdev->ndev->name,
+					vdev->entries[intr_cnt].entry,
+					pci_fun, vp_idx);
 				ret = request_irq(
 					vdev->entries[intr_cnt].vector,
 					vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
 				break;
 			case 1:
 				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-					"%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
-					vdev->ndev->name, pci_fun, vp_idx,
-					vdev->entries[intr_cnt].entry);
+					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+					vdev->ndev->name,
+					vdev->entries[intr_cnt].entry,
+					pci_fun, vp_idx);
 				ret = request_irq(
 					vdev->entries[intr_cnt].vector,
 					vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			if (irq_req) {
 				/* We requested for this msix interrupt */
 				vdev->vxge_entries[intr_cnt].in_use = 1;
+				msix_idx += vdev->vpaths[vp_idx].device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE;
 				vxge_hw_vpath_msix_unmask(
 					vdev->vpaths[vp_idx].handle,
-					intr_idx);
+					msix_idx);
 				intr_cnt++;
 			}
 
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			vp_idx++;
 		}
 
-		intr_cnt = vdev->max_vpath_supported * 2;
+		intr_cnt = vdev->no_of_vpath * 2;
 		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-			"%s:vxge Alarm fn: %d MSI-X: %d",
-			vdev->ndev->name, pci_fun,
-			vdev->entries[intr_cnt].entry);
+			"%s:vxge:MSI-X %d - Alarm - fn:%d",
+			vdev->ndev->name,
+			vdev->entries[intr_cnt].entry,
+			pci_fun);
 		/* For Alarm interrupts */
 		ret = request_irq(vdev->entries[intr_cnt].vector,
 			vxge_alarm_msix_handle, 0,
 			vdev->desc[intr_cnt],
-			&vdev->vpaths[vp_idx]);
+			&vdev->vpaths[0]);
 		if (ret) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			goto INTA_MODE;
 		}
 
+		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
-					intr_idx - 2);
+					msix_idx);
 		vdev->vxge_entries[intr_cnt].in_use = 1;
-		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
 	}
 INTA_MODE:
 #endif
-	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
 
 	if (vdev->config.intr_type == INTA) {
+		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+			"%s:vxge:INTA", vdev->ndev->name);
 		vxge_hw_device_set_intr_type(vdev->devh,
 			VXGE_HW_INTR_MODE_IRQLINE);
 		vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
 		for (i = 0; i < vdev->no_of_vpath; i++)
 			netif_napi_del(&vdev->vpaths[i].ring.napi);
 	}
-	return;
 }
 
 int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
 		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
 			bw_percentage[i] = bw_percentage[0];
 	}
-
-	return;
 }
 
 /*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
 	netif_device_attach(netdev);
 }
 
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+	u32 num_functions = 0;
+
+	switch (function_mode) {
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+		num_functions = 8;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+		num_functions = 1;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SRIOV:
+	case VXGE_HW_FUNCTION_MODE_MRIOV:
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+		num_functions = 17;
+		break;
+	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+		num_functions = 4;
+		break;
+	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+		num_functions = 2;
+		break;
+	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+		num_functions = 8; /* TODO */
+		break;
+	}
+	return num_functions;
+}
+
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 	u8 *macaddr;
 	struct vxge_mac_addrs *entry;
 	static int bus = -1, device = -1;
+	u32 host_type;
 	u8 new_device = 0;
+	enum vxge_hw_status is_privileged;
+	u32 function_mode;
+	u32 num_vfs = 0;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 	attr.pdev = pdev;
 
-	if (bus != pdev->bus->number)
-		new_device = 1;
-	if (device != PCI_SLOT(pdev->devfn))
+	/* In SRIOV-17 mode, functions of the same adapter
+	 * can be deployed on different buses */
+	if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+		(device != PCI_SLOT(pdev->devfn))))
 		new_device = 1;
 
 	bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 			driver_config->total_dev_cnt);
 		driver_config->config_dev_cnt = 0;
 		driver_config->total_dev_cnt = 0;
-		driver_config->g_no_cpus = 0;
 	}
-
+	/* Now making the CPU based no of vpath calculation
+	 * applicable for individual functions as well.
+	 */
+	driver_config->g_no_cpus = 0;
 	driver_config->vpath_per_dev = max_config_vpath;
 
 	driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		"%s:%d Vpath mask = %llx", __func__, __LINE__,
 		(unsigned long long)vpath_mask);
 
+	function_mode = ll_config.device_hw_info.function_mode;
+	host_type = ll_config.device_hw_info.host_type;
+	is_privileged = __vxge_hw_device_is_privilaged(host_type,
+		ll_config.device_hw_info.func_id);
+
 	/* Check how many vpaths are available */
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 		if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		max_vpath_supported++;
 	}
 
+	if (new_device)
+		num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
 	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
-	if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
-		ll_config.device_hw_info.function_mode) &&
-		(max_config_dev > 1) && (pdev->is_physfn)) {
-		ret = pci_enable_sriov(pdev, max_config_dev - 1);
-		if (ret)
-			vxge_debug_ll_config(VXGE_ERR,
-				"Failed to enable SRIOV: %d \n", ret);
+	if (is_sriov(function_mode) && (max_config_dev > 1) &&
+		(ll_config.intr_type != INTA) &&
+		(is_privileged == VXGE_HW_OK)) {
+		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+			? (max_config_dev - 1) : num_vfs);
+		if (ret)
+			vxge_debug_ll_config(VXGE_ERR,
+				"Failed in enabling SRIOV mode: %d\n", ret);
 	}
 
 	/*
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d7..60276b20fa5e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
 #define PCI_DEVICE_ID_TITAN_UNI 0x5833
 #define VXGE_USE_DEFAULT 0xffffffff
 #define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
 #define VXGE_HW_RXSYNC_FREQ_CNT 4
 #define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
 #define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
 
 #define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
 
+#define is_sriov(function_mode) \
+	((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+	(function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+	(function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
 enum vxge_reset_event {
 	/* reset events */
 	VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..6cc1dd79b40b 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
 {
 
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-			0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
-
-	return;
 }
 
 /**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 {
 
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
-			0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-
-	return;
 }
 
 /**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
 	val64 = readq(&hldev->common_reg->titan_general_int_status);
 
 	vxge_hw_device_unmask_all(hldev);
-
-	return;
 }
 
 /**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
 		vxge_hw_vpath_intr_disable(
 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
 	}
-
-	return;
 }
 
 /**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
 
 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
 			&hldev->common_reg->titan_mask_all_int);
-
-	return;
 }
 
 /**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
 
 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
 			&hldev->common_reg->titan_mask_all_int);
-
-	return;
 }
 
 /**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
 			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
 			&hldev->common_reg->tim_int_status1);
 	}
-
-	return;
 }
 
 /*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
 
 	channel = &ring->channel;
 
-	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
 
 	if (ring->stats->common_stats.usage_cnt > 0)
 		ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
 	channel = &ring->channel;
 
 	wmb();
-	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
 
 	vxge_hw_channel_dtr_post(channel, rxdh);
 
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
 	struct __vxge_hw_channel *channel;
 	struct vxge_hw_ring_rxd_1 *rxdp;
 	enum vxge_hw_status status = VXGE_HW_OK;
+	u64 control_0, own;
 
 	channel = &ring->channel;
 
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
 		goto exit;
 	}
 
+	control_0 = rxdp->control_0;
+	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
 	/* check whether it is not the end */
-	if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+	if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
 
 		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
 				0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
 		++ring->cmpl_cnt;
 		vxge_hw_channel_dtr_complete(channel);
 
-		*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
 		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
 
 		ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
 	 * such as unknown UPV6 header), Drop it !!!
 	 */
 
-	if (t_code == 0 || t_code == 5) {
+	if (t_code == VXGE_HW_RING_T_CODE_OK ||
+		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
 		status = VXGE_HW_OK;
 		goto exit;
 	}
 
-	if (t_code > 0xF) {
+	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
 		status = VXGE_HW_ERR_INVALID_TCODE;
 		goto exit;
 	}
@@ -2216,29 +2204,24 @@ exit:
  * This API will associate a given MSIX vector numbers with the four TIM
  * interrupts and alarm interrupt.
  */
-enum vxge_hw_status
+void
 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
			int alarm_msix_id)
 {
 	u64 val64;
 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-	u32 first_vp_id = vpath->hldev->first_vp_id;
+	u32 vp_id = vp->vpath->vp_id;
 
 	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[0]) |
+		(vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
-		(first_vp_id * 4) + tim_msix_id[1]) |
-		VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
-			(first_vp_id * 4) + tim_msix_id[2]);
-
-	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
-			(first_vp_id * 4) + tim_msix_id[3]);
+		(vp_id * 4) + tim_msix_id[1]);
 
 	writeq(val64, &vp_reg->interrupt_cfg0);
 
 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
-			(first_vp_id * 4) + alarm_msix_id),
+			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);
 
 	if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
			VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
			0, 32), &vp_reg->one_shot_vect3_en);
 	}
-
-	return VXGE_HW_OK;
 }
 
 /**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
 	__vxge_hw_pio_mem_write32_upper(
-		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-			(msix_id / 4)), 0, 32),
+		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-
-	return;
 }
 
 /**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
 	if (hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-				(msix_id/4)), 0, 32),
+			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->
				clr_msix_one_shot_vec[msix_id%4]);
 	} else {
 		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-				(msix_id/4)), 0, 32),
+			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->
				clear_msix_mask_vect[msix_id%4]);
 	}
-
-	return;
 }
 
 /**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 {
 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
 	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
-			(msix_id/4)), 0, 32),
+		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-
-	return;
 }
 
 /**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
 	__vxge_hw_pio_mem_write32_upper(
 		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
 		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-
-	return;
 }
 
 /**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
 	}
-
-	return;
 }
 
 /**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
 	}
-
-	return;
 }
 
 /**
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e84..c252f3d3f650 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
 	u32 rth_hash_type;
 	u32 rth_value;
 };
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ *		configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ *		configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ *		presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
+ *		such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
+ *		error, such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer(
+ *		s) were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
+ *		Segment1 exceeded the capacity of Buffer1 and the remainder
+ *		was placed in Buffer2. Segment2 now starts in Buffer3.
+ *		No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
+ *		assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
+ *		VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
+ *		transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+	VXGE_HW_RING_T_CODE_OK = 0x0,
+	VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+	VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+	VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+	VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+	VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+	VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+	VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+	VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+	VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+	VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+	VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+	VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
 
 /**
  * enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
 	void *rxdh);
 
 enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
 
 void
 vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
 
 #define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
 #define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
 
 /*
  * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
 	struct __vxge_hw_vpath_handle *vpath_handle,
 	u32 skip_alarms);
 
-enum vxge_hw_status
+void
 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
 	int *tim_msix_id, int alarm_msix_id);
 
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b8..5da7ab1fd307 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
 
 #define VXGE_VERSION_MAJOR "2"
 #define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "6"
-#define VXGE_VERSION_BUILD "18937"
+#define VXGE_VERSION_FIX "8"
+#define VXGE_VERSION_BUILD "20182"
 #define VXGE_VERSION_FOR "k"
 #endif