aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/cnic.c
diff options
context:
space:
mode:
authorMichael Chan <mchan@broadcom.com>2010-06-24 10:58:39 -0400
committerDavid S. Miller <davem@davemloft.net>2010-06-25 23:37:19 -0400
commite6c2889478f04b30e5a71d753734644c579472fa (patch)
treee5cf3e039b6b62684845b9b6e3044784e1d129ad /drivers/net/cnic.c
parent66fee9ed03a4413ea054e437b65af6fd3583b4db (diff)
cnic: Unify kcq allocation for all devices.
By creating a common data structure kcq_info for all devices, the kcq (kernel completion queue) for all devices can be allocated by common code. Signed-off-by: Michael Chan <mchan@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--drivers/net/cnic.c143
1 files changed, 87 insertions, 56 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index df6a0ccf0655..c1f0d16dd47a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -804,7 +804,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
804 cnic_free_dma(dev, &cp->conn_buf_info); 804 cnic_free_dma(dev, &cp->conn_buf_info);
805 cnic_free_dma(dev, &cp->kwq_info); 805 cnic_free_dma(dev, &cp->kwq_info);
806 cnic_free_dma(dev, &cp->kwq_16_data_info); 806 cnic_free_dma(dev, &cp->kwq_16_data_info);
807 cnic_free_dma(dev, &cp->kcq_info); 807 cnic_free_dma(dev, &cp->kcq1.dma);
808 kfree(cp->iscsi_tbl); 808 kfree(cp->iscsi_tbl);
809 cp->iscsi_tbl = NULL; 809 cp->iscsi_tbl = NULL;
810 kfree(cp->ctx_tbl); 810 kfree(cp->ctx_tbl);
@@ -863,6 +863,37 @@ static int cnic_alloc_context(struct cnic_dev *dev)
863 return 0; 863 return 0;
864} 864}
865 865
866static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
867{
868 int err, i, is_bnx2 = 0;
869 struct kcqe **kcq;
870
871 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
872 is_bnx2 = 1;
873
874 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
875 if (err)
876 return err;
877
878 kcq = (struct kcqe **) info->dma.pg_arr;
879 info->kcq = kcq;
880
881 if (is_bnx2)
882 return 0;
883
884 for (i = 0; i < KCQ_PAGE_CNT; i++) {
885 struct bnx2x_bd_chain_next *next =
886 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
887 int j = i + 1;
888
889 if (j >= KCQ_PAGE_CNT)
890 j = 0;
891 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
892 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
893 }
894 return 0;
895}
896
866static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) 897static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
867{ 898{
868 struct cnic_local *cp = dev->cnic_priv; 899 struct cnic_local *cp = dev->cnic_priv;
@@ -954,10 +985,9 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
954 goto error; 985 goto error;
955 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; 986 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
956 987
957 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1); 988 ret = cnic_alloc_kcq(dev, &cp->kcq1);
958 if (ret) 989 if (ret)
959 goto error; 990 goto error;
960 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
961 991
962 ret = cnic_alloc_context(dev); 992 ret = cnic_alloc_context(dev);
963 if (ret) 993 if (ret)
@@ -1076,22 +1106,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1076 j++; 1106 j++;
1077 } 1107 }
1078 1108
1079 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); 1109 ret = cnic_alloc_kcq(dev, &cp->kcq1);
1080 if (ret) 1110 if (ret)
1081 goto error; 1111 goto error;
1082 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
1083
1084 for (i = 0; i < KCQ_PAGE_CNT; i++) {
1085 struct bnx2x_bd_chain_next *next =
1086 (struct bnx2x_bd_chain_next *)
1087 &cp->kcq[i][MAX_KCQE_CNT];
1088 int j = i + 1;
1089
1090 if (j >= KCQ_PAGE_CNT)
1091 j = 0;
1092 next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
1093 next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
1094 }
1095 1112
1096 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * 1113 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
1097 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; 1114 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
@@ -2135,7 +2152,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
2135 ri &= MAX_KCQ_IDX; 2152 ri &= MAX_KCQ_IDX;
2136 2153
2137 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2154 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2138 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2155 kcqe = &cp->kcq1.kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2139 cp->completed_kcq[kcqe_cnt++] = kcqe; 2156 cp->completed_kcq[kcqe_cnt++] = kcqe;
2140 i = cp->next_idx(i); 2157 i = cp->next_idx(i);
2141 ri = i & MAX_KCQ_IDX; 2158 ri = i & MAX_KCQ_IDX;
@@ -2219,7 +2236,7 @@ static int cnic_service_bnx2(void *data, void *status_blk)
2219 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2236 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2220 2237
2221 hw_prod = sblk->status_completion_producer_index; 2238 hw_prod = sblk->status_completion_producer_index;
2222 sw_prod = cp->kcq_prod_idx; 2239 sw_prod = cp->kcq1.sw_prod_idx;
2223 while (sw_prod != hw_prod) { 2240 while (sw_prod != hw_prod) {
2224 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2241 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2225 if (kcqe_cnt == 0) 2242 if (kcqe_cnt == 0)
@@ -2238,9 +2255,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
2238 } 2255 }
2239 2256
2240done: 2257done:
2241 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2258 CNIC_WR16(dev, cp->kcq1.io_addr, sw_prod);
2242 2259
2243 cp->kcq_prod_idx = sw_prod; 2260 cp->kcq1.sw_prod_idx = sw_prod;
2244 2261
2245 cnic_chk_pkt_rings(cp); 2262 cnic_chk_pkt_rings(cp);
2246 return status_idx; 2263 return status_idx;
@@ -2258,7 +2275,7 @@ static void cnic_service_bnx2_msix(unsigned long data)
2258 cp->kwq_con_idx = status_blk->status_cmd_consumer_index; 2275 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
2259 2276
2260 hw_prod = status_blk->status_completion_producer_index; 2277 hw_prod = status_blk->status_completion_producer_index;
2261 sw_prod = cp->kcq_prod_idx; 2278 sw_prod = cp->kcq1.sw_prod_idx;
2262 while (sw_prod != hw_prod) { 2279 while (sw_prod != hw_prod) {
2263 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2280 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2264 if (kcqe_cnt == 0) 2281 if (kcqe_cnt == 0)
@@ -2277,8 +2294,8 @@ static void cnic_service_bnx2_msix(unsigned long data)
2277 } 2294 }
2278 2295
2279done: 2296done:
2280 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2297 CNIC_WR16(dev, cp->kcq1.io_addr, sw_prod);
2281 cp->kcq_prod_idx = sw_prod; 2298 cp->kcq1.sw_prod_idx = sw_prod;
2282 2299
2283 cnic_chk_pkt_rings(cp); 2300 cnic_chk_pkt_rings(cp);
2284 2301
@@ -2290,11 +2307,11 @@ done:
2290static void cnic_doirq(struct cnic_dev *dev) 2307static void cnic_doirq(struct cnic_dev *dev)
2291{ 2308{
2292 struct cnic_local *cp = dev->cnic_priv; 2309 struct cnic_local *cp = dev->cnic_priv;
2293 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2310 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2294 2311
2295 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2312 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2296 prefetch(cp->status_blk.gen); 2313 prefetch(cp->status_blk.gen);
2297 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2314 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2298 2315
2299 tasklet_schedule(&cp->cnic_irq_task); 2316 tasklet_schedule(&cp->cnic_irq_task);
2300 } 2317 }
@@ -2354,7 +2371,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2354 2371
2355 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; 2372 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
2356 hw_prod = cp->hw_idx(hw_prod); 2373 hw_prod = cp->hw_idx(hw_prod);
2357 sw_prod = cp->kcq_prod_idx; 2374 sw_prod = cp->kcq1.sw_prod_idx;
2358 while (sw_prod != hw_prod) { 2375 while (sw_prod != hw_prod) {
2359 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2376 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
2360 if (kcqe_cnt == 0) 2377 if (kcqe_cnt == 0)
@@ -2373,11 +2390,11 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2373 } 2390 }
2374 2391
2375done: 2392done:
2376 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); 2393 CNIC_WR16(dev, cp->kcq1.io_addr, sw_prod + MAX_KCQ_IDX);
2377 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2394 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
2378 status_idx, IGU_INT_ENABLE, 1); 2395 status_idx, IGU_INT_ENABLE, 1);
2379 2396
2380 cp->kcq_prod_idx = sw_prod; 2397 cp->kcq1.sw_prod_idx = sw_prod;
2381} 2398}
2382 2399
2383static int cnic_service_bnx2x(void *data, void *status_blk) 2400static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3711,7 +3728,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3711 struct cnic_local *cp = dev->cnic_priv; 3728 struct cnic_local *cp = dev->cnic_priv;
3712 struct cnic_eth_dev *ethdev = cp->ethdev; 3729 struct cnic_eth_dev *ethdev = cp->ethdev;
3713 struct status_block *sblk = cp->status_blk.gen; 3730 struct status_block *sblk = cp->status_blk.gen;
3714 u32 val; 3731 u32 val, kcq_cid_addr, kwq_cid_addr;
3715 int err; 3732 int err;
3716 3733
3717 cnic_set_bnx2_mac(dev); 3734 cnic_set_bnx2_mac(dev);
@@ -3736,7 +3753,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3736 cnic_init_context(dev, KWQ_CID); 3753 cnic_init_context(dev, KWQ_CID);
3737 cnic_init_context(dev, KCQ_CID); 3754 cnic_init_context(dev, KCQ_CID);
3738 3755
3739 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 3756 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
3740 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 3757 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
3741 3758
3742 cp->max_kwq_idx = MAX_KWQ_IDX; 3759 cp->max_kwq_idx = MAX_KWQ_IDX;
@@ -3752,50 +3769,58 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3752 /* Initialize the kernel work queue context. */ 3769 /* Initialize the kernel work queue context. */
3753 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3770 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3754 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3771 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3755 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val); 3772 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
3756 3773
3757 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 3774 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
3758 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3775 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3759 3776
3760 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 3777 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
3761 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3778 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3762 3779
3763 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 3780 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
3764 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3781 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3765 3782
3766 val = (u32) cp->kwq_info.pgtbl_map; 3783 val = (u32) cp->kwq_info.pgtbl_map;
3767 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3784 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3785
3786 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
3787 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
3768 3788
3769 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 3789 cp->kcq1.sw_prod_idx = 0;
3770 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 3790 cp->kcq1.hw_prod_idx_ptr =
3791 (u16 *) &sblk->status_completion_producer_index;
3771 3792
3772 cp->kcq_prod_idx = 0; 3793 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
3773 3794
3774 /* Initialize the kernel complete queue context. */ 3795 /* Initialize the kernel complete queue context. */
3775 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3796 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
3776 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3797 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
3777 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val); 3798 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
3778 3799
3779 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 3800 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
3780 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3801 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
3781 3802
3782 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 3803 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
3783 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3804 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
3784 3805
3785 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32); 3806 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
3786 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3807 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
3787 3808
3788 val = (u32) cp->kcq_info.pgtbl_map; 3809 val = (u32) cp->kcq1.dma.pgtbl_map;
3789 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3810 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
3790 3811
3791 cp->int_num = 0; 3812 cp->int_num = 0;
3792 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3813 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3814 struct status_block_msix *msblk = cp->status_blk.bnx2;
3793 u32 sb_id = cp->status_blk_num; 3815 u32 sb_id = cp->status_blk_num;
3794 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 3816 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
3795 3817
3818 cp->kcq1.hw_prod_idx_ptr =
3819 (u16 *) &msblk->status_completion_producer_index;
3820 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
3796 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3821 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
3797 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3822 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3798 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3823 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
3799 } 3824 }
3800 3825
3801 /* Enable Commnad Scheduler notification when we write to the 3826 /* Enable Commnad Scheduler notification when we write to the
@@ -4145,28 +4170,34 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4145 if (ret) 4170 if (ret)
4146 return -ENOMEM; 4171 return -ENOMEM;
4147 4172
4148 cp->kcq_io_addr = BAR_CSTRORM_INTMEM + 4173 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4149 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); 4174 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
4150 cp->kcq_prod_idx = 0; 4175 cp->kcq1.sw_prod_idx = 0;
4176
4177 cp->kcq1.hw_prod_idx_ptr =
4178 &cp->status_blk.bnx2x->c_status_block.index_values[
4179 HC_INDEX_C_ISCSI_EQ_CONS];
4180 cp->kcq1.status_idx_ptr =
4181 &cp->status_blk.bnx2x->c_status_block.status_block_index;
4151 4182
4152 cnic_get_bnx2x_iscsi_info(dev); 4183 cnic_get_bnx2x_iscsi_info(dev);
4153 4184
4154 /* Only 1 EQ */ 4185 /* Only 1 EQ */
4155 CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); 4186 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4156 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4187 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4157 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); 4188 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
4158 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4189 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4159 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), 4190 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
4160 cp->kcq_info.pg_map_arr[1] & 0xffffffff); 4191 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4161 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4192 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4162 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, 4193 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
4163 (u64) cp->kcq_info.pg_map_arr[1] >> 32); 4194 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4164 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4195 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4165 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), 4196 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
4166 cp->kcq_info.pg_map_arr[0] & 0xffffffff); 4197 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4167 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4198 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4168 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, 4199 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
4169 (u64) cp->kcq_info.pg_map_arr[0] >> 32); 4200 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4170 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4201 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4171 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); 4202 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
4172 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4203 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
@@ -4394,7 +4425,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4394 0); 4425 0);
4395 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4426 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4396 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); 4427 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
4397 CNIC_WR16(dev, cp->kcq_io_addr, 0); 4428 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
4398 cnic_free_resc(dev); 4429 cnic_free_resc(dev);
4399} 4430}
4400 4431