aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/cnic.c
diff options
context:
space:
mode:
authorMichael Chan <mchan@broadcom.com>2010-05-18 07:32:53 -0400
committerDavid S. Miller <davem@davemloft.net>2010-05-18 18:16:44 -0400
commit48f753d2ba94a4081400fa8d26bdbfbbf12b10de (patch)
tree1b4bf9472d89cee9a517e154a4af59f7b78214c7 /drivers/net/cnic.c
parent1f1332a3cb7ac73e3bcff6ea42ff965c90a29d12 (diff)
cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
Every time the iSCSI ring finishes setup or shutdown, we need to return the SPQ (slow path queue) credit to the bnx2x driver. Without this step, the SPQ will eventually be full, causing iSCSI to fail. This can happen after 3 or 4 MTU changes, for example. Add code to wait for these slow path commands to complete in the RX ring and return the SPQ credit to bnx2x. Signed-off-by: Michael Chan <mchan@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--drivers/net/cnic.c71
1 files changed, 70 insertions, 1 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7c6d325b880a..be90d3598bca 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2145,17 +2145,56 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
2145 return last_cnt; 2145 return last_cnt;
2146} 2146}
2147 2147
2148static int cnic_l2_completion(struct cnic_local *cp)
2149{
2150 u16 hw_cons, sw_cons;
2151 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2152 (cp->l2_ring + (2 * BCM_PAGE_SIZE));
2153 u32 cmd;
2154 int comp = 0;
2155
2156 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2157 return 0;
2158
2159 hw_cons = *cp->rx_cons_ptr;
2160 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2161 hw_cons++;
2162
2163 sw_cons = cp->rx_cons;
2164 while (sw_cons != hw_cons) {
2165 u8 cqe_fp_flags;
2166
2167 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2168 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2169 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2170 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2171 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2172 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2173 cmd == RAMROD_CMD_ID_ETH_HALT)
2174 comp++;
2175 }
2176 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2177 }
2178 return comp;
2179}
2180
2148static void cnic_chk_pkt_rings(struct cnic_local *cp) 2181static void cnic_chk_pkt_rings(struct cnic_local *cp)
2149{ 2182{
2150 u16 rx_cons = *cp->rx_cons_ptr; 2183 u16 rx_cons = *cp->rx_cons_ptr;
2151 u16 tx_cons = *cp->tx_cons_ptr; 2184 u16 tx_cons = *cp->tx_cons_ptr;
2185 int comp = 0;
2152 2186
2153 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2187 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2188 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2189 comp = cnic_l2_completion(cp);
2190
2154 cp->tx_cons = tx_cons; 2191 cp->tx_cons = tx_cons;
2155 cp->rx_cons = rx_cons; 2192 cp->rx_cons = rx_cons;
2156 2193
2157 uio_event_notify(cp->cnic_uinfo); 2194 uio_event_notify(cp->cnic_uinfo);
2158 } 2195 }
2196 if (comp)
2197 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2159} 2198}
2160 2199
2161static int cnic_service_bnx2(void *data, void *status_blk) 2200static int cnic_service_bnx2(void *data, void *status_blk)
@@ -4168,6 +4207,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
4168 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4207 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4169 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4208 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4170 4209
4210 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4211
4171 cnic_init_bnx2x_tx_ring(dev); 4212 cnic_init_bnx2x_tx_ring(dev);
4172 cnic_init_bnx2x_rx_ring(dev); 4213 cnic_init_bnx2x_rx_ring(dev);
4173 4214
@@ -4175,6 +4216,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
4175 l5_data.phy_address.hi = 0; 4216 l5_data.phy_address.hi = 0;
4176 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4217 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4177 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4218 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4219 i = 0;
4220 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4221 ++i < 10)
4222 msleep(1);
4223
4224 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4225 netdev_err(dev->netdev,
4226 "iSCSI CLIENT_SETUP did not complete\n");
4227 cnic_kwq_completion(dev, 1);
4178 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 4228 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
4179 } 4229 }
4180} 4230}
@@ -4187,14 +4237,25 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4187 struct cnic_local *cp = dev->cnic_priv; 4237 struct cnic_local *cp = dev->cnic_priv;
4188 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4238 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4189 union l5cm_specific_data l5_data; 4239 union l5cm_specific_data l5_data;
4240 int i;
4190 4241
4191 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4242 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4192 4243
4244 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4245
4193 l5_data.phy_address.lo = cli; 4246 l5_data.phy_address.lo = cli;
4194 l5_data.phy_address.hi = 0; 4247 l5_data.phy_address.hi = 0;
4195 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 4248 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4196 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4249 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4197 msleep(10); 4250 i = 0;
4251 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4252 ++i < 10)
4253 msleep(1);
4254
4255 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4256 netdev_err(dev->netdev,
4257 "iSCSI CLIENT_HALT did not complete\n");
4258 cnic_kwq_completion(dev, 1);
4198 4259
4199 memset(&l5_data, 0, sizeof(l5_data)); 4260 memset(&l5_data, 0, sizeof(l5_data));
4200 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4261 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
@@ -4315,7 +4376,15 @@ static void cnic_stop_hw(struct cnic_dev *dev)
4315{ 4376{
4316 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4377 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
4317 struct cnic_local *cp = dev->cnic_priv; 4378 struct cnic_local *cp = dev->cnic_priv;
4379 int i = 0;
4318 4380
4381 /* Need to wait for the ring shutdown event to complete
4382 * before clearing the CNIC_UP flag.
4383 */
4384 while (cp->uio_dev != -1 && i < 15) {
4385 msleep(100);
4386 i++;
4387 }
4319 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 4388 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
4320 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 4389 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
4321 synchronize_rcu(); 4390 synchronize_rcu();