author		Michael Chan <mchan@broadcom.com>	2010-12-23 02:43:04 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-23 14:44:34 -0500
commit		e1928c86c4829703b800c81cc9edc939b5634e6f (patch)
tree		bea86c1f19868963b95c0cdc45f22f5e6e5d789c /drivers/net/cnic.c
parent		e21ba414eed8a233eadb79bb6b158ac7ceb35025 (diff)
cnic: Add FCoE support on 57712
- Connection ID (cid) management
- Slow-path command and response support
- Update version to 2.2.11.
Reviewed-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r--	drivers/net/cnic.c | 466
1 file changed, 459 insertions, 7 deletions
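
Aside, not part of the patch below: the new submit path dispatches on the layer bits carried in each KWQE header, so an upper-layer FCoE driver only needs to tag its work-queue entries as L5 FCoE and hand them to cnic through the existing cnic_dev submit hook. The following is a minimal, hypothetical caller-side sketch (the function name and its caller are illustrative and not from this patch); it uses only identifiers that appear in the diff.

	/* Hypothetical sketch: hand a batch of FCoE KWQEs to cnic.  On bnx2x
	 * devices dev->submit_kwqes points at cnic_submit_bnx2x_kwqes(), which
	 * inspects only the layer bits of wqes[0]->kwqe_op_flag to pick the
	 * FCoE path added by this patch.
	 */
	static int example_submit_fcoe_kwqes(struct cnic_dev *dev,
					     struct kwqe *wqes[], u32 num_wqes)
	{
		u32 layer = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;

		/* cnic_submit_bnx2x_kwqes() would send iSCSI/L4/L2 layer codes
		 * to cnic_submit_bnx2x_iscsi_kwqes() instead.
		 */
		if (layer != KWQE_FLAGS_LAYER_MASK_L5_FCOE)
			return -EINVAL;

		/* Returns -EAGAIN while CNIC_F_CNIC_UP is not set. */
		return dev->submit_kwqes(dev, wqes, num_wqes);
	}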
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 6ce739859ac..4a9c628ab2a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -850,6 +850,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	kfree(cp->ctx_tbl);
 	cp->ctx_tbl = NULL;
 
+	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 	cnic_free_id_tbl(&cp->cid_tbl);
 }
 
@@ -1137,12 +1138,22 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 
 	cp->iro_arr = ethdev->iro_arr;
 
-	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
 	cp->iscsi_start_cid = start_cid;
+	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+		if (!cp->fcoe_init_cid)
+			cp->fcoe_init_cid = 0x10;
+	}
+
 	if (start_cid < BNX2X_ISCSI_START_CID) {
 		u32 delta = BNX2X_ISCSI_START_CID - start_cid;
 
 		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+		cp->fcoe_start_cid += delta;
 		cp->max_cid_space += delta;
 	}
 
@@ -1161,6 +1172,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
 	}
 
+	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
 	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
 		PAGE_SIZE;
 
@@ -1454,8 +1468,11 @@ static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 		cnic_free_dma(dev, &iscsi->hq_info);
 		cnic_free_dma(dev, &iscsi->r2tq_info);
 		cnic_free_dma(dev, &iscsi->task_array_info);
+		cnic_free_id(&cp->cid_tbl, ctx->cid);
+	} else {
+		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
 	}
-	cnic_free_id(&cp->cid_tbl, ctx->cid);
+
 	ctx->cid = 0;
 }
 
@@ -1467,6 +1484,16 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
 
+	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+		if (cid == -1) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ctx->cid = cid;
+		return 0;
+	}
+
 	cid = cnic_alloc_new_id(&cp->cid_tbl);
 	if (cid == -1) {
 		ret = -ENOMEM;
@@ -2107,8 +2134,307 @@ static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
 	return 0;
 }
 
-static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
-				   u32 num_wqes)
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_stat *req;
+	struct fcoe_stat_ramrod_params *fcoe_stat;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_stat *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_stat)
+		return -ENOMEM;
+
+	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid;
+	struct fcoe_init_ramrod_params *fcoe_init;
+	struct fcoe_kwqe_init1 *req1;
+	struct fcoe_kwqe_init2 *req2;
+	struct fcoe_kwqe_init3 *req3;
+	union l5cm_specific_data l5_data;
+
+	if (num < 3) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+		*work = 1;
+		return -EINVAL;
+	}
+	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+		*work = 2;
+		return -EINVAL;
+	}
+
+	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_init size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_init)
+		return -ENOMEM;
+
+	memset(fcoe_init, 0, sizeof(*fcoe_init));
+	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
+	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
+	fcoe_init->eq_next_page_addr.lo =
+		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
+	fcoe_init->eq_next_page_addr.hi =
+		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+
+	fcoe_init->sb_num = cp->status_blk_num;
+	fcoe_init->eq_prod = MAX_KCQ_IDX;
+	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+	cp->kcq2.sw_prod_idx = 0;
+
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	printk(KERN_ERR "bdbg: submitting INIT RAMROD \n");
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	*work = 3;
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret = 0;
+	u32 cid = -1, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kwqe_conn_offload1 *req1;
+	struct fcoe_kwqe_conn_offload2 *req2;
+	struct fcoe_kwqe_conn_offload3 *req3;
+	struct fcoe_kwqe_conn_offload4 *req4;
+	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+	struct cnic_context *ctx;
+	struct fcoe_context *fctx;
+	struct regpair ctx_addr;
+	union l5cm_specific_data l5_data;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (num < 4) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+	*work = 4;
+
+	l5_cid = req1->fcoe_conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		goto err_reply;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto err_reply;
+
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		ret = 0;
+		goto err_reply;
+	}
+	cid = ctx->cid;
+
+	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+	if (fctx) {
+		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 val;
+
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->xstorm_ag_context.cdu_reserved = val;
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->ustorm_ag_context.cdu_usage = val;
+	}
+	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_offload size too big\n");
+		goto err_reply;
+	}
+	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_offload)
+		goto err_reply;
+
+	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+	cid = BNX2X_HW_CID(cp, cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+
+err_reply:
+	if (cid != -1)
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_enable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_enable)
+		return -ENOMEM;
+
+	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_disable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_disable)
+		return -ENOMEM;
+
+	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_destroy *req;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (ret == 0) {
+		wait_event(ctx->waitq, ctx->wait_cond);
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(2000));
+	}
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.fcoe_conn_id = req->conn_id;
+	kcqe.fcoe_conn_context_id = cid;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_destroy *req;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_destroy *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+					 struct kwqe *wqes[], u32 num_wqes)
 {
 	int i, work, ret;
 	u32 opcode;
@@ -2172,6 +2498,98 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
 	return 0;
 }
 
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+					struct kwqe *wqes[], u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+		return -EINVAL;
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case FCOE_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_ENABLE_CONN:
+			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DISABLE_CONN:
+			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY:
+			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_STAT:
+			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0)
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int ret = -EINVAL;
+	u32 layer_code;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!num_wqes)
+		return 0;
+
+	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+	switch (layer_code) {
+	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+	case KWQE_FLAGS_LAYER_MASK_L4:
+	case KWQE_FLAGS_LAYER_MASK_L2:
+		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+		break;
+
+	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+		break;
+	}
+	return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+		return KCQE_FLAGS_LAYER_MASK_L4;
+
+	return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2183,7 +2601,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		struct cnic_ulp_ops *ulp_ops;
 		int ulp_type;
 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
-		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
 
 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
 			comp++;
@@ -2191,7 +2609,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		while (j < num_cqes) {
 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
 
-			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
 				break;
 
 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
@@ -2203,6 +2621,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 			ulp_type = CNIC_ULP_RDMA;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
 			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+			ulp_type = CNIC_ULP_FCOE;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
 			ulp_type = CNIC_ULP_L4;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
@@ -3249,6 +3669,18 @@ done:
 	csk_put(csk);
 }
 
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	ctx->timestamp = jiffies;
+	ctx->wait_cond = 1;
+	wake_up(&ctx->waitq);
+}
+
 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -3257,6 +3689,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 	u32 l5_cid;
 	struct cnic_sock *csk;
 
+	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+		cnic_process_fcoe_term_conn(dev, kcqe);
+		return;
+	}
 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
 		cnic_cm_process_offld_pg(dev, l4kcqe);
@@ -3893,7 +4329,7 @@ static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
 
 	memset(&l2kwqe, 0, sizeof(l2kwqe));
 	wqes[0] = &l2kwqe;
-	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
 			       KWQE_OPCODE_SHIFT) | 2;
 	dev->submit_kwqes(dev, wqes, 1);
@@ -4336,6 +4772,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 		val16 ^= 0x1e1e;
 		dev->max_iscsi_conn = val16;
 	}
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
 	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
 		int func = CNIC_FUNC(cp);
 		u32 mf_cfg_addr;
@@ -4362,6 +4802,9 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
 					dev->max_iscsi_conn = 0;
 
+				if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+					dev->max_fcoe_conn = 0;
+
 				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
 					func_ext_config[func].
 					iscsi_mac_addr_upper);
@@ -4463,6 +4906,15 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
 
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
+					BNX2X_FCOE_NUM_CONNECTIONS,
+					cp->fcoe_start_cid);
+
+		if (ret)
+			return -ENOMEM;
+	}
+
 	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
 
 	cnic_init_bnx2x_kcq(dev);