Diffstat (limited to 'drivers/net/benet/be_cmds.c')
-rw-r--r-- drivers/net/benet/be_cmds.c | 258
1 file changed, 200 insertions(+), 58 deletions(-)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed962bc..f1ec191f0c0d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,90 @@
 
 #include "be.h"
 
+void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+	u32 val = 0;
+
+	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+	iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+	if (compl->flags != 0) {
+		compl->flags = le32_to_cpu(compl->flags);
+		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+	compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+	struct be_mcc_cq_entry *compl)
+{
+	u16 compl_status, extd_status;
+
+	/* Just swap the status to host endian; mcc tag is opaquely copied
+	 * from mcc_wrb */
+	be_dws_le_to_cpu(compl, 4);
+
+	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+				CQE_STATUS_COMPL_MASK;
+	if (compl_status != MCC_STATUS_SUCCESS) {
+		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+				CQE_STATUS_EXTD_MASK;
+		printk(KERN_WARNING DRV_NAME
+			" error in cmd completion: status(compl/extd)=%d/%d\n",
+			compl_status, extd_status);
+		return -1;
+	}
+	return 0;
+}
+
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+	struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+	if (be_mcc_compl_is_new(compl)) {
+		queue_tail_inc(mcc_cq);
+		return compl;
+	}
+	return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_cq_entry *compl;
+	int num = 0;
+
+	spin_lock_bh(&ctrl->mcc_cq_lock);
+	while ((compl = be_mcc_compl_get(ctrl))) {
+		if (!(compl->flags & CQE_FLAGS_ASYNC_MASK)) {
+			be_mcc_compl_process(ctrl, compl);
+			atomic_dec(&ctrl->mcc_obj.q.used);
+		}
+		be_mcc_compl_use(compl);
+		num++;
+	}
+	if (num)
+		be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+	spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
 static int be_mbox_db_ready_wait(void __iomem *db)
 {
 	int cnt = 0, wait = 5;
@@ -44,11 +128,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
 
 /*
  * Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
  */
 static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
 {
 	int status;
-	u16 compl_status, extd_status;
 	u32 val = 0;
 	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +163,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
 	if (status != 0)
 		return status;
 
-	/* compl entry has been made now */
-	be_dws_le_to_cpu(cqe, sizeof(*cqe));
-	if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
-		printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+	/* A cq entry has been made now */
+	if (be_mcc_compl_is_new(cqe)) {
+		status = be_mcc_compl_process(ctrl, &mbox->cqe);
+		be_mcc_compl_use(cqe);
+		if (status)
+			return status;
+	} else {
+		printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
 		return -1;
 	}
-
-	compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
-				CQE_STATUS_COMPL_MASK;
-	if (compl_status != MCC_STATUS_SUCCESS) {
-		extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
-				CQE_STATUS_EXTD_MASK;
-		printk(KERN_WARNING DRV_NAME
-			": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
-			compl_status, extd_status);
-	}
-
-	return compl_status;
+	return 0;
 }
 
 static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +312,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
 	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
 }
 
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+	struct be_mcc_wrb *wrb = NULL;
+	if (atomic_read(&mccq->used) < mccq->len) {
+		wrb = queue_head_node(mccq);
+		queue_head_inc(mccq);
+		atomic_inc(&mccq->used);
+		memset(wrb, 0, sizeof(*wrb));
+	}
+	return wrb;
+}
+
 int be_cmd_eq_create(struct be_ctrl_info *ctrl,
 		struct be_queue_info *eq, int eq_delay)
 {
@@ -244,7 +333,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
 	struct be_dma_mem *q_mem = &eq->dma_mem;
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +361,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
 		eq->id = le16_to_cpu(resp->eq_id);
 		eq->created = true;
 	}
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -284,7 +373,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
 	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +393,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
 	if (!status)
 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -315,7 +404,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
 	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +421,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -342,7 +431,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
 	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +443,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
 	req->pmac_id = cpu_to_le32(pmac_id);
 
 	status = be_mbox_db_ring(ctrl);
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -370,7 +459,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
 	void *ctxt = &req->context;
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +477,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
 	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
 	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
 	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
@@ -399,7 +488,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
 		cq->id = le16_to_cpu(resp->cq_id);
 		cq->created = true;
 	}
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
+
+	return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+	if (len_encoded == 16)
+		len_encoded = 0;
+	return len_encoded;
+}
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+			struct be_queue_info *mccq,
+			struct be_queue_info *cq)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+	struct be_dma_mem *q_mem = &mccq->dma_mem;
+	void *ctxt = &req->context;
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+		be_encoded_q_len(mccq->len));
+	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+	be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_db_ring(ctrl);
+	if (!status) {
+		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+		mccq->id = le16_to_cpu(resp->id);
+		mccq->created = true;
+	}
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -415,7 +553,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
 	int status;
 	u32 len_encoded;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +584,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
 		txq->id = le16_to_cpu(resp->cid);
 		txq->created = true;
 	}
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -460,7 +598,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
 	struct be_dma_mem *q_mem = &rxq->dma_mem;
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +620,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
 		rxq->id = le16_to_cpu(resp->id);
 		rxq->created = true;
 	}
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -496,7 +634,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 	u8 subsys = 0, opcode = 0;
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 
 	memset(wrb, 0, sizeof(*wrb));
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +656,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 		subsys = CMD_SUBSYSTEM_ETH;
 		opcode = OPCODE_ETH_RX_DESTROY;
 		break;
+	case QTYPE_MCCQ:
+		subsys = CMD_SUBSYSTEM_COMMON;
+		opcode = OPCODE_COMMON_MCC_DESTROY;
+		break;
 	default:
 		printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
 		status = -1;
@@ -528,7 +670,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 
 	status = be_mbox_db_ring(ctrl);
 err:
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -541,7 +683,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
 	struct be_cmd_req_if_create *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +704,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -572,7 +714,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
 	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +725,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
 	req->interface_id = cpu_to_le32(interface_id);
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
@@ -598,7 +740,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
 	struct be_sge *sge = nonembedded_sgl(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	memset(req, 0, sizeof(*req));
@@ -617,7 +759,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
 		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -628,7 +770,7 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
 	struct be_cmd_req_link_status *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -646,7 +788,7 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
 		link->speed = PHY_LINK_SPEED_ZERO;
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -656,7 +798,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
 	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +812,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
 		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -681,7 +823,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
 	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +838,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
 
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -707,7 +849,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
 	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,7 +868,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
 
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -736,7 +878,7 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
 	struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -751,7 +893,7 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
 
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -762,7 +904,7 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
 	struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -780,7 +922,7 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
 
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -790,7 +932,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
 	struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -804,7 +946,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
 
 	status = be_mbox_db_ring(ctrl);
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -814,7 +956,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
 	struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -831,7 +973,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
 
@@ -841,7 +983,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
 	struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
 	int status;
 
-	spin_lock(&ctrl->cmd_lock);
+	spin_lock(&ctrl->mbox_lock);
 
 	memset(wrb, 0, sizeof(*wrb));
 
@@ -856,6 +998,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
 		*port_num = le32_to_cpu(resp->phys_port);
 	}
 
-	spin_unlock(&ctrl->cmd_lock);
+	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
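
The entry points added by this patch fit together roughly as sketched below; this is an illustrative sketch only, not part of the patch, and the caller function, its error handling, and the command payload setup are assumed. Only be_cmd_mccq_create(), wrb_from_mcc(), be_mcc_notify() and be_process_mcc() come from the change above.

/* Sketch (assumed caller context): drive the new MCC queue end to end. */
static int be_mcc_usage_sketch(struct be_ctrl_info *ctrl,
		struct be_queue_info *mccq, struct be_queue_info *mcc_cq)
{
	struct be_mcc_wrb *wrb;
	int status;

	/* Create the MCC queue once, bound to its completion queue */
	status = be_cmd_mccq_create(ctrl, mccq, mcc_cq);
	if (status)
		return status;

	/* Post a command: take a free WRB slot, fill it in, ring the doorbell */
	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
	if (!wrb)
		return -EBUSY;	/* ring full: used count has reached len */
	/* ... build the command in *wrb here ... */
	be_mcc_notify(ctrl);

	/* Later, from the MCC CQ handler, reap completions and re-arm the CQ */
	be_process_mcc(ctrl);
	return 0;
}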