aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSathya Perla <sathyap@serverengines.com>2009-06-17 20:02:59 -0400
committerDavid S. Miller <davem@davemloft.net>2009-06-19 03:18:39 -0400
commit5fb379ee67a7ec55ff65b467b472f3d69b60ba16 (patch)
tree23d8c00fd535abb6f9d56e98ade838b8ed253e99
parente3453f6342110d60edb37be92c4a4f668ca8b0c4 (diff)
be2net: Add MCC queue mechanism for BE cmds
Currently all cmds use the blocking MCC mbox to post cmds. An mbox cmd is protected via a spin_lock(cmd_lock) and not spin_lock_bh() as it is undesirable to disable BHs while a blocking mbox cmd is in progress (and take long to finish.) This can lockup a cmd in progress in process context. Instead cmds that may be called in BH context must use the MCC queue to post cmds. The cmd completions are rcvd in a separate completion queue and the events are placed in the tx-event queue. Signed-off-by: Sathya Perla <sathyap@serverengines.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/benet/be.h95
-rw-r--r--drivers/net/benet/be_cmds.c258
-rw-r--r--drivers/net/benet/be_cmds.h40
-rw-r--r--drivers/net/benet/be_hw.h8
-rw-r--r--drivers/net/benet/be_main.c215
5 files changed, 455 insertions, 161 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06fdf307..ef5be133ce68 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
65#define TX_CQ_LEN 1024 65#define TX_CQ_LEN 1024
66#define RX_Q_LEN 1024 /* Does not support any other value */ 66#define RX_Q_LEN 1024 /* Does not support any other value */
67#define RX_CQ_LEN 1024 67#define RX_CQ_LEN 1024
68#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ 68#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
69#define MCC_CQ_LEN 256 69#define MCC_CQ_LEN 256
70 70
71#define BE_NAPI_WEIGHT 64 71#define BE_NAPI_WEIGHT 64
@@ -91,6 +91,61 @@ struct be_queue_info {
91 atomic_t used; /* Number of valid elements in the queue */ 91 atomic_t used; /* Number of valid elements in the queue */
92}; 92};
93 93
94static inline u32 MODULO(u16 val, u16 limit)
95{
96 BUG_ON(limit & (limit - 1));
97 return val & (limit - 1);
98}
99
100static inline void index_adv(u16 *index, u16 val, u16 limit)
101{
102 *index = MODULO((*index + val), limit);
103}
104
105static inline void index_inc(u16 *index, u16 limit)
106{
107 *index = MODULO((*index + 1), limit);
108}
109
110static inline void *queue_head_node(struct be_queue_info *q)
111{
112 return q->dma_mem.va + q->head * q->entry_size;
113}
114
115static inline void *queue_tail_node(struct be_queue_info *q)
116{
117 return q->dma_mem.va + q->tail * q->entry_size;
118}
119
120static inline void queue_head_inc(struct be_queue_info *q)
121{
122 index_inc(&q->head, q->len);
123}
124
125static inline void queue_tail_inc(struct be_queue_info *q)
126{
127 index_inc(&q->tail, q->len);
128}
129
130
131struct be_eq_obj {
132 struct be_queue_info q;
133 char desc[32];
134
135 /* Adaptive interrupt coalescing (AIC) info */
136 bool enable_aic;
137 u16 min_eqd; /* in usecs */
138 u16 max_eqd; /* in usecs */
139 u16 cur_eqd; /* in usecs */
140
141 struct napi_struct napi;
142};
143
144struct be_mcc_obj {
145 struct be_queue_info q;
146 struct be_queue_info cq;
147};
148
94struct be_ctrl_info { 149struct be_ctrl_info {
95 u8 __iomem *csr; 150 u8 __iomem *csr;
96 u8 __iomem *db; /* Door Bell */ 151 u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,16 @@ struct be_ctrl_info {
98 int pci_func; 153 int pci_func;
99 154
100 /* Mbox used for cmd request/response */ 155 /* Mbox used for cmd request/response */
101 spinlock_t cmd_lock; /* For serializing cmds to BE card */ 156 spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
102 struct be_dma_mem mbox_mem; 157 struct be_dma_mem mbox_mem;
103 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr 158 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
104 * is stored for freeing purpose */ 159 * is stored for freeing purpose */
105 struct be_dma_mem mbox_mem_alloced; 160 struct be_dma_mem mbox_mem_alloced;
161
162 /* MCC Rings */
163 struct be_mcc_obj mcc_obj;
164 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
165 spinlock_t mcc_cq_lock;
106}; 166};
107 167
108#include "be_cmds.h" 168#include "be_cmds.h"
@@ -150,19 +210,6 @@ struct be_stats_obj {
150 struct be_dma_mem cmd; 210 struct be_dma_mem cmd;
151}; 211};
152 212
153struct be_eq_obj {
154 struct be_queue_info q;
155 char desc[32];
156
157 /* Adaptive interrupt coalescing (AIC) info */
158 bool enable_aic;
159 u16 min_eqd; /* in usecs */
160 u16 max_eqd; /* in usecs */
161 u16 cur_eqd; /* in usecs */
162
163 struct napi_struct napi;
164};
165
166struct be_tx_obj { 213struct be_tx_obj {
167 struct be_queue_info q; 214 struct be_queue_info q;
168 struct be_queue_info cq; 215 struct be_queue_info cq;
@@ -235,22 +282,6 @@ extern struct ethtool_ops be_ethtool_ops;
235 282
236#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) 283#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
237 284
238static inline u32 MODULO(u16 val, u16 limit)
239{
240 BUG_ON(limit & (limit - 1));
241 return val & (limit - 1);
242}
243
244static inline void index_adv(u16 *index, u16 val, u16 limit)
245{
246 *index = MODULO((*index + val), limit);
247}
248
249static inline void index_inc(u16 *index, u16 limit)
250{
251 *index = MODULO((*index + 1), limit);
252}
253
254#define PAGE_SHIFT_4K 12 285#define PAGE_SHIFT_4K 12
255#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 286#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
256 287
@@ -339,4 +370,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
339 return val; 370 return val;
340} 371}
341 372
373extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
374 u16 num_popped);
342#endif /* BE_H */ 375#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed962bc..f1ec191f0c0d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,90 @@
17 17
18#include "be.h" 18#include "be.h"
19 19
20void be_mcc_notify(struct be_ctrl_info *ctrl)
21{
22 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
23 u32 val = 0;
24
25 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
26 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
27 iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
28}
29
30/* To check if valid bit is set, check the entire word as we don't know
31 * the endianness of the data (old entry is host endian while a new entry is
32 * little endian) */
33static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
34{
35 if (compl->flags != 0) {
36 compl->flags = le32_to_cpu(compl->flags);
37 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
38 return true;
39 } else {
40 return false;
41 }
42}
43
44/* Need to reset the entire word that houses the valid bit */
45static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
46{
47 compl->flags = 0;
48}
49
50static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
51 struct be_mcc_cq_entry *compl)
52{
53 u16 compl_status, extd_status;
54
55 /* Just swap the status to host endian; mcc tag is opaquely copied
56 * from mcc_wrb */
57 be_dws_le_to_cpu(compl, 4);
58
59 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
60 CQE_STATUS_COMPL_MASK;
61 if (compl_status != MCC_STATUS_SUCCESS) {
62 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
63 CQE_STATUS_EXTD_MASK;
64 printk(KERN_WARNING DRV_NAME
65 " error in cmd completion: status(compl/extd)=%d/%d\n",
66 compl_status, extd_status);
67 return -1;
68 }
69 return 0;
70}
71
72
73static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
74{
75 struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
76 struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
77
78 if (be_mcc_compl_is_new(compl)) {
79 queue_tail_inc(mcc_cq);
80 return compl;
81 }
82 return NULL;
83}
84
85void be_process_mcc(struct be_ctrl_info *ctrl)
86{
87 struct be_mcc_cq_entry *compl;
88 int num = 0;
89
90 spin_lock_bh(&ctrl->mcc_cq_lock);
91 while ((compl = be_mcc_compl_get(ctrl))) {
92 if (!(compl->flags & CQE_FLAGS_ASYNC_MASK)) {
93 be_mcc_compl_process(ctrl, compl);
94 atomic_dec(&ctrl->mcc_obj.q.used);
95 }
96 be_mcc_compl_use(compl);
97 num++;
98 }
99 if (num)
100 be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
101 spin_unlock_bh(&ctrl->mcc_cq_lock);
102}
103
20static int be_mbox_db_ready_wait(void __iomem *db) 104static int be_mbox_db_ready_wait(void __iomem *db)
21{ 105{
22 int cnt = 0, wait = 5; 106 int cnt = 0, wait = 5;
@@ -44,11 +128,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
44 128
45/* 129/*
46 * Insert the mailbox address into the doorbell in two steps 130 * Insert the mailbox address into the doorbell in two steps
131 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
47 */ 132 */
48static int be_mbox_db_ring(struct be_ctrl_info *ctrl) 133static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
49{ 134{
50 int status; 135 int status;
51 u16 compl_status, extd_status;
52 u32 val = 0; 136 u32 val = 0;
53 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; 137 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
54 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; 138 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +163,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
79 if (status != 0) 163 if (status != 0)
80 return status; 164 return status;
81 165
82 /* compl entry has been made now */ 166 /* A cq entry has been made now */
83 be_dws_le_to_cpu(cqe, sizeof(*cqe)); 167 if (be_mcc_compl_is_new(cqe)) {
84 if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) { 168 status = be_mcc_compl_process(ctrl, &mbox->cqe);
85 printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n"); 169 be_mcc_compl_use(cqe);
170 if (status)
171 return status;
172 } else {
173 printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
86 return -1; 174 return -1;
87 } 175 }
88 176 return 0;
89 compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
90 CQE_STATUS_COMPL_MASK;
91 if (compl_status != MCC_STATUS_SUCCESS) {
92 extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
93 CQE_STATUS_EXTD_MASK;
94 printk(KERN_WARNING DRV_NAME
95 ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
96 compl_status, extd_status);
97 }
98
99 return compl_status;
100} 177}
101 178
102static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) 179static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +312,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
235 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 312 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
236} 313}
237 314
315static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
316{
317 struct be_mcc_wrb *wrb = NULL;
318 if (atomic_read(&mccq->used) < mccq->len) {
319 wrb = queue_head_node(mccq);
320 queue_head_inc(mccq);
321 atomic_inc(&mccq->used);
322 memset(wrb, 0, sizeof(*wrb));
323 }
324 return wrb;
325}
326
238int be_cmd_eq_create(struct be_ctrl_info *ctrl, 327int be_cmd_eq_create(struct be_ctrl_info *ctrl,
239 struct be_queue_info *eq, int eq_delay) 328 struct be_queue_info *eq, int eq_delay)
240{ 329{
@@ -244,7 +333,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
244 struct be_dma_mem *q_mem = &eq->dma_mem; 333 struct be_dma_mem *q_mem = &eq->dma_mem;
245 int status; 334 int status;
246 335
247 spin_lock(&ctrl->cmd_lock); 336 spin_lock(&ctrl->mbox_lock);
248 memset(wrb, 0, sizeof(*wrb)); 337 memset(wrb, 0, sizeof(*wrb));
249 338
250 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 339 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +361,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
272 eq->id = le16_to_cpu(resp->eq_id); 361 eq->id = le16_to_cpu(resp->eq_id);
273 eq->created = true; 362 eq->created = true;
274 } 363 }
275 spin_unlock(&ctrl->cmd_lock); 364 spin_unlock(&ctrl->mbox_lock);
276 return status; 365 return status;
277} 366}
278 367
@@ -284,7 +373,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
284 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 373 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
285 int status; 374 int status;
286 375
287 spin_lock(&ctrl->cmd_lock); 376 spin_lock(&ctrl->mbox_lock);
288 memset(wrb, 0, sizeof(*wrb)); 377 memset(wrb, 0, sizeof(*wrb));
289 378
290 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 379 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +393,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
304 if (!status) 393 if (!status)
305 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 394 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
306 395
307 spin_unlock(&ctrl->cmd_lock); 396 spin_unlock(&ctrl->mbox_lock);
308 return status; 397 return status;
309} 398}
310 399
@@ -315,7 +404,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
315 struct be_cmd_req_pmac_add *req = embedded_payload(wrb); 404 struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
316 int status; 405 int status;
317 406
318 spin_lock(&ctrl->cmd_lock); 407 spin_lock(&ctrl->mbox_lock);
319 memset(wrb, 0, sizeof(*wrb)); 408 memset(wrb, 0, sizeof(*wrb));
320 409
321 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 410 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +421,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
332 *pmac_id = le32_to_cpu(resp->pmac_id); 421 *pmac_id = le32_to_cpu(resp->pmac_id);
333 } 422 }
334 423
335 spin_unlock(&ctrl->cmd_lock); 424 spin_unlock(&ctrl->mbox_lock);
336 return status; 425 return status;
337} 426}
338 427
@@ -342,7 +431,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
342 struct be_cmd_req_pmac_del *req = embedded_payload(wrb); 431 struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
343 int status; 432 int status;
344 433
345 spin_lock(&ctrl->cmd_lock); 434 spin_lock(&ctrl->mbox_lock);
346 memset(wrb, 0, sizeof(*wrb)); 435 memset(wrb, 0, sizeof(*wrb));
347 436
348 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 437 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +443,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
354 req->pmac_id = cpu_to_le32(pmac_id); 443 req->pmac_id = cpu_to_le32(pmac_id);
355 444
356 status = be_mbox_db_ring(ctrl); 445 status = be_mbox_db_ring(ctrl);
357 spin_unlock(&ctrl->cmd_lock); 446 spin_unlock(&ctrl->mbox_lock);
358 447
359 return status; 448 return status;
360} 449}
@@ -370,7 +459,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
370 void *ctxt = &req->context; 459 void *ctxt = &req->context;
371 int status; 460 int status;
372 461
373 spin_lock(&ctrl->cmd_lock); 462 spin_lock(&ctrl->mbox_lock);
374 memset(wrb, 0, sizeof(*wrb)); 463 memset(wrb, 0, sizeof(*wrb));
375 464
376 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 465 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +477,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
388 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); 477 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
389 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); 478 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
390 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); 479 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
391 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0); 480 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
392 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); 481 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
393 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 482 be_dws_cpu_to_le(ctxt, sizeof(req->context));
394 483
@@ -399,7 +488,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
399 cq->id = le16_to_cpu(resp->cq_id); 488 cq->id = le16_to_cpu(resp->cq_id);
400 cq->created = true; 489 cq->created = true;
401 } 490 }
402 spin_unlock(&ctrl->cmd_lock); 491 spin_unlock(&ctrl->mbox_lock);
492
493 return status;
494}
495
496static u32 be_encoded_q_len(int q_len)
497{
498 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
499 if (len_encoded == 16)
500 len_encoded = 0;
501 return len_encoded;
502}
503
504int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
505 struct be_queue_info *mccq,
506 struct be_queue_info *cq)
507{
508 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
509 struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
510 struct be_dma_mem *q_mem = &mccq->dma_mem;
511 void *ctxt = &req->context;
512 int status;
513
514 spin_lock(&ctrl->mbox_lock);
515 memset(wrb, 0, sizeof(*wrb));
516
517 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
518
519 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
520 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
521
522 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
523
524 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
525 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
526 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
527 be_encoded_q_len(mccq->len));
528 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
529
530 be_dws_cpu_to_le(ctxt, sizeof(req->context));
531
532 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
533
534 status = be_mbox_db_ring(ctrl);
535 if (!status) {
536 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
537 mccq->id = le16_to_cpu(resp->id);
538 mccq->created = true;
539 }
540 spin_unlock(&ctrl->mbox_lock);
403 541
404 return status; 542 return status;
405} 543}
@@ -415,7 +553,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
415 int status; 553 int status;
416 u32 len_encoded; 554 u32 len_encoded;
417 555
418 spin_lock(&ctrl->cmd_lock); 556 spin_lock(&ctrl->mbox_lock);
419 memset(wrb, 0, sizeof(*wrb)); 557 memset(wrb, 0, sizeof(*wrb));
420 558
421 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 559 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +584,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
446 txq->id = le16_to_cpu(resp->cid); 584 txq->id = le16_to_cpu(resp->cid);
447 txq->created = true; 585 txq->created = true;
448 } 586 }
449 spin_unlock(&ctrl->cmd_lock); 587 spin_unlock(&ctrl->mbox_lock);
450 588
451 return status; 589 return status;
452} 590}
@@ -460,7 +598,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
460 struct be_dma_mem *q_mem = &rxq->dma_mem; 598 struct be_dma_mem *q_mem = &rxq->dma_mem;
461 int status; 599 int status;
462 600
463 spin_lock(&ctrl->cmd_lock); 601 spin_lock(&ctrl->mbox_lock);
464 memset(wrb, 0, sizeof(*wrb)); 602 memset(wrb, 0, sizeof(*wrb));
465 603
466 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 604 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +620,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
482 rxq->id = le16_to_cpu(resp->id); 620 rxq->id = le16_to_cpu(resp->id);
483 rxq->created = true; 621 rxq->created = true;
484 } 622 }
485 spin_unlock(&ctrl->cmd_lock); 623 spin_unlock(&ctrl->mbox_lock);
486 624
487 return status; 625 return status;
488} 626}
@@ -496,7 +634,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
496 u8 subsys = 0, opcode = 0; 634 u8 subsys = 0, opcode = 0;
497 int status; 635 int status;
498 636
499 spin_lock(&ctrl->cmd_lock); 637 spin_lock(&ctrl->mbox_lock);
500 638
501 memset(wrb, 0, sizeof(*wrb)); 639 memset(wrb, 0, sizeof(*wrb));
502 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 640 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +656,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
518 subsys = CMD_SUBSYSTEM_ETH; 656 subsys = CMD_SUBSYSTEM_ETH;
519 opcode = OPCODE_ETH_RX_DESTROY; 657 opcode = OPCODE_ETH_RX_DESTROY;
520 break; 658 break;
659 case QTYPE_MCCQ:
660 subsys = CMD_SUBSYSTEM_COMMON;
661 opcode = OPCODE_COMMON_MCC_DESTROY;
662 break;
521 default: 663 default:
522 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); 664 printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
523 status = -1; 665 status = -1;
@@ -528,7 +670,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
528 670
529 status = be_mbox_db_ring(ctrl); 671 status = be_mbox_db_ring(ctrl);
530err: 672err:
531 spin_unlock(&ctrl->cmd_lock); 673 spin_unlock(&ctrl->mbox_lock);
532 674
533 return status; 675 return status;
534} 676}
@@ -541,7 +683,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
541 struct be_cmd_req_if_create *req = embedded_payload(wrb); 683 struct be_cmd_req_if_create *req = embedded_payload(wrb);
542 int status; 684 int status;
543 685
544 spin_lock(&ctrl->cmd_lock); 686 spin_lock(&ctrl->mbox_lock);
545 memset(wrb, 0, sizeof(*wrb)); 687 memset(wrb, 0, sizeof(*wrb));
546 688
547 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 689 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +704,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
562 *pmac_id = le32_to_cpu(resp->pmac_id); 704 *pmac_id = le32_to_cpu(resp->pmac_id);
563 } 705 }
564 706
565 spin_unlock(&ctrl->cmd_lock); 707 spin_unlock(&ctrl->mbox_lock);
566 return status; 708 return status;
567} 709}
568 710
@@ -572,7 +714,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
572 struct be_cmd_req_if_destroy *req = embedded_payload(wrb); 714 struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
573 int status; 715 int status;
574 716
575 spin_lock(&ctrl->cmd_lock); 717 spin_lock(&ctrl->mbox_lock);
576 memset(wrb, 0, sizeof(*wrb)); 718 memset(wrb, 0, sizeof(*wrb));
577 719
578 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 720 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +725,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
583 req->interface_id = cpu_to_le32(interface_id); 725 req->interface_id = cpu_to_le32(interface_id);
584 status = be_mbox_db_ring(ctrl); 726 status = be_mbox_db_ring(ctrl);
585 727
586 spin_unlock(&ctrl->cmd_lock); 728 spin_unlock(&ctrl->mbox_lock);
587 729
588 return status; 730 return status;
589} 731}
@@ -598,7 +740,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
598 struct be_sge *sge = nonembedded_sgl(wrb); 740 struct be_sge *sge = nonembedded_sgl(wrb);
599 int status; 741 int status;
600 742
601 spin_lock(&ctrl->cmd_lock); 743 spin_lock(&ctrl->mbox_lock);
602 memset(wrb, 0, sizeof(*wrb)); 744 memset(wrb, 0, sizeof(*wrb));
603 745
604 memset(req, 0, sizeof(*req)); 746 memset(req, 0, sizeof(*req));
@@ -617,7 +759,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
617 be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); 759 be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
618 } 760 }
619 761
620 spin_unlock(&ctrl->cmd_lock); 762 spin_unlock(&ctrl->mbox_lock);
621 return status; 763 return status;
622} 764}
623 765
@@ -628,7 +770,7 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
628 struct be_cmd_req_link_status *req = embedded_payload(wrb); 770 struct be_cmd_req_link_status *req = embedded_payload(wrb);
629 int status; 771 int status;
630 772
631 spin_lock(&ctrl->cmd_lock); 773 spin_lock(&ctrl->mbox_lock);
632 memset(wrb, 0, sizeof(*wrb)); 774 memset(wrb, 0, sizeof(*wrb));
633 775
634 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 776 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -646,7 +788,7 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
646 link->speed = PHY_LINK_SPEED_ZERO; 788 link->speed = PHY_LINK_SPEED_ZERO;
647 } 789 }
648 790
649 spin_unlock(&ctrl->cmd_lock); 791 spin_unlock(&ctrl->mbox_lock);
650 return status; 792 return status;
651} 793}
652 794
@@ -656,7 +798,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
656 struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); 798 struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
657 int status; 799 int status;
658 800
659 spin_lock(&ctrl->cmd_lock); 801 spin_lock(&ctrl->mbox_lock);
660 memset(wrb, 0, sizeof(*wrb)); 802 memset(wrb, 0, sizeof(*wrb));
661 803
662 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 804 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +812,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
670 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); 812 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
671 } 813 }
672 814
673 spin_unlock(&ctrl->cmd_lock); 815 spin_unlock(&ctrl->mbox_lock);
674 return status; 816 return status;
675} 817}
676 818
@@ -681,7 +823,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
681 struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); 823 struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
682 int status; 824 int status;
683 825
684 spin_lock(&ctrl->cmd_lock); 826 spin_lock(&ctrl->mbox_lock);
685 memset(wrb, 0, sizeof(*wrb)); 827 memset(wrb, 0, sizeof(*wrb));
686 828
687 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 829 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +838,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
696 838
697 status = be_mbox_db_ring(ctrl); 839 status = be_mbox_db_ring(ctrl);
698 840
699 spin_unlock(&ctrl->cmd_lock); 841 spin_unlock(&ctrl->mbox_lock);
700 return status; 842 return status;
701} 843}
702 844
@@ -707,7 +849,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
707 struct be_cmd_req_vlan_config *req = embedded_payload(wrb); 849 struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
708 int status; 850 int status;
709 851
710 spin_lock(&ctrl->cmd_lock); 852 spin_lock(&ctrl->mbox_lock);
711 memset(wrb, 0, sizeof(*wrb)); 853 memset(wrb, 0, sizeof(*wrb));
712 854
713 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 855 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,7 +868,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
726 868
727 status = be_mbox_db_ring(ctrl); 869 status = be_mbox_db_ring(ctrl);
728 870
729 spin_unlock(&ctrl->cmd_lock); 871 spin_unlock(&ctrl->mbox_lock);
730 return status; 872 return status;
731} 873}
732 874
@@ -736,7 +878,7 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
736 struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb); 878 struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
737 int status; 879 int status;
738 880
739 spin_lock(&ctrl->cmd_lock); 881 spin_lock(&ctrl->mbox_lock);
740 memset(wrb, 0, sizeof(*wrb)); 882 memset(wrb, 0, sizeof(*wrb));
741 883
742 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 884 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -751,7 +893,7 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
751 893
752 status = be_mbox_db_ring(ctrl); 894 status = be_mbox_db_ring(ctrl);
753 895
754 spin_unlock(&ctrl->cmd_lock); 896 spin_unlock(&ctrl->mbox_lock);
755 return status; 897 return status;
756} 898}
757 899
@@ -762,7 +904,7 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
762 struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb); 904 struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
763 int status; 905 int status;
764 906
765 spin_lock(&ctrl->cmd_lock); 907 spin_lock(&ctrl->mbox_lock);
766 memset(wrb, 0, sizeof(*wrb)); 908 memset(wrb, 0, sizeof(*wrb));
767 909
768 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 910 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -780,7 +922,7 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
780 922
781 status = be_mbox_db_ring(ctrl); 923 status = be_mbox_db_ring(ctrl);
782 924
783 spin_unlock(&ctrl->cmd_lock); 925 spin_unlock(&ctrl->mbox_lock);
784 return status; 926 return status;
785} 927}
786 928
@@ -790,7 +932,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
790 struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); 932 struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
791 int status; 933 int status;
792 934
793 spin_lock(&ctrl->cmd_lock); 935 spin_lock(&ctrl->mbox_lock);
794 936
795 memset(wrb, 0, sizeof(*wrb)); 937 memset(wrb, 0, sizeof(*wrb));
796 938
@@ -804,7 +946,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
804 946
805 status = be_mbox_db_ring(ctrl); 947 status = be_mbox_db_ring(ctrl);
806 948
807 spin_unlock(&ctrl->cmd_lock); 949 spin_unlock(&ctrl->mbox_lock);
808 return status; 950 return status;
809} 951}
810 952
@@ -814,7 +956,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
814 struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); 956 struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
815 int status; 957 int status;
816 958
817 spin_lock(&ctrl->cmd_lock); 959 spin_lock(&ctrl->mbox_lock);
818 960
819 memset(wrb, 0, sizeof(*wrb)); 961 memset(wrb, 0, sizeof(*wrb));
820 962
@@ -831,7 +973,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
831 *rx_fc = le16_to_cpu(resp->rx_flow_control); 973 *rx_fc = le16_to_cpu(resp->rx_flow_control);
832 } 974 }
833 975
834 spin_unlock(&ctrl->cmd_lock); 976 spin_unlock(&ctrl->mbox_lock);
835 return status; 977 return status;
836} 978}
837 979
@@ -841,7 +983,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
841 struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); 983 struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
842 int status; 984 int status;
843 985
844 spin_lock(&ctrl->cmd_lock); 986 spin_lock(&ctrl->mbox_lock);
845 987
846 memset(wrb, 0, sizeof(*wrb)); 988 memset(wrb, 0, sizeof(*wrb));
847 989
@@ -856,6 +998,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
856 *port_num = le32_to_cpu(resp->phys_port); 998 *port_num = le32_to_cpu(resp->phys_port);
857 } 999 }
858 1000
859 spin_unlock(&ctrl->cmd_lock); 1001 spin_unlock(&ctrl->mbox_lock);
860 return status; 1002 return status;
861} 1003}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d5b8c3..0a9189defc2a 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -101,6 +101,7 @@ struct be_mcc_mailbox {
101#define OPCODE_COMMON_FIRMWARE_CONFIG 42 101#define OPCODE_COMMON_FIRMWARE_CONFIG 42
102#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 102#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
103#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 103#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
104#define OPCODE_COMMON_MCC_DESTROY 53
104#define OPCODE_COMMON_CQ_DESTROY 54 105#define OPCODE_COMMON_CQ_DESTROY 54
105#define OPCODE_COMMON_EQ_DESTROY 55 106#define OPCODE_COMMON_EQ_DESTROY 55
106#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 107#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +270,38 @@ struct be_cmd_resp_cq_create {
269 u16 rsvd0; 270 u16 rsvd0;
270} __packed; 271} __packed;
271 272
273/******************** Create MCCQ ***************************/
274/* Pseudo amap definition in which each bit of the actual structure is defined
275 * as a byte: used to calculate offset/shift/mask of each field */
276struct amap_mcc_context {
277 u8 con_index[14];
278 u8 rsvd0[2];
279 u8 ring_size[4];
280 u8 fetch_wrb;
281 u8 fetch_r2t;
282 u8 cq_id[10];
283 u8 prod_index[14];
284 u8 fid[8];
285 u8 pdid[9];
286 u8 valid;
287 u8 rsvd1[32];
288 u8 rsvd2[32];
289} __packed;
290
291struct be_cmd_req_mcc_create {
292 struct be_cmd_req_hdr hdr;
293 u16 num_pages;
294 u16 rsvd0;
295 u8 context[sizeof(struct amap_mcc_context) / 8];
296 struct phys_addr pages[8];
297} __packed;
298
299struct be_cmd_resp_mcc_create {
300 struct be_cmd_resp_hdr hdr;
301 u16 id;
302 u16 rsvd0;
303} __packed;
304
272/******************** Create TxQ ***************************/ 305/******************** Create TxQ ***************************/
273#define BE_ETH_TX_RING_TYPE_STANDARD 2 306#define BE_ETH_TX_RING_TYPE_STANDARD 2
274#define BE_ULP1_NUM 1 307#define BE_ULP1_NUM 1
@@ -341,7 +374,8 @@ enum {
341 QTYPE_EQ = 1, 374 QTYPE_EQ = 1,
342 QTYPE_CQ, 375 QTYPE_CQ,
343 QTYPE_TXQ, 376 QTYPE_TXQ,
344 QTYPE_RXQ 377 QTYPE_RXQ,
378 QTYPE_MCCQ
345}; 379};
346 380
347struct be_cmd_req_q_destroy { 381struct be_cmd_req_q_destroy {
@@ -657,6 +691,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
657 struct be_queue_info *cq, struct be_queue_info *eq, 691 struct be_queue_info *cq, struct be_queue_info *eq,
658 bool sol_evts, bool no_delay, 692 bool sol_evts, bool no_delay,
659 int num_cqe_dma_coalesce); 693 int num_cqe_dma_coalesce);
694extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
695 struct be_queue_info *mccq,
696 struct be_queue_info *cq);
660extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, 697extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
661 struct be_queue_info *txq, 698 struct be_queue_info *txq,
662 struct be_queue_info *cq); 699 struct be_queue_info *cq);
@@ -686,3 +723,4 @@ extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
686extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, 723extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
687 u32 *tx_fc, u32 *rx_fc); 724 u32 *tx_fc, u32 *rx_fc);
688extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); 725extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
726extern void be_process_mcc(struct be_ctrl_info *ctrl);
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4893ca..b02e805c1db3 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -61,7 +61,7 @@
61/* Clear the interrupt for this eq */ 61/* Clear the interrupt for this eq */
62#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ 62#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
63/* Must be 1 */ 63/* Must be 1 */
64#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ 64#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
65/* Number of event entries processed */ 65/* Number of event entries processed */
66#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 66#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
67/* Rearm bit */ 67/* Rearm bit */
@@ -88,6 +88,12 @@
88/* Number of rx frags posted */ 88/* Number of rx frags posted */
89#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ 89#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
90 90
91/********** MCC door bell ************/
92#define DB_MCCQ_OFFSET 0x140
93#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
94/* Number of entries posted */
95#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
96
91/* 97/*
92 * BE descriptors: host memory data structures whose formats 98 * BE descriptors: host memory data structures whose formats
93 * are hardwired in BE silicon. 99 * are hardwired in BE silicon.
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb56874d9b..a4ce80e776b6 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
60 return 0; 60 return 0;
61} 61}
62 62
63static inline void *queue_head_node(struct be_queue_info *q)
64{
65 return q->dma_mem.va + q->head * q->entry_size;
66}
67
68static inline void *queue_tail_node(struct be_queue_info *q)
69{
70 return q->dma_mem.va + q->tail * q->entry_size;
71}
72
73static inline void queue_head_inc(struct be_queue_info *q)
74{
75 index_inc(&q->head, q->len);
76}
77
78static inline void queue_tail_inc(struct be_queue_info *q)
79{
80 index_inc(&q->tail, q->len);
81}
82
83static void be_intr_set(struct be_ctrl_info *ctrl, bool enable) 63static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
84{ 64{
85 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 65 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
127 iowrite32(val, ctrl->db + DB_EQ_OFFSET); 107 iowrite32(val, ctrl->db + DB_EQ_OFFSET);
128} 108}
129 109
130static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, 110void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
131 bool arm, u16 num_popped) 111 bool arm, u16 num_popped)
132{ 112{
133 u32 val = 0; 113 u32 val = 0;
@@ -960,10 +940,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
960 return; 940 return;
961} 941}
962 942
963static struct be_eth_tx_compl * 943static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
964be_tx_compl_get(struct be_adapter *adapter)
965{ 944{
966 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
967 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); 945 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
968 946
969 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) 947 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1029,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
1051 } 1029 }
1052} 1030}
1053 1031
1032static void be_mcc_queues_destroy(struct be_adapter *adapter)
1033{
1034 struct be_queue_info *q;
1035 struct be_ctrl_info *ctrl = &adapter->ctrl;
1036
1037 q = &ctrl->mcc_obj.q;
1038 if (q->created)
1039 be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
1040 be_queue_free(adapter, q);
1041
1042 q = &ctrl->mcc_obj.cq;
1043 if (q->created)
1044 be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
1045 be_queue_free(adapter, q);
1046}
1047
1048/* Must be called only after TX qs are created as MCC shares TX EQ */
1049static int be_mcc_queues_create(struct be_adapter *adapter)
1050{
1051 struct be_queue_info *q, *cq;
1052 struct be_ctrl_info *ctrl = &adapter->ctrl;
1053
1054 /* Alloc MCC compl queue */
1055 cq = &ctrl->mcc_obj.cq;
1056 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1057 sizeof(struct be_mcc_cq_entry)))
1058 goto err;
1059
1060 /* Ask BE to create MCC compl queue; share TX's eq */
1061 if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
1062 goto mcc_cq_free;
1063
1064 /* Alloc MCC queue */
1065 q = &ctrl->mcc_obj.q;
1066 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1067 goto mcc_cq_destroy;
1068
1069 /* Ask BE to create MCC queue */
1070 if (be_cmd_mccq_create(ctrl, q, cq))
1071 goto mcc_q_free;
1072
1073 return 0;
1074
1075mcc_q_free:
1076 be_queue_free(adapter, q);
1077mcc_cq_destroy:
1078 be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
1079mcc_cq_free:
1080 be_queue_free(adapter, cq);
1081err:
1082 return -1;
1083}
1084
1054static void be_tx_queues_destroy(struct be_adapter *adapter) 1085static void be_tx_queues_destroy(struct be_adapter *adapter)
1055{ 1086{
1056 struct be_queue_info *q; 1087 struct be_queue_info *q;
@@ -1263,7 +1294,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
1263 return IRQ_HANDLED; 1294 return IRQ_HANDLED;
1264} 1295}
1265 1296
1266static irqreturn_t be_msix_tx(int irq, void *dev) 1297static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1267{ 1298{
1268 struct be_adapter *adapter = dev; 1299 struct be_adapter *adapter = dev;
1269 1300
@@ -1324,40 +1355,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1324 return work_done; 1355 return work_done;
1325} 1356}
1326 1357
1327/* For TX we don't honour budget; consume everything */ 1358void be_process_tx(struct be_adapter *adapter)
1328int be_poll_tx(struct napi_struct *napi, int budget)
1329{ 1359{
1330 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); 1360 struct be_queue_info *txq = &adapter->tx_obj.q;
1331 struct be_adapter *adapter = 1361 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1332 container_of(tx_eq, struct be_adapter, tx_eq);
1333 struct be_tx_obj *tx_obj = &adapter->tx_obj;
1334 struct be_queue_info *tx_cq = &tx_obj->cq;
1335 struct be_queue_info *txq = &tx_obj->q;
1336 struct be_eth_tx_compl *txcp; 1362 struct be_eth_tx_compl *txcp;
1337 u32 num_cmpl = 0; 1363 u32 num_cmpl = 0;
1338 u16 end_idx; 1364 u16 end_idx;
1339 1365
1340 while ((txcp = be_tx_compl_get(adapter))) { 1366 while ((txcp = be_tx_compl_get(tx_cq))) {
1341 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1367 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1342 wrb_index, txcp); 1368 wrb_index, txcp);
1343 be_tx_compl_process(adapter, end_idx); 1369 be_tx_compl_process(adapter, end_idx);
1344 num_cmpl++; 1370 num_cmpl++;
1345 } 1371 }
1346 1372
1347 /* As Tx wrbs have been freed up, wake up netdev queue if 1373 if (num_cmpl) {
1348 * it was stopped due to lack of tx wrbs. 1374 be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
1349 */ 1375
1350 if (netif_queue_stopped(adapter->netdev) && 1376 /* As Tx wrbs have been freed up, wake up netdev queue if
1377 * it was stopped due to lack of tx wrbs.
1378 */
1379 if (netif_queue_stopped(adapter->netdev) &&
1351 atomic_read(&txq->used) < txq->len / 2) { 1380 atomic_read(&txq->used) < txq->len / 2) {
1352 netif_wake_queue(adapter->netdev); 1381 netif_wake_queue(adapter->netdev);
1382 }
1383
1384 drvr_stats(adapter)->be_tx_events++;
1385 drvr_stats(adapter)->be_tx_compl += num_cmpl;
1353 } 1386 }
1387}
1388
1389/* As TX and MCC share the same EQ check for both TX and MCC completions.
1390 * For TX/MCC we don't honour budget; consume everything
1391 */
1392static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1393{
1394 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1395 struct be_adapter *adapter =
1396 container_of(tx_eq, struct be_adapter, tx_eq);
1354 1397
1355 napi_complete(napi); 1398 napi_complete(napi);
1356 1399
1357 be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); 1400 be_process_tx(adapter);
1358 1401
1359 drvr_stats(adapter)->be_tx_events++; 1402 be_process_mcc(&adapter->ctrl);
1360 drvr_stats(adapter)->be_tx_compl += num_cmpl;
1361 1403
1362 return 1; 1404 return 1;
1363} 1405}
@@ -1419,7 +1461,7 @@ static int be_msix_register(struct be_adapter *adapter)
1419 1461
1420 sprintf(tx_eq->desc, "%s-tx", netdev->name); 1462 sprintf(tx_eq->desc, "%s-tx", netdev->name);
1421 vec = be_msix_vec_get(adapter, tx_eq->q.id); 1463 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1422 status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); 1464 status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
1423 if (status) 1465 if (status)
1424 goto err; 1466 goto err;
1425 1467
@@ -1495,6 +1537,34 @@ static int be_open(struct net_device *netdev)
1495 struct be_ctrl_info *ctrl = &adapter->ctrl; 1537 struct be_ctrl_info *ctrl = &adapter->ctrl;
1496 struct be_eq_obj *rx_eq = &adapter->rx_eq; 1538 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1497 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1539 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1540
1541 /* First time posting */
1542 be_post_rx_frags(adapter);
1543
1544 napi_enable(&rx_eq->napi);
1545 napi_enable(&tx_eq->napi);
1546
1547 be_irq_register(adapter);
1548
1549 be_intr_set(ctrl, true);
1550
1551 /* The evt queues are created in unarmed state; arm them */
1552 be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
1553 be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
1554
1555 /* Rx compl queue may be in unarmed state; rearm it */
1556 be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
1557
1558 be_link_status_update(adapter);
1559
1560 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1561 return 0;
1562}
1563
1564static int be_setup(struct be_adapter *adapter)
1565{
1566 struct be_ctrl_info *ctrl = &adapter->ctrl;
1567 struct net_device *netdev = adapter->netdev;
1498 u32 if_flags; 1568 u32 if_flags;
1499 int status; 1569 int status;
1500 1570
@@ -1521,29 +1591,14 @@ static int be_open(struct net_device *netdev)
1521 if (status != 0) 1591 if (status != 0)
1522 goto tx_qs_destroy; 1592 goto tx_qs_destroy;
1523 1593
1524 /* First time posting */ 1594 status = be_mcc_queues_create(adapter);
1525 be_post_rx_frags(adapter); 1595 if (status != 0)
1526 1596 goto rx_qs_destroy;
1527 napi_enable(&rx_eq->napi);
1528 napi_enable(&tx_eq->napi);
1529
1530 be_irq_register(adapter);
1531
1532 be_intr_set(ctrl, true);
1533
1534 /* The evt queues are created in the unarmed state; arm them */
1535 be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
1536 be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
1537
1538 /* The compl queues are created in the unarmed state; arm them */
1539 be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
1540 be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
1541
1542 be_link_status_update(adapter);
1543 1597
1544 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1545 return 0; 1598 return 0;
1546 1599
1600rx_qs_destroy:
1601 be_rx_queues_destroy(adapter);
1547tx_qs_destroy: 1602tx_qs_destroy:
1548 be_tx_queues_destroy(adapter); 1603 be_tx_queues_destroy(adapter);
1549if_destroy: 1604if_destroy:
@@ -1552,6 +1607,19 @@ do_none:
1552 return status; 1607 return status;
1553} 1608}
1554 1609
1610static int be_clear(struct be_adapter *adapter)
1611{
1612 struct be_ctrl_info *ctrl = &adapter->ctrl;
1613
1614 be_rx_queues_destroy(adapter);
1615 be_tx_queues_destroy(adapter);
1616
1617 be_cmd_if_destroy(ctrl, adapter->if_handle);
1618
1619 be_mcc_queues_destroy(adapter);
1620 return 0;
1621}
1622
1555static int be_close(struct net_device *netdev) 1623static int be_close(struct net_device *netdev)
1556{ 1624{
1557 struct be_adapter *adapter = netdev_priv(netdev); 1625 struct be_adapter *adapter = netdev_priv(netdev);
@@ -1581,10 +1649,6 @@ static int be_close(struct net_device *netdev)
1581 napi_disable(&rx_eq->napi); 1649 napi_disable(&rx_eq->napi);
1582 napi_disable(&tx_eq->napi); 1650 napi_disable(&tx_eq->napi);
1583 1651
1584 be_rx_queues_destroy(adapter);
1585 be_tx_queues_destroy(adapter);
1586
1587 be_cmd_if_destroy(ctrl, adapter->if_handle);
1588 return 0; 1652 return 0;
1589} 1653}
1590 1654
@@ -1673,7 +1737,7 @@ static void be_netdev_init(struct net_device *netdev)
1673 1737
1674 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, 1738 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
1675 BE_NAPI_WEIGHT); 1739 BE_NAPI_WEIGHT);
1676 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, 1740 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
1677 BE_NAPI_WEIGHT); 1741 BE_NAPI_WEIGHT);
1678 1742
1679 netif_carrier_off(netdev); 1743 netif_carrier_off(netdev);
@@ -1755,7 +1819,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
1755 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 1819 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
1756 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 1820 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
1757 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 1821 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
1758 spin_lock_init(&ctrl->cmd_lock); 1822 spin_lock_init(&ctrl->mbox_lock);
1823 spin_lock_init(&ctrl->mcc_lock);
1824 spin_lock_init(&ctrl->mcc_cq_lock);
1759 1825
1760 val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 1826 val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
1761 ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & 1827 ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1859,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
1793 1859
1794 unregister_netdev(adapter->netdev); 1860 unregister_netdev(adapter->netdev);
1795 1861
1862 be_clear(adapter);
1863
1796 be_stats_cleanup(adapter); 1864 be_stats_cleanup(adapter);
1797 1865
1798 be_ctrl_cleanup(adapter); 1866 be_ctrl_cleanup(adapter);
@@ -1890,13 +1958,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
1890 be_netdev_init(netdev); 1958 be_netdev_init(netdev);
1891 SET_NETDEV_DEV(netdev, &adapter->pdev->dev); 1959 SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
1892 1960
1961 status = be_setup(adapter);
1962 if (status)
1963 goto stats_clean;
1893 status = register_netdev(netdev); 1964 status = register_netdev(netdev);
1894 if (status != 0) 1965 if (status != 0)
1895 goto stats_clean; 1966 goto unsetup;
1896 1967
1897 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 1968 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
1898 return 0; 1969 return 0;
1899 1970
1971unsetup:
1972 be_clear(adapter);
1900stats_clean: 1973stats_clean:
1901 be_stats_cleanup(adapter); 1974 be_stats_cleanup(adapter);
1902ctrl_clean: 1975ctrl_clean:
@@ -1921,6 +1994,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
1921 if (netif_running(netdev)) { 1994 if (netif_running(netdev)) {
1922 rtnl_lock(); 1995 rtnl_lock();
1923 be_close(netdev); 1996 be_close(netdev);
1997 be_clear(adapter);
1924 rtnl_unlock(); 1998 rtnl_unlock();
1925 } 1999 }
1926 2000
@@ -1947,6 +2021,7 @@ static int be_resume(struct pci_dev *pdev)
1947 2021
1948 if (netif_running(netdev)) { 2022 if (netif_running(netdev)) {
1949 rtnl_lock(); 2023 rtnl_lock();
2024 be_setup(adapter);
1950 be_open(netdev); 2025 be_open(netdev);
1951 rtnl_unlock(); 2026 rtnl_unlock();
1952 } 2027 }