diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/benet/be_cmds.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/benet/be_cmds.c')
-rw-r--r-- | drivers/net/benet/be_cmds.c | 776 |
1 files changed, 667 insertions, 109 deletions
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 34abcc9403d6..81654ae16c63 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2005 - 2010 ServerEngines | 2 | * Copyright (C) 2005 - 2011 Emulex |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
@@ -8,21 +8,30 @@ | |||
8 | * Public License is included in this distribution in the file called COPYING. | 8 | * Public License is included in this distribution in the file called COPYING. |
9 | * | 9 | * |
10 | * Contact Information: | 10 | * Contact Information: |
11 | * linux-drivers@serverengines.com | 11 | * linux-drivers@emulex.com |
12 | * | 12 | * |
13 | * ServerEngines | 13 | * Emulex |
14 | * 209 N. Fair Oaks Ave | 14 | * 3333 Susan Street |
15 | * Sunnyvale, CA 94085 | 15 | * Costa Mesa, CA 92626 |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "be.h" | 18 | #include "be.h" |
19 | #include "be_cmds.h" | 19 | #include "be_cmds.h" |
20 | 20 | ||
21 | /* Must be a power of 2 or else MODULO will BUG_ON */ | ||
22 | static int be_get_temp_freq = 32; | ||
23 | |||
21 | static void be_mcc_notify(struct be_adapter *adapter) | 24 | static void be_mcc_notify(struct be_adapter *adapter) |
22 | { | 25 | { |
23 | struct be_queue_info *mccq = &adapter->mcc_obj.q; | 26 | struct be_queue_info *mccq = &adapter->mcc_obj.q; |
24 | u32 val = 0; | 27 | u32 val = 0; |
25 | 28 | ||
29 | if (adapter->eeh_err) { | ||
30 | dev_info(&adapter->pdev->dev, | ||
31 | "Error in Card Detected! Cannot issue commands\n"); | ||
32 | return; | ||
33 | } | ||
34 | |||
26 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | 35 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; |
27 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | 36 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; |
28 | 37 | ||
@@ -62,20 +71,40 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
62 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | 71 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & |
63 | CQE_STATUS_COMPL_MASK; | 72 | CQE_STATUS_COMPL_MASK; |
64 | 73 | ||
65 | if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && | 74 | if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || |
75 | (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && | ||
66 | (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { | 76 | (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { |
67 | adapter->flash_status = compl_status; | 77 | adapter->flash_status = compl_status; |
68 | complete(&adapter->flash_compl); | 78 | complete(&adapter->flash_compl); |
69 | } | 79 | } |
70 | 80 | ||
71 | if (compl_status == MCC_STATUS_SUCCESS) { | 81 | if (compl_status == MCC_STATUS_SUCCESS) { |
72 | if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { | 82 | if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) || |
73 | struct be_cmd_resp_get_stats *resp = | 83 | (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) && |
74 | adapter->stats.cmd.va; | 84 | (compl->tag1 == CMD_SUBSYSTEM_ETH)) { |
75 | be_dws_le_to_cpu(&resp->hw_stats, | 85 | if (adapter->generation == BE_GEN3) { |
76 | sizeof(resp->hw_stats)); | 86 | if (lancer_chip(adapter)) { |
87 | struct lancer_cmd_resp_pport_stats | ||
88 | *resp = adapter->stats_cmd.va; | ||
89 | be_dws_le_to_cpu(&resp->pport_stats, | ||
90 | sizeof(resp->pport_stats)); | ||
91 | } else { | ||
92 | struct be_cmd_resp_get_stats_v1 *resp = | ||
93 | adapter->stats_cmd.va; | ||
94 | |||
95 | be_dws_le_to_cpu(&resp->hw_stats, | ||
96 | sizeof(resp->hw_stats)); | ||
97 | } | ||
98 | } else { | ||
99 | struct be_cmd_resp_get_stats_v0 *resp = | ||
100 | adapter->stats_cmd.va; | ||
101 | |||
102 | be_dws_le_to_cpu(&resp->hw_stats, | ||
103 | sizeof(resp->hw_stats)); | ||
104 | } | ||
105 | be_parse_stats(adapter); | ||
77 | netdev_stats_update(adapter); | 106 | netdev_stats_update(adapter); |
78 | adapter->stats_ioctl_sent = false; | 107 | adapter->stats_cmd_sent = false; |
79 | } | 108 | } |
80 | } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && | 109 | } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && |
81 | (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { | 110 | (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { |
@@ -96,11 +125,77 @@ static void be_async_link_state_process(struct be_adapter *adapter, | |||
96 | evt->port_link_status == ASYNC_EVENT_LINK_UP); | 125 | evt->port_link_status == ASYNC_EVENT_LINK_UP); |
97 | } | 126 | } |
98 | 127 | ||
128 | /* Grp5 CoS Priority evt */ | ||
129 | static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, | ||
130 | struct be_async_event_grp5_cos_priority *evt) | ||
131 | { | ||
132 | if (evt->valid) { | ||
133 | adapter->vlan_prio_bmap = evt->available_priority_bmap; | ||
134 | adapter->recommended_prio &= ~VLAN_PRIO_MASK; | ||
135 | adapter->recommended_prio = | ||
136 | evt->reco_default_priority << VLAN_PRIO_SHIFT; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* Grp5 QOS Speed evt */ | ||
141 | static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, | ||
142 | struct be_async_event_grp5_qos_link_speed *evt) | ||
143 | { | ||
144 | if (evt->physical_port == adapter->port_num) { | ||
145 | /* qos_link_speed is in units of 10 Mbps */ | ||
146 | adapter->link_speed = evt->qos_link_speed * 10; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | /*Grp5 PVID evt*/ | ||
151 | static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, | ||
152 | struct be_async_event_grp5_pvid_state *evt) | ||
153 | { | ||
154 | if (evt->enabled) | ||
155 | adapter->pvid = le16_to_cpu(evt->tag); | ||
156 | else | ||
157 | adapter->pvid = 0; | ||
158 | } | ||
159 | |||
160 | static void be_async_grp5_evt_process(struct be_adapter *adapter, | ||
161 | u32 trailer, struct be_mcc_compl *evt) | ||
162 | { | ||
163 | u8 event_type = 0; | ||
164 | |||
165 | event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & | ||
166 | ASYNC_TRAILER_EVENT_TYPE_MASK; | ||
167 | |||
168 | switch (event_type) { | ||
169 | case ASYNC_EVENT_COS_PRIORITY: | ||
170 | be_async_grp5_cos_priority_process(adapter, | ||
171 | (struct be_async_event_grp5_cos_priority *)evt); | ||
172 | break; | ||
173 | case ASYNC_EVENT_QOS_SPEED: | ||
174 | be_async_grp5_qos_speed_process(adapter, | ||
175 | (struct be_async_event_grp5_qos_link_speed *)evt); | ||
176 | break; | ||
177 | case ASYNC_EVENT_PVID_STATE: | ||
178 | be_async_grp5_pvid_state_process(adapter, | ||
179 | (struct be_async_event_grp5_pvid_state *)evt); | ||
180 | break; | ||
181 | default: | ||
182 | dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); | ||
183 | break; | ||
184 | } | ||
185 | } | ||
186 | |||
99 | static inline bool is_link_state_evt(u32 trailer) | 187 | static inline bool is_link_state_evt(u32 trailer) |
100 | { | 188 | { |
189 | return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | ||
190 | ASYNC_TRAILER_EVENT_CODE_MASK) == | ||
191 | ASYNC_EVENT_CODE_LINK_STATE; | ||
192 | } | ||
193 | |||
194 | static inline bool is_grp5_evt(u32 trailer) | ||
195 | { | ||
101 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | 196 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & |
102 | ASYNC_TRAILER_EVENT_CODE_MASK) == | 197 | ASYNC_TRAILER_EVENT_CODE_MASK) == |
103 | ASYNC_EVENT_CODE_LINK_STATE); | 198 | ASYNC_EVENT_CODE_GRP_5); |
104 | } | 199 | } |
105 | 200 | ||
106 | static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) | 201 | static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) |
@@ -143,6 +238,9 @@ int be_process_mcc(struct be_adapter *adapter, int *status) | |||
143 | if (is_link_state_evt(compl->flags)) | 238 | if (is_link_state_evt(compl->flags)) |
144 | be_async_link_state_process(adapter, | 239 | be_async_link_state_process(adapter, |
145 | (struct be_async_event_link_state *) compl); | 240 | (struct be_async_event_link_state *) compl); |
241 | else if (is_grp5_evt(compl->flags)) | ||
242 | be_async_grp5_evt_process(adapter, | ||
243 | compl->flags, compl); | ||
146 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { | 244 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { |
147 | *status = be_mcc_compl_process(adapter, compl); | 245 | *status = be_mcc_compl_process(adapter, compl); |
148 | atomic_dec(&mcc_obj->q.used); | 246 | atomic_dec(&mcc_obj->q.used); |
@@ -162,6 +260,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) | |||
162 | int i, num, status = 0; | 260 | int i, num, status = 0; |
163 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; | 261 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; |
164 | 262 | ||
263 | if (adapter->eeh_err) | ||
264 | return -EIO; | ||
265 | |||
165 | for (i = 0; i < mcc_timeout; i++) { | 266 | for (i = 0; i < mcc_timeout; i++) { |
166 | num = be_process_mcc(adapter, &status); | 267 | num = be_process_mcc(adapter, &status); |
167 | if (num) | 268 | if (num) |
@@ -191,6 +292,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |||
191 | int msecs = 0; | 292 | int msecs = 0; |
192 | u32 ready; | 293 | u32 ready; |
193 | 294 | ||
295 | if (adapter->eeh_err) { | ||
296 | dev_err(&adapter->pdev->dev, | ||
297 | "Error detected in card.Cannot issue commands\n"); | ||
298 | return -EIO; | ||
299 | } | ||
300 | |||
194 | do { | 301 | do { |
195 | ready = ioread32(db); | 302 | ready = ioread32(db); |
196 | if (ready == 0xffffffff) { | 303 | if (ready == 0xffffffff) { |
@@ -205,12 +312,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |||
205 | 312 | ||
206 | if (msecs > 4000) { | 313 | if (msecs > 4000) { |
207 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); | 314 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); |
208 | be_detect_dump_ue(adapter); | 315 | if (!lancer_chip(adapter)) |
316 | be_detect_dump_ue(adapter); | ||
209 | return -1; | 317 | return -1; |
210 | } | 318 | } |
211 | 319 | ||
212 | set_current_state(TASK_INTERRUPTIBLE); | 320 | msleep(1); |
213 | schedule_timeout(msecs_to_jiffies(1)); | ||
214 | msecs++; | 321 | msecs++; |
215 | } while (true); | 322 | } while (true); |
216 | 323 | ||
@@ -269,7 +376,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) | |||
269 | 376 | ||
270 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) | 377 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) |
271 | { | 378 | { |
272 | u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); | 379 | u32 sem; |
380 | |||
381 | if (lancer_chip(adapter)) | ||
382 | sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); | ||
383 | else | ||
384 | sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); | ||
273 | 385 | ||
274 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; | 386 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; |
275 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) | 387 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) |
@@ -282,23 +394,25 @@ int be_cmd_POST(struct be_adapter *adapter) | |||
282 | { | 394 | { |
283 | u16 stage; | 395 | u16 stage; |
284 | int status, timeout = 0; | 396 | int status, timeout = 0; |
397 | struct device *dev = &adapter->pdev->dev; | ||
285 | 398 | ||
286 | do { | 399 | do { |
287 | status = be_POST_stage_get(adapter, &stage); | 400 | status = be_POST_stage_get(adapter, &stage); |
288 | if (status) { | 401 | if (status) { |
289 | dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n", | 402 | dev_err(dev, "POST error; stage=0x%x\n", stage); |
290 | stage); | ||
291 | return -1; | 403 | return -1; |
292 | } else if (stage != POST_STAGE_ARMFW_RDY) { | 404 | } else if (stage != POST_STAGE_ARMFW_RDY) { |
293 | set_current_state(TASK_INTERRUPTIBLE); | 405 | if (msleep_interruptible(2000)) { |
294 | schedule_timeout(2 * HZ); | 406 | dev_err(dev, "Waiting for POST aborted\n"); |
407 | return -EINTR; | ||
408 | } | ||
295 | timeout += 2; | 409 | timeout += 2; |
296 | } else { | 410 | } else { |
297 | return 0; | 411 | return 0; |
298 | } | 412 | } |
299 | } while (timeout < 40); | 413 | } while (timeout < 40); |
300 | 414 | ||
301 | dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); | 415 | dev_err(dev, "POST timeout; stage=0x%x\n", stage); |
302 | return -1; | 416 | return -1; |
303 | } | 417 | } |
304 | 418 | ||
@@ -408,7 +522,8 @@ int be_cmd_fw_init(struct be_adapter *adapter) | |||
408 | u8 *wrb; | 522 | u8 *wrb; |
409 | int status; | 523 | int status; |
410 | 524 | ||
411 | spin_lock(&adapter->mbox_lock); | 525 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
526 | return -1; | ||
412 | 527 | ||
413 | wrb = (u8 *)wrb_from_mbox(adapter); | 528 | wrb = (u8 *)wrb_from_mbox(adapter); |
414 | *wrb++ = 0xFF; | 529 | *wrb++ = 0xFF; |
@@ -422,7 +537,7 @@ int be_cmd_fw_init(struct be_adapter *adapter) | |||
422 | 537 | ||
423 | status = be_mbox_notify_wait(adapter); | 538 | status = be_mbox_notify_wait(adapter); |
424 | 539 | ||
425 | spin_unlock(&adapter->mbox_lock); | 540 | mutex_unlock(&adapter->mbox_lock); |
426 | return status; | 541 | return status; |
427 | } | 542 | } |
428 | 543 | ||
@@ -437,7 +552,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter) | |||
437 | if (adapter->eeh_err) | 552 | if (adapter->eeh_err) |
438 | return -EIO; | 553 | return -EIO; |
439 | 554 | ||
440 | spin_lock(&adapter->mbox_lock); | 555 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
556 | return -1; | ||
441 | 557 | ||
442 | wrb = (u8 *)wrb_from_mbox(adapter); | 558 | wrb = (u8 *)wrb_from_mbox(adapter); |
443 | *wrb++ = 0xFF; | 559 | *wrb++ = 0xFF; |
@@ -451,7 +567,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter) | |||
451 | 567 | ||
452 | status = be_mbox_notify_wait(adapter); | 568 | status = be_mbox_notify_wait(adapter); |
453 | 569 | ||
454 | spin_unlock(&adapter->mbox_lock); | 570 | mutex_unlock(&adapter->mbox_lock); |
455 | return status; | 571 | return status; |
456 | } | 572 | } |
457 | int be_cmd_eq_create(struct be_adapter *adapter, | 573 | int be_cmd_eq_create(struct be_adapter *adapter, |
@@ -462,7 +578,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, | |||
462 | struct be_dma_mem *q_mem = &eq->dma_mem; | 578 | struct be_dma_mem *q_mem = &eq->dma_mem; |
463 | int status; | 579 | int status; |
464 | 580 | ||
465 | spin_lock(&adapter->mbox_lock); | 581 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
582 | return -1; | ||
466 | 583 | ||
467 | wrb = wrb_from_mbox(adapter); | 584 | wrb = wrb_from_mbox(adapter); |
468 | req = embedded_payload(wrb); | 585 | req = embedded_payload(wrb); |
@@ -492,7 +609,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, | |||
492 | eq->created = true; | 609 | eq->created = true; |
493 | } | 610 | } |
494 | 611 | ||
495 | spin_unlock(&adapter->mbox_lock); | 612 | mutex_unlock(&adapter->mbox_lock); |
496 | return status; | 613 | return status; |
497 | } | 614 | } |
498 | 615 | ||
@@ -504,7 +621,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, | |||
504 | struct be_cmd_req_mac_query *req; | 621 | struct be_cmd_req_mac_query *req; |
505 | int status; | 622 | int status; |
506 | 623 | ||
507 | spin_lock(&adapter->mbox_lock); | 624 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
625 | return -1; | ||
508 | 626 | ||
509 | wrb = wrb_from_mbox(adapter); | 627 | wrb = wrb_from_mbox(adapter); |
510 | req = embedded_payload(wrb); | 628 | req = embedded_payload(wrb); |
@@ -529,13 +647,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, | |||
529 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); | 647 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); |
530 | } | 648 | } |
531 | 649 | ||
532 | spin_unlock(&adapter->mbox_lock); | 650 | mutex_unlock(&adapter->mbox_lock); |
533 | return status; | 651 | return status; |
534 | } | 652 | } |
535 | 653 | ||
536 | /* Uses synchronous MCCQ */ | 654 | /* Uses synchronous MCCQ */ |
537 | int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, | 655 | int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, |
538 | u32 if_id, u32 *pmac_id) | 656 | u32 if_id, u32 *pmac_id, u32 domain) |
539 | { | 657 | { |
540 | struct be_mcc_wrb *wrb; | 658 | struct be_mcc_wrb *wrb; |
541 | struct be_cmd_req_pmac_add *req; | 659 | struct be_cmd_req_pmac_add *req; |
@@ -556,6 +674,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, | |||
556 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 674 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
557 | OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); | 675 | OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); |
558 | 676 | ||
677 | req->hdr.domain = domain; | ||
559 | req->if_id = cpu_to_le32(if_id); | 678 | req->if_id = cpu_to_le32(if_id); |
560 | memcpy(req->mac_address, mac_addr, ETH_ALEN); | 679 | memcpy(req->mac_address, mac_addr, ETH_ALEN); |
561 | 680 | ||
@@ -571,7 +690,7 @@ err: | |||
571 | } | 690 | } |
572 | 691 | ||
573 | /* Uses synchronous MCCQ */ | 692 | /* Uses synchronous MCCQ */ |
574 | int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) | 693 | int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom) |
575 | { | 694 | { |
576 | struct be_mcc_wrb *wrb; | 695 | struct be_mcc_wrb *wrb; |
577 | struct be_cmd_req_pmac_del *req; | 696 | struct be_cmd_req_pmac_del *req; |
@@ -592,6 +711,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) | |||
592 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 711 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
593 | OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); | 712 | OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); |
594 | 713 | ||
714 | req->hdr.domain = dom; | ||
595 | req->if_id = cpu_to_le32(if_id); | 715 | req->if_id = cpu_to_le32(if_id); |
596 | req->pmac_id = cpu_to_le32(pmac_id); | 716 | req->pmac_id = cpu_to_le32(pmac_id); |
597 | 717 | ||
@@ -613,7 +733,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, | |||
613 | void *ctxt; | 733 | void *ctxt; |
614 | int status; | 734 | int status; |
615 | 735 | ||
616 | spin_lock(&adapter->mbox_lock); | 736 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
737 | return -1; | ||
617 | 738 | ||
618 | wrb = wrb_from_mbox(adapter); | 739 | wrb = wrb_from_mbox(adapter); |
619 | req = embedded_payload(wrb); | 740 | req = embedded_payload(wrb); |
@@ -626,16 +747,34 @@ int be_cmd_cq_create(struct be_adapter *adapter, | |||
626 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); | 747 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); |
627 | 748 | ||
628 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | 749 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); |
750 | if (lancer_chip(adapter)) { | ||
751 | req->hdr.version = 2; | ||
752 | req->page_size = 1; /* 1 for 4K */ | ||
753 | AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, | ||
754 | no_delay); | ||
755 | AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, | ||
756 | __ilog2_u32(cq->len/256)); | ||
757 | AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); | ||
758 | AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, | ||
759 | ctxt, 1); | ||
760 | AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, | ||
761 | ctxt, eq->id); | ||
762 | AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1); | ||
763 | } else { | ||
764 | AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, | ||
765 | coalesce_wm); | ||
766 | AMAP_SET_BITS(struct amap_cq_context_be, nodelay, | ||
767 | ctxt, no_delay); | ||
768 | AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, | ||
769 | __ilog2_u32(cq->len/256)); | ||
770 | AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); | ||
771 | AMAP_SET_BITS(struct amap_cq_context_be, solevent, | ||
772 | ctxt, sol_evts); | ||
773 | AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); | ||
774 | AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); | ||
775 | AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1); | ||
776 | } | ||
629 | 777 | ||
630 | AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); | ||
631 | AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); | ||
632 | AMAP_SET_BITS(struct amap_cq_context, count, ctxt, | ||
633 | __ilog2_u32(cq->len/256)); | ||
634 | AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); | ||
635 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); | ||
636 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); | ||
637 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); | ||
638 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); | ||
639 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | 778 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); |
640 | 779 | ||
641 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | 780 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); |
@@ -647,7 +786,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, | |||
647 | cq->created = true; | 786 | cq->created = true; |
648 | } | 787 | } |
649 | 788 | ||
650 | spin_unlock(&adapter->mbox_lock); | 789 | mutex_unlock(&adapter->mbox_lock); |
651 | 790 | ||
652 | return status; | 791 | return status; |
653 | } | 792 | } |
@@ -670,25 +809,41 @@ int be_cmd_mccq_create(struct be_adapter *adapter, | |||
670 | void *ctxt; | 809 | void *ctxt; |
671 | int status; | 810 | int status; |
672 | 811 | ||
673 | spin_lock(&adapter->mbox_lock); | 812 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
813 | return -1; | ||
674 | 814 | ||
675 | wrb = wrb_from_mbox(adapter); | 815 | wrb = wrb_from_mbox(adapter); |
676 | req = embedded_payload(wrb); | 816 | req = embedded_payload(wrb); |
677 | ctxt = &req->context; | 817 | ctxt = &req->context; |
678 | 818 | ||
679 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | 819 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, |
680 | OPCODE_COMMON_MCC_CREATE); | 820 | OPCODE_COMMON_MCC_CREATE_EXT); |
681 | 821 | ||
682 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 822 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
683 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | 823 | OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); |
684 | 824 | ||
685 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | 825 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); |
826 | if (lancer_chip(adapter)) { | ||
827 | req->hdr.version = 1; | ||
828 | req->cq_id = cpu_to_le16(cq->id); | ||
829 | |||
830 | AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, | ||
831 | be_encoded_q_len(mccq->len)); | ||
832 | AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); | ||
833 | AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, | ||
834 | ctxt, cq->id); | ||
835 | AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, | ||
836 | ctxt, 1); | ||
686 | 837 | ||
687 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); | 838 | } else { |
688 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | 839 | AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); |
689 | be_encoded_q_len(mccq->len)); | 840 | AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, |
690 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | 841 | be_encoded_q_len(mccq->len)); |
842 | AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); | ||
843 | } | ||
691 | 844 | ||
845 | /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ | ||
846 | req->async_event_bitmap[0] = cpu_to_le32(0x00000022); | ||
692 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | 847 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); |
693 | 848 | ||
694 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | 849 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); |
@@ -699,7 +854,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter, | |||
699 | mccq->id = le16_to_cpu(resp->id); | 854 | mccq->id = le16_to_cpu(resp->id); |
700 | mccq->created = true; | 855 | mccq->created = true; |
701 | } | 856 | } |
702 | spin_unlock(&adapter->mbox_lock); | 857 | mutex_unlock(&adapter->mbox_lock); |
703 | 858 | ||
704 | return status; | 859 | return status; |
705 | } | 860 | } |
@@ -714,7 +869,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, | |||
714 | void *ctxt; | 869 | void *ctxt; |
715 | int status; | 870 | int status; |
716 | 871 | ||
717 | spin_lock(&adapter->mbox_lock); | 872 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
873 | return -1; | ||
718 | 874 | ||
719 | wrb = wrb_from_mbox(adapter); | 875 | wrb = wrb_from_mbox(adapter); |
720 | req = embedded_payload(wrb); | 876 | req = embedded_payload(wrb); |
@@ -726,6 +882,12 @@ int be_cmd_txq_create(struct be_adapter *adapter, | |||
726 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, | 882 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, |
727 | sizeof(*req)); | 883 | sizeof(*req)); |
728 | 884 | ||
885 | if (lancer_chip(adapter)) { | ||
886 | req->hdr.version = 1; | ||
887 | AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, | ||
888 | adapter->if_handle); | ||
889 | } | ||
890 | |||
729 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | 891 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); |
730 | req->ulp_num = BE_ULP1_NUM; | 892 | req->ulp_num = BE_ULP1_NUM; |
731 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; | 893 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; |
@@ -746,7 +908,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, | |||
746 | txq->created = true; | 908 | txq->created = true; |
747 | } | 909 | } |
748 | 910 | ||
749 | spin_unlock(&adapter->mbox_lock); | 911 | mutex_unlock(&adapter->mbox_lock); |
750 | 912 | ||
751 | return status; | 913 | return status; |
752 | } | 914 | } |
@@ -754,14 +916,15 @@ int be_cmd_txq_create(struct be_adapter *adapter, | |||
754 | /* Uses mbox */ | 916 | /* Uses mbox */ |
755 | int be_cmd_rxq_create(struct be_adapter *adapter, | 917 | int be_cmd_rxq_create(struct be_adapter *adapter, |
756 | struct be_queue_info *rxq, u16 cq_id, u16 frag_size, | 918 | struct be_queue_info *rxq, u16 cq_id, u16 frag_size, |
757 | u16 max_frame_size, u32 if_id, u32 rss) | 919 | u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id) |
758 | { | 920 | { |
759 | struct be_mcc_wrb *wrb; | 921 | struct be_mcc_wrb *wrb; |
760 | struct be_cmd_req_eth_rx_create *req; | 922 | struct be_cmd_req_eth_rx_create *req; |
761 | struct be_dma_mem *q_mem = &rxq->dma_mem; | 923 | struct be_dma_mem *q_mem = &rxq->dma_mem; |
762 | int status; | 924 | int status; |
763 | 925 | ||
764 | spin_lock(&adapter->mbox_lock); | 926 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
927 | return -1; | ||
765 | 928 | ||
766 | wrb = wrb_from_mbox(adapter); | 929 | wrb = wrb_from_mbox(adapter); |
767 | req = embedded_payload(wrb); | 930 | req = embedded_payload(wrb); |
@@ -785,9 +948,10 @@ int be_cmd_rxq_create(struct be_adapter *adapter, | |||
785 | struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); | 948 | struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); |
786 | rxq->id = le16_to_cpu(resp->id); | 949 | rxq->id = le16_to_cpu(resp->id); |
787 | rxq->created = true; | 950 | rxq->created = true; |
951 | *rss_id = resp->rss_id; | ||
788 | } | 952 | } |
789 | 953 | ||
790 | spin_unlock(&adapter->mbox_lock); | 954 | mutex_unlock(&adapter->mbox_lock); |
791 | 955 | ||
792 | return status; | 956 | return status; |
793 | } | 957 | } |
@@ -806,7 +970,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, | |||
806 | if (adapter->eeh_err) | 970 | if (adapter->eeh_err) |
807 | return -EIO; | 971 | return -EIO; |
808 | 972 | ||
809 | spin_lock(&adapter->mbox_lock); | 973 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
974 | return -1; | ||
810 | 975 | ||
811 | wrb = wrb_from_mbox(adapter); | 976 | wrb = wrb_from_mbox(adapter); |
812 | req = embedded_payload(wrb); | 977 | req = embedded_payload(wrb); |
@@ -843,7 +1008,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, | |||
843 | 1008 | ||
844 | status = be_mbox_notify_wait(adapter); | 1009 | status = be_mbox_notify_wait(adapter); |
845 | 1010 | ||
846 | spin_unlock(&adapter->mbox_lock); | 1011 | mutex_unlock(&adapter->mbox_lock); |
847 | 1012 | ||
848 | return status; | 1013 | return status; |
849 | } | 1014 | } |
@@ -859,7 +1024,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, | |||
859 | struct be_cmd_req_if_create *req; | 1024 | struct be_cmd_req_if_create *req; |
860 | int status; | 1025 | int status; |
861 | 1026 | ||
862 | spin_lock(&adapter->mbox_lock); | 1027 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
1028 | return -1; | ||
863 | 1029 | ||
864 | wrb = wrb_from_mbox(adapter); | 1030 | wrb = wrb_from_mbox(adapter); |
865 | req = embedded_payload(wrb); | 1031 | req = embedded_payload(wrb); |
@@ -885,12 +1051,12 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, | |||
885 | *pmac_id = le32_to_cpu(resp->pmac_id); | 1051 | *pmac_id = le32_to_cpu(resp->pmac_id); |
886 | } | 1052 | } |
887 | 1053 | ||
888 | spin_unlock(&adapter->mbox_lock); | 1054 | mutex_unlock(&adapter->mbox_lock); |
889 | return status; | 1055 | return status; |
890 | } | 1056 | } |
891 | 1057 | ||
892 | /* Uses mbox */ | 1058 | /* Uses mbox */ |
893 | int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) | 1059 | int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain) |
894 | { | 1060 | { |
895 | struct be_mcc_wrb *wrb; | 1061 | struct be_mcc_wrb *wrb; |
896 | struct be_cmd_req_if_destroy *req; | 1062 | struct be_cmd_req_if_destroy *req; |
@@ -899,7 +1065,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) | |||
899 | if (adapter->eeh_err) | 1065 | if (adapter->eeh_err) |
900 | return -EIO; | 1066 | return -EIO; |
901 | 1067 | ||
902 | spin_lock(&adapter->mbox_lock); | 1068 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
1069 | return -1; | ||
903 | 1070 | ||
904 | wrb = wrb_from_mbox(adapter); | 1071 | wrb = wrb_from_mbox(adapter); |
905 | req = embedded_payload(wrb); | 1072 | req = embedded_payload(wrb); |
@@ -910,11 +1077,12 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) | |||
910 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 1077 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
911 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); | 1078 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); |
912 | 1079 | ||
1080 | req->hdr.domain = domain; | ||
913 | req->interface_id = cpu_to_le32(interface_id); | 1081 | req->interface_id = cpu_to_le32(interface_id); |
914 | 1082 | ||
915 | status = be_mbox_notify_wait(adapter); | 1083 | status = be_mbox_notify_wait(adapter); |
916 | 1084 | ||
917 | spin_unlock(&adapter->mbox_lock); | 1085 | mutex_unlock(&adapter->mbox_lock); |
918 | 1086 | ||
919 | return status; | 1087 | return status; |
920 | } | 1088 | } |
@@ -926,10 +1094,13 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) | |||
926 | int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) | 1094 | int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) |
927 | { | 1095 | { |
928 | struct be_mcc_wrb *wrb; | 1096 | struct be_mcc_wrb *wrb; |
929 | struct be_cmd_req_get_stats *req; | 1097 | struct be_cmd_req_hdr *hdr; |
930 | struct be_sge *sge; | 1098 | struct be_sge *sge; |
931 | int status = 0; | 1099 | int status = 0; |
932 | 1100 | ||
1101 | if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) | ||
1102 | be_cmd_get_die_temperature(adapter); | ||
1103 | |||
933 | spin_lock_bh(&adapter->mcc_lock); | 1104 | spin_lock_bh(&adapter->mcc_lock); |
934 | 1105 | ||
935 | wrb = wrb_from_mccq(adapter); | 1106 | wrb = wrb_from_mccq(adapter); |
@@ -937,20 +1108,68 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) | |||
937 | status = -EBUSY; | 1108 | status = -EBUSY; |
938 | goto err; | 1109 | goto err; |
939 | } | 1110 | } |
940 | req = nonemb_cmd->va; | 1111 | hdr = nonemb_cmd->va; |
941 | sge = nonembedded_sgl(wrb); | 1112 | sge = nonembedded_sgl(wrb); |
942 | 1113 | ||
943 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, | 1114 | be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1, |
944 | OPCODE_ETH_GET_STATISTICS); | 1115 | OPCODE_ETH_GET_STATISTICS); |
945 | 1116 | ||
1117 | be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, | ||
1118 | OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size); | ||
1119 | |||
1120 | if (adapter->generation == BE_GEN3) | ||
1121 | hdr->version = 1; | ||
1122 | |||
1123 | wrb->tag1 = CMD_SUBSYSTEM_ETH; | ||
1124 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | ||
1125 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | ||
1126 | sge->len = cpu_to_le32(nonemb_cmd->size); | ||
1127 | |||
1128 | be_mcc_notify(adapter); | ||
1129 | adapter->stats_cmd_sent = true; | ||
1130 | |||
1131 | err: | ||
1132 | spin_unlock_bh(&adapter->mcc_lock); | ||
1133 | return status; | ||
1134 | } | ||
1135 | |||
1136 | /* Lancer Stats */ | ||
1137 | int lancer_cmd_get_pport_stats(struct be_adapter *adapter, | ||
1138 | struct be_dma_mem *nonemb_cmd) | ||
1139 | { | ||
1140 | |||
1141 | struct be_mcc_wrb *wrb; | ||
1142 | struct lancer_cmd_req_pport_stats *req; | ||
1143 | struct be_sge *sge; | ||
1144 | int status = 0; | ||
1145 | |||
1146 | spin_lock_bh(&adapter->mcc_lock); | ||
1147 | |||
1148 | wrb = wrb_from_mccq(adapter); | ||
1149 | if (!wrb) { | ||
1150 | status = -EBUSY; | ||
1151 | goto err; | ||
1152 | } | ||
1153 | req = nonemb_cmd->va; | ||
1154 | sge = nonembedded_sgl(wrb); | ||
1155 | |||
1156 | be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1, | ||
1157 | OPCODE_ETH_GET_PPORT_STATS); | ||
1158 | |||
946 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | 1159 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, |
947 | OPCODE_ETH_GET_STATISTICS, sizeof(*req)); | 1160 | OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size); |
1161 | |||
1162 | |||
1163 | req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); | ||
1164 | req->cmd_params.params.reset_stats = 0; | ||
1165 | |||
1166 | wrb->tag1 = CMD_SUBSYSTEM_ETH; | ||
948 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | 1167 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); |
949 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | 1168 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); |
950 | sge->len = cpu_to_le32(nonemb_cmd->size); | 1169 | sge->len = cpu_to_le32(nonemb_cmd->size); |
951 | 1170 | ||
952 | be_mcc_notify(adapter); | 1171 | be_mcc_notify(adapter); |
953 | adapter->stats_ioctl_sent = true; | 1172 | adapter->stats_cmd_sent = true; |
954 | 1173 | ||
955 | err: | 1174 | err: |
956 | spin_unlock_bh(&adapter->mcc_lock); | 1175 | spin_unlock_bh(&adapter->mcc_lock); |
@@ -959,7 +1178,7 @@ err: | |||
959 | 1178 | ||
960 | /* Uses synchronous mcc */ | 1179 | /* Uses synchronous mcc */ |
961 | int be_cmd_link_status_query(struct be_adapter *adapter, | 1180 | int be_cmd_link_status_query(struct be_adapter *adapter, |
962 | bool *link_up, u8 *mac_speed, u16 *link_speed) | 1181 | bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom) |
963 | { | 1182 | { |
964 | struct be_mcc_wrb *wrb; | 1183 | struct be_mcc_wrb *wrb; |
965 | struct be_cmd_req_link_status *req; | 1184 | struct be_cmd_req_link_status *req; |
@@ -997,6 +1216,154 @@ err: | |||
997 | return status; | 1216 | return status; |
998 | } | 1217 | } |
999 | 1218 | ||
1219 | /* Uses synchronous mcc */ | ||
1220 | int be_cmd_get_die_temperature(struct be_adapter *adapter) | ||
1221 | { | ||
1222 | struct be_mcc_wrb *wrb; | ||
1223 | struct be_cmd_req_get_cntl_addnl_attribs *req; | ||
1224 | int status; | ||
1225 | |||
1226 | spin_lock_bh(&adapter->mcc_lock); | ||
1227 | |||
1228 | wrb = wrb_from_mccq(adapter); | ||
1229 | if (!wrb) { | ||
1230 | status = -EBUSY; | ||
1231 | goto err; | ||
1232 | } | ||
1233 | req = embedded_payload(wrb); | ||
1234 | |||
1235 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | ||
1236 | OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES); | ||
1237 | |||
1238 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
1239 | OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req)); | ||
1240 | |||
1241 | status = be_mcc_notify_wait(adapter); | ||
1242 | if (!status) { | ||
1243 | struct be_cmd_resp_get_cntl_addnl_attribs *resp = | ||
1244 | embedded_payload(wrb); | ||
1245 | adapter->drv_stats.be_on_die_temperature = | ||
1246 | resp->on_die_temperature; | ||
1247 | } | ||
1248 | /* If IOCTL fails once, do not bother issuing it again */ | ||
1249 | else | ||
1250 | be_get_temp_freq = 0; | ||
1251 | |||
1252 | err: | ||
1253 | spin_unlock_bh(&adapter->mcc_lock); | ||
1254 | return status; | ||
1255 | } | ||
1256 | |||
1257 | /* Uses synchronous mcc */ | ||
1258 | int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) | ||
1259 | { | ||
1260 | struct be_mcc_wrb *wrb; | ||
1261 | struct be_cmd_req_get_fat *req; | ||
1262 | int status; | ||
1263 | |||
1264 | spin_lock_bh(&adapter->mcc_lock); | ||
1265 | |||
1266 | wrb = wrb_from_mccq(adapter); | ||
1267 | if (!wrb) { | ||
1268 | status = -EBUSY; | ||
1269 | goto err; | ||
1270 | } | ||
1271 | req = embedded_payload(wrb); | ||
1272 | |||
1273 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | ||
1274 | OPCODE_COMMON_MANAGE_FAT); | ||
1275 | |||
1276 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
1277 | OPCODE_COMMON_MANAGE_FAT, sizeof(*req)); | ||
1278 | req->fat_operation = cpu_to_le32(QUERY_FAT); | ||
1279 | status = be_mcc_notify_wait(adapter); | ||
1280 | if (!status) { | ||
1281 | struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); | ||
1282 | if (log_size && resp->log_size) | ||
1283 | *log_size = le32_to_cpu(resp->log_size) - | ||
1284 | sizeof(u32); | ||
1285 | } | ||
1286 | err: | ||
1287 | spin_unlock_bh(&adapter->mcc_lock); | ||
1288 | return status; | ||
1289 | } | ||
1290 | |||
1291 | void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) | ||
1292 | { | ||
1293 | struct be_dma_mem get_fat_cmd; | ||
1294 | struct be_mcc_wrb *wrb; | ||
1295 | struct be_cmd_req_get_fat *req; | ||
1296 | struct be_sge *sge; | ||
1297 | u32 offset = 0, total_size, buf_size, | ||
1298 | log_offset = sizeof(u32), payload_len; | ||
1299 | int status; | ||
1300 | |||
1301 | if (buf_len == 0) | ||
1302 | return; | ||
1303 | |||
1304 | total_size = buf_len; | ||
1305 | |||
1306 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | ||
1307 | get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, | ||
1308 | get_fat_cmd.size, | ||
1309 | &get_fat_cmd.dma); | ||
1310 | if (!get_fat_cmd.va) { | ||
1311 | status = -ENOMEM; | ||
1312 | dev_err(&adapter->pdev->dev, | ||
1313 | "Memory allocation failure while retrieving FAT data\n"); | ||
1314 | return; | ||
1315 | } | ||
1316 | |||
1317 | spin_lock_bh(&adapter->mcc_lock); | ||
1318 | |||
1319 | while (total_size) { | ||
1320 | buf_size = min(total_size, (u32)60*1024); | ||
1321 | total_size -= buf_size; | ||
1322 | |||
1323 | wrb = wrb_from_mccq(adapter); | ||
1324 | if (!wrb) { | ||
1325 | status = -EBUSY; | ||
1326 | goto err; | ||
1327 | } | ||
1328 | req = get_fat_cmd.va; | ||
1329 | sge = nonembedded_sgl(wrb); | ||
1330 | |||
1331 | payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; | ||
1332 | be_wrb_hdr_prepare(wrb, payload_len, false, 1, | ||
1333 | OPCODE_COMMON_MANAGE_FAT); | ||
1334 | |||
1335 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
1336 | OPCODE_COMMON_MANAGE_FAT, payload_len); | ||
1337 | |||
1338 | sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma)); | ||
1339 | sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF); | ||
1340 | sge->len = cpu_to_le32(get_fat_cmd.size); | ||
1341 | |||
1342 | req->fat_operation = cpu_to_le32(RETRIEVE_FAT); | ||
1343 | req->read_log_offset = cpu_to_le32(log_offset); | ||
1344 | req->read_log_length = cpu_to_le32(buf_size); | ||
1345 | req->data_buffer_size = cpu_to_le32(buf_size); | ||
1346 | |||
1347 | status = be_mcc_notify_wait(adapter); | ||
1348 | if (!status) { | ||
1349 | struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; | ||
1350 | memcpy(buf + offset, | ||
1351 | resp->data_buffer, | ||
1352 | resp->read_log_length); | ||
1353 | } else { | ||
1354 | dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); | ||
1355 | goto err; | ||
1356 | } | ||
1357 | offset += buf_size; | ||
1358 | log_offset += buf_size; | ||
1359 | } | ||
1360 | err: | ||
1361 | pci_free_consistent(adapter->pdev, get_fat_cmd.size, | ||
1362 | get_fat_cmd.va, | ||
1363 | get_fat_cmd.dma); | ||
1364 | spin_unlock_bh(&adapter->mcc_lock); | ||
1365 | } | ||
1366 | |||
1000 | /* Uses Mbox */ | 1367 | /* Uses Mbox */ |
1001 | int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) | 1368 | int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) |
1002 | { | 1369 | { |
@@ -1004,7 +1371,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) | |||
1004 | struct be_cmd_req_get_fw_version *req; | 1371 | struct be_cmd_req_get_fw_version *req; |
1005 | int status; | 1372 | int status; |
1006 | 1373 | ||
1007 | spin_lock(&adapter->mbox_lock); | 1374 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
1375 | return -1; | ||
1008 | 1376 | ||
1009 | wrb = wrb_from_mbox(adapter); | 1377 | wrb = wrb_from_mbox(adapter); |
1010 | req = embedded_payload(wrb); | 1378 | req = embedded_payload(wrb); |
@@ -1021,7 +1389,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) | |||
1021 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); | 1389 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); |
1022 | } | 1390 | } |
1023 | 1391 | ||
1024 | spin_unlock(&adapter->mbox_lock); | 1392 | mutex_unlock(&adapter->mbox_lock); |
1025 | return status; | 1393 | return status; |
1026 | } | 1394 | } |
1027 | 1395 | ||
@@ -1103,12 +1471,24 @@ err: | |||
1103 | /* Uses MCC for this command as it may be called in BH context | 1471 | /* Uses MCC for this command as it may be called in BH context |
1104 | * Uses synchronous mcc | 1472 | * Uses synchronous mcc |
1105 | */ | 1473 | */ |
1106 | int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) | 1474 | int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en) |
1107 | { | 1475 | { |
1108 | struct be_mcc_wrb *wrb; | 1476 | struct be_mcc_wrb *wrb; |
1109 | struct be_cmd_req_promiscuous_config *req; | 1477 | struct be_cmd_req_rx_filter *req; |
1478 | struct be_dma_mem promiscous_cmd; | ||
1479 | struct be_sge *sge; | ||
1110 | int status; | 1480 | int status; |
1111 | 1481 | ||
1482 | memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem)); | ||
1483 | promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter); | ||
1484 | promiscous_cmd.va = pci_alloc_consistent(adapter->pdev, | ||
1485 | promiscous_cmd.size, &promiscous_cmd.dma); | ||
1486 | if (!promiscous_cmd.va) { | ||
1487 | dev_err(&adapter->pdev->dev, | ||
1488 | "Memory allocation failure\n"); | ||
1489 | return -ENOMEM; | ||
1490 | } | ||
1491 | |||
1112 | spin_lock_bh(&adapter->mcc_lock); | 1492 | spin_lock_bh(&adapter->mcc_lock); |
1113 | 1493 | ||
1114 | wrb = wrb_from_mccq(adapter); | 1494 | wrb = wrb_from_mccq(adapter); |
@@ -1116,32 +1496,36 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) | |||
1116 | status = -EBUSY; | 1496 | status = -EBUSY; |
1117 | goto err; | 1497 | goto err; |
1118 | } | 1498 | } |
1119 | req = embedded_payload(wrb); | ||
1120 | 1499 | ||
1121 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS); | 1500 | req = promiscous_cmd.va; |
1501 | sge = nonembedded_sgl(wrb); | ||
1122 | 1502 | ||
1123 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | 1503 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, |
1124 | OPCODE_ETH_PROMISCUOUS, sizeof(*req)); | 1504 | OPCODE_COMMON_NTWK_RX_FILTER); |
1125 | 1505 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1126 | /* In FW versions X.102.149/X.101.487 and later, | 1506 | OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req)); |
1127 | * the port setting associated only with the | 1507 | |
1128 | * issuing pci function will take effect | 1508 | req->if_id = cpu_to_le32(adapter->if_handle); |
1129 | */ | 1509 | req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS); |
1130 | if (port_num) | 1510 | if (en) |
1131 | req->port1_promiscuous = en; | 1511 | req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS); |
1132 | else | 1512 | |
1133 | req->port0_promiscuous = en; | 1513 | sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma)); |
1514 | sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF); | ||
1515 | sge->len = cpu_to_le32(promiscous_cmd.size); | ||
1134 | 1516 | ||
1135 | status = be_mcc_notify_wait(adapter); | 1517 | status = be_mcc_notify_wait(adapter); |
1136 | 1518 | ||
1137 | err: | 1519 | err: |
1138 | spin_unlock_bh(&adapter->mcc_lock); | 1520 | spin_unlock_bh(&adapter->mcc_lock); |
1521 | pci_free_consistent(adapter->pdev, promiscous_cmd.size, | ||
1522 | promiscous_cmd.va, promiscous_cmd.dma); | ||
1139 | return status; | 1523 | return status; |
1140 | } | 1524 | } |
1141 | 1525 | ||
1142 | /* | 1526 | /* |
1143 | * Uses MCC for this command as it may be called in BH context | 1527 | * Uses MCC for this command as it may be called in BH context |
1144 | * (mc == NULL) => multicast promiscous | 1528 | * (mc == NULL) => multicast promiscuous |
1145 | */ | 1529 | */ |
1146 | int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, | 1530 | int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, |
1147 | struct net_device *netdev, struct be_dma_mem *mem) | 1531 | struct net_device *netdev, struct be_dma_mem *mem) |
@@ -1179,7 +1563,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, | |||
1179 | 1563 | ||
1180 | i = 0; | 1564 | i = 0; |
1181 | netdev_for_each_mc_addr(ha, netdev) | 1565 | netdev_for_each_mc_addr(ha, netdev) |
1182 | memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); | 1566 | memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN); |
1183 | } else { | 1567 | } else { |
1184 | req->promiscuous = 1; | 1568 | req->promiscuous = 1; |
1185 | } | 1569 | } |
@@ -1259,13 +1643,15 @@ err: | |||
1259 | } | 1643 | } |
1260 | 1644 | ||
1261 | /* Uses mbox */ | 1645 | /* Uses mbox */ |
1262 | int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode) | 1646 | int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, |
1647 | u32 *mode, u32 *caps) | ||
1263 | { | 1648 | { |
1264 | struct be_mcc_wrb *wrb; | 1649 | struct be_mcc_wrb *wrb; |
1265 | struct be_cmd_req_query_fw_cfg *req; | 1650 | struct be_cmd_req_query_fw_cfg *req; |
1266 | int status; | 1651 | int status; |
1267 | 1652 | ||
1268 | spin_lock(&adapter->mbox_lock); | 1653 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
1654 | return -1; | ||
1269 | 1655 | ||
1270 | wrb = wrb_from_mbox(adapter); | 1656 | wrb = wrb_from_mbox(adapter); |
1271 | req = embedded_payload(wrb); | 1657 | req = embedded_payload(wrb); |
@@ -1281,9 +1667,10 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode) | |||
1281 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); | 1667 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); |
1282 | *port_num = le32_to_cpu(resp->phys_port); | 1668 | *port_num = le32_to_cpu(resp->phys_port); |
1283 | *mode = le32_to_cpu(resp->function_mode); | 1669 | *mode = le32_to_cpu(resp->function_mode); |
1670 | *caps = le32_to_cpu(resp->function_caps); | ||
1284 | } | 1671 | } |
1285 | 1672 | ||
1286 | spin_unlock(&adapter->mbox_lock); | 1673 | mutex_unlock(&adapter->mbox_lock); |
1287 | return status; | 1674 | return status; |
1288 | } | 1675 | } |
1289 | 1676 | ||
@@ -1294,7 +1681,8 @@ int be_cmd_reset_function(struct be_adapter *adapter) | |||
1294 | struct be_cmd_req_hdr *req; | 1681 | struct be_cmd_req_hdr *req; |
1295 | int status; | 1682 | int status; |
1296 | 1683 | ||
1297 | spin_lock(&adapter->mbox_lock); | 1684 | if (mutex_lock_interruptible(&adapter->mbox_lock)) |
1685 | return -1; | ||
1298 | 1686 | ||
1299 | wrb = wrb_from_mbox(adapter); | 1687 | wrb = wrb_from_mbox(adapter); |
1300 | req = embedded_payload(wrb); | 1688 | req = embedded_payload(wrb); |
@@ -1307,7 +1695,40 @@ int be_cmd_reset_function(struct be_adapter *adapter) | |||
1307 | 1695 | ||
1308 | status = be_mbox_notify_wait(adapter); | 1696 | status = be_mbox_notify_wait(adapter); |
1309 | 1697 | ||
1310 | spin_unlock(&adapter->mbox_lock); | 1698 | mutex_unlock(&adapter->mbox_lock); |
1699 | return status; | ||
1700 | } | ||
1701 | |||
1702 | int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) | ||
1703 | { | ||
1704 | struct be_mcc_wrb *wrb; | ||
1705 | struct be_cmd_req_rss_config *req; | ||
1706 | u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF, | ||
1707 | 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF}; | ||
1708 | int status; | ||
1709 | |||
1710 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | ||
1711 | return -1; | ||
1712 | |||
1713 | wrb = wrb_from_mbox(adapter); | ||
1714 | req = embedded_payload(wrb); | ||
1715 | |||
1716 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | ||
1717 | OPCODE_ETH_RSS_CONFIG); | ||
1718 | |||
1719 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | ||
1720 | OPCODE_ETH_RSS_CONFIG, sizeof(*req)); | ||
1721 | |||
1722 | req->if_id = cpu_to_le32(adapter->if_handle); | ||
1723 | req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4); | ||
1724 | req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); | ||
1725 | memcpy(req->cpu_table, rsstable, table_size); | ||
1726 | memcpy(req->hash, myhash, sizeof(myhash)); | ||
1727 | be_dws_cpu_to_le(req->hash, sizeof(req->hash)); | ||
1728 | |||
1729 | status = be_mbox_notify_wait(adapter); | ||
1730 | |||
1731 | mutex_unlock(&adapter->mbox_lock); | ||
1311 | return status; | 1732 | return status; |
1312 | } | 1733 | } |
1313 | 1734 | ||
@@ -1382,38 +1803,77 @@ err: | |||
1382 | return status; | 1803 | return status; |
1383 | } | 1804 | } |
1384 | 1805 | ||
1385 | /* Uses sync mcc */ | 1806 | int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, |
1386 | int be_cmd_read_port_type(struct be_adapter *adapter, u32 port, | 1807 | u32 data_size, u32 data_offset, const char *obj_name, |
1387 | u8 *connector) | 1808 | u32 *data_written, u8 *addn_status) |
1388 | { | 1809 | { |
1389 | struct be_mcc_wrb *wrb; | 1810 | struct be_mcc_wrb *wrb; |
1390 | struct be_cmd_req_port_type *req; | 1811 | struct lancer_cmd_req_write_object *req; |
1812 | struct lancer_cmd_resp_write_object *resp; | ||
1813 | void *ctxt = NULL; | ||
1391 | int status; | 1814 | int status; |
1392 | 1815 | ||
1393 | spin_lock_bh(&adapter->mcc_lock); | 1816 | spin_lock_bh(&adapter->mcc_lock); |
1817 | adapter->flash_status = 0; | ||
1394 | 1818 | ||
1395 | wrb = wrb_from_mccq(adapter); | 1819 | wrb = wrb_from_mccq(adapter); |
1396 | if (!wrb) { | 1820 | if (!wrb) { |
1397 | status = -EBUSY; | 1821 | status = -EBUSY; |
1398 | goto err; | 1822 | goto err_unlock; |
1399 | } | 1823 | } |
1824 | |||
1400 | req = embedded_payload(wrb); | 1825 | req = embedded_payload(wrb); |
1401 | 1826 | ||
1402 | be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0, | 1827 | be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object), |
1403 | OPCODE_COMMON_READ_TRANSRECV_DATA); | 1828 | true, 1, OPCODE_COMMON_WRITE_OBJECT); |
1829 | wrb->tag1 = CMD_SUBSYSTEM_COMMON; | ||
1404 | 1830 | ||
1405 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 1831 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
1406 | OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req)); | 1832 | OPCODE_COMMON_WRITE_OBJECT, |
1833 | sizeof(struct lancer_cmd_req_write_object)); | ||
1407 | 1834 | ||
1408 | req->port = cpu_to_le32(port); | 1835 | ctxt = &req->context; |
1409 | req->page_num = cpu_to_le32(TR_PAGE_A0); | 1836 | AMAP_SET_BITS(struct amap_lancer_write_obj_context, |
1410 | status = be_mcc_notify_wait(adapter); | 1837 | write_length, ctxt, data_size); |
1838 | |||
1839 | if (data_size == 0) | ||
1840 | AMAP_SET_BITS(struct amap_lancer_write_obj_context, | ||
1841 | eof, ctxt, 1); | ||
1842 | else | ||
1843 | AMAP_SET_BITS(struct amap_lancer_write_obj_context, | ||
1844 | eof, ctxt, 0); | ||
1845 | |||
1846 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | ||
1847 | req->write_offset = cpu_to_le32(data_offset); | ||
1848 | strcpy(req->object_name, obj_name); | ||
1849 | req->descriptor_count = cpu_to_le32(1); | ||
1850 | req->buf_len = cpu_to_le32(data_size); | ||
1851 | req->addr_low = cpu_to_le32((cmd->dma + | ||
1852 | sizeof(struct lancer_cmd_req_write_object)) | ||
1853 | & 0xFFFFFFFF); | ||
1854 | req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + | ||
1855 | sizeof(struct lancer_cmd_req_write_object))); | ||
1856 | |||
1857 | be_mcc_notify(adapter); | ||
1858 | spin_unlock_bh(&adapter->mcc_lock); | ||
1859 | |||
1860 | if (!wait_for_completion_timeout(&adapter->flash_compl, | ||
1861 | msecs_to_jiffies(12000))) | ||
1862 | status = -1; | ||
1863 | else | ||
1864 | status = adapter->flash_status; | ||
1865 | |||
1866 | resp = embedded_payload(wrb); | ||
1411 | if (!status) { | 1867 | if (!status) { |
1412 | struct be_cmd_resp_port_type *resp = embedded_payload(wrb); | 1868 | *data_written = le32_to_cpu(resp->actual_write_len); |
1413 | *connector = resp->data.connector; | 1869 | } else { |
1870 | *addn_status = resp->additional_status; | ||
1871 | status = resp->status; | ||
1414 | } | 1872 | } |
1415 | 1873 | ||
1416 | err: | 1874 | return status; |
1875 | |||
1876 | err_unlock: | ||
1417 | spin_unlock_bh(&adapter->mcc_lock); | 1877 | spin_unlock_bh(&adapter->mcc_lock); |
1418 | return status; | 1878 | return status; |
1419 | } | 1879 | } |
@@ -1679,6 +2139,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1679 | spin_lock_bh(&adapter->mcc_lock); | 2139 | spin_lock_bh(&adapter->mcc_lock); |
1680 | 2140 | ||
1681 | wrb = wrb_from_mccq(adapter); | 2141 | wrb = wrb_from_mccq(adapter); |
2142 | if (!wrb) { | ||
2143 | status = -EBUSY; | ||
2144 | goto err; | ||
2145 | } | ||
1682 | req = nonemb_cmd->va; | 2146 | req = nonemb_cmd->va; |
1683 | sge = nonembedded_sgl(wrb); | 2147 | sge = nonembedded_sgl(wrb); |
1684 | 2148 | ||
@@ -1694,6 +2158,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1694 | 2158 | ||
1695 | status = be_mcc_notify_wait(adapter); | 2159 | status = be_mcc_notify_wait(adapter); |
1696 | 2160 | ||
2161 | err: | ||
1697 | spin_unlock_bh(&adapter->mcc_lock); | 2162 | spin_unlock_bh(&adapter->mcc_lock); |
1698 | return status; | 2163 | return status; |
1699 | } | 2164 | } |
@@ -1756,8 +2221,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) | |||
1756 | OPCODE_COMMON_SET_QOS, sizeof(*req)); | 2221 | OPCODE_COMMON_SET_QOS, sizeof(*req)); |
1757 | 2222 | ||
1758 | req->hdr.domain = domain; | 2223 | req->hdr.domain = domain; |
1759 | req->valid_bits = BE_QOS_BITS_NIC; | 2224 | req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); |
1760 | req->max_bps_nic = bps; | 2225 | req->max_bps_nic = cpu_to_le32(bps); |
1761 | 2226 | ||
1762 | status = be_mcc_notify_wait(adapter); | 2227 | status = be_mcc_notify_wait(adapter); |
1763 | 2228 | ||
@@ -1765,3 +2230,96 @@ err: | |||
1765 | spin_unlock_bh(&adapter->mcc_lock); | 2230 | spin_unlock_bh(&adapter->mcc_lock); |
1766 | return status; | 2231 | return status; |
1767 | } | 2232 | } |
2233 | |||
2234 | int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | ||
2235 | { | ||
2236 | struct be_mcc_wrb *wrb; | ||
2237 | struct be_cmd_req_cntl_attribs *req; | ||
2238 | struct be_cmd_resp_cntl_attribs *resp; | ||
2239 | struct be_sge *sge; | ||
2240 | int status; | ||
2241 | int payload_len = max(sizeof(*req), sizeof(*resp)); | ||
2242 | struct mgmt_controller_attrib *attribs; | ||
2243 | struct be_dma_mem attribs_cmd; | ||
2244 | |||
2245 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | ||
2246 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | ||
2247 | attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, | ||
2248 | &attribs_cmd.dma); | ||
2249 | if (!attribs_cmd.va) { | ||
2250 | dev_err(&adapter->pdev->dev, | ||
2251 | "Memory allocation failure\n"); | ||
2252 | return -ENOMEM; | ||
2253 | } | ||
2254 | |||
2255 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | ||
2256 | return -1; | ||
2257 | |||
2258 | wrb = wrb_from_mbox(adapter); | ||
2259 | if (!wrb) { | ||
2260 | status = -EBUSY; | ||
2261 | goto err; | ||
2262 | } | ||
2263 | req = attribs_cmd.va; | ||
2264 | sge = nonembedded_sgl(wrb); | ||
2265 | |||
2266 | be_wrb_hdr_prepare(wrb, payload_len, false, 1, | ||
2267 | OPCODE_COMMON_GET_CNTL_ATTRIBUTES); | ||
2268 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
2269 | OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len); | ||
2270 | sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma)); | ||
2271 | sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF); | ||
2272 | sge->len = cpu_to_le32(attribs_cmd.size); | ||
2273 | |||
2274 | status = be_mbox_notify_wait(adapter); | ||
2275 | if (!status) { | ||
2276 | attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va + | ||
2277 | sizeof(struct be_cmd_resp_hdr)); | ||
2278 | adapter->hba_port_num = attribs->hba_attribs.phy_port; | ||
2279 | } | ||
2280 | |||
2281 | err: | ||
2282 | mutex_unlock(&adapter->mbox_lock); | ||
2283 | pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, | ||
2284 | attribs_cmd.dma); | ||
2285 | return status; | ||
2286 | } | ||
2287 | |||
2288 | /* Uses mbox */ | ||
2289 | int be_cmd_check_native_mode(struct be_adapter *adapter) | ||
2290 | { | ||
2291 | struct be_mcc_wrb *wrb; | ||
2292 | struct be_cmd_req_set_func_cap *req; | ||
2293 | int status; | ||
2294 | |||
2295 | if (mutex_lock_interruptible(&adapter->mbox_lock)) | ||
2296 | return -1; | ||
2297 | |||
2298 | wrb = wrb_from_mbox(adapter); | ||
2299 | if (!wrb) { | ||
2300 | status = -EBUSY; | ||
2301 | goto err; | ||
2302 | } | ||
2303 | |||
2304 | req = embedded_payload(wrb); | ||
2305 | |||
2306 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | ||
2307 | OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP); | ||
2308 | |||
2309 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | ||
2310 | OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req)); | ||
2311 | |||
2312 | req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | | ||
2313 | CAPABILITY_BE3_NATIVE_ERX_API); | ||
2314 | req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); | ||
2315 | |||
2316 | status = be_mbox_notify_wait(adapter); | ||
2317 | if (!status) { | ||
2318 | struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); | ||
2319 | adapter->be3_native = le32_to_cpu(resp->cap_flags) & | ||
2320 | CAPABILITY_BE3_NATIVE_ERX_API; | ||
2321 | } | ||
2322 | err: | ||
2323 | mutex_unlock(&adapter->mbox_lock); | ||
2324 | return status; | ||
2325 | } | ||