path: root/drivers/net/benet
Diffstat (limited to 'drivers/net/benet')
-rw-r--r--  drivers/net/benet/Kconfig       |    6
-rw-r--r--  drivers/net/benet/Makefile      |    7
-rw-r--r--  drivers/net/benet/be.h          |  532
-rw-r--r--  drivers/net/benet/be_cmds.c     | 2429
-rw-r--r--  drivers/net/benet/be_cmds.h     | 1551
-rw-r--r--  drivers/net/benet/be_ethtool.c  |  746
-rw-r--r--  drivers/net/benet/be_hw.h       |  503
-rw-r--r--  drivers/net/benet/be_main.c     | 3676
8 files changed, 9450 insertions, 0 deletions
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
new file mode 100644
index 00000000000..1a41a49bb61
--- /dev/null
+++ b/drivers/net/benet/Kconfig
@@ -0,0 +1,6 @@
config BE2NET
	tristate "ServerEngines' 10Gbps NIC - BladeEngine"
	depends on PCI && INET
	help
	  This driver implements the NIC functionality for ServerEngines'
	  10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
new file mode 100644
index 00000000000..a60cd805113
--- /dev/null
+++ b/drivers/net/benet/Makefile
@@ -0,0 +1,7 @@
#
# Makefile to build the network driver for ServerEngines' BladeEngine.
#

obj-$(CONFIG_BE2NET) += be2net.o

be2net-y := be_main.o be_cmds.o be_ethtool.o
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
new file mode 100644
index 00000000000..c85768cd1b1
--- /dev/null
+++ b/drivers/net/benet/be.h
@@ -0,0 +1,532 @@
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>

#include "be_hw.h"

#define DRV_VER			"4.0.100u"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE		OC_NAME "(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device id for VF in Lancer */

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define MAX_RSS_QS		4	/* BE limit is 4 queues/port */
#define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
#define MAX_TX_QS		8
#define BE_MAX_MSIX_VECTORS	(MAX_RX_QS + 1)	/* RX + TX */
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT	/* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define FW_VER_LEN		32

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
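
/*
 * Illustrative note (not part of the original patch): queue lengths here
 * are powers of two, so MODULO() reduces to a bit-mask and the index
 * helpers above wrap around without a division.  For example, with
 * len = 1024:
 *
 *	u16 head = 1023;
 *	index_inc(&head, 1024);		head becomes (1024 & 1023) == 0
 *
 * A non-power-of-two limit would trip the BUG_ON(limit & (limit - 1)).
 */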

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */
	u8 eq_idx;

	struct napi_struct napi;
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u32 be_tx_reqs;		/* number of TX requests initiated */
	u32 be_tx_stops;	/* number of times TX Q was stopped */
	u32 be_tx_wrbs;		/* number of tx WRBs used */
	u32 be_tx_compl;	/* number of tx completion entries processed */
	ulong be_tx_jiffies;
	u64 be_tx_bytes;
	u64 be_tx_bytes_prev;
	u64 be_tx_pkts;
	u32 be_tx_rate;
};

struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_page_user;
};

struct be_rx_stats {
	u32 rx_post_fail;	/* number of ethrx buffer alloc failures */
	u32 rx_polls;		/* number of times NAPI called poll function */
	u32 rx_events;		/* number of ucast rx completion events */
	u32 rx_compl;		/* number of rx completion entries processed */
	ulong rx_dropped;	/* number of skb allocation errors */
	ulong rx_jiffies;
	u64 rx_bytes;
	u64 rx_bytes_prev;
	u64 rx_pkts;
	u32 rx_rate;
	u32 rx_mcast_pkts;
	u32 rxcp_err;		/* Num rx completion entries w/ err set. */
	ulong rx_fps_jiffies;	/* jiffies at last FPS calc */
	u32 rx_frags;
	u32 prev_rx_frags;
	u32 rx_fps;		/* Rx frags per second */
};

struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 rxq_idx;
	u16 mac_id;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 vtm;
	u8 pkt_type;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_eq_obj rx_eq;
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
	u32 cache_line_barrier[16];
};

struct be_drv_stats {
	u8 be_on_die_temperature;
	u64 be_tx_events;
	u64 eth_red_drops;
	u64 rx_drops_no_pbuf;
	u64 rx_drops_no_txpb;
	u64 rx_drops_no_erx_descr;
	u64 rx_drops_no_tpre_descr;
	u64 rx_drops_too_many_frags;
	u64 rx_drops_invalid_ring;
	u64 forwarded_packets;
	u64 rx_drops_mtu;
	u64 rx_crc_errors;
	u64 rx_alignment_symbol_errors;
	u64 rx_pause_frames;
	u64 rx_priority_pause_frames;
	u64 rx_control_frames;
	u64 rx_in_range_errors;
	u64 rx_out_range_errors;
	u64 rx_frame_too_long;
	u64 rx_address_match_errors;
	u64 rx_dropped_too_small;
	u64 rx_dropped_too_short;
	u64 rx_dropped_header_too_small;
	u64 rx_dropped_tcp_length;
	u64 rx_dropped_runt;
	u64 rx_ip_checksum_errs;
	u64 rx_tcp_checksum_errs;
	u64 rx_udp_checksum_errs;
	u64 rx_switched_unicast_packets;
	u64 rx_switched_multicast_packets;
	u64 rx_switched_broadcast_packets;
	u64 tx_pauseframes;
	u64 tx_priority_pauseframes;
	u64 tx_controlframes;
	u64 rxpp_fifo_overflow_drop;
	u64 rx_input_fifo_overflow_drop;
	u64 pmem_fifo_overflow_drop;
	u64 jabber_events;
};

struct be_vf_cfg {
	unsigned char vf_mac_addr[ETH_ALEN];
	u32 vf_if_handle;
	u32 vf_pmac_id;
	u16 vf_vlan_tag;
	u32 vf_tx_rate;
};

#define BE_INVALID_PMAC_ID	0xffffffff

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */

	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The original allocated
	 * address is stored so it can be freed later */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	u32 num_msix_vec;
	bool isr_registered;

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj[MAX_TX_QS];
	u8 num_tx_qs;

	u32 cache_line_break[8];

	/* Rx rings */
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 num_rx_qs;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	u8 eq_next_idx;
	struct be_drv_stats drv_stats;

	u16 vlans_added;
	u16 max_vlans;		/* Number of vlans supported */
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem mc_cmd_mem;

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	u32 if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_err;
	bool link_up;
	u32 port_num;
	bool promiscuous;
	bool wol;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool ue_detected;
	bool stats_cmd_sent;
	int link_speed;
	u8 port_type;
	u8 transceiver;
	u8 autoneg;
	u8 generation;		/* BladeEngine ASIC generation */
	u32 flash_status;
	struct completion flash_compl;

	bool be3_native;
	bool sriov_enabled;
	struct be_vf_cfg *vf_cfg;
	u8 is_virtfn;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
};

#define be_physfn(adapter)	(!adapter->is_virtfn)

/* BladeEngine Generation numbers */
#define BE_GEN2	2
#define BE_GEN3	3

#define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
				 (adapter->pdev->device == OC_DEVICE_ID4))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)	(adapter->num_msix_vec > 0)
#define tx_stats(txo)		(&txo->stats)
#define rx_stats(rxo)		(&rxo->stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Just skip the first default non-rss queue */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
	((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +		\
		(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
	((size_t)(addr) & (PAGE_SIZE_4K - 1))
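
/*
 * Worked example (illustrative, not from the original patch): for a
 * buffer that starts at offset 0x800 into a 4K page and is 0x2000 bytes
 * long, PAGES_4K_SPANNED() computes
 *	(0x800 + 0x2000 + 0xFFF) >> 12  ==  0x37FF >> 12  ==  3
 * i.e. the 8KB buffer straddles three 4K pages.
 */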

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
	(((size_t)&(((_struct *)0)->field)) % 32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
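
/*
 * Usage sketch (illustrative, not from the original patch): the amap
 * structs in be_hw.h declare one u8 per bit, so sizeof(field) is the
 * field width in bits and offsetof(field) its bit offset; dividing the
 * offset by 32 picks the dword.  A field is then set or read with a
 * masked read-modify-write of that dword:
 *
 *	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
 *	if (AMAP_GET_BITS(struct amap_eq_context, valid, req->context))
 *		...
 */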

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif /* __BIG_ENDIAN */
}

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}
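
/*
 * Editorial note: reading ip_hdr(skb)->version in both branches above is
 * safe because the 4-bit version field occupies the same leading bits of
 * the first header byte in IPv4 and IPv6; only after checking it is the
 * buffer interpreted as one header type or the other.
 */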

static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
}

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif				/* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
new file mode 100644
index 00000000000..054fa67bc4e
--- /dev/null
+++ b/drivers/net/benet/be_cmds.c
@@ -0,0 +1,2429 @@
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"

/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 32;

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error detected in card; cannot issue commands\n");
		return;
	}

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
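
/*
 * Editorial note: the test above deliberately looks at the whole flags
 * word rather than a single bit -- a consumed entry is all-zero in any
 * endianness, so "flags != 0" reliably means "new entry", after which
 * the word is swapped to host order exactly once.
 */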

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
		     (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			if (adapter->generation == BE_GEN3) {
				if (lancer_chip(adapter)) {
					struct lancer_cmd_resp_pport_stats
						*resp = adapter->stats_cmd.va;
					be_dws_le_to_cpu(&resp->pport_stats,
						sizeof(resp->pport_stats));
				} else {
					struct be_cmd_resp_get_stats_v1 *resp =
						adapter->stats_cmd.va;

					be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
				}
			} else {
				struct be_cmd_resp_get_stats_v0 *resp =
					adapter->stats_cmd.va;

				be_dws_le_to_cpu(&resp->hw_stats,
					sizeof(resp->hw_stats));
			}
			be_parse_stats(adapter);
			netdev_stats_update(adapter);
			adapter->stats_cmd_sent = false;
		}
	} else {
		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
				"permitted to execute this cmd (opcode %d)\n",
				compl->tag0);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
				"status %d, extd-status %d\n",
				compl->tag0, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag);
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card; cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			if (!lancer_chip(adapter))
				be_detect_dump_ue(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
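
/*
 * Worked example (illustrative, not from the original patch): the two
 * doorbell writes above hand over the 16-byte-aligned mailbox address in
 * halves.  The first write (with MPU_MAILBOX_DB_HI_MASK set) carries dma
 * bits 34-63 in doorbell bits 2-31; once the ready bit is seen again,
 * the second write carries dma bits 4-33 in the same bit range.  Bits
 * 0-3 of the address are implicitly zero thanks to the alignment.
 */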

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
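
/*
 * Worked example (illustrative, not from the original patch): for an
 * 8KB, page-aligned ring at dma address 0x10000, the loop above fills
 *	pages[0] = { .lo = 0x10000, .hi = 0 }
 *	pages[1] = { .lo = 0x11000, .hi = 0 }
 * (little-endian on the wire), one entry per 4K page of the ring.
 */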

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
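
/*
 * Worked example (illustrative, not from the original patch): for a
 * requested delay of 96us, interrupt_rate = 1000000 / 96 = 10416, so
 *	multiplier = (651042 - 10416) * 10 / 10416 = 615
 * which rounds to (615 + 5) / 10 = 62.  A delay of 0 disables the
 * delay (multiplier 0), and very low interrupt rates saturate at 1023.
 */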

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
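
/*
 * Worked example (illustrative, not from the original patch): ring
 * lengths are powers of two, so fls() yields log2(len) + 1.  A 256-entry
 * queue encodes as fls(256) = 9, while the largest supported size
 * (fls() == 16, i.e. 32K entries) is encoded as the special value 0.
 */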
811
812int be_cmd_mccq_ext_create(struct be_adapter *adapter,
813 struct be_queue_info *mccq,
814 struct be_queue_info *cq)
815{
816 struct be_mcc_wrb *wrb;
817 struct be_cmd_req_mcc_ext_create *req;
818 struct be_dma_mem *q_mem = &mccq->dma_mem;
819 void *ctxt;
820 int status;
821
822 if (mutex_lock_interruptible(&adapter->mbox_lock))
823 return -1;
824
825 wrb = wrb_from_mbox(adapter);
826 req = embedded_payload(wrb);
827 ctxt = &req->context;
828
829 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
830 OPCODE_COMMON_MCC_CREATE_EXT);
831
832 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
833 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
834
835 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
836 if (lancer_chip(adapter)) {
837 req->hdr.version = 1;
838 req->cq_id = cpu_to_le16(cq->id);
839
840 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
841 be_encoded_q_len(mccq->len));
842 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
843 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
844 ctxt, cq->id);
845 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
846 ctxt, 1);
847
848 } else {
849 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
850 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
851 be_encoded_q_len(mccq->len));
852 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
853 }
854
855 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
856 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
857 be_dws_cpu_to_le(ctxt, sizeof(req->context));
858
859 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
860
861 status = be_mbox_notify_wait(adapter);
862 if (!status) {
863 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
864 mccq->id = le16_to_cpu(resp->id);
865 mccq->created = true;
866 }
867 mutex_unlock(&adapter->mbox_lock);
868
869 return status;
870}
871
872int be_cmd_mccq_org_create(struct be_adapter *adapter,
873 struct be_queue_info *mccq,
874 struct be_queue_info *cq)
875{
876 struct be_mcc_wrb *wrb;
877 struct be_cmd_req_mcc_create *req;
878 struct be_dma_mem *q_mem = &mccq->dma_mem;
879 void *ctxt;
880 int status;
881
882 if (mutex_lock_interruptible(&adapter->mbox_lock))
883 return -1;
884
885 wrb = wrb_from_mbox(adapter);
886 req = embedded_payload(wrb);
887 ctxt = &req->context;
888
889 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
890 OPCODE_COMMON_MCC_CREATE);
891
892 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
893 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
894
895 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
896
897 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
898 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
899 be_encoded_q_len(mccq->len));
900 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
901
902 be_dws_cpu_to_le(ctxt, sizeof(req->context));
903
904 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
905
906 status = be_mbox_notify_wait(adapter);
907 if (!status) {
908 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
909 mccq->id = le16_to_cpu(resp->id);
910 mccq->created = true;
911 }
912
913 mutex_unlock(&adapter->mbox_lock);
914 return status;
915}
916
917int be_cmd_mccq_create(struct be_adapter *adapter,
918 struct be_queue_info *mccq,
919 struct be_queue_info *cq)
920{
921 int status;
922
923 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
924 if (status && !lancer_chip(adapter)) {
925 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
926 "or newer to avoid conflicting priorities between NIC "
927 "and FCoE traffic");
928 status = be_cmd_mccq_org_create(adapter, mccq, cq);
929 }
930 return status;
931}
932
933int be_cmd_txq_create(struct be_adapter *adapter,
934 struct be_queue_info *txq,
935 struct be_queue_info *cq)
936{
937 struct be_mcc_wrb *wrb;
938 struct be_cmd_req_eth_tx_create *req;
939 struct be_dma_mem *q_mem = &txq->dma_mem;
940 void *ctxt;
941 int status;
942
943 if (mutex_lock_interruptible(&adapter->mbox_lock))
944 return -1;
945
946 wrb = wrb_from_mbox(adapter);
947 req = embedded_payload(wrb);
948 ctxt = &req->context;
949
950 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
951 OPCODE_ETH_TX_CREATE);
952
953 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
954 sizeof(*req));
955
956 if (lancer_chip(adapter)) {
957 req->hdr.version = 1;
958 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
959 adapter->if_handle);
960 }
961
962 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
963 req->ulp_num = BE_ULP1_NUM;
964 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
965
966 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
967 be_encoded_q_len(txq->len));
968 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
969 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
970
971 be_dws_cpu_to_le(ctxt, sizeof(req->context));
972
973 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
974
975 status = be_mbox_notify_wait(adapter);
976 if (!status) {
977 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
978 txq->id = le16_to_cpu(resp->cid);
979 txq->created = true;
980 }
981
982 mutex_unlock(&adapter->mbox_lock);
983
984 return status;
985}
986
987/* Uses MCC */
988int be_cmd_rxq_create(struct be_adapter *adapter,
989 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
990 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
991{
992 struct be_mcc_wrb *wrb;
993 struct be_cmd_req_eth_rx_create *req;
994 struct be_dma_mem *q_mem = &rxq->dma_mem;
995 int status;
996
997 spin_lock_bh(&adapter->mcc_lock);
998
999 wrb = wrb_from_mccq(adapter);
1000 if (!wrb) {
1001 status = -EBUSY;
1002 goto err;
1003 }
1004 req = embedded_payload(wrb);
1005
1006 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1007 OPCODE_ETH_RX_CREATE);
1008
1009 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
1010 sizeof(*req));
1011
1012 req->cq_id = cpu_to_le16(cq_id);
1013 req->frag_size = fls(frag_size) - 1;
1014 req->num_pages = 2;
1015 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1016 req->interface_id = cpu_to_le32(if_id);
1017 req->max_frame_size = cpu_to_le16(max_frame_size);
1018 req->rss_queue = cpu_to_le32(rss);
1019
1020 status = be_mcc_notify_wait(adapter);
1021 if (!status) {
1022 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1023 rxq->id = le16_to_cpu(resp->id);
1024 rxq->created = true;
1025 *rss_id = resp->rss_id;
1026 }
1027
1028err:
1029 spin_unlock_bh(&adapter->mcc_lock);
1030 return status;
1031}
1032
1033/* Generic destroyer function for all types of queues
1034 * Uses Mbox
1035 */
1036int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1037 int queue_type)
1038{
1039 struct be_mcc_wrb *wrb;
1040 struct be_cmd_req_q_destroy *req;
1041 u8 subsys = 0, opcode = 0;
1042 int status;
1043
1044 if (adapter->eeh_err)
1045 return -EIO;
1046
1047 if (mutex_lock_interruptible(&adapter->mbox_lock))
1048 return -1;
1049
1050 wrb = wrb_from_mbox(adapter);
1051 req = embedded_payload(wrb);
1052
1053 switch (queue_type) {
1054 case QTYPE_EQ:
1055 subsys = CMD_SUBSYSTEM_COMMON;
1056 opcode = OPCODE_COMMON_EQ_DESTROY;
1057 break;
1058 case QTYPE_CQ:
1059 subsys = CMD_SUBSYSTEM_COMMON;
1060 opcode = OPCODE_COMMON_CQ_DESTROY;
1061 break;
1062 case QTYPE_TXQ:
1063 subsys = CMD_SUBSYSTEM_ETH;
1064 opcode = OPCODE_ETH_TX_DESTROY;
1065 break;
1066 case QTYPE_RXQ:
1067 subsys = CMD_SUBSYSTEM_ETH;
1068 opcode = OPCODE_ETH_RX_DESTROY;
1069 break;
1070 case QTYPE_MCCQ:
1071 subsys = CMD_SUBSYSTEM_COMMON;
1072 opcode = OPCODE_COMMON_MCC_DESTROY;
1073 break;
1074 default:
1075 BUG();
1076 }
1077
1078 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
1079
1080 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
1081 req->id = cpu_to_le16(q->id);
1082
1083 status = be_mbox_notify_wait(adapter);
1084 if (!status)
1085 q->created = false;
1086
1087 mutex_unlock(&adapter->mbox_lock);
1088 return status;
1089}
1090
1091/* Uses MCC */
1092int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1093{
1094 struct be_mcc_wrb *wrb;
1095 struct be_cmd_req_q_destroy *req;
1096 int status;
1097
1098 spin_lock_bh(&adapter->mcc_lock);
1099
1100 wrb = wrb_from_mccq(adapter);
1101 if (!wrb) {
1102 status = -EBUSY;
1103 goto err;
1104 }
1105 req = embedded_payload(wrb);
1106
1107 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
1108 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
1109 sizeof(*req));
1110 req->id = cpu_to_le16(q->id);
1111
1112 status = be_mcc_notify_wait(adapter);
1113 if (!status)
1114 q->created = false;
1115
1116err:
1117 spin_unlock_bh(&adapter->mcc_lock);
1118 return status;
1119}
1120
1121/* Create an rx filtering policy configuration on an i/f
1122 * Uses mbox
1123 */
1124int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1125 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
1126 u32 domain)
1127{
1128 struct be_mcc_wrb *wrb;
1129 struct be_cmd_req_if_create *req;
1130 int status;
1131
1132 if (mutex_lock_interruptible(&adapter->mbox_lock))
1133 return -1;
1134
1135 wrb = wrb_from_mbox(adapter);
1136 req = embedded_payload(wrb);
1137
1138 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1139 OPCODE_COMMON_NTWK_INTERFACE_CREATE);
1140
1141 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1142 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
1143
1144 req->hdr.domain = domain;
1145 req->capability_flags = cpu_to_le32(cap_flags);
1146 req->enable_flags = cpu_to_le32(en_flags);
1147 req->pmac_invalid = pmac_invalid;
1148 if (!pmac_invalid)
1149 memcpy(req->mac_addr, mac, ETH_ALEN);
1150
1151 status = be_mbox_notify_wait(adapter);
1152 if (!status) {
1153 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1154 *if_handle = le32_to_cpu(resp->interface_id);
1155 if (!pmac_invalid)
1156 *pmac_id = le32_to_cpu(resp->pmac_id);
1157 }
1158
1159 mutex_unlock(&adapter->mbox_lock);
1160 return status;
1161}
1162
1163/* Uses mbox */
1164int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
1165{
1166 struct be_mcc_wrb *wrb;
1167 struct be_cmd_req_if_destroy *req;
1168 int status;
1169
1170 if (adapter->eeh_err)
1171 return -EIO;
1172
1173 if (mutex_lock_interruptible(&adapter->mbox_lock))
1174 return -1;
1175
1176 wrb = wrb_from_mbox(adapter);
1177 req = embedded_payload(wrb);
1178
1179 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1180 OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1181
1182 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1183 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1184
1185 req->hdr.domain = domain;
1186 req->interface_id = cpu_to_le32(interface_id);
1187
1188 status = be_mbox_notify_wait(adapter);
1189
1190 mutex_unlock(&adapter->mbox_lock);
1191
1192 return status;
1193}
1194
1195/* Get stats is a non embedded command: the request is not embedded inside
1196 * WRB but is a separate dma memory block
1197 * Uses asynchronous MCC
1198 */
1199int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1200{
1201 struct be_mcc_wrb *wrb;
1202 struct be_cmd_req_hdr *hdr;
1203 struct be_sge *sge;
1204 int status = 0;
1205
1206 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1207 be_cmd_get_die_temperature(adapter);
1208
1209 spin_lock_bh(&adapter->mcc_lock);
1210
1211 wrb = wrb_from_mccq(adapter);
1212 if (!wrb) {
1213 status = -EBUSY;
1214 goto err;
1215 }
1216 hdr = nonemb_cmd->va;
1217 sge = nonembedded_sgl(wrb);
1218
1219 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1220 OPCODE_ETH_GET_STATISTICS);
1221
1222 be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1223 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
1224
1225 if (adapter->generation == BE_GEN3)
1226 hdr->version = 1;
1227
1228 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1229 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1230 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1231 sge->len = cpu_to_le32(nonemb_cmd->size);
1232
1233 be_mcc_notify(adapter);
1234 adapter->stats_cmd_sent = true;
1235
1236err:
1237 spin_unlock_bh(&adapter->mcc_lock);
1238 return status;
1239}
1240
1241/* Lancer Stats */
1242int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1243 struct be_dma_mem *nonemb_cmd)
1244{
1245
1246 struct be_mcc_wrb *wrb;
1247 struct lancer_cmd_req_pport_stats *req;
1248 struct be_sge *sge;
1249 int status = 0;
1250
1251 spin_lock_bh(&adapter->mcc_lock);
1252
1253 wrb = wrb_from_mccq(adapter);
1254 if (!wrb) {
1255 status = -EBUSY;
1256 goto err;
1257 }
1258 req = nonemb_cmd->va;
1259 sge = nonembedded_sgl(wrb);
1260
1261 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1262 OPCODE_ETH_GET_PPORT_STATS);
1263
1264 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1265 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
1266
1267
1268 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1269 req->cmd_params.params.reset_stats = 0;
1270
1271 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1272 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1273 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1274 sge->len = cpu_to_le32(nonemb_cmd->size);
1275
1276 be_mcc_notify(adapter);
1277 adapter->stats_cmd_sent = true;
1278
1279err:
1280 spin_unlock_bh(&adapter->mcc_lock);
1281 return status;
1282}
1283
1284/* Uses synchronous mcc */
1285int be_cmd_link_status_query(struct be_adapter *adapter,
1286 bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
1287{
1288 struct be_mcc_wrb *wrb;
1289 struct be_cmd_req_link_status *req;
1290 int status;
1291
1292 spin_lock_bh(&adapter->mcc_lock);
1293
1294 wrb = wrb_from_mccq(adapter);
1295 if (!wrb) {
1296 status = -EBUSY;
1297 goto err;
1298 }
1299 req = embedded_payload(wrb);
1300
1301 *link_up = false;
1302
1303 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1304 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
1305
1306 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1307 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
1308
1309 status = be_mcc_notify_wait(adapter);
1310 if (!status) {
1311 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1312 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1313 *link_up = true;
1314 *link_speed = le16_to_cpu(resp->link_speed);
1315 *mac_speed = resp->mac_speed;
1316 }
1317 }
1318
1319err:
1320 spin_unlock_bh(&adapter->mcc_lock);
1321 return status;
1322}
1323
1324/* Uses synchronous mcc */
1325int be_cmd_get_die_temperature(struct be_adapter *adapter)
1326{
1327 struct be_mcc_wrb *wrb;
1328 struct be_cmd_req_get_cntl_addnl_attribs *req;
1329 int status;
1330
1331 spin_lock_bh(&adapter->mcc_lock);
1332
1333 wrb = wrb_from_mccq(adapter);
1334 if (!wrb) {
1335 status = -EBUSY;
1336 goto err;
1337 }
1338 req = embedded_payload(wrb);
1339
1340 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1341 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1342
1343 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1344 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1345
1346 status = be_mcc_notify_wait(adapter);
1347 if (!status) {
1348 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1349 embedded_payload(wrb);
1350 adapter->drv_stats.be_on_die_temperature =
1351 resp->on_die_temperature;
1352 }
1353 /* If IOCTL fails once, do not bother issuing it again */
1354 else
1355 be_get_temp_freq = 0;
1356
1357err:
1358 spin_unlock_bh(&adapter->mcc_lock);
1359 return status;
1360}
1361
1362/* Uses synchronous mcc */
1363int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1364{
1365 struct be_mcc_wrb *wrb;
1366 struct be_cmd_req_get_fat *req;
1367 int status;
1368
1369 spin_lock_bh(&adapter->mcc_lock);
1370
1371 wrb = wrb_from_mccq(adapter);
1372 if (!wrb) {
1373 status = -EBUSY;
1374 goto err;
1375 }
1376 req = embedded_payload(wrb);
1377
1378 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1379 OPCODE_COMMON_MANAGE_FAT);
1380
1381 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1382 OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
1383 req->fat_operation = cpu_to_le32(QUERY_FAT);
1384 status = be_mcc_notify_wait(adapter);
1385 if (!status) {
1386 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1387 if (log_size && resp->log_size)
1388 *log_size = le32_to_cpu(resp->log_size) -
1389 sizeof(u32);
1390 }
1391err:
1392 spin_unlock_bh(&adapter->mcc_lock);
1393 return status;
1394}
1395
1396void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1397{
1398 struct be_dma_mem get_fat_cmd;
1399 struct be_mcc_wrb *wrb;
1400 struct be_cmd_req_get_fat *req;
1401 struct be_sge *sge;
1402 u32 offset = 0, total_size, buf_size,
1403 log_offset = sizeof(u32), payload_len;
1404 int status;
1405
1406 if (buf_len == 0)
1407 return;
1408
1409 total_size = buf_len;
1410
1411 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1412 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1413 get_fat_cmd.size,
1414 &get_fat_cmd.dma);
1415 if (!get_fat_cmd.va) {
1416 status = -ENOMEM;
1417 dev_err(&adapter->pdev->dev,
1418 "Memory allocation failure while retrieving FAT data\n");
1419 return;
1420 }
1421
1422 spin_lock_bh(&adapter->mcc_lock);
1423
1424 while (total_size) {
1425 buf_size = min(total_size, (u32)60*1024);
1426 total_size -= buf_size;
1427
1428 wrb = wrb_from_mccq(adapter);
1429 if (!wrb) {
1430 status = -EBUSY;
1431 goto err;
1432 }
1433 req = get_fat_cmd.va;
1434 sge = nonembedded_sgl(wrb);
1435
1436 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1437 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1438 OPCODE_COMMON_MANAGE_FAT);
1439
1440 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1441 OPCODE_COMMON_MANAGE_FAT, payload_len);
1442
1443 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1444 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1445 sge->len = cpu_to_le32(get_fat_cmd.size);
1446
1447 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1448 req->read_log_offset = cpu_to_le32(log_offset);
1449 req->read_log_length = cpu_to_le32(buf_size);
1450 req->data_buffer_size = cpu_to_le32(buf_size);
1451
1452 status = be_mcc_notify_wait(adapter);
1453 if (!status) {
1454 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1455 memcpy(buf + offset,
1456 resp->data_buffer,
1457 resp->read_log_length);
1458 } else {
1459			dev_err(&adapter->pdev->dev, "FAT table retrieval failed\n");
1460 goto err;
1461 }
1462 offset += buf_size;
1463 log_offset += buf_size;
1464 }
1465err:
1466 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1467 get_fat_cmd.va,
1468 get_fat_cmd.dma);
1469 spin_unlock_bh(&adapter->mcc_lock);
1470}
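
/* Hedged caller sketch: size the destination buffer with
 * be_cmd_get_reg_len() and then let be_cmd_get_regs() drain the FAT log
 * in 60KB chunks, as an ethtool get-regs handler might. kzalloc/kfree
 * here are illustrative.
 */
static void be_dump_fat_example(struct be_adapter *adapter)
{
	u32 log_size = 0;
	void *buf;

	if (be_cmd_get_reg_len(adapter, &log_size) || !log_size)
		return;
	buf = kzalloc(log_size, GFP_KERNEL);
	if (!buf)
		return;
	be_cmd_get_regs(adapter, log_size, buf);
	/* ... hand buf to ethtool/debugfs ... */
	kfree(buf);
}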
1471
1472/* Uses Mbox */
1473int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1474{
1475 struct be_mcc_wrb *wrb;
1476 struct be_cmd_req_get_fw_version *req;
1477 int status;
1478
1479 if (mutex_lock_interruptible(&adapter->mbox_lock))
1480 return -1;
1481
1482 wrb = wrb_from_mbox(adapter);
1483 req = embedded_payload(wrb);
1484
1485 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1486 OPCODE_COMMON_GET_FW_VERSION);
1487
1488 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1489 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1490
1491 status = be_mbox_notify_wait(adapter);
1492 if (!status) {
1493 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1494 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1495 }
1496
1497 mutex_unlock(&adapter->mbox_lock);
1498 return status;
1499}
1500
1501/* Set the EQ delay interval of an EQ to the specified value
1502 * Uses async mcc
1503 */
1504int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1505{
1506 struct be_mcc_wrb *wrb;
1507 struct be_cmd_req_modify_eq_delay *req;
1508 int status = 0;
1509
1510 spin_lock_bh(&adapter->mcc_lock);
1511
1512 wrb = wrb_from_mccq(adapter);
1513 if (!wrb) {
1514 status = -EBUSY;
1515 goto err;
1516 }
1517 req = embedded_payload(wrb);
1518
1519 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1520 OPCODE_COMMON_MODIFY_EQ_DELAY);
1521
1522 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1523 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1524
1525 req->num_eq = cpu_to_le32(1);
1526 req->delay[0].eq_id = cpu_to_le32(eq_id);
1527 req->delay[0].phase = 0;
1528 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1529
1530 be_mcc_notify(adapter);
1531
1532err:
1533 spin_unlock_bh(&adapter->mcc_lock);
1534 return status;
1535}
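
/* Hedged sketch of adaptive interrupt moderation built on the
 * fire-and-forget command above: recompute the delay multiplier from the
 * observed event rate and notify the firmware only when it changes. The
 * scaling divisor and the 96 cap are illustrative assumptions.
 */
static void be_eqd_update_example(struct be_adapter *adapter, u32 eq_id,
			u32 *cur_eqd, u32 events_per_sec)
{
	u32 new_eqd = min(events_per_sec / 1000, 96U);

	if (new_eqd != *cur_eqd) {
		be_cmd_modify_eqd(adapter, eq_id, new_eqd);
		*cur_eqd = new_eqd;
	}
}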
1536
1537/* Uses synchronous mcc */
1538int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1539 u32 num, bool untagged, bool promiscuous)
1540{
1541 struct be_mcc_wrb *wrb;
1542 struct be_cmd_req_vlan_config *req;
1543 int status;
1544
1545 spin_lock_bh(&adapter->mcc_lock);
1546
1547 wrb = wrb_from_mccq(adapter);
1548 if (!wrb) {
1549 status = -EBUSY;
1550 goto err;
1551 }
1552 req = embedded_payload(wrb);
1553
1554 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1555 OPCODE_COMMON_NTWK_VLAN_CONFIG);
1556
1557 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1558 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
1559
1560 req->interface_id = if_id;
1561 req->promiscuous = promiscuous;
1562 req->untagged = untagged;
1563 req->num_vlan = num;
1564 if (!promiscuous) {
1565 memcpy(req->normal_vlan, vtag_array,
1566 req->num_vlan * sizeof(vtag_array[0]));
1567 }
1568
1569 status = be_mcc_notify_wait(adapter);
1570
1571err:
1572 spin_unlock_bh(&adapter->mcc_lock);
1573 return status;
1574}
1575
1576/* Uses MCC for this command as it may be called in BH context
1577 * Uses synchronous mcc
1578 */
1579int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
1580{
1581 struct be_mcc_wrb *wrb;
1582 struct be_cmd_req_rx_filter *req;
1583	struct be_dma_mem promiscuous_cmd;
1584 struct be_sge *sge;
1585 int status;
1586
1587	memset(&promiscuous_cmd, 0, sizeof(struct be_dma_mem));
1588	promiscuous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
1589	promiscuous_cmd.va = pci_alloc_consistent(adapter->pdev,
1590			promiscuous_cmd.size, &promiscuous_cmd.dma);
1591	if (!promiscuous_cmd.va) {
1592 dev_err(&adapter->pdev->dev,
1593 "Memory allocation failure\n");
1594 return -ENOMEM;
1595 }
1596
1597 spin_lock_bh(&adapter->mcc_lock);
1598
1599 wrb = wrb_from_mccq(adapter);
1600 if (!wrb) {
1601 status = -EBUSY;
1602 goto err;
1603 }
1604
1605	req = promiscuous_cmd.va;
1606 sge = nonembedded_sgl(wrb);
1607
1608 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1609 OPCODE_COMMON_NTWK_RX_FILTER);
1610 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1611 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
1612
1613 req->if_id = cpu_to_le32(adapter->if_handle);
1614 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
1615 if (en)
1616 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
1617
1618	sge->pa_hi = cpu_to_le32(upper_32_bits(promiscuous_cmd.dma));
1619	sge->pa_lo = cpu_to_le32(promiscuous_cmd.dma & 0xFFFFFFFF);
1620	sge->len = cpu_to_le32(promiscuous_cmd.size);
1621
1622 status = be_mcc_notify_wait(adapter);
1623
1624err:
1625 spin_unlock_bh(&adapter->mcc_lock);
1626	pci_free_consistent(adapter->pdev, promiscuous_cmd.size,
1627			promiscuous_cmd.va, promiscuous_cmd.dma);
1628 return status;
1629}
1630
1631/*
1632 * Uses MCC for this command as it may be called in BH context
1633 * (netdev == NULL) => multicast promiscuous
1634 */
1635int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1636 struct net_device *netdev, struct be_dma_mem *mem)
1637{
1638 struct be_mcc_wrb *wrb;
1639 struct be_cmd_req_mcast_mac_config *req = mem->va;
1640 struct be_sge *sge;
1641 int status;
1642
1643 spin_lock_bh(&adapter->mcc_lock);
1644
1645 wrb = wrb_from_mccq(adapter);
1646 if (!wrb) {
1647 status = -EBUSY;
1648 goto err;
1649 }
1650 sge = nonembedded_sgl(wrb);
1651 memset(req, 0, sizeof(*req));
1652
1653 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1654 OPCODE_COMMON_NTWK_MULTICAST_SET);
1655 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1656 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1657 sge->len = cpu_to_le32(mem->size);
1658
1659 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1660 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1661
1662 req->interface_id = if_id;
1663 if (netdev) {
1664 int i;
1665 struct netdev_hw_addr *ha;
1666
1667 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1668
1669 i = 0;
1670 netdev_for_each_mc_addr(ha, netdev)
1671 memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
1672 } else {
1673 req->promiscuous = 1;
1674 }
1675
1676 status = be_mcc_notify_wait(adapter);
1677
1678err:
1679 spin_unlock_bh(&adapter->mcc_lock);
1680 return status;
1681}
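
/* Hedged caller sketch: honor the BE_MAX_MC limit (see be_cmds.h) by
 * passing a NULL netdev, i.e. multicast-promiscuous mode, when the
 * device's multicast list overflows the command's mac[] table.
 */
static int be_set_mc_list_example(struct be_adapter *adapter, u32 if_id,
			struct net_device *netdev, struct be_dma_mem *mem)
{
	if (netdev_mc_count(netdev) > BE_MAX_MC)
		return be_cmd_multicast_set(adapter, if_id, NULL, mem);
	return be_cmd_multicast_set(adapter, if_id, netdev, mem);
}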
1682
1683/* Uses synchronous mcc */
1684int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1685{
1686 struct be_mcc_wrb *wrb;
1687 struct be_cmd_req_set_flow_control *req;
1688 int status;
1689
1690 spin_lock_bh(&adapter->mcc_lock);
1691
1692 wrb = wrb_from_mccq(adapter);
1693 if (!wrb) {
1694 status = -EBUSY;
1695 goto err;
1696 }
1697 req = embedded_payload(wrb);
1698
1699 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1700 OPCODE_COMMON_SET_FLOW_CONTROL);
1701
1702 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1703 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1704
1705 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1706 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1707
1708 status = be_mcc_notify_wait(adapter);
1709
1710err:
1711 spin_unlock_bh(&adapter->mcc_lock);
1712 return status;
1713}
1714
1715/* Uses sync mcc */
1716int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1717{
1718 struct be_mcc_wrb *wrb;
1719 struct be_cmd_req_get_flow_control *req;
1720 int status;
1721
1722 spin_lock_bh(&adapter->mcc_lock);
1723
1724 wrb = wrb_from_mccq(adapter);
1725 if (!wrb) {
1726 status = -EBUSY;
1727 goto err;
1728 }
1729 req = embedded_payload(wrb);
1730
1731 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1732 OPCODE_COMMON_GET_FLOW_CONTROL);
1733
1734 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1735 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1736
1737 status = be_mcc_notify_wait(adapter);
1738 if (!status) {
1739 struct be_cmd_resp_get_flow_control *resp =
1740 embedded_payload(wrb);
1741 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1742 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1743 }
1744
1745err:
1746 spin_unlock_bh(&adapter->mcc_lock);
1747 return status;
1748}
1749
1750/* Uses mbox */
1751int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1752 u32 *mode, u32 *caps)
1753{
1754 struct be_mcc_wrb *wrb;
1755 struct be_cmd_req_query_fw_cfg *req;
1756 int status;
1757
1758 if (mutex_lock_interruptible(&adapter->mbox_lock))
1759 return -1;
1760
1761 wrb = wrb_from_mbox(adapter);
1762 req = embedded_payload(wrb);
1763
1764 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1765 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
1766
1767 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1768 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1769
1770 status = be_mbox_notify_wait(adapter);
1771 if (!status) {
1772 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1773 *port_num = le32_to_cpu(resp->phys_port);
1774 *mode = le32_to_cpu(resp->function_mode);
1775 *caps = le32_to_cpu(resp->function_caps);
1776 }
1777
1778 mutex_unlock(&adapter->mbox_lock);
1779 return status;
1780}
1781
1782/* Uses mbox */
1783int be_cmd_reset_function(struct be_adapter *adapter)
1784{
1785 struct be_mcc_wrb *wrb;
1786 struct be_cmd_req_hdr *req;
1787 int status;
1788
1789 if (mutex_lock_interruptible(&adapter->mbox_lock))
1790 return -1;
1791
1792 wrb = wrb_from_mbox(adapter);
1793 req = embedded_payload(wrb);
1794
1795 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1796 OPCODE_COMMON_FUNCTION_RESET);
1797
1798 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1799 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1800
1801 status = be_mbox_notify_wait(adapter);
1802
1803 mutex_unlock(&adapter->mbox_lock);
1804 return status;
1805}
1806
1807int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1808{
1809 struct be_mcc_wrb *wrb;
1810 struct be_cmd_req_rss_config *req;
1811 u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
1812 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
1813 int status;
1814
1815 if (mutex_lock_interruptible(&adapter->mbox_lock))
1816 return -1;
1817
1818 wrb = wrb_from_mbox(adapter);
1819 req = embedded_payload(wrb);
1820
1821 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1822 OPCODE_ETH_RSS_CONFIG);
1823
1824 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1825 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1826
1827 req->if_id = cpu_to_le32(adapter->if_handle);
1828 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1829 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1830 memcpy(req->cpu_table, rsstable, table_size);
1831 memcpy(req->hash, myhash, sizeof(myhash));
1832 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1833
1834 status = be_mbox_notify_wait(adapter);
1835
1836 mutex_unlock(&adapter->mbox_lock);
1837 return status;
1838}
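
/* Hedged caller sketch: fill the 128-entry CPU table (the size of
 * cpu_table[] in be_cmd_req_rss_config) by round-robining the RSS-capable
 * rx rings; ring ids starting at 0 are an assumption.
 */
static int be_rss_setup_example(struct be_adapter *adapter, u16 num_rx_qs)
{
	u8 rsstable[128];
	u16 i;

	for (i = 0; i < sizeof(rsstable); i++)
		rsstable[i] = i % num_rx_qs;
	return be_cmd_rss_config(adapter, rsstable, sizeof(rsstable));
}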
1839
1840/* Uses sync mcc */
1841int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1842 u8 bcn, u8 sts, u8 state)
1843{
1844 struct be_mcc_wrb *wrb;
1845 struct be_cmd_req_enable_disable_beacon *req;
1846 int status;
1847
1848 spin_lock_bh(&adapter->mcc_lock);
1849
1850 wrb = wrb_from_mccq(adapter);
1851 if (!wrb) {
1852 status = -EBUSY;
1853 goto err;
1854 }
1855 req = embedded_payload(wrb);
1856
1857 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1858 OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1859
1860 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1861 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1862
1863 req->port_num = port_num;
1864 req->beacon_state = state;
1865 req->beacon_duration = bcn;
1866 req->status_duration = sts;
1867
1868 status = be_mcc_notify_wait(adapter);
1869
1870err:
1871 spin_unlock_bh(&adapter->mcc_lock);
1872 return status;
1873}
1874
1875/* Uses sync mcc */
1876int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1877{
1878 struct be_mcc_wrb *wrb;
1879 struct be_cmd_req_get_beacon_state *req;
1880 int status;
1881
1882 spin_lock_bh(&adapter->mcc_lock);
1883
1884 wrb = wrb_from_mccq(adapter);
1885 if (!wrb) {
1886 status = -EBUSY;
1887 goto err;
1888 }
1889 req = embedded_payload(wrb);
1890
1891 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1892 OPCODE_COMMON_GET_BEACON_STATE);
1893
1894 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1895 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1896
1897 req->port_num = port_num;
1898
1899 status = be_mcc_notify_wait(adapter);
1900 if (!status) {
1901 struct be_cmd_resp_get_beacon_state *resp =
1902 embedded_payload(wrb);
1903 *state = resp->beacon_state;
1904 }
1905
1906err:
1907 spin_unlock_bh(&adapter->mcc_lock);
1908 return status;
1909}
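
/* Hedged LED-identify sketch combining the two beacon commands: save the
 * current state, blink for the requested time, then restore. The zero
 * bcn/sts durations and the use of ssleep() are illustrative.
 */
static int be_phys_id_example(struct be_adapter *adapter, u8 port, u32 secs)
{
	u32 orig_state;
	int status;

	status = be_cmd_get_beacon_state(adapter, port, &orig_state);
	if (status)
		return status;
	be_cmd_set_beacon_state(adapter, port, 0, 0, BEACON_STATE_ENABLED);
	ssleep(secs);
	return be_cmd_set_beacon_state(adapter, port, 0, 0, (u8)orig_state);
}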
1910
1911int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1912 u32 data_size, u32 data_offset, const char *obj_name,
1913 u32 *data_written, u8 *addn_status)
1914{
1915 struct be_mcc_wrb *wrb;
1916 struct lancer_cmd_req_write_object *req;
1917 struct lancer_cmd_resp_write_object *resp;
1918 void *ctxt = NULL;
1919 int status;
1920
1921 spin_lock_bh(&adapter->mcc_lock);
1922 adapter->flash_status = 0;
1923
1924 wrb = wrb_from_mccq(adapter);
1925 if (!wrb) {
1926 status = -EBUSY;
1927 goto err_unlock;
1928 }
1929
1930 req = embedded_payload(wrb);
1931
1932 be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
1933 true, 1, OPCODE_COMMON_WRITE_OBJECT);
1934 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1935
1936 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_WRITE_OBJECT,
1938 sizeof(struct lancer_cmd_req_write_object));
1939
1940 ctxt = &req->context;
1941 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1942 write_length, ctxt, data_size);
1943
1944 if (data_size == 0)
1945 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1946 eof, ctxt, 1);
1947 else
1948 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1949 eof, ctxt, 0);
1950
1951 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1952 req->write_offset = cpu_to_le32(data_offset);
1953 strcpy(req->object_name, obj_name);
1954 req->descriptor_count = cpu_to_le32(1);
1955 req->buf_len = cpu_to_le32(data_size);
1956 req->addr_low = cpu_to_le32((cmd->dma +
1957 sizeof(struct lancer_cmd_req_write_object))
1958 & 0xFFFFFFFF);
1959 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1960 sizeof(struct lancer_cmd_req_write_object)));
1961
1962 be_mcc_notify(adapter);
1963 spin_unlock_bh(&adapter->mcc_lock);
1964
1965 if (!wait_for_completion_timeout(&adapter->flash_compl,
1966 msecs_to_jiffies(12000)))
1967 status = -1;
1968 else
1969 status = adapter->flash_status;
1970
1971 resp = embedded_payload(wrb);
1972 if (!status) {
1973 *data_written = le32_to_cpu(resp->actual_write_len);
1974 } else {
1975 *addn_status = resp->additional_status;
1976 status = resp->status;
1977 }
1978
1979 return status;
1980
1981err_unlock:
1982 spin_unlock_bh(&adapter->mcc_lock);
1983 return status;
1984}
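
/* Hedged flashing sketch: stream a firmware image through
 * lancer_cmd_write_object() in chunks that fit the DMA buffer, then issue
 * a zero-length write so the eof bit (set above when data_size == 0)
 * terminates the object. The 32KB chunk size is an assumption; cmd->va
 * must be sized for the request header plus one chunk.
 */
static int lancer_flash_example(struct be_adapter *adapter,
			struct be_dma_mem *cmd, const u8 *img, u32 img_len,
			const char *obj_name)
{
	u32 offset = 0, chunk, written = 0;
	u8 add_status = 0;
	int status;

	while (img_len) {
		chunk = min(img_len, 32U * 1024);
		memcpy(cmd->va + sizeof(struct lancer_cmd_req_write_object),
			img + offset, chunk);
		status = lancer_cmd_write_object(adapter, cmd, chunk, offset,
				obj_name, &written, &add_status);
		if (status)
			return status;
		offset += written;
		img_len -= written;
	}
	/* zero-length write flags end-of-data */
	return lancer_cmd_write_object(adapter, cmd, 0, offset, obj_name,
			&written, &add_status);
}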
1985
1986int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1987 u32 flash_type, u32 flash_opcode, u32 buf_size)
1988{
1989 struct be_mcc_wrb *wrb;
1990 struct be_cmd_write_flashrom *req;
1991 struct be_sge *sge;
1992 int status;
1993
1994 spin_lock_bh(&adapter->mcc_lock);
1995 adapter->flash_status = 0;
1996
1997 wrb = wrb_from_mccq(adapter);
1998 if (!wrb) {
1999 status = -EBUSY;
2000 goto err_unlock;
2001 }
2002 req = cmd->va;
2003 sge = nonembedded_sgl(wrb);
2004
2005 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
2006 OPCODE_COMMON_WRITE_FLASHROM);
2007 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
2008
2009 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2010 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
2011 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2012 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2013 sge->len = cpu_to_le32(cmd->size);
2014
2015 req->params.op_type = cpu_to_le32(flash_type);
2016 req->params.op_code = cpu_to_le32(flash_opcode);
2017 req->params.data_buf_size = cpu_to_le32(buf_size);
2018
2019 be_mcc_notify(adapter);
2020 spin_unlock_bh(&adapter->mcc_lock);
2021
2022 if (!wait_for_completion_timeout(&adapter->flash_compl,
2023 msecs_to_jiffies(12000)))
2024 status = -1;
2025 else
2026 status = adapter->flash_status;
2027
2028 return status;
2029
2030err_unlock:
2031 spin_unlock_bh(&adapter->mcc_lock);
2032 return status;
2033}
2034
2035int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2036 int offset)
2037{
2038 struct be_mcc_wrb *wrb;
2039 struct be_cmd_write_flashrom *req;
2040 int status;
2041
2042 spin_lock_bh(&adapter->mcc_lock);
2043
2044 wrb = wrb_from_mccq(adapter);
2045 if (!wrb) {
2046 status = -EBUSY;
2047 goto err;
2048 }
2049 req = embedded_payload(wrb);
2050
2051 be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
2052 OPCODE_COMMON_READ_FLASHROM);
2053
2054 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2055 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
2056
2057 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
2058 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2059 req->params.offset = cpu_to_le32(offset);
2060 req->params.data_buf_size = cpu_to_le32(0x4);
2061
2062 status = be_mcc_notify_wait(adapter);
2063 if (!status)
2064 memcpy(flashed_crc, req->params.data_buf, 4);
2065
2066err:
2067 spin_unlock_bh(&adapter->mcc_lock);
2068 return status;
2069}
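
/* Hedged usage sketch: decide whether a redboot section actually needs
 * reflashing by comparing the CRC stored in flash against the tail of the
 * new image; the offset convention (CRC in the last 4 bytes) is an
 * assumption for illustration.
 */
static bool be_flash_needed_example(struct be_adapter *adapter,
			const u8 *img, u32 img_size)
{
	u8 flashed_crc[4];

	if (be_cmd_get_flash_crc(adapter, flashed_crc, img_size - 4))
		return true;	/* CRC unreadable: flash to be safe */
	return memcmp(flashed_crc, img + img_size - 4, 4) != 0;
}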
2070
2071int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2072 struct be_dma_mem *nonemb_cmd)
2073{
2074 struct be_mcc_wrb *wrb;
2075 struct be_cmd_req_acpi_wol_magic_config *req;
2076 struct be_sge *sge;
2077 int status;
2078
2079 spin_lock_bh(&adapter->mcc_lock);
2080
2081 wrb = wrb_from_mccq(adapter);
2082 if (!wrb) {
2083 status = -EBUSY;
2084 goto err;
2085 }
2086 req = nonemb_cmd->va;
2087 sge = nonembedded_sgl(wrb);
2088
2089 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2090 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
2091
2092 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2093 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
2094 memcpy(req->magic_mac, mac, ETH_ALEN);
2095
2096 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2097 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2098 sge->len = cpu_to_le32(nonemb_cmd->size);
2099
2100 status = be_mcc_notify_wait(adapter);
2101
2102err:
2103 spin_unlock_bh(&adapter->mcc_lock);
2104 return status;
2105}
2106
2107int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2108 u8 loopback_type, u8 enable)
2109{
2110 struct be_mcc_wrb *wrb;
2111 struct be_cmd_req_set_lmode *req;
2112 int status;
2113
2114 spin_lock_bh(&adapter->mcc_lock);
2115
2116 wrb = wrb_from_mccq(adapter);
2117 if (!wrb) {
2118 status = -EBUSY;
2119 goto err;
2120 }
2121
2122 req = embedded_payload(wrb);
2123
2124 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2125 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
2126
2127 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2128 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
2129 sizeof(*req));
2130
2131 req->src_port = port_num;
2132 req->dest_port = port_num;
2133 req->loopback_type = loopback_type;
2134 req->loopback_state = enable;
2135
2136 status = be_mcc_notify_wait(adapter);
2137err:
2138 spin_unlock_bh(&adapter->mcc_lock);
2139 return status;
2140}
2141
2142int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2143 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2144{
2145 struct be_mcc_wrb *wrb;
2146 struct be_cmd_req_loopback_test *req;
2147 int status;
2148
2149 spin_lock_bh(&adapter->mcc_lock);
2150
2151 wrb = wrb_from_mccq(adapter);
2152 if (!wrb) {
2153 status = -EBUSY;
2154 goto err;
2155 }
2156
2157 req = embedded_payload(wrb);
2158
2159 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2160 OPCODE_LOWLEVEL_LOOPBACK_TEST);
2161
2162 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2163 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
2164 req->hdr.timeout = cpu_to_le32(4);
2165
2166 req->pattern = cpu_to_le64(pattern);
2167 req->src_port = cpu_to_le32(port_num);
2168 req->dest_port = cpu_to_le32(port_num);
2169 req->pkt_size = cpu_to_le32(pkt_size);
2170 req->num_pkts = cpu_to_le32(num_pkts);
2171 req->loopback_type = cpu_to_le32(loopback_type);
2172
2173 status = be_mcc_notify_wait(adapter);
2174 if (!status) {
2175 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2176 status = le32_to_cpu(resp->status);
2177 }
2178
2179err:
2180 spin_unlock_bh(&adapter->mcc_lock);
2181 return status;
2182}
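
/* Hedged self-test sketch chaining the two low-level commands: enable a
 * loopback mode, run a short packet test, then restore normal operation.
 * The packet size/count, the pattern, and the 0xff "no loopback" encoding
 * are assumptions.
 */
static int be_run_loopback_example(struct be_adapter *adapter, u8 port,
			u8 lb_type)
{
	int status;

	be_cmd_set_loopback(adapter, port, lb_type, 1);
	status = be_cmd_loopback_test(adapter, port, lb_type, 1500, 2,
			0xFF00FF00FF00FF00ULL);
	be_cmd_set_loopback(adapter, port, 0xff, 1);
	return status;
}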
2183
2184int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2185 u32 byte_cnt, struct be_dma_mem *cmd)
2186{
2187 struct be_mcc_wrb *wrb;
2188 struct be_cmd_req_ddrdma_test *req;
2189 struct be_sge *sge;
2190 int status;
2191 int i, j = 0;
2192
2193 spin_lock_bh(&adapter->mcc_lock);
2194
2195 wrb = wrb_from_mccq(adapter);
2196 if (!wrb) {
2197 status = -EBUSY;
2198 goto err;
2199 }
2200 req = cmd->va;
2201 sge = nonembedded_sgl(wrb);
2202 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
2203 OPCODE_LOWLEVEL_HOST_DDR_DMA);
2204 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2205 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
2206
2207 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2208 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2209 sge->len = cpu_to_le32(cmd->size);
2210
2211 req->pattern = cpu_to_le64(pattern);
2212 req->byte_count = cpu_to_le32(byte_cnt);
2213 for (i = 0; i < byte_cnt; i++) {
2214 req->snd_buff[i] = (u8)(pattern >> (j*8));
2215 j++;
2216 if (j > 7)
2217 j = 0;
2218 }
2219
2220 status = be_mcc_notify_wait(adapter);
2221
2222 if (!status) {
2223 struct be_cmd_resp_ddrdma_test *resp;
2224 resp = cmd->va;
2225 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2226 resp->snd_err) {
2227 status = -1;
2228 }
2229 }
2230
2231err:
2232 spin_unlock_bh(&adapter->mcc_lock);
2233 return status;
2234}
2235
2236int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2237 struct be_dma_mem *nonemb_cmd)
2238{
2239 struct be_mcc_wrb *wrb;
2240 struct be_cmd_req_seeprom_read *req;
2241 struct be_sge *sge;
2242 int status;
2243
2244 spin_lock_bh(&adapter->mcc_lock);
2245
2246 wrb = wrb_from_mccq(adapter);
2247 if (!wrb) {
2248 status = -EBUSY;
2249 goto err;
2250 }
2251 req = nonemb_cmd->va;
2252 sge = nonembedded_sgl(wrb);
2253
2254 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2255 OPCODE_COMMON_SEEPROM_READ);
2256
2257 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2258 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
2259
2260 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2261 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2262 sge->len = cpu_to_le32(nonemb_cmd->size);
2263
2264 status = be_mcc_notify_wait(adapter);
2265
2266err:
2267 spin_unlock_bh(&adapter->mcc_lock);
2268 return status;
2269}
2270
2271int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
2272{
2273 struct be_mcc_wrb *wrb;
2274 struct be_cmd_req_get_phy_info *req;
2275 struct be_sge *sge;
2276 int status;
2277
2278 spin_lock_bh(&adapter->mcc_lock);
2279
2280 wrb = wrb_from_mccq(adapter);
2281 if (!wrb) {
2282 status = -EBUSY;
2283 goto err;
2284 }
2285
2286 req = cmd->va;
2287 sge = nonembedded_sgl(wrb);
2288
2289 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2290 OPCODE_COMMON_GET_PHY_DETAILS);
2291
2292 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2293 OPCODE_COMMON_GET_PHY_DETAILS,
2294 sizeof(*req));
2295
2296 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2297 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2298 sge->len = cpu_to_le32(cmd->size);
2299
2300 status = be_mcc_notify_wait(adapter);
2301err:
2302 spin_unlock_bh(&adapter->mcc_lock);
2303 return status;
2304}
2305
2306int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2307{
2308 struct be_mcc_wrb *wrb;
2309 struct be_cmd_req_set_qos *req;
2310 int status;
2311
2312 spin_lock_bh(&adapter->mcc_lock);
2313
2314 wrb = wrb_from_mccq(adapter);
2315 if (!wrb) {
2316 status = -EBUSY;
2317 goto err;
2318 }
2319
2320 req = embedded_payload(wrb);
2321
2322 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2323 OPCODE_COMMON_SET_QOS);
2324
2325 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2326 OPCODE_COMMON_SET_QOS, sizeof(*req));
2327
2328 req->hdr.domain = domain;
2329 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2330 req->max_bps_nic = cpu_to_le32(bps);
2331
2332 status = be_mcc_notify_wait(adapter);
2333
2334err:
2335 spin_unlock_bh(&adapter->mcc_lock);
2336 return status;
2337}
2338
2339int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2340{
2341 struct be_mcc_wrb *wrb;
2342 struct be_cmd_req_cntl_attribs *req;
2343 struct be_cmd_resp_cntl_attribs *resp;
2344 struct be_sge *sge;
2345	int status = -1;
2346 int payload_len = max(sizeof(*req), sizeof(*resp));
2347 struct mgmt_controller_attrib *attribs;
2348 struct be_dma_mem attribs_cmd;
2349
2350 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2351 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2352 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2353 &attribs_cmd.dma);
2354 if (!attribs_cmd.va) {
2355 dev_err(&adapter->pdev->dev,
2356 "Memory allocation failure\n");
2357 return -ENOMEM;
2358 }
2359
2360	if (mutex_lock_interruptible(&adapter->mbox_lock))
2361		goto free_mem;	/* attribs_cmd must still be freed */
2362
2363 wrb = wrb_from_mbox(adapter);
2364 if (!wrb) {
2365 status = -EBUSY;
2366 goto err;
2367 }
2368 req = attribs_cmd.va;
2369 sge = nonembedded_sgl(wrb);
2370
2371 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2372 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2373 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2374 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2375 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2376 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2377 sge->len = cpu_to_le32(attribs_cmd.size);
2378
2379 status = be_mbox_notify_wait(adapter);
2380 if (!status) {
2381 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2382 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2383 }
2384
2385err:
2386 mutex_unlock(&adapter->mbox_lock);
2387free_mem:
2388	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, attribs_cmd.dma);
2389 return status;
2390}
2391
2392/* Uses mbox */
2393int be_cmd_req_native_mode(struct be_adapter *adapter)
2394{
2395 struct be_mcc_wrb *wrb;
2396 struct be_cmd_req_set_func_cap *req;
2397 int status;
2398
2399 if (mutex_lock_interruptible(&adapter->mbox_lock))
2400 return -1;
2401
2402 wrb = wrb_from_mbox(adapter);
2403 if (!wrb) {
2404 status = -EBUSY;
2405 goto err;
2406 }
2407
2408 req = embedded_payload(wrb);
2409
2410 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2411 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2412
2413 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2414 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2415
2416 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2417 CAPABILITY_BE3_NATIVE_ERX_API);
2418 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2419
2420 status = be_mbox_notify_wait(adapter);
2421 if (!status) {
2422 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2423 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2424 CAPABILITY_BE3_NATIVE_ERX_API;
2425 }
2426err:
2427 mutex_unlock(&adapter->mbox_lock);
2428 return status;
2429}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
new file mode 100644
index 00000000000..8e4d48824fe
--- /dev/null
+++ b/drivers/net/benet/be_cmds.h
@@ -0,0 +1,1551 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18/*
19 * The driver sends configuration and management command requests to the
20 * firmware in the BE. These requests are communicated to the processor
21 * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
22 * WRB inside a MAILBOX.
23 * The commands are serviced by the ARM processor in the BladeEngine's MPU.
24 */
25
26struct be_sge {
27 u32 pa_lo;
28 u32 pa_hi;
29 u32 len;
30};
31
32#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
33#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
34#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
35struct be_mcc_wrb {
36 u32 embedded; /* dword 0 */
37 u32 payload_length; /* dword 1 */
38 u32 tag0; /* dword 2 */
39 u32 tag1; /* dword 3 */
40 u32 rsvd; /* dword 4 */
41 union {
42 u8 embedded_payload[236]; /* used by embedded cmds */
43 struct be_sge sgl[19]; /* used by non-embedded cmds */
44 } payload;
45};
46
47#define CQE_FLAGS_VALID_MASK (1 << 31)
48#define CQE_FLAGS_ASYNC_MASK (1 << 30)
49#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
50#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
51
52/* Completion Status */
53enum {
54 MCC_STATUS_SUCCESS = 0,
55 MCC_STATUS_FAILED = 1,
56 MCC_STATUS_ILLEGAL_REQUEST = 2,
57 MCC_STATUS_ILLEGAL_FIELD = 3,
58 MCC_STATUS_INSUFFICIENT_BUFFER = 4,
59 MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
60 MCC_STATUS_NOT_SUPPORTED = 66
61};
62
63#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF
66#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
67
68struct be_mcc_compl {
69 u32 status; /* dword 0 */
70 u32 tag0; /* dword 1 */
71 u32 tag1; /* dword 2 */
72 u32 flags; /* dword 3 */
73};
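
/* Hedged helper sketch: extract the base and extended status from dword 0
 * using the shift/mask pairs above, assuming the completion has already
 * been byte-swapped to CPU order.
 */
static inline u16 mcc_compl_status_example(struct be_mcc_compl *compl)
{
	return (compl->status >> CQE_STATUS_COMPL_SHIFT) &
			CQE_STATUS_COMPL_MASK;
}

static inline u16 mcc_compl_extd_status_example(struct be_mcc_compl *compl)
{
	return (compl->status >> CQE_STATUS_EXTD_SHIFT) &
			CQE_STATUS_EXTD_MASK;
}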
74
75/* When the async bit of mcc_compl is set, the last 4 bytes of
76 * mcc_compl are interpreted as follows:
77 */
78#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
79#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
80#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
81#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
82#define ASYNC_EVENT_CODE_LINK_STATE 0x1
83#define ASYNC_EVENT_CODE_GRP_5 0x5
84#define ASYNC_EVENT_QOS_SPEED 0x1
85#define ASYNC_EVENT_COS_PRIORITY 0x2
86#define ASYNC_EVENT_PVID_STATE 0x3
87struct be_async_event_trailer {
88 u32 code;
89};
90
91enum {
92 ASYNC_EVENT_LINK_DOWN = 0x0,
93 ASYNC_EVENT_LINK_UP = 0x1
94};
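
/* Hedged decode sketch: classify an async completion by the event code in
 * its trailer word (CPU byte order assumed), e.g. to route link-state
 * events to the link handler.
 */
static inline bool is_link_state_evt_example(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}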
95
96/* When the event code of an async trailer is link-state, the mcc_compl
97 * must be interpreted as follows
98 */
99struct be_async_event_link_state {
100 u8 physical_port;
101 u8 port_link_status;
102 u8 port_duplex;
103 u8 port_speed;
104 u8 port_fault;
105 u8 rsvd0[7];
106 struct be_async_event_trailer trailer;
107} __packed;
108
109/* When the event code of an async trailer is GRP-5 and the event type is
110 * QOS_SPEED, the mcc_compl must be interpreted as follows
111 */
112struct be_async_event_grp5_qos_link_speed {
113 u8 physical_port;
114 u8 rsvd[5];
115 u16 qos_link_speed;
116 u32 event_tag;
117 struct be_async_event_trailer trailer;
118} __packed;
119
120/* When the event code of an async trailer is GRP5 and event type is
121 * CoS-Priority, the mcc_compl must be interpreted as follows
122 */
123struct be_async_event_grp5_cos_priority {
124 u8 physical_port;
125 u8 available_priority_bmap;
126 u8 reco_default_priority;
127 u8 valid;
128 u8 rsvd0;
129 u8 event_tag;
130 struct be_async_event_trailer trailer;
131} __packed;
132
133/* When the event code of an async trailer is GRP5 and event type is
134 * PVID state, the mcc_compl must be interpreted as follows
135 */
136struct be_async_event_grp5_pvid_state {
137 u8 enabled;
138 u8 rsvd0;
139 u16 tag;
140 u32 event_tag;
141 u32 rsvd1;
142 struct be_async_event_trailer trailer;
143} __packed;
144
145struct be_mcc_mailbox {
146 struct be_mcc_wrb wrb;
147 struct be_mcc_compl compl;
148};
149
150#define CMD_SUBSYSTEM_COMMON 0x1
151#define CMD_SUBSYSTEM_ETH 0x3
152#define CMD_SUBSYSTEM_LOWLEVEL 0xb
153
154#define OPCODE_COMMON_NTWK_MAC_QUERY 1
155#define OPCODE_COMMON_NTWK_MAC_SET 2
156#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
157#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
158#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
159#define OPCODE_COMMON_READ_FLASHROM 6
160#define OPCODE_COMMON_WRITE_FLASHROM 7
161#define OPCODE_COMMON_CQ_CREATE 12
162#define OPCODE_COMMON_EQ_CREATE 13
163#define OPCODE_COMMON_MCC_CREATE 21
164#define OPCODE_COMMON_SET_QOS 28
165#define OPCODE_COMMON_MCC_CREATE_EXT 90
166#define OPCODE_COMMON_SEEPROM_READ 30
167#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
168#define OPCODE_COMMON_NTWK_RX_FILTER 34
169#define OPCODE_COMMON_GET_FW_VERSION 35
170#define OPCODE_COMMON_SET_FLOW_CONTROL 36
171#define OPCODE_COMMON_GET_FLOW_CONTROL 37
172#define OPCODE_COMMON_SET_FRAME_SIZE 39
173#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
174#define OPCODE_COMMON_FIRMWARE_CONFIG 42
175#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
176#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
177#define OPCODE_COMMON_MCC_DESTROY 53
178#define OPCODE_COMMON_CQ_DESTROY 54
179#define OPCODE_COMMON_EQ_DESTROY 55
180#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
181#define OPCODE_COMMON_NTWK_PMAC_ADD 59
182#define OPCODE_COMMON_NTWK_PMAC_DEL 60
183#define OPCODE_COMMON_FUNCTION_RESET 61
184#define OPCODE_COMMON_MANAGE_FAT 68
185#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
186#define OPCODE_COMMON_GET_BEACON_STATE 70
187#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
188#define OPCODE_COMMON_GET_PHY_DETAILS 102
189#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
190#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
191#define OPCODE_COMMON_WRITE_OBJECT 172
192
193#define OPCODE_ETH_RSS_CONFIG 1
194#define OPCODE_ETH_ACPI_CONFIG 2
195#define OPCODE_ETH_PROMISCUOUS 3
196#define OPCODE_ETH_GET_STATISTICS 4
197#define OPCODE_ETH_TX_CREATE 7
198#define OPCODE_ETH_RX_CREATE 8
199#define OPCODE_ETH_TX_DESTROY 9
200#define OPCODE_ETH_RX_DESTROY 10
201#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
202#define OPCODE_ETH_GET_PPORT_STATS 18
203
204#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
205#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
206#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
207
208struct be_cmd_req_hdr {
209 u8 opcode; /* dword 0 */
210 u8 subsystem; /* dword 0 */
211 u8 port_number; /* dword 0 */
212 u8 domain; /* dword 0 */
213 u32 timeout; /* dword 1 */
214 u32 request_length; /* dword 2 */
215 u8 version; /* dword 3 */
216 u8 rsvd[3]; /* dword 3 */
217};
218
219#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
220#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
221struct be_cmd_resp_hdr {
222 u32 info; /* dword 0 */
223 u32 status; /* dword 1 */
224 u32 response_length; /* dword 2 */
225 u32 actual_resp_len; /* dword 3 */
226};
227
228struct phys_addr {
229 u32 lo;
230 u32 hi;
231};
232
233/**************************
234 * BE Command definitions *
235 **************************/
236
237/* Pseudo amap definition in which each bit of the actual structure is defined
238 * as a byte: used to calculate offset/shift/mask of each field */
239struct amap_eq_context {
240 u8 cidx[13]; /* dword 0*/
241 u8 rsvd0[3]; /* dword 0*/
242 u8 epidx[13]; /* dword 0*/
243 u8 valid; /* dword 0*/
244 u8 rsvd1; /* dword 0*/
245 u8 size; /* dword 0*/
246 u8 pidx[13]; /* dword 1*/
247 u8 rsvd2[3]; /* dword 1*/
248 u8 pd[10]; /* dword 1*/
249 u8 count[3]; /* dword 1*/
250 u8 solevent; /* dword 1*/
251 u8 stalled; /* dword 1*/
252 u8 armed; /* dword 1*/
253 u8 rsvd3[4]; /* dword 2*/
254 u8 func[8]; /* dword 2*/
255 u8 rsvd4; /* dword 2*/
256 u8 delaymult[10]; /* dword 2*/
257 u8 rsvd5[2]; /* dword 2*/
258 u8 phase[2]; /* dword 2*/
259 u8 nodelay; /* dword 2*/
260 u8 rsvd6[4]; /* dword 2*/
261 u8 rsvd7[32]; /* dword 3*/
262} __packed;
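
/* Hedged illustration of the pseudo-amap convention: with one byte per
 * hardware bit, a field's byte offset in the amap struct equals its bit
 * offset in the real context. E.g. offsetof(struct amap_eq_context,
 * delaymult) is 77, i.e. dword 2, bits 13..22, which is what the driver's
 * AMAP_* macros compute from this layout.
 */
static inline u32 amap_bit_offset_example(size_t amap_byte_offset)
{
	return (u32)amap_byte_offset;	/* 1 byte here == 1 bit in hardware */
}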
263
264struct be_cmd_req_eq_create {
265 struct be_cmd_req_hdr hdr;
266 u16 num_pages; /* sword */
267 u16 rsvd0; /* sword */
268 u8 context[sizeof(struct amap_eq_context) / 8];
269 struct phys_addr pages[8];
270} __packed;
271
272struct be_cmd_resp_eq_create {
273 struct be_cmd_resp_hdr resp_hdr;
274 u16 eq_id; /* sword */
275 u16 rsvd0; /* sword */
276} __packed;
277
278/******************** Mac query ***************************/
279enum {
280 MAC_ADDRESS_TYPE_STORAGE = 0x0,
281 MAC_ADDRESS_TYPE_NETWORK = 0x1,
282 MAC_ADDRESS_TYPE_PD = 0x2,
283 MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
284};
285
286struct mac_addr {
287 u16 size_of_struct;
288 u8 addr[ETH_ALEN];
289} __packed;
290
291struct be_cmd_req_mac_query {
292 struct be_cmd_req_hdr hdr;
293 u8 type;
294 u8 permanent;
295 u16 if_id;
296} __packed;
297
298struct be_cmd_resp_mac_query {
299 struct be_cmd_resp_hdr hdr;
300 struct mac_addr mac;
301};
302
303/******************** PMac Add ***************************/
304struct be_cmd_req_pmac_add {
305 struct be_cmd_req_hdr hdr;
306 u32 if_id;
307 u8 mac_address[ETH_ALEN];
308 u8 rsvd0[2];
309} __packed;
310
311struct be_cmd_resp_pmac_add {
312 struct be_cmd_resp_hdr hdr;
313 u32 pmac_id;
314};
315
316/******************** PMac Del ***************************/
317struct be_cmd_req_pmac_del {
318 struct be_cmd_req_hdr hdr;
319 u32 if_id;
320 u32 pmac_id;
321};
322
323/******************** Create CQ ***************************/
324/* Pseudo amap definition in which each bit of the actual structure is defined
325 * as a byte: used to calculate offset/shift/mask of each field */
326struct amap_cq_context_be {
327 u8 cidx[11]; /* dword 0*/
328 u8 rsvd0; /* dword 0*/
329 u8 coalescwm[2]; /* dword 0*/
330 u8 nodelay; /* dword 0*/
331 u8 epidx[11]; /* dword 0*/
332 u8 rsvd1; /* dword 0*/
333 u8 count[2]; /* dword 0*/
334 u8 valid; /* dword 0*/
335 u8 solevent; /* dword 0*/
336 u8 eventable; /* dword 0*/
337 u8 pidx[11]; /* dword 1*/
338 u8 rsvd2; /* dword 1*/
339 u8 pd[10]; /* dword 1*/
340 u8 eqid[8]; /* dword 1*/
341 u8 stalled; /* dword 1*/
342 u8 armed; /* dword 1*/
343 u8 rsvd3[4]; /* dword 2*/
344 u8 func[8]; /* dword 2*/
345 u8 rsvd4[20]; /* dword 2*/
346 u8 rsvd5[32]; /* dword 3*/
347} __packed;
348
349struct amap_cq_context_lancer {
350 u8 rsvd0[12]; /* dword 0*/
351 u8 coalescwm[2]; /* dword 0*/
352 u8 nodelay; /* dword 0*/
353 u8 rsvd1[12]; /* dword 0*/
354 u8 count[2]; /* dword 0*/
355 u8 valid; /* dword 0*/
356 u8 rsvd2; /* dword 0*/
357 u8 eventable; /* dword 0*/
358 u8 eqid[16]; /* dword 1*/
359 u8 rsvd3[15]; /* dword 1*/
360 u8 armed; /* dword 1*/
361 u8 rsvd4[32]; /* dword 2*/
362 u8 rsvd5[32]; /* dword 3*/
363} __packed;
364
365struct be_cmd_req_cq_create {
366 struct be_cmd_req_hdr hdr;
367 u16 num_pages;
368 u8 page_size;
369 u8 rsvd0;
370 u8 context[sizeof(struct amap_cq_context_be) / 8];
371 struct phys_addr pages[8];
372} __packed;
373
374
375struct be_cmd_resp_cq_create {
376 struct be_cmd_resp_hdr hdr;
377 u16 cq_id;
378 u16 rsvd0;
379} __packed;
380
381struct be_cmd_req_get_fat {
382 struct be_cmd_req_hdr hdr;
383 u32 fat_operation;
384 u32 read_log_offset;
385 u32 read_log_length;
386 u32 data_buffer_size;
387 u32 data_buffer[1];
388} __packed;
389
390struct be_cmd_resp_get_fat {
391 struct be_cmd_resp_hdr hdr;
392 u32 log_size;
393 u32 read_log_length;
394 u32 rsvd[2];
395 u32 data_buffer[1];
396} __packed;
397
398
399/******************** Create MCCQ ***************************/
400/* Pseudo amap definition in which each bit of the actual structure is defined
401 * as a byte: used to calculate offset/shift/mask of each field */
402struct amap_mcc_context_be {
403 u8 con_index[14];
404 u8 rsvd0[2];
405 u8 ring_size[4];
406 u8 fetch_wrb;
407 u8 fetch_r2t;
408 u8 cq_id[10];
409 u8 prod_index[14];
410 u8 fid[8];
411 u8 pdid[9];
412 u8 valid;
413 u8 rsvd1[32];
414 u8 rsvd2[32];
415} __packed;
416
417struct amap_mcc_context_lancer {
418 u8 async_cq_id[16];
419 u8 ring_size[4];
420 u8 rsvd0[12];
421 u8 rsvd1[31];
422 u8 valid;
423 u8 async_cq_valid[1];
424 u8 rsvd2[31];
425 u8 rsvd3[32];
426} __packed;
427
428struct be_cmd_req_mcc_create {
429 struct be_cmd_req_hdr hdr;
430 u16 num_pages;
431 u16 cq_id;
432 u8 context[sizeof(struct amap_mcc_context_be) / 8];
433 struct phys_addr pages[8];
434} __packed;
435
436struct be_cmd_req_mcc_ext_create {
437 struct be_cmd_req_hdr hdr;
438 u16 num_pages;
439 u16 cq_id;
440 u32 async_event_bitmap[1];
441 u8 context[sizeof(struct amap_mcc_context_be) / 8];
442 struct phys_addr pages[8];
443} __packed;
444
445struct be_cmd_resp_mcc_create {
446 struct be_cmd_resp_hdr hdr;
447 u16 id;
448 u16 rsvd0;
449} __packed;
450
451/******************** Create TxQ ***************************/
452#define BE_ETH_TX_RING_TYPE_STANDARD 2
453#define BE_ULP1_NUM 1
454
455/* Pseudo amap definition in which each bit of the actual structure is defined
456 * as a byte: used to calculate offset/shift/mask of each field */
457struct amap_tx_context {
458 u8 if_id[16]; /* dword 0 */
459 u8 tx_ring_size[4]; /* dword 0 */
460 u8 rsvd1[26]; /* dword 0 */
461 u8 pci_func_id[8]; /* dword 1 */
462 u8 rsvd2[9]; /* dword 1 */
463 u8 ctx_valid; /* dword 1 */
464 u8 cq_id_send[16]; /* dword 2 */
465 u8 rsvd3[16]; /* dword 2 */
466 u8 rsvd4[32]; /* dword 3 */
467 u8 rsvd5[32]; /* dword 4 */
468 u8 rsvd6[32]; /* dword 5 */
469 u8 rsvd7[32]; /* dword 6 */
470 u8 rsvd8[32]; /* dword 7 */
471 u8 rsvd9[32]; /* dword 8 */
472 u8 rsvd10[32]; /* dword 9 */
473 u8 rsvd11[32]; /* dword 10 */
474 u8 rsvd12[32]; /* dword 11 */
475 u8 rsvd13[32]; /* dword 12 */
476 u8 rsvd14[32]; /* dword 13 */
477 u8 rsvd15[32]; /* dword 14 */
478 u8 rsvd16[32]; /* dword 15 */
479} __packed;
480
481struct be_cmd_req_eth_tx_create {
482 struct be_cmd_req_hdr hdr;
483 u8 num_pages;
484 u8 ulp_num;
485 u8 type;
486 u8 bound_port;
487 u8 context[sizeof(struct amap_tx_context) / 8];
488 struct phys_addr pages[8];
489} __packed;
490
491struct be_cmd_resp_eth_tx_create {
492 struct be_cmd_resp_hdr hdr;
493 u16 cid;
494 u16 rsvd0;
495} __packed;
496
497/******************** Create RxQ ***************************/
498struct be_cmd_req_eth_rx_create {
499 struct be_cmd_req_hdr hdr;
500 u16 cq_id;
501 u8 frag_size;
502 u8 num_pages;
503 struct phys_addr pages[2];
504 u32 interface_id;
505 u16 max_frame_size;
506 u16 rsvd0;
507 u32 rss_queue;
508} __packed;
509
510struct be_cmd_resp_eth_rx_create {
511 struct be_cmd_resp_hdr hdr;
512 u16 id;
513 u8 rss_id;
514 u8 rsvd0;
515} __packed;
516
517/******************** Q Destroy ***************************/
518/* Type of Queue to be destroyed */
519enum {
520 QTYPE_EQ = 1,
521 QTYPE_CQ,
522 QTYPE_TXQ,
523 QTYPE_RXQ,
524 QTYPE_MCCQ
525};
526
527struct be_cmd_req_q_destroy {
528 struct be_cmd_req_hdr hdr;
529 u16 id;
530 u16 bypass_flush; /* valid only for rx q destroy */
531} __packed;
532
533/************ I/f Create (it's actually I/f Config Create) **********/
534
535/* Capability flags for the i/f */
536enum be_if_flags {
537 BE_IF_FLAGS_RSS = 0x4,
538 BE_IF_FLAGS_PROMISCUOUS = 0x8,
539 BE_IF_FLAGS_BROADCAST = 0x10,
540 BE_IF_FLAGS_UNTAGGED = 0x20,
541 BE_IF_FLAGS_ULP = 0x40,
542 BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
543 BE_IF_FLAGS_VLAN = 0x100,
544 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
545 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
546 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
547 BE_IF_FLAGS_MULTICAST = 0x1000
548};
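
/* Hedged sketch: a plausible capability/enable mask for an untagged
 * broadcast+multicast RX interface; the exact combination used at
 * interface-create time is an assumption.
 */
static inline u32 be_if_cap_flags_example(void)
{
	return BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
}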
549
550/* An RX interface is an object with one or more MAC addresses and
551 * filtering capabilities. */
552struct be_cmd_req_if_create {
553 struct be_cmd_req_hdr hdr;
554 u32 version; /* ignore currently */
555 u32 capability_flags;
556 u32 enable_flags;
557 u8 mac_addr[ETH_ALEN];
558 u8 rsvd0;
559 u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
560 u32 vlan_tag; /* not used currently */
561} __packed;
562
563struct be_cmd_resp_if_create {
564 struct be_cmd_resp_hdr hdr;
565 u32 interface_id;
566 u32 pmac_id;
567};
568
569/****** I/f Destroy (it's actually I/f Config Destroy) **********/
570struct be_cmd_req_if_destroy {
571 struct be_cmd_req_hdr hdr;
572 u32 interface_id;
573};
574
575/*************** HW Stats Get **********************************/
576struct be_port_rxf_stats_v0 {
577 u32 rx_bytes_lsd; /* dword 0*/
578 u32 rx_bytes_msd; /* dword 1*/
579 u32 rx_total_frames; /* dword 2*/
580 u32 rx_unicast_frames; /* dword 3*/
581 u32 rx_multicast_frames; /* dword 4*/
582 u32 rx_broadcast_frames; /* dword 5*/
583 u32 rx_crc_errors; /* dword 6*/
584 u32 rx_alignment_symbol_errors; /* dword 7*/
585 u32 rx_pause_frames; /* dword 8*/
586 u32 rx_control_frames; /* dword 9*/
587 u32 rx_in_range_errors; /* dword 10*/
588 u32 rx_out_range_errors; /* dword 11*/
589 u32 rx_frame_too_long; /* dword 12*/
590 u32 rx_address_match_errors; /* dword 13*/
591 u32 rx_vlan_mismatch; /* dword 14*/
592 u32 rx_dropped_too_small; /* dword 15*/
593 u32 rx_dropped_too_short; /* dword 16*/
594 u32 rx_dropped_header_too_small; /* dword 17*/
595 u32 rx_dropped_tcp_length; /* dword 18*/
596 u32 rx_dropped_runt; /* dword 19*/
597 u32 rx_64_byte_packets; /* dword 20*/
598 u32 rx_65_127_byte_packets; /* dword 21*/
599 u32 rx_128_256_byte_packets; /* dword 22*/
600 u32 rx_256_511_byte_packets; /* dword 23*/
601 u32 rx_512_1023_byte_packets; /* dword 24*/
602 u32 rx_1024_1518_byte_packets; /* dword 25*/
603 u32 rx_1519_2047_byte_packets; /* dword 26*/
604 u32 rx_2048_4095_byte_packets; /* dword 27*/
605 u32 rx_4096_8191_byte_packets; /* dword 28*/
606 u32 rx_8192_9216_byte_packets; /* dword 29*/
607 u32 rx_ip_checksum_errs; /* dword 30*/
608 u32 rx_tcp_checksum_errs; /* dword 31*/
609 u32 rx_udp_checksum_errs; /* dword 32*/
610 u32 rx_non_rss_packets; /* dword 33*/
611 u32 rx_ipv4_packets; /* dword 34*/
612 u32 rx_ipv6_packets; /* dword 35*/
613 u32 rx_ipv4_bytes_lsd; /* dword 36*/
614 u32 rx_ipv4_bytes_msd; /* dword 37*/
615 u32 rx_ipv6_bytes_lsd; /* dword 38*/
616 u32 rx_ipv6_bytes_msd; /* dword 39*/
617 u32 rx_chute1_packets; /* dword 40*/
618 u32 rx_chute2_packets; /* dword 41*/
619 u32 rx_chute3_packets; /* dword 42*/
620 u32 rx_management_packets; /* dword 43*/
621 u32 rx_switched_unicast_packets; /* dword 44*/
622 u32 rx_switched_multicast_packets; /* dword 45*/
623 u32 rx_switched_broadcast_packets; /* dword 46*/
624 u32 tx_bytes_lsd; /* dword 47*/
625 u32 tx_bytes_msd; /* dword 48*/
626 u32 tx_unicastframes; /* dword 49*/
627 u32 tx_multicastframes; /* dword 50*/
628 u32 tx_broadcastframes; /* dword 51*/
629 u32 tx_pauseframes; /* dword 52*/
630 u32 tx_controlframes; /* dword 53*/
631 u32 tx_64_byte_packets; /* dword 54*/
632 u32 tx_65_127_byte_packets; /* dword 55*/
633 u32 tx_128_256_byte_packets; /* dword 56*/
634 u32 tx_256_511_byte_packets; /* dword 57*/
635 u32 tx_512_1023_byte_packets; /* dword 58*/
636 u32 tx_1024_1518_byte_packets; /* dword 59*/
637 u32 tx_1519_2047_byte_packets; /* dword 60*/
638 u32 tx_2048_4095_byte_packets; /* dword 61*/
639 u32 tx_4096_8191_byte_packets; /* dword 62*/
640 u32 tx_8192_9216_byte_packets; /* dword 63*/
641 u32 rx_fifo_overflow; /* dword 64*/
642 u32 rx_input_fifo_overflow; /* dword 65*/
643};
644
645struct be_rxf_stats_v0 {
646 struct be_port_rxf_stats_v0 port[2];
647 u32 rx_drops_no_pbuf; /* dword 132*/
648 u32 rx_drops_no_txpb; /* dword 133*/
649 u32 rx_drops_no_erx_descr; /* dword 134*/
650 u32 rx_drops_no_tpre_descr; /* dword 135*/
651 u32 management_rx_port_packets; /* dword 136*/
652 u32 management_rx_port_bytes; /* dword 137*/
653 u32 management_rx_port_pause_frames; /* dword 138*/
654 u32 management_rx_port_errors; /* dword 139*/
655 u32 management_tx_port_packets; /* dword 140*/
656 u32 management_tx_port_bytes; /* dword 141*/
657 u32 management_tx_port_pause; /* dword 142*/
658 u32 management_rx_port_rxfifo_overflow; /* dword 143*/
659 u32 rx_drops_too_many_frags; /* dword 144*/
660 u32 rx_drops_invalid_ring; /* dword 145*/
661 u32 forwarded_packets; /* dword 146*/
662 u32 rx_drops_mtu; /* dword 147*/
663 u32 rsvd0[7];
664 u32 port0_jabber_events;
665 u32 port1_jabber_events;
666 u32 rsvd1[6];
667};
668
669struct be_erx_stats_v0 {
670	u32 rx_drops_no_fragments[44];	/* dwords 0 to 43 */
671 u32 rsvd[4];
672};
673
674struct be_pmem_stats {
675 u32 eth_red_drops;
676 u32 rsvd[5];
677};
678
679struct be_hw_stats_v0 {
680 struct be_rxf_stats_v0 rxf;
681 u32 rsvd[48];
682 struct be_erx_stats_v0 erx;
683 struct be_pmem_stats pmem;
684};
685
686struct be_cmd_req_get_stats_v0 {
687 struct be_cmd_req_hdr hdr;
688 u8 rsvd[sizeof(struct be_hw_stats_v0)];
689};
690
691struct be_cmd_resp_get_stats_v0 {
692 struct be_cmd_resp_hdr hdr;
693 struct be_hw_stats_v0 hw_stats;
694};
695
696#define make_64bit_val(hi_32, lo_32)	(((u64)(hi_32) << 32) | (lo_32))
697struct lancer_cmd_pport_stats {
698 u32 tx_packets_lo;
699 u32 tx_packets_hi;
700 u32 tx_unicast_packets_lo;
701 u32 tx_unicast_packets_hi;
702 u32 tx_multicast_packets_lo;
703 u32 tx_multicast_packets_hi;
704 u32 tx_broadcast_packets_lo;
705 u32 tx_broadcast_packets_hi;
706 u32 tx_bytes_lo;
707 u32 tx_bytes_hi;
708 u32 tx_unicast_bytes_lo;
709 u32 tx_unicast_bytes_hi;
710 u32 tx_multicast_bytes_lo;
711 u32 tx_multicast_bytes_hi;
712 u32 tx_broadcast_bytes_lo;
713 u32 tx_broadcast_bytes_hi;
714 u32 tx_discards_lo;
715 u32 tx_discards_hi;
716 u32 tx_errors_lo;
717 u32 tx_errors_hi;
718 u32 tx_pause_frames_lo;
719 u32 tx_pause_frames_hi;
720 u32 tx_pause_on_frames_lo;
721 u32 tx_pause_on_frames_hi;
722 u32 tx_pause_off_frames_lo;
723 u32 tx_pause_off_frames_hi;
724 u32 tx_internal_mac_errors_lo;
725 u32 tx_internal_mac_errors_hi;
726 u32 tx_control_frames_lo;
727 u32 tx_control_frames_hi;
728 u32 tx_packets_64_bytes_lo;
729 u32 tx_packets_64_bytes_hi;
730 u32 tx_packets_65_to_127_bytes_lo;
731 u32 tx_packets_65_to_127_bytes_hi;
732 u32 tx_packets_128_to_255_bytes_lo;
733 u32 tx_packets_128_to_255_bytes_hi;
734 u32 tx_packets_256_to_511_bytes_lo;
735 u32 tx_packets_256_to_511_bytes_hi;
736 u32 tx_packets_512_to_1023_bytes_lo;
737 u32 tx_packets_512_to_1023_bytes_hi;
738 u32 tx_packets_1024_to_1518_bytes_lo;
739 u32 tx_packets_1024_to_1518_bytes_hi;
740 u32 tx_packets_1519_to_2047_bytes_lo;
741 u32 tx_packets_1519_to_2047_bytes_hi;
742 u32 tx_packets_2048_to_4095_bytes_lo;
743 u32 tx_packets_2048_to_4095_bytes_hi;
744 u32 tx_packets_4096_to_8191_bytes_lo;
745 u32 tx_packets_4096_to_8191_bytes_hi;
746 u32 tx_packets_8192_to_9216_bytes_lo;
747 u32 tx_packets_8192_to_9216_bytes_hi;
748 u32 tx_lso_packets_lo;
749 u32 tx_lso_packets_hi;
750 u32 rx_packets_lo;
751 u32 rx_packets_hi;
752 u32 rx_unicast_packets_lo;
753 u32 rx_unicast_packets_hi;
754 u32 rx_multicast_packets_lo;
755 u32 rx_multicast_packets_hi;
756 u32 rx_broadcast_packets_lo;
757 u32 rx_broadcast_packets_hi;
758 u32 rx_bytes_lo;
759 u32 rx_bytes_hi;
760 u32 rx_unicast_bytes_lo;
761 u32 rx_unicast_bytes_hi;
762 u32 rx_multicast_bytes_lo;
763 u32 rx_multicast_bytes_hi;
764 u32 rx_broadcast_bytes_lo;
765 u32 rx_broadcast_bytes_hi;
766 u32 rx_unknown_protos;
767 u32 rsvd_69; /* Word 69 is reserved */
768 u32 rx_discards_lo;
769 u32 rx_discards_hi;
770 u32 rx_errors_lo;
771 u32 rx_errors_hi;
772 u32 rx_crc_errors_lo;
773 u32 rx_crc_errors_hi;
774 u32 rx_alignment_errors_lo;
775 u32 rx_alignment_errors_hi;
776 u32 rx_symbol_errors_lo;
777 u32 rx_symbol_errors_hi;
778 u32 rx_pause_frames_lo;
779 u32 rx_pause_frames_hi;
780 u32 rx_pause_on_frames_lo;
781 u32 rx_pause_on_frames_hi;
782 u32 rx_pause_off_frames_lo;
783 u32 rx_pause_off_frames_hi;
784 u32 rx_frames_too_long_lo;
785 u32 rx_frames_too_long_hi;
786 u32 rx_internal_mac_errors_lo;
787 u32 rx_internal_mac_errors_hi;
788 u32 rx_undersize_packets;
789 u32 rx_oversize_packets;
790 u32 rx_fragment_packets;
791 u32 rx_jabbers;
792 u32 rx_control_frames_lo;
793 u32 rx_control_frames_hi;
794 u32 rx_control_frames_unknown_opcode_lo;
795 u32 rx_control_frames_unknown_opcode_hi;
796 u32 rx_in_range_errors;
797 u32 rx_out_of_range_errors;
798 u32 rx_address_match_errors;
799 u32 rx_vlan_mismatch_errors;
800 u32 rx_dropped_too_small;
801 u32 rx_dropped_too_short;
802 u32 rx_dropped_header_too_small;
803 u32 rx_dropped_invalid_tcp_length;
804 u32 rx_dropped_runt;
805 u32 rx_ip_checksum_errors;
806 u32 rx_tcp_checksum_errors;
807 u32 rx_udp_checksum_errors;
808 u32 rx_non_rss_packets;
809 u32 rsvd_111;
810 u32 rx_ipv4_packets_lo;
811 u32 rx_ipv4_packets_hi;
812 u32 rx_ipv6_packets_lo;
813 u32 rx_ipv6_packets_hi;
814 u32 rx_ipv4_bytes_lo;
815 u32 rx_ipv4_bytes_hi;
816 u32 rx_ipv6_bytes_lo;
817 u32 rx_ipv6_bytes_hi;
818 u32 rx_nic_packets_lo;
819 u32 rx_nic_packets_hi;
820 u32 rx_tcp_packets_lo;
821 u32 rx_tcp_packets_hi;
822 u32 rx_iscsi_packets_lo;
823 u32 rx_iscsi_packets_hi;
824 u32 rx_management_packets_lo;
825 u32 rx_management_packets_hi;
826 u32 rx_switched_unicast_packets_lo;
827 u32 rx_switched_unicast_packets_hi;
828 u32 rx_switched_multicast_packets_lo;
829 u32 rx_switched_multicast_packets_hi;
830 u32 rx_switched_broadcast_packets_lo;
831 u32 rx_switched_broadcast_packets_hi;
832 u32 num_forwards_lo;
833 u32 num_forwards_hi;
834 u32 rx_fifo_overflow;
835 u32 rx_input_fifo_overflow;
836 u32 rx_drops_too_many_frags_lo;
837 u32 rx_drops_too_many_frags_hi;
838 u32 rx_drops_invalid_queue;
839 u32 rsvd_141;
840 u32 rx_drops_mtu_lo;
841 u32 rx_drops_mtu_hi;
842 u32 rx_packets_64_bytes_lo;
843 u32 rx_packets_64_bytes_hi;
844 u32 rx_packets_65_to_127_bytes_lo;
845 u32 rx_packets_65_to_127_bytes_hi;
846 u32 rx_packets_128_to_255_bytes_lo;
847 u32 rx_packets_128_to_255_bytes_hi;
848 u32 rx_packets_256_to_511_bytes_lo;
849 u32 rx_packets_256_to_511_bytes_hi;
850 u32 rx_packets_512_to_1023_bytes_lo;
851 u32 rx_packets_512_to_1023_bytes_hi;
852 u32 rx_packets_1024_to_1518_bytes_lo;
853 u32 rx_packets_1024_to_1518_bytes_hi;
854 u32 rx_packets_1519_to_2047_bytes_lo;
855 u32 rx_packets_1519_to_2047_bytes_hi;
856 u32 rx_packets_2048_to_4095_bytes_lo;
857 u32 rx_packets_2048_to_4095_bytes_hi;
858 u32 rx_packets_4096_to_8191_bytes_lo;
859 u32 rx_packets_4096_to_8191_bytes_hi;
860 u32 rx_packets_8192_to_9216_bytes_lo;
861 u32 rx_packets_8192_to_9216_bytes_hi;
862};
863
864struct pport_stats_params {
865 u16 pport_num;
866 u8 rsvd;
867 u8 reset_stats;
868};
869
870struct lancer_cmd_req_pport_stats {
871 struct be_cmd_req_hdr hdr;
872 union {
873 struct pport_stats_params params;
874 u8 rsvd[sizeof(struct lancer_cmd_pport_stats)];
875 } cmd_params;
876};
877
878struct lancer_cmd_resp_pport_stats {
879 struct be_cmd_resp_hdr hdr;
880 struct lancer_cmd_pport_stats pport_stats;
881};
882
883static inline struct lancer_cmd_pport_stats*
884 pport_stats_from_cmd(struct be_adapter *adapter)
885{
886 struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
887 return &cmd->pport_stats;
888}
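
/* Hedged usage sketch: pair pport_stats_from_cmd() with make_64bit_val()
 * to report one of the Lancer lo/hi counter pairs as a 64-bit value.
 */
static inline u64 lancer_rx_bytes_example(struct be_adapter *adapter)
{
	struct lancer_cmd_pport_stats *s = pport_stats_from_cmd(adapter);

	return make_64bit_val(s->rx_bytes_hi, s->rx_bytes_lo);
}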
889
890struct be_cmd_req_get_cntl_addnl_attribs {
891 struct be_cmd_req_hdr hdr;
892 u8 rsvd[8];
893};
894
895struct be_cmd_resp_get_cntl_addnl_attribs {
896 struct be_cmd_resp_hdr hdr;
897 u16 ipl_file_number;
898 u8 ipl_file_version;
899 u8 rsvd0;
900 u8 on_die_temperature; /* in degrees centigrade*/
901 u8 rsvd1[3];
902};
903
904struct be_cmd_req_vlan_config {
905 struct be_cmd_req_hdr hdr;
906 u8 interface_id;
907 u8 promiscuous;
908 u8 untagged;
909 u8 num_vlan;
910 u16 normal_vlan[64];
911} __packed;
912
913/******************** Multicast MAC Config *******************/
914#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
915struct macaddr {
916 u8 byte[ETH_ALEN];
917};
918
919struct be_cmd_req_mcast_mac_config {
920 struct be_cmd_req_hdr hdr;
921 u16 num_mac;
922 u8 promiscuous;
923 u8 interface_id;
924 struct macaddr mac[BE_MAX_MC];
925} __packed;
926
927/******************* RX FILTER ******************************/
928struct be_cmd_req_rx_filter {
929 struct be_cmd_req_hdr hdr;
930 u32 global_flags_mask;
931 u32 global_flags;
932 u32 if_flags_mask;
933 u32 if_flags;
934 u32 if_id;
935 u32 multicast_num;
936 struct macaddr mac[BE_MAX_MC];
937};
938
939
940/******************** Link Status Query *******************/
941struct be_cmd_req_link_status {
942 struct be_cmd_req_hdr hdr;
943 u32 rsvd;
944};
945
946enum {
947 PHY_LINK_DUPLEX_NONE = 0x0,
948 PHY_LINK_DUPLEX_HALF = 0x1,
949 PHY_LINK_DUPLEX_FULL = 0x2
950};
951
952enum {
953 PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
954 PHY_LINK_SPEED_10MBPS = 0x1,
955 PHY_LINK_SPEED_100MBPS = 0x2,
956 PHY_LINK_SPEED_1GBPS = 0x3,
957 PHY_LINK_SPEED_10GBPS = 0x4
958};
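
/* Hedged conversion sketch: map the PHY_LINK_SPEED_* encoding to Mb/s for
 * reporting; a return of 0 mirrors PHY_LINK_SPEED_ZERO (no link).
 */
static inline u32 be_link_speed_to_mbps_example(u8 phy_speed)
{
	switch (phy_speed) {
	case PHY_LINK_SPEED_10MBPS:	return 10;
	case PHY_LINK_SPEED_100MBPS:	return 100;
	case PHY_LINK_SPEED_1GBPS:	return 1000;
	case PHY_LINK_SPEED_10GBPS:	return 10000;
	default:			return 0;
	}
}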
959
960struct be_cmd_resp_link_status {
961 struct be_cmd_resp_hdr hdr;
962 u8 physical_port;
963 u8 mac_duplex;
964 u8 mac_speed;
965 u8 mac_fault;
966 u8 mgmt_mac_duplex;
967 u8 mgmt_mac_speed;
968 u16 link_speed;
969 u32 rsvd0;
970} __packed;
971
972/******************** Port Identification ***************************/
973/* Identifies the type of port attached to NIC */
974struct be_cmd_req_port_type {
975 struct be_cmd_req_hdr hdr;
976 u32 page_num;
977 u32 port;
978};
979
980enum {
981 TR_PAGE_A0 = 0xa0,
982 TR_PAGE_A2 = 0xa2
983};
984
985struct be_cmd_resp_port_type {
986 struct be_cmd_resp_hdr hdr;
987 u32 page_num;
988 u32 port;
989 struct data {
990 u8 identifier;
991 u8 identifier_ext;
992 u8 connector;
993 u8 transceiver[8];
994 u8 rsvd0[3];
995 u8 length_km;
996 u8 length_hm;
997 u8 length_om1;
998 u8 length_om2;
999 u8 length_cu;
1000 u8 length_cu_m;
1001 u8 vendor_name[16];
1002 u8 rsvd;
1003 u8 vendor_oui[3];
1004 u8 vendor_pn[16];
1005 u8 vendor_rev[4];
1006 } data;
1007};
1008
1009/******************** Get FW Version *******************/
1010struct be_cmd_req_get_fw_version {
1011 struct be_cmd_req_hdr hdr;
1012 u8 rsvd0[FW_VER_LEN];
1013 u8 rsvd1[FW_VER_LEN];
1014} __packed;
1015
1016struct be_cmd_resp_get_fw_version {
1017 struct be_cmd_resp_hdr hdr;
1018 u8 firmware_version_string[FW_VER_LEN];
1019 u8 fw_on_flash_version_string[FW_VER_LEN];
1020} __packed;
1021
1022/******************** Set Flow Control *******************/
1023struct be_cmd_req_set_flow_control {
1024 struct be_cmd_req_hdr hdr;
1025 u16 tx_flow_control;
1026 u16 rx_flow_control;
1027} __packed;
1028
1029/******************** Get Flow Control *******************/
1030struct be_cmd_req_get_flow_control {
1031 struct be_cmd_req_hdr hdr;
1032 u32 rsvd;
1033};
1034
1035struct be_cmd_resp_get_flow_control {
1036 struct be_cmd_resp_hdr hdr;
1037 u16 tx_flow_control;
1038 u16 rx_flow_control;
1039} __packed;
1040
1041/******************** Modify EQ Delay *******************/
1042struct be_cmd_req_modify_eq_delay {
1043 struct be_cmd_req_hdr hdr;
1044 u32 num_eq;
1045 struct {
1046 u32 eq_id;
1047 u32 phase;
1048 u32 delay_multiplier;
1049 } delay[8];
1050} __packed;
1051
1052struct be_cmd_resp_modify_eq_delay {
1053 struct be_cmd_resp_hdr hdr;
1054 u32 rsvd0;
1055} __packed;
1056
1057/******************** Get FW Config *******************/
1058#define BE_FUNCTION_CAPS_RSS 0x2
1059struct be_cmd_req_query_fw_cfg {
1060 struct be_cmd_req_hdr hdr;
1061 u32 rsvd[31];
1062};
1063
1064struct be_cmd_resp_query_fw_cfg {
1065 struct be_cmd_resp_hdr hdr;
1066 u32 be_config_number;
1067 u32 asic_revision;
1068 u32 phys_port;
1069 u32 function_mode;
1070 u32 rsvd[26];
1071 u32 function_caps;
1072};
1073
1074/******************** RSS Config *******************/
1075/* RSS types */
1076#define RSS_ENABLE_NONE 0x0
1077#define RSS_ENABLE_IPV4 0x1
1078#define RSS_ENABLE_TCP_IPV4 0x2
1079#define RSS_ENABLE_IPV6 0x4
1080#define RSS_ENABLE_TCP_IPV6 0x8
1081
1082struct be_cmd_req_rss_config {
1083 struct be_cmd_req_hdr hdr;
1084 u32 if_id;
1085 u16 enable_rss;
1086 u16 cpu_table_size_log2;
1087 u32 hash[10];
1088 u8 cpu_table[128];
1089 u8 flush;
1090 u8 rsvd0[3];
1091};
1092
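
A minimal, hypothetical sketch (not part of this patch) of how a caller might fill this request. It assumes enable_rss takes an OR of the RSS_ENABLE_* flags above, that cpu_table_size_log2 holds the log2 of the number of valid cpu_table entries, and that hash[] carries the 40-byte hash key:

	static void fill_rss_req_sketch(struct be_cmd_req_rss_config *req, u32 if_id,
					const u8 *rsstable, u16 table_size)
	{
		req->if_id = cpu_to_le32(if_id);
		req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
		/* e.g. table_size == 128 -> cpu_table_size_log2 == 7 */
		req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
		get_random_bytes(req->hash, sizeof(req->hash));	/* hash key */
		memcpy(req->cpu_table, rsstable, table_size);	/* queue-id table */
		req->flush = 1;
	}
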
1093/******************** Port Beacon ***************************/
1094
1095#define BEACON_STATE_ENABLED 0x1
1096#define BEACON_STATE_DISABLED 0x0
1097
1098struct be_cmd_req_enable_disable_beacon {
1099 struct be_cmd_req_hdr hdr;
1100 u8 port_num;
1101 u8 beacon_state;
1102 u8 beacon_duration;
1103 u8 status_duration;
1104} __packed;
1105
1106struct be_cmd_resp_enable_disable_beacon {
1107 struct be_cmd_resp_hdr resp_hdr;
1108 u32 rsvd0;
1109} __packed;
1110
1111struct be_cmd_req_get_beacon_state {
1112 struct be_cmd_req_hdr hdr;
1113 u8 port_num;
1114 u8 rsvd0;
1115 u16 rsvd1;
1116} __packed;
1117
1118struct be_cmd_resp_get_beacon_state {
1119 struct be_cmd_resp_hdr resp_hdr;
1120 u8 beacon_state;
1121 u8 rsvd0[3];
1122} __packed;
1123
1124/****************** Firmware Flash ******************/
1125struct flashrom_params {
1126 u32 op_code;
1127 u32 op_type;
1128 u32 data_buf_size;
1129 u32 offset;
1130 u8 data_buf[4];
1131};
1132
1133struct be_cmd_write_flashrom {
1134 struct be_cmd_req_hdr hdr;
1135 struct flashrom_params params;
1136};
1137
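
As a sketch only (this helper is hypothetical), a flash-write request might be filled as below; op_code is assumed to take one of the FLASHROM_OPER_* values and op_type an IMG_TYPE_* component code, both defined in be_hw.h later in this patch:

	static void fill_flashrom_req_sketch(struct be_cmd_write_flashrom *req,
					     u32 oper, u32 img_type, u32 buf_size)
	{
		req->params.op_code = cpu_to_le32(oper);	/* FLASHROM_OPER_FLASH etc. */
		req->params.op_type = cpu_to_le32(img_type);	/* IMG_TYPE_* component */
		req->params.data_buf_size = cpu_to_le32(buf_size);
		req->params.offset = 0;	/* assumed: byte offset within the component */
	}
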
1138/**************** Lancer Firmware Flash ************/
1139struct amap_lancer_write_obj_context {
1140 u8 write_length[24];
1141 u8 reserved1[7];
1142 u8 eof;
1143} __packed;
1144
1145struct lancer_cmd_req_write_object {
1146 struct be_cmd_req_hdr hdr;
1147 u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
1148 u32 write_offset;
1149 u8 object_name[104];
1150 u32 descriptor_count;
1151 u32 buf_len;
1152 u32 addr_low;
1153 u32 addr_high;
1154};
1155
1156struct lancer_cmd_resp_write_object {
1157 u8 opcode;
1158 u8 subsystem;
1159 u8 rsvd1[2];
1160 u8 status;
1161 u8 additional_status;
1162 u8 rsvd2[2];
1163 u32 resp_len;
1164 u32 actual_resp_len;
1165 u32 actual_write_len;
1166};
1167
1168/************************ WOL *******************************/
1169struct be_cmd_req_acpi_wol_magic_config {
1170 struct be_cmd_req_hdr hdr;
1171 u32 rsvd0[145];
1172 u8 magic_mac[6];
1173 u8 rsvd2[2];
1174} __packed;
1175
1176/********************** LoopBack test *********************/
1177struct be_cmd_req_loopback_test {
1178 struct be_cmd_req_hdr hdr;
1179 u32 loopback_type;
1180 u32 num_pkts;
1181 u64 pattern;
1182 u32 src_port;
1183 u32 dest_port;
1184 u32 pkt_size;
1185};
1186
1187struct be_cmd_resp_loopback_test {
1188 struct be_cmd_resp_hdr resp_hdr;
1189 u32 status;
1190 u32 num_txfer;
1191 u32 num_rx;
1192 u32 miscomp_off;
1193 u32 ticks_compl;
1194};
1195
1196struct be_cmd_req_set_lmode {
1197 struct be_cmd_req_hdr hdr;
1198 u8 src_port;
1199 u8 dest_port;
1200 u8 loopback_type;
1201 u8 loopback_state;
1202};
1203
1204struct be_cmd_resp_set_lmode {
1205 struct be_cmd_resp_hdr resp_hdr;
1206 u8 rsvd0[4];
1207};
1208
1209/********************** DDR DMA test *********************/
1210struct be_cmd_req_ddrdma_test {
1211 struct be_cmd_req_hdr hdr;
1212 u64 pattern;
1213 u32 byte_count;
1214 u32 rsvd0;
1215 u8 snd_buff[4096];
1216 u8 rsvd1[4096];
1217};
1218
1219struct be_cmd_resp_ddrdma_test {
1220 struct be_cmd_resp_hdr hdr;
1221 u64 pattern;
1222 u32 byte_cnt;
1223 u32 snd_err;
1224 u8 rsvd0[4096];
1225 u8 rcv_buff[4096];
1226};
1227
1228/*********************** SEEPROM Read ***********************/
1229
1230#define BE_READ_SEEPROM_LEN 1024
1231struct be_cmd_req_seeprom_read {
1232 struct be_cmd_req_hdr hdr;
1233 u8 rsvd0[BE_READ_SEEPROM_LEN];
1234};
1235
1236struct be_cmd_resp_seeprom_read {
1237 struct be_cmd_req_hdr hdr;
1238 u8 seeprom_data[BE_READ_SEEPROM_LEN];
1239};
1240
1241enum {
1242 PHY_TYPE_CX4_10GB = 0,
1243 PHY_TYPE_XFP_10GB,
1244 PHY_TYPE_SFP_1GB,
1245 PHY_TYPE_SFP_PLUS_10GB,
1246 PHY_TYPE_KR_10GB,
1247 PHY_TYPE_KX4_10GB,
1248 PHY_TYPE_BASET_10GB,
1249 PHY_TYPE_BASET_1GB,
1250 PHY_TYPE_DISABLED = 255
1251};
1252
1253struct be_cmd_req_get_phy_info {
1254 struct be_cmd_req_hdr hdr;
1255 u8 rsvd0[24];
1256};
1257struct be_cmd_resp_get_phy_info {
1258 struct be_cmd_req_hdr hdr;
1259 u16 phy_type;
1260 u16 interface_type;
1261 u32 misc_params;
1262 u32 future_use[4];
1263};
1264
1265/*********************** Set QOS ***********************/
1266
1267#define BE_QOS_BITS_NIC 1
1268
1269struct be_cmd_req_set_qos {
1270 struct be_cmd_req_hdr hdr;
1271 u32 valid_bits;
1272 u32 max_bps_nic;
1273 u32 rsvd[7];
1274};
1275
1276struct be_cmd_resp_set_qos {
1277 struct be_cmd_resp_hdr hdr;
1278 u32 rsvd;
1279};
1280
1281/*********************** Controller Attributes ***********************/
1282struct be_cmd_req_cntl_attribs {
1283 struct be_cmd_req_hdr hdr;
1284};
1285
1286struct be_cmd_resp_cntl_attribs {
1287 struct be_cmd_resp_hdr hdr;
1288 struct mgmt_controller_attrib attribs;
1289};
1290
1291/*********************** Set driver function ***********************/
1292#define CAPABILITY_SW_TIMESTAMPS 2
1293#define CAPABILITY_BE3_NATIVE_ERX_API 4
1294
1295struct be_cmd_req_set_func_cap {
1296 struct be_cmd_req_hdr hdr;
1297 u32 valid_cap_flags;
1298 u32 cap_flags;
1299 u8 rsvd[212];
1300};
1301
1302struct be_cmd_resp_set_func_cap {
1303 struct be_cmd_resp_hdr hdr;
1304 u32 valid_cap_flags;
1305 u32 cap_flags;
1306 u8 rsvd[212];
1307};
1308
1309/*************** HW Stats Get v1 **********************************/
1310#define BE_TXP_SW_SZ 48
1311struct be_port_rxf_stats_v1 {
1312 u32 rsvd0[12];
1313 u32 rx_crc_errors;
1314 u32 rx_alignment_symbol_errors;
1315 u32 rx_pause_frames;
1316 u32 rx_priority_pause_frames;
1317 u32 rx_control_frames;
1318 u32 rx_in_range_errors;
1319 u32 rx_out_range_errors;
1320 u32 rx_frame_too_long;
1321 u32 rx_address_match_errors;
1322 u32 rx_dropped_too_small;
1323 u32 rx_dropped_too_short;
1324 u32 rx_dropped_header_too_small;
1325 u32 rx_dropped_tcp_length;
1326 u32 rx_dropped_runt;
1327 u32 rsvd1[10];
1328 u32 rx_ip_checksum_errs;
1329 u32 rx_tcp_checksum_errs;
1330 u32 rx_udp_checksum_errs;
1331 u32 rsvd2[7];
1332 u32 rx_switched_unicast_packets;
1333 u32 rx_switched_multicast_packets;
1334 u32 rx_switched_broadcast_packets;
1335 u32 rsvd3[3];
1336 u32 tx_pauseframes;
1337 u32 tx_priority_pauseframes;
1338 u32 tx_controlframes;
1339 u32 rsvd4[10];
1340 u32 rxpp_fifo_overflow_drop;
1341 u32 rx_input_fifo_overflow_drop;
1342 u32 pmem_fifo_overflow_drop;
1343 u32 jabber_events;
1344 u32 rsvd5[3];
1345};
1346
1347
1348struct be_rxf_stats_v1 {
1349 struct be_port_rxf_stats_v1 port[4];
1350 u32 rsvd0[2];
1351 u32 rx_drops_no_pbuf;
1352 u32 rx_drops_no_txpb;
1353 u32 rx_drops_no_erx_descr;
1354 u32 rx_drops_no_tpre_descr;
1355 u32 rsvd1[6];
1356 u32 rx_drops_too_many_frags;
1357 u32 rx_drops_invalid_ring;
1358 u32 forwarded_packets;
1359 u32 rx_drops_mtu;
1360 u32 rsvd2[14];
1361};
1362
1363struct be_erx_stats_v1 {
1364 u32 rx_drops_no_fragments[68]; /* dwords 0 to 67 */
1365 u32 rsvd[4];
1366};
1367
1368struct be_hw_stats_v1 {
1369 struct be_rxf_stats_v1 rxf;
1370 u32 rsvd0[BE_TXP_SW_SZ];
1371 struct be_erx_stats_v1 erx;
1372 struct be_pmem_stats pmem;
1373 u32 rsvd1[3];
1374};
1375
1376struct be_cmd_req_get_stats_v1 {
1377 struct be_cmd_req_hdr hdr;
1378 u8 rsvd[sizeof(struct be_hw_stats_v1)];
1379};
1380
1381struct be_cmd_resp_get_stats_v1 {
1382 struct be_cmd_resp_hdr hdr;
1383 struct be_hw_stats_v1 hw_stats;
1384};
1385
1386static inline void *
1387hw_stats_from_cmd(struct be_adapter *adapter)
1388{
1389 if (adapter->generation == BE_GEN3) {
1390 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
1391
1392 return &cmd->hw_stats;
1393 } else {
1394 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
1395
1396 return &cmd->hw_stats;
1397 }
1398}
1399
1400static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
1401{
1402 if (adapter->generation == BE_GEN3) {
1403 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1404 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
1405
1406 return &rxf_stats->port[adapter->port_num];
1407 } else {
1408 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1409 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
1410
1411 return &rxf_stats->port[adapter->port_num];
1412 }
1413}
1414
1415static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
1416{
1417 if (adapter->generation == BE_GEN3) {
1418 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1419
1420 return &hw_stats->rxf;
1421 } else {
1422 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1423
1424 return &hw_stats->rxf;
1425 }
1426}
1427
1428static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1429{
1430 if (adapter->generation == BE_GEN3) {
1431 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1432
1433 return &hw_stats->erx;
1434 } else {
1435 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1436
1437 return &hw_stats->erx;
1438 }
1439}
1440
1441static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
1442{
1443 if (adapter->generation == BE_GEN3) {
1444 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1445
1446 return &hw_stats->pmem;
1447 } else {
1448 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1449
1450 return &hw_stats->pmem;
1451 }
1452}
1453
1454extern int be_pci_fnum_get(struct be_adapter *adapter);
1455extern int be_cmd_POST(struct be_adapter *adapter);
1456extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1457 u8 type, bool permanent, u32 if_handle);
1458extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1459 u32 if_id, u32 *pmac_id, u32 domain);
1460extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1461 u32 pmac_id, u32 domain);
1462extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1463 u32 en_flags, u8 *mac, bool pmac_invalid,
1464 u32 *if_handle, u32 *pmac_id, u32 domain);
1465extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
1466 u32 domain);
1467extern int be_cmd_eq_create(struct be_adapter *adapter,
1468 struct be_queue_info *eq, int eq_delay);
1469extern int be_cmd_cq_create(struct be_adapter *adapter,
1470 struct be_queue_info *cq, struct be_queue_info *eq,
1471 bool sol_evts, bool no_delay,
1472 int num_cqe_dma_coalesce);
1473extern int be_cmd_mccq_create(struct be_adapter *adapter,
1474 struct be_queue_info *mccq,
1475 struct be_queue_info *cq);
1476extern int be_cmd_txq_create(struct be_adapter *adapter,
1477 struct be_queue_info *txq,
1478 struct be_queue_info *cq);
1479extern int be_cmd_rxq_create(struct be_adapter *adapter,
1480 struct be_queue_info *rxq, u16 cq_id,
1481 u16 frag_size, u16 max_frame_size, u32 if_id,
1482 u32 rss, u8 *rss_id);
1483extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1484 int type);
1485extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
1486 struct be_queue_info *q);
1487extern int be_cmd_link_status_query(struct be_adapter *adapter,
1488 bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
1489extern int be_cmd_reset(struct be_adapter *adapter);
1490extern int be_cmd_get_stats(struct be_adapter *adapter,
1491 struct be_dma_mem *nonemb_cmd);
1492extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1493 struct be_dma_mem *nonemb_cmd);
1494extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
1495
1496extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
1497extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
1498 u16 *vtag_array, u32 num, bool untagged,
1499 bool promiscuous);
1500extern int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en);
1501extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1502 struct net_device *netdev, struct be_dma_mem *mem);
1503extern int be_cmd_set_flow_control(struct be_adapter *adapter,
1504 u32 tx_fc, u32 rx_fc);
1505extern int be_cmd_get_flow_control(struct be_adapter *adapter,
1506 u32 *tx_fc, u32 *rx_fc);
1507extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
1508 u32 *port_num, u32 *function_mode, u32 *function_caps);
1509extern int be_cmd_reset_function(struct be_adapter *adapter);
1510extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1511 u16 table_size);
1512extern int be_process_mcc(struct be_adapter *adapter, int *status);
1513extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
1514 u8 port_num, u8 beacon, u8 status, u8 state);
1515extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
1516 u8 port_num, u32 *state);
1517extern int be_cmd_write_flashrom(struct be_adapter *adapter,
1518 struct be_dma_mem *cmd, u32 flash_oper,
1519 u32 flash_opcode, u32 buf_size);
1520extern int lancer_cmd_write_object(struct be_adapter *adapter,
1521 struct be_dma_mem *cmd,
1522 u32 data_size, u32 data_offset,
1523 const char *obj_name,
1524 u32 *data_written, u8 *addn_status);
1525int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1526 int offset);
1527extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1528 struct be_dma_mem *nonemb_cmd);
1529extern int be_cmd_fw_init(struct be_adapter *adapter);
1530extern int be_cmd_fw_clean(struct be_adapter *adapter);
1531extern void be_async_mcc_enable(struct be_adapter *adapter);
1532extern void be_async_mcc_disable(struct be_adapter *adapter);
1533extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1534 u32 loopback_type, u32 pkt_size,
1535 u32 num_pkts, u64 pattern);
1536extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1537 u32 byte_cnt, struct be_dma_mem *cmd);
1538extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1539 struct be_dma_mem *nonemb_cmd);
1540extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1541 u8 loopback_type, u8 enable);
1542extern int be_cmd_get_phy_info(struct be_adapter *adapter,
1543 struct be_dma_mem *cmd);
1544extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1545extern void be_detect_dump_ue(struct be_adapter *adapter);
1546extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1547extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1548extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1549extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1550extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1551
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
new file mode 100644
index 00000000000..7fd8130d86e
--- /dev/null
+++ b/drivers/net/benet/be_ethtool.c
@@ -0,0 +1,746 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include "be.h"
19#include "be_cmds.h"
20#include <linux/ethtool.h>
21
22struct be_ethtool_stat {
23 char desc[ETH_GSTRING_LEN];
24 int type;
25 int size;
26 int offset;
27};
28
29enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
30 DRVSTAT};
31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
32 offsetof(_struct, field)
33#define NETSTAT_INFO(field) #field, NETSTAT,\
34 FIELDINFO(struct net_device_stats,\
35 field)
36#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
37 FIELDINFO(struct be_tx_stats, field)
38#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
39 FIELDINFO(struct be_rx_stats, field)
40#define ERXSTAT_INFO(field) #field, ERXSTAT,\
41 FIELDINFO(struct be_erx_stats_v1, field)
42#define DRVSTAT_INFO(field) #field, DRVSTAT,\
43 FIELDINFO(struct be_drv_stats, \
44 field)
45
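
These macros build each table entry from a stringified field name plus the field's size and byte offset, so the stats code below can fetch any counter generically. For example, {DRVSTAT_INFO(rx_crc_errors)} expands (roughly) to:

	{ "rx_crc_errors", DRVSTAT,
	  FIELD_SIZEOF(struct be_drv_stats, rx_crc_errors),
	  offsetof(struct be_drv_stats, rx_crc_errors) }

be_get_ethtool_stats() then adds the recorded offset to the right base pointer and reads size bytes, with no per-field code.
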
46static const struct be_ethtool_stat et_stats[] = {
47 {NETSTAT_INFO(rx_packets)},
48 {NETSTAT_INFO(tx_packets)},
49 {NETSTAT_INFO(rx_bytes)},
50 {NETSTAT_INFO(tx_bytes)},
51 {NETSTAT_INFO(rx_errors)},
52 {NETSTAT_INFO(tx_errors)},
53 {NETSTAT_INFO(rx_dropped)},
54 {NETSTAT_INFO(tx_dropped)},
55 {DRVSTAT_INFO(be_tx_events)},
56 {DRVSTAT_INFO(rx_crc_errors)},
57 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
58 {DRVSTAT_INFO(rx_pause_frames)},
59 {DRVSTAT_INFO(rx_control_frames)},
60 {DRVSTAT_INFO(rx_in_range_errors)},
61 {DRVSTAT_INFO(rx_out_range_errors)},
62 {DRVSTAT_INFO(rx_frame_too_long)},
63 {DRVSTAT_INFO(rx_address_match_errors)},
64 {DRVSTAT_INFO(rx_dropped_too_small)},
65 {DRVSTAT_INFO(rx_dropped_too_short)},
66 {DRVSTAT_INFO(rx_dropped_header_too_small)},
67 {DRVSTAT_INFO(rx_dropped_tcp_length)},
68 {DRVSTAT_INFO(rx_dropped_runt)},
69 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
70 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
71 {DRVSTAT_INFO(rx_ip_checksum_errs)},
72 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
73 {DRVSTAT_INFO(rx_udp_checksum_errs)},
74 {DRVSTAT_INFO(rx_switched_unicast_packets)},
75 {DRVSTAT_INFO(rx_switched_multicast_packets)},
76 {DRVSTAT_INFO(rx_switched_broadcast_packets)},
77 {DRVSTAT_INFO(tx_pauseframes)},
78 {DRVSTAT_INFO(tx_controlframes)},
79 {DRVSTAT_INFO(rx_priority_pause_frames)},
80 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
81 {DRVSTAT_INFO(jabber_events)},
82 {DRVSTAT_INFO(rx_drops_no_pbuf)},
83 {DRVSTAT_INFO(rx_drops_no_txpb)},
84 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
85 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
86 {DRVSTAT_INFO(rx_drops_too_many_frags)},
87 {DRVSTAT_INFO(rx_drops_invalid_ring)},
88 {DRVSTAT_INFO(forwarded_packets)},
89 {DRVSTAT_INFO(rx_drops_mtu)},
90 {DRVSTAT_INFO(eth_red_drops)},
91 {DRVSTAT_INFO(be_on_die_temperature)}
92};
93#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
94
95/* Stats related to multi RX queues */
96static const struct be_ethtool_stat et_rx_stats[] = {
97 {DRVSTAT_RX_INFO(rx_bytes)},
98 {DRVSTAT_RX_INFO(rx_pkts)},
99 {DRVSTAT_RX_INFO(rx_rate)},
100 {DRVSTAT_RX_INFO(rx_polls)},
101 {DRVSTAT_RX_INFO(rx_events)},
102 {DRVSTAT_RX_INFO(rx_compl)},
103 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
104 {DRVSTAT_RX_INFO(rx_post_fail)},
105 {DRVSTAT_RX_INFO(rx_dropped)},
106 {ERXSTAT_INFO(rx_drops_no_fragments)}
107};
108#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
109
110/* Stats related to multi TX queues */
111static const struct be_ethtool_stat et_tx_stats[] = {
112 {DRVSTAT_TX_INFO(be_tx_rate)},
113 {DRVSTAT_TX_INFO(be_tx_reqs)},
114 {DRVSTAT_TX_INFO(be_tx_wrbs)},
115 {DRVSTAT_TX_INFO(be_tx_stops)},
116 {DRVSTAT_TX_INFO(be_tx_compl)}
117};
118#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
119
120static const char et_self_tests[][ETH_GSTRING_LEN] = {
121 "MAC Loopback test",
122 "PHY Loopback test",
123 "External Loopback test",
124 "DDR DMA test",
125 "Link test"
126};
127
128#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
129#define BE_MAC_LOOPBACK 0x0
130#define BE_PHY_LOOPBACK 0x1
131#define BE_ONE_PORT_EXT_LOOPBACK 0x2
132#define BE_NO_LOOPBACK 0xff
133
134static void
135be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
136{
137 struct be_adapter *adapter = netdev_priv(netdev);
138
139 strcpy(drvinfo->driver, DRV_NAME);
140 strcpy(drvinfo->version, DRV_VER);
141 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
142 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
143 drvinfo->testinfo_len = 0;
144 drvinfo->regdump_len = 0;
145 drvinfo->eedump_len = 0;
146}
147
148static int
149be_get_reg_len(struct net_device *netdev)
150{
151 struct be_adapter *adapter = netdev_priv(netdev);
152 u32 log_size = 0;
153
154 if (be_physfn(adapter))
155 be_cmd_get_reg_len(adapter, &log_size);
156
157 return log_size;
158}
159
160static void
161be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
162{
163 struct be_adapter *adapter = netdev_priv(netdev);
164
165 if (be_physfn(adapter)) {
166 memset(buf, 0, regs->len);
167 be_cmd_get_regs(adapter, regs->len, buf);
168 }
169}
170
171static int
172be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
173{
174 struct be_adapter *adapter = netdev_priv(netdev);
175 struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
176 struct be_eq_obj *tx_eq = &adapter->tx_eq;
177
178 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
179 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
180 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
181
182 coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
183 coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
184 coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
185
186 coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
187 coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
188
189 return 0;
190}
191
192/*
193 * This routine is used to set interrupt coalescing delay
194 */
195static int
196be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
197{
198 struct be_adapter *adapter = netdev_priv(netdev);
199 struct be_rx_obj *rxo;
200 struct be_eq_obj *rx_eq;
201 struct be_eq_obj *tx_eq = &adapter->tx_eq;
202 u32 rx_max, rx_min, rx_cur;
203 int status = 0, i;
204 u32 tx_cur;
205
206 if (coalesce->use_adaptive_tx_coalesce == 1)
207 return -EINVAL;
208
209 for_all_rx_queues(adapter, rxo, i) {
210 rx_eq = &rxo->rx_eq;
211
212 if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
213 rx_eq->cur_eqd = 0;
214 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
215
216 rx_max = coalesce->rx_coalesce_usecs_high;
217 rx_min = coalesce->rx_coalesce_usecs_low;
218 rx_cur = coalesce->rx_coalesce_usecs;
219
220 if (rx_eq->enable_aic) {
221 if (rx_max > BE_MAX_EQD)
222 rx_max = BE_MAX_EQD;
223 if (rx_min > rx_max)
224 rx_min = rx_max;
225 rx_eq->max_eqd = rx_max;
226 rx_eq->min_eqd = rx_min;
227 if (rx_eq->cur_eqd > rx_max)
228 rx_eq->cur_eqd = rx_max;
229 if (rx_eq->cur_eqd < rx_min)
230 rx_eq->cur_eqd = rx_min;
231 } else {
232 if (rx_cur > BE_MAX_EQD)
233 rx_cur = BE_MAX_EQD;
234 if (rx_eq->cur_eqd != rx_cur) {
235 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
236 rx_cur);
237 if (!status)
238 rx_eq->cur_eqd = rx_cur;
239 }
240 }
241 }
242
243 tx_cur = coalesce->tx_coalesce_usecs;
244
245 if (tx_cur > BE_MAX_EQD)
246 tx_cur = BE_MAX_EQD;
247 if (tx_eq->cur_eqd != tx_cur) {
248 status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
249 if (!status)
250 tx_eq->cur_eqd = tx_cur;
251 }
252
253 return 0;
254}
255
256static void
257be_get_ethtool_stats(struct net_device *netdev,
258 struct ethtool_stats *stats, uint64_t *data)
259{
260 struct be_adapter *adapter = netdev_priv(netdev);
261 struct be_rx_obj *rxo;
262 struct be_tx_obj *txo;
263 void *p = NULL;
264 int i, j, base;
265
266 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
267 switch (et_stats[i].type) {
268 case NETSTAT:
269 p = &netdev->stats;
270 break;
271 case DRVSTAT:
272 p = &adapter->drv_stats;
273 break;
274 }
275
276 p = (u8 *)p + et_stats[i].offset;
277 data[i] = (et_stats[i].size == sizeof(u64)) ?
278 *(u64 *)p: *(u32 *)p;
279 }
280
281 base = ETHTOOL_STATS_NUM;
282 for_all_rx_queues(adapter, rxo, j) {
283 for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
284 switch (et_rx_stats[i].type) {
285 case DRVSTAT_RX:
286 p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
287 break;
288 case ERXSTAT:
289 p = (u32 *)be_erx_stats_from_cmd(adapter) +
290 rxo->q.id;
291 break;
292 }
293 data[base + j * ETHTOOL_RXSTATS_NUM + i] =
294 (et_rx_stats[i].size == sizeof(u64)) ?
295 *(u64 *)p: *(u32 *)p;
296 }
297 }
298
299 base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
300 for_all_tx_queues(adapter, txo, j) {
301 for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
302 p = (u8 *)&txo->stats + et_tx_stats[i].offset;
303 data[base + j * ETHTOOL_TXSTATS_NUM + i] =
304 (et_tx_stats[i].size == sizeof(u64)) ?
305 *(u64 *)p: *(u32 *)p;
306 }
307 }
308}
309
310static void
311be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
312 uint8_t *data)
313{
314 struct be_adapter *adapter = netdev_priv(netdev);
315 int i, j;
316
317 switch (stringset) {
318 case ETH_SS_STATS:
319 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
320 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
321 data += ETH_GSTRING_LEN;
322 }
323 for (i = 0; i < adapter->num_rx_qs; i++) {
324 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
325 sprintf(data, "rxq%d: %s", i,
326 et_rx_stats[j].desc);
327 data += ETH_GSTRING_LEN;
328 }
329 }
330 for (i = 0; i < adapter->num_tx_qs; i++) {
331 for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
332 sprintf(data, "txq%d: %s", i,
333 et_tx_stats[j].desc);
334 data += ETH_GSTRING_LEN;
335 }
336 }
337 break;
338 case ETH_SS_TEST:
339 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
340 memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
341 data += ETH_GSTRING_LEN;
342 }
343 break;
344 }
345}
346
347static int be_get_sset_count(struct net_device *netdev, int stringset)
348{
349 struct be_adapter *adapter = netdev_priv(netdev);
350
351 switch (stringset) {
352 case ETH_SS_TEST:
353 return ETHTOOL_TESTS_NUM;
354 case ETH_SS_STATS:
355 return ETHTOOL_STATS_NUM +
356 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
357 adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
358 default:
359 return -EINVAL;
360 }
361}
362
363static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
364{
365 struct be_adapter *adapter = netdev_priv(netdev);
366 struct be_dma_mem phy_cmd;
367 struct be_cmd_resp_get_phy_info *resp;
368 u8 mac_speed = 0;
369 u16 link_speed = 0;
370 bool link_up = false;
371 int status;
372 u16 intf_type;
373
374 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
375 status = be_cmd_link_status_query(adapter, &link_up,
376 &mac_speed, &link_speed, 0);
377
378 be_link_status_update(adapter, link_up);
379 /* link_speed is in units of 10 Mbps */
380 if (link_speed) {
381 ethtool_cmd_speed_set(ecmd, link_speed*10);
382 } else {
383 switch (mac_speed) {
384 case PHY_LINK_SPEED_10MBPS:
385 ethtool_cmd_speed_set(ecmd, SPEED_10);
386 break;
387 case PHY_LINK_SPEED_100MBPS:
388 ethtool_cmd_speed_set(ecmd, SPEED_100);
389 break;
390 case PHY_LINK_SPEED_1GBPS:
391 ethtool_cmd_speed_set(ecmd, SPEED_1000);
392 break;
393 case PHY_LINK_SPEED_10GBPS:
394 ethtool_cmd_speed_set(ecmd, SPEED_10000);
395 break;
396 case PHY_LINK_SPEED_ZERO:
397 ethtool_cmd_speed_set(ecmd, 0);
398 break;
399 }
400 }
401
402 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
403 phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
404 phy_cmd.size, &phy_cmd.dma,
405 GFP_KERNEL);
406 if (!phy_cmd.va) {
407 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
408 return -ENOMEM;
409 }
410 status = be_cmd_get_phy_info(adapter, &phy_cmd);
411 if (!status) {
412 resp = phy_cmd.va;
413 intf_type = le16_to_cpu(resp->interface_type);
414
415 switch (intf_type) {
416 case PHY_TYPE_XFP_10GB:
417 case PHY_TYPE_SFP_1GB:
418 case PHY_TYPE_SFP_PLUS_10GB:
419 ecmd->port = PORT_FIBRE;
420 break;
421 default:
422 ecmd->port = PORT_TP;
423 break;
424 }
425
426 switch (intf_type) {
427 case PHY_TYPE_KR_10GB:
428 case PHY_TYPE_KX4_10GB:
429 ecmd->autoneg = AUTONEG_ENABLE;
430 ecmd->transceiver = XCVR_INTERNAL;
431 break;
432 default:
433 ecmd->autoneg = AUTONEG_DISABLE;
434 ecmd->transceiver = XCVR_EXTERNAL;
435 break;
436 }
437 }
438
439 /* Save for future use */
440 adapter->link_speed = ethtool_cmd_speed(ecmd);
441 adapter->port_type = ecmd->port;
442 adapter->transceiver = ecmd->transceiver;
443 adapter->autoneg = ecmd->autoneg;
444 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
445 phy_cmd.dma);
446 } else {
447 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
448 ecmd->port = adapter->port_type;
449 ecmd->transceiver = adapter->transceiver;
450 ecmd->autoneg = adapter->autoneg;
451 }
452
453 ecmd->duplex = DUPLEX_FULL;
454 ecmd->phy_address = adapter->port_num;
455 switch (ecmd->port) {
456 case PORT_FIBRE:
457 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
458 break;
459 case PORT_TP:
460 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
461 break;
462 case PORT_AUI:
463 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
464 break;
465 }
466
467 if (ecmd->autoneg) {
468 ecmd->supported |= SUPPORTED_1000baseT_Full;
469 ecmd->supported |= SUPPORTED_Autoneg;
470 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
471 ADVERTISED_1000baseT_Full);
472 }
473
474 return 0;
475}
476
477static void
478be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
479{
480 struct be_adapter *adapter = netdev_priv(netdev);
481
482 ring->rx_max_pending = adapter->rx_obj[0].q.len;
483 ring->tx_max_pending = adapter->tx_obj[0].q.len;
484
485 ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
486 ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
487}
488
489static void
490be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
491{
492 struct be_adapter *adapter = netdev_priv(netdev);
493
494 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
495 ecmd->autoneg = 0;
496}
497
498static int
499be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
500{
501 struct be_adapter *adapter = netdev_priv(netdev);
502 int status;
503
504 if (ecmd->autoneg != 0)
505 return -EINVAL;
506 adapter->tx_fc = ecmd->tx_pause;
507 adapter->rx_fc = ecmd->rx_pause;
508
509 status = be_cmd_set_flow_control(adapter,
510 adapter->tx_fc, adapter->rx_fc);
511 if (status)
512 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
513
514 return status;
515}
516
517static int
518be_set_phys_id(struct net_device *netdev,
519 enum ethtool_phys_id_state state)
520{
521 struct be_adapter *adapter = netdev_priv(netdev);
522
523 switch (state) {
524 case ETHTOOL_ID_ACTIVE:
525 be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
526 &adapter->beacon_state);
527 return 1; /* cycle on/off once per second */
528
529 case ETHTOOL_ID_ON:
530 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
531 BEACON_STATE_ENABLED);
532 break;
533
534 case ETHTOOL_ID_OFF:
535 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
536 BEACON_STATE_DISABLED);
537 break;
538
539 case ETHTOOL_ID_INACTIVE:
540 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
541 adapter->beacon_state);
542 }
543
544 return 0;
545}
546
547static bool
548be_is_wol_supported(struct be_adapter *adapter)
549{
550 if (!be_physfn(adapter))
551 return false;
552 else
553 return true;
554}
555
556static void
557be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
558{
559 struct be_adapter *adapter = netdev_priv(netdev);
560
561 if (be_is_wol_supported(adapter))
562 wol->supported = WAKE_MAGIC;
563
564 if (adapter->wol)
565 wol->wolopts = WAKE_MAGIC;
566 else
567 wol->wolopts = 0;
568 memset(&wol->sopass, 0, sizeof(wol->sopass));
569}
570
571static int
572be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
573{
574 struct be_adapter *adapter = netdev_priv(netdev);
575
576 if (wol->wolopts & ~WAKE_MAGIC)
577 return -EINVAL;
578
579 if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
580 adapter->wol = true;
581 else
582 adapter->wol = false;
583
584 return 0;
585}
586
587static int
588be_test_ddr_dma(struct be_adapter *adapter)
589{
590 int ret, i;
591 struct be_dma_mem ddrdma_cmd;
592 static const u64 pattern[2] = {
593 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
594 };
595
596 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
597 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
598 &ddrdma_cmd.dma, GFP_KERNEL);
599 if (!ddrdma_cmd.va) {
600 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
601 return -ENOMEM;
602 }
603
604 for (i = 0; i < 2; i++) {
605 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
606 4096, &ddrdma_cmd);
607 if (ret != 0)
608 goto err;
609 }
610
611err:
612 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
613 ddrdma_cmd.dma);
614 return ret;
615}
616
617static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
618 u64 *status)
619{
620 be_cmd_set_loopback(adapter, adapter->hba_port_num,
621 loopback_type, 1);
622 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
623 loopback_type, 1500,
624 2, 0xabc);
625 be_cmd_set_loopback(adapter, adapter->hba_port_num,
626 BE_NO_LOOPBACK, 1);
627 return *status;
628}
629
630static void
631be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
632{
633 struct be_adapter *adapter = netdev_priv(netdev);
634 bool link_up;
635 u8 mac_speed = 0;
636 u16 qos_link_speed = 0;
637
638 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
639
640 if (test->flags & ETH_TEST_FL_OFFLINE) {
641 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
642 &data[0]) != 0) {
643 test->flags |= ETH_TEST_FL_FAILED;
644 }
645 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
646 &data[1]) != 0) {
647 test->flags |= ETH_TEST_FL_FAILED;
648 }
649 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
650 &data[2]) != 0) {
651 test->flags |= ETH_TEST_FL_FAILED;
652 }
653 }
654
655 if (be_test_ddr_dma(adapter) != 0) {
656 data[3] = 1;
657 test->flags |= ETH_TEST_FL_FAILED;
658 }
659
660 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
661 &qos_link_speed, 0) != 0) {
662 test->flags |= ETH_TEST_FL_FAILED;
663 data[4] = -1;
664 } else if (!mac_speed) {
665 test->flags |= ETH_TEST_FL_FAILED;
666 data[4] = 1;
667 }
668}
669
670static int
671be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
672{
673 struct be_adapter *adapter = netdev_priv(netdev);
674 char file_name[ETHTOOL_FLASH_MAX_FILENAME];
675
676 file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
677 strcpy(file_name, efl->data);
678
679 return be_load_fw(adapter, file_name);
680}
681
682static int
683be_get_eeprom_len(struct net_device *netdev)
684{
685 return BE_READ_SEEPROM_LEN;
686}
687
688static int
689be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
690 uint8_t *data)
691{
692 struct be_adapter *adapter = netdev_priv(netdev);
693 struct be_dma_mem eeprom_cmd;
694 struct be_cmd_resp_seeprom_read *resp;
695 int status;
696
697 if (!eeprom->len)
698 return -EINVAL;
699
700 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
701
702 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
703 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
704 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
705 &eeprom_cmd.dma, GFP_KERNEL);
706
707 if (!eeprom_cmd.va) {
708 dev_err(&adapter->pdev->dev,
709 "Memory allocation failure. Could not read eeprom\n");
710 return -ENOMEM;
711 }
712
713 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
714
715 if (!status) {
716 resp = eeprom_cmd.va;
717 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
718 }
719 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
720 eeprom_cmd.dma);
721
722 return status;
723}
724
725const struct ethtool_ops be_ethtool_ops = {
726 .get_settings = be_get_settings,
727 .get_drvinfo = be_get_drvinfo,
728 .get_wol = be_get_wol,
729 .set_wol = be_set_wol,
730 .get_link = ethtool_op_get_link,
731 .get_eeprom_len = be_get_eeprom_len,
732 .get_eeprom = be_read_eeprom,
733 .get_coalesce = be_get_coalesce,
734 .set_coalesce = be_set_coalesce,
735 .get_ringparam = be_get_ringparam,
736 .get_pauseparam = be_get_pauseparam,
737 .set_pauseparam = be_set_pauseparam,
738 .get_strings = be_get_stat_strings,
739 .set_phys_id = be_set_phys_id,
740 .get_sset_count = be_get_sset_count,
741 .get_ethtool_stats = be_get_ethtool_stats,
742 .get_regs_len = be_get_reg_len,
743 .get_regs = be_get_regs,
744 .flash_device = be_do_flash,
745 .self_test = be_self_test,
746};
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
new file mode 100644
index 00000000000..53d658afea2
--- /dev/null
+++ b/drivers/net/benet/be_hw.h
@@ -0,0 +1,503 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18/********* Mailbox door bell *************/
19/* Used for driver communication with the FW.
20 * The software must write this register twice to post any command. First,
21 * it writes the register with hi=1 and the upper bits of the physical address
22 * for the MAILBOX structure. Software must poll the ready bit until this
23 * is acknowledged. Then, software writes the register with hi=0 and the
24 * lower bits of the address. It must poll the ready bit until the command is
25 * complete. Upon completion, the MAILBOX will contain a valid completion
26 * queue entry.
27 */
28#define MPU_MAILBOX_DB_OFFSET 0x160
29#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
30#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
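
A sketch of the two-phase post described above (illustrative only; be_mbox_db_ready_wait() is a hypothetical polling helper, and the exact placement of the address bits is paraphrased):

	static int post_mbox_sketch(u8 __iomem *db, dma_addr_t mbox_pa)
	{
		u32 val;

		/* phase 1: hi=1 plus the upper bits of the mailbox physical addr */
		val = MPU_MAILBOX_DB_HI_MASK | ((upper_32_bits(mbox_pa) >> 2) << 2);
		iowrite32(val, db + MPU_MAILBOX_DB_OFFSET);
		if (be_mbox_db_ready_wait(db))	/* poll MPU_MAILBOX_DB_RDY_MASK */
			return -EIO;

		/* phase 2: hi=0 plus the lower bits of the address */
		val = ((u32)(mbox_pa >> 4)) << 2;
		iowrite32(val, db + MPU_MAILBOX_DB_OFFSET);
		return be_mbox_db_ready_wait(db);	/* completion lands in the MAILBOX */
	}
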
31
32#define MPU_EP_CONTROL 0
33
34/********** MPU semaphore ******************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac
36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31
40
41/* MPU semaphore POST stage values */
42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
43#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46
47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408
51
52#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000
54#define SLIPORT_STATUS_RDY_MASK 0x00800000
55
56
57#define SLI_PORT_CONTROL_IP_MASK 0x08000000
58
59/********* Memory BAR register ************/
60#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
61/* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
62 * Interrupt Disable" may still globally block interrupts in addition to
63 * individual interrupt masks. This gives the device driver a mechanism to
64 * block all interrupts atomically without having to arbitrate for the PCI
65 * Interrupt Disable bit with the OS.
66 */
67#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
68
69/********* Power management (WOL) **********/
70#define PCICFG_PM_CONTROL_OFFSET 0x44
71#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
72
73/********* Online Control Registers *******/
74#define PCICFG_ONLINE0 0xB0
75#define PCICFG_ONLINE1 0xB4
76
77/********* UE Status and Mask Registers ***/
78#define PCICFG_UE_STATUS_LOW 0xA0
79#define PCICFG_UE_STATUS_HIGH 0xA4
80#define PCICFG_UE_STATUS_LOW_MASK 0xA8
81#define PCICFG_UE_STATUS_HI_MASK 0xAC
82
83/******** SLI_INTF ***********************/
84#define SLI_INTF_REG_OFFSET 0x58
85#define SLI_INTF_VALID_MASK 0xE0000000
86#define SLI_INTF_VALID 0xC0000000
87#define SLI_INTF_HINT2_MASK 0x1F000000
88#define SLI_INTF_HINT2_SHIFT 24
89#define SLI_INTF_HINT1_MASK 0x00FF0000
90#define SLI_INTF_HINT1_SHIFT 16
91#define SLI_INTF_FAMILY_MASK 0x00000F00
92#define SLI_INTF_FAMILY_SHIFT 8
93#define SLI_INTF_IF_TYPE_MASK 0x0000F000
94#define SLI_INTF_IF_TYPE_SHIFT 12
95#define SLI_INTF_REV_MASK 0x000000F0
96#define SLI_INTF_REV_SHIFT 4
97#define SLI_INTF_FT_MASK 0x00000001
98
99
100/* SLI family */
101#define BE_SLI_FAMILY 0x0
102#define LANCER_A0_SLI_FAMILY 0xA
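
The mask/shift pairs above combine in the usual way; a hypothetical decode helper (not part of this patch) might look like:

	static int sli_family_sketch(struct pci_dev *pdev, u32 *family)
	{
		u32 sli_intf;

		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID)
			return -EINVAL;	/* signature bits not present */

		*family = (sli_intf & SLI_INTF_FAMILY_MASK) >> SLI_INTF_FAMILY_SHIFT;
		return 0;	/* e.g. LANCER_A0_SLI_FAMILY for Lancer A0 */
	}
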
103
104
105/********* ISR0 Register offset **********/
106#define CEV_ISR0_OFFSET 0xC18
107#define CEV_ISR_SIZE 4
108
109/********* Event Q door bell *************/
110#define DB_EQ_OFFSET DB_CQ_OFFSET
111#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
112#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
113#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placed at bits 11-15 */
114
115/* Clear the interrupt for this eq */
116#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
117/* Must be 1 */
118#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
119/* Number of event entries processed */
120#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
121/* Rearm bit */
122#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
123
124/********* Compl Q door bell *************/
125#define DB_CQ_OFFSET 0x120
126#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
127#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
128#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
129 placed at bits 11-15 */
130
131/* Number of event entries processed */
132#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
133/* Rearm bit */
134#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
135
136/********** TX ULP door bell *************/
137#define DB_TXULP1_OFFSET 0x60
138#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
139/* Number of tx entries posted */
140#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
141#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
142
143/********** RQ(erx) door bell ************/
144#define DB_RQ_OFFSET 0x100
145#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
146/* Number of rx frags posted */
147#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
148
149/********** MCC door bell ************/
150#define DB_MCCQ_OFFSET 0x140
151#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
152/* Number of entries posted */
153#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
154
155/********** SRIOV VF PCICFG OFFSET ********/
156#define SRIOV_VF_PCICFG_OFFSET (4096)
157
158/********** FAT TABLE ********/
159#define RETRIEVE_FAT 0
160#define QUERY_FAT 1
161
162/* Flashrom related descriptors */
163#define IMAGE_TYPE_FIRMWARE 160
164#define IMAGE_TYPE_BOOTCODE 224
165#define IMAGE_TYPE_OPTIONROM 32
166
167#define NUM_FLASHDIR_ENTRIES 32
168
169#define IMG_TYPE_ISCSI_ACTIVE 0
170#define IMG_TYPE_REDBOOT 1
171#define IMG_TYPE_BIOS 2
172#define IMG_TYPE_PXE_BIOS 3
173#define IMG_TYPE_FCOE_BIOS 8
174#define IMG_TYPE_ISCSI_BACKUP 9
175#define IMG_TYPE_FCOE_FW_ACTIVE 10
176#define IMG_TYPE_FCOE_FW_BACKUP 11
177#define IMG_TYPE_NCSI_FW 13
178
179#define FLASHROM_OPER_FLASH 1
180#define FLASHROM_OPER_SAVE 2
181#define FLASHROM_OPER_REPORT 4
182
183#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
184#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
185#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
186#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
187#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
188#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
189#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144) /* Max NCSI image sz */
190
191#define FLASH_NCSI_MAGIC (0x16032009)
192#define FLASH_NCSI_DISABLED (0)
193#define FLASH_NCSI_ENABLED (1)
194
195#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
196
197/* Offsets for components on Flash. */
198#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
199#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
200#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
201#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
202#define FLASH_iSCSI_BIOS_START_g2 (7340032)
203#define FLASH_PXE_BIOS_START_g2 (7864320)
204#define FLASH_FCoE_BIOS_START_g2 (524288)
205#define FLASH_REDBOOT_START_g2 (0)
206
207#define FLASH_NCSI_START_g3 (15990784)
208#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
209#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
210#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
211#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
212#define FLASH_iSCSI_BIOS_START_g3 (12582912)
213#define FLASH_PXE_BIOS_START_g3 (13107200)
214#define FLASH_FCoE_BIOS_START_g3 (13631488)
215#define FLASH_REDBOOT_START_g3 (262144)
216
217/************* Rx Packet Type Encoding **************/
218#define BE_UNICAST_PACKET 0
219#define BE_MULTICAST_PACKET 1
220#define BE_BROADCAST_PACKET 2
221#define BE_RSVD_PACKET 3
222
223/*
224 * BE descriptors: host memory data structures whose formats
225 * are hardwired in BE silicon.
226 */
227/* Event Queue Descriptor */
228#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
229#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
230#define EQ_ENTRY_RES_ID_SHIFT 16
231
232struct be_eq_entry {
233 u32 evt;
234};
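
A decode sketch for the masks above (the helper is hypothetical): entries are little-endian, and the resource id in bits 16-31 identifies the completion queue that raised the event.

	static bool eq_entry_valid_sketch(struct be_eq_entry *eqe, u16 *res_id)
	{
		u32 evt = le32_to_cpu(eqe->evt);

		if (!(evt & EQ_ENTRY_VALID_MASK))
			return false;	/* no new event */
		*res_id = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
		return true;
	}
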
235
236/* TX Queue Descriptor */
237#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
238struct be_eth_wrb {
239 u32 frag_pa_hi; /* dword 0 */
240 u32 frag_pa_lo; /* dword 1 */
241 u32 rsvd0; /* dword 2 */
242 u32 frag_len; /* dword 3: bits 0 - 15 */
243} __packed;
244
245/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
246 * actual structure is defined as a byte : used to calculate
247 * offset/shift/mask of each field */
248struct amap_eth_hdr_wrb {
249 u8 rsvd0[32]; /* dword 0 */
250 u8 rsvd1[32]; /* dword 1 */
251 u8 complete; /* dword 2 */
252 u8 event;
253 u8 crc;
254 u8 forward;
255 u8 lso6;
256 u8 mgmt;
257 u8 ipcs;
258 u8 udpcs;
259 u8 tcpcs;
260 u8 lso;
261 u8 vlan;
262 u8 gso[2];
263 u8 num_wrb[5];
264 u8 lso_mss[14];
265 u8 len[16]; /* dword 3 */
266 u8 vlan_tag[16];
267} __packed;
268
269struct be_eth_hdr_wrb {
270 u32 dw[4];
271};
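
Because each bit is modeled as one byte, a field's dword index, bit shift, and mask fall out of plain offsetof/sizeof arithmetic; this is what the driver's AMAP-style helpers compute (a 32-bit-wide field needs a special-cased mask). A sketch for the 16-bit len field, which starts at byte offset 96 in the pseudo struct (hdr and frame_len are assumed locals):

	u32 dword = offsetof(struct amap_eth_hdr_wrb, len) / 32;	/* == 3 */
	u32 shift = offsetof(struct amap_eth_hdr_wrb, len) % 32;	/* == 0 */
	u32 mask  = (1 << sizeof(((struct amap_eth_hdr_wrb *)0)->len)) - 1;

	hdr->dw[dword] = (hdr->dw[dword] & ~(mask << shift)) |
			 ((frame_len & mask) << shift);
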
272
273/* TX Compl Queue Descriptor */
274
275/* Pseudo amap definition for eth_tx_compl in which each bit of the
276 * actual structure is defined as a byte: used to calculate
277 * offset/shift/mask of each field */
278struct amap_eth_tx_compl {
279 u8 wrb_index[16]; /* dword 0 */
280 u8 ct[2]; /* dword 0 */
281 u8 port[2]; /* dword 0 */
282 u8 rsvd0[8]; /* dword 0 */
283 u8 status[4]; /* dword 0 */
284 u8 user_bytes[16]; /* dword 1 */
285 u8 nwh_bytes[8]; /* dword 1 */
286 u8 lso; /* dword 1 */
287 u8 cast_enc[2]; /* dword 1 */
288 u8 rsvd1[5]; /* dword 1 */
289 u8 rsvd2[32]; /* dword 2 */
290 u8 pkts[16]; /* dword 3 */
291 u8 ringid[11]; /* dword 3 */
292 u8 hash_val[4]; /* dword 3 */
293 u8 valid; /* dword 3 */
294} __packed;
295
296struct be_eth_tx_compl {
297 u32 dw[4];
298};
299
300/* RX Queue Descriptor */
301struct be_eth_rx_d {
302 u32 fragpa_hi;
303 u32 fragpa_lo;
304};
305
306/* RX Compl Queue Descriptor */
307
308/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
309 * each bit of the actual structure is defined as a byte: used to calculate
310 * offset/shift/mask of each field */
311struct amap_eth_rx_compl_v0 {
312 u8 vlan_tag[16]; /* dword 0 */
313 u8 pktsize[14]; /* dword 0 */
314 u8 port; /* dword 0 */
315 u8 ip_opt; /* dword 0 */
316 u8 err; /* dword 1 */
317 u8 rsshp; /* dword 1 */
318 u8 ipf; /* dword 1 */
319 u8 tcpf; /* dword 1 */
320 u8 udpf; /* dword 1 */
321 u8 ipcksm; /* dword 1 */
322 u8 l4_cksm; /* dword 1 */
323 u8 ip_version; /* dword 1 */
324 u8 macdst[6]; /* dword 1 */
325 u8 vtp; /* dword 1 */
326 u8 rsvd0; /* dword 1 */
327 u8 fragndx[10]; /* dword 1 */
328 u8 ct[2]; /* dword 1 */
329 u8 sw; /* dword 1 */
330 u8 numfrags[3]; /* dword 1 */
331 u8 rss_flush; /* dword 2 */
332 u8 cast_enc[2]; /* dword 2 */
333 u8 vtm; /* dword 2 */
334 u8 rss_bank; /* dword 2 */
335 u8 rsvd1[23]; /* dword 2 */
336 u8 lro_pkt; /* dword 2 */
337 u8 rsvd2[2]; /* dword 2 */
338 u8 valid; /* dword 2 */
339 u8 rsshash[32]; /* dword 3 */
340} __packed;
341
342/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
343 * each bit of the actual structure is defined as a byte: used to calculate
344 * offset/shift/mask of each field */
345struct amap_eth_rx_compl_v1 {
346 u8 vlan_tag[16]; /* dword 0 */
347 u8 pktsize[14]; /* dword 0 */
348 u8 vtp; /* dword 0 */
349 u8 ip_opt; /* dword 0 */
350 u8 err; /* dword 1 */
351 u8 rsshp; /* dword 1 */
352 u8 ipf; /* dword 1 */
353 u8 tcpf; /* dword 1 */
354 u8 udpf; /* dword 1 */
355 u8 ipcksm; /* dword 1 */
356 u8 l4_cksm; /* dword 1 */
357 u8 ip_version; /* dword 1 */
358 u8 macdst[7]; /* dword 1 */
359 u8 rsvd0; /* dword 1 */
360 u8 fragndx[10]; /* dword 1 */
361 u8 ct[2]; /* dword 1 */
362 u8 sw; /* dword 1 */
363 u8 numfrags[3]; /* dword 1 */
364 u8 rss_flush; /* dword 2 */
365 u8 cast_enc[2]; /* dword 2 */
366 u8 vtm; /* dword 2 */
367 u8 rss_bank; /* dword 2 */
368 u8 port[2]; /* dword 2 */
369 u8 vntagp; /* dword 2 */
370 u8 header_len[8]; /* dword 2 */
371 u8 header_split[2]; /* dword 2 */
372 u8 rsvd1[13]; /* dword 2 */
373 u8 valid; /* dword 2 */
374 u8 rsshash[32]; /* dword 3 */
375} __packed;
376
377struct be_eth_rx_compl {
378 u32 dw[4];
379};
380
381struct mgmt_hba_attribs {
382 u8 flashrom_version_string[32];
383 u8 manufacturer_name[32];
384 u32 supported_modes;
385 u32 rsvd0[3];
386 u8 ncsi_ver_string[12];
387 u32 default_extended_timeout;
388 u8 controller_model_number[32];
389 u8 controller_description[64];
390 u8 controller_serial_number[32];
391 u8 ip_version_string[32];
392 u8 firmware_version_string[32];
393 u8 bios_version_string[32];
394 u8 redboot_version_string[32];
395 u8 driver_version_string[32];
396 u8 fw_on_flash_version_string[32];
397 u32 functionalities_supported;
398 u16 max_cdblength;
399 u8 asic_revision;
400 u8 generational_guid[16];
401 u8 hba_port_count;
402 u16 default_link_down_timeout;
403 u8 iscsi_ver_min_max;
404 u8 multifunction_device;
405 u8 cache_valid;
406 u8 hba_status;
407 u8 max_domains_supported;
408 u8 phy_port;
409 u32 firmware_post_status;
410 u32 hba_mtu[8];
411 u32 rsvd1[4];
412};
413
414struct mgmt_controller_attrib {
415 struct mgmt_hba_attribs hba_attribs;
416 u16 pci_vendor_id;
417 u16 pci_device_id;
418 u16 pci_sub_vendor_id;
419 u16 pci_sub_system_id;
420 u8 pci_bus_number;
421 u8 pci_device_number;
422 u8 pci_function_number;
423 u8 interface_type;
424 u64 unique_identifier;
425 u32 rsvd0[5];
426};
427
428struct controller_id {
429 u32 vendor;
430 u32 device;
431 u32 subvendor;
432 u32 subdevice;
433};
434
435struct flash_comp {
436 unsigned long offset;
437 int optype;
438 int size;
439};
440
441struct image_hdr {
442 u32 imageid;
443 u32 imageoffset;
444 u32 imagelength;
445 u32 image_checksum;
446 u8 image_version[32];
447};
448struct flash_file_hdr_g2 {
449 u8 sign[32];
450 u32 cksum;
451 u32 antidote;
452 struct controller_id cont_id;
453 u32 file_len;
454 u32 chunk_num;
455 u32 total_chunks;
456 u32 num_imgs;
457 u8 build[24];
458};
459
460struct flash_file_hdr_g3 {
461 u8 sign[52];
462 u8 ufi_version[4];
463 u32 file_len;
464 u32 cksum;
465 u32 antidote;
466 u32 num_imgs;
467 u8 build[24];
468 u8 rsvd[32];
469};
470
471struct flash_section_hdr {
472 u32 format_rev;
473 u32 cksum;
474 u32 antidote;
475 u32 build_no;
476 u8 id_string[64];
477 u32 active_entry_mask;
478 u32 valid_entry_mask;
479 u32 org_content_mask;
480 u32 rsvd0;
481 u32 rsvd1;
482 u32 rsvd2;
483 u32 rsvd3;
484 u32 rsvd4;
485};
486
487struct flash_section_entry {
488 u32 type;
489 u32 offset;
490 u32 pad_size;
491 u32 image_size;
492 u32 cksum;
493 u32 entry_point;
494 u32 rsvd0;
495 u32 rsvd1;
496 u8 ver_data[32];
497};
498
499struct flash_section_info {
500 u8 cookie[32];
501 struct flash_section_hdr fsec_hdr;
502 struct flash_section_entry fsec_entry[32];
503};
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
new file mode 100644
index 00000000000..c411bb1845f
--- /dev/null
+++ b/drivers/net/benet/be_main.c
@@ -0,0 +1,3676 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include <linux/prefetch.h>
19#include "be.h"
20#include "be_cmds.h"
21#include <asm/div64.h>
22
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
29static ushort rx_frag_size = 2048;
30static unsigned int num_vfs;
31module_param(rx_frag_size, ushort, S_IRUGO);
32module_param(num_vfs, uint, S_IRUGO);
33MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
36static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
46/* UE Status Low CSR */
47static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81/* UE Status High CSR */
82static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
142static void be_intr_set(struct be_adapter *adapter, bool enable)
143{
144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
147
148 if (adapter->eeh_err)
149 return;
150
151 if (!enabled && enable)
152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 else if (enabled && !enable)
154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else
156 return;
157
158 iowrite32(reg, addr);
159}
160
161static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
162{
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
166
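	/* make the RX descriptors posted earlier visible to HW before ringing the doorbell */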
167 wmb();
168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
169}
170
171static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
172{
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
176
177 wmb();
178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
179}
180
181static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182 bool arm, bool clear_int, u16 num_popped)
183{
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
188
189 if (adapter->eeh_err)
190 return;
191
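	/* a single doorbell write acks num_popped events and may clear the interrupt and re-arm the EQ */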
192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
199}
200
201void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
202{
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
207
208 if (adapter->eeh_err)
209 return;
210
211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
215}
216
217static int be_mac_addr_set(struct net_device *netdev, void *p)
218{
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
222
223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
225
226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
228 */
229 if (!be_physfn(adapter))
230 goto netdev_addr;
231
232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
234 if (status)
235 return status;
236
237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238 adapter->if_handle, &adapter->pmac_id, 0);
239netdev_addr:
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243 return status;
244}
245
246static void populate_be2_stats(struct be_adapter *adapter)
247{
248
249 struct be_drv_stats *drvs = &adapter->drv_stats;
250 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
251 struct be_port_rxf_stats_v0 *port_stats =
252 be_port_rxf_stats_from_cmd(adapter);
253 struct be_rxf_stats_v0 *rxf_stats =
254 be_rxf_stats_from_cmd(adapter);
255
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270 drvs->rx_input_fifo_overflow_drop =
271 port_stats->rx_input_fifo_overflow;
272 drvs->rx_dropped_header_too_small =
273 port_stats->rx_dropped_header_too_small;
274 drvs->rx_address_match_errors =
275 port_stats->rx_address_match_errors;
276 drvs->rx_alignment_symbol_errors =
277 port_stats->rx_alignment_symbol_errors;
278
279 drvs->tx_pauseframes = port_stats->tx_pauseframes;
280 drvs->tx_controlframes = port_stats->tx_controlframes;
281
282 if (adapter->port_num)
283 drvs->jabber_events =
284 rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events =
287 rxf_stats->port0_jabber_events;
288 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
289 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
290 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
291 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
292 drvs->forwarded_packets = rxf_stats->forwarded_packets;
293 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
294 drvs->rx_drops_no_tpre_descr =
295 rxf_stats->rx_drops_no_tpre_descr;
296 drvs->rx_drops_too_many_frags =
297 rxf_stats->rx_drops_too_many_frags;
298 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
299}
300
301static void populate_be3_stats(struct be_adapter *adapter)
302{
303 struct be_drv_stats *drvs = &adapter->drv_stats;
304 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
305
306 struct be_rxf_stats_v1 *rxf_stats =
307 be_rxf_stats_from_cmd(adapter);
308 struct be_port_rxf_stats_v1 *port_stats =
309 be_port_rxf_stats_from_cmd(adapter);
310
311 drvs->rx_priority_pause_frames = 0;
312 drvs->pmem_fifo_overflow_drop = 0;
313 drvs->rx_pause_frames = port_stats->rx_pause_frames;
314 drvs->rx_crc_errors = port_stats->rx_crc_errors;
315 drvs->rx_control_frames = port_stats->rx_control_frames;
316 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
317 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
318 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
319 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
320 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
321 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
322 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
323 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
324 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
325 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
326 drvs->rx_dropped_header_too_small =
327 port_stats->rx_dropped_header_too_small;
328 drvs->rx_input_fifo_overflow_drop =
329 port_stats->rx_input_fifo_overflow_drop;
330 drvs->rx_address_match_errors =
331 port_stats->rx_address_match_errors;
332 drvs->rx_alignment_symbol_errors =
333 port_stats->rx_alignment_symbol_errors;
334 drvs->rxpp_fifo_overflow_drop =
335 port_stats->rxpp_fifo_overflow_drop;
336 drvs->tx_pauseframes = port_stats->tx_pauseframes;
337 drvs->tx_controlframes = port_stats->tx_controlframes;
338 drvs->jabber_events = port_stats->jabber_events;
339 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343 drvs->forwarded_packets = rxf_stats->forwarded_packets;
344 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
345 drvs->rx_drops_no_tpre_descr =
346 rxf_stats->rx_drops_no_tpre_descr;
347 drvs->rx_drops_too_many_frags =
348 rxf_stats->rx_drops_too_many_frags;
349 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
350}
351
352static void populate_lancer_stats(struct be_adapter *adapter)
353{
354
355 struct be_drv_stats *drvs = &adapter->drv_stats;
356	struct lancer_cmd_pport_stats *pport_stats =
357					pport_stats_from_cmd(adapter);
358 drvs->rx_priority_pause_frames = 0;
359 drvs->pmem_fifo_overflow_drop = 0;
360 drvs->rx_pause_frames =
361 make_64bit_val(pport_stats->rx_pause_frames_hi,
362 pport_stats->rx_pause_frames_lo);
363 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
364 pport_stats->rx_crc_errors_lo);
365 drvs->rx_control_frames =
366 make_64bit_val(pport_stats->rx_control_frames_hi,
367 pport_stats->rx_control_frames_lo);
368 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
369 drvs->rx_frame_too_long =
370		make_64bit_val(pport_stats->rx_frames_too_long_hi,
371				pport_stats->rx_frames_too_long_lo);
372 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376 drvs->rx_dropped_tcp_length =
377 pport_stats->rx_dropped_invalid_tcp_length;
378 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381 drvs->rx_dropped_header_too_small =
382 pport_stats->rx_dropped_header_too_small;
383 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385 drvs->rx_alignment_symbol_errors =
386 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387 pport_stats->rx_symbol_errors_lo);
388 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390 pport_stats->tx_pause_frames_lo);
391 drvs->tx_controlframes =
392 make_64bit_val(pport_stats->tx_control_frames_hi,
393 pport_stats->tx_control_frames_lo);
394 drvs->jabber_events = pport_stats->rx_jabbers;
395 drvs->rx_drops_no_pbuf = 0;
396 drvs->rx_drops_no_txpb = 0;
397 drvs->rx_drops_no_erx_descr = 0;
398 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400 pport_stats->num_forwards_lo);
401 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402 pport_stats->rx_drops_mtu_lo);
403 drvs->rx_drops_no_tpre_descr = 0;
404 drvs->rx_drops_too_many_frags =
405 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406 pport_stats->rx_drops_too_many_frags_lo);
407}
408
409void be_parse_stats(struct be_adapter *adapter)
410{
411 if (adapter->generation == BE_GEN3) {
412 if (lancer_chip(adapter))
413 populate_lancer_stats(adapter);
414 else
415 populate_be3_stats(adapter);
416 } else {
417 populate_be2_stats(adapter);
418 }
419}
420
421void netdev_stats_update(struct be_adapter *adapter)
422{
423 struct be_drv_stats *drvs = &adapter->drv_stats;
424 struct net_device_stats *dev_stats = &adapter->netdev->stats;
425 struct be_rx_obj *rxo;
426 struct be_tx_obj *txo;
427 unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
428 int i;
429
430 for_all_rx_queues(adapter, rxo, i) {
431 pkts += rx_stats(rxo)->rx_pkts;
432 bytes += rx_stats(rxo)->rx_bytes;
433 mcast += rx_stats(rxo)->rx_mcast_pkts;
434 drops += rx_stats(rxo)->rx_dropped;
435 /* no space in linux buffers: best possible approximation */
436 if (adapter->generation == BE_GEN3) {
437 if (!(lancer_chip(adapter))) {
438 struct be_erx_stats_v1 *erx =
439 be_erx_stats_from_cmd(adapter);
440 drops += erx->rx_drops_no_fragments[rxo->q.id];
441 }
442 } else {
443 struct be_erx_stats_v0 *erx =
444 be_erx_stats_from_cmd(adapter);
445 drops += erx->rx_drops_no_fragments[rxo->q.id];
446 }
447 }
448 dev_stats->rx_packets = pkts;
449 dev_stats->rx_bytes = bytes;
450 dev_stats->multicast = mcast;
451 dev_stats->rx_dropped = drops;
452
453 pkts = bytes = 0;
454 for_all_tx_queues(adapter, txo, i) {
455 pkts += tx_stats(txo)->be_tx_pkts;
456 bytes += tx_stats(txo)->be_tx_bytes;
457 }
458 dev_stats->tx_packets = pkts;
459 dev_stats->tx_bytes = bytes;
460
461 /* bad pkts received */
462 dev_stats->rx_errors = drvs->rx_crc_errors +
463 drvs->rx_alignment_symbol_errors +
464 drvs->rx_in_range_errors +
465 drvs->rx_out_range_errors +
466 drvs->rx_frame_too_long +
467 drvs->rx_dropped_too_small +
468 drvs->rx_dropped_too_short +
469 drvs->rx_dropped_header_too_small +
470 drvs->rx_dropped_tcp_length +
471 drvs->rx_dropped_runt +
472 drvs->rx_tcp_checksum_errs +
473 drvs->rx_ip_checksum_errs +
474 drvs->rx_udp_checksum_errs;
475
476 /* detailed rx errors */
477 dev_stats->rx_length_errors = drvs->rx_in_range_errors +
478 drvs->rx_out_range_errors +
479 drvs->rx_frame_too_long;
480
481 dev_stats->rx_crc_errors = drvs->rx_crc_errors;
482
483 /* frame alignment errors */
484 dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
485
486 /* receiver fifo overrun */
487	/* drops_no_pbuf is not per i/f, it's per BE card */
488 dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
489 drvs->rx_input_fifo_overflow_drop +
490 drvs->rx_drops_no_pbuf;
491}
492
493void be_link_status_update(struct be_adapter *adapter, bool link_up)
494{
495 struct net_device *netdev = adapter->netdev;
496
497 /* If link came up or went down */
498 if (adapter->link_up != link_up) {
499 adapter->link_speed = -1;
500 if (link_up) {
501 netif_carrier_on(netdev);
502 printk(KERN_INFO "%s: Link up\n", netdev->name);
503 } else {
504 netif_carrier_off(netdev);
505 printk(KERN_INFO "%s: Link down\n", netdev->name);
506 }
507 adapter->link_up = link_up;
508 }
509}
510
511/* Update the EQ delay in BE based on the RX frags consumed / sec */
512static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
513{
514 struct be_eq_obj *rx_eq = &rxo->rx_eq;
515 struct be_rx_stats *stats = &rxo->stats;
516 ulong now = jiffies;
517 u32 eqd;
518
519 if (!rx_eq->enable_aic)
520 return;
521
522 /* Wrapped around */
523 if (time_before(now, stats->rx_fps_jiffies)) {
524 stats->rx_fps_jiffies = now;
525 return;
526 }
527
528 /* Update once a second */
529 if ((now - stats->rx_fps_jiffies) < HZ)
530 return;
531
532 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
533 ((now - stats->rx_fps_jiffies) / HZ);
534
535 stats->rx_fps_jiffies = now;
536 stats->prev_rx_frags = stats->rx_frags;
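	/* map frags/sec to an EQ delay: 8 EQD units per ~110K frags/sec, then clamp */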
537 eqd = stats->rx_fps / 110000;
538 eqd = eqd << 3;
539 if (eqd > rx_eq->max_eqd)
540 eqd = rx_eq->max_eqd;
541 if (eqd < rx_eq->min_eqd)
542 eqd = rx_eq->min_eqd;
543 if (eqd < 10)
544 eqd = 0;
545 if (eqd != rx_eq->cur_eqd)
546 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
547
548 rx_eq->cur_eqd = eqd;
549}
550
551static u32 be_calc_rate(u64 bytes, unsigned long ticks)
552{
553 u64 rate = bytes;
554
555 do_div(rate, ticks / HZ);
556 rate <<= 3; /* bytes/sec -> bits/sec */
557	do_div(rate, 1000000ul); /* bits/sec -> Mbits/sec */
558
559 return rate;
560}
561
562static void be_tx_rate_update(struct be_tx_obj *txo)
563{
564 struct be_tx_stats *stats = tx_stats(txo);
565 ulong now = jiffies;
566
567 /* Wrapped around? */
568 if (time_before(now, stats->be_tx_jiffies)) {
569 stats->be_tx_jiffies = now;
570 return;
571 }
572
573 /* Update tx rate once in two seconds */
574 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
575 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
576 - stats->be_tx_bytes_prev,
577 now - stats->be_tx_jiffies);
578 stats->be_tx_jiffies = now;
579 stats->be_tx_bytes_prev = stats->be_tx_bytes;
580 }
581}
582
583static void be_tx_stats_update(struct be_tx_obj *txo,
584 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
585{
586 struct be_tx_stats *stats = tx_stats(txo);
587
588 stats->be_tx_reqs++;
589 stats->be_tx_wrbs += wrb_cnt;
590 stats->be_tx_bytes += copied;
591 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
592 if (stopped)
593 stats->be_tx_stops++;
594}
595
596/* Determine number of WRB entries needed to xmit data in an skb */
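/* (a header WRB plus one per frag; non-Lancer chips pad to an even count with a dummy WRB) */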
597static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
598 bool *dummy)
599{
600 int cnt = (skb->len > skb->data_len);
601
602 cnt += skb_shinfo(skb)->nr_frags;
603
604 /* to account for hdr wrb */
605 cnt++;
606 if (lancer_chip(adapter) || !(cnt & 1)) {
607 *dummy = false;
608 } else {
609 /* add a dummy to make it an even num */
610 cnt++;
611 *dummy = true;
612 }
613 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
614 return cnt;
615}
616
617static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
618{
619 wrb->frag_pa_hi = upper_32_bits(addr);
620 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
621 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
622}
623
624static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
625 struct sk_buff *skb, u32 wrb_cnt, u32 len)
626{
627 u8 vlan_prio = 0;
628 u16 vlan_tag = 0;
629
630 memset(hdr, 0, sizeof(*hdr));
631
632 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
633
634 if (skb_is_gso(skb)) {
635 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
636 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
637 hdr, skb_shinfo(skb)->gso_size);
638 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
639 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
640 if (lancer_chip(adapter) && adapter->sli_family ==
641 LANCER_A0_SLI_FAMILY) {
642 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
643 if (is_tcp_pkt(skb))
644 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
645 tcpcs, hdr, 1);
646 else if (is_udp_pkt(skb))
647 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
648 udpcs, hdr, 1);
649 }
650 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
651 if (is_tcp_pkt(skb))
652 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
653 else if (is_udp_pkt(skb))
654 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
655 }
656
657 if (vlan_tx_tag_present(skb)) {
658 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
659 vlan_tag = vlan_tx_tag_get(skb);
660 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
661 /* If vlan priority provided by OS is NOT in available bmap */
662 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
663 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
664 adapter->recommended_prio;
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
666 }
667
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
670 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
672}
673
674static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
675 bool unmap_single)
676{
677 dma_addr_t dma;
678
679 be_dws_le_to_cpu(wrb, sizeof(*wrb));
680
681 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
682 if (wrb->frag_len) {
683 if (unmap_single)
684 dma_unmap_single(dev, dma, wrb->frag_len,
685 DMA_TO_DEVICE);
686 else
687 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
688 }
689}
690
691static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
692 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
693{
694 dma_addr_t busaddr;
695 int i, copied = 0;
696 struct device *dev = &adapter->pdev->dev;
697 struct sk_buff *first_skb = skb;
698 struct be_eth_wrb *wrb;
699 struct be_eth_hdr_wrb *hdr;
700 bool map_single = false;
701 u16 map_head;
702
703 hdr = queue_head_node(txq);
704 queue_head_inc(txq);
705 map_head = txq->head;
706
707 if (skb->len > skb->data_len) {
708 int len = skb_headlen(skb);
709 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
710 if (dma_mapping_error(dev, busaddr))
711 goto dma_err;
712 map_single = true;
713 wrb = queue_head_node(txq);
714 wrb_fill(wrb, busaddr, len);
715 be_dws_cpu_to_le(wrb, sizeof(*wrb));
716 queue_head_inc(txq);
717 copied += len;
718 }
719
720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
721 struct skb_frag_struct *frag =
722 &skb_shinfo(skb)->frags[i];
723 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
724 frag->size, DMA_TO_DEVICE);
725 if (dma_mapping_error(dev, busaddr))
726 goto dma_err;
727 wrb = queue_head_node(txq);
728 wrb_fill(wrb, busaddr, frag->size);
729 be_dws_cpu_to_le(wrb, sizeof(*wrb));
730 queue_head_inc(txq);
731 copied += frag->size;
732 }
733
734 if (dummy_wrb) {
735 wrb = queue_head_node(txq);
736 wrb_fill(wrb, 0, 0);
737 be_dws_cpu_to_le(wrb, sizeof(*wrb));
738 queue_head_inc(txq);
739 }
740
741 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
742 be_dws_cpu_to_le(hdr, sizeof(*hdr));
743
744 return copied;
745dma_err:
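	/* mapping failed part-way: rewind the queue head and unmap whatever was already mapped */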
746 txq->head = map_head;
747 while (copied) {
748 wrb = queue_head_node(txq);
749 unmap_tx_frag(dev, wrb, map_single);
750 map_single = false;
751 copied -= wrb->frag_len;
752 queue_head_inc(txq);
753 }
754 return 0;
755}
756
757static netdev_tx_t be_xmit(struct sk_buff *skb,
758 struct net_device *netdev)
759{
760 struct be_adapter *adapter = netdev_priv(netdev);
761 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
762 struct be_queue_info *txq = &txo->q;
763 u32 wrb_cnt = 0, copied = 0;
764 u32 start = txq->head;
765 bool dummy_wrb, stopped = false;
766
767 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
768
769 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
770 if (copied) {
771 /* record the sent skb in the sent_skb table */
772 BUG_ON(txo->sent_skb_list[start]);
773 txo->sent_skb_list[start] = skb;
774
775 /* Ensure txq has space for the next skb; Else stop the queue
776	 * *BEFORE* ringing the tx doorbell, so that we serialize the
777 * tx compls of the current transmit which'll wake up the queue
778 */
779 atomic_add(wrb_cnt, &txq->used);
780 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
781 txq->len) {
782 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
783 stopped = true;
784 }
785
786 be_txq_notify(adapter, txq->id, wrb_cnt);
787
788 be_tx_stats_update(txo, wrb_cnt, copied,
789 skb_shinfo(skb)->gso_segs, stopped);
790 } else {
791 txq->head = start;
792 dev_kfree_skb_any(skb);
793 }
794 return NETDEV_TX_OK;
795}
796
797static int be_change_mtu(struct net_device *netdev, int new_mtu)
798{
799 struct be_adapter *adapter = netdev_priv(netdev);
800 if (new_mtu < BE_MIN_MTU ||
801 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
802 (ETH_HLEN + ETH_FCS_LEN))) {
803 dev_info(&adapter->pdev->dev,
804 "MTU must be between %d and %d bytes\n",
805 BE_MIN_MTU,
806 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
807 return -EINVAL;
808 }
809 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
810 netdev->mtu, new_mtu);
811 netdev->mtu = new_mtu;
812 return 0;
813}
814
815/*
816 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
817 * If the user configures more, place BE in vlan promiscuous mode.
818 */
819static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
820{
821 u16 vtag[BE_NUM_VLANS_SUPPORTED];
822 u16 ntags = 0, i;
823 int status = 0;
824 u32 if_handle;
825
826 if (vf) {
827 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
828 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
829 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
830 }
831
832 if (adapter->vlans_added <= adapter->max_vlans) {
833 /* Construct VLAN Table to give to HW */
834 for (i = 0; i < VLAN_N_VID; i++) {
835 if (adapter->vlan_tag[i]) {
836 vtag[ntags] = cpu_to_le16(i);
837 ntags++;
838 }
839 }
840 status = be_cmd_vlan_config(adapter, adapter->if_handle,
841 vtag, ntags, 1, 0);
842 } else {
843 status = be_cmd_vlan_config(adapter, adapter->if_handle,
844 NULL, 0, 1, 1);
845 }
846
847 return status;
848}
849
850static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
851{
852 struct be_adapter *adapter = netdev_priv(netdev);
853
854 adapter->vlans_added++;
855 if (!be_physfn(adapter))
856 return;
857
858 adapter->vlan_tag[vid] = 1;
859 if (adapter->vlans_added <= (adapter->max_vlans + 1))
860 be_vid_config(adapter, false, 0);
861}
862
863static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
864{
865 struct be_adapter *adapter = netdev_priv(netdev);
866
867 adapter->vlans_added--;
868
869 if (!be_physfn(adapter))
870 return;
871
872 adapter->vlan_tag[vid] = 0;
873 if (adapter->vlans_added <= adapter->max_vlans)
874 be_vid_config(adapter, false, 0);
875}
876
877static void be_set_multicast_list(struct net_device *netdev)
878{
879 struct be_adapter *adapter = netdev_priv(netdev);
880
881 if (netdev->flags & IFF_PROMISC) {
882 be_cmd_promiscuous_config(adapter, true);
883 adapter->promiscuous = true;
884 goto done;
885 }
886
887 /* BE was previously in promiscuous mode; disable it */
888 if (adapter->promiscuous) {
889 adapter->promiscuous = false;
890 be_cmd_promiscuous_config(adapter, false);
891 }
892
893 /* Enable multicast promisc if num configured exceeds what we support */
894 if (netdev->flags & IFF_ALLMULTI ||
895 netdev_mc_count(netdev) > BE_MAX_MC) {
896 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
897 &adapter->mc_cmd_mem);
898 goto done;
899 }
900
901 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
902 &adapter->mc_cmd_mem);
903done:
904 return;
905}
906
907static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
908{
909 struct be_adapter *adapter = netdev_priv(netdev);
910 int status;
911
912 if (!adapter->sriov_enabled)
913 return -EPERM;
914
915 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
916 return -EINVAL;
917
918 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
919 status = be_cmd_pmac_del(adapter,
920 adapter->vf_cfg[vf].vf_if_handle,
921 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
922
923 status = be_cmd_pmac_add(adapter, mac,
924 adapter->vf_cfg[vf].vf_if_handle,
925 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
926
927 if (status)
928 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
929 mac, vf);
930 else
931 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
932
933 return status;
934}
935
936static int be_get_vf_config(struct net_device *netdev, int vf,
937 struct ifla_vf_info *vi)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
940
941 if (!adapter->sriov_enabled)
942 return -EPERM;
943
944 if (vf >= num_vfs)
945 return -EINVAL;
946
947 vi->vf = vf;
948 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
949 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
950 vi->qos = 0;
951 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
952
953 return 0;
954}
955
956static int be_set_vf_vlan(struct net_device *netdev,
957 int vf, u16 vlan, u8 qos)
958{
959 struct be_adapter *adapter = netdev_priv(netdev);
960 int status = 0;
961
962 if (!adapter->sriov_enabled)
963 return -EPERM;
964
965 if ((vf >= num_vfs) || (vlan > 4095))
966 return -EINVAL;
967
968 if (vlan) {
969 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
970 adapter->vlans_added++;
971 } else {
972 adapter->vf_cfg[vf].vf_vlan_tag = 0;
973 adapter->vlans_added--;
974 }
975
976 status = be_vid_config(adapter, true, vf);
977
978 if (status)
979 dev_info(&adapter->pdev->dev,
980 "VLAN %d config on VF %d failed\n", vlan, vf);
981 return status;
982}
983
984static int be_set_vf_tx_rate(struct net_device *netdev,
985 int vf, int rate)
986{
987 struct be_adapter *adapter = netdev_priv(netdev);
988 int status = 0;
989
990 if (!adapter->sriov_enabled)
991 return -EPERM;
992
993 if ((vf >= num_vfs) || (rate < 0))
994 return -EINVAL;
995
996 if (rate > 10000)
997 rate = 10000;
998
999 adapter->vf_cfg[vf].vf_tx_rate = rate;
1000 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1001
1002 if (status)
1003 dev_info(&adapter->pdev->dev,
1004 "tx rate %d on VF %d failed\n", rate, vf);
1005 return status;
1006}
1007
1008static void be_rx_rate_update(struct be_rx_obj *rxo)
1009{
1010 struct be_rx_stats *stats = &rxo->stats;
1011 ulong now = jiffies;
1012
1013 /* Wrapped around */
1014 if (time_before(now, stats->rx_jiffies)) {
1015 stats->rx_jiffies = now;
1016 return;
1017 }
1018
1019 /* Update the rate once in two seconds */
1020 if ((now - stats->rx_jiffies) < 2 * HZ)
1021 return;
1022
1023 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1024 now - stats->rx_jiffies);
1025 stats->rx_jiffies = now;
1026 stats->rx_bytes_prev = stats->rx_bytes;
1027}
1028
1029static void be_rx_stats_update(struct be_rx_obj *rxo,
1030 struct be_rx_compl_info *rxcp)
1031{
1032 struct be_rx_stats *stats = &rxo->stats;
1033
1034 stats->rx_compl++;
1035 stats->rx_frags += rxcp->num_rcvd;
1036 stats->rx_bytes += rxcp->pkt_size;
1037 stats->rx_pkts++;
1038 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1039 stats->rx_mcast_pkts++;
1040 if (rxcp->err)
1041 stats->rxcp_err++;
1042}
1043
1044static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1045{
1046 /* L4 checksum is not reliable for non TCP/UDP packets.
1047 * Also ignore ipcksm for ipv6 pkts */
1048 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1049 (rxcp->ip_csum || rxcp->ipv6);
1050}
1051
1052static struct be_rx_page_info *
1053get_rx_page_info(struct be_adapter *adapter,
1054 struct be_rx_obj *rxo,
1055 u16 frag_idx)
1056{
1057 struct be_rx_page_info *rx_page_info;
1058 struct be_queue_info *rxq = &rxo->q;
1059
1060 rx_page_info = &rxo->page_info_tbl[frag_idx];
1061 BUG_ON(!rx_page_info->page);
1062
1063 if (rx_page_info->last_page_user) {
1064 dma_unmap_page(&adapter->pdev->dev,
1065 dma_unmap_addr(rx_page_info, bus),
1066 adapter->big_page_size, DMA_FROM_DEVICE);
1067 rx_page_info->last_page_user = false;
1068 }
1069
1070 atomic_dec(&rxq->used);
1071 return rx_page_info;
1072}
1073
1074/* Throw away the data in the Rx completion */
1075static void be_rx_compl_discard(struct be_adapter *adapter,
1076 struct be_rx_obj *rxo,
1077 struct be_rx_compl_info *rxcp)
1078{
1079 struct be_queue_info *rxq = &rxo->q;
1080 struct be_rx_page_info *page_info;
1081 u16 i, num_rcvd = rxcp->num_rcvd;
1082
1083 for (i = 0; i < num_rcvd; i++) {
1084 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1085 put_page(page_info->page);
1086 memset(page_info, 0, sizeof(*page_info));
1087 index_inc(&rxcp->rxq_idx, rxq->len);
1088 }
1089}
1090
1091/*
1092 * skb_fill_rx_data forms a complete skb for an ether frame
1093 * indicated by rxcp.
1094 */
1095static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1096 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1097{
1098 struct be_queue_info *rxq = &rxo->q;
1099 struct be_rx_page_info *page_info;
1100 u16 i, j;
1101 u16 hdr_len, curr_frag_len, remaining;
1102 u8 *start;
1103
1104 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1105 start = page_address(page_info->page) + page_info->page_offset;
1106 prefetch(start);
1107
1108 /* Copy data in the first descriptor of this completion */
1109 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1110
1111 /* Copy the header portion into skb_data */
1112 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1113 memcpy(skb->data, start, hdr_len);
1114 skb->len = curr_frag_len;
1115 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1116 /* Complete packet has now been moved to data */
1117 put_page(page_info->page);
1118 skb->data_len = 0;
1119 skb->tail += curr_frag_len;
1120 } else {
1121 skb_shinfo(skb)->nr_frags = 1;
1122 skb_shinfo(skb)->frags[0].page = page_info->page;
1123 skb_shinfo(skb)->frags[0].page_offset =
1124 page_info->page_offset + hdr_len;
1125 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1126 skb->data_len = curr_frag_len - hdr_len;
1127 skb->tail += hdr_len;
1128 }
1129 page_info->page = NULL;
1130
1131 if (rxcp->pkt_size <= rx_frag_size) {
1132 BUG_ON(rxcp->num_rcvd != 1);
1133 return;
1134 }
1135
1136 /* More frags present for this completion */
1137 index_inc(&rxcp->rxq_idx, rxq->len);
1138 remaining = rxcp->pkt_size - curr_frag_len;
1139 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1140 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1141 curr_frag_len = min(remaining, rx_frag_size);
1142
1143 /* Coalesce all frags from the same physical page in one slot */
1144 if (page_info->page_offset == 0) {
1145 /* Fresh page */
1146 j++;
1147 skb_shinfo(skb)->frags[j].page = page_info->page;
1148 skb_shinfo(skb)->frags[j].page_offset =
1149 page_info->page_offset;
1150 skb_shinfo(skb)->frags[j].size = 0;
1151 skb_shinfo(skb)->nr_frags++;
1152 } else {
1153 put_page(page_info->page);
1154 }
1155
1156 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1157 skb->len += curr_frag_len;
1158 skb->data_len += curr_frag_len;
1159
1160 remaining -= curr_frag_len;
1161 index_inc(&rxcp->rxq_idx, rxq->len);
1162 page_info->page = NULL;
1163 }
1164 BUG_ON(j > MAX_SKB_FRAGS);
1165}
1166
1167/* Process the RX completion indicated by rxcp when GRO is disabled */
1168static void be_rx_compl_process(struct be_adapter *adapter,
1169 struct be_rx_obj *rxo,
1170 struct be_rx_compl_info *rxcp)
1171{
1172 struct net_device *netdev = adapter->netdev;
1173 struct sk_buff *skb;
1174
1175 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1176 if (unlikely(!skb)) {
1177 rxo->stats.rx_dropped++;
1178 be_rx_compl_discard(adapter, rxo, rxcp);
1179 return;
1180 }
1181
1182 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1183
1184 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1185 skb->ip_summed = CHECKSUM_UNNECESSARY;
1186 else
1187 skb_checksum_none_assert(skb);
1188
1189 skb->truesize = skb->len + sizeof(struct sk_buff);
1190 skb->protocol = eth_type_trans(skb, netdev);
1191 if (adapter->netdev->features & NETIF_F_RXHASH)
1192 skb->rxhash = rxcp->rss_hash;
1193
1194
1195 if (unlikely(rxcp->vlanf))
1196 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1197
1198 netif_receive_skb(skb);
1199}
1200
1201/* Process the RX completion indicated by rxcp when GRO is enabled */
1202static void be_rx_compl_process_gro(struct be_adapter *adapter,
1203 struct be_rx_obj *rxo,
1204 struct be_rx_compl_info *rxcp)
1205{
1206 struct be_rx_page_info *page_info;
1207 struct sk_buff *skb = NULL;
1208 struct be_queue_info *rxq = &rxo->q;
1209 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1210 u16 remaining, curr_frag_len;
1211 u16 i, j;
1212
1213 skb = napi_get_frags(&eq_obj->napi);
1214 if (!skb) {
1215 be_rx_compl_discard(adapter, rxo, rxcp);
1216 return;
1217 }
1218
1219 remaining = rxcp->pkt_size;
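	/* j starts at -1 (wraps as u16) and is bumped to 0 before frags[j] is first touched */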
1220 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1221 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1222
1223 curr_frag_len = min(remaining, rx_frag_size);
1224
1225 /* Coalesce all frags from the same physical page in one slot */
1226 if (i == 0 || page_info->page_offset == 0) {
1227 /* First frag or Fresh page */
1228 j++;
1229 skb_shinfo(skb)->frags[j].page = page_info->page;
1230 skb_shinfo(skb)->frags[j].page_offset =
1231 page_info->page_offset;
1232 skb_shinfo(skb)->frags[j].size = 0;
1233 } else {
1234 put_page(page_info->page);
1235 }
1236 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1237
1238 remaining -= curr_frag_len;
1239 index_inc(&rxcp->rxq_idx, rxq->len);
1240 memset(page_info, 0, sizeof(*page_info));
1241 }
1242 BUG_ON(j > MAX_SKB_FRAGS);
1243
1244 skb_shinfo(skb)->nr_frags = j + 1;
1245 skb->len = rxcp->pkt_size;
1246 skb->data_len = rxcp->pkt_size;
1247 skb->truesize += rxcp->pkt_size;
1248 skb->ip_summed = CHECKSUM_UNNECESSARY;
1249 if (adapter->netdev->features & NETIF_F_RXHASH)
1250 skb->rxhash = rxcp->rss_hash;
1251
1252 if (unlikely(rxcp->vlanf))
1253 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1254
1255 napi_gro_frags(&eq_obj->napi);
1256}
1257
1258static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1259 struct be_eth_rx_compl *compl,
1260 struct be_rx_compl_info *rxcp)
1261{
1262 rxcp->pkt_size =
1263 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1264 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1265 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1266 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1267 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1268 rxcp->ip_csum =
1269 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1270 rxcp->l4_csum =
1271 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1272 rxcp->ipv6 =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1274 rxcp->rxq_idx =
1275 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1276 rxcp->num_rcvd =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1278 rxcp->pkt_type =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1280 rxcp->rss_hash =
1281		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1282 if (rxcp->vlanf) {
1283 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1284 compl);
1285 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1286 compl);
1287 }
1288}
1289
1290static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1291 struct be_eth_rx_compl *compl,
1292 struct be_rx_compl_info *rxcp)
1293{
1294 rxcp->pkt_size =
1295 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1296 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1297 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1298 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1299 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1300 rxcp->ip_csum =
1301 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1302 rxcp->l4_csum =
1303 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1304 rxcp->ipv6 =
1305 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1306 rxcp->rxq_idx =
1307 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1308 rxcp->num_rcvd =
1309 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1310 rxcp->pkt_type =
1311 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1312 rxcp->rss_hash =
1313		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1314 if (rxcp->vlanf) {
1315 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1316 compl);
1317 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1318 compl);
1319 }
1320}
1321
1322static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1323{
1324 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1325 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1326 struct be_adapter *adapter = rxo->adapter;
1327
1328 /* For checking the valid bit it is Ok to use either definition as the
1329 * valid bit is at the same position in both v0 and v1 Rx compl */
1330 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1331 return NULL;
1332
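	/* read the rest of the completion only after the valid bit has been seen */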
1333 rmb();
1334 be_dws_le_to_cpu(compl, sizeof(*compl));
1335
1336 if (adapter->be3_native)
1337 be_parse_rx_compl_v1(adapter, compl, rxcp);
1338 else
1339 be_parse_rx_compl_v0(adapter, compl, rxcp);
1340
1341 if (rxcp->vlanf) {
1342 /* vlanf could be wrongly set in some cards.
1343 * ignore if vtm is not set */
1344 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1345 rxcp->vlanf = 0;
1346
1347 if (!lancer_chip(adapter))
1348 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1349
1350 if (((adapter->pvid & VLAN_VID_MASK) ==
1351 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1352 !adapter->vlan_tag[rxcp->vlan_tag])
1353 rxcp->vlanf = 0;
1354 }
1355
1356	/* As the compl has been parsed, reset it; we won't touch it again */
1357 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1358
1359 queue_tail_inc(&rxo->cq);
1360 return rxcp;
1361}
1362
1363static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1364{
1365 u32 order = get_order(size);
1366
1367 if (order > 0)
1368 gfp |= __GFP_COMP;
1369 return alloc_pages(gfp, order);
1370}
1371
1372/*
1373 * Allocate a page, split it to fragments of size rx_frag_size and post as
1374 * receive buffers to BE
1375 */
1376static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1377{
1378 struct be_adapter *adapter = rxo->adapter;
1379 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1380 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1381 struct be_queue_info *rxq = &rxo->q;
1382 struct page *pagep = NULL;
1383 struct be_eth_rx_d *rxd;
1384 u64 page_dmaaddr = 0, frag_dmaaddr;
1385 u32 posted, page_offset = 0;
1386
1387 page_info = &rxo->page_info_tbl[rxq->head];
1388 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1389 if (!pagep) {
1390 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1391 if (unlikely(!pagep)) {
1392 rxo->stats.rx_post_fail++;
1393 break;
1394 }
1395 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1396 0, adapter->big_page_size,
1397 DMA_FROM_DEVICE);
1398 page_info->page_offset = 0;
1399 } else {
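			/* the current big page still has room: take an extra reference for this frag */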
1400 get_page(pagep);
1401 page_info->page_offset = page_offset + rx_frag_size;
1402 }
1403 page_offset = page_info->page_offset;
1404 page_info->page = pagep;
1405 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1406 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1407
1408 rxd = queue_head_node(rxq);
1409 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1410 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1411
1412 /* Any space left in the current big page for another frag? */
1413 if ((page_offset + rx_frag_size + rx_frag_size) >
1414 adapter->big_page_size) {
1415 pagep = NULL;
1416 page_info->last_page_user = true;
1417 }
1418
1419 prev_page_info = page_info;
1420 queue_head_inc(rxq);
1421 page_info = &page_info_tbl[rxq->head];
1422 }
1423 if (pagep)
1424 prev_page_info->last_page_user = true;
1425
1426 if (posted) {
1427 atomic_add(posted, &rxq->used);
1428 be_rxq_notify(adapter, rxq->id, posted);
1429 } else if (atomic_read(&rxq->used) == 0) {
1430 /* Let be_worker replenish when memory is available */
1431 rxo->rx_post_starved = true;
1432 }
1433}
1434
1435static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1436{
1437 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1438
1439 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1440 return NULL;
1441
1442 rmb();
1443 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1444
1445 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1446
1447 queue_tail_inc(tx_cq);
1448 return txcp;
1449}
1450
1451static u16 be_tx_compl_process(struct be_adapter *adapter,
1452 struct be_tx_obj *txo, u16 last_index)
1453{
1454 struct be_queue_info *txq = &txo->q;
1455 struct be_eth_wrb *wrb;
1456 struct sk_buff **sent_skbs = txo->sent_skb_list;
1457 struct sk_buff *sent_skb;
1458 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1459 bool unmap_skb_hdr = true;
1460
1461 sent_skb = sent_skbs[txq->tail];
1462 BUG_ON(!sent_skb);
1463 sent_skbs[txq->tail] = NULL;
1464
1465 /* skip header wrb */
1466 queue_tail_inc(txq);
1467
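	/* the first data WRB maps the linear skb head (dma_map_single); later WRBs map pages */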
1468 do {
1469 cur_index = txq->tail;
1470 wrb = queue_tail_node(txq);
1471 unmap_tx_frag(&adapter->pdev->dev, wrb,
1472 (unmap_skb_hdr && skb_headlen(sent_skb)));
1473 unmap_skb_hdr = false;
1474
1475 num_wrbs++;
1476 queue_tail_inc(txq);
1477 } while (cur_index != last_index);
1478
1479 kfree_skb(sent_skb);
1480 return num_wrbs;
1481}
1482
1483static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1484{
1485 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1486
1487 if (!eqe->evt)
1488 return NULL;
1489
1490 rmb();
1491 eqe->evt = le32_to_cpu(eqe->evt);
1492 queue_tail_inc(&eq_obj->q);
1493 return eqe;
1494}
1495
1496static int event_handle(struct be_adapter *adapter,
1497 struct be_eq_obj *eq_obj,
1498 bool rearm)
1499{
1500 struct be_eq_entry *eqe;
1501 u16 num = 0;
1502
1503 while ((eqe = event_get(eq_obj)) != NULL) {
1504 eqe->evt = 0;
1505 num++;
1506 }
1507
1508 /* Deal with any spurious interrupts that come
1509 * without events
1510 */
1511 if (!num)
1512 rearm = true;
1513
1514 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1515 if (num)
1516 napi_schedule(&eq_obj->napi);
1517
1518 return num;
1519}
1520
1521/* Just read and notify events without processing them.
1522 * Used at the time of destroying event queues */
1523static void be_eq_clean(struct be_adapter *adapter,
1524 struct be_eq_obj *eq_obj)
1525{
1526 struct be_eq_entry *eqe;
1527 u16 num = 0;
1528
1529 while ((eqe = event_get(eq_obj)) != NULL) {
1530 eqe->evt = 0;
1531 num++;
1532 }
1533
1534 if (num)
1535 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1536}
1537
1538static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1539{
1540 struct be_rx_page_info *page_info;
1541 struct be_queue_info *rxq = &rxo->q;
1542 struct be_queue_info *rx_cq = &rxo->cq;
1543 struct be_rx_compl_info *rxcp;
1544 u16 tail;
1545
1546 /* First cleanup pending rx completions */
1547 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1548 be_rx_compl_discard(adapter, rxo, rxcp);
1549 be_cq_notify(adapter, rx_cq->id, false, 1);
1550 }
1551
1552	/* Then free posted rx buffers that were not used */
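	/* the oldest posted buffer sits 'used' entries behind the head, modulo the ring size */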
1553 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1554 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1555 page_info = get_rx_page_info(adapter, rxo, tail);
1556 put_page(page_info->page);
1557 memset(page_info, 0, sizeof(*page_info));
1558 }
1559 BUG_ON(atomic_read(&rxq->used));
1560 rxq->tail = rxq->head = 0;
1561}
1562
1563static void be_tx_compl_clean(struct be_adapter *adapter,
1564 struct be_tx_obj *txo)
1565{
1566 struct be_queue_info *tx_cq = &txo->cq;
1567 struct be_queue_info *txq = &txo->q;
1568 struct be_eth_tx_compl *txcp;
1569 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1570 struct sk_buff **sent_skbs = txo->sent_skb_list;
1571 struct sk_buff *sent_skb;
1572 bool dummy_wrb;
1573
1574 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1575 do {
1576 while ((txcp = be_tx_compl_get(tx_cq))) {
1577 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1578 wrb_index, txcp);
1579 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1580 cmpl++;
1581 }
1582 if (cmpl) {
1583 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1584 atomic_sub(num_wrbs, &txq->used);
1585 cmpl = 0;
1586 num_wrbs = 0;
1587 }
1588
1589 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1590 break;
1591
1592 mdelay(1);
1593 } while (true);
1594
1595 if (atomic_read(&txq->used))
1596 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1597 atomic_read(&txq->used));
1598
1599 /* free posted tx for which compls will never arrive */
1600 while (atomic_read(&txq->used)) {
1601 sent_skb = sent_skbs[txq->tail];
1602 end_idx = txq->tail;
1603 index_adv(&end_idx,
1604 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1605 txq->len);
1606 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1607 atomic_sub(num_wrbs, &txq->used);
1608 }
1609}
1610
1611static void be_mcc_queues_destroy(struct be_adapter *adapter)
1612{
1613 struct be_queue_info *q;
1614
1615 q = &adapter->mcc_obj.q;
1616 if (q->created)
1617 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1618 be_queue_free(adapter, q);
1619
1620 q = &adapter->mcc_obj.cq;
1621 if (q->created)
1622 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1623 be_queue_free(adapter, q);
1624}
1625
1626/* Must be called only after TX qs are created as MCC shares TX EQ */
1627static int be_mcc_queues_create(struct be_adapter *adapter)
1628{
1629 struct be_queue_info *q, *cq;
1630
1631 /* Alloc MCC compl queue */
1632 cq = &adapter->mcc_obj.cq;
1633 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1634 sizeof(struct be_mcc_compl)))
1635 goto err;
1636
1637 /* Ask BE to create MCC compl queue; share TX's eq */
1638 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1639 goto mcc_cq_free;
1640
1641 /* Alloc MCC queue */
1642 q = &adapter->mcc_obj.q;
1643 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1644 goto mcc_cq_destroy;
1645
1646 /* Ask BE to create MCC queue */
1647 if (be_cmd_mccq_create(adapter, q, cq))
1648 goto mcc_q_free;
1649
1650 return 0;
1651
1652mcc_q_free:
1653 be_queue_free(adapter, q);
1654mcc_cq_destroy:
1655 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1656mcc_cq_free:
1657 be_queue_free(adapter, cq);
1658err:
1659 return -1;
1660}
1661
1662static void be_tx_queues_destroy(struct be_adapter *adapter)
1663{
1664 struct be_queue_info *q;
1665 struct be_tx_obj *txo;
1666 u8 i;
1667
1668 for_all_tx_queues(adapter, txo, i) {
1669 q = &txo->q;
1670 if (q->created)
1671 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1672 be_queue_free(adapter, q);
1673
1674 q = &txo->cq;
1675 if (q->created)
1676 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1677 be_queue_free(adapter, q);
1678 }
1679
1680 /* Clear any residual events */
1681 be_eq_clean(adapter, &adapter->tx_eq);
1682
1683 q = &adapter->tx_eq.q;
1684 if (q->created)
1685 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1686 be_queue_free(adapter, q);
1687}
1688
1689/* One TX event queue is shared by all TX compl qs */
1690static int be_tx_queues_create(struct be_adapter *adapter)
1691{
1692 struct be_queue_info *eq, *q, *cq;
1693 struct be_tx_obj *txo;
1694 u8 i;
1695
1696 adapter->tx_eq.max_eqd = 0;
1697 adapter->tx_eq.min_eqd = 0;
1698 adapter->tx_eq.cur_eqd = 96;
1699 adapter->tx_eq.enable_aic = false;
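	/* TX EQ uses a fixed delay; adaptive interrupt coalescing (AIC) is enabled only on RX EQs */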
1700
1701 eq = &adapter->tx_eq.q;
1702 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1703 sizeof(struct be_eq_entry)))
1704 return -1;
1705
1706 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1707 goto err;
1708 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1709
1710 for_all_tx_queues(adapter, txo, i) {
1711 cq = &txo->cq;
1712 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1713 sizeof(struct be_eth_tx_compl)))
1714 goto err;
1715
1716 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1717 goto err;
1718
1719 q = &txo->q;
1720 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1721 sizeof(struct be_eth_wrb)))
1722 goto err;
1723
1724 if (be_cmd_txq_create(adapter, q, cq))
1725 goto err;
1726 }
1727 return 0;
1728
1729err:
1730 be_tx_queues_destroy(adapter);
1731 return -1;
1732}
1733
1734static void be_rx_queues_destroy(struct be_adapter *adapter)
1735{
1736 struct be_queue_info *q;
1737 struct be_rx_obj *rxo;
1738 int i;
1739
1740 for_all_rx_queues(adapter, rxo, i) {
1741 be_queue_free(adapter, &rxo->q);
1742
1743 q = &rxo->cq;
1744 if (q->created)
1745 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1746 be_queue_free(adapter, q);
1747
1748 q = &rxo->rx_eq.q;
1749 if (q->created)
1750 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1751 be_queue_free(adapter, q);
1752 }
1753}
1754
1755static u32 be_num_rxqs_want(struct be_adapter *adapter)
1756{
1757 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1758 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1759 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1760 } else {
1761 dev_warn(&adapter->pdev->dev,
1762 "No support for multiple RX queues\n");
1763 return 1;
1764 }
1765}
1766
1767static int be_rx_queues_create(struct be_adapter *adapter)
1768{
1769 struct be_queue_info *eq, *q, *cq;
1770 struct be_rx_obj *rxo;
1771 int rc, i;
1772
1773 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1774 msix_enabled(adapter) ?
1775 adapter->num_msix_vec - 1 : 1);
1776 if (adapter->num_rx_qs != MAX_RX_QS)
1777 dev_warn(&adapter->pdev->dev,
1778 "Can create only %d RX queues", adapter->num_rx_qs);
1779
1780 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1781 for_all_rx_queues(adapter, rxo, i) {
1782 rxo->adapter = adapter;
1783 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1784 rxo->rx_eq.enable_aic = true;
1785
1786 /* EQ */
1787 eq = &rxo->rx_eq.q;
1788 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1789 sizeof(struct be_eq_entry));
1790 if (rc)
1791 goto err;
1792
1793 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1794 if (rc)
1795 goto err;
1796
1797 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1798
1799 /* CQ */
1800 cq = &rxo->cq;
1801 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1802 sizeof(struct be_eth_rx_compl));
1803 if (rc)
1804 goto err;
1805
1806 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1807 if (rc)
1808 goto err;
1809
1810 /* Rx Q - will be created in be_open() */
1811 q = &rxo->q;
1812 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1813 sizeof(struct be_eth_rx_d));
1814 if (rc)
1815 goto err;
1816
1817 }
1818
1819 return 0;
1820err:
1821 be_rx_queues_destroy(adapter);
1822 return -1;
1823}
1824
1825static bool event_peek(struct be_eq_obj *eq_obj)
1826{
1827 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1828 if (!eqe->evt)
1829 return false;
1830 else
1831 return true;
1832}
1833
1834static irqreturn_t be_intx(int irq, void *dev)
1835{
1836 struct be_adapter *adapter = dev;
1837 struct be_rx_obj *rxo;
1838 int isr, i, tx = 0 , rx = 0;
1839
1840 if (lancer_chip(adapter)) {
1841 if (event_peek(&adapter->tx_eq))
1842 tx = event_handle(adapter, &adapter->tx_eq, false);
1843 for_all_rx_queues(adapter, rxo, i) {
1844 if (event_peek(&rxo->rx_eq))
1845 rx |= event_handle(adapter, &rxo->rx_eq, true);
1846 }
1847
1848 if (!(tx || rx))
1849 return IRQ_NONE;
1850
1851 } else {
1852 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1853 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1854 if (!isr)
1855 return IRQ_NONE;
1856
1857 if ((1 << adapter->tx_eq.eq_idx & isr))
1858 event_handle(adapter, &adapter->tx_eq, false);
1859
1860 for_all_rx_queues(adapter, rxo, i) {
1861 if ((1 << rxo->rx_eq.eq_idx & isr))
1862 event_handle(adapter, &rxo->rx_eq, true);
1863 }
1864 }
1865
1866 return IRQ_HANDLED;
1867}
1868
1869static irqreturn_t be_msix_rx(int irq, void *dev)
1870{
1871 struct be_rx_obj *rxo = dev;
1872 struct be_adapter *adapter = rxo->adapter;
1873
1874 event_handle(adapter, &rxo->rx_eq, true);
1875
1876 return IRQ_HANDLED;
1877}
1878
1879static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1880{
1881 struct be_adapter *adapter = dev;
1882
1883 event_handle(adapter, &adapter->tx_eq, false);
1884
1885 return IRQ_HANDLED;
1886}
1887
1888static inline bool do_gro(struct be_rx_compl_info *rxcp)
1889{
1890	return rxcp->tcpf && !rxcp->err;
1891}
1892
1893static int be_poll_rx(struct napi_struct *napi, int budget)
1894{
1895 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1896 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1897 struct be_adapter *adapter = rxo->adapter;
1898 struct be_queue_info *rx_cq = &rxo->cq;
1899 struct be_rx_compl_info *rxcp;
1900 u32 work_done;
1901
1902 rxo->stats.rx_polls++;
1903 for (work_done = 0; work_done < budget; work_done++) {
1904 rxcp = be_rx_compl_get(rxo);
1905 if (!rxcp)
1906 break;
1907
1908 /* Ignore flush completions */
1909 if (rxcp->num_rcvd && rxcp->pkt_size) {
1910 if (do_gro(rxcp))
1911 be_rx_compl_process_gro(adapter, rxo, rxcp);
1912 else
1913 be_rx_compl_process(adapter, rxo, rxcp);
1914 } else if (rxcp->pkt_size == 0) {
1915 be_rx_compl_discard(adapter, rxo, rxcp);
1916 }
1917
1918 be_rx_stats_update(rxo, rxcp);
1919 }
1920
1921 /* Refill the queue */
1922 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1923 be_post_rx_frags(rxo, GFP_ATOMIC);
1924
1925 /* All consumed */
1926 if (work_done < budget) {
1927 napi_complete(napi);
1928 be_cq_notify(adapter, rx_cq->id, true, work_done);
1929 } else {
1930 /* More to be consumed; continue with interrupts disabled */
1931 be_cq_notify(adapter, rx_cq->id, false, work_done);
1932 }
1933 return work_done;
1934}
1935
1936/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1937 * For TX/MCC we don't honour budget; consume everything
1938 */
1939static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1940{
1941 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1942 struct be_adapter *adapter =
1943 container_of(tx_eq, struct be_adapter, tx_eq);
1944 struct be_tx_obj *txo;
1945 struct be_eth_tx_compl *txcp;
1946 int tx_compl, mcc_compl, status = 0;
1947 u8 i;
1948 u16 num_wrbs;
1949
1950 for_all_tx_queues(adapter, txo, i) {
1951 tx_compl = 0;
1952 num_wrbs = 0;
1953 while ((txcp = be_tx_compl_get(&txo->cq))) {
1954 num_wrbs += be_tx_compl_process(adapter, txo,
1955 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956 wrb_index, txcp));
1957 tx_compl++;
1958 }
1959 if (tx_compl) {
1960 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1961
1962 atomic_sub(num_wrbs, &txo->q.used);
1963
1964 /* As Tx wrbs have been freed up, wake up netdev queue
1965 * if it was stopped due to lack of tx wrbs. */
1966 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1967 atomic_read(&txo->q.used) < txo->q.len / 2) {
1968 netif_wake_subqueue(adapter->netdev, i);
1969 }
1970
1971 adapter->drv_stats.be_tx_events++;
1972 txo->stats.be_tx_compl += tx_compl;
1973 }
1974 }
1975
1976 mcc_compl = be_process_mcc(adapter, &status);
1977
1978 if (mcc_compl) {
1979 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1980 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1981 }
1982
1983 napi_complete(napi);
1984
1985 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1986 return 1;
1987}
1988
1989void be_detect_dump_ue(struct be_adapter *adapter)
1990{
1991 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1992 u32 i;
1993
1994 pci_read_config_dword(adapter->pdev,
1995 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1996 pci_read_config_dword(adapter->pdev,
1997 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1998 pci_read_config_dword(adapter->pdev,
1999 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2000 pci_read_config_dword(adapter->pdev,
2001 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2002
2003	ue_status_lo &= ~ue_status_lo_mask;
2004	ue_status_hi &= ~ue_status_hi_mask;
2005
2006 if (ue_status_lo || ue_status_hi) {
2007 adapter->ue_detected = true;
2008 adapter->eeh_err = true;
2009 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2010 }
2011
2012 if (ue_status_lo) {
2013 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2014 if (ue_status_lo & 1)
2015 dev_err(&adapter->pdev->dev,
2016 "UE: %s bit set\n", ue_status_low_desc[i]);
2017 }
2018 }
2019 if (ue_status_hi) {
2020 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2021 if (ue_status_hi & 1)
2022 dev_err(&adapter->pdev->dev,
2023 "UE: %s bit set\n", ue_status_hi_desc[i]);
2024 }
2025 }
2027}
2028
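/*
 * Periodic (1 second) housekeeping: check for UEs, fire a stats
 * request if none is outstanding, refresh TX/RX rate samples, adapt
 * the RX EQ delay, and replenish any RX ring that starved for buffers.
 */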
2029static void be_worker(struct work_struct *work)
2030{
2031 struct be_adapter *adapter =
2032 container_of(work, struct be_adapter, work.work);
2033 struct be_rx_obj *rxo;
2034 struct be_tx_obj *txo;
2035 int i;
2036
2037 if (!adapter->ue_detected && !lancer_chip(adapter))
2038 be_detect_dump_ue(adapter);
2039
2040	/* When interrupts are not yet enabled, just reap any pending
2041	 * MCC completions */
2042 if (!netif_running(adapter->netdev)) {
2043 int mcc_compl, status = 0;
2044
2045 mcc_compl = be_process_mcc(adapter, &status);
2046
2047 if (mcc_compl) {
2048 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2049 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2050 }
2051
2052 goto reschedule;
2053 }
2054
2055 if (!adapter->stats_cmd_sent) {
2056 if (lancer_chip(adapter))
2057 lancer_cmd_get_pport_stats(adapter,
2058 &adapter->stats_cmd);
2059 else
2060 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2061 }
2062
2063 for_all_tx_queues(adapter, txo, i)
2064 be_tx_rate_update(txo);
2065
2066 for_all_rx_queues(adapter, rxo, i) {
2067 be_rx_rate_update(rxo);
2068 be_rx_eqd_update(adapter, rxo);
2069
2070 if (rxo->rx_post_starved) {
2071 rxo->rx_post_starved = false;
2072 be_post_rx_frags(rxo, GFP_KERNEL);
2073 }
2074 }
2075
2076reschedule:
2077 adapter->work_counter++;
2078 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2079}
2080
2081static void be_msix_disable(struct be_adapter *adapter)
2082{
2083 if (msix_enabled(adapter)) {
2084 pci_disable_msix(adapter->pdev);
2085 adapter->num_msix_vec = 0;
2086 }
2087}
2088
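/*
 * Request one MSI-X vector per desired RX queue plus one shared by
 * TX and MCC. On failure pci_enable_msix() returns the number of
 * vectors that could have been allocated; retry with that count as
 * long as it still covers the minimum of one RX and one TX vector.
 */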
2089static void be_msix_enable(struct be_adapter *adapter)
2090{
2091#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2092 int i, status, num_vec;
2093
2094 num_vec = be_num_rxqs_want(adapter) + 1;
2095
2096 for (i = 0; i < num_vec; i++)
2097 adapter->msix_entries[i].entry = i;
2098
2099 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2100 if (status == 0) {
2101 goto done;
2102 } else if (status >= BE_MIN_MSIX_VECTORS) {
2103 num_vec = status;
2104 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2105 num_vec) == 0)
2106 goto done;
2107 }
2108 return;
2109done:
2110 adapter->num_msix_vec = num_vec;
2111 return;
2112}
2113
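/*
 * PF-only SR-IOV bring-up: read TotalVFs from the SR-IOV extended
 * capability, clamp the module-parameter num_vfs to it, and record
 * whether pci_enable_sriov() actually succeeded.
 */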
2114static void be_sriov_enable(struct be_adapter *adapter)
2115{
2116 be_check_sriov_fn_type(adapter);
2117#ifdef CONFIG_PCI_IOV
2118 if (be_physfn(adapter) && num_vfs) {
2119 int status, pos;
2120 u16 nvfs;
2121
2122 pos = pci_find_ext_capability(adapter->pdev,
2123 PCI_EXT_CAP_ID_SRIOV);
2124 pci_read_config_word(adapter->pdev,
2125 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2126
2127 if (num_vfs > nvfs) {
2128			dev_info(&adapter->pdev->dev,
2129				"Device supports %d VFs, not %d\n",
2130				nvfs, num_vfs);
2131 num_vfs = nvfs;
2132 }
2133
2134 status = pci_enable_sriov(adapter->pdev, num_vfs);
2135		adapter->sriov_enabled = !status;
2136 }
2137#endif
2138}
2139
2140static void be_sriov_disable(struct be_adapter *adapter)
2141{
2142#ifdef CONFIG_PCI_IOV
2143 if (adapter->sriov_enabled) {
2144 pci_disable_sriov(adapter->pdev);
2145 adapter->sriov_enabled = false;
2146 }
2147#endif
2148}
2149
2150static inline int be_msix_vec_get(struct be_adapter *adapter,
2151 struct be_eq_obj *eq_obj)
2152{
2153 return adapter->msix_entries[eq_obj->eq_idx].vector;
2154}
2155
2156static int be_request_irq(struct be_adapter *adapter,
2157 struct be_eq_obj *eq_obj,
2158 void *handler, char *desc, void *context)
2159{
2160 struct net_device *netdev = adapter->netdev;
2161 int vec;
2162
2163 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2164 vec = be_msix_vec_get(adapter, eq_obj);
2165 return request_irq(vec, handler, 0, eq_obj->desc, context);
2166}
2167
2168static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2169 void *context)
2170{
2171 int vec = be_msix_vec_get(adapter, eq_obj);
2172 free_irq(vec, context);
2173}
2174
2175static int be_msix_register(struct be_adapter *adapter)
2176{
2177 struct be_rx_obj *rxo;
2178 int status, i;
2179 char qname[10];
2180
2181 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2182 adapter);
2183 if (status)
2184 goto err;
2185
2186 for_all_rx_queues(adapter, rxo, i) {
2187 sprintf(qname, "rxq%d", i);
2188 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2189 qname, rxo);
2190 if (status)
2191 goto err_msix;
2192 }
2193
2194 return 0;
2195
2196err_msix:
2197 be_free_irq(adapter, &adapter->tx_eq, adapter);
2198
2199 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2200 be_free_irq(adapter, &rxo->rx_eq, rxo);
2201
2202err:
2203	dev_warn(&adapter->pdev->dev,
2204		"MSI-X request_irq failed - err %d\n", status);
2205 be_msix_disable(adapter);
2206 return status;
2207}
2208
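/* Prefer MSI-X; fall back to shared INTx on the PF only, since VFs
 * do not support INTx.
 */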
2209static int be_irq_register(struct be_adapter *adapter)
2210{
2211 struct net_device *netdev = adapter->netdev;
2212 int status;
2213
2214 if (msix_enabled(adapter)) {
2215 status = be_msix_register(adapter);
2216 if (status == 0)
2217 goto done;
2218 /* INTx is not supported for VF */
2219 if (!be_physfn(adapter))
2220 return status;
2221 }
2222
2223 /* INTx */
2224 netdev->irq = adapter->pdev->irq;
2225 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2226 adapter);
2227 if (status) {
2228 dev_err(&adapter->pdev->dev,
2229 "INTx request IRQ failed - err %d\n", status);
2230 return status;
2231 }
2232done:
2233 adapter->isr_registered = true;
2234 return 0;
2235}
2236
2237static void be_irq_unregister(struct be_adapter *adapter)
2238{
2239 struct net_device *netdev = adapter->netdev;
2240 struct be_rx_obj *rxo;
2241 int i;
2242
2243 if (!adapter->isr_registered)
2244 return;
2245
2246 /* INTx */
2247 if (!msix_enabled(adapter)) {
2248 free_irq(netdev->irq, adapter);
2249 goto done;
2250 }
2251
2252 /* MSIx */
2253 be_free_irq(adapter, &adapter->tx_eq, adapter);
2254
2255 for_all_rx_queues(adapter, rxo, i)
2256 be_free_irq(adapter, &rxo->rx_eq, rxo);
2257
2258done:
2259 adapter->isr_registered = false;
2260}
2261
2262static void be_rx_queues_clear(struct be_adapter *adapter)
2263{
2264 struct be_queue_info *q;
2265 struct be_rx_obj *rxo;
2266 int i;
2267
2268 for_all_rx_queues(adapter, rxo, i) {
2269 q = &rxo->q;
2270 if (q->created) {
2271 be_cmd_rxq_destroy(adapter, q);
2272			/* After the rxq is invalidated, wait 1ms for all
2273			 * DMA to complete and the flush completion to
2274			 * arrive
2275			 */
2276 mdelay(1);
2277 be_rx_q_clean(adapter, rxo);
2278 }
2279
2280 /* Clear any residual events */
2281 q = &rxo->rx_eq.q;
2282 if (q->created)
2283 be_eq_clean(adapter, &rxo->rx_eq);
2284 }
2285}
2286
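/*
 * Teardown mirrors be_open(): stop async MCC processing, disable NAPI,
 * synchronize every interrupt vector before freeing it, then drain
 * pending TX completions and flush the RX queues.
 */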
2287static int be_close(struct net_device *netdev)
2288{
2289 struct be_adapter *adapter = netdev_priv(netdev);
2290 struct be_rx_obj *rxo;
2291 struct be_tx_obj *txo;
2292 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2293 int vec, i;
2294
2295 be_async_mcc_disable(adapter);
2296
2297 netif_carrier_off(netdev);
2298 adapter->link_up = false;
2299
2300 if (!lancer_chip(adapter))
2301 be_intr_set(adapter, false);
2302
2303 for_all_rx_queues(adapter, rxo, i)
2304 napi_disable(&rxo->rx_eq.napi);
2305
2306 napi_disable(&tx_eq->napi);
2307
2308 if (lancer_chip(adapter)) {
2309 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2310 for_all_rx_queues(adapter, rxo, i)
2311 be_cq_notify(adapter, rxo->cq.id, false, 0);
2312 for_all_tx_queues(adapter, txo, i)
2313 be_cq_notify(adapter, txo->cq.id, false, 0);
2314 }
2315
2316 if (msix_enabled(adapter)) {
2317 vec = be_msix_vec_get(adapter, tx_eq);
2318 synchronize_irq(vec);
2319
2320 for_all_rx_queues(adapter, rxo, i) {
2321 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2322 synchronize_irq(vec);
2323 }
2324 } else {
2325 synchronize_irq(netdev->irq);
2326 }
2327 be_irq_unregister(adapter);
2328
2329 /* Wait for all pending tx completions to arrive so that
2330 * all tx skbs are freed.
2331 */
2332 for_all_tx_queues(adapter, txo, i)
2333 be_tx_compl_clean(adapter, txo);
2334
2335 be_rx_queues_clear(adapter);
2336 return 0;
2337}
2338
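/*
 * Create the RX rings. Queue 0 is the default (non-RSS) queue; any
 * additional queues are created RSS-capable, and their IDs are then
 * programmed into the RSS indirection table.
 */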
2339static int be_rx_queues_setup(struct be_adapter *adapter)
2340{
2341 struct be_rx_obj *rxo;
2342 int rc, i;
2343 u8 rsstable[MAX_RSS_QS];
2344
2345 for_all_rx_queues(adapter, rxo, i) {
2346 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2347 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2348 adapter->if_handle,
2349 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2350 if (rc)
2351 return rc;
2352 }
2353
2354 if (be_multi_rxq(adapter)) {
2355 for_all_rss_queues(adapter, rxo, i)
2356 rsstable[i] = rxo->rss_id;
2357
2358 rc = be_cmd_rss_config(adapter, rsstable,
2359 adapter->num_rx_qs - 1);
2360 if (rc)
2361 return rc;
2362 }
2363
2364 /* First time posting */
2365 for_all_rx_queues(adapter, rxo, i) {
2366 be_post_rx_frags(rxo, GFP_KERNEL);
2367 napi_enable(&rxo->rx_eq.napi);
2368 }
2369 return 0;
2370}
2371
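/*
 * Bring-up order: post RX buffers and enable NAPI, register and enable
 * interrupts, arm the event/completion queues, enable async MCC, then
 * query link state and (on the PF) apply VLAN and flow-control config.
 */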
2372static int be_open(struct net_device *netdev)
2373{
2374 struct be_adapter *adapter = netdev_priv(netdev);
2375 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2376 struct be_rx_obj *rxo;
2377 bool link_up;
2378 int status, i;
2379 u8 mac_speed;
2380 u16 link_speed;
2381
2382 status = be_rx_queues_setup(adapter);
2383 if (status)
2384 goto err;
2385
2386 napi_enable(&tx_eq->napi);
2387
2388 be_irq_register(adapter);
2389
2390 if (!lancer_chip(adapter))
2391 be_intr_set(adapter, true);
2392
2393 /* The evt queues are created in unarmed state; arm them */
2394 for_all_rx_queues(adapter, rxo, i) {
2395 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2396 be_cq_notify(adapter, rxo->cq.id, true, 0);
2397 }
2398 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2399
2400 /* Now that interrupts are on we can process async mcc */
2401 be_async_mcc_enable(adapter);
2402
2403 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2404 &link_speed, 0);
2405 if (status)
2406 goto err;
2407 be_link_status_update(adapter, link_up);
2408
2409 if (be_physfn(adapter)) {
2410 status = be_vid_config(adapter, false, 0);
2411 if (status)
2412 goto err;
2413
2414 status = be_cmd_set_flow_control(adapter,
2415 adapter->tx_fc, adapter->rx_fc);
2416 if (status)
2417 goto err;
2418 }
2419
2420 return 0;
2421err:
2422 be_close(adapter->netdev);
2423 return -EIO;
2424}
2425
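/*
 * Program or clear the Wake-on-LAN magic-packet filter. Enabling also
 * sets the PM control bits in PCI config space and arms wake-up from
 * D3hot/D3cold; disabling programs a zeroed MAC and disarms both.
 */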
2426static int be_setup_wol(struct be_adapter *adapter, bool enable)
2427{
2428 struct be_dma_mem cmd;
2429 int status = 0;
2430 u8 mac[ETH_ALEN];
2431
2432 memset(mac, 0, ETH_ALEN);
2433
2434 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2435 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2436 GFP_KERNEL);
2437	if (cmd.va == NULL)
2438		return -ENOMEM;
2439 memset(cmd.va, 0, cmd.size);
2440
2441 if (enable) {
2442 status = pci_write_config_dword(adapter->pdev,
2443 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2444 if (status) {
2445 dev_err(&adapter->pdev->dev,
2446				"Could not enable Wake-on-LAN\n");
2447 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2448 cmd.dma);
2449 return status;
2450 }
2451 status = be_cmd_enable_magic_wol(adapter,
2452 adapter->netdev->dev_addr, &cmd);
2453 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2454 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2455 } else {
2456 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2457 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2458 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2459 }
2460
2461 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2462 return status;
2463}
2464
2465/*
2466 * Generate a seed MAC address from the PF MAC address using jhash.
2467 * MAC addresses for the VFs are assigned incrementally, starting from the
2468 * seed. These addresses are programmed into the ASIC by the PF; each VF
2469 * driver queries for its own MAC address during probe.
2470 */
2471static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2472{
2473 u32 vf = 0;
2474 int status = 0;
2475 u8 mac[ETH_ALEN];
2476
2477 be_vf_eth_addr_generate(adapter, mac);
2478
2479 for (vf = 0; vf < num_vfs; vf++) {
2480 status = be_cmd_pmac_add(adapter, mac,
2481 adapter->vf_cfg[vf].vf_if_handle,
2482 &adapter->vf_cfg[vf].vf_pmac_id,
2483 vf + 1);
2484 if (status)
2485 dev_err(&adapter->pdev->dev,
2486				"MAC address add failed for VF %d\n", vf);
2487 else
2488 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2489
2490 mac[5] += 1;
2491 }
2492 return status;
2493}
2494
2495static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2496{
2497 u32 vf;
2498
2499 for (vf = 0; vf < num_vfs; vf++) {
2500 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2501 be_cmd_pmac_del(adapter,
2502 adapter->vf_cfg[vf].vf_if_handle,
2503 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2504 }
2505}
2506
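/*
 * One-time resource setup: create the interface (promiscuous and RSS
 * capabilities only on the PF), create a minimal interface per VF when
 * SR-IOV is enabled, then bring up the TX, RX and MCC queues.
 */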
2507static int be_setup(struct be_adapter *adapter)
2508{
2509 struct net_device *netdev = adapter->netdev;
2510 u32 cap_flags, en_flags, vf = 0;
2511 int status;
2512 u8 mac[ETH_ALEN];
2513
2514 be_cmd_req_native_mode(adapter);
2515
2516 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2517 BE_IF_FLAGS_BROADCAST |
2518 BE_IF_FLAGS_MULTICAST;
2519
2520 if (be_physfn(adapter)) {
2521 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2522 BE_IF_FLAGS_PROMISCUOUS |
2523 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2524 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2525
2526 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2527 cap_flags |= BE_IF_FLAGS_RSS;
2528 en_flags |= BE_IF_FLAGS_RSS;
2529 }
2530 }
2531
2532 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2533 netdev->dev_addr, false/* pmac_invalid */,
2534 &adapter->if_handle, &adapter->pmac_id, 0);
2535 if (status != 0)
2536 goto do_none;
2537
2538 if (be_physfn(adapter)) {
2539 if (adapter->sriov_enabled) {
2540 while (vf < num_vfs) {
2541 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2542 BE_IF_FLAGS_BROADCAST;
2543 status = be_cmd_if_create(adapter, cap_flags,
2544 en_flags, mac, true,
2545 &adapter->vf_cfg[vf].vf_if_handle,
2546 NULL, vf+1);
2547 if (status) {
2548 dev_err(&adapter->pdev->dev,
2549 "Interface Create failed for VF %d\n",
2550 vf);
2551 goto if_destroy;
2552 }
2553 adapter->vf_cfg[vf].vf_pmac_id =
2554 BE_INVALID_PMAC_ID;
2555 vf++;
2556 }
2557 }
2558 } else {
2559 status = be_cmd_mac_addr_query(adapter, mac,
2560 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2561 if (!status) {
2562 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2563 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2564 }
2565 }
2566
2567 status = be_tx_queues_create(adapter);
2568 if (status != 0)
2569 goto if_destroy;
2570
2571 status = be_rx_queues_create(adapter);
2572 if (status != 0)
2573 goto tx_qs_destroy;
2574
2575 /* Allow all priorities by default. A GRP5 evt may modify this */
2576 adapter->vlan_prio_bmap = 0xff;
2577
2578 status = be_mcc_queues_create(adapter);
2579 if (status != 0)
2580 goto rx_qs_destroy;
2581
2582 adapter->link_speed = -1;
2583
2584 return 0;
2585
2586rx_qs_destroy:
2587 be_rx_queues_destroy(adapter);
2588tx_qs_destroy:
2589 be_tx_queues_destroy(adapter);
2590if_destroy:
2591 if (be_physfn(adapter) && adapter->sriov_enabled)
2592 for (vf = 0; vf < num_vfs; vf++)
2593 if (adapter->vf_cfg[vf].vf_if_handle)
2594 be_cmd_if_destroy(adapter,
2595 adapter->vf_cfg[vf].vf_if_handle,
2596 vf + 1);
2597 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2598do_none:
2599 return status;
2600}
2601
2602static int be_clear(struct be_adapter *adapter)
2603{
2604 int vf;
2605
2606 if (be_physfn(adapter) && adapter->sriov_enabled)
2607 be_vf_eth_addr_rem(adapter);
2608
2609 be_mcc_queues_destroy(adapter);
2610 be_rx_queues_destroy(adapter);
2611 be_tx_queues_destroy(adapter);
2612 adapter->eq_next_idx = 0;
2613
2614 if (be_physfn(adapter) && adapter->sriov_enabled)
2615 for (vf = 0; vf < num_vfs; vf++)
2616 if (adapter->vf_cfg[vf].vf_if_handle)
2617 be_cmd_if_destroy(adapter,
2618 adapter->vf_cfg[vf].vf_if_handle,
2619 vf + 1);
2620
2621 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2622
2623 adapter->be3_native = 0;
2624
2625 /* tell fw we're done with firing cmds */
2626 be_cmd_fw_clean(adapter);
2627 return 0;
2628}
2629
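/*
 * Decide whether the redboot (boot code) region must be flashed:
 * compare the CRC stored at the end of the region in flash with the
 * CRC embedded in the new image, and flash only on a mismatch.
 */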
2631#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2632static bool be_flash_redboot(struct be_adapter *adapter,
2633 const u8 *p, u32 img_start, int image_size,
2634 int hdr_size)
2635{
2636 u32 crc_offset;
2637 u8 flashed_crc[4];
2638 int status;
2639
2640 crc_offset = hdr_size + img_start + image_size - 4;
2641
2642 p += crc_offset;
2643
2644 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2645 (image_size - 4));
2646 if (status) {
2647 dev_err(&adapter->pdev->dev,
2648 "could not get crc from flash, not flashing redboot\n");
2649 return false;
2650 }
2651
2652	/* Update redboot only if the CRC does not match */
2653	if (!memcmp(flashed_crc, p, 4))
2654		return false;
2655
2656	return true;
2657}
2658
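/*
 * Walk the per-generation table of flash components, skipping entries
 * that don't apply (NCSI firmware on pre-3.102.148.0 images, redboot
 * whose CRC already matches), and write each image in 32KB chunks:
 * intermediate chunks use the SAVE op, the last one FLASH to commit.
 */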
2659static int be_flash_data(struct be_adapter *adapter,
2660 const struct firmware *fw,
2661 struct be_dma_mem *flash_cmd, int num_of_images)
2663{
2664 int status = 0, i, filehdr_size = 0;
2665 u32 total_bytes = 0, flash_op;
2666 int num_bytes;
2667 const u8 *p = fw->data;
2668 struct be_cmd_write_flashrom *req = flash_cmd->va;
2669 const struct flash_comp *pflashcomp;
2670 int num_comp;
2671
2672 static const struct flash_comp gen3_flash_types[9] = {
2673 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2674 FLASH_IMAGE_MAX_SIZE_g3},
2675 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2676 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2677 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2678 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2679 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2680 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2681 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2682 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2683 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2684 FLASH_IMAGE_MAX_SIZE_g3},
2685 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2686 FLASH_IMAGE_MAX_SIZE_g3},
2687 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2688 FLASH_IMAGE_MAX_SIZE_g3},
2689 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2690 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2691 };
2692 static const struct flash_comp gen2_flash_types[8] = {
2693 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2694 FLASH_IMAGE_MAX_SIZE_g2},
2695 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2696 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2697 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2698 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2699 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2700 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2701 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2702 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2703 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2704 FLASH_IMAGE_MAX_SIZE_g2},
2705 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2706 FLASH_IMAGE_MAX_SIZE_g2},
2707 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2708 FLASH_IMAGE_MAX_SIZE_g2}
2709 };
2710
2711 if (adapter->generation == BE_GEN3) {
2712 pflashcomp = gen3_flash_types;
2713 filehdr_size = sizeof(struct flash_file_hdr_g3);
2714 num_comp = ARRAY_SIZE(gen3_flash_types);
2715 } else {
2716 pflashcomp = gen2_flash_types;
2717 filehdr_size = sizeof(struct flash_file_hdr_g2);
2718 num_comp = ARRAY_SIZE(gen2_flash_types);
2719 }
2720 for (i = 0; i < num_comp; i++) {
2721 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2722 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2723 continue;
2724 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2725 (!be_flash_redboot(adapter, fw->data,
2726 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2727 (num_of_images * sizeof(struct image_hdr)))))
2728 continue;
2729 p = fw->data;
2730 p += filehdr_size + pflashcomp[i].offset
2731 + (num_of_images * sizeof(struct image_hdr));
2732 if (p + pflashcomp[i].size > fw->data + fw->size)
2733 return -1;
2734 total_bytes = pflashcomp[i].size;
2735 while (total_bytes) {
2736 if (total_bytes > 32*1024)
2737 num_bytes = 32*1024;
2738 else
2739 num_bytes = total_bytes;
2740 total_bytes -= num_bytes;
2741
2742 if (!total_bytes)
2743 flash_op = FLASHROM_OPER_FLASH;
2744 else
2745 flash_op = FLASHROM_OPER_SAVE;
2746 memcpy(req->params.data_buf, p, num_bytes);
2747 p += num_bytes;
2748 status = be_cmd_write_flashrom(adapter, flash_cmd,
2749 pflashcomp[i].optype, flash_op, num_bytes);
2750 if (status) {
2751 dev_err(&adapter->pdev->dev,
2752 "cmd to write to flash rom failed.\n");
2753 return -1;
2754 }
2755 }
2756 }
2757 return 0;
2758}
2759
2760static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2761{
2762 if (fhdr == NULL)
2763 return 0;
2764 if (fhdr->build[0] == '3')
2765 return BE_GEN3;
2766 else if (fhdr->build[0] == '2')
2767 return BE_GEN2;
2768 else
2769 return 0;
2770}
2771
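/*
 * Lancer flashing streams the raw image to the "/prg" object in 32KB
 * write_object chunks, then issues a zero-length write at the final
 * offset to commit everything written so far.
 */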
2772static int lancer_fw_download(struct be_adapter *adapter,
2773 const struct firmware *fw)
2774{
2775#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2776#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2777 struct be_dma_mem flash_cmd;
2778 const u8 *data_ptr = NULL;
2779 u8 *dest_image_ptr = NULL;
2780 size_t image_size = 0;
2781 u32 chunk_size = 0;
2782 u32 data_written = 0;
2783 u32 offset = 0;
2784 int status = 0;
2785 u8 add_status = 0;
2786
2787 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2788		dev_err(&adapter->pdev->dev,
2789			"FW image is not properly aligned; "
2790			"length must be 4-byte aligned\n");
2791 status = -EINVAL;
2792 goto lancer_fw_exit;
2793 }
2794
2795 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2796 + LANCER_FW_DOWNLOAD_CHUNK;
2797 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2798 &flash_cmd.dma, GFP_KERNEL);
2799 if (!flash_cmd.va) {
2800 status = -ENOMEM;
2801 dev_err(&adapter->pdev->dev,
2802 "Memory allocation failure while flashing\n");
2803 goto lancer_fw_exit;
2804 }
2805
2806 dest_image_ptr = flash_cmd.va +
2807 sizeof(struct lancer_cmd_req_write_object);
2808 image_size = fw->size;
2809 data_ptr = fw->data;
2810
2811 while (image_size) {
2812 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2813
2814 /* Copy the image chunk content. */
2815 memcpy(dest_image_ptr, data_ptr, chunk_size);
2816
2817 status = lancer_cmd_write_object(adapter, &flash_cmd,
2818 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2819 &data_written, &add_status);
2820
2821 if (status)
2822 break;
2823
2824 offset += data_written;
2825 data_ptr += data_written;
2826 image_size -= data_written;
2827 }
2828
2829 if (!status) {
2830		/* Commit the firmware written so far */
2831 status = lancer_cmd_write_object(adapter, &flash_cmd,
2832 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2833 &data_written, &add_status);
2834 }
2835
2836 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2837 flash_cmd.dma);
2838 if (status) {
2839 dev_err(&adapter->pdev->dev,
2840 "Firmware load error. "
2841 "Status code: 0x%x Additional Status: 0x%x\n",
2842 status, add_status);
2843 goto lancer_fw_exit;
2844 }
2845
2846 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2847lancer_fw_exit:
2848 return status;
2849}
2850
2851static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2852{
2853 struct flash_file_hdr_g2 *fhdr;
2854 struct flash_file_hdr_g3 *fhdr3;
2855 struct image_hdr *img_hdr_ptr = NULL;
2856 struct be_dma_mem flash_cmd;
2857 const u8 *p;
2858 int status = 0, i = 0, num_imgs = 0;
2859
2860 p = fw->data;
2861 fhdr = (struct flash_file_hdr_g2 *) p;
2862
2863 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2864 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2865 &flash_cmd.dma, GFP_KERNEL);
2866 if (!flash_cmd.va) {
2867 status = -ENOMEM;
2868 dev_err(&adapter->pdev->dev,
2869 "Memory allocation failure while flashing\n");
2870 goto be_fw_exit;
2871 }
2872
2873 if ((adapter->generation == BE_GEN3) &&
2874 (get_ufigen_type(fhdr) == BE_GEN3)) {
2875 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2876 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2877 for (i = 0; i < num_imgs; i++) {
2878 img_hdr_ptr = (struct image_hdr *) (fw->data +
2879 (sizeof(struct flash_file_hdr_g3) +
2880 i * sizeof(struct image_hdr)));
2881 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2882 status = be_flash_data(adapter, fw, &flash_cmd,
2883 num_imgs);
2884 }
2885 } else if ((adapter->generation == BE_GEN2) &&
2886 (get_ufigen_type(fhdr) == BE_GEN2)) {
2887 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2888 } else {
2889		dev_err(&adapter->pdev->dev,
2890			"UFI image and adapter generation are not compatible for flashing\n");
2891 status = -1;
2892 }
2893
2894 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2895 flash_cmd.dma);
2896 if (status) {
2897 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2898 goto be_fw_exit;
2899 }
2900
2901 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2902
2903be_fw_exit:
2904 return status;
2905}
2906
2907int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2908{
2909 const struct firmware *fw;
2910 int status;
2911
2912 if (!netif_running(adapter->netdev)) {
2913 dev_err(&adapter->pdev->dev,
2914 "Firmware load not allowed (interface is down)\n");
2915 return -1;
2916 }
2917
2918 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2919 if (status)
2920 goto fw_exit;
2921
2922 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2923
2924 if (lancer_chip(adapter))
2925 status = lancer_fw_download(adapter, fw);
2926 else
2927 status = be_fw_download(adapter, fw);
2928
2929fw_exit:
2930 release_firmware(fw);
2931 return status;
2932}
2933
2934static struct net_device_ops be_netdev_ops = {
2935 .ndo_open = be_open,
2936 .ndo_stop = be_close,
2937 .ndo_start_xmit = be_xmit,
2938 .ndo_set_rx_mode = be_set_multicast_list,
2939 .ndo_set_mac_address = be_mac_addr_set,
2940 .ndo_change_mtu = be_change_mtu,
2941 .ndo_validate_addr = eth_validate_addr,
2942 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2943 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2944 .ndo_set_vf_mac = be_set_vf_mac,
2945 .ndo_set_vf_vlan = be_set_vf_vlan,
2946 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2947 .ndo_get_vf_config = be_get_vf_config
2948};
2949
2950static void be_netdev_init(struct net_device *netdev)
2951{
2952 struct be_adapter *adapter = netdev_priv(netdev);
2953 struct be_rx_obj *rxo;
2954 int i;
2955
2956 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2957 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2958 NETIF_F_HW_VLAN_TX;
2959 if (be_multi_rxq(adapter))
2960 netdev->hw_features |= NETIF_F_RXHASH;
2961
2962 netdev->features |= netdev->hw_features |
2963 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2964
2965 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2966 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2967
2968 netdev->flags |= IFF_MULTICAST;
2969
2970 /* Default settings for Rx and Tx flow control */
2971 adapter->rx_fc = true;
2972 adapter->tx_fc = true;
2973
2974 netif_set_gso_max_size(netdev, 65535);
2975
2976 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2977
2978 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2979
2980 for_all_rx_queues(adapter, rxo, i)
2981 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2982 BE_NAPI_WEIGHT);
2983
2984 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2985 BE_NAPI_WEIGHT);
2986}
2987
2988static void be_unmap_pci_bars(struct be_adapter *adapter)
2989{
2990 if (adapter->csr)
2991 iounmap(adapter->csr);
2992 if (adapter->db)
2993 iounmap(adapter->db);
2994 if (adapter->pcicfg && be_physfn(adapter))
2995 iounmap(adapter->pcicfg);
2996}
2997
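/*
 * Map the PCI BARs. Lancer needs only a doorbell mapping from BAR 0.
 * On BE2/BE3 the PF maps CSR (BAR 2) plus doorbell and pcicfg BARs
 * whose numbers depend on the generation; a VF has no pcicfg BAR and
 * reaches that space at a fixed offset from its doorbell mapping.
 */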
2998static int be_map_pci_bars(struct be_adapter *adapter)
2999{
3000 u8 __iomem *addr;
3001 int pcicfg_reg, db_reg;
3002
3003 if (lancer_chip(adapter)) {
3004 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3005 pci_resource_len(adapter->pdev, 0));
3006 if (addr == NULL)
3007 return -ENOMEM;
3008 adapter->db = addr;
3009 return 0;
3010 }
3011
3012 if (be_physfn(adapter)) {
3013 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3014 pci_resource_len(adapter->pdev, 2));
3015 if (addr == NULL)
3016 return -ENOMEM;
3017 adapter->csr = addr;
3018 }
3019
3020 if (adapter->generation == BE_GEN2) {
3021 pcicfg_reg = 1;
3022 db_reg = 4;
3023 } else {
3024 pcicfg_reg = 0;
3025 if (be_physfn(adapter))
3026 db_reg = 4;
3027 else
3028 db_reg = 0;
3029 }
3030 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3031 pci_resource_len(adapter->pdev, db_reg));
3032 if (addr == NULL)
3033 goto pci_map_err;
3034 adapter->db = addr;
3035
3036 if (be_physfn(adapter)) {
3037 addr = ioremap_nocache(
3038 pci_resource_start(adapter->pdev, pcicfg_reg),
3039 pci_resource_len(adapter->pdev, pcicfg_reg));
3040 if (addr == NULL)
3041 goto pci_map_err;
3042 adapter->pcicfg = addr;
3043 } else
3044 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3045
3046 return 0;
3047pci_map_err:
3048 be_unmap_pci_bars(adapter);
3049 return -ENOMEM;
3050}
3051
3053static void be_ctrl_cleanup(struct be_adapter *adapter)
3054{
3055 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3056
3057 be_unmap_pci_bars(adapter);
3058
3059 if (mem->va)
3060 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3061 mem->dma);
3062
3063 mem = &adapter->mc_cmd_mem;
3064 if (mem->va)
3065 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3066 mem->dma);
3067}
3068
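/*
 * Allocate the bootstrap mailbox. The extra 16 bytes allow carving an
 * aligned view (va and dma) out of the allocation, since the hardware
 * presumably requires the mailbox to be 16-byte aligned.
 */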
3069static int be_ctrl_init(struct be_adapter *adapter)
3070{
3071 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3072 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3073 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3074 int status;
3075
3076 status = be_map_pci_bars(adapter);
3077 if (status)
3078 goto done;
3079
3080 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3081 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3082 mbox_mem_alloc->size,
3083 &mbox_mem_alloc->dma,
3084 GFP_KERNEL);
3085 if (!mbox_mem_alloc->va) {
3086 status = -ENOMEM;
3087 goto unmap_pci_bars;
3088 }
3089
3090 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3091 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3092 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3093 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3094
3095 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3096 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3097 mc_cmd_mem->size, &mc_cmd_mem->dma,
3098 GFP_KERNEL);
3099 if (mc_cmd_mem->va == NULL) {
3100 status = -ENOMEM;
3101 goto free_mbox;
3102 }
3103 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3104
3105 mutex_init(&adapter->mbox_lock);
3106 spin_lock_init(&adapter->mcc_lock);
3107 spin_lock_init(&adapter->mcc_cq_lock);
3108
3109 init_completion(&adapter->flash_compl);
3110 pci_save_state(adapter->pdev);
3111 return 0;
3112
3113free_mbox:
3114 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3115 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3116
3117unmap_pci_bars:
3118 be_unmap_pci_bars(adapter);
3119
3120done:
3121 return status;
3122}
3123
3124static void be_stats_cleanup(struct be_adapter *adapter)
3125{
3126 struct be_dma_mem *cmd = &adapter->stats_cmd;
3127
3128 if (cmd->va)
3129 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3130 cmd->va, cmd->dma);
3131}
3132
3133static int be_stats_init(struct be_adapter *adapter)
3134{
3135 struct be_dma_mem *cmd = &adapter->stats_cmd;
3136
3137 if (adapter->generation == BE_GEN2) {
3138 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3139 } else {
3140 if (lancer_chip(adapter))
3141 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3142 else
3143 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3144 }
3145 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3146 GFP_KERNEL);
3147	if (cmd->va == NULL)
3148		return -ENOMEM;
3149 memset(cmd->va, 0, cmd->size);
3150 return 0;
3151}
3152
3153static void __devexit be_remove(struct pci_dev *pdev)
3154{
3155 struct be_adapter *adapter = pci_get_drvdata(pdev);
3156
3157 if (!adapter)
3158 return;
3159
3160 cancel_delayed_work_sync(&adapter->work);
3161
3162 unregister_netdev(adapter->netdev);
3163
3164 be_clear(adapter);
3165
3166 be_stats_cleanup(adapter);
3167
3168 be_ctrl_cleanup(adapter);
3169
3170 kfree(adapter->vf_cfg);
3171 be_sriov_disable(adapter);
3172
3173 be_msix_disable(adapter);
3174
3175 pci_set_drvdata(pdev, NULL);
3176 pci_release_regions(pdev);
3177 pci_disable_device(pdev);
3178
3179 free_netdev(adapter->netdev);
3180}
3181
3182static int be_get_config(struct be_adapter *adapter)
3183{
3184 int status;
3185 u8 mac[ETH_ALEN];
3186
3187 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3188 if (status)
3189 return status;
3190
3191 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3192 &adapter->function_mode, &adapter->function_caps);
3193 if (status)
3194 return status;
3195
3196 memset(mac, 0, ETH_ALEN);
3197
3198	/* A default permanent address is given to each VF on Lancer */
3199 if (be_physfn(adapter) || lancer_chip(adapter)) {
3200 status = be_cmd_mac_addr_query(adapter, mac,
3201 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3202
3203 if (status)
3204 return status;
3205
3206 if (!is_valid_ether_addr(mac))
3207 return -EADDRNOTAVAIL;
3208
3209 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3210 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3211 }
3212
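	/* Bit 0x400 of function_mode appears to select a multi-channel
	 * (FLEX10-style) configuration that quarters the VLAN table;
	 * the bit's name is an assumption, the arithmetic below is not.
	 */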
3213 if (adapter->function_mode & 0x400)
3214 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3215 else
3216 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3217
3218 status = be_cmd_get_cntl_attributes(adapter);
3219 if (status)
3220 return status;
3221
3222 if ((num_vfs && adapter->sriov_enabled) ||
3223 (adapter->function_mode & 0x400) ||
3224 lancer_chip(adapter) || !be_physfn(adapter)) {
3225 adapter->num_tx_qs = 1;
3226 netif_set_real_num_tx_queues(adapter->netdev,
3227 adapter->num_tx_qs);
3228 } else {
3229 adapter->num_tx_qs = MAX_TX_QS;
3230 }
3231
3232 return 0;
3233}
3234
3235static int be_dev_family_check(struct be_adapter *adapter)
3236{
3237 struct pci_dev *pdev = adapter->pdev;
3238 u32 sli_intf = 0, if_type;
3239
3240 switch (pdev->device) {
3241 case BE_DEVICE_ID1:
3242 case OC_DEVICE_ID1:
3243 adapter->generation = BE_GEN2;
3244 break;
3245 case BE_DEVICE_ID2:
3246 case OC_DEVICE_ID2:
3247 adapter->generation = BE_GEN3;
3248 break;
3249 case OC_DEVICE_ID3:
3250 case OC_DEVICE_ID4:
3251 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3252 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3253 SLI_INTF_IF_TYPE_SHIFT;
3254
3255 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3256 if_type != 0x02) {
3257 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3258 return -EINVAL;
3259 }
3260 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3261 SLI_INTF_FAMILY_SHIFT);
3262 adapter->generation = BE_GEN3;
3263 break;
3264 default:
3265 adapter->generation = 0;
3266 }
3267 return 0;
3268}
3269
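/*
 * Poll the SLIPORT status register for the ready bit, for up to ~10s
 * (500 iterations of 20ms). lancer_test_and_set_rdy_state() below also
 * kicks a port reset through SLIPORT_CONTROL when both the error and
 * reset-needed bits are set, then re-polls for readiness.
 */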
3270static int lancer_wait_ready(struct be_adapter *adapter)
3271{
3272#define SLIPORT_READY_TIMEOUT 500
3273 u32 sliport_status;
3274 int status = 0, i;
3275
3276 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3277 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3278 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3279 break;
3280
3281 msleep(20);
3282 }
3283
3284 if (i == SLIPORT_READY_TIMEOUT)
3285 status = -1;
3286
3287 return status;
3288}
3289
3290static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3291{
3292 int status;
3293 u32 sliport_status, err, reset_needed;
3294 status = lancer_wait_ready(adapter);
3295 if (!status) {
3296 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3297 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3298 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3299 if (err && reset_needed) {
3300 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3301 adapter->db + SLIPORT_CONTROL_OFFSET);
3302
3303			/* check if the adapter has corrected the error */
3304 status = lancer_wait_ready(adapter);
3305 sliport_status = ioread32(adapter->db +
3306 SLIPORT_STATUS_OFFSET);
3307 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3308 SLIPORT_STATUS_RN_MASK);
3309 if (status || sliport_status)
3310 status = -1;
3311 } else if (err || reset_needed) {
3312 status = -1;
3313 }
3314 }
3315 return status;
3316}
3317
3318static int __devinit be_probe(struct pci_dev *pdev,
3319 const struct pci_device_id *pdev_id)
3320{
3321 int status = 0;
3322 struct be_adapter *adapter;
3323 struct net_device *netdev;
3324
3325 status = pci_enable_device(pdev);
3326 if (status)
3327 goto do_none;
3328
3329 status = pci_request_regions(pdev, DRV_NAME);
3330 if (status)
3331 goto disable_dev;
3332 pci_set_master(pdev);
3333
3334 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3335 if (netdev == NULL) {
3336 status = -ENOMEM;
3337 goto rel_reg;
3338 }
3339 adapter = netdev_priv(netdev);
3340 adapter->pdev = pdev;
3341 pci_set_drvdata(pdev, adapter);
3342
3343 status = be_dev_family_check(adapter);
3344 if (status)
3345 goto free_netdev;
3346
3347 adapter->netdev = netdev;
3348 SET_NETDEV_DEV(netdev, &pdev->dev);
3349
3350 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3351 if (!status) {
3352 netdev->features |= NETIF_F_HIGHDMA;
3353 } else {
3354 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3355 if (status) {
3356 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3357 goto free_netdev;
3358 }
3359 }
3360
3361 be_sriov_enable(adapter);
3362 if (adapter->sriov_enabled) {
3363 adapter->vf_cfg = kcalloc(num_vfs,
3364 sizeof(struct be_vf_cfg), GFP_KERNEL);
3365
3366 if (!adapter->vf_cfg)
3367 goto free_netdev;
3368 }
3369
3370 status = be_ctrl_init(adapter);
3371 if (status)
3372 goto free_vf_cfg;
3373
3374 if (lancer_chip(adapter)) {
3375 status = lancer_test_and_set_rdy_state(adapter);
3376 if (status) {
3377			dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3378 goto ctrl_clean;
3379 }
3380 }
3381
3382 /* sync up with fw's ready state */
3383 if (be_physfn(adapter)) {
3384 status = be_cmd_POST(adapter);
3385 if (status)
3386 goto ctrl_clean;
3387 }
3388
3389 /* tell fw we're ready to fire cmds */
3390 status = be_cmd_fw_init(adapter);
3391 if (status)
3392 goto ctrl_clean;
3393
3394 status = be_cmd_reset_function(adapter);
3395 if (status)
3396 goto ctrl_clean;
3397
3398 status = be_stats_init(adapter);
3399 if (status)
3400 goto ctrl_clean;
3401
3402 status = be_get_config(adapter);
3403 if (status)
3404 goto stats_clean;
3405
3406 /* The INTR bit may be set in the card when probed by a kdump kernel
3407 * after a crash.
3408 */
3409 if (!lancer_chip(adapter))
3410 be_intr_set(adapter, false);
3411
3412 be_msix_enable(adapter);
3413
3414 INIT_DELAYED_WORK(&adapter->work, be_worker);
3415
3416 status = be_setup(adapter);
3417 if (status)
3418 goto msix_disable;
3419
3420 be_netdev_init(netdev);
3421 status = register_netdev(netdev);
3422 if (status != 0)
3423 goto unsetup;
3424 netif_carrier_off(netdev);
3425
3426 if (be_physfn(adapter) && adapter->sriov_enabled) {
3427 u8 mac_speed;
3428 bool link_up;
3429 u16 vf, lnk_speed;
3430
3431 if (!lancer_chip(adapter)) {
3432 status = be_vf_eth_addr_config(adapter);
3433 if (status)
3434 goto unreg_netdev;
3435 }
3436
3437 for (vf = 0; vf < num_vfs; vf++) {
3438 status = be_cmd_link_status_query(adapter, &link_up,
3439 &mac_speed, &lnk_speed, vf + 1);
3440 if (!status)
3441 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3442 else
3443 goto unreg_netdev;
3444 }
3445 }
3446
3447 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3448
3449 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3450 return 0;
3451
3452unreg_netdev:
3453 unregister_netdev(netdev);
3454unsetup:
3455 be_clear(adapter);
3456msix_disable:
3457 be_msix_disable(adapter);
3458stats_clean:
3459 be_stats_cleanup(adapter);
3460ctrl_clean:
3461 be_ctrl_cleanup(adapter);
3462free_vf_cfg:
3463 kfree(adapter->vf_cfg);
3464free_netdev:
3465 be_sriov_disable(adapter);
3466 free_netdev(netdev);
3467 pci_set_drvdata(pdev, NULL);
3468rel_reg:
3469 pci_release_regions(pdev);
3470disable_dev:
3471 pci_disable_device(pdev);
3472do_none:
3473 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3474 return status;
3475}
3476
3477static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3478{
3479 struct be_adapter *adapter = pci_get_drvdata(pdev);
3480 struct net_device *netdev = adapter->netdev;
3481
3482 cancel_delayed_work_sync(&adapter->work);
3483 if (adapter->wol)
3484 be_setup_wol(adapter, true);
3485
3486 netif_device_detach(netdev);
3487 if (netif_running(netdev)) {
3488 rtnl_lock();
3489 be_close(netdev);
3490 rtnl_unlock();
3491 }
3492 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3493 be_clear(adapter);
3494
3495 be_msix_disable(adapter);
3496 pci_save_state(pdev);
3497 pci_disable_device(pdev);
3498 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3499 return 0;
3500}
3501
3502static int be_resume(struct pci_dev *pdev)
3503{
3504 int status = 0;
3505 struct be_adapter *adapter = pci_get_drvdata(pdev);
3506 struct net_device *netdev = adapter->netdev;
3507
3508 netif_device_detach(netdev);
3509
3510 status = pci_enable_device(pdev);
3511 if (status)
3512 return status;
3513
3514 pci_set_power_state(pdev, 0);
3515 pci_restore_state(pdev);
3516
3517 be_msix_enable(adapter);
3518 /* tell fw we're ready to fire cmds */
3519 status = be_cmd_fw_init(adapter);
3520 if (status)
3521 return status;
3522
3523 be_setup(adapter);
3524 if (netif_running(netdev)) {
3525 rtnl_lock();
3526 be_open(netdev);
3527 rtnl_unlock();
3528 }
3529 netif_device_attach(netdev);
3530
3531 if (adapter->wol)
3532 be_setup_wol(adapter, false);
3533
3534 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3535 return 0;
3536}
3537
3538/*
3539 * An FLR (function level reset) will stop BE from DMAing any data.
3540 */
3541static void be_shutdown(struct pci_dev *pdev)
3542{
3543 struct be_adapter *adapter = pci_get_drvdata(pdev);
3544
3545 if (!adapter)
3546 return;
3547
3548 cancel_delayed_work_sync(&adapter->work);
3549
3550 netif_device_detach(adapter->netdev);
3551
3552 if (adapter->wol)
3553 be_setup_wol(adapter, true);
3554
3555 be_cmd_reset_function(adapter);
3556
3557 pci_disable_device(pdev);
3558}
3559
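/*
 * PCI EEH/AER hooks: on a detected error, detach and tear everything
 * down; on slot reset, re-enable the device and re-run POST; on
 * resume, re-init the firmware channel, rebuild resources and re-open
 * the interface if it was running.
 */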
3560static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3561 pci_channel_state_t state)
3562{
3563 struct be_adapter *adapter = pci_get_drvdata(pdev);
3564 struct net_device *netdev = adapter->netdev;
3565
3566 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3567
3568 adapter->eeh_err = true;
3569
3570 netif_device_detach(netdev);
3571
3572 if (netif_running(netdev)) {
3573 rtnl_lock();
3574 be_close(netdev);
3575 rtnl_unlock();
3576 }
3577 be_clear(adapter);
3578
3579 if (state == pci_channel_io_perm_failure)
3580 return PCI_ERS_RESULT_DISCONNECT;
3581
3582 pci_disable_device(pdev);
3583
3584 return PCI_ERS_RESULT_NEED_RESET;
3585}
3586
3587static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3588{
3589 struct be_adapter *adapter = pci_get_drvdata(pdev);
3590 int status;
3591
3592 dev_info(&adapter->pdev->dev, "EEH reset\n");
3593 adapter->eeh_err = false;
3594
3595 status = pci_enable_device(pdev);
3596 if (status)
3597 return PCI_ERS_RESULT_DISCONNECT;
3598
3599 pci_set_master(pdev);
3600 pci_set_power_state(pdev, 0);
3601 pci_restore_state(pdev);
3602
3603 /* Check if card is ok and fw is ready */
3604 status = be_cmd_POST(adapter);
3605 if (status)
3606 return PCI_ERS_RESULT_DISCONNECT;
3607
3608 return PCI_ERS_RESULT_RECOVERED;
3609}
3610
3611static void be_eeh_resume(struct pci_dev *pdev)
3612{
3613 int status = 0;
3614 struct be_adapter *adapter = pci_get_drvdata(pdev);
3615 struct net_device *netdev = adapter->netdev;
3616
3617 dev_info(&adapter->pdev->dev, "EEH resume\n");
3618
3619 pci_save_state(pdev);
3620
3621 /* tell fw we're ready to fire cmds */
3622 status = be_cmd_fw_init(adapter);
3623 if (status)
3624 goto err;
3625
3626 status = be_setup(adapter);
3627 if (status)
3628 goto err;
3629
3630 if (netif_running(netdev)) {
3631 status = be_open(netdev);
3632 if (status)
3633 goto err;
3634 }
3635 netif_device_attach(netdev);
3636 return;
3637err:
3638 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3639}
3640
3641static struct pci_error_handlers be_eeh_handlers = {
3642 .error_detected = be_eeh_err_detected,
3643 .slot_reset = be_eeh_reset,
3644 .resume = be_eeh_resume,
3645};
3646
3647static struct pci_driver be_driver = {
3648 .name = DRV_NAME,
3649 .id_table = be_dev_ids,
3650 .probe = be_probe,
3651 .remove = be_remove,
3652 .suspend = be_suspend,
3653 .resume = be_resume,
3654 .shutdown = be_shutdown,
3655 .err_handler = &be_eeh_handlers
3656};
3657
3658static int __init be_init_module(void)
3659{
3660 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3661 rx_frag_size != 2048) {
3662 printk(KERN_WARNING DRV_NAME
3663 " : Module param rx_frag_size must be 2048/4096/8192."
3664 " Using 2048\n");
3665 rx_frag_size = 2048;
3666 }
3667
3668 return pci_register_driver(&be_driver);
3669}
3670module_init(be_init_module);
3671
3672static void __exit be_exit_module(void)
3673{
3674 pci_unregister_driver(&be_driver);
3675}
3676module_exit(be_exit_module);