author    Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-05-13 03:37:27 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-08-11 05:33:55 -0400
commit    9aebddd11be42366f89b0296590770c02797aa98 (patch)
tree      bca16cd2c06a513096e930ab6337bd6f9d9ac674 /drivers/net/ethernet
parent    874aeea5d01cac55c160a4e503e3ddb4db030de7 (diff)
be2net: Move the Emulex driver
Moves the Emulex driver into drivers/net/ethernet/emulex/ and makes the
necessary Kconfig and Makefile changes.

CC: Sathya Perla <sathya.perla@emulex.com>
CC: Subbu Seetharaman <subbu.seetharaman@emulex.com>
CC: Ajit Khaparde <ajit.khaparde@emulex.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig                      1
-rw-r--r--  drivers/net/ethernet/Makefile                     1
-rw-r--r--  drivers/net/ethernet/emulex/Kconfig              22
-rw-r--r--  drivers/net/ethernet/emulex/Makefile              5
-rw-r--r--  drivers/net/ethernet/emulex/benet/Kconfig         6
-rw-r--r--  drivers/net/ethernet/emulex/benet/Makefile        7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h          529
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c    2367
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h    1502
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c  714
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h       510
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c    3633
12 files changed, 9297 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0bc6635b071b..9c003f363a9d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -16,6 +16,7 @@ source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/emulex/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 50faab53b95b..2ac05bacab8f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
+obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
new file mode 100644
index 000000000000..018ac94fb824
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -0,0 +1,22 @@
+#
+# Emulex driver configuration
+#
+
+config NET_VENDOR_EMULEX
+	bool "Emulex devices"
+	depends on PCI && INET
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Emulex cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_EMULEX
+
+source "drivers/net/ethernet/emulex/benet/Kconfig"
+
+endif # NET_VENDOR_EMULEX
diff --git a/drivers/net/ethernet/emulex/Makefile b/drivers/net/ethernet/emulex/Makefile
new file mode 100644
index 000000000000..ea8ec574d45a
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Emulex device drivers.
+#
+
+obj-$(CONFIG_BE2NET) += benet/
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
new file mode 100644
index 000000000000..804db04a2bd0
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -0,0 +1,6 @@
+config BE2NET
+	tristate "ServerEngines' 10Gbps NIC - BladeEngine"
+	depends on PCI && INET
+	---help---
+	  This driver implements the NIC functionality for ServerEngines'
+	  10Gbps network adapter - BladeEngine.
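
With the vendor gate above and this option, building be2net as a module
needs only two lines in a kernel configuration; a minimal .config fragment
(illustrative, not part of this commit):

CONFIG_NET_VENDOR_EMULEX=y
CONFIG_BE2NET=m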
diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile
new file mode 100644
index 000000000000..a60cd8051135
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile to build the network driver for ServerEngine's BladeEngine.
+#
+
+obj-$(CONFIG_BE2NET) += be2net.o
+
+be2net-y := be_main.o be_cmds.o be_ethtool.o
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
new file mode 100644
index 000000000000..12b5b5168dca
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -0,0 +1,529 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#ifndef BE_H
19#define BE_H
20
21#include <linux/pci.h>
22#include <linux/etherdevice.h>
23#include <linux/delay.h>
24#include <net/tcp.h>
25#include <net/ip.h>
26#include <net/ipv6.h>
27#include <linux/if_vlan.h>
28#include <linux/workqueue.h>
29#include <linux/interrupt.h>
30#include <linux/firmware.h>
31#include <linux/slab.h>
32#include <linux/u64_stats_sync.h>
33
34#include "be_hw.h"
35
36#define DRV_VER "4.0.100u"
37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
40#define OC_NAME "Emulex OneConnect 10Gbps NIC"
41#define OC_NAME_BE OC_NAME "(be3)"
42#define OC_NAME_LANCER OC_NAME "(Lancer)"
43#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
44
45#define BE_VENDOR_ID 0x19a2
46#define EMULEX_VENDOR_ID 0x10df
47#define BE_DEVICE_ID1 0x211
48#define BE_DEVICE_ID2 0x221
49#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
50#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
51#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
52#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
53
54static inline char *nic_name(struct pci_dev *pdev)
55{
56 switch (pdev->device) {
57 case OC_DEVICE_ID1:
58 return OC_NAME;
59 case OC_DEVICE_ID2:
60 return OC_NAME_BE;
61 case OC_DEVICE_ID3:
62 case OC_DEVICE_ID4:
63 return OC_NAME_LANCER;
64 case BE_DEVICE_ID2:
65 return BE3_NAME;
66 default:
67 return BE_NAME;
68 }
69}
70
71/* Number of bytes of an RX frame that are copied to skb->data */
72#define BE_HDR_LEN ((u16) 64)
73#define BE_MAX_JUMBO_FRAME_SIZE 9018
74#define BE_MIN_MTU 256
75
76#define BE_NUM_VLANS_SUPPORTED 64
77#define BE_MAX_EQD 96
78#define BE_MAX_TX_FRAG_COUNT 30
79
80#define EVNT_Q_LEN 1024
81#define TX_Q_LEN 2048
82#define TX_CQ_LEN 1024
83#define RX_Q_LEN 1024 /* Does not support any other value */
84#define RX_CQ_LEN 1024
85#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
86#define MCC_CQ_LEN 256
87
88#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
89#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
90#define MAX_TX_QS 8
91#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
92#define BE_NAPI_WEIGHT 64
93#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
94#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
95
96#define FW_VER_LEN 32
97
98struct be_dma_mem {
99 void *va;
100 dma_addr_t dma;
101 u32 size;
102};
103
104struct be_queue_info {
105 struct be_dma_mem dma_mem;
106 u16 len;
107 u16 entry_size; /* Size of an element in the queue */
108 u16 id;
109 u16 tail, head;
110 bool created;
111 atomic_t used; /* Number of valid elements in the queue */
112};
113
114static inline u32 MODULO(u16 val, u16 limit)
115{
116 BUG_ON(limit & (limit - 1));
117 return val & (limit - 1);
118}
119
120static inline void index_adv(u16 *index, u16 val, u16 limit)
121{
122 *index = MODULO((*index + val), limit);
123}
124
125static inline void index_inc(u16 *index, u16 limit)
126{
127 *index = MODULO((*index + 1), limit);
128}
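
/* [Editorial sketch, not part of this commit] The helpers above depend on
 * the ring length being a power of two, so the modulo reduces to a bitwise
 * AND; BUG_ON(limit & (limit - 1)) rejects any other length. A
 * self-contained userspace demo of the same wrap-around arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t head = 1023, len = 1024;	/* e.g. EVNT_Q_LEN */

	head = (head + 1) & (len - 1);		/* index_inc(): 1023 -> 0 */
	head = (head + 5) & (len - 1);		/* index_adv(,5,): 0 -> 5 */
	printf("%u\n", head);			/* prints 5 */
	return 0;
}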
129
130static inline void *queue_head_node(struct be_queue_info *q)
131{
132 return q->dma_mem.va + q->head * q->entry_size;
133}
134
135static inline void *queue_tail_node(struct be_queue_info *q)
136{
137 return q->dma_mem.va + q->tail * q->entry_size;
138}
139
140static inline void queue_head_inc(struct be_queue_info *q)
141{
142 index_inc(&q->head, q->len);
143}
144
145static inline void queue_tail_inc(struct be_queue_info *q)
146{
147 index_inc(&q->tail, q->len);
148}
149
150struct be_eq_obj {
151 struct be_queue_info q;
152 char desc[32];
153
154 /* Adaptive interrupt coalescing (AIC) info */
155 bool enable_aic;
156 u16 min_eqd; /* in usecs */
157 u16 max_eqd; /* in usecs */
158 u16 cur_eqd; /* in usecs */
159 u8 eq_idx;
160
161 struct napi_struct napi;
162};
163
164struct be_mcc_obj {
165 struct be_queue_info q;
166 struct be_queue_info cq;
167 bool rearm_cq;
168};
169
170struct be_tx_stats {
171 u64 tx_bytes;
172 u64 tx_pkts;
173 u64 tx_reqs;
174 u64 tx_wrbs;
175 u64 tx_compl;
176 ulong tx_jiffies;
177 u32 tx_stops;
178 struct u64_stats_sync sync;
179 struct u64_stats_sync sync_compl;
180};
181
182struct be_tx_obj {
183 struct be_queue_info q;
184 struct be_queue_info cq;
185 /* Remember the skbs that were transmitted */
186 struct sk_buff *sent_skb_list[TX_Q_LEN];
187 struct be_tx_stats stats;
188};
189
190/* Struct to remember the pages posted for rx frags */
191struct be_rx_page_info {
192 struct page *page;
193 DEFINE_DMA_UNMAP_ADDR(bus);
194 u16 page_offset;
195 bool last_page_user;
196};
197
198struct be_rx_stats {
199 u64 rx_bytes;
200 u64 rx_pkts;
201 u64 rx_pkts_prev;
202 ulong rx_jiffies;
203 u32 rx_drops_no_skbs; /* skb allocation errors */
204 u32 rx_drops_no_frags; /* HW has no fetched frags */
205 u32 rx_post_fail; /* page post alloc failures */
206 u32 rx_polls; /* NAPI calls */
207 u32 rx_events;
208 u32 rx_compl;
209 u32 rx_mcast_pkts;
210 u32 rx_compl_err; /* completions with err set */
211 u32 rx_pps; /* pkts per second */
212 struct u64_stats_sync sync;
213};
214
215struct be_rx_compl_info {
216 u32 rss_hash;
217 u16 vlan_tag;
218 u16 pkt_size;
219 u16 rxq_idx;
220 u16 port;
221 u8 vlanf;
222 u8 num_rcvd;
223 u8 err;
224 u8 ipf;
225 u8 tcpf;
226 u8 udpf;
227 u8 ip_csum;
228 u8 l4_csum;
229 u8 ipv6;
230 u8 vtm;
231 u8 pkt_type;
232};
233
234struct be_rx_obj {
235 struct be_adapter *adapter;
236 struct be_queue_info q;
237 struct be_queue_info cq;
238 struct be_rx_compl_info rxcp;
239 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
240 struct be_eq_obj rx_eq;
241 struct be_rx_stats stats;
242 u8 rss_id;
243 bool rx_post_starved; /* Zero rx frags have been posted to BE */
244 u32 cache_line_barrier[16];
245};
246
247struct be_drv_stats {
248 u8 be_on_die_temperature;
249 u32 tx_events;
250 u32 eth_red_drops;
251 u32 rx_drops_no_pbuf;
252 u32 rx_drops_no_txpb;
253 u32 rx_drops_no_erx_descr;
254 u32 rx_drops_no_tpre_descr;
255 u32 rx_drops_too_many_frags;
256 u32 rx_drops_invalid_ring;
257 u32 forwarded_packets;
258 u32 rx_drops_mtu;
259 u32 rx_crc_errors;
260 u32 rx_alignment_symbol_errors;
261 u32 rx_pause_frames;
262 u32 rx_priority_pause_frames;
263 u32 rx_control_frames;
264 u32 rx_in_range_errors;
265 u32 rx_out_range_errors;
266 u32 rx_frame_too_long;
267 u32 rx_address_match_errors;
268 u32 rx_dropped_too_small;
269 u32 rx_dropped_too_short;
270 u32 rx_dropped_header_too_small;
271 u32 rx_dropped_tcp_length;
272 u32 rx_dropped_runt;
273 u32 rx_ip_checksum_errs;
274 u32 rx_tcp_checksum_errs;
275 u32 rx_udp_checksum_errs;
276 u32 tx_pauseframes;
277 u32 tx_priority_pauseframes;
278 u32 tx_controlframes;
279 u32 rxpp_fifo_overflow_drop;
280 u32 rx_input_fifo_overflow_drop;
281 u32 pmem_fifo_overflow_drop;
282 u32 jabber_events;
283};
284
285struct be_vf_cfg {
286 unsigned char vf_mac_addr[ETH_ALEN];
287 u32 vf_if_handle;
288 u32 vf_pmac_id;
289 u16 vf_vlan_tag;
290 u32 vf_tx_rate;
291};
292
293#define BE_INVALID_PMAC_ID 0xffffffff
294
295struct be_adapter {
296 struct pci_dev *pdev;
297 struct net_device *netdev;
298
299 u8 __iomem *csr;
300 u8 __iomem *db; /* Door Bell */
301 u8 __iomem *pcicfg; /* PCI config space */
302
303 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
304 struct be_dma_mem mbox_mem;
305 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
306 * is stored for freeing purpose */
307 struct be_dma_mem mbox_mem_alloced;
308
309 struct be_mcc_obj mcc_obj;
310 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
311 spinlock_t mcc_cq_lock;
312
313 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
314 u32 num_msix_vec;
315 bool isr_registered;
316
317 /* TX Rings */
318 struct be_eq_obj tx_eq;
319 struct be_tx_obj tx_obj[MAX_TX_QS];
320 u8 num_tx_qs;
321
322 u32 cache_line_break[8];
323
324 /* Rx rings */
325 struct be_rx_obj rx_obj[MAX_RX_QS];
326 u32 num_rx_qs;
327 u32 big_page_size; /* Compounded page size shared by rx wrbs */
328
329 u8 eq_next_idx;
330 struct be_drv_stats drv_stats;
331
332 u16 vlans_added;
333 u16 max_vlans; /* Number of vlans supported */
334 u8 vlan_tag[VLAN_N_VID];
335 u8 vlan_prio_bmap; /* Available Priority BitMap */
336 u16 recommended_prio; /* Recommended Priority */
337 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
338
339 struct be_dma_mem stats_cmd;
340 /* Work queue used to perform periodic tasks like getting statistics */
341 struct delayed_work work;
342 u16 work_counter;
343
344 /* Ethtool knobs and info */
345 char fw_ver[FW_VER_LEN];
346 u32 if_handle; /* Used to configure filtering */
347 u32 pmac_id; /* MAC addr handle used by BE card */
348 u32 beacon_state; /* for set_phys_id */
349
350 bool eeh_err;
351 bool link_up;
352 u32 port_num;
353 bool promiscuous;
354 bool wol;
355 u32 function_mode;
356 u32 function_caps;
357 u32 rx_fc; /* Rx flow control */
358 u32 tx_fc; /* Tx flow control */
359 bool ue_detected;
360 bool stats_cmd_sent;
361 int link_speed;
362 u8 port_type;
363 u8 transceiver;
364 u8 autoneg;
365 u8 generation; /* BladeEngine ASIC generation */
366 u32 flash_status;
367 struct completion flash_compl;
368
369 bool be3_native;
370 bool sriov_enabled;
371 struct be_vf_cfg *vf_cfg;
372 u8 is_virtfn;
373 u32 sli_family;
374 u8 hba_port_num;
375 u16 pvid;
376};
377
378#define be_physfn(adapter) (!adapter->is_virtfn)
379
380/* BladeEngine Generation numbers */
381#define BE_GEN2 2
382#define BE_GEN3 3
383
384#define ON 1
385#define OFF 0
386#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
387 (adapter->pdev->device == OC_DEVICE_ID4))
388
389extern const struct ethtool_ops be_ethtool_ops;
390
391#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
392#define tx_stats(txo) (&txo->stats)
393#define rx_stats(rxo) (&rxo->stats)
394
395#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
396
397#define for_all_rx_queues(adapter, rxo, i) \
398 for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
399 i++, rxo++)
400
401/* Just skip the first default non-rss queue */
402#define for_all_rss_queues(adapter, rxo, i) \
403 for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
404 i++, rxo++)
405
406#define for_all_tx_queues(adapter, txo, i) \
407 for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
408 i++, txo++)
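
/* [Editorial example, not part of this commit] The iterators above are
 * used like ordinary for loops; rx_rings_refill() is a made-up callee:
 *
 *	struct be_rx_obj *rxo;
 *	int i;
 *
 *	for_all_rx_queues(adapter, rxo, i)
 *		rx_rings_refill(rxo);
 */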
409
410#define PAGE_SHIFT_4K 12
411#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
412
413/* Returns number of pages spanned by the data starting at the given addr */
414#define PAGES_4K_SPANNED(_address, size) \
415 ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
416 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
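
/* [Editorial example] For _address = 0x1FF0 and size = 0x40, the macro
 * evaluates to (0xFF0 + 0x40 + 0xFFF) >> 12 = 2: the buffer straddles one
 * 4K boundary and so spans two pages.
 */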
417
418/* Byte offset into the page corresponding to given address */
419#define OFFSET_IN_PAGE(addr) \
420 ((size_t)(addr) & (PAGE_SIZE_4K-1))
421
422/* Returns bit offset within a DWORD of a bitfield */
423#define AMAP_BIT_OFFSET(_struct, field) \
424 (((size_t)&(((_struct *)0)->field))%32)
425
426/* Returns the bit mask of the field that is NOT shifted into location. */
427static inline u32 amap_mask(u32 bitsize)
428{
429 return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
430}
431
432static inline void
433amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
434{
435 u32 *dw = (u32 *) ptr + dw_offset;
436 *dw &= ~(mask << offset);
437 *dw |= (mask & value) << offset;
438}
439
440#define AMAP_SET_BITS(_struct, field, ptr, val) \
441 amap_set(ptr, \
442 offsetof(_struct, field)/32, \
443 amap_mask(sizeof(((_struct *)0)->field)), \
444 AMAP_BIT_OFFSET(_struct, field), \
445 val)
446
447static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
448{
449 u32 *dw = (u32 *) ptr;
450 return mask & (*(dw + dw_offset) >> offset);
451}
452
453#define AMAP_GET_BITS(_struct, field, ptr) \
454 amap_get(ptr, \
455 offsetof(_struct, field)/32, \
456 amap_mask(sizeof(((_struct *)0)->field)), \
457 AMAP_BIT_OFFSET(_struct, field))
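
/* [Editorial sketch, not part of this commit] The AMAP macros treat a
 * hardware context as "one byte per bit": in the amap_* structs defined in
 * be_hw.h, sizeof(field) is the field's width in bits and its byte offset
 * is its bit offset. A self-contained userspace demo (struct amap_demo is
 * made up for illustration):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct amap_demo {		/* hypothetical context layout */
	uint8_t valid[1];	/* 1-bit field at bit 0 */
	uint8_t count[3];	/* 3-bit field at bits 1..3 */
	uint8_t rsvd[28];	/* pad the first dword */
};

static uint32_t amap_mask(uint32_t bitsize)
{
	return bitsize == 32 ? 0xFFFFFFFF : (1u << bitsize) - 1;
}

int main(void)
{
	uint32_t ctxt[1] = { 0 };
	uint32_t mask = amap_mask(sizeof(((struct amap_demo *)0)->count));
	uint32_t off = offsetof(struct amap_demo, count) % 32;

	/* what AMAP_SET_BITS(struct amap_demo, count, ctxt, 5) expands to */
	ctxt[0] = (ctxt[0] & ~(mask << off)) | ((mask & 5) << off);
	printf("0x%08x\n", ctxt[0]);	/* prints 0x0000000a */
	return 0;
}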
458
459#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
460#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
461static inline void swap_dws(void *wrb, int len)
462{
463#ifdef __BIG_ENDIAN
464 u32 *dw = wrb;
465 BUG_ON(len % 4);
466 do {
467 *dw = cpu_to_le32(*dw);
468 dw++;
469 len -= 4;
470 } while (len);
471#endif /* __BIG_ENDIAN */
472}
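
/* [Editorial example] On a little-endian host swap_dws() compiles away; on
 * a big-endian host each 32-bit word is byte-swapped in place, e.g. a
 * dword holding 0x11223344 reads back as 0x44332211 once converted to the
 * little-endian layout the hardware expects.
 */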
473
474static inline u8 is_tcp_pkt(struct sk_buff *skb)
475{
476 u8 val = 0;
477
478 if (ip_hdr(skb)->version == 4)
479 val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
480 else if (ip_hdr(skb)->version == 6)
481 val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
482
483 return val;
484}
485
486static inline u8 is_udp_pkt(struct sk_buff *skb)
487{
488 u8 val = 0;
489
490 if (ip_hdr(skb)->version == 4)
491 val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
492 else if (ip_hdr(skb)->version == 6)
493 val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
494
495 return val;
496}
497
498static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
499{
500 u32 sli_intf;
501
502 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
503 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
504}
505
506static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
507{
508 u32 addr;
509
510 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
511
512 mac[5] = (u8)(addr & 0xFF);
513 mac[4] = (u8)((addr >> 8) & 0xFF);
514 mac[3] = (u8)((addr >> 16) & 0xFF);
515 /* Use the OUI from the current MAC address */
516 memcpy(mac, adapter->netdev->dev_addr, 3);
517}
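
/* [Editorial example] If the PF's MAC is 00:90:fa:12:34:56 and jhash()
 * happened to return 0xdeadbeef, the generated VF address would be
 * 00:90:fa:ad:be:ef: the low three hash bytes supply the NIC-specific
 * part while the memcpy() keeps the adapter's own OUI.
 */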
518
519static inline bool be_multi_rxq(const struct be_adapter *adapter)
520{
521 return adapter->num_rx_qs > 1;
522}
523
524extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
525 u16 num_popped);
526extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
527extern void be_parse_stats(struct be_adapter *adapter);
528extern int be_load_fw(struct be_adapter *adapter, u8 *func);
529#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
new file mode 100644
index 000000000000..427859532f02
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -0,0 +1,2367 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include "be.h"
19#include "be_cmds.h"
20
21/* Must be a power of 2 or else MODULO will BUG_ON */
22static int be_get_temp_freq = 32;
23
24static void be_mcc_notify(struct be_adapter *adapter)
25{
26 struct be_queue_info *mccq = &adapter->mcc_obj.q;
27 u32 val = 0;
28
29 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
37
38 wmb();
39 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40}
41
42/* To check if valid bit is set, check the entire word as we don't know
43 * the endianness of the data (old entry is host endian while a new entry is
44 * little endian) */
45static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
46{
47 if (compl->flags != 0) {
48 compl->flags = le32_to_cpu(compl->flags);
49 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50 return true;
51 } else {
52 return false;
53 }
54}
55
56/* Need to reset the entire word that houses the valid bit */
57static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
58{
59 compl->flags = 0;
60}
61
62static int be_mcc_compl_process(struct be_adapter *adapter,
63 struct be_mcc_compl *compl)
64{
65 u16 compl_status, extd_status;
66
67 /* Just swap the status to host endian; mcc tag is opaquely copied
68 * from mcc_wrb */
69 be_dws_le_to_cpu(compl, 4);
70
71 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
72 CQE_STATUS_COMPL_MASK;
73
74 if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
75 (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
76 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
77 adapter->flash_status = compl_status;
78 complete(&adapter->flash_compl);
79 }
80
81 if (compl_status == MCC_STATUS_SUCCESS) {
82 if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
83 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
84 (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
85 be_parse_stats(adapter);
86 adapter->stats_cmd_sent = false;
87 }
88 } else {
89 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
90 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
91 goto done;
92
93 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
94 dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
95 "permitted to execute this cmd (opcode %d)\n",
96 compl->tag0);
97 } else {
98 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
99 CQE_STATUS_EXTD_MASK;
100 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
101 "status %d, extd-status %d\n",
102 compl->tag0, compl_status, extd_status);
103 }
104 }
105done:
106 return compl_status;
107}
108
109/* Link state evt is a string of bytes; no need for endian swapping */
110static void be_async_link_state_process(struct be_adapter *adapter,
111 struct be_async_event_link_state *evt)
112{
113 be_link_status_update(adapter, evt->port_link_status);
114}
115
116/* Grp5 CoS Priority evt */
117static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
118 struct be_async_event_grp5_cos_priority *evt)
119{
120 if (evt->valid) {
121 adapter->vlan_prio_bmap = evt->available_priority_bmap;
122 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
123 adapter->recommended_prio =
124 evt->reco_default_priority << VLAN_PRIO_SHIFT;
125 }
126}
127
128/* Grp5 QOS Speed evt */
129static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
130 struct be_async_event_grp5_qos_link_speed *evt)
131{
132 if (evt->physical_port == adapter->port_num) {
133 /* qos_link_speed is in units of 10 Mbps */
134 adapter->link_speed = evt->qos_link_speed * 10;
135 }
136}
137
138/*Grp5 PVID evt*/
139static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
140 struct be_async_event_grp5_pvid_state *evt)
141{
142 if (evt->enabled)
143 adapter->pvid = le16_to_cpu(evt->tag);
144 else
145 adapter->pvid = 0;
146}
147
148static void be_async_grp5_evt_process(struct be_adapter *adapter,
149 u32 trailer, struct be_mcc_compl *evt)
150{
151 u8 event_type = 0;
152
153 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
154 ASYNC_TRAILER_EVENT_TYPE_MASK;
155
156 switch (event_type) {
157 case ASYNC_EVENT_COS_PRIORITY:
158 be_async_grp5_cos_priority_process(adapter,
159 (struct be_async_event_grp5_cos_priority *)evt);
160 break;
161 case ASYNC_EVENT_QOS_SPEED:
162 be_async_grp5_qos_speed_process(adapter,
163 (struct be_async_event_grp5_qos_link_speed *)evt);
164 break;
165 case ASYNC_EVENT_PVID_STATE:
166 be_async_grp5_pvid_state_process(adapter,
167 (struct be_async_event_grp5_pvid_state *)evt);
168 break;
169 default:
170 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
171 break;
172 }
173}
174
175static inline bool is_link_state_evt(u32 trailer)
176{
177 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
178 ASYNC_TRAILER_EVENT_CODE_MASK) ==
179 ASYNC_EVENT_CODE_LINK_STATE;
180}
181
182static inline bool is_grp5_evt(u32 trailer)
183{
184 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
185 ASYNC_TRAILER_EVENT_CODE_MASK) ==
186 ASYNC_EVENT_CODE_GRP_5);
187}
188
189static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
190{
191 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
192 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
193
194 if (be_mcc_compl_is_new(compl)) {
195 queue_tail_inc(mcc_cq);
196 return compl;
197 }
198 return NULL;
199}
200
201void be_async_mcc_enable(struct be_adapter *adapter)
202{
203 spin_lock_bh(&adapter->mcc_cq_lock);
204
205 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
206 adapter->mcc_obj.rearm_cq = true;
207
208 spin_unlock_bh(&adapter->mcc_cq_lock);
209}
210
211void be_async_mcc_disable(struct be_adapter *adapter)
212{
213 adapter->mcc_obj.rearm_cq = false;
214}
215
216int be_process_mcc(struct be_adapter *adapter, int *status)
217{
218 struct be_mcc_compl *compl;
219 int num = 0;
220 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
221
222 spin_lock_bh(&adapter->mcc_cq_lock);
223 while ((compl = be_mcc_compl_get(adapter))) {
224 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
225 /* Interpret flags as an async trailer */
226 if (is_link_state_evt(compl->flags))
227 be_async_link_state_process(adapter,
228 (struct be_async_event_link_state *) compl);
229 else if (is_grp5_evt(compl->flags))
230 be_async_grp5_evt_process(adapter,
231 compl->flags, compl);
232 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
233 *status = be_mcc_compl_process(adapter, compl);
234 atomic_dec(&mcc_obj->q.used);
235 }
236 be_mcc_compl_use(compl);
237 num++;
238 }
239
240 spin_unlock_bh(&adapter->mcc_cq_lock);
241 return num;
242}
243
244/* Wait till no more pending mcc requests are present */
245static int be_mcc_wait_compl(struct be_adapter *adapter)
246{
247#define mcc_timeout 120000 /* 12s timeout */
248 int i, num, status = 0;
249 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
250
251 if (adapter->eeh_err)
252 return -EIO;
253
254 for (i = 0; i < mcc_timeout; i++) {
255 num = be_process_mcc(adapter, &status);
256 if (num)
257 be_cq_notify(adapter, mcc_obj->cq.id,
258 mcc_obj->rearm_cq, num);
259
260 if (atomic_read(&mcc_obj->q.used) == 0)
261 break;
262 udelay(100);
263 }
264 if (i == mcc_timeout) {
265 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
266 return -1;
267 }
268 return status;
269}
270
271/* Notify MCC requests and wait for completion */
272static int be_mcc_notify_wait(struct be_adapter *adapter)
273{
274 be_mcc_notify(adapter);
275 return be_mcc_wait_compl(adapter);
276}
277
278static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
279{
280 int msecs = 0;
281 u32 ready;
282
283 if (adapter->eeh_err) {
284 dev_err(&adapter->pdev->dev,
285 "Error detected in card.Cannot issue commands\n");
286 return -EIO;
287 }
288
289 do {
290 ready = ioread32(db);
291 if (ready == 0xffffffff) {
292 dev_err(&adapter->pdev->dev,
293 "pci slot disconnected\n");
294 return -1;
295 }
296
297 ready &= MPU_MAILBOX_DB_RDY_MASK;
298 if (ready)
299 break;
300
301 if (msecs > 4000) {
302 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
303 if (!lancer_chip(adapter))
304 be_detect_dump_ue(adapter);
305 return -1;
306 }
307
308 msleep(1);
309 msecs++;
310 } while (true);
311
312 return 0;
313}
314
315/*
316 * Insert the mailbox address into the doorbell in two steps
317 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
318 */
319static int be_mbox_notify_wait(struct be_adapter *adapter)
320{
321 int status;
322 u32 val = 0;
323 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
324 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
325 struct be_mcc_mailbox *mbox = mbox_mem->va;
326 struct be_mcc_compl *compl = &mbox->compl;
327
328 /* wait for ready to be set */
329 status = be_mbox_db_ready_wait(adapter, db);
330 if (status != 0)
331 return status;
332
333 val |= MPU_MAILBOX_DB_HI_MASK;
334 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
335 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
336 iowrite32(val, db);
337
338 /* wait for ready to be set */
339 status = be_mbox_db_ready_wait(adapter, db);
340 if (status != 0)
341 return status;
342
343 val = 0;
344 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
345 val |= (u32)(mbox_mem->dma >> 4) << 2;
346 iowrite32(val, db);
347
348 status = be_mbox_db_ready_wait(adapter, db);
349 if (status != 0)
350 return status;
351
352 /* A cq entry has been made now */
353 if (be_mcc_compl_is_new(compl)) {
354 status = be_mcc_compl_process(adapter, &mbox->compl);
355 be_mcc_compl_use(compl);
356 if (status)
357 return status;
358 } else {
359 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
360 return -1;
361 }
362 return 0;
363}
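
/* [Editorial sketch, not part of this commit] The mailbox DMA address is
 * 16-byte aligned and is delivered in two doorbell writes: first address
 * bits 34..63 with the "hi" flag set (MPU_MAILBOX_DB_HI_MASK, assumed to
 * be bit 1 as in be_hw.h), then address bits 4..33. A userspace sketch of
 * the encoding:
 */
#include <stdint.h>
#include <stdio.h>

#define MPU_MAILBOX_DB_HI_MASK	0x2	/* assumed value from be_hw.h */

int main(void)
{
	uint64_t dma = 0x00000001234fd000ULL;	/* example mailbox address */
	uint32_t hi = MPU_MAILBOX_DB_HI_MASK |
		      (((uint32_t)(dma >> 32) >> 2) << 2);
	uint32_t lo = (uint32_t)(dma >> 4) << 2;

	printf("write1=0x%08x write2=0x%08x\n", hi, lo);
	return 0;
}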
364
365static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
366{
367 u32 sem;
368
369 if (lancer_chip(adapter))
370 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
371 else
372 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
373
374 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
375 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
376 return -1;
377 else
378 return 0;
379}
380
381int be_cmd_POST(struct be_adapter *adapter)
382{
383 u16 stage;
384 int status, timeout = 0;
385 struct device *dev = &adapter->pdev->dev;
386
387 do {
388 status = be_POST_stage_get(adapter, &stage);
389 if (status) {
390 dev_err(dev, "POST error; stage=0x%x\n", stage);
391 return -1;
392 } else if (stage != POST_STAGE_ARMFW_RDY) {
393 if (msleep_interruptible(2000)) {
394 dev_err(dev, "Waiting for POST aborted\n");
395 return -EINTR;
396 }
397 timeout += 2;
398 } else {
399 return 0;
400 }
401 } while (timeout < 40);
402
403 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
404 return -1;
405}
406
407static inline void *embedded_payload(struct be_mcc_wrb *wrb)
408{
409 return wrb->payload.embedded_payload;
410}
411
412static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
413{
414 return &wrb->payload.sgl[0];
415}
416
417/* Don't touch the hdr after it's prepared */
418static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
419 bool embedded, u8 sge_cnt, u32 opcode)
420{
421 if (embedded)
422 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
423 else
424 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
425 MCC_WRB_SGE_CNT_SHIFT;
426 wrb->payload_length = payload_len;
427 wrb->tag0 = opcode;
428 be_dws_cpu_to_le(wrb, 8);
429}
430
431/* Don't touch the hdr after it's prepared */
432static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
433 u8 subsystem, u8 opcode, int cmd_len)
434{
435 req_hdr->opcode = opcode;
436 req_hdr->subsystem = subsystem;
437 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
438 req_hdr->version = 0;
439}
440
441static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
442 struct be_dma_mem *mem)
443{
444 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
445 u64 dma = (u64)mem->dma;
446
447 for (i = 0; i < buf_pages; i++) {
448 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
449 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
450 dma += PAGE_SIZE_4K;
451 }
452}
453
454/* Converts interrupt delay in microseconds to multiplier value */
455static u32 eq_delay_to_mult(u32 usec_delay)
456{
457#define MAX_INTR_RATE 651042
458 const u32 round = 10;
459 u32 multiplier;
460
461 if (usec_delay == 0)
462 multiplier = 0;
463 else {
464 u32 interrupt_rate = 1000000 / usec_delay;
465 /* Max delay, corresponding to the lowest interrupt rate */
466 if (interrupt_rate == 0)
467 multiplier = 1023;
468 else {
469 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
470 multiplier /= interrupt_rate;
471 /* Round the multiplier to the closest value.*/
472 multiplier = (multiplier + round/2) / round;
473 multiplier = min(multiplier, (u32)1023);
474 }
475 }
476 return multiplier;
477}
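
/* [Editorial example] For usec_delay = 96 (BE_MAX_EQD): interrupt_rate =
 * 1000000 / 96 = 10416, multiplier = (651042 - 10416) * 10 / 10416 = 615,
 * and rounding gives (615 + 5) / 10 = 62.
 */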
478
479static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
480{
481 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
482 struct be_mcc_wrb *wrb
483 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
484 memset(wrb, 0, sizeof(*wrb));
485 return wrb;
486}
487
488static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
489{
490 struct be_queue_info *mccq = &adapter->mcc_obj.q;
491 struct be_mcc_wrb *wrb;
492
493 if (atomic_read(&mccq->used) >= mccq->len) {
494 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
495 return NULL;
496 }
497
498 wrb = queue_head_node(mccq);
499 queue_head_inc(mccq);
500 atomic_inc(&mccq->used);
501 memset(wrb, 0, sizeof(*wrb));
502 return wrb;
503}
504
505/* Tell fw we're about to start firing cmds by writing a
506 * special pattern across the wrb hdr; uses mbox
507 */
508int be_cmd_fw_init(struct be_adapter *adapter)
509{
510 u8 *wrb;
511 int status;
512
513 if (mutex_lock_interruptible(&adapter->mbox_lock))
514 return -1;
515
516 wrb = (u8 *)wrb_from_mbox(adapter);
517 *wrb++ = 0xFF;
518 *wrb++ = 0x12;
519 *wrb++ = 0x34;
520 *wrb++ = 0xFF;
521 *wrb++ = 0xFF;
522 *wrb++ = 0x56;
523 *wrb++ = 0x78;
524 *wrb = 0xFF;
525
526 status = be_mbox_notify_wait(adapter);
527
528 mutex_unlock(&adapter->mbox_lock);
529 return status;
530}
531
532/* Tell fw we're done with firing cmds by writing a
533 * special pattern across the wrb hdr; uses mbox
534 */
535int be_cmd_fw_clean(struct be_adapter *adapter)
536{
537 u8 *wrb;
538 int status;
539
540 if (adapter->eeh_err)
541 return -EIO;
542
543 if (mutex_lock_interruptible(&adapter->mbox_lock))
544 return -1;
545
546 wrb = (u8 *)wrb_from_mbox(adapter);
547 *wrb++ = 0xFF;
548 *wrb++ = 0xAA;
549 *wrb++ = 0xBB;
550 *wrb++ = 0xFF;
551 *wrb++ = 0xFF;
552 *wrb++ = 0xCC;
553 *wrb++ = 0xDD;
554 *wrb = 0xFF;
555
556 status = be_mbox_notify_wait(adapter);
557
558 mutex_unlock(&adapter->mbox_lock);
559 return status;
560}
561int be_cmd_eq_create(struct be_adapter *adapter,
562 struct be_queue_info *eq, int eq_delay)
563{
564 struct be_mcc_wrb *wrb;
565 struct be_cmd_req_eq_create *req;
566 struct be_dma_mem *q_mem = &eq->dma_mem;
567 int status;
568
569 if (mutex_lock_interruptible(&adapter->mbox_lock))
570 return -1;
571
572 wrb = wrb_from_mbox(adapter);
573 req = embedded_payload(wrb);
574
575 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
576
577 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
578 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
579
580 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
581
582 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
583 /* 4byte eqe*/
584 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
585 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
586 __ilog2_u32(eq->len/256));
587 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
588 eq_delay_to_mult(eq_delay));
589 be_dws_cpu_to_le(req->context, sizeof(req->context));
590
591 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
592
593 status = be_mbox_notify_wait(adapter);
594 if (!status) {
595 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
596 eq->id = le16_to_cpu(resp->eq_id);
597 eq->created = true;
598 }
599
600 mutex_unlock(&adapter->mbox_lock);
601 return status;
602}
603
604/* Uses mbox */
605int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
606 u8 type, bool permanent, u32 if_handle)
607{
608 struct be_mcc_wrb *wrb;
609 struct be_cmd_req_mac_query *req;
610 int status;
611
612 if (mutex_lock_interruptible(&adapter->mbox_lock))
613 return -1;
614
615 wrb = wrb_from_mbox(adapter);
616 req = embedded_payload(wrb);
617
618 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
619 OPCODE_COMMON_NTWK_MAC_QUERY);
620
621 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
622 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
623
624 req->type = type;
625 if (permanent) {
626 req->permanent = 1;
627 } else {
628 req->if_id = cpu_to_le16((u16) if_handle);
629 req->permanent = 0;
630 }
631
632 status = be_mbox_notify_wait(adapter);
633 if (!status) {
634 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
635 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
636 }
637
638 mutex_unlock(&adapter->mbox_lock);
639 return status;
640}
641
642/* Uses synchronous MCCQ */
643int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
644 u32 if_id, u32 *pmac_id, u32 domain)
645{
646 struct be_mcc_wrb *wrb;
647 struct be_cmd_req_pmac_add *req;
648 int status;
649
650 spin_lock_bh(&adapter->mcc_lock);
651
652 wrb = wrb_from_mccq(adapter);
653 if (!wrb) {
654 status = -EBUSY;
655 goto err;
656 }
657 req = embedded_payload(wrb);
658
659 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
660 OPCODE_COMMON_NTWK_PMAC_ADD);
661
662 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
663 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
664
665 req->hdr.domain = domain;
666 req->if_id = cpu_to_le32(if_id);
667 memcpy(req->mac_address, mac_addr, ETH_ALEN);
668
669 status = be_mcc_notify_wait(adapter);
670 if (!status) {
671 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
672 *pmac_id = le32_to_cpu(resp->pmac_id);
673 }
674
675err:
676 spin_unlock_bh(&adapter->mcc_lock);
677 return status;
678}
679
680/* Uses synchronous MCCQ */
681int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
682{
683 struct be_mcc_wrb *wrb;
684 struct be_cmd_req_pmac_del *req;
685 int status;
686
687 spin_lock_bh(&adapter->mcc_lock);
688
689 wrb = wrb_from_mccq(adapter);
690 if (!wrb) {
691 status = -EBUSY;
692 goto err;
693 }
694 req = embedded_payload(wrb);
695
696 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
697 OPCODE_COMMON_NTWK_PMAC_DEL);
698
699 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
700 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
701
702 req->hdr.domain = dom;
703 req->if_id = cpu_to_le32(if_id);
704 req->pmac_id = cpu_to_le32(pmac_id);
705
706 status = be_mcc_notify_wait(adapter);
707
708err:
709 spin_unlock_bh(&adapter->mcc_lock);
710 return status;
711}
712
713/* Uses Mbox */
714int be_cmd_cq_create(struct be_adapter *adapter,
715 struct be_queue_info *cq, struct be_queue_info *eq,
716 bool sol_evts, bool no_delay, int coalesce_wm)
717{
718 struct be_mcc_wrb *wrb;
719 struct be_cmd_req_cq_create *req;
720 struct be_dma_mem *q_mem = &cq->dma_mem;
721 void *ctxt;
722 int status;
723
724 if (mutex_lock_interruptible(&adapter->mbox_lock))
725 return -1;
726
727 wrb = wrb_from_mbox(adapter);
728 req = embedded_payload(wrb);
729 ctxt = &req->context;
730
731 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
732 OPCODE_COMMON_CQ_CREATE);
733
734 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
735 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
736
737 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
738 if (lancer_chip(adapter)) {
739 req->hdr.version = 2;
740 req->page_size = 1; /* 1 for 4K */
741 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
742 no_delay);
743 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
744 __ilog2_u32(cq->len/256));
745 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
746 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
747 ctxt, 1);
748 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
749 ctxt, eq->id);
750 AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
751 } else {
752 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
753 coalesce_wm);
754 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
755 ctxt, no_delay);
756 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
757 __ilog2_u32(cq->len/256));
758 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
759 AMAP_SET_BITS(struct amap_cq_context_be, solevent,
760 ctxt, sol_evts);
761 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
762 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
763 AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
764 }
765
766 be_dws_cpu_to_le(ctxt, sizeof(req->context));
767
768 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
769
770 status = be_mbox_notify_wait(adapter);
771 if (!status) {
772 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
773 cq->id = le16_to_cpu(resp->cq_id);
774 cq->created = true;
775 }
776
777 mutex_unlock(&adapter->mbox_lock);
778
779 return status;
780}
781
782static u32 be_encoded_q_len(int q_len)
783{
784 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
785 if (len_encoded == 16)
786 len_encoded = 0;
787 return len_encoded;
788}
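
/* [Editorial example] fls(2048) = 12, so a 2048-entry ring is encoded as
 * 12; an encoding of 16 (a 32768-entry ring) is stored as 0, presumably
 * because the ring_size context field is only 4 bits wide.
 */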
789
790int be_cmd_mccq_ext_create(struct be_adapter *adapter,
791 struct be_queue_info *mccq,
792 struct be_queue_info *cq)
793{
794 struct be_mcc_wrb *wrb;
795 struct be_cmd_req_mcc_ext_create *req;
796 struct be_dma_mem *q_mem = &mccq->dma_mem;
797 void *ctxt;
798 int status;
799
800 if (mutex_lock_interruptible(&adapter->mbox_lock))
801 return -1;
802
803 wrb = wrb_from_mbox(adapter);
804 req = embedded_payload(wrb);
805 ctxt = &req->context;
806
807 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
808 OPCODE_COMMON_MCC_CREATE_EXT);
809
810 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
811 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
812
813 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
814 if (lancer_chip(adapter)) {
815 req->hdr.version = 1;
816 req->cq_id = cpu_to_le16(cq->id);
817
818 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
819 be_encoded_q_len(mccq->len));
820 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
821 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
822 ctxt, cq->id);
823 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
824 ctxt, 1);
825
826 } else {
827 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
828 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
829 be_encoded_q_len(mccq->len));
830 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
831 }
832
833 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
834 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
835 be_dws_cpu_to_le(ctxt, sizeof(req->context));
836
837 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
838
839 status = be_mbox_notify_wait(adapter);
840 if (!status) {
841 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
842 mccq->id = le16_to_cpu(resp->id);
843 mccq->created = true;
844 }
845 mutex_unlock(&adapter->mbox_lock);
846
847 return status;
848}
849
850int be_cmd_mccq_org_create(struct be_adapter *adapter,
851 struct be_queue_info *mccq,
852 struct be_queue_info *cq)
853{
854 struct be_mcc_wrb *wrb;
855 struct be_cmd_req_mcc_create *req;
856 struct be_dma_mem *q_mem = &mccq->dma_mem;
857 void *ctxt;
858 int status;
859
860 if (mutex_lock_interruptible(&adapter->mbox_lock))
861 return -1;
862
863 wrb = wrb_from_mbox(adapter);
864 req = embedded_payload(wrb);
865 ctxt = &req->context;
866
867 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
868 OPCODE_COMMON_MCC_CREATE);
869
870 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
871 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
872
873 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
874
875 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
876 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
877 be_encoded_q_len(mccq->len));
878 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
879
880 be_dws_cpu_to_le(ctxt, sizeof(req->context));
881
882 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
883
884 status = be_mbox_notify_wait(adapter);
885 if (!status) {
886 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
887 mccq->id = le16_to_cpu(resp->id);
888 mccq->created = true;
889 }
890
891 mutex_unlock(&adapter->mbox_lock);
892 return status;
893}
894
895int be_cmd_mccq_create(struct be_adapter *adapter,
896 struct be_queue_info *mccq,
897 struct be_queue_info *cq)
898{
899 int status;
900
901 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
902 if (status && !lancer_chip(adapter)) {
903 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
904 "or newer to avoid conflicting priorities between NIC "
905 "and FCoE traffic");
906 status = be_cmd_mccq_org_create(adapter, mccq, cq);
907 }
908 return status;
909}
910
911int be_cmd_txq_create(struct be_adapter *adapter,
912 struct be_queue_info *txq,
913 struct be_queue_info *cq)
914{
915 struct be_mcc_wrb *wrb;
916 struct be_cmd_req_eth_tx_create *req;
917 struct be_dma_mem *q_mem = &txq->dma_mem;
918 void *ctxt;
919 int status;
920
921 if (mutex_lock_interruptible(&adapter->mbox_lock))
922 return -1;
923
924 wrb = wrb_from_mbox(adapter);
925 req = embedded_payload(wrb);
926 ctxt = &req->context;
927
928 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
929 OPCODE_ETH_TX_CREATE);
930
931 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
932 sizeof(*req));
933
934 if (lancer_chip(adapter)) {
935 req->hdr.version = 1;
936 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
937 adapter->if_handle);
938 }
939
940 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
941 req->ulp_num = BE_ULP1_NUM;
942 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
943
944 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
945 be_encoded_q_len(txq->len));
946 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
947 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
948
949 be_dws_cpu_to_le(ctxt, sizeof(req->context));
950
951 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
952
953 status = be_mbox_notify_wait(adapter);
954 if (!status) {
955 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
956 txq->id = le16_to_cpu(resp->cid);
957 txq->created = true;
958 }
959
960 mutex_unlock(&adapter->mbox_lock);
961
962 return status;
963}
964
965/* Uses MCC */
966int be_cmd_rxq_create(struct be_adapter *adapter,
967 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
968 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
969{
970 struct be_mcc_wrb *wrb;
971 struct be_cmd_req_eth_rx_create *req;
972 struct be_dma_mem *q_mem = &rxq->dma_mem;
973 int status;
974
975 spin_lock_bh(&adapter->mcc_lock);
976
977 wrb = wrb_from_mccq(adapter);
978 if (!wrb) {
979 status = -EBUSY;
980 goto err;
981 }
982 req = embedded_payload(wrb);
983
984 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
985 OPCODE_ETH_RX_CREATE);
986
987 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
988 sizeof(*req));
989
990 req->cq_id = cpu_to_le16(cq_id);
991 req->frag_size = fls(frag_size) - 1;
992 req->num_pages = 2;
993 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
994 req->interface_id = cpu_to_le32(if_id);
995 req->max_frame_size = cpu_to_le16(max_frame_size);
996 req->rss_queue = cpu_to_le32(rss);
997
998 status = be_mcc_notify_wait(adapter);
999 if (!status) {
1000 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1001 rxq->id = le16_to_cpu(resp->id);
1002 rxq->created = true;
1003 *rss_id = resp->rss_id;
1004 }
1005
1006err:
1007 spin_unlock_bh(&adapter->mcc_lock);
1008 return status;
1009}
1010
1011/* Generic destroyer function for all types of queues
1012 * Uses Mbox
1013 */
1014int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1015 int queue_type)
1016{
1017 struct be_mcc_wrb *wrb;
1018 struct be_cmd_req_q_destroy *req;
1019 u8 subsys = 0, opcode = 0;
1020 int status;
1021
1022 if (adapter->eeh_err)
1023 return -EIO;
1024
1025 if (mutex_lock_interruptible(&adapter->mbox_lock))
1026 return -1;
1027
1028 wrb = wrb_from_mbox(adapter);
1029 req = embedded_payload(wrb);
1030
1031 switch (queue_type) {
1032 case QTYPE_EQ:
1033 subsys = CMD_SUBSYSTEM_COMMON;
1034 opcode = OPCODE_COMMON_EQ_DESTROY;
1035 break;
1036 case QTYPE_CQ:
1037 subsys = CMD_SUBSYSTEM_COMMON;
1038 opcode = OPCODE_COMMON_CQ_DESTROY;
1039 break;
1040 case QTYPE_TXQ:
1041 subsys = CMD_SUBSYSTEM_ETH;
1042 opcode = OPCODE_ETH_TX_DESTROY;
1043 break;
1044 case QTYPE_RXQ:
1045 subsys = CMD_SUBSYSTEM_ETH;
1046 opcode = OPCODE_ETH_RX_DESTROY;
1047 break;
1048 case QTYPE_MCCQ:
1049 subsys = CMD_SUBSYSTEM_COMMON;
1050 opcode = OPCODE_COMMON_MCC_DESTROY;
1051 break;
1052 default:
1053 BUG();
1054 }
1055
1056 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
1057
1058 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
1059 req->id = cpu_to_le16(q->id);
1060
1061 status = be_mbox_notify_wait(adapter);
1062 if (!status)
1063 q->created = false;
1064
1065 mutex_unlock(&adapter->mbox_lock);
1066 return status;
1067}
1068
1069/* Uses MCC */
1070int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1071{
1072 struct be_mcc_wrb *wrb;
1073 struct be_cmd_req_q_destroy *req;
1074 int status;
1075
1076 spin_lock_bh(&adapter->mcc_lock);
1077
1078 wrb = wrb_from_mccq(adapter);
1079 if (!wrb) {
1080 status = -EBUSY;
1081 goto err;
1082 }
1083 req = embedded_payload(wrb);
1084
1085 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
1086 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
1087 sizeof(*req));
1088 req->id = cpu_to_le16(q->id);
1089
1090 status = be_mcc_notify_wait(adapter);
1091 if (!status)
1092 q->created = false;
1093
1094err:
1095 spin_unlock_bh(&adapter->mcc_lock);
1096 return status;
1097}
1098
1099/* Create an rx filtering policy configuration on an i/f
1100 * Uses mbox
1101 */
1102int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1103 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
1104 u32 domain)
1105{
1106 struct be_mcc_wrb *wrb;
1107 struct be_cmd_req_if_create *req;
1108 int status;
1109
1110 if (mutex_lock_interruptible(&adapter->mbox_lock))
1111 return -1;
1112
1113 wrb = wrb_from_mbox(adapter);
1114 req = embedded_payload(wrb);
1115
1116 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1117 OPCODE_COMMON_NTWK_INTERFACE_CREATE);
1118
1119 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1120 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
1121
1122 req->hdr.domain = domain;
1123 req->capability_flags = cpu_to_le32(cap_flags);
1124 req->enable_flags = cpu_to_le32(en_flags);
1125 req->pmac_invalid = pmac_invalid;
1126 if (!pmac_invalid)
1127 memcpy(req->mac_addr, mac, ETH_ALEN);
1128
1129 status = be_mbox_notify_wait(adapter);
1130 if (!status) {
1131 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1132 *if_handle = le32_to_cpu(resp->interface_id);
1133 if (!pmac_invalid)
1134 *pmac_id = le32_to_cpu(resp->pmac_id);
1135 }
1136
1137 mutex_unlock(&adapter->mbox_lock);
1138 return status;
1139}
1140
1141/* Uses mbox */
1142int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
1143{
1144 struct be_mcc_wrb *wrb;
1145 struct be_cmd_req_if_destroy *req;
1146 int status;
1147
1148 if (adapter->eeh_err)
1149 return -EIO;
1150
1151 if (mutex_lock_interruptible(&adapter->mbox_lock))
1152 return -1;
1153
1154 wrb = wrb_from_mbox(adapter);
1155 req = embedded_payload(wrb);
1156
1157 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1158 OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1159
1160 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1161 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1162
1163 req->hdr.domain = domain;
1164 req->interface_id = cpu_to_le32(interface_id);
1165
1166 status = be_mbox_notify_wait(adapter);
1167
1168 mutex_unlock(&adapter->mbox_lock);
1169
1170 return status;
1171}
1172
1173/* Get stats is a non embedded command: the request is not embedded inside
1174 * WRB but is a separate dma memory block
1175 * Uses asynchronous MCC
1176 */
1177int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1178{
1179 struct be_mcc_wrb *wrb;
1180 struct be_cmd_req_hdr *hdr;
1181 struct be_sge *sge;
1182 int status = 0;
1183
1184 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1185 be_cmd_get_die_temperature(adapter);
1186
1187 spin_lock_bh(&adapter->mcc_lock);
1188
1189 wrb = wrb_from_mccq(adapter);
1190 if (!wrb) {
1191 status = -EBUSY;
1192 goto err;
1193 }
1194 hdr = nonemb_cmd->va;
1195 sge = nonembedded_sgl(wrb);
1196
1197 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1198 OPCODE_ETH_GET_STATISTICS);
1199
1200 be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1201 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
1202
1203 if (adapter->generation == BE_GEN3)
1204 hdr->version = 1;
1205
1206 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1207 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1208 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1209 sge->len = cpu_to_le32(nonemb_cmd->size);
1210
1211 be_mcc_notify(adapter);
1212 adapter->stats_cmd_sent = true;
1213
1214err:
1215 spin_unlock_bh(&adapter->mcc_lock);
1216 return status;
1217}
1218
1219/* Lancer Stats */
1220int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1221 struct be_dma_mem *nonemb_cmd)
1222{
1223
1224 struct be_mcc_wrb *wrb;
1225 struct lancer_cmd_req_pport_stats *req;
1226 struct be_sge *sge;
1227 int status = 0;
1228
1229 spin_lock_bh(&adapter->mcc_lock);
1230
1231 wrb = wrb_from_mccq(adapter);
1232 if (!wrb) {
1233 status = -EBUSY;
1234 goto err;
1235 }
1236 req = nonemb_cmd->va;
1237 sge = nonembedded_sgl(wrb);
1238
1239 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1240 OPCODE_ETH_GET_PPORT_STATS);
1241
1242 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1243 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
1244
1245
1246 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1247 req->cmd_params.params.reset_stats = 0;
1248
1249 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1250 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1251 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1252 sge->len = cpu_to_le32(nonemb_cmd->size);
1253
1254 be_mcc_notify(adapter);
1255 adapter->stats_cmd_sent = true;
1256
1257err:
1258 spin_unlock_bh(&adapter->mcc_lock);
1259 return status;
1260}
1261
1262/* Uses synchronous mcc */
1263int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1264 u16 *link_speed, u32 dom)
1265{
1266 struct be_mcc_wrb *wrb;
1267 struct be_cmd_req_link_status *req;
1268 int status;
1269
1270 spin_lock_bh(&adapter->mcc_lock);
1271
1272 wrb = wrb_from_mccq(adapter);
1273 if (!wrb) {
1274 status = -EBUSY;
1275 goto err;
1276 }
1277 req = embedded_payload(wrb);
1278
1279 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1280 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
1281
1282 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1283 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
1284
1285 status = be_mcc_notify_wait(adapter);
1286 if (!status) {
1287 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1288 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1289 *link_speed = le16_to_cpu(resp->link_speed);
1290 *mac_speed = resp->mac_speed;
1291 }
1292 }
1293
1294err:
1295 spin_unlock_bh(&adapter->mcc_lock);
1296 return status;
1297}
1298
1299/* Uses synchronous mcc */
1300int be_cmd_get_die_temperature(struct be_adapter *adapter)
1301{
1302 struct be_mcc_wrb *wrb;
1303 struct be_cmd_req_get_cntl_addnl_attribs *req;
1304 int status;
1305
1306 spin_lock_bh(&adapter->mcc_lock);
1307
1308 wrb = wrb_from_mccq(adapter);
1309 if (!wrb) {
1310 status = -EBUSY;
1311 goto err;
1312 }
1313 req = embedded_payload(wrb);
1314
1315 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1316 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1317
1318 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1319 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1320
1321 status = be_mcc_notify_wait(adapter);
1322 if (!status) {
1323 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1324 embedded_payload(wrb);
1325 adapter->drv_stats.be_on_die_temperature =
1326 resp->on_die_temperature;
1327 } else {
1328 /* If IOCTL fails once, do not bother issuing it again */
1329 be_get_temp_freq = 0;
1330 }
1331
1332err:
1333 spin_unlock_bh(&adapter->mcc_lock);
1334 return status;
1335}
1336
1337/* Uses synchronous mcc */
1338int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1339{
1340 struct be_mcc_wrb *wrb;
1341 struct be_cmd_req_get_fat *req;
1342 int status;
1343
1344 spin_lock_bh(&adapter->mcc_lock);
1345
1346 wrb = wrb_from_mccq(adapter);
1347 if (!wrb) {
1348 status = -EBUSY;
1349 goto err;
1350 }
1351 req = embedded_payload(wrb);
1352
1353 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1354 OPCODE_COMMON_MANAGE_FAT);
1355
1356 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1357 OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
1358 req->fat_operation = cpu_to_le32(QUERY_FAT);
1359 status = be_mcc_notify_wait(adapter);
1360 if (!status) {
1361 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1362 if (log_size && resp->log_size)
1363 *log_size = le32_to_cpu(resp->log_size) -
1364 sizeof(u32);
1365 }
1366err:
1367 spin_unlock_bh(&adapter->mcc_lock);
1368 return status;
1369}
1370
1371void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1372{
1373 struct be_dma_mem get_fat_cmd;
1374 struct be_mcc_wrb *wrb;
1375 struct be_cmd_req_get_fat *req;
1376 struct be_sge *sge;
1377 u32 offset = 0, total_size, buf_size,
1378 log_offset = sizeof(u32), payload_len;
1379 int status;
1380
1381 if (buf_len == 0)
1382 return;
1383
1384 total_size = buf_len;
1385
1386 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1387 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1388 get_fat_cmd.size,
1389 &get_fat_cmd.dma);
1390 if (!get_fat_cmd.va) {
1391 status = -ENOMEM;
1392 dev_err(&adapter->pdev->dev,
1393 "Memory allocation failure while retrieving FAT data\n");
1394 return;
1395 }
1396
1397 spin_lock_bh(&adapter->mcc_lock);
1398
1399 while (total_size) {
1400 buf_size = min(total_size, (u32)60*1024);
1401 total_size -= buf_size;
1402
1403 wrb = wrb_from_mccq(adapter);
1404 if (!wrb) {
1405 status = -EBUSY;
1406 goto err;
1407 }
1408 req = get_fat_cmd.va;
1409 sge = nonembedded_sgl(wrb);
1410
1411 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1412 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1413 OPCODE_COMMON_MANAGE_FAT);
1414
1415 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1416 OPCODE_COMMON_MANAGE_FAT, payload_len);
1417
1418 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1419 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1420 sge->len = cpu_to_le32(get_fat_cmd.size);
1421
1422 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1423 req->read_log_offset = cpu_to_le32(log_offset);
1424 req->read_log_length = cpu_to_le32(buf_size);
1425 req->data_buffer_size = cpu_to_le32(buf_size);
1426
1427 status = be_mcc_notify_wait(adapter);
1428 if (!status) {
1429 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1430 memcpy(buf + offset,
1431 resp->data_buffer,
1432 le32_to_cpu(resp->read_log_length));
1433 } else {
1434 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1435 goto err;
1436 }
1437 offset += buf_size;
1438 log_offset += buf_size;
1439 }
1440err:
1441 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1442 get_fat_cmd.va,
1443 get_fat_cmd.dma);
1444 spin_unlock_bh(&adapter->mcc_lock);
1445}
1446
1447/* Uses Mbox */
1448int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1449{
1450 struct be_mcc_wrb *wrb;
1451 struct be_cmd_req_get_fw_version *req;
1452 int status;
1453
1454 if (mutex_lock_interruptible(&adapter->mbox_lock))
1455 return -1;
1456
1457 wrb = wrb_from_mbox(adapter);
1458 req = embedded_payload(wrb);
1459
1460 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1461 OPCODE_COMMON_GET_FW_VERSION);
1462
1463 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1464 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1465
1466 status = be_mbox_notify_wait(adapter);
1467 if (!status) {
1468 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1469 strlcpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1470 }
1471
1472 mutex_unlock(&adapter->mbox_lock);
1473 return status;
1474}
1475
1476/* Set the EQ delay interval of an EQ to the specified value
1477 * Uses async mcc
1478 */
1479int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1480{
1481 struct be_mcc_wrb *wrb;
1482 struct be_cmd_req_modify_eq_delay *req;
1483 int status = 0;
1484
1485 spin_lock_bh(&adapter->mcc_lock);
1486
1487 wrb = wrb_from_mccq(adapter);
1488 if (!wrb) {
1489 status = -EBUSY;
1490 goto err;
1491 }
1492 req = embedded_payload(wrb);
1493
1494 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1495 OPCODE_COMMON_MODIFY_EQ_DELAY);
1496
1497 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1498 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1499
1500 req->num_eq = cpu_to_le32(1);
1501 req->delay[0].eq_id = cpu_to_le32(eq_id);
1502 req->delay[0].phase = 0;
1503 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1504
1505 be_mcc_notify(adapter);
1506
1507err:
1508 spin_unlock_bh(&adapter->mcc_lock);
1509 return status;
1510}
1511
1512/* Uses synchronous mcc */
1513int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1514 u32 num, bool untagged, bool promiscuous)
1515{
1516 struct be_mcc_wrb *wrb;
1517 struct be_cmd_req_vlan_config *req;
1518 int status;
1519
1520 spin_lock_bh(&adapter->mcc_lock);
1521
1522 wrb = wrb_from_mccq(adapter);
1523 if (!wrb) {
1524 status = -EBUSY;
1525 goto err;
1526 }
1527 req = embedded_payload(wrb);
1528
1529 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1530 OPCODE_COMMON_NTWK_VLAN_CONFIG);
1531
1532 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1533 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
1534
1535 req->interface_id = if_id;
1536 req->promiscuous = promiscuous;
1537 req->untagged = untagged;
1538 req->num_vlan = num;
1539 if (!promiscuous) {
1540 memcpy(req->normal_vlan, vtag_array,
1541 req->num_vlan * sizeof(vtag_array[0]));
1542 }
1543
1544 status = be_mcc_notify_wait(adapter);
1545
1546err:
1547 spin_unlock_bh(&adapter->mcc_lock);
1548 return status;
1549}
1550
1551int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1552{
1553 struct be_mcc_wrb *wrb;
1554 struct be_dma_mem *mem = &adapter->rx_filter;
1555 struct be_cmd_req_rx_filter *req = mem->va;
1556 struct be_sge *sge;
1557 int status;
1558
1559 spin_lock_bh(&adapter->mcc_lock);
1560
1561 wrb = wrb_from_mccq(adapter);
1562 if (!wrb) {
1563 status = -EBUSY;
1564 goto err;
1565 }
1566 sge = nonembedded_sgl(wrb);
1567 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1568 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1569 sge->len = cpu_to_le32(mem->size);
1570 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1571 OPCODE_COMMON_NTWK_RX_FILTER);
1572
1573 memset(req, 0, sizeof(*req));
1574 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1575 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
1576
1577 req->if_id = cpu_to_le32(adapter->if_handle);
1578 if (flags & IFF_PROMISC) {
1579 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1580 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1581 if (value == ON)
1582 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1583 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1584 } else if (flags & IFF_ALLMULTI) {
1585 req->if_flags_mask = req->if_flags =
1586 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1587 } else {
1588 struct netdev_hw_addr *ha;
1589 int i = 0;
1590
1591 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1592 netdev_for_each_mc_addr(ha, adapter->netdev)
1593 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1594 }
1595
1596 status = be_mcc_notify_wait(adapter);
1597err:
1598 spin_unlock_bh(&adapter->mcc_lock);
1599 return status;
1600}
1601
1602/* Uses synchronous mcc */
1603int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1604{
1605 struct be_mcc_wrb *wrb;
1606 struct be_cmd_req_set_flow_control *req;
1607 int status;
1608
1609 spin_lock_bh(&adapter->mcc_lock);
1610
1611 wrb = wrb_from_mccq(adapter);
1612 if (!wrb) {
1613 status = -EBUSY;
1614 goto err;
1615 }
1616 req = embedded_payload(wrb);
1617
1618 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1619 OPCODE_COMMON_SET_FLOW_CONTROL);
1620
1621 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1622 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1623
1624 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1625 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1626
1627 status = be_mcc_notify_wait(adapter);
1628
1629err:
1630 spin_unlock_bh(&adapter->mcc_lock);
1631 return status;
1632}
1633
1634/* Uses sync mcc */
1635int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1636{
1637 struct be_mcc_wrb *wrb;
1638 struct be_cmd_req_get_flow_control *req;
1639 int status;
1640
1641 spin_lock_bh(&adapter->mcc_lock);
1642
1643 wrb = wrb_from_mccq(adapter);
1644 if (!wrb) {
1645 status = -EBUSY;
1646 goto err;
1647 }
1648 req = embedded_payload(wrb);
1649
1650 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1651 OPCODE_COMMON_GET_FLOW_CONTROL);
1652
1653 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1654 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1655
1656 status = be_mcc_notify_wait(adapter);
1657 if (!status) {
1658 struct be_cmd_resp_get_flow_control *resp =
1659 embedded_payload(wrb);
1660 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1661 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1662 }
1663
1664err:
1665 spin_unlock_bh(&adapter->mcc_lock);
1666 return status;
1667}
1668
1669/* Uses mbox */
1670int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1671 u32 *mode, u32 *caps)
1672{
1673 struct be_mcc_wrb *wrb;
1674 struct be_cmd_req_query_fw_cfg *req;
1675 int status;
1676
1677 if (mutex_lock_interruptible(&adapter->mbox_lock))
1678 return -1;
1679
1680 wrb = wrb_from_mbox(adapter);
1681 req = embedded_payload(wrb);
1682
1683 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1684 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
1685
1686 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1687 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1688
1689 status = be_mbox_notify_wait(adapter);
1690 if (!status) {
1691 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1692 *port_num = le32_to_cpu(resp->phys_port);
1693 *mode = le32_to_cpu(resp->function_mode);
1694 *caps = le32_to_cpu(resp->function_caps);
1695 }
1696
1697 mutex_unlock(&adapter->mbox_lock);
1698 return status;
1699}
1700
1701/* Uses mbox */
1702int be_cmd_reset_function(struct be_adapter *adapter)
1703{
1704 struct be_mcc_wrb *wrb;
1705 struct be_cmd_req_hdr *req;
1706 int status;
1707
1708 if (mutex_lock_interruptible(&adapter->mbox_lock))
1709 return -1;
1710
1711 wrb = wrb_from_mbox(adapter);
1712 req = embedded_payload(wrb);
1713
1714 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1715 OPCODE_COMMON_FUNCTION_RESET);
1716
1717 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1718 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1719
1720 status = be_mbox_notify_wait(adapter);
1721
1722 mutex_unlock(&adapter->mbox_lock);
1723 return status;
1724}
1725
1726int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1727{
1728 struct be_mcc_wrb *wrb;
1729 struct be_cmd_req_rss_config *req;
1730 u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
1731 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
1732 int status;
1733
1734 if (mutex_lock_interruptible(&adapter->mbox_lock))
1735 return -1;
1736
1737 wrb = wrb_from_mbox(adapter);
1738 req = embedded_payload(wrb);
1739
1740 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1741 OPCODE_ETH_RSS_CONFIG);
1742
1743 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1744 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1745
1746 req->if_id = cpu_to_le32(adapter->if_handle);
1747 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1748 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1749 memcpy(req->cpu_table, rsstable, table_size);
1750 memcpy(req->hash, myhash, sizeof(myhash));
1751 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1752
1753 status = be_mbox_notify_wait(adapter);
1754
1755 mutex_unlock(&adapter->mbox_lock);
1756 return status;
1757}
1758
1759/* Uses sync mcc */
1760int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1761 u8 bcn, u8 sts, u8 state)
1762{
1763 struct be_mcc_wrb *wrb;
1764 struct be_cmd_req_enable_disable_beacon *req;
1765 int status;
1766
1767 spin_lock_bh(&adapter->mcc_lock);
1768
1769 wrb = wrb_from_mccq(adapter);
1770 if (!wrb) {
1771 status = -EBUSY;
1772 goto err;
1773 }
1774 req = embedded_payload(wrb);
1775
1776 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1777 OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1778
1779 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1780 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1781
1782 req->port_num = port_num;
1783 req->beacon_state = state;
1784 req->beacon_duration = bcn;
1785 req->status_duration = sts;
1786
1787 status = be_mcc_notify_wait(adapter);
1788
1789err:
1790 spin_unlock_bh(&adapter->mcc_lock);
1791 return status;
1792}
1793
1794/* Uses sync mcc */
1795int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1796{
1797 struct be_mcc_wrb *wrb;
1798 struct be_cmd_req_get_beacon_state *req;
1799 int status;
1800
1801 spin_lock_bh(&adapter->mcc_lock);
1802
1803 wrb = wrb_from_mccq(adapter);
1804 if (!wrb) {
1805 status = -EBUSY;
1806 goto err;
1807 }
1808 req = embedded_payload(wrb);
1809
1810 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1811 OPCODE_COMMON_GET_BEACON_STATE);
1812
1813 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1814 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1815
1816 req->port_num = port_num;
1817
1818 status = be_mcc_notify_wait(adapter);
1819 if (!status) {
1820 struct be_cmd_resp_get_beacon_state *resp =
1821 embedded_payload(wrb);
1822 *state = resp->beacon_state;
1823 }
1824
1825err:
1826 spin_unlock_bh(&adapter->mcc_lock);
1827 return status;
1828}
1829
1830int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1831 u32 data_size, u32 data_offset, const char *obj_name,
1832 u32 *data_written, u8 *addn_status)
1833{
1834 struct be_mcc_wrb *wrb;
1835 struct lancer_cmd_req_write_object *req;
1836 struct lancer_cmd_resp_write_object *resp;
1837 void *ctxt = NULL;
1838 int status;
1839
1840 spin_lock_bh(&adapter->mcc_lock);
1841 adapter->flash_status = 0;
1842
1843 wrb = wrb_from_mccq(adapter);
1844 if (!wrb) {
1845 status = -EBUSY;
1846 goto err_unlock;
1847 }
1848
1849 req = embedded_payload(wrb);
1850
1851 be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
1852 true, 1, OPCODE_COMMON_WRITE_OBJECT);
1853 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1854
1855 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1856 OPCODE_COMMON_WRITE_OBJECT,
1857 sizeof(struct lancer_cmd_req_write_object));
1858
1859 ctxt = &req->context;
1860 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1861 write_length, ctxt, data_size);
1862
1863 if (data_size == 0)
1864 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1865 eof, ctxt, 1);
1866 else
1867 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1868 eof, ctxt, 0);
1869
1870 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1871 req->write_offset = cpu_to_le32(data_offset);
1872 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
1873 req->descriptor_count = cpu_to_le32(1);
1874 req->buf_len = cpu_to_le32(data_size);
1875 req->addr_low = cpu_to_le32((cmd->dma +
1876 sizeof(struct lancer_cmd_req_write_object))
1877 & 0xFFFFFFFF);
1878 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1879 sizeof(struct lancer_cmd_req_write_object)));
1880
1881 be_mcc_notify(adapter);
1882 spin_unlock_bh(&adapter->mcc_lock);
1883
1884 if (!wait_for_completion_timeout(&adapter->flash_compl,
1885 msecs_to_jiffies(12000)))
1886 return -1;
1887
1888 status = adapter->flash_status;
1889
1890 resp = embedded_payload(wrb);
1891 if (!status) {
1892 *data_written = le32_to_cpu(resp->actual_write_len);
1893 } else {
1894 *addn_status = resp->additional_status;
1895 status = resp->status;
1896 }
1897
1898 return status;
1899
1900err_unlock:
1901 spin_unlock_bh(&adapter->mcc_lock);
1902 return status;
1903}
1904
1905int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1906 u32 flash_type, u32 flash_opcode, u32 buf_size)
1907{
1908 struct be_mcc_wrb *wrb;
1909 struct be_cmd_write_flashrom *req;
1910 struct be_sge *sge;
1911 int status;
1912
1913 spin_lock_bh(&adapter->mcc_lock);
1914 adapter->flash_status = 0;
1915
1916 wrb = wrb_from_mccq(adapter);
1917 if (!wrb) {
1918 status = -EBUSY;
1919 goto err_unlock;
1920 }
1921 req = cmd->va;
1922 sge = nonembedded_sgl(wrb);
1923
1924 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1925 OPCODE_COMMON_WRITE_FLASHROM);
1926 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1927
1928 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1929 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
1930 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1931 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1932 sge->len = cpu_to_le32(cmd->size);
1933
1934 req->params.op_type = cpu_to_le32(flash_type);
1935 req->params.op_code = cpu_to_le32(flash_opcode);
1936 req->params.data_buf_size = cpu_to_le32(buf_size);
1937
1938 be_mcc_notify(adapter);
1939 spin_unlock_bh(&adapter->mcc_lock);
1940
1941 if (!wait_for_completion_timeout(&adapter->flash_compl,
1942 msecs_to_jiffies(12000)))
1943 status = -1;
1944 else
1945 status = adapter->flash_status;
1946
1947 return status;
1948
1949err_unlock:
1950 spin_unlock_bh(&adapter->mcc_lock);
1951 return status;
1952}
1953
1954int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1955 int offset)
1956{
1957 struct be_mcc_wrb *wrb;
1958 struct be_cmd_write_flashrom *req;
1959 int status;
1960
1961 spin_lock_bh(&adapter->mcc_lock);
1962
1963 wrb = wrb_from_mccq(adapter);
1964 if (!wrb) {
1965 status = -EBUSY;
1966 goto err;
1967 }
1968 req = embedded_payload(wrb);
1969
1970 be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
1971 OPCODE_COMMON_READ_FLASHROM);
1972
1973 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1974 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1975
1976 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1977 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1978 req->params.offset = cpu_to_le32(offset);
1979 req->params.data_buf_size = cpu_to_le32(0x4);
1980
1981 status = be_mcc_notify_wait(adapter);
1982 if (!status)
1983 memcpy(flashed_crc, req->params.data_buf, 4);
1984
1985err:
1986 spin_unlock_bh(&adapter->mcc_lock);
1987 return status;
1988}
1989
1990int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1991 struct be_dma_mem *nonemb_cmd)
1992{
1993 struct be_mcc_wrb *wrb;
1994 struct be_cmd_req_acpi_wol_magic_config *req;
1995 struct be_sge *sge;
1996 int status;
1997
1998 spin_lock_bh(&adapter->mcc_lock);
1999
2000 wrb = wrb_from_mccq(adapter);
2001 if (!wrb) {
2002 status = -EBUSY;
2003 goto err;
2004 }
2005 req = nonemb_cmd->va;
2006 sge = nonembedded_sgl(wrb);
2007
2008 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2009 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
2010
2011 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2012 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
2013 memcpy(req->magic_mac, mac, ETH_ALEN);
2014
2015 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2016 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2017 sge->len = cpu_to_le32(nonemb_cmd->size);
2018
2019 status = be_mcc_notify_wait(adapter);
2020
2021err:
2022 spin_unlock_bh(&adapter->mcc_lock);
2023 return status;
2024}
2025
2026int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2027 u8 loopback_type, u8 enable)
2028{
2029 struct be_mcc_wrb *wrb;
2030 struct be_cmd_req_set_lmode *req;
2031 int status;
2032
2033 spin_lock_bh(&adapter->mcc_lock);
2034
2035 wrb = wrb_from_mccq(adapter);
2036 if (!wrb) {
2037 status = -EBUSY;
2038 goto err;
2039 }
2040
2041 req = embedded_payload(wrb);
2042
2043 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2044 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
2045
2046 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2047 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
2048 sizeof(*req));
2049
2050 req->src_port = port_num;
2051 req->dest_port = port_num;
2052 req->loopback_type = loopback_type;
2053 req->loopback_state = enable;
2054
2055 status = be_mcc_notify_wait(adapter);
2056err:
2057 spin_unlock_bh(&adapter->mcc_lock);
2058 return status;
2059}
2060
2061int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2062 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2063{
2064 struct be_mcc_wrb *wrb;
2065 struct be_cmd_req_loopback_test *req;
2066 int status;
2067
2068 spin_lock_bh(&adapter->mcc_lock);
2069
2070 wrb = wrb_from_mccq(adapter);
2071 if (!wrb) {
2072 status = -EBUSY;
2073 goto err;
2074 }
2075
2076 req = embedded_payload(wrb);
2077
2078 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2079 OPCODE_LOWLEVEL_LOOPBACK_TEST);
2080
2081 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2082 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
2083 req->hdr.timeout = cpu_to_le32(4);
2084
2085 req->pattern = cpu_to_le64(pattern);
2086 req->src_port = cpu_to_le32(port_num);
2087 req->dest_port = cpu_to_le32(port_num);
2088 req->pkt_size = cpu_to_le32(pkt_size);
2089 req->num_pkts = cpu_to_le32(num_pkts);
2090 req->loopback_type = cpu_to_le32(loopback_type);
2091
2092 status = be_mcc_notify_wait(adapter);
2093 if (!status) {
2094 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2095 status = le32_to_cpu(resp->status);
2096 }
2097
2098err:
2099 spin_unlock_bh(&adapter->mcc_lock);
2100 return status;
2101}
2102
2103int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2104 u32 byte_cnt, struct be_dma_mem *cmd)
2105{
2106 struct be_mcc_wrb *wrb;
2107 struct be_cmd_req_ddrdma_test *req;
2108 struct be_sge *sge;
2109 int status;
2110 int i, j = 0;
2111
2112 spin_lock_bh(&adapter->mcc_lock);
2113
2114 wrb = wrb_from_mccq(adapter);
2115 if (!wrb) {
2116 status = -EBUSY;
2117 goto err;
2118 }
2119 req = cmd->va;
2120 sge = nonembedded_sgl(wrb);
2121 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
2122 OPCODE_LOWLEVEL_HOST_DDR_DMA);
2123 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2124 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
2125
2126 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2127 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2128 sge->len = cpu_to_le32(cmd->size);
2129
2130 req->pattern = cpu_to_le64(pattern);
2131 req->byte_count = cpu_to_le32(byte_cnt);
2132 for (i = 0; i < byte_cnt; i++) {
2133 req->snd_buff[i] = (u8)(pattern >> (j*8));
2134 j++;
2135 if (j > 7)
2136 j = 0;
2137 }
2138
2139 status = be_mcc_notify_wait(adapter);
2140
2141 if (!status) {
2142 struct be_cmd_resp_ddrdma_test *resp;
2143 resp = cmd->va;
2144 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2145 resp->snd_err) {
2146 status = -1;
2147 }
2148 }
2149
2150err:
2151 spin_unlock_bh(&adapter->mcc_lock);
2152 return status;
2153}
2154
2155int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2156 struct be_dma_mem *nonemb_cmd)
2157{
2158 struct be_mcc_wrb *wrb;
2159 struct be_cmd_req_seeprom_read *req;
2160 struct be_sge *sge;
2161 int status;
2162
2163 spin_lock_bh(&adapter->mcc_lock);
2164
2165 wrb = wrb_from_mccq(adapter);
2166 if (!wrb) {
2167 status = -EBUSY;
2168 goto err;
2169 }
2170 req = nonemb_cmd->va;
2171 sge = nonembedded_sgl(wrb);
2172
2173 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2174 OPCODE_COMMON_SEEPROM_READ);
2175
2176 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2177 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
2178
2179 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2180 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2181 sge->len = cpu_to_le32(nonemb_cmd->size);
2182
2183 status = be_mcc_notify_wait(adapter);
2184
2185err:
2186 spin_unlock_bh(&adapter->mcc_lock);
2187 return status;
2188}
2189
2190int be_cmd_get_phy_info(struct be_adapter *adapter,
2191 struct be_phy_info *phy_info)
2192{
2193 struct be_mcc_wrb *wrb;
2194 struct be_cmd_req_get_phy_info *req;
2195 struct be_sge *sge;
2196 struct be_dma_mem cmd;
2197 int status;
2198
2199 spin_lock_bh(&adapter->mcc_lock);
2200
2201 wrb = wrb_from_mccq(adapter);
2202 if (!wrb) {
2203 status = -EBUSY;
2204 goto err;
2205 }
2206 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2207 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2208 &cmd.dma);
2209 if (!cmd.va) {
2210 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2211 status = -ENOMEM;
2212 goto err;
2213 }
2214
2215 req = cmd.va;
2216 sge = nonembedded_sgl(wrb);
2217
2218 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2219 OPCODE_COMMON_GET_PHY_DETAILS);
2220
2221 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2222 OPCODE_COMMON_GET_PHY_DETAILS,
2223 sizeof(*req));
2224
2225 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
2226 sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
2227 sge->len = cpu_to_le32(cmd.size);
2228
2229 status = be_mcc_notify_wait(adapter);
2230 if (!status) {
2231 struct be_phy_info *resp_phy_info =
2232 cmd.va + sizeof(struct be_cmd_req_hdr);
2233 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
2234 phy_info->interface_type =
2235 le16_to_cpu(resp_phy_info->interface_type);
2236 }
2237 pci_free_consistent(adapter->pdev, cmd.size,
2238 cmd.va, cmd.dma);
2239err:
2240 spin_unlock_bh(&adapter->mcc_lock);
2241 return status;
2242}
2243
2244int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2245{
2246 struct be_mcc_wrb *wrb;
2247 struct be_cmd_req_set_qos *req;
2248 int status;
2249
2250 spin_lock_bh(&adapter->mcc_lock);
2251
2252 wrb = wrb_from_mccq(adapter);
2253 if (!wrb) {
2254 status = -EBUSY;
2255 goto err;
2256 }
2257
2258 req = embedded_payload(wrb);
2259
2260 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2261 OPCODE_COMMON_SET_QOS);
2262
2263 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2264 OPCODE_COMMON_SET_QOS, sizeof(*req));
2265
2266 req->hdr.domain = domain;
2267 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2268 req->max_bps_nic = cpu_to_le32(bps);
2269
2270 status = be_mcc_notify_wait(adapter);
2271
2272err:
2273 spin_unlock_bh(&adapter->mcc_lock);
2274 return status;
2275}
2276
2277int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2278{
2279 struct be_mcc_wrb *wrb;
2280 struct be_cmd_req_cntl_attribs *req;
2281 struct be_cmd_resp_cntl_attribs *resp;
2282 struct be_sge *sge;
2283 int status;
2284 int payload_len = max(sizeof(*req), sizeof(*resp));
2285 struct mgmt_controller_attrib *attribs;
2286 struct be_dma_mem attribs_cmd;
2287
2288 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2289 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2290 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2291 &attribs_cmd.dma);
2292 if (!attribs_cmd.va) {
2293 dev_err(&adapter->pdev->dev,
2294 "Memory allocation failure\n");
2295 return -ENOMEM;
2296 }
2297 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2298 status = -1;
2299 goto free_cmd;
2300 }
2301 wrb = wrb_from_mbox(adapter);
2302 if (!wrb) {
2303 status = -EBUSY;
2304 goto err;
2305 }
2306 req = attribs_cmd.va;
2307 sge = nonembedded_sgl(wrb);
2308
2309 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2310 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2311 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2312 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2313 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2314 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2315 sge->len = cpu_to_le32(attribs_cmd.size);
2316
2317 status = be_mbox_notify_wait(adapter);
2318 if (!status) {
2319 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2320 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2321 }
2322
2323err:
2324 mutex_unlock(&adapter->mbox_lock);
2325free_cmd:
2326 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, attribs_cmd.dma);
2327 return status;
2328}
2329
2330/* Uses mbox */
2331int be_cmd_req_native_mode(struct be_adapter *adapter)
2332{
2333 struct be_mcc_wrb *wrb;
2334 struct be_cmd_req_set_func_cap *req;
2335 int status;
2336
2337 if (mutex_lock_interruptible(&adapter->mbox_lock))
2338 return -1;
2339
2340 wrb = wrb_from_mbox(adapter);
2341 if (!wrb) {
2342 status = -EBUSY;
2343 goto err;
2344 }
2345
2346 req = embedded_payload(wrb);
2347
2348 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2349 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2350
2351 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2352 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2353
2354 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2355 CAPABILITY_BE3_NATIVE_ERX_API);
2356 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2357
2358 status = be_mbox_notify_wait(adapter);
2359 if (!status) {
2360 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2361 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2362 CAPABILITY_BE3_NATIVE_ERX_API;
2363 }
2364err:
2365 mutex_unlock(&adapter->mbox_lock);
2366 return status;
2367}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
new file mode 100644
index 000000000000..b61eac7ece35
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -0,0 +1,1502 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18/*
19 * The driver sends configuration and management command requests to the
20 * firmware in the BE. These requests are communicated to the processor
21 * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
22 * WRB inside a MAILBOX.
23 * The commands are serviced by the ARM processor in the BladeEngine's MPU.
24 */
25
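/* A minimal sketch of the two submission paths described above, using the
 * driver's own helpers (request setup and error handling elided):
 *
 *	MCC-WRB ring (most runtime commands):
 *		spin_lock_bh(&adapter->mcc_lock);
 *		wrb = wrb_from_mccq(adapter);
 *		...prepare wrb and request header...
 *		status = be_mcc_notify_wait(adapter);
 *		spin_unlock_bh(&adapter->mcc_lock);
 *
 *	Mailbox (early init, before the MCC queue exists):
 *		if (mutex_lock_interruptible(&adapter->mbox_lock))
 *			return -1;
 *		wrb = wrb_from_mbox(adapter);
 *		...prepare wrb and request header...
 *		status = be_mbox_notify_wait(adapter);
 *		mutex_unlock(&adapter->mbox_lock);
 */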
26struct be_sge {
27 u32 pa_lo;
28 u32 pa_hi;
29 u32 len;
30};
31
32#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
33#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
34#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
35struct be_mcc_wrb {
36 u32 embedded; /* dword 0 */
37 u32 payload_length; /* dword 1 */
38 u32 tag0; /* dword 2 */
39 u32 tag1; /* dword 3 */
40 u32 rsvd; /* dword 4 */
41 union {
42 u8 embedded_payload[236]; /* used by embedded cmds */
43 struct be_sge sgl[19]; /* used by non-embedded cmds */
44 } payload;
45};
46
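/* A minimal sketch of how dword 0 of a WRB is encoded: bit 0 marks an
 * embedded payload and bits 3 - 7 carry the SGE count for non-embedded
 * commands. The helper below is hypothetical; it mirrors what the
 * be_wrb_hdr_prepare() calls in be_cmds.c do.
 */
static inline void example_wrb_set_embedded(struct be_mcc_wrb *wrb,
					    bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
}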
47#define CQE_FLAGS_VALID_MASK (1 << 31)
48#define CQE_FLAGS_ASYNC_MASK (1 << 30)
49#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
50#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
51
52/* Completion Status */
53enum {
54 MCC_STATUS_SUCCESS = 0,
55 MCC_STATUS_FAILED = 1,
56 MCC_STATUS_ILLEGAL_REQUEST = 2,
57 MCC_STATUS_ILLEGAL_FIELD = 3,
58 MCC_STATUS_INSUFFICIENT_BUFFER = 4,
59 MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
60 MCC_STATUS_NOT_SUPPORTED = 66
61};
62
63#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF
66#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
67
68struct be_mcc_compl {
69 u32 status; /* dword 0 */
70 u32 tag0; /* dword 1 */
71 u32 tag1; /* dword 2 */
72 u32 flags; /* dword 3 */
73};
74
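/* A minimal sketch (hypothetical helper) of pulling the base and extended
 * completion status out of dword 0 of a be_mcc_compl with the shift/mask
 * values defined above.
 */
static inline void example_compl_status(struct be_mcc_compl *compl,
					u16 *base, u16 *extd)
{
	*base = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		CQE_STATUS_COMPL_MASK;
	*extd = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		CQE_STATUS_EXTD_MASK;
}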
75/* When the async bit of mcc_compl is set, the last 4 bytes of
76 * mcc_compl are interpreted as follows:
77 */
78#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
79#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
80#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
81#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
82#define ASYNC_EVENT_CODE_LINK_STATE 0x1
83#define ASYNC_EVENT_CODE_GRP_5 0x5
84#define ASYNC_EVENT_QOS_SPEED 0x1
85#define ASYNC_EVENT_COS_PRIORITY 0x2
86#define ASYNC_EVENT_PVID_STATE 0x3
87struct be_async_event_trailer {
88 u32 code;
89};
90
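/* A minimal sketch (hypothetical helper) of decoding the trailer dword:
 * async events are dispatched on the event code, e.g.
 * ASYNC_EVENT_CODE_LINK_STATE or ASYNC_EVENT_CODE_GRP_5.
 */
static inline u8 example_async_event_code(u32 trailer)
{
	return (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK;
}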
91enum {
92 LINK_DOWN = 0x0,
93 LINK_UP = 0x1
94};
95#define LINK_STATUS_MASK 0x1
96
97/* When the event code of an async trailer is link-state, the mcc_compl
98 * must be interpreted as follows
99 */
100struct be_async_event_link_state {
101 u8 physical_port;
102 u8 port_link_status;
103 u8 port_duplex;
104 u8 port_speed;
105 u8 port_fault;
106 u8 rsvd0[7];
107 struct be_async_event_trailer trailer;
108} __packed;
109
110/* When the event code of an async trailer is GRP5 and event_type is QOS_SPEED,
111 * the mcc_compl must be interpreted as follows
112 */
113struct be_async_event_grp5_qos_link_speed {
114 u8 physical_port;
115 u8 rsvd[5];
116 u16 qos_link_speed;
117 u32 event_tag;
118 struct be_async_event_trailer trailer;
119} __packed;
120
121/* When the event code of an async trailer is GRP5 and event type is
122 * CoS-Priority, the mcc_compl must be interpreted as follows
123 */
124struct be_async_event_grp5_cos_priority {
125 u8 physical_port;
126 u8 available_priority_bmap;
127 u8 reco_default_priority;
128 u8 valid;
129 u8 rsvd0;
130 u8 event_tag;
131 struct be_async_event_trailer trailer;
132} __packed;
133
134/* When the event code of an async trailer is GRP5 and event type is
135 * PVID state, the mcc_compl must be interpreted as follows
136 */
137struct be_async_event_grp5_pvid_state {
138 u8 enabled;
139 u8 rsvd0;
140 u16 tag;
141 u32 event_tag;
142 u32 rsvd1;
143 struct be_async_event_trailer trailer;
144} __packed;
145
146struct be_mcc_mailbox {
147 struct be_mcc_wrb wrb;
148 struct be_mcc_compl compl;
149};
150
151#define CMD_SUBSYSTEM_COMMON 0x1
152#define CMD_SUBSYSTEM_ETH 0x3
153#define CMD_SUBSYSTEM_LOWLEVEL 0xb
154
155#define OPCODE_COMMON_NTWK_MAC_QUERY 1
156#define OPCODE_COMMON_NTWK_MAC_SET 2
157#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
158#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
159#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
160#define OPCODE_COMMON_READ_FLASHROM 6
161#define OPCODE_COMMON_WRITE_FLASHROM 7
162#define OPCODE_COMMON_CQ_CREATE 12
163#define OPCODE_COMMON_EQ_CREATE 13
164#define OPCODE_COMMON_MCC_CREATE 21
165#define OPCODE_COMMON_SET_QOS 28
166#define OPCODE_COMMON_MCC_CREATE_EXT 90
167#define OPCODE_COMMON_SEEPROM_READ 30
168#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
169#define OPCODE_COMMON_NTWK_RX_FILTER 34
170#define OPCODE_COMMON_GET_FW_VERSION 35
171#define OPCODE_COMMON_SET_FLOW_CONTROL 36
172#define OPCODE_COMMON_GET_FLOW_CONTROL 37
173#define OPCODE_COMMON_SET_FRAME_SIZE 39
174#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
175#define OPCODE_COMMON_FIRMWARE_CONFIG 42
176#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
177#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
178#define OPCODE_COMMON_MCC_DESTROY 53
179#define OPCODE_COMMON_CQ_DESTROY 54
180#define OPCODE_COMMON_EQ_DESTROY 55
181#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
182#define OPCODE_COMMON_NTWK_PMAC_ADD 59
183#define OPCODE_COMMON_NTWK_PMAC_DEL 60
184#define OPCODE_COMMON_FUNCTION_RESET 61
185#define OPCODE_COMMON_MANAGE_FAT 68
186#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
187#define OPCODE_COMMON_GET_BEACON_STATE 70
188#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
189#define OPCODE_COMMON_GET_PHY_DETAILS 102
190#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
191#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
192#define OPCODE_COMMON_WRITE_OBJECT 172
193
194#define OPCODE_ETH_RSS_CONFIG 1
195#define OPCODE_ETH_ACPI_CONFIG 2
196#define OPCODE_ETH_PROMISCUOUS 3
197#define OPCODE_ETH_GET_STATISTICS 4
198#define OPCODE_ETH_TX_CREATE 7
199#define OPCODE_ETH_RX_CREATE 8
200#define OPCODE_ETH_TX_DESTROY 9
201#define OPCODE_ETH_RX_DESTROY 10
202#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
203#define OPCODE_ETH_GET_PPORT_STATS 18
204
205#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
206#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
207#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
208
209struct be_cmd_req_hdr {
210 u8 opcode; /* dword 0 */
211 u8 subsystem; /* dword 0 */
212 u8 port_number; /* dword 0 */
213 u8 domain; /* dword 0 */
214 u32 timeout; /* dword 1 */
215 u32 request_length; /* dword 2 */
216 u8 version; /* dword 3 */
217 u8 rsvd[3]; /* dword 3 */
218};
219
220#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
221#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
222struct be_cmd_resp_hdr {
223 u32 info; /* dword 0 */
224 u32 status; /* dword 1 */
225 u32 response_length; /* dword 2 */
226 u32 actual_resp_len; /* dword 3 */
227};
228
229struct phys_addr {
230 u32 lo;
231 u32 hi;
232};
233
234/**************************
235 * BE Command definitions *
236 **************************/
237
238/* Pseudo amap definition in which each bit of the actual structure is defined
239 * as a byte: used to calculate offset/shift/mask of each field */
240struct amap_eq_context {
241 u8 cidx[13]; /* dword 0*/
242 u8 rsvd0[3]; /* dword 0*/
243 u8 epidx[13]; /* dword 0*/
244 u8 valid; /* dword 0*/
245 u8 rsvd1; /* dword 0*/
246 u8 size; /* dword 0*/
247 u8 pidx[13]; /* dword 1*/
248 u8 rsvd2[3]; /* dword 1*/
249 u8 pd[10]; /* dword 1*/
250 u8 count[3]; /* dword 1*/
251 u8 solevent; /* dword 1*/
252 u8 stalled; /* dword 1*/
253 u8 armed; /* dword 1*/
254 u8 rsvd3[4]; /* dword 2*/
255 u8 func[8]; /* dword 2*/
256 u8 rsvd4; /* dword 2*/
257 u8 delaymult[10]; /* dword 2*/
258 u8 rsvd5[2]; /* dword 2*/
259 u8 phase[2]; /* dword 2*/
260 u8 nodelay; /* dword 2*/
261 u8 rsvd6[4]; /* dword 2*/
262 u8 rsvd7[32]; /* dword 3*/
263} __packed;
264
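/* A minimal sketch of how a pseudo-amap is consumed: since each bit of the
 * real EQ context is represented by one byte above, AMAP_SET_BITS() can
 * derive a field's dword offset, shift and mask from the struct layout
 * alone. The EQ-create path programs the context roughly like this
 * (illustrative calls, assuming a zeroed context buffer ctxt):
 *
 *	AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1);
 *	AMAP_SET_BITS(struct amap_eq_context, delaymult, ctxt, eq_delay);
 *	be_dws_cpu_to_le(ctxt, sizeof(struct amap_eq_context) / 8);
 */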
265struct be_cmd_req_eq_create {
266 struct be_cmd_req_hdr hdr;
267 u16 num_pages; /* sword */
268 u16 rsvd0; /* sword */
269 u8 context[sizeof(struct amap_eq_context) / 8];
270 struct phys_addr pages[8];
271} __packed;
272
273struct be_cmd_resp_eq_create {
274 struct be_cmd_resp_hdr resp_hdr;
275 u16 eq_id; /* sword */
276 u16 rsvd0; /* sword */
277} __packed;
278
279/******************** Mac query ***************************/
280enum {
281 MAC_ADDRESS_TYPE_STORAGE = 0x0,
282 MAC_ADDRESS_TYPE_NETWORK = 0x1,
283 MAC_ADDRESS_TYPE_PD = 0x2,
284 MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
285};
286
287struct mac_addr {
288 u16 size_of_struct;
289 u8 addr[ETH_ALEN];
290} __packed;
291
292struct be_cmd_req_mac_query {
293 struct be_cmd_req_hdr hdr;
294 u8 type;
295 u8 permanent;
296 u16 if_id;
297} __packed;
298
299struct be_cmd_resp_mac_query {
300 struct be_cmd_resp_hdr hdr;
301 struct mac_addr mac;
302};
303
304/******************** PMac Add ***************************/
305struct be_cmd_req_pmac_add {
306 struct be_cmd_req_hdr hdr;
307 u32 if_id;
308 u8 mac_address[ETH_ALEN];
309 u8 rsvd0[2];
310} __packed;
311
312struct be_cmd_resp_pmac_add {
313 struct be_cmd_resp_hdr hdr;
314 u32 pmac_id;
315};
316
317/******************** PMac Del ***************************/
318struct be_cmd_req_pmac_del {
319 struct be_cmd_req_hdr hdr;
320 u32 if_id;
321 u32 pmac_id;
322};
323
324/******************** Create CQ ***************************/
325/* Pseudo amap definition in which each bit of the actual structure is defined
326 * as a byte: used to calculate offset/shift/mask of each field */
327struct amap_cq_context_be {
328 u8 cidx[11]; /* dword 0*/
329 u8 rsvd0; /* dword 0*/
330 u8 coalescwm[2]; /* dword 0*/
331 u8 nodelay; /* dword 0*/
332 u8 epidx[11]; /* dword 0*/
333 u8 rsvd1; /* dword 0*/
334 u8 count[2]; /* dword 0*/
335 u8 valid; /* dword 0*/
336 u8 solevent; /* dword 0*/
337 u8 eventable; /* dword 0*/
338 u8 pidx[11]; /* dword 1*/
339 u8 rsvd2; /* dword 1*/
340 u8 pd[10]; /* dword 1*/
341 u8 eqid[8]; /* dword 1*/
342 u8 stalled; /* dword 1*/
343 u8 armed; /* dword 1*/
344 u8 rsvd3[4]; /* dword 2*/
345 u8 func[8]; /* dword 2*/
346 u8 rsvd4[20]; /* dword 2*/
347 u8 rsvd5[32]; /* dword 3*/
348} __packed;
349
350struct amap_cq_context_lancer {
351 u8 rsvd0[12]; /* dword 0*/
352 u8 coalescwm[2]; /* dword 0*/
353 u8 nodelay; /* dword 0*/
354 u8 rsvd1[12]; /* dword 0*/
355 u8 count[2]; /* dword 0*/
356 u8 valid; /* dword 0*/
357 u8 rsvd2; /* dword 0*/
358 u8 eventable; /* dword 0*/
359 u8 eqid[16]; /* dword 1*/
360 u8 rsvd3[15]; /* dword 1*/
361 u8 armed; /* dword 1*/
362 u8 rsvd4[32]; /* dword 2*/
363 u8 rsvd5[32]; /* dword 3*/
364} __packed;
365
366struct be_cmd_req_cq_create {
367 struct be_cmd_req_hdr hdr;
368 u16 num_pages;
369 u8 page_size;
370 u8 rsvd0;
371 u8 context[sizeof(struct amap_cq_context_be) / 8];
372 struct phys_addr pages[8];
373} __packed;
374
375
376struct be_cmd_resp_cq_create {
377 struct be_cmd_resp_hdr hdr;
378 u16 cq_id;
379 u16 rsvd0;
380} __packed;
381
382struct be_cmd_req_get_fat {
383 struct be_cmd_req_hdr hdr;
384 u32 fat_operation;
385 u32 read_log_offset;
386 u32 read_log_length;
387 u32 data_buffer_size;
388 u32 data_buffer[1];
389} __packed;
390
391struct be_cmd_resp_get_fat {
392 struct be_cmd_resp_hdr hdr;
393 u32 log_size;
394 u32 read_log_length;
395 u32 rsvd[2];
396 u32 data_buffer[1];
397} __packed;
398
399
400/******************** Create MCCQ ***************************/
401/* Pseudo amap definition in which each bit of the actual structure is defined
402 * as a byte: used to calculate offset/shift/mask of each field */
403struct amap_mcc_context_be {
404 u8 con_index[14];
405 u8 rsvd0[2];
406 u8 ring_size[4];
407 u8 fetch_wrb;
408 u8 fetch_r2t;
409 u8 cq_id[10];
410 u8 prod_index[14];
411 u8 fid[8];
412 u8 pdid[9];
413 u8 valid;
414 u8 rsvd1[32];
415 u8 rsvd2[32];
416} __packed;
417
418struct amap_mcc_context_lancer {
419 u8 async_cq_id[16];
420 u8 ring_size[4];
421 u8 rsvd0[12];
422 u8 rsvd1[31];
423 u8 valid;
424 u8 async_cq_valid[1];
425 u8 rsvd2[31];
426 u8 rsvd3[32];
427} __packed;
428
429struct be_cmd_req_mcc_create {
430 struct be_cmd_req_hdr hdr;
431 u16 num_pages;
432 u16 cq_id;
433 u8 context[sizeof(struct amap_mcc_context_be) / 8];
434 struct phys_addr pages[8];
435} __packed;
436
437struct be_cmd_req_mcc_ext_create {
438 struct be_cmd_req_hdr hdr;
439 u16 num_pages;
440 u16 cq_id;
441 u32 async_event_bitmap[1];
442 u8 context[sizeof(struct amap_mcc_context_be) / 8];
443 struct phys_addr pages[8];
444} __packed;
445
446struct be_cmd_resp_mcc_create {
447 struct be_cmd_resp_hdr hdr;
448 u16 id;
449 u16 rsvd0;
450} __packed;
451
452/******************** Create TxQ ***************************/
453#define BE_ETH_TX_RING_TYPE_STANDARD 2
454#define BE_ULP1_NUM 1
455
456/* Pseudo amap definition in which each bit of the actual structure is defined
457 * as a byte: used to calculate offset/shift/mask of each field */
458struct amap_tx_context {
459 u8 if_id[16]; /* dword 0 */
460 u8 tx_ring_size[4]; /* dword 0 */
461 u8 rsvd1[26]; /* dword 0 */
462 u8 pci_func_id[8]; /* dword 1 */
463 u8 rsvd2[9]; /* dword 1 */
464 u8 ctx_valid; /* dword 1 */
465 u8 cq_id_send[16]; /* dword 2 */
466 u8 rsvd3[16]; /* dword 2 */
467 u8 rsvd4[32]; /* dword 3 */
468 u8 rsvd5[32]; /* dword 4 */
469 u8 rsvd6[32]; /* dword 5 */
470 u8 rsvd7[32]; /* dword 6 */
471 u8 rsvd8[32]; /* dword 7 */
472 u8 rsvd9[32]; /* dword 8 */
473 u8 rsvd10[32]; /* dword 9 */
474 u8 rsvd11[32]; /* dword 10 */
475 u8 rsvd12[32]; /* dword 11 */
476 u8 rsvd13[32]; /* dword 12 */
477 u8 rsvd14[32]; /* dword 13 */
478 u8 rsvd15[32]; /* dword 14 */
479 u8 rsvd16[32]; /* dword 15 */
480} __packed;
481
482struct be_cmd_req_eth_tx_create {
483 struct be_cmd_req_hdr hdr;
484 u8 num_pages;
485 u8 ulp_num;
486 u8 type;
487 u8 bound_port;
488 u8 context[sizeof(struct amap_tx_context) / 8];
489 struct phys_addr pages[8];
490} __packed;
491
492struct be_cmd_resp_eth_tx_create {
493 struct be_cmd_resp_hdr hdr;
494 u16 cid;
495 u16 rsvd0;
496} __packed;
497
498/******************** Create RxQ ***************************/
499struct be_cmd_req_eth_rx_create {
500 struct be_cmd_req_hdr hdr;
501 u16 cq_id;
502 u8 frag_size;
503 u8 num_pages;
504 struct phys_addr pages[2];
505 u32 interface_id;
506 u16 max_frame_size;
507 u16 rsvd0;
508 u32 rss_queue;
509} __packed;
510
511struct be_cmd_resp_eth_rx_create {
512 struct be_cmd_resp_hdr hdr;
513 u16 id;
514 u8 rss_id;
515 u8 rsvd0;
516} __packed;
517
518/******************** Q Destroy ***************************/
519/* Type of Queue to be destroyed */
520enum {
521 QTYPE_EQ = 1,
522 QTYPE_CQ,
523 QTYPE_TXQ,
524 QTYPE_RXQ,
525 QTYPE_MCCQ
526};
527
528struct be_cmd_req_q_destroy {
529 struct be_cmd_req_hdr hdr;
530 u16 id;
531 u16 bypass_flush; /* valid only for rx q destroy */
532} __packed;
533
534/************ I/f Create (it's actually I/f Config Create) **********/
535
536/* Capability flags for the i/f */
537enum be_if_flags {
538 BE_IF_FLAGS_RSS = 0x4,
539 BE_IF_FLAGS_PROMISCUOUS = 0x8,
540 BE_IF_FLAGS_BROADCAST = 0x10,
541 BE_IF_FLAGS_UNTAGGED = 0x20,
542 BE_IF_FLAGS_ULP = 0x40,
543 BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
544 BE_IF_FLAGS_VLAN = 0x100,
545 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
546 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
547 BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
548 BE_IF_FLAGS_MULTICAST = 0x1000
549};
550
551/* An RX interface is an object with one or more MAC addresses and
552 * filtering capabilities. */
553struct be_cmd_req_if_create {
554 struct be_cmd_req_hdr hdr;
555 u32 version; /* ignore currently */
556 u32 capability_flags;
557 u32 enable_flags;
558 u8 mac_addr[ETH_ALEN];
559 u8 rsvd0;
560 u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
561 u32 vlan_tag; /* not used currently */
562} __packed;
563
564struct be_cmd_resp_if_create {
565 struct be_cmd_resp_hdr hdr;
566 u32 interface_id;
567 u32 pmac_id;
568};
569
570/****** I/f Destroy (it's actually I/f Config Destroy) **********/
571struct be_cmd_req_if_destroy {
572 struct be_cmd_req_hdr hdr;
573 u32 interface_id;
574};
575
576/*************** HW Stats Get **********************************/
577struct be_port_rxf_stats_v0 {
578 u32 rx_bytes_lsd; /* dword 0*/
579 u32 rx_bytes_msd; /* dword 1*/
580 u32 rx_total_frames; /* dword 2*/
581 u32 rx_unicast_frames; /* dword 3*/
582 u32 rx_multicast_frames; /* dword 4*/
583 u32 rx_broadcast_frames; /* dword 5*/
584 u32 rx_crc_errors; /* dword 6*/
585 u32 rx_alignment_symbol_errors; /* dword 7*/
586 u32 rx_pause_frames; /* dword 8*/
587 u32 rx_control_frames; /* dword 9*/
588 u32 rx_in_range_errors; /* dword 10*/
589 u32 rx_out_range_errors; /* dword 11*/
590 u32 rx_frame_too_long; /* dword 12*/
591 u32 rx_address_match_errors; /* dword 13*/
592 u32 rx_vlan_mismatch; /* dword 14*/
593 u32 rx_dropped_too_small; /* dword 15*/
594 u32 rx_dropped_too_short; /* dword 16*/
595 u32 rx_dropped_header_too_small; /* dword 17*/
596 u32 rx_dropped_tcp_length; /* dword 18*/
597 u32 rx_dropped_runt; /* dword 19*/
598 u32 rx_64_byte_packets; /* dword 20*/
599 u32 rx_65_127_byte_packets; /* dword 21*/
600 u32 rx_128_256_byte_packets; /* dword 22*/
601 u32 rx_256_511_byte_packets; /* dword 23*/
602 u32 rx_512_1023_byte_packets; /* dword 24*/
603 u32 rx_1024_1518_byte_packets; /* dword 25*/
604 u32 rx_1519_2047_byte_packets; /* dword 26*/
605 u32 rx_2048_4095_byte_packets; /* dword 27*/
606 u32 rx_4096_8191_byte_packets; /* dword 28*/
607 u32 rx_8192_9216_byte_packets; /* dword 29*/
608 u32 rx_ip_checksum_errs; /* dword 30*/
609 u32 rx_tcp_checksum_errs; /* dword 31*/
610 u32 rx_udp_checksum_errs; /* dword 32*/
611 u32 rx_non_rss_packets; /* dword 33*/
612 u32 rx_ipv4_packets; /* dword 34*/
613 u32 rx_ipv6_packets; /* dword 35*/
614 u32 rx_ipv4_bytes_lsd; /* dword 36*/
615 u32 rx_ipv4_bytes_msd; /* dword 37*/
616 u32 rx_ipv6_bytes_lsd; /* dword 38*/
617 u32 rx_ipv6_bytes_msd; /* dword 39*/
618 u32 rx_chute1_packets; /* dword 40*/
619 u32 rx_chute2_packets; /* dword 41*/
620 u32 rx_chute3_packets; /* dword 42*/
621 u32 rx_management_packets; /* dword 43*/
622 u32 rx_switched_unicast_packets; /* dword 44*/
623 u32 rx_switched_multicast_packets; /* dword 45*/
624 u32 rx_switched_broadcast_packets; /* dword 46*/
625 u32 tx_bytes_lsd; /* dword 47*/
626 u32 tx_bytes_msd; /* dword 48*/
627 u32 tx_unicastframes; /* dword 49*/
628 u32 tx_multicastframes; /* dword 50*/
629 u32 tx_broadcastframes; /* dword 51*/
630 u32 tx_pauseframes; /* dword 52*/
631 u32 tx_controlframes; /* dword 53*/
632 u32 tx_64_byte_packets; /* dword 54*/
633 u32 tx_65_127_byte_packets; /* dword 55*/
634 u32 tx_128_256_byte_packets; /* dword 56*/
635 u32 tx_256_511_byte_packets; /* dword 57*/
636 u32 tx_512_1023_byte_packets; /* dword 58*/
637 u32 tx_1024_1518_byte_packets; /* dword 59*/
638 u32 tx_1519_2047_byte_packets; /* dword 60*/
639 u32 tx_2048_4095_byte_packets; /* dword 61*/
640 u32 tx_4096_8191_byte_packets; /* dword 62*/
641 u32 tx_8192_9216_byte_packets; /* dword 63*/
642 u32 rx_fifo_overflow; /* dword 64*/
643 u32 rx_input_fifo_overflow; /* dword 65*/
644};
645
646struct be_rxf_stats_v0 {
647 struct be_port_rxf_stats_v0 port[2];
648 u32 rx_drops_no_pbuf; /* dword 132*/
649 u32 rx_drops_no_txpb; /* dword 133*/
650 u32 rx_drops_no_erx_descr; /* dword 134*/
651 u32 rx_drops_no_tpre_descr; /* dword 135*/
652 u32 management_rx_port_packets; /* dword 136*/
653 u32 management_rx_port_bytes; /* dword 137*/
654 u32 management_rx_port_pause_frames; /* dword 138*/
655 u32 management_rx_port_errors; /* dword 139*/
656 u32 management_tx_port_packets; /* dword 140*/
657 u32 management_tx_port_bytes; /* dword 141*/
658 u32 management_tx_port_pause; /* dword 142*/
659 u32 management_rx_port_rxfifo_overflow; /* dword 143*/
660 u32 rx_drops_too_many_frags; /* dword 144*/
661 u32 rx_drops_invalid_ring; /* dword 145*/
662 u32 forwarded_packets; /* dword 146*/
663 u32 rx_drops_mtu; /* dword 147*/
664 u32 rsvd0[7];
665 u32 port0_jabber_events;
666 u32 port1_jabber_events;
667 u32 rsvd1[6];
668};
669
670struct be_erx_stats_v0 {
671 u32 rx_drops_no_fragments[44]; /* dwords 0 to 43 */
672 u32 rsvd[4];
673};
674
675struct be_pmem_stats {
676 u32 eth_red_drops;
677 u32 rsvd[5];
678};
679
680struct be_hw_stats_v0 {
681 struct be_rxf_stats_v0 rxf;
682 u32 rsvd[48];
683 struct be_erx_stats_v0 erx;
684 struct be_pmem_stats pmem;
685};
686
687struct be_cmd_req_get_stats_v0 {
688 struct be_cmd_req_hdr hdr;
689 u8 rsvd[sizeof(struct be_hw_stats_v0)];
690};
691
692struct be_cmd_resp_get_stats_v0 {
693 struct be_cmd_resp_hdr hdr;
694 struct be_hw_stats_v0 hw_stats;
695};
696
697struct lancer_pport_stats {
698 u32 tx_packets_lo;
699 u32 tx_packets_hi;
700 u32 tx_unicast_packets_lo;
701 u32 tx_unicast_packets_hi;
702 u32 tx_multicast_packets_lo;
703 u32 tx_multicast_packets_hi;
704 u32 tx_broadcast_packets_lo;
705 u32 tx_broadcast_packets_hi;
706 u32 tx_bytes_lo;
707 u32 tx_bytes_hi;
708 u32 tx_unicast_bytes_lo;
709 u32 tx_unicast_bytes_hi;
710 u32 tx_multicast_bytes_lo;
711 u32 tx_multicast_bytes_hi;
712 u32 tx_broadcast_bytes_lo;
713 u32 tx_broadcast_bytes_hi;
714 u32 tx_discards_lo;
715 u32 tx_discards_hi;
716 u32 tx_errors_lo;
717 u32 tx_errors_hi;
718 u32 tx_pause_frames_lo;
719 u32 tx_pause_frames_hi;
720 u32 tx_pause_on_frames_lo;
721 u32 tx_pause_on_frames_hi;
722 u32 tx_pause_off_frames_lo;
723 u32 tx_pause_off_frames_hi;
724 u32 tx_internal_mac_errors_lo;
725 u32 tx_internal_mac_errors_hi;
726 u32 tx_control_frames_lo;
727 u32 tx_control_frames_hi;
728 u32 tx_packets_64_bytes_lo;
729 u32 tx_packets_64_bytes_hi;
730 u32 tx_packets_65_to_127_bytes_lo;
731 u32 tx_packets_65_to_127_bytes_hi;
732 u32 tx_packets_128_to_255_bytes_lo;
733 u32 tx_packets_128_to_255_bytes_hi;
734 u32 tx_packets_256_to_511_bytes_lo;
735 u32 tx_packets_256_to_511_bytes_hi;
736 u32 tx_packets_512_to_1023_bytes_lo;
737 u32 tx_packets_512_to_1023_bytes_hi;
738 u32 tx_packets_1024_to_1518_bytes_lo;
739 u32 tx_packets_1024_to_1518_bytes_hi;
740 u32 tx_packets_1519_to_2047_bytes_lo;
741 u32 tx_packets_1519_to_2047_bytes_hi;
742 u32 tx_packets_2048_to_4095_bytes_lo;
743 u32 tx_packets_2048_to_4095_bytes_hi;
744 u32 tx_packets_4096_to_8191_bytes_lo;
745 u32 tx_packets_4096_to_8191_bytes_hi;
746 u32 tx_packets_8192_to_9216_bytes_lo;
747 u32 tx_packets_8192_to_9216_bytes_hi;
748 u32 tx_lso_packets_lo;
749 u32 tx_lso_packets_hi;
750 u32 rx_packets_lo;
751 u32 rx_packets_hi;
752 u32 rx_unicast_packets_lo;
753 u32 rx_unicast_packets_hi;
754 u32 rx_multicast_packets_lo;
755 u32 rx_multicast_packets_hi;
756 u32 rx_broadcast_packets_lo;
757 u32 rx_broadcast_packets_hi;
758 u32 rx_bytes_lo;
759 u32 rx_bytes_hi;
760 u32 rx_unicast_bytes_lo;
761 u32 rx_unicast_bytes_hi;
762 u32 rx_multicast_bytes_lo;
763 u32 rx_multicast_bytes_hi;
764 u32 rx_broadcast_bytes_lo;
765 u32 rx_broadcast_bytes_hi;
766 u32 rx_unknown_protos;
767 u32 rsvd_69; /* Word 69 is reserved */
768 u32 rx_discards_lo;
769 u32 rx_discards_hi;
770 u32 rx_errors_lo;
771 u32 rx_errors_hi;
772 u32 rx_crc_errors_lo;
773 u32 rx_crc_errors_hi;
774 u32 rx_alignment_errors_lo;
775 u32 rx_alignment_errors_hi;
776 u32 rx_symbol_errors_lo;
777 u32 rx_symbol_errors_hi;
778 u32 rx_pause_frames_lo;
779 u32 rx_pause_frames_hi;
780 u32 rx_pause_on_frames_lo;
781 u32 rx_pause_on_frames_hi;
782 u32 rx_pause_off_frames_lo;
783 u32 rx_pause_off_frames_hi;
784 u32 rx_frames_too_long_lo;
785 u32 rx_frames_too_long_hi;
786 u32 rx_internal_mac_errors_lo;
787 u32 rx_internal_mac_errors_hi;
788 u32 rx_undersize_packets;
789 u32 rx_oversize_packets;
790 u32 rx_fragment_packets;
791 u32 rx_jabbers;
792 u32 rx_control_frames_lo;
793 u32 rx_control_frames_hi;
794 u32 rx_control_frames_unknown_opcode_lo;
795 u32 rx_control_frames_unknown_opcode_hi;
796 u32 rx_in_range_errors;
797 u32 rx_out_of_range_errors;
798 u32 rx_address_match_errors;
799 u32 rx_vlan_mismatch_errors;
800 u32 rx_dropped_too_small;
801 u32 rx_dropped_too_short;
802 u32 rx_dropped_header_too_small;
803 u32 rx_dropped_invalid_tcp_length;
804 u32 rx_dropped_runt;
805 u32 rx_ip_checksum_errors;
806 u32 rx_tcp_checksum_errors;
807 u32 rx_udp_checksum_errors;
808 u32 rx_non_rss_packets;
809 u32 rsvd_111;
810 u32 rx_ipv4_packets_lo;
811 u32 rx_ipv4_packets_hi;
812 u32 rx_ipv6_packets_lo;
813 u32 rx_ipv6_packets_hi;
814 u32 rx_ipv4_bytes_lo;
815 u32 rx_ipv4_bytes_hi;
816 u32 rx_ipv6_bytes_lo;
817 u32 rx_ipv6_bytes_hi;
818 u32 rx_nic_packets_lo;
819 u32 rx_nic_packets_hi;
820 u32 rx_tcp_packets_lo;
821 u32 rx_tcp_packets_hi;
822 u32 rx_iscsi_packets_lo;
823 u32 rx_iscsi_packets_hi;
824 u32 rx_management_packets_lo;
825 u32 rx_management_packets_hi;
826 u32 rx_switched_unicast_packets_lo;
827 u32 rx_switched_unicast_packets_hi;
828 u32 rx_switched_multicast_packets_lo;
829 u32 rx_switched_multicast_packets_hi;
830 u32 rx_switched_broadcast_packets_lo;
831 u32 rx_switched_broadcast_packets_hi;
832 u32 num_forwards_lo;
833 u32 num_forwards_hi;
834 u32 rx_fifo_overflow;
835 u32 rx_input_fifo_overflow;
836 u32 rx_drops_too_many_frags_lo;
837 u32 rx_drops_too_many_frags_hi;
838 u32 rx_drops_invalid_queue;
839 u32 rsvd_141;
840 u32 rx_drops_mtu_lo;
841 u32 rx_drops_mtu_hi;
842 u32 rx_packets_64_bytes_lo;
843 u32 rx_packets_64_bytes_hi;
844 u32 rx_packets_65_to_127_bytes_lo;
845 u32 rx_packets_65_to_127_bytes_hi;
846 u32 rx_packets_128_to_255_bytes_lo;
847 u32 rx_packets_128_to_255_bytes_hi;
848 u32 rx_packets_256_to_511_bytes_lo;
849 u32 rx_packets_256_to_511_bytes_hi;
850 u32 rx_packets_512_to_1023_bytes_lo;
851 u32 rx_packets_512_to_1023_bytes_hi;
852 u32 rx_packets_1024_to_1518_bytes_lo;
853 u32 rx_packets_1024_to_1518_bytes_hi;
854 u32 rx_packets_1519_to_2047_bytes_lo;
855 u32 rx_packets_1519_to_2047_bytes_hi;
856 u32 rx_packets_2048_to_4095_bytes_lo;
857 u32 rx_packets_2048_to_4095_bytes_hi;
858 u32 rx_packets_4096_to_8191_bytes_lo;
859 u32 rx_packets_4096_to_8191_bytes_hi;
860 u32 rx_packets_8192_to_9216_bytes_lo;
861 u32 rx_packets_8192_to_9216_bytes_hi;
862};
863
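/* The _lo/_hi pairs above are the two halves of 64-bit counters; a minimal
 * sketch (hypothetical helper) of reassembling one:
 */
static inline u64 example_pport_counter(u32 lo, u32 hi)
{
	return ((u64)hi << 32) | lo;
}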
864struct pport_stats_params {
865 u16 pport_num;
866 u8 rsvd;
867 u8 reset_stats;
868};
869
870struct lancer_cmd_req_pport_stats {
871 struct be_cmd_req_hdr hdr;
872 union {
873 struct pport_stats_params params;
874 u8 rsvd[sizeof(struct lancer_pport_stats)];
875 } cmd_params;
876};
877
878struct lancer_cmd_resp_pport_stats {
879 struct be_cmd_resp_hdr hdr;
880 struct lancer_pport_stats pport_stats;
881};
882
883static inline struct lancer_pport_stats*
884 pport_stats_from_cmd(struct be_adapter *adapter)
885{
886 struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
887 return &cmd->pport_stats;
888}
889
890struct be_cmd_req_get_cntl_addnl_attribs {
891 struct be_cmd_req_hdr hdr;
892 u8 rsvd[8];
893};
894
895struct be_cmd_resp_get_cntl_addnl_attribs {
896 struct be_cmd_resp_hdr hdr;
897 u16 ipl_file_number;
898 u8 ipl_file_version;
899 u8 rsvd0;
900 u8 on_die_temperature; /* in degrees centigrade */
901 u8 rsvd1[3];
902};
903
904struct be_cmd_req_vlan_config {
905 struct be_cmd_req_hdr hdr;
906 u8 interface_id;
907 u8 promiscuous;
908 u8 untagged;
909 u8 num_vlan;
910 u16 normal_vlan[64];
911} __packed;
912
913/******************* RX FILTER ******************************/
914#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
915struct macaddr {
916 u8 byte[ETH_ALEN];
917};
918
919struct be_cmd_req_rx_filter {
920 struct be_cmd_req_hdr hdr;
921 u32 global_flags_mask;
922 u32 global_flags;
923 u32 if_flags_mask;
924 u32 if_flags;
925 u32 if_id;
926 u32 mcast_num;
927 struct macaddr mcast_mac[BE_MAX_MC];
928};
929
930/******************** Link Status Query *******************/
931struct be_cmd_req_link_status {
932 struct be_cmd_req_hdr hdr;
933 u32 rsvd;
934};
935
936enum {
937 PHY_LINK_DUPLEX_NONE = 0x0,
938 PHY_LINK_DUPLEX_HALF = 0x1,
939 PHY_LINK_DUPLEX_FULL = 0x2
940};
941
942enum {
943 PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
944 PHY_LINK_SPEED_10MBPS = 0x1,
945 PHY_LINK_SPEED_100MBPS = 0x2,
946 PHY_LINK_SPEED_1GBPS = 0x3,
947 PHY_LINK_SPEED_10GBPS = 0x4
948};
949
950struct be_cmd_resp_link_status {
951 struct be_cmd_resp_hdr hdr;
952 u8 physical_port;
953 u8 mac_duplex;
954 u8 mac_speed;
955 u8 mac_fault;
956 u8 mgmt_mac_duplex;
957 u8 mgmt_mac_speed;
958 u16 link_speed;
959 u32 rsvd0;
960} __packed;
961
962/******************** Port Identification ***************************/
963/* Identifies the type of port attached to NIC */
964struct be_cmd_req_port_type {
965 struct be_cmd_req_hdr hdr;
966 u32 page_num;
967 u32 port;
968};
969
970enum {
971 TR_PAGE_A0 = 0xa0,
972 TR_PAGE_A2 = 0xa2
973};
974
975struct be_cmd_resp_port_type {
976 struct be_cmd_resp_hdr hdr;
977 u32 page_num;
978 u32 port;
979 struct data {
980 u8 identifier;
981 u8 identifier_ext;
982 u8 connector;
983 u8 transceiver[8];
984 u8 rsvd0[3];
985 u8 length_km;
986 u8 length_hm;
987 u8 length_om1;
988 u8 length_om2;
989 u8 length_cu;
990 u8 length_cu_m;
991 u8 vendor_name[16];
992 u8 rsvd;
993 u8 vendor_oui[3];
994 u8 vendor_pn[16];
995 u8 vendor_rev[4];
996 } data;
997};
998
999/******************** Get FW Version *******************/
1000struct be_cmd_req_get_fw_version {
1001 struct be_cmd_req_hdr hdr;
1002 u8 rsvd0[FW_VER_LEN];
1003 u8 rsvd1[FW_VER_LEN];
1004} __packed;
1005
1006struct be_cmd_resp_get_fw_version {
1007 struct be_cmd_resp_hdr hdr;
1008 u8 firmware_version_string[FW_VER_LEN];
1009 u8 fw_on_flash_version_string[FW_VER_LEN];
1010} __packed;
1011
1012/******************** Set Flow Control *******************/
1013struct be_cmd_req_set_flow_control {
1014 struct be_cmd_req_hdr hdr;
1015 u16 tx_flow_control;
1016 u16 rx_flow_control;
1017} __packed;
1018
1019/******************** Get Flow Control *******************/
1020struct be_cmd_req_get_flow_control {
1021 struct be_cmd_req_hdr hdr;
1022 u32 rsvd;
1023};
1024
1025struct be_cmd_resp_get_flow_control {
1026 struct be_cmd_resp_hdr hdr;
1027 u16 tx_flow_control;
1028 u16 rx_flow_control;
1029} __packed;
1030
1031/******************** Modify EQ Delay *******************/
1032struct be_cmd_req_modify_eq_delay {
1033 struct be_cmd_req_hdr hdr;
1034 u32 num_eq;
1035 struct {
1036 u32 eq_id;
1037 u32 phase;
1038 u32 delay_multiplier;
1039 } delay[8];
1040} __packed;
1041
1042struct be_cmd_resp_modify_eq_delay {
1043 struct be_cmd_resp_hdr hdr;
1044 u32 rsvd0;
1045} __packed;
1046
1047/******************** Get FW Config *******************/
1048#define BE_FUNCTION_CAPS_RSS 0x2
1049struct be_cmd_req_query_fw_cfg {
1050 struct be_cmd_req_hdr hdr;
1051 u32 rsvd[31];
1052};
1053
1054struct be_cmd_resp_query_fw_cfg {
1055 struct be_cmd_resp_hdr hdr;
1056 u32 be_config_number;
1057 u32 asic_revision;
1058 u32 phys_port;
1059 u32 function_mode;
1060 u32 rsvd[26];
1061 u32 function_caps;
1062};
1063
1064/******************** RSS Config *******************/
1065/* RSS types */
1066#define RSS_ENABLE_NONE 0x0
1067#define RSS_ENABLE_IPV4 0x1
1068#define RSS_ENABLE_TCP_IPV4 0x2
1069#define RSS_ENABLE_IPV6 0x4
1070#define RSS_ENABLE_TCP_IPV6 0x8
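/* These RSS types are bit flags; a caller would typically OR them together,
 * e.g. (illustrative only): enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4;
 */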
1071
1072struct be_cmd_req_rss_config {
1073 struct be_cmd_req_hdr hdr;
1074 u32 if_id;
1075 u16 enable_rss;
1076 u16 cpu_table_size_log2;
1077 u32 hash[10];
1078 u8 cpu_table[128];
1079 u8 flush;
1080 u8 rsvd0[3];
1081};
1082
1083/******************** Port Beacon ***************************/
1084
1085#define BEACON_STATE_ENABLED 0x1
1086#define BEACON_STATE_DISABLED 0x0
1087
1088struct be_cmd_req_enable_disable_beacon {
1089 struct be_cmd_req_hdr hdr;
1090 u8 port_num;
1091 u8 beacon_state;
1092 u8 beacon_duration;
1093 u8 status_duration;
1094} __packed;
1095
1096struct be_cmd_resp_enable_disable_beacon {
1097 struct be_cmd_resp_hdr resp_hdr;
1098 u32 rsvd0;
1099} __packed;
1100
1101struct be_cmd_req_get_beacon_state {
1102 struct be_cmd_req_hdr hdr;
1103 u8 port_num;
1104 u8 rsvd0;
1105 u16 rsvd1;
1106} __packed;
1107
1108struct be_cmd_resp_get_beacon_state {
1109 struct be_cmd_resp_hdr resp_hdr;
1110 u8 beacon_state;
1111 u8 rsvd0[3];
1112} __packed;
1113
1114/****************** Firmware Flash ******************/
1115struct flashrom_params {
1116 u32 op_code;
1117 u32 op_type;
1118 u32 data_buf_size;
1119 u32 offset;
1120 u8 data_buf[4];
1121};
1122
1123struct be_cmd_write_flashrom {
1124 struct be_cmd_req_hdr hdr;
1125 struct flashrom_params params;
1126};
1127
1128/**************** Lancer Firmware Flash ************/
1129struct amap_lancer_write_obj_context {
1130 u8 write_length[24];
1131 u8 reserved1[7];
1132 u8 eof;
1133} __packed;
1134
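/* Note: each u8 in the amap struct above stands for one bit, so the
 * sizeof()/8 below converts its bit count to the context's real byte size.
 */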
1135struct lancer_cmd_req_write_object {
1136 struct be_cmd_req_hdr hdr;
1137 u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
1138 u32 write_offset;
1139 u8 object_name[104];
1140 u32 descriptor_count;
1141 u32 buf_len;
1142 u32 addr_low;
1143 u32 addr_high;
1144};
1145
1146struct lancer_cmd_resp_write_object {
1147 u8 opcode;
1148 u8 subsystem;
1149 u8 rsvd1[2];
1150 u8 status;
1151 u8 additional_status;
1152 u8 rsvd2[2];
1153 u32 resp_len;
1154 u32 actual_resp_len;
1155 u32 actual_write_len;
1156};
1157
1158/************************ WOL *******************************/
1159struct be_cmd_req_acpi_wol_magic_config {
1160 struct be_cmd_req_hdr hdr;
1161 u32 rsvd0[145];
1162 u8 magic_mac[6];
1163 u8 rsvd2[2];
1164} __packed;
1165
1166/********************** LoopBack test *********************/
1167struct be_cmd_req_loopback_test {
1168 struct be_cmd_req_hdr hdr;
1169 u32 loopback_type;
1170 u32 num_pkts;
1171 u64 pattern;
1172 u32 src_port;
1173 u32 dest_port;
1174 u32 pkt_size;
1175};
1176
1177struct be_cmd_resp_loopback_test {
1178 struct be_cmd_resp_hdr resp_hdr;
1179 u32 status;
1180 u32 num_txfer;
1181 u32 num_rx;
1182 u32 miscomp_off;
1183 u32 ticks_compl;
1184};
1185
1186struct be_cmd_req_set_lmode {
1187 struct be_cmd_req_hdr hdr;
1188 u8 src_port;
1189 u8 dest_port;
1190 u8 loopback_type;
1191 u8 loopback_state;
1192};
1193
1194struct be_cmd_resp_set_lmode {
1195 struct be_cmd_resp_hdr resp_hdr;
1196 u8 rsvd0[4];
1197};
1198
1199/********************** DDR DMA test *********************/
1200struct be_cmd_req_ddrdma_test {
1201 struct be_cmd_req_hdr hdr;
1202 u64 pattern;
1203 u32 byte_count;
1204 u32 rsvd0;
1205 u8 snd_buff[4096];
1206 u8 rsvd1[4096];
1207};
1208
1209struct be_cmd_resp_ddrdma_test {
1210 struct be_cmd_resp_hdr hdr;
1211 u64 pattern;
1212 u32 byte_cnt;
1213 u32 snd_err;
1214 u8 rsvd0[4096];
1215 u8 rcv_buff[4096];
1216};
1217
1218/*********************** SEEPROM Read ***********************/
1219
1220#define BE_READ_SEEPROM_LEN 1024
1221struct be_cmd_req_seeprom_read {
1222 struct be_cmd_req_hdr hdr;
1223 u8 rsvd0[BE_READ_SEEPROM_LEN];
1224};
1225
1226struct be_cmd_resp_seeprom_read {
1227 struct be_cmd_req_hdr hdr;
1228 u8 seeprom_data[BE_READ_SEEPROM_LEN];
1229};
1230
1231enum {
1232 PHY_TYPE_CX4_10GB = 0,
1233 PHY_TYPE_XFP_10GB,
1234 PHY_TYPE_SFP_1GB,
1235 PHY_TYPE_SFP_PLUS_10GB,
1236 PHY_TYPE_KR_10GB,
1237 PHY_TYPE_KX4_10GB,
1238 PHY_TYPE_BASET_10GB,
1239 PHY_TYPE_BASET_1GB,
1240 PHY_TYPE_DISABLED = 255
1241};
1242
1243struct be_cmd_req_get_phy_info {
1244 struct be_cmd_req_hdr hdr;
1245 u8 rsvd0[24];
1246};
1247
1248struct be_phy_info {
1249 u16 phy_type;
1250 u16 interface_type;
1251 u32 misc_params;
1252 u32 future_use[4];
1253};
1254
1255struct be_cmd_resp_get_phy_info {
1256 struct be_cmd_req_hdr hdr;
1257 struct be_phy_info phy_info;
1258};
1259
1260/*********************** Set QOS ***********************/
1261
1262#define BE_QOS_BITS_NIC 1
1263
1264struct be_cmd_req_set_qos {
1265 struct be_cmd_req_hdr hdr;
1266 u32 valid_bits;
1267 u32 max_bps_nic;
1268 u32 rsvd[7];
1269};
1270
1271struct be_cmd_resp_set_qos {
1272 struct be_cmd_resp_hdr hdr;
1273 u32 rsvd;
1274};
1275
1276/*********************** Controller Attributes ***********************/
1277struct be_cmd_req_cntl_attribs {
1278 struct be_cmd_req_hdr hdr;
1279};
1280
1281struct be_cmd_resp_cntl_attribs {
1282 struct be_cmd_resp_hdr hdr;
1283 struct mgmt_controller_attrib attribs;
1284};
1285
1286/*********************** Set driver function ***********************/
1287#define CAPABILITY_SW_TIMESTAMPS 2
1288#define CAPABILITY_BE3_NATIVE_ERX_API 4
1289
1290struct be_cmd_req_set_func_cap {
1291 struct be_cmd_req_hdr hdr;
1292 u32 valid_cap_flags;
1293 u32 cap_flags;
1294 u8 rsvd[212];
1295};
1296
1297struct be_cmd_resp_set_func_cap {
1298 struct be_cmd_resp_hdr hdr;
1299 u32 valid_cap_flags;
1300 u32 cap_flags;
1301 u8 rsvd[212];
1302};
1303
1304/*************** HW Stats Get v1 **********************************/
1305#define BE_TXP_SW_SZ 48
1306struct be_port_rxf_stats_v1 {
1307 u32 rsvd0[12];
1308 u32 rx_crc_errors;
1309 u32 rx_alignment_symbol_errors;
1310 u32 rx_pause_frames;
1311 u32 rx_priority_pause_frames;
1312 u32 rx_control_frames;
1313 u32 rx_in_range_errors;
1314 u32 rx_out_range_errors;
1315 u32 rx_frame_too_long;
1316 u32 rx_address_match_errors;
1317 u32 rx_dropped_too_small;
1318 u32 rx_dropped_too_short;
1319 u32 rx_dropped_header_too_small;
1320 u32 rx_dropped_tcp_length;
1321 u32 rx_dropped_runt;
1322 u32 rsvd1[10];
1323 u32 rx_ip_checksum_errs;
1324 u32 rx_tcp_checksum_errs;
1325 u32 rx_udp_checksum_errs;
1326 u32 rsvd2[7];
1327 u32 rx_switched_unicast_packets;
1328 u32 rx_switched_multicast_packets;
1329 u32 rx_switched_broadcast_packets;
1330 u32 rsvd3[3];
1331 u32 tx_pauseframes;
1332 u32 tx_priority_pauseframes;
1333 u32 tx_controlframes;
1334 u32 rsvd4[10];
1335 u32 rxpp_fifo_overflow_drop;
1336 u32 rx_input_fifo_overflow_drop;
1337 u32 pmem_fifo_overflow_drop;
1338 u32 jabber_events;
1339 u32 rsvd5[3];
1340};
1341
1342
1343struct be_rxf_stats_v1 {
1344 struct be_port_rxf_stats_v1 port[4];
1345 u32 rsvd0[2];
1346 u32 rx_drops_no_pbuf;
1347 u32 rx_drops_no_txpb;
1348 u32 rx_drops_no_erx_descr;
1349 u32 rx_drops_no_tpre_descr;
1350 u32 rsvd1[6];
1351 u32 rx_drops_too_many_frags;
1352 u32 rx_drops_invalid_ring;
1353 u32 forwarded_packets;
1354 u32 rx_drops_mtu;
1355 u32 rsvd2[14];
1356};
1357
1358struct be_erx_stats_v1 {
1359	u32 rx_drops_no_fragments[68];     /* dwords 0 to 67 */
1360 u32 rsvd[4];
1361};
1362
1363struct be_hw_stats_v1 {
1364 struct be_rxf_stats_v1 rxf;
1365 u32 rsvd0[BE_TXP_SW_SZ];
1366 struct be_erx_stats_v1 erx;
1367 struct be_pmem_stats pmem;
1368 u32 rsvd1[3];
1369};
1370
1371struct be_cmd_req_get_stats_v1 {
1372 struct be_cmd_req_hdr hdr;
1373 u8 rsvd[sizeof(struct be_hw_stats_v1)];
1374};
1375
1376struct be_cmd_resp_get_stats_v1 {
1377 struct be_cmd_resp_hdr hdr;
1378 struct be_hw_stats_v1 hw_stats;
1379};
1380
1381static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
1382{
1383 if (adapter->generation == BE_GEN3) {
1384 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
1385
1386 return &cmd->hw_stats;
1387 } else {
1388 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
1389
1390 return &cmd->hw_stats;
1391 }
1392}
1393
1394static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1395{
1396 if (adapter->generation == BE_GEN3) {
1397 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1398
1399 return &hw_stats->erx;
1400 } else {
1401 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1402
1403 return &hw_stats->erx;
1404 }
1405}
1406
1407extern int be_pci_fnum_get(struct be_adapter *adapter);
1408extern int be_cmd_POST(struct be_adapter *adapter);
1409extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1410 u8 type, bool permanent, u32 if_handle);
1411extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1412 u32 if_id, u32 *pmac_id, u32 domain);
1413extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1414 u32 pmac_id, u32 domain);
1415extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1416 u32 en_flags, u8 *mac, bool pmac_invalid,
1417 u32 *if_handle, u32 *pmac_id, u32 domain);
1418extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
1419 u32 domain);
1420extern int be_cmd_eq_create(struct be_adapter *adapter,
1421 struct be_queue_info *eq, int eq_delay);
1422extern int be_cmd_cq_create(struct be_adapter *adapter,
1423 struct be_queue_info *cq, struct be_queue_info *eq,
1424 bool sol_evts, bool no_delay,
1425 int num_cqe_dma_coalesce);
1426extern int be_cmd_mccq_create(struct be_adapter *adapter,
1427 struct be_queue_info *mccq,
1428 struct be_queue_info *cq);
1429extern int be_cmd_txq_create(struct be_adapter *adapter,
1430 struct be_queue_info *txq,
1431 struct be_queue_info *cq);
1432extern int be_cmd_rxq_create(struct be_adapter *adapter,
1433 struct be_queue_info *rxq, u16 cq_id,
1434 u16 frag_size, u16 max_frame_size, u32 if_id,
1435 u32 rss, u8 *rss_id);
1436extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1437 int type);
1438extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
1439 struct be_queue_info *q);
1440extern int be_cmd_link_status_query(struct be_adapter *adapter,
1441 u8 *mac_speed, u16 *link_speed, u32 dom);
1442extern int be_cmd_reset(struct be_adapter *adapter);
1443extern int be_cmd_get_stats(struct be_adapter *adapter,
1444 struct be_dma_mem *nonemb_cmd);
1445extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1446 struct be_dma_mem *nonemb_cmd);
1447extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
1448
1449extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
1450extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
1451 u16 *vtag_array, u32 num, bool untagged,
1452 bool promiscuous);
1453extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
1454extern int be_cmd_set_flow_control(struct be_adapter *adapter,
1455 u32 tx_fc, u32 rx_fc);
1456extern int be_cmd_get_flow_control(struct be_adapter *adapter,
1457 u32 *tx_fc, u32 *rx_fc);
1458extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
1459 u32 *port_num, u32 *function_mode, u32 *function_caps);
1460extern int be_cmd_reset_function(struct be_adapter *adapter);
1461extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1462 u16 table_size);
1463extern int be_process_mcc(struct be_adapter *adapter, int *status);
1464extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
1465 u8 port_num, u8 beacon, u8 status, u8 state);
1466extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
1467 u8 port_num, u32 *state);
1468extern int be_cmd_write_flashrom(struct be_adapter *adapter,
1469 struct be_dma_mem *cmd, u32 flash_oper,
1470 u32 flash_opcode, u32 buf_size);
1471extern int lancer_cmd_write_object(struct be_adapter *adapter,
1472 struct be_dma_mem *cmd,
1473 u32 data_size, u32 data_offset,
1474 const char *obj_name,
1475 u32 *data_written, u8 *addn_status);
1476int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1477 int offset);
1478extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1479 struct be_dma_mem *nonemb_cmd);
1480extern int be_cmd_fw_init(struct be_adapter *adapter);
1481extern int be_cmd_fw_clean(struct be_adapter *adapter);
1482extern void be_async_mcc_enable(struct be_adapter *adapter);
1483extern void be_async_mcc_disable(struct be_adapter *adapter);
1484extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1485 u32 loopback_type, u32 pkt_size,
1486 u32 num_pkts, u64 pattern);
1487extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1488 u32 byte_cnt, struct be_dma_mem *cmd);
1489extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1490 struct be_dma_mem *nonemb_cmd);
1491extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1492 u8 loopback_type, u8 enable);
1493extern int be_cmd_get_phy_info(struct be_adapter *adapter,
1494 struct be_phy_info *phy_info);
1495extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1496extern void be_detect_dump_ue(struct be_adapter *adapter);
1497extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1498extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1499extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1500extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1501extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1502
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
new file mode 100644
index 000000000000..f144a6f99862
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -0,0 +1,714 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include "be.h"
19#include "be_cmds.h"
20#include <linux/ethtool.h>
21
22struct be_ethtool_stat {
23 char desc[ETH_GSTRING_LEN];
24 int type;
25 int size;
26 int offset;
27};
28
29enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field)
32#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
33 FIELDINFO(struct be_tx_stats, field)
34#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
35 FIELDINFO(struct be_rx_stats, field)
36#define DRVSTAT_INFO(field) #field, DRVSTAT,\
37 FIELDINFO(struct be_drv_stats, field)
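/* For illustration (not generated code): DRVSTAT_INFO(tx_events) expands
 * roughly to "tx_events", DRVSTAT, sizeof(field), offsetof(struct
 * be_drv_stats, tx_events); be_get_ethtool_stats() below uses the stored
 * offset to copy each counter straight out of adapter->drv_stats.
 */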
38
39static const struct be_ethtool_stat et_stats[] = {
40 {DRVSTAT_INFO(tx_events)},
41 {DRVSTAT_INFO(rx_crc_errors)},
42 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
43 {DRVSTAT_INFO(rx_pause_frames)},
44 {DRVSTAT_INFO(rx_control_frames)},
45 {DRVSTAT_INFO(rx_in_range_errors)},
46 {DRVSTAT_INFO(rx_out_range_errors)},
47 {DRVSTAT_INFO(rx_frame_too_long)},
48 {DRVSTAT_INFO(rx_address_match_errors)},
49 {DRVSTAT_INFO(rx_dropped_too_small)},
50 {DRVSTAT_INFO(rx_dropped_too_short)},
51 {DRVSTAT_INFO(rx_dropped_header_too_small)},
52 {DRVSTAT_INFO(rx_dropped_tcp_length)},
53 {DRVSTAT_INFO(rx_dropped_runt)},
54 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
55 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
56 {DRVSTAT_INFO(rx_ip_checksum_errs)},
57 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
58 {DRVSTAT_INFO(rx_udp_checksum_errs)},
59 {DRVSTAT_INFO(tx_pauseframes)},
60 {DRVSTAT_INFO(tx_controlframes)},
61 {DRVSTAT_INFO(rx_priority_pause_frames)},
62 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
63 {DRVSTAT_INFO(jabber_events)},
64 {DRVSTAT_INFO(rx_drops_no_pbuf)},
65 {DRVSTAT_INFO(rx_drops_no_txpb)},
66 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
67 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
68 {DRVSTAT_INFO(rx_drops_too_many_frags)},
69 {DRVSTAT_INFO(rx_drops_invalid_ring)},
70 {DRVSTAT_INFO(forwarded_packets)},
71 {DRVSTAT_INFO(rx_drops_mtu)},
72 {DRVSTAT_INFO(eth_red_drops)},
73 {DRVSTAT_INFO(be_on_die_temperature)}
74};
75#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
76
77/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
78 * are first and second members respectively.
79 */
80static const struct be_ethtool_stat et_rx_stats[] = {
81	{DRVSTAT_RX_INFO(rx_bytes)}, /* If moving this member see above note */
82 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
83 {DRVSTAT_RX_INFO(rx_polls)},
84 {DRVSTAT_RX_INFO(rx_events)},
85 {DRVSTAT_RX_INFO(rx_compl)},
86 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
87 {DRVSTAT_RX_INFO(rx_post_fail)},
88 {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
89 {DRVSTAT_RX_INFO(rx_drops_no_frags)}
90};
91#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
92
93/* Stats related to multi TX queues: get_stats routine assumes compl is the
94 * first member
95 */
96static const struct be_ethtool_stat et_tx_stats[] = {
97 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
98 {DRVSTAT_TX_INFO(tx_bytes)},
99 {DRVSTAT_TX_INFO(tx_pkts)},
100 {DRVSTAT_TX_INFO(tx_reqs)},
101 {DRVSTAT_TX_INFO(tx_wrbs)},
103 {DRVSTAT_TX_INFO(tx_stops)}
104};
105#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
106
107static const char et_self_tests[][ETH_GSTRING_LEN] = {
108 "MAC Loopback test",
109 "PHY Loopback test",
110 "External Loopback test",
111 "DDR DMA test",
112 "Link test"
113};
114
115#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
116#define BE_MAC_LOOPBACK 0x0
117#define BE_PHY_LOOPBACK 0x1
118#define BE_ONE_PORT_EXT_LOOPBACK 0x2
119#define BE_NO_LOOPBACK 0xff
120
121static void
122be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
123{
124 struct be_adapter *adapter = netdev_priv(netdev);
125
126 strcpy(drvinfo->driver, DRV_NAME);
127 strcpy(drvinfo->version, DRV_VER);
128 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
129 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
130 drvinfo->testinfo_len = 0;
131 drvinfo->regdump_len = 0;
132 drvinfo->eedump_len = 0;
133}
134
135static int
136be_get_reg_len(struct net_device *netdev)
137{
138 struct be_adapter *adapter = netdev_priv(netdev);
139 u32 log_size = 0;
140
141 if (be_physfn(adapter))
142 be_cmd_get_reg_len(adapter, &log_size);
143
144 return log_size;
145}
146
147static void
148be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
149{
150 struct be_adapter *adapter = netdev_priv(netdev);
151
152 if (be_physfn(adapter)) {
153 memset(buf, 0, regs->len);
154 be_cmd_get_regs(adapter, regs->len, buf);
155 }
156}
157
158static int
159be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
160{
161 struct be_adapter *adapter = netdev_priv(netdev);
162 struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
163 struct be_eq_obj *tx_eq = &adapter->tx_eq;
164
165 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
166 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
167 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
168
169 coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
170 coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
171 coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
172
173 coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
174 coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
175
176 return 0;
177}
178
179/*
180 * This routine is used to set the interrupt coalescing delay
181 */
182static int
183be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
184{
185 struct be_adapter *adapter = netdev_priv(netdev);
186 struct be_rx_obj *rxo;
187 struct be_eq_obj *rx_eq;
188 struct be_eq_obj *tx_eq = &adapter->tx_eq;
189 u32 rx_max, rx_min, rx_cur;
190 int status = 0, i;
191 u32 tx_cur;
192
193 if (coalesce->use_adaptive_tx_coalesce == 1)
194 return -EINVAL;
195
196 for_all_rx_queues(adapter, rxo, i) {
197 rx_eq = &rxo->rx_eq;
198
199 if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
200 rx_eq->cur_eqd = 0;
201 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
202
203 rx_max = coalesce->rx_coalesce_usecs_high;
204 rx_min = coalesce->rx_coalesce_usecs_low;
205 rx_cur = coalesce->rx_coalesce_usecs;
206
207 if (rx_eq->enable_aic) {
208 if (rx_max > BE_MAX_EQD)
209 rx_max = BE_MAX_EQD;
210 if (rx_min > rx_max)
211 rx_min = rx_max;
212 rx_eq->max_eqd = rx_max;
213 rx_eq->min_eqd = rx_min;
214 if (rx_eq->cur_eqd > rx_max)
215 rx_eq->cur_eqd = rx_max;
216 if (rx_eq->cur_eqd < rx_min)
217 rx_eq->cur_eqd = rx_min;
218 } else {
219 if (rx_cur > BE_MAX_EQD)
220 rx_cur = BE_MAX_EQD;
221 if (rx_eq->cur_eqd != rx_cur) {
222 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
223 rx_cur);
224 if (!status)
225 rx_eq->cur_eqd = rx_cur;
226 }
227 }
228 }
229
230 tx_cur = coalesce->tx_coalesce_usecs;
231
232 if (tx_cur > BE_MAX_EQD)
233 tx_cur = BE_MAX_EQD;
234 if (tx_eq->cur_eqd != tx_cur) {
235 status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
236 if (!status)
237 tx_eq->cur_eqd = tx_cur;
238 }
239
240 return 0;
241}
242
243static void
244be_get_ethtool_stats(struct net_device *netdev,
245 struct ethtool_stats *stats, uint64_t *data)
246{
247 struct be_adapter *adapter = netdev_priv(netdev);
248 struct be_rx_obj *rxo;
249 struct be_tx_obj *txo;
250 void *p;
251 unsigned int i, j, base = 0, start;
252
253 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
254 p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
255 data[i] = *(u32 *)p;
256 }
257 base += ETHTOOL_STATS_NUM;
258
259 for_all_rx_queues(adapter, rxo, j) {
260 struct be_rx_stats *stats = rx_stats(rxo);
261
262 do {
263 start = u64_stats_fetch_begin_bh(&stats->sync);
264 data[base] = stats->rx_bytes;
265 data[base + 1] = stats->rx_pkts;
266 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
267
268 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
269 p = (u8 *)stats + et_rx_stats[i].offset;
270 data[base + i] = *(u32 *)p;
271 }
272 base += ETHTOOL_RXSTATS_NUM;
273 }
274
275 for_all_tx_queues(adapter, txo, j) {
276 struct be_tx_stats *stats = tx_stats(txo);
277
278 do {
279 start = u64_stats_fetch_begin_bh(&stats->sync_compl);
280 data[base] = stats->tx_compl;
281 } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
282
283 do {
284 start = u64_stats_fetch_begin_bh(&stats->sync);
285 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
286 p = (u8 *)stats + et_tx_stats[i].offset;
287 data[base + i] =
288 (et_tx_stats[i].size == sizeof(u64)) ?
289 *(u64 *)p : *(u32 *)p;
290 }
291 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
292 base += ETHTOOL_TXSTATS_NUM;
293 }
294}
295
296static void
297be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
298 uint8_t *data)
299{
300 struct be_adapter *adapter = netdev_priv(netdev);
301 int i, j;
302
303 switch (stringset) {
304 case ETH_SS_STATS:
305 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
306 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
307 data += ETH_GSTRING_LEN;
308 }
309 for (i = 0; i < adapter->num_rx_qs; i++) {
310 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
311 sprintf(data, "rxq%d: %s", i,
312 et_rx_stats[j].desc);
313 data += ETH_GSTRING_LEN;
314 }
315 }
316 for (i = 0; i < adapter->num_tx_qs; i++) {
317 for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
318 sprintf(data, "txq%d: %s", i,
319 et_tx_stats[j].desc);
320 data += ETH_GSTRING_LEN;
321 }
322 }
323 break;
324 case ETH_SS_TEST:
325 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
326 memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
327 data += ETH_GSTRING_LEN;
328 }
329 break;
330 }
331}
332
333static int be_get_sset_count(struct net_device *netdev, int stringset)
334{
335 struct be_adapter *adapter = netdev_priv(netdev);
336
337 switch (stringset) {
338 case ETH_SS_TEST:
339 return ETHTOOL_TESTS_NUM;
340 case ETH_SS_STATS:
341 return ETHTOOL_STATS_NUM +
342 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
343 adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
344 default:
345 return -EINVAL;
346 }
347}
348
349static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
350{
351 struct be_adapter *adapter = netdev_priv(netdev);
352 struct be_phy_info phy_info;
353 u8 mac_speed = 0;
354 u16 link_speed = 0;
355 int status;
356
357 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
358 status = be_cmd_link_status_query(adapter, &mac_speed,
359 &link_speed, 0);
360
361 /* link_speed is in units of 10 Mbps */
362 if (link_speed) {
363			ethtool_cmd_speed_set(ecmd, link_speed * 10);
364 } else {
365 switch (mac_speed) {
366 case PHY_LINK_SPEED_10MBPS:
367 ethtool_cmd_speed_set(ecmd, SPEED_10);
368 break;
369 case PHY_LINK_SPEED_100MBPS:
370 ethtool_cmd_speed_set(ecmd, SPEED_100);
371 break;
372 case PHY_LINK_SPEED_1GBPS:
373 ethtool_cmd_speed_set(ecmd, SPEED_1000);
374 break;
375 case PHY_LINK_SPEED_10GBPS:
376 ethtool_cmd_speed_set(ecmd, SPEED_10000);
377 break;
378 case PHY_LINK_SPEED_ZERO:
379 ethtool_cmd_speed_set(ecmd, 0);
380 break;
381 }
382 }
383
384 status = be_cmd_get_phy_info(adapter, &phy_info);
385 if (!status) {
386 switch (phy_info.interface_type) {
387 case PHY_TYPE_XFP_10GB:
388 case PHY_TYPE_SFP_1GB:
389 case PHY_TYPE_SFP_PLUS_10GB:
390 ecmd->port = PORT_FIBRE;
391 break;
392 default:
393 ecmd->port = PORT_TP;
394 break;
395 }
396
397 switch (phy_info.interface_type) {
398 case PHY_TYPE_KR_10GB:
399 case PHY_TYPE_KX4_10GB:
400 ecmd->autoneg = AUTONEG_ENABLE;
401 ecmd->transceiver = XCVR_INTERNAL;
402 break;
403 default:
404 ecmd->autoneg = AUTONEG_DISABLE;
405 ecmd->transceiver = XCVR_EXTERNAL;
406 break;
407 }
408 }
409
410 /* Save for future use */
411 adapter->link_speed = ethtool_cmd_speed(ecmd);
412 adapter->port_type = ecmd->port;
413 adapter->transceiver = ecmd->transceiver;
414 adapter->autoneg = ecmd->autoneg;
415 } else {
416 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
417 ecmd->port = adapter->port_type;
418 ecmd->transceiver = adapter->transceiver;
419 ecmd->autoneg = adapter->autoneg;
420 }
421
422 ecmd->duplex = DUPLEX_FULL;
423 ecmd->phy_address = adapter->port_num;
424 switch (ecmd->port) {
425 case PORT_FIBRE:
426 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
427 break;
428 case PORT_TP:
429 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
430 break;
431 case PORT_AUI:
432 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
433 break;
434 }
435
436 if (ecmd->autoneg) {
437 ecmd->supported |= SUPPORTED_1000baseT_Full;
438 ecmd->supported |= SUPPORTED_Autoneg;
439 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
440 ADVERTISED_1000baseT_Full);
441 }
442
443 return 0;
444}
445
446static void
447be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
448{
449 struct be_adapter *adapter = netdev_priv(netdev);
450
451 ring->rx_max_pending = adapter->rx_obj[0].q.len;
452 ring->tx_max_pending = adapter->tx_obj[0].q.len;
453
454 ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
455 ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
456}
457
458static void
459be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
460{
461 struct be_adapter *adapter = netdev_priv(netdev);
462
463 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
464 ecmd->autoneg = 0;
465}
466
467static int
468be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
469{
470 struct be_adapter *adapter = netdev_priv(netdev);
471 int status;
472
473 if (ecmd->autoneg != 0)
474 return -EINVAL;
475 adapter->tx_fc = ecmd->tx_pause;
476 adapter->rx_fc = ecmd->rx_pause;
477
478 status = be_cmd_set_flow_control(adapter,
479 adapter->tx_fc, adapter->rx_fc);
480 if (status)
481 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
482
483 return status;
484}
485
486static int
487be_set_phys_id(struct net_device *netdev,
488 enum ethtool_phys_id_state state)
489{
490 struct be_adapter *adapter = netdev_priv(netdev);
491
492 switch (state) {
493 case ETHTOOL_ID_ACTIVE:
494 be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
495 &adapter->beacon_state);
496 return 1; /* cycle on/off once per second */
497
498 case ETHTOOL_ID_ON:
499 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
500 BEACON_STATE_ENABLED);
501 break;
502
503 case ETHTOOL_ID_OFF:
504 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
505 BEACON_STATE_DISABLED);
506 break;
507
508 case ETHTOOL_ID_INACTIVE:
509 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
510 adapter->beacon_state);
511 }
512
513 return 0;
514}
515
516static bool
517be_is_wol_supported(struct be_adapter *adapter)
518{
519	return be_physfn(adapter);
523}
524
525static void
526be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
527{
528 struct be_adapter *adapter = netdev_priv(netdev);
529
530 if (be_is_wol_supported(adapter))
531 wol->supported = WAKE_MAGIC;
532
533 if (adapter->wol)
534 wol->wolopts = WAKE_MAGIC;
535 else
536 wol->wolopts = 0;
537 memset(&wol->sopass, 0, sizeof(wol->sopass));
538}
539
540static int
541be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
542{
543 struct be_adapter *adapter = netdev_priv(netdev);
544
545 if (wol->wolopts & ~WAKE_MAGIC)
546 return -EINVAL;
547
548 if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
549 adapter->wol = true;
550 else
551 adapter->wol = false;
552
553 return 0;
554}
555
556static int
557be_test_ddr_dma(struct be_adapter *adapter)
558{
559 int ret, i;
560 struct be_dma_mem ddrdma_cmd;
561 static const u64 pattern[2] = {
562 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
563 };
564
565 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
566 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
567 &ddrdma_cmd.dma, GFP_KERNEL);
568 if (!ddrdma_cmd.va) {
569 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
570 return -ENOMEM;
571 }
572
573 for (i = 0; i < 2; i++) {
574 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
575 4096, &ddrdma_cmd);
576 if (ret != 0)
577 goto err;
578 }
579
580err:
581 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
582 ddrdma_cmd.dma);
583 return ret;
584}
585
586static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
587 u64 *status)
588{
589 be_cmd_set_loopback(adapter, adapter->hba_port_num,
590 loopback_type, 1);
591 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
592 loopback_type, 1500,
593 2, 0xabc);
594 be_cmd_set_loopback(adapter, adapter->hba_port_num,
595 BE_NO_LOOPBACK, 1);
596 return *status;
597}
598
599static void
600be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
601{
602 struct be_adapter *adapter = netdev_priv(netdev);
603 u8 mac_speed = 0;
604 u16 qos_link_speed = 0;
605
606 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
607
608 if (test->flags & ETH_TEST_FL_OFFLINE) {
609 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
610 &data[0]) != 0) {
611 test->flags |= ETH_TEST_FL_FAILED;
612 }
613 if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
614 &data[1]) != 0) {
615 test->flags |= ETH_TEST_FL_FAILED;
616 }
617 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
618 &data[2]) != 0) {
619 test->flags |= ETH_TEST_FL_FAILED;
620 }
621 }
622
623 if (be_test_ddr_dma(adapter) != 0) {
624 data[3] = 1;
625 test->flags |= ETH_TEST_FL_FAILED;
626 }
627
628 if (be_cmd_link_status_query(adapter, &mac_speed,
629 &qos_link_speed, 0) != 0) {
630 test->flags |= ETH_TEST_FL_FAILED;
631 data[4] = -1;
632 } else if (!mac_speed) {
633 test->flags |= ETH_TEST_FL_FAILED;
634 data[4] = 1;
635 }
636}
637
638static int
639be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
640{
641 struct be_adapter *adapter = netdev_priv(netdev);
642 char file_name[ETHTOOL_FLASH_MAX_FILENAME];
643
644 file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
645 strcpy(file_name, efl->data);
646
647 return be_load_fw(adapter, file_name);
648}
649
650static int
651be_get_eeprom_len(struct net_device *netdev)
652{
653 return BE_READ_SEEPROM_LEN;
654}
655
656static int
657be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
658 uint8_t *data)
659{
660 struct be_adapter *adapter = netdev_priv(netdev);
661 struct be_dma_mem eeprom_cmd;
662 struct be_cmd_resp_seeprom_read *resp;
663 int status;
664
665 if (!eeprom->len)
666 return -EINVAL;
667
668	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
669
670 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
671 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
672 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
673 &eeprom_cmd.dma, GFP_KERNEL);
674
675 if (!eeprom_cmd.va) {
676 dev_err(&adapter->pdev->dev,
677 "Memory allocation failure. Could not read eeprom\n");
678 return -ENOMEM;
679 }
680
681 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
682
683 if (!status) {
684 resp = eeprom_cmd.va;
685 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
686 }
687 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
688 eeprom_cmd.dma);
689
690 return status;
691}
692
693const struct ethtool_ops be_ethtool_ops = {
694 .get_settings = be_get_settings,
695 .get_drvinfo = be_get_drvinfo,
696 .get_wol = be_get_wol,
697 .set_wol = be_set_wol,
698 .get_link = ethtool_op_get_link,
699 .get_eeprom_len = be_get_eeprom_len,
700 .get_eeprom = be_read_eeprom,
701 .get_coalesce = be_get_coalesce,
702 .set_coalesce = be_set_coalesce,
703 .get_ringparam = be_get_ringparam,
704 .get_pauseparam = be_get_pauseparam,
705 .set_pauseparam = be_set_pauseparam,
706 .get_strings = be_get_stat_strings,
707 .set_phys_id = be_set_phys_id,
708 .get_sset_count = be_get_sset_count,
709 .get_ethtool_stats = be_get_ethtool_stats,
710 .get_regs_len = be_get_reg_len,
711 .get_regs = be_get_regs,
712 .flash_device = be_do_flash,
713 .self_test = be_self_test,
714};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
new file mode 100644
index 000000000000..fbc8a915519e
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -0,0 +1,510 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18/********* Mailbox door bell *************/
19/* Used for driver communication with the FW.
20 * The software must write this register twice to post any command. First,
21 * it writes the register with hi=1 and the upper bits of the physical address
22 * for the MAILBOX structure. Software must poll the ready bit until this
23 * is acknowledged. Then, software writes the register with hi=0 and the lower
24 * bits of the address. It must poll the ready bit until the command is
25 * complete. Upon completion, the MAILBOX will contain a valid completion
26 * queue entry.
27 */
28#define MPU_MAILBOX_DB_OFFSET 0x160
29#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
30#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
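/* A minimal sketch of the two-phase post described above (illustrative
 * only; be_mbox_db_wait_ready() is a hypothetical helper that polls
 * MPU_MAILBOX_DB_RDY_MASK until the FW acks the write):
 *
 *	iowrite32(upper_bits_of(mbox_pa) | MPU_MAILBOX_DB_HI_MASK, db);
 *	be_mbox_db_wait_ready(adapter);		ready bit acked by FW
 *	iowrite32(lower_bits_of(mbox_pa), db);	hi=0, lower address bits
 *	be_mbox_db_wait_ready(adapter);		MAILBOX now holds a compl entry
 */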
31
32#define MPU_EP_CONTROL 0
33
34/********** MPU semaphore ******************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac
36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31
40
41/* MPU semaphore POST stage values */
42#define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting go-ahead from host */
43#define POST_STAGE_HOST_RDY 		0x2 /* Host has given go-ahead to FW */
44#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46
47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408
51
52#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000
54#define SLIPORT_STATUS_RDY_MASK 0x00800000
55
56
57#define SLI_PORT_CONTROL_IP_MASK 0x08000000
58
59/********* Memory BAR register ************/
60#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
61/* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
62 * Interrupt Disable" may still globally block interrupts in addition to
63 * individual interrupt masks. This gives the device driver a mechanism to
64 * block all interrupts atomically without having to arbitrate for the PCI
65 * Interrupt Disable bit with the OS.
66 */
67#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
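/* be_intr_set() in be_main.c toggles this bit at runtime. */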
68
69/********* Power management (WOL) **********/
70#define PCICFG_PM_CONTROL_OFFSET 0x44
71#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
72
73/********* Online Control Registers *******/
74#define PCICFG_ONLINE0 0xB0
75#define PCICFG_ONLINE1 0xB4
76
77/********* UE Status and Mask Registers ***/
78#define PCICFG_UE_STATUS_LOW 0xA0
79#define PCICFG_UE_STATUS_HIGH 0xA4
80#define PCICFG_UE_STATUS_LOW_MASK 0xA8
81#define PCICFG_UE_STATUS_HI_MASK 0xAC
82
83/******** SLI_INTF ***********************/
84#define SLI_INTF_REG_OFFSET 0x58
85#define SLI_INTF_VALID_MASK 0xE0000000
86#define SLI_INTF_VALID 0xC0000000
87#define SLI_INTF_HINT2_MASK 0x1F000000
88#define SLI_INTF_HINT2_SHIFT 24
89#define SLI_INTF_HINT1_MASK 0x00FF0000
90#define SLI_INTF_HINT1_SHIFT 16
91#define SLI_INTF_FAMILY_MASK 0x00000F00
92#define SLI_INTF_FAMILY_SHIFT 8
93#define SLI_INTF_IF_TYPE_MASK 0x0000F000
94#define SLI_INTF_IF_TYPE_SHIFT 12
95#define SLI_INTF_REV_MASK 0x000000F0
96#define SLI_INTF_REV_SHIFT 4
97#define SLI_INTF_FT_MASK 0x00000001
98
99
100/* SLI family */
101#define BE_SLI_FAMILY 0x0
102#define LANCER_A0_SLI_FAMILY 0xA
103
104
105/********* ISR0 Register offset **********/
106#define CEV_ISR0_OFFSET 0xC18
107#define CEV_ISR_SIZE 4
108
109/********* Event Q door bell *************/
110#define DB_EQ_OFFSET DB_CQ_OFFSET
111#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
112#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
113#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placed at 11-15 */
114
115/* Clear the interrupt for this eq */
116#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
117/* Must be 1 */
118#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
119/* Number of event entries processed */
120#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
121/* Rearm bit */
122#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
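/* be_eq_notify() in be_main.c shows how these doorbell fields are OR'ed
 * into a single register write.
 */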
123
124/********* Compl Q door bell *************/
125#define DB_CQ_OFFSET 0x120
126#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
127#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
128#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
129			placed at 11-15 */
130
131/* Number of event entries processed */
132#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
133/* Rearm bit */
134#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
135
136/********** TX ULP door bell *************/
137#define DB_TXULP1_OFFSET 0x60
138#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
139/* Number of tx entries posted */
140#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
141#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
142
143/********** RQ(erx) door bell ************/
144#define DB_RQ_OFFSET 0x100
145#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
146/* Number of rx frags posted */
147#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
148
149/********** MCC door bell ************/
150#define DB_MCCQ_OFFSET 0x140
151#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
152/* Number of entries posted */
153#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
154
155/********** SRIOV VF PCICFG OFFSET ********/
156#define SRIOV_VF_PCICFG_OFFSET (4096)
157
158/********** FAT TABLE ********/
159#define RETRIEVE_FAT 0
160#define QUERY_FAT 1
161
162/* Flashrom related descriptors */
163#define IMAGE_TYPE_FIRMWARE 160
164#define IMAGE_TYPE_BOOTCODE 224
165#define IMAGE_TYPE_OPTIONROM 32
166
167#define NUM_FLASHDIR_ENTRIES 32
168
169#define IMG_TYPE_ISCSI_ACTIVE 0
170#define IMG_TYPE_REDBOOT 1
171#define IMG_TYPE_BIOS 2
172#define IMG_TYPE_PXE_BIOS 3
173#define IMG_TYPE_FCOE_BIOS 8
174#define IMG_TYPE_ISCSI_BACKUP 9
175#define IMG_TYPE_FCOE_FW_ACTIVE 10
176#define IMG_TYPE_FCOE_FW_BACKUP 11
177#define IMG_TYPE_NCSI_FW 13
178#define IMG_TYPE_PHY_FW 99
179#define TN_8022 13
180
181#define ILLEGAL_IOCTL_REQ 2
182#define FLASHROM_OPER_PHY_FLASH 9
183#define FLASHROM_OPER_PHY_SAVE 10
184#define FLASHROM_OPER_FLASH 1
185#define FLASHROM_OPER_SAVE 2
186#define FLASHROM_OPER_REPORT 4
187
188#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
189#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
190#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
191#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
192#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
193#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
194#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
195#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
196
197#define FLASH_NCSI_MAGIC (0x16032009)
198#define FLASH_NCSI_DISABLED (0)
199#define FLASH_NCSI_ENABLED (1)
200
201#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
202
203/* Offsets for components on Flash. */
204#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
205#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
206#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
207#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
208#define FLASH_iSCSI_BIOS_START_g2 (7340032)
209#define FLASH_PXE_BIOS_START_g2 (7864320)
210#define FLASH_FCoE_BIOS_START_g2 (524288)
211#define FLASH_REDBOOT_START_g2 (0)
212
213#define FLASH_NCSI_START_g3 (15990784)
214#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
215#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
216#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
217#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
218#define FLASH_iSCSI_BIOS_START_g3 (12582912)
219#define FLASH_PXE_BIOS_START_g3 (13107200)
220#define FLASH_FCoE_BIOS_START_g3 (13631488)
221#define FLASH_REDBOOT_START_g3 (262144)
222#define FLASH_PHY_FW_START_g3 1310720
223
224/************* Rx Packet Type Encoding **************/
225#define BE_UNICAST_PACKET 0
226#define BE_MULTICAST_PACKET 1
227#define BE_BROADCAST_PACKET 2
228#define BE_RSVD_PACKET 3
229
230/*
231 * BE descriptors: host memory data structures whose formats
232 * are hardwired in BE silicon.
233 */
234/* Event Queue Descriptor */
235#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
236#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
237#define EQ_ENTRY_RES_ID_SHIFT 16
238
239struct be_eq_entry {
240 u32 evt;
241};
242
243/* TX Queue Descriptor */
244#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
245struct be_eth_wrb {
246 u32 frag_pa_hi; /* dword 0 */
247 u32 frag_pa_lo; /* dword 1 */
248 u32 rsvd0; /* dword 2 */
249 u32 frag_len; /* dword 3: bits 0 - 15 */
250} __packed;
251
252/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
253 * actual structure is defined as a byte: used to calculate
254 * offset/shift/mask of each field */
255struct amap_eth_hdr_wrb {
256 u8 rsvd0[32]; /* dword 0 */
257 u8 rsvd1[32]; /* dword 1 */
258 u8 complete; /* dword 2 */
259 u8 event;
260 u8 crc;
261 u8 forward;
262 u8 lso6;
263 u8 mgmt;
264 u8 ipcs;
265 u8 udpcs;
266 u8 tcpcs;
267 u8 lso;
268 u8 vlan;
269 u8 gso[2];
270 u8 num_wrb[5];
271 u8 lso_mss[14];
272 u8 len[16]; /* dword 3 */
273 u8 vlan_tag[16];
274} __packed;
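/* Hypothetical sketch (the driver's real AMAP_* helpers live elsewhere in
 * this patch, not here): because each u8 above models a single descriptor
 * bit, offsetof() on the pseudo struct yields a field's bit position and
 * sizeof() its bit width, from which a shift/mask pair follows for widths
 * below 32:
 *
 *	#define AMAP_BIT_OFFSET(s, f)	offsetof(struct s, f)
 *	#define AMAP_BIT_WIDTH(s, f)	sizeof(((struct s *)0)->f)
 *	#define AMAP_BIT_MASK(s, f)	((1u << AMAP_BIT_WIDTH(s, f)) - 1)
 */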
275
276struct be_eth_hdr_wrb {
277 u32 dw[4];
278};
279
280/* TX Compl Queue Descriptor */
281
282/* Pseudo amap definition for eth_tx_compl in which each bit of the
283 * actual structure is defined as a byte: used to calculate
284 * offset/shift/mask of each field */
285struct amap_eth_tx_compl {
286 u8 wrb_index[16]; /* dword 0 */
287 u8 ct[2]; /* dword 0 */
288 u8 port[2]; /* dword 0 */
289 u8 rsvd0[8]; /* dword 0 */
290 u8 status[4]; /* dword 0 */
291 u8 user_bytes[16]; /* dword 1 */
292 u8 nwh_bytes[8]; /* dword 1 */
293 u8 lso; /* dword 1 */
294 u8 cast_enc[2]; /* dword 1 */
295 u8 rsvd1[5]; /* dword 1 */
296 u8 rsvd2[32]; /* dword 2 */
297 u8 pkts[16]; /* dword 3 */
298 u8 ringid[11]; /* dword 3 */
299 u8 hash_val[4]; /* dword 3 */
300 u8 valid; /* dword 3 */
301} __packed;
302
303struct be_eth_tx_compl {
304 u32 dw[4];
305};
306
307/* RX Queue Descriptor */
308struct be_eth_rx_d {
309 u32 fragpa_hi;
310 u32 fragpa_lo;
311};
312
313/* RX Compl Queue Descriptor */
314
315/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
316 * each bit of the actual structure is defined as a byte: used to calculate
317 * offset/shift/mask of each field */
318struct amap_eth_rx_compl_v0 {
319 u8 vlan_tag[16]; /* dword 0 */
320 u8 pktsize[14]; /* dword 0 */
321 u8 port; /* dword 0 */
322 u8 ip_opt; /* dword 0 */
323 u8 err; /* dword 1 */
324 u8 rsshp; /* dword 1 */
325 u8 ipf; /* dword 1 */
326 u8 tcpf; /* dword 1 */
327 u8 udpf; /* dword 1 */
328 u8 ipcksm; /* dword 1 */
329 u8 l4_cksm; /* dword 1 */
330 u8 ip_version; /* dword 1 */
331 u8 macdst[6]; /* dword 1 */
332 u8 vtp; /* dword 1 */
333 u8 rsvd0; /* dword 1 */
334 u8 fragndx[10]; /* dword 1 */
335 u8 ct[2]; /* dword 1 */
336 u8 sw; /* dword 1 */
337 u8 numfrags[3]; /* dword 1 */
338 u8 rss_flush; /* dword 2 */
339 u8 cast_enc[2]; /* dword 2 */
340 u8 vtm; /* dword 2 */
341 u8 rss_bank; /* dword 2 */
342 u8 rsvd1[23]; /* dword 2 */
343 u8 lro_pkt; /* dword 2 */
344 u8 rsvd2[2]; /* dword 2 */
345 u8 valid; /* dword 2 */
346 u8 rsshash[32]; /* dword 3 */
347} __packed;
348
349/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
350 * each bit of the actual structure is defined as a byte: used to calculate
351 * offset/shift/mask of each field */
352struct amap_eth_rx_compl_v1 {
353 u8 vlan_tag[16]; /* dword 0 */
354 u8 pktsize[14]; /* dword 0 */
355 u8 vtp; /* dword 0 */
356 u8 ip_opt; /* dword 0 */
357 u8 err; /* dword 1 */
358 u8 rsshp; /* dword 1 */
359 u8 ipf; /* dword 1 */
360 u8 tcpf; /* dword 1 */
361 u8 udpf; /* dword 1 */
362 u8 ipcksm; /* dword 1 */
363 u8 l4_cksm; /* dword 1 */
364 u8 ip_version; /* dword 1 */
365 u8 macdst[7]; /* dword 1 */
366 u8 rsvd0; /* dword 1 */
367 u8 fragndx[10]; /* dword 1 */
368 u8 ct[2]; /* dword 1 */
369 u8 sw; /* dword 1 */
370 u8 numfrags[3]; /* dword 1 */
371 u8 rss_flush; /* dword 2 */
372 u8 cast_enc[2]; /* dword 2 */
373 u8 vtm; /* dword 2 */
374 u8 rss_bank; /* dword 2 */
375 u8 port[2]; /* dword 2 */
376 u8 vntagp; /* dword 2 */
377 u8 header_len[8]; /* dword 2 */
378 u8 header_split[2]; /* dword 2 */
379 u8 rsvd1[13]; /* dword 2 */
380 u8 valid; /* dword 2 */
381 u8 rsshash[32]; /* dword 3 */
382} __packed;
383
384struct be_eth_rx_compl {
385 u32 dw[4];
386};
387
388struct mgmt_hba_attribs {
389 u8 flashrom_version_string[32];
390 u8 manufacturer_name[32];
391 u32 supported_modes;
392 u32 rsvd0[3];
393 u8 ncsi_ver_string[12];
394 u32 default_extended_timeout;
395 u8 controller_model_number[32];
396 u8 controller_description[64];
397 u8 controller_serial_number[32];
398 u8 ip_version_string[32];
399 u8 firmware_version_string[32];
400 u8 bios_version_string[32];
401 u8 redboot_version_string[32];
402 u8 driver_version_string[32];
403 u8 fw_on_flash_version_string[32];
404 u32 functionalities_supported;
405 u16 max_cdblength;
406 u8 asic_revision;
407 u8 generational_guid[16];
408 u8 hba_port_count;
409 u16 default_link_down_timeout;
410 u8 iscsi_ver_min_max;
411 u8 multifunction_device;
412 u8 cache_valid;
413 u8 hba_status;
414 u8 max_domains_supported;
415 u8 phy_port;
416 u32 firmware_post_status;
417 u32 hba_mtu[8];
418 u32 rsvd1[4];
419};
420
421struct mgmt_controller_attrib {
422 struct mgmt_hba_attribs hba_attribs;
423 u16 pci_vendor_id;
424 u16 pci_device_id;
425 u16 pci_sub_vendor_id;
426 u16 pci_sub_system_id;
427 u8 pci_bus_number;
428 u8 pci_device_number;
429 u8 pci_function_number;
430 u8 interface_type;
431 u64 unique_identifier;
432 u32 rsvd0[5];
433};
434
435struct controller_id {
436 u32 vendor;
437 u32 device;
438 u32 subvendor;
439 u32 subdevice;
440};
441
442struct flash_comp {
443 unsigned long offset;
444 int optype;
445 int size;
446};
447
448struct image_hdr {
449 u32 imageid;
450 u32 imageoffset;
451 u32 imagelength;
452 u32 image_checksum;
453 u8 image_version[32];
454};
455struct flash_file_hdr_g2 {
456 u8 sign[32];
457 u32 cksum;
458 u32 antidote;
459 struct controller_id cont_id;
460 u32 file_len;
461 u32 chunk_num;
462 u32 total_chunks;
463 u32 num_imgs;
464 u8 build[24];
465};
466
467struct flash_file_hdr_g3 {
468 u8 sign[52];
469 u8 ufi_version[4];
470 u32 file_len;
471 u32 cksum;
472 u32 antidote;
473 u32 num_imgs;
474 u8 build[24];
475 u8 rsvd[32];
476};
477
478struct flash_section_hdr {
479 u32 format_rev;
480 u32 cksum;
481 u32 antidote;
482 u32 build_no;
483 u8 id_string[64];
484 u32 active_entry_mask;
485 u32 valid_entry_mask;
486 u32 org_content_mask;
487 u32 rsvd0;
488 u32 rsvd1;
489 u32 rsvd2;
490 u32 rsvd3;
491 u32 rsvd4;
492};
493
494struct flash_section_entry {
495 u32 type;
496 u32 offset;
497 u32 pad_size;
498 u32 image_size;
499 u32 cksum;
500 u32 entry_point;
501 u32 rsvd0;
502 u32 rsvd1;
503 u8 ver_data[32];
504};
505
506struct flash_section_info {
507 u8 cookie[32];
508 struct flash_section_hdr fsec_hdr;
509 struct flash_section_entry fsec_entry[32];
510};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
new file mode 100644
index 000000000000..1a3accab3d17
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -0,0 +1,3633 @@
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include <linux/prefetch.h>
19#include "be.h"
20#include "be_cmds.h"
21#include <asm/div64.h>
22
23MODULE_VERSION(DRV_VER);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
29static ushort rx_frag_size = 2048;
30static unsigned int num_vfs;
31module_param(rx_frag_size, ushort, S_IRUGO);
32module_param(num_vfs, uint, S_IRUGO);
33MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
36static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
44};
45MODULE_DEVICE_TABLE(pci, be_dev_ids);
46/* UE Status Low CSR */
47static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81/* UE Status High CSR */
82static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
142static void be_intr_set(struct be_adapter *adapter, bool enable)
143{
144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
147
148 if (adapter->eeh_err)
149 return;
150
151 if (!enabled && enable)
152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 else if (enabled && !enable)
154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else
156 return;
157
158 iowrite32(reg, addr);
159}
160
161static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
162{
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
166
167 wmb();
168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
169}
170
171static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
172{
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
176
177 wmb();
178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
179}
180
181static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182 bool arm, bool clear_int, u16 num_popped)
183{
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
188
189 if (adapter->eeh_err)
190 return;
191
192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
199}
200
201void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
202{
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
207
208 if (adapter->eeh_err)
209 return;
210
211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
215}
216
217static int be_mac_addr_set(struct net_device *netdev, void *p)
218{
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
222
223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
225
226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
228 */
229 if (!be_physfn(adapter))
230 goto netdev_addr;
231
232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
234 if (status)
235 return status;
236
237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238 adapter->if_handle, &adapter->pmac_id, 0);
239netdev_addr:
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243 return status;
244}
245
246static void populate_be2_stats(struct be_adapter *adapter)
247{
248 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
249 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
250 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
251 struct be_port_rxf_stats_v0 *port_stats =
252 &rxf_stats->port[adapter->port_num];
253 struct be_drv_stats *drvs = &adapter->drv_stats;
254
255 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
271 drvs->rx_dropped_header_too_small =
272 port_stats->rx_dropped_header_too_small;
273 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
274 drvs->rx_alignment_symbol_errors =
275 port_stats->rx_alignment_symbol_errors;
276
277 drvs->tx_pauseframes = port_stats->tx_pauseframes;
278 drvs->tx_controlframes = port_stats->tx_controlframes;
279
280 if (adapter->port_num)
281 drvs->jabber_events = rxf_stats->port1_jabber_events;
282 else
283 drvs->jabber_events = rxf_stats->port0_jabber_events;
284 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
285 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
286 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
287 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
288 drvs->forwarded_packets = rxf_stats->forwarded_packets;
289 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
290 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
291 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
292 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
293}
294
295static void populate_be3_stats(struct be_adapter *adapter)
296{
297 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
298 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
299 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
300 struct be_port_rxf_stats_v1 *port_stats =
301 &rxf_stats->port[adapter->port_num];
302 struct be_drv_stats *drvs = &adapter->drv_stats;
303
304 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
305 drvs->rx_pause_frames = port_stats->rx_pause_frames;
306 drvs->rx_crc_errors = port_stats->rx_crc_errors;
307 drvs->rx_control_frames = port_stats->rx_control_frames;
308 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
309 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
310 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
311 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
312 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
313 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
314 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
315 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
316 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
317 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
318 drvs->rx_dropped_header_too_small =
319 port_stats->rx_dropped_header_too_small;
320 drvs->rx_input_fifo_overflow_drop =
321 port_stats->rx_input_fifo_overflow_drop;
322 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
323 drvs->rx_alignment_symbol_errors =
324 port_stats->rx_alignment_symbol_errors;
325 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
326 drvs->tx_pauseframes = port_stats->tx_pauseframes;
327 drvs->tx_controlframes = port_stats->tx_controlframes;
328 drvs->jabber_events = port_stats->jabber_events;
329 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
330 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
331 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
332 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
333 drvs->forwarded_packets = rxf_stats->forwarded_packets;
334 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
335 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
336 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
337 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
338}
339
340static void populate_lancer_stats(struct be_adapter *adapter)
341{
343 struct be_drv_stats *drvs = &adapter->drv_stats;
344 struct lancer_pport_stats *pport_stats =
345 pport_stats_from_cmd(adapter);
346
347 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
348 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
349 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
350 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
351 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
352 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
353 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
354 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
355 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
356 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
357 drvs->rx_dropped_tcp_length =
358 pport_stats->rx_dropped_invalid_tcp_length;
359 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
360 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
361 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
362 drvs->rx_dropped_header_too_small =
363 pport_stats->rx_dropped_header_too_small;
364 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
365 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
366 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
367 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
369 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
370 drvs->jabber_events = pport_stats->rx_jabbers;
371 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
372 drvs->forwarded_packets = pport_stats->num_forwards_lo;
373 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
374 drvs->rx_drops_too_many_frags =
375 pport_stats->rx_drops_too_many_frags_lo;
376}
377
378void be_parse_stats(struct be_adapter *adapter)
379{
380 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
381 struct be_rx_obj *rxo;
382 int i;
383
384 if (adapter->generation == BE_GEN3) {
385 if (lancer_chip(adapter))
386 populate_lancer_stats(adapter);
387 else
388 populate_be3_stats(adapter);
389 } else {
390 populate_be2_stats(adapter);
391 }
392
393 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
394 for_all_rx_queues(adapter, rxo, i)
395 rx_stats(rxo)->rx_drops_no_frags =
396 erx->rx_drops_no_fragments[rxo->q.id];
397}
398
399static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
400 struct rtnl_link_stats64 *stats)
401{
402 struct be_adapter *adapter = netdev_priv(netdev);
403 struct be_drv_stats *drvs = &adapter->drv_stats;
404 struct be_rx_obj *rxo;
405 struct be_tx_obj *txo;
406 u64 pkts, bytes;
407 unsigned int start;
408 int i;
409
410 for_all_rx_queues(adapter, rxo, i) {
411 const struct be_rx_stats *rx_stats = rx_stats(rxo);
412 do {
413 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
414 pkts = rx_stats(rxo)->rx_pkts;
415 bytes = rx_stats(rxo)->rx_bytes;
416 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
417 stats->rx_packets += pkts;
418 stats->rx_bytes += bytes;
419 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
420 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
421 rx_stats(rxo)->rx_drops_no_frags;
422 }
423
424 for_all_tx_queues(adapter, txo, i) {
425 const struct be_tx_stats *tx_stats = tx_stats(txo);
426 do {
427 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
428 pkts = tx_stats(txo)->tx_pkts;
429 bytes = tx_stats(txo)->tx_bytes;
430 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
431 stats->tx_packets += pkts;
432 stats->tx_bytes += bytes;
433 }
434
435 /* bad pkts received */
436 stats->rx_errors = drvs->rx_crc_errors +
437 drvs->rx_alignment_symbol_errors +
438 drvs->rx_in_range_errors +
439 drvs->rx_out_range_errors +
440 drvs->rx_frame_too_long +
441 drvs->rx_dropped_too_small +
442 drvs->rx_dropped_too_short +
443 drvs->rx_dropped_header_too_small +
444 drvs->rx_dropped_tcp_length +
445 drvs->rx_dropped_runt;
446
447 /* detailed rx errors */
448 stats->rx_length_errors = drvs->rx_in_range_errors +
449 drvs->rx_out_range_errors +
450 drvs->rx_frame_too_long;
451
452 stats->rx_crc_errors = drvs->rx_crc_errors;
453
454 /* frame alignment errors */
455 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
456
457 /* receiver fifo overrun */
458 /* drops_no_pbuf is not per i/f; it's per BE card */
459 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
460 drvs->rx_input_fifo_overflow_drop +
461 drvs->rx_drops_no_pbuf;
462 return stats;
463}
464
465void be_link_status_update(struct be_adapter *adapter, u32 link_status)
466{
467 struct net_device *netdev = adapter->netdev;
468
469 /* when link status changes, link speed must be re-queried from card */
470 adapter->link_speed = -1;
471 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
472 netif_carrier_on(netdev);
473 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
474 } else {
475 netif_carrier_off(netdev);
476 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
477 }
478}
479
480static void be_tx_stats_update(struct be_tx_obj *txo,
481 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
482{
483 struct be_tx_stats *stats = tx_stats(txo);
484
485 u64_stats_update_begin(&stats->sync);
486 stats->tx_reqs++;
487 stats->tx_wrbs += wrb_cnt;
488 stats->tx_bytes += copied;
489 stats->tx_pkts += (gso_segs ? gso_segs : 1);
490 if (stopped)
491 stats->tx_stops++;
492 u64_stats_update_end(&stats->sync);
493}
494
495/* Determine number of WRB entries needed to xmit data in an skb */
496static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
497 bool *dummy)
498{
499 int cnt = (skb->len > skb->data_len);
500
501 cnt += skb_shinfo(skb)->nr_frags;
502
503 /* to account for hdr wrb */
504 cnt++;
505 if (lancer_chip(adapter) || !(cnt & 1)) {
506 *dummy = false;
507 } else {
508 /* add a dummy to make it an even num */
509 cnt++;
510 *dummy = true;
511 }
512 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
513 return cnt;
514}
515
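/* Worked example: an skb with linear data and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is even, so no
 * dummy WRB is added. With one page frag the count would be 3, so on
 * BE2/BE3 a dummy WRB pads it to 4 (Lancer has no even-count rule).
 */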
516static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
517{
518 wrb->frag_pa_hi = upper_32_bits(addr);
519 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
520 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
521}
522
523static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
524 struct sk_buff *skb, u32 wrb_cnt, u32 len)
525{
526 u8 vlan_prio = 0;
527 u16 vlan_tag = 0;
528
529 memset(hdr, 0, sizeof(*hdr));
530
531 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
532
533 if (skb_is_gso(skb)) {
534 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
535 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
536 hdr, skb_shinfo(skb)->gso_size);
537 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
538 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
539 if (lancer_chip(adapter) && adapter->sli_family ==
540 LANCER_A0_SLI_FAMILY) {
541 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
542 if (is_tcp_pkt(skb))
543 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
544 tcpcs, hdr, 1);
545 else if (is_udp_pkt(skb))
546 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
547 udpcs, hdr, 1);
548 }
549 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
550 if (is_tcp_pkt(skb))
551 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
552 else if (is_udp_pkt(skb))
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
554 }
555
556 if (vlan_tx_tag_present(skb)) {
557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
558 vlan_tag = vlan_tx_tag_get(skb);
559 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
560 /* If vlan priority provided by OS is NOT in available bmap */
561 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
562 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
563 adapter->recommended_prio;
564 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
565 }
566
567 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
568 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
569 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
571}
572
573static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
574 bool unmap_single)
575{
576 dma_addr_t dma;
577
578 be_dws_le_to_cpu(wrb, sizeof(*wrb));
579
580 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
581 if (wrb->frag_len) {
582 if (unmap_single)
583 dma_unmap_single(dev, dma, wrb->frag_len,
584 DMA_TO_DEVICE);
585 else
586 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
587 }
588}
589
590static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
591 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
592{
593 dma_addr_t busaddr;
594 int i, copied = 0;
595 struct device *dev = &adapter->pdev->dev;
596 struct sk_buff *first_skb = skb;
597 struct be_eth_wrb *wrb;
598 struct be_eth_hdr_wrb *hdr;
599 bool map_single = false;
600 u16 map_head;
601
602 hdr = queue_head_node(txq);
603 queue_head_inc(txq);
604 map_head = txq->head;
605
606 if (skb->len > skb->data_len) {
607 int len = skb_headlen(skb);
608 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
609 if (dma_mapping_error(dev, busaddr))
610 goto dma_err;
611 map_single = true;
612 wrb = queue_head_node(txq);
613 wrb_fill(wrb, busaddr, len);
614 be_dws_cpu_to_le(wrb, sizeof(*wrb));
615 queue_head_inc(txq);
616 copied += len;
617 }
618
619 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
620 struct skb_frag_struct *frag =
621 &skb_shinfo(skb)->frags[i];
622 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
623 frag->size, DMA_TO_DEVICE);
624 if (dma_mapping_error(dev, busaddr))
625 goto dma_err;
626 wrb = queue_head_node(txq);
627 wrb_fill(wrb, busaddr, frag->size);
628 be_dws_cpu_to_le(wrb, sizeof(*wrb));
629 queue_head_inc(txq);
630 copied += frag->size;
631 }
632
633 if (dummy_wrb) {
634 wrb = queue_head_node(txq);
635 wrb_fill(wrb, 0, 0);
636 be_dws_cpu_to_le(wrb, sizeof(*wrb));
637 queue_head_inc(txq);
638 }
639
640 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
641 be_dws_cpu_to_le(hdr, sizeof(*hdr));
642
643 return copied;
644dma_err:
645 txq->head = map_head;
646 while (copied) {
647 wrb = queue_head_node(txq);
648 unmap_tx_frag(dev, wrb, map_single);
649 map_single = false;
650 copied -= wrb->frag_len;
651 queue_head_inc(txq);
652 }
653 return 0;
654}
655
656static netdev_tx_t be_xmit(struct sk_buff *skb,
657 struct net_device *netdev)
658{
659 struct be_adapter *adapter = netdev_priv(netdev);
660 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
661 struct be_queue_info *txq = &txo->q;
662 u32 wrb_cnt = 0, copied = 0;
663 u32 start = txq->head;
664 bool dummy_wrb, stopped = false;
665
666 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
667
668 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
669 if (copied) {
670 /* record the sent skb in the sent_skb table */
671 BUG_ON(txo->sent_skb_list[start]);
672 txo->sent_skb_list[start] = skb;
673
674 /* Ensure txq has space for the next skb; Else stop the queue
675 * *BEFORE* ringing the tx doorbell, so that we serialize the
676 * tx compls of the current transmit which'll wake up the queue
677 */
678 atomic_add(wrb_cnt, &txq->used);
679 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
680 txq->len) {
681 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
682 stopped = true;
683 }
684
685 be_txq_notify(adapter, txq->id, wrb_cnt);
686
687 be_tx_stats_update(txo, wrb_cnt, copied,
688 skb_shinfo(skb)->gso_segs, stopped);
689 } else {
690 txq->head = start;
691 dev_kfree_skb_any(skb);
692 }
693 return NETDEV_TX_OK;
694}
695
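/* Note on the stop-queue check in be_xmit() above: the subqueue is
 * stopped once fewer than BE_MAX_TX_FRAG_COUNT free entries remain,
 * so a worst-case next skb is still guaranteed to fit; be_poll_tx_mcc()
 * wakes the subqueue again once completions drain the ring below half
 * its length.
 */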
696static int be_change_mtu(struct net_device *netdev, int new_mtu)
697{
698 struct be_adapter *adapter = netdev_priv(netdev);
699 if (new_mtu < BE_MIN_MTU ||
700 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
701 (ETH_HLEN + ETH_FCS_LEN))) {
702 dev_info(&adapter->pdev->dev,
703 "MTU must be between %d and %d bytes\n",
704 BE_MIN_MTU,
705 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
706 return -EINVAL;
707 }
708 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
709 netdev->mtu, new_mtu);
710 netdev->mtu = new_mtu;
711 return 0;
712}
713
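/* Worked example (assuming BE_MIN_MTU is 256 and
 * BE_MAX_JUMBO_FRAME_SIZE is 9018): the accepted MTU range is
 * 256..9018 - (ETH_HLEN + ETH_FCS_LEN) = 256..9000 bytes, i.e. a
 * jumbo frame less the Ethernet header and FCS.
 */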
714/*
715 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
716 * If the user configures more, place BE in vlan promiscuous mode.
717 */
718static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
719{
720 u16 vtag[BE_NUM_VLANS_SUPPORTED];
721 u16 ntags = 0, i;
722 int status = 0;
723 u32 if_handle;
724
725 if (vf) {
726 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
727 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
728 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
729 }
730
731 /* No need to further configure vids if in promiscuous mode */
732 if (adapter->promiscuous)
733 return 0;
734
735 if (adapter->vlans_added <= adapter->max_vlans) {
736 /* Construct VLAN Table to give to HW */
737 for (i = 0; i < VLAN_N_VID; i++) {
738 if (adapter->vlan_tag[i]) {
739 vtag[ntags] = cpu_to_le16(i);
740 ntags++;
741 }
742 }
743 status = be_cmd_vlan_config(adapter, adapter->if_handle,
744 vtag, ntags, 1, 0);
745 } else {
746 status = be_cmd_vlan_config(adapter, adapter->if_handle,
747 NULL, 0, 1, 1);
748 }
749
750 return status;
751}
752
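/* Example: with VLAN ids 5 and 7 set in adapter->vlan_tag[], the table
 * {5, 7} is programmed via be_cmd_vlan_config(); once vlans_added
 * exceeds max_vlans a NULL table is sent with the last argument set
 * instead, which presumably turns on vlan-promiscuous mode in hw.
 */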
753static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
754{
755 struct be_adapter *adapter = netdev_priv(netdev);
756
757 adapter->vlans_added++;
758 if (!be_physfn(adapter))
759 return;
760
761 adapter->vlan_tag[vid] = 1;
762 if (adapter->vlans_added <= (adapter->max_vlans + 1))
763 be_vid_config(adapter, false, 0);
764}
765
766static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769
770 adapter->vlans_added--;
771
772 if (!be_physfn(adapter))
773 return;
774
775 adapter->vlan_tag[vid] = 0;
776 if (adapter->vlans_added <= adapter->max_vlans)
777 be_vid_config(adapter, false, 0);
778}
779
780static void be_set_multicast_list(struct net_device *netdev)
781{
782 struct be_adapter *adapter = netdev_priv(netdev);
783
784 if (netdev->flags & IFF_PROMISC) {
785 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
786 adapter->promiscuous = true;
787 goto done;
788 }
789
790 /* BE was previously in promiscuous mode; disable it */
791 if (adapter->promiscuous) {
792 adapter->promiscuous = false;
793 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
794
795 if (adapter->vlans_added)
796 be_vid_config(adapter, false, 0);
797 }
798
799 /* Enable multicast promisc if num configured exceeds what we support */
800 if (netdev->flags & IFF_ALLMULTI ||
801 netdev_mc_count(netdev) > BE_MAX_MC) {
802 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
803 goto done;
804 }
805
806 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
807done:
808 return;
809}
810
811static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
812{
813 struct be_adapter *adapter = netdev_priv(netdev);
814 int status;
815
816 if (!adapter->sriov_enabled)
817 return -EPERM;
818
819 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
820 return -EINVAL;
821
822 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
823 status = be_cmd_pmac_del(adapter,
824 adapter->vf_cfg[vf].vf_if_handle,
825 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
826
827 status = be_cmd_pmac_add(adapter, mac,
828 adapter->vf_cfg[vf].vf_if_handle,
829 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
830
831 if (status)
832 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
833 mac, vf);
834 else
835 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
836
837 return status;
838}
839
840static int be_get_vf_config(struct net_device *netdev, int vf,
841 struct ifla_vf_info *vi)
842{
843 struct be_adapter *adapter = netdev_priv(netdev);
844
845 if (!adapter->sriov_enabled)
846 return -EPERM;
847
848 if (vf >= num_vfs)
849 return -EINVAL;
850
851 vi->vf = vf;
852 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
853 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
854 vi->qos = 0;
855 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
856
857 return 0;
858}
859
860static int be_set_vf_vlan(struct net_device *netdev,
861 int vf, u16 vlan, u8 qos)
862{
863 struct be_adapter *adapter = netdev_priv(netdev);
864 int status = 0;
865
866 if (!adapter->sriov_enabled)
867 return -EPERM;
868
869 if ((vf >= num_vfs) || (vlan > 4095))
870 return -EINVAL;
871
872 if (vlan) {
873 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
874 adapter->vlans_added++;
875 } else {
876 adapter->vf_cfg[vf].vf_vlan_tag = 0;
877 adapter->vlans_added--;
878 }
879
880 status = be_vid_config(adapter, true, vf);
881
882 if (status)
883 dev_info(&adapter->pdev->dev,
884 "VLAN %d config on VF %d failed\n", vlan, vf);
885 return status;
886}
887
888static int be_set_vf_tx_rate(struct net_device *netdev,
889 int vf, int rate)
890{
891 struct be_adapter *adapter = netdev_priv(netdev);
892 int status = 0;
893
894 if (!adapter->sriov_enabled)
895 return -EPERM;
896
897 if ((vf >= num_vfs) || (rate < 0))
898 return -EINVAL;
899
900 if (rate > 10000)
901 rate = 10000;
902
903 adapter->vf_cfg[vf].vf_tx_rate = rate;
904 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
905
906 if (status)
907 dev_info(&adapter->pdev->dev,
908 "tx rate %d on VF %d failed\n", rate, vf);
909 return status;
910}
911
912static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
913{
914 struct be_eq_obj *rx_eq = &rxo->rx_eq;
915 struct be_rx_stats *stats = rx_stats(rxo);
916 ulong now = jiffies;
917 ulong delta = now - stats->rx_jiffies;
918 u64 pkts;
919 unsigned int start, eqd;
920
921 if (!rx_eq->enable_aic)
922 return;
923
924 /* Wrapped around */
925 if (time_before(now, stats->rx_jiffies)) {
926 stats->rx_jiffies = now;
927 return;
928 }
929
930 /* Update once a second */
931 if (delta < HZ)
932 return;
933
934 do {
935 start = u64_stats_fetch_begin_bh(&stats->sync);
936 pkts = stats->rx_pkts;
937 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
938
939 stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
940 stats->rx_pkts_prev = pkts;
941 stats->rx_jiffies = now;
942 eqd = stats->rx_pps / 110000;
943 eqd = eqd << 3;
944 if (eqd > rx_eq->max_eqd)
945 eqd = rx_eq->max_eqd;
946 if (eqd < rx_eq->min_eqd)
947 eqd = rx_eq->min_eqd;
948 if (eqd < 10)
949 eqd = 0;
950 if (eqd != rx_eq->cur_eqd) {
951 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
952 rx_eq->cur_eqd = eqd;
953 }
954}
955
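/* Worked example (assuming min_eqd is 0): at 220,000 pkts/sec,
 * eqd = (220000 / 110000) << 3 = 16; below 220,000 pps the computed
 * value is at most 8, which the eqd < 10 clamp forces to 0 so that
 * low packet rates get minimal interrupt delay.
 */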
956static void be_rx_stats_update(struct be_rx_obj *rxo,
957 struct be_rx_compl_info *rxcp)
958{
959 struct be_rx_stats *stats = rx_stats(rxo);
960
961 u64_stats_update_begin(&stats->sync);
962 stats->rx_compl++;
963 stats->rx_bytes += rxcp->pkt_size;
964 stats->rx_pkts++;
965 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
966 stats->rx_mcast_pkts++;
967 if (rxcp->err)
968 stats->rx_compl_err++;
969 u64_stats_update_end(&stats->sync);
970}
971
972static inline bool csum_passed(struct be_rx_compl_info *rxcp)
973{
974 /* L4 checksum is not reliable for non-TCP/UDP packets.
975 * Also ignore ipcksm for ipv6 pkts */
976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
977 (rxcp->ip_csum || rxcp->ipv6);
978}
979
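/* Example: a TCP/IPv4 completion with l4_csum and ip_csum set passes;
 * a non-TCP/UDP packet (tcpf == udpf == 0) never does, and for IPv6
 * ip_csum is ignored as there is no IP header checksum to validate.
 */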
980static struct be_rx_page_info *
981get_rx_page_info(struct be_adapter *adapter,
982 struct be_rx_obj *rxo,
983 u16 frag_idx)
984{
985 struct be_rx_page_info *rx_page_info;
986 struct be_queue_info *rxq = &rxo->q;
987
988 rx_page_info = &rxo->page_info_tbl[frag_idx];
989 BUG_ON(!rx_page_info->page);
990
991 if (rx_page_info->last_page_user) {
992 dma_unmap_page(&adapter->pdev->dev,
993 dma_unmap_addr(rx_page_info, bus),
994 adapter->big_page_size, DMA_FROM_DEVICE);
995 rx_page_info->last_page_user = false;
996 }
997
998 atomic_dec(&rxq->used);
999 return rx_page_info;
1000}
1001
1002/* Throw away the data in the Rx completion */
1003static void be_rx_compl_discard(struct be_adapter *adapter,
1004 struct be_rx_obj *rxo,
1005 struct be_rx_compl_info *rxcp)
1006{
1007 struct be_queue_info *rxq = &rxo->q;
1008 struct be_rx_page_info *page_info;
1009 u16 i, num_rcvd = rxcp->num_rcvd;
1010
1011 for (i = 0; i < num_rcvd; i++) {
1012 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1013 put_page(page_info->page);
1014 memset(page_info, 0, sizeof(*page_info));
1015 index_inc(&rxcp->rxq_idx, rxq->len);
1016 }
1017}
1018
1019/*
1020 * skb_fill_rx_data forms a complete skb for an ether frame
1021 * indicated by rxcp.
1022 */
1023static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1024 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1025{
1026 struct be_queue_info *rxq = &rxo->q;
1027 struct be_rx_page_info *page_info;
1028 u16 i, j;
1029 u16 hdr_len, curr_frag_len, remaining;
1030 u8 *start;
1031
1032 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1033 start = page_address(page_info->page) + page_info->page_offset;
1034 prefetch(start);
1035
1036 /* Copy data in the first descriptor of this completion */
1037 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1038
1039 /* Copy the header portion into skb_data */
1040 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1041 memcpy(skb->data, start, hdr_len);
1042 skb->len = curr_frag_len;
1043 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1044 /* Complete packet has now been moved to data */
1045 put_page(page_info->page);
1046 skb->data_len = 0;
1047 skb->tail += curr_frag_len;
1048 } else {
1049 skb_shinfo(skb)->nr_frags = 1;
1050 skb_shinfo(skb)->frags[0].page = page_info->page;
1051 skb_shinfo(skb)->frags[0].page_offset =
1052 page_info->page_offset + hdr_len;
1053 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1054 skb->data_len = curr_frag_len - hdr_len;
1055 skb->tail += hdr_len;
1056 }
1057 page_info->page = NULL;
1058
1059 if (rxcp->pkt_size <= rx_frag_size) {
1060 BUG_ON(rxcp->num_rcvd != 1);
1061 return;
1062 }
1063
1064 /* More frags present for this completion */
1065 index_inc(&rxcp->rxq_idx, rxq->len);
1066 remaining = rxcp->pkt_size - curr_frag_len;
1067 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1068 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1069 curr_frag_len = min(remaining, rx_frag_size);
1070
1071 /* Coalesce all frags from the same physical page in one slot */
1072 if (page_info->page_offset == 0) {
1073 /* Fresh page */
1074 j++;
1075 skb_shinfo(skb)->frags[j].page = page_info->page;
1076 skb_shinfo(skb)->frags[j].page_offset =
1077 page_info->page_offset;
1078 skb_shinfo(skb)->frags[j].size = 0;
1079 skb_shinfo(skb)->nr_frags++;
1080 } else {
1081 put_page(page_info->page);
1082 }
1083
1084 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1085 skb->len += curr_frag_len;
1086 skb->data_len += curr_frag_len;
1087
1088 remaining -= curr_frag_len;
1089 index_inc(&rxcp->rxq_idx, rxq->len);
1090 page_info->page = NULL;
1091 }
1092 BUG_ON(j > MAX_SKB_FRAGS);
1093}
1094
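/* Worked example (assuming BE_HDR_LEN is 64 and the default 2048-byte
 * rx_frag_size): a 3000-byte packet spans two RX frags; 64 header
 * bytes are memcpy'd into the skb linear area and the rest attached
 * as page frags: 1984 bytes in frags[0], with the trailing 952 in
 * frags[1] or coalesced into frags[0] if both frags share one page.
 */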
1095/* Process the RX completion indicated by rxcp when GRO is disabled */
1096static void be_rx_compl_process(struct be_adapter *adapter,
1097 struct be_rx_obj *rxo,
1098 struct be_rx_compl_info *rxcp)
1099{
1100 struct net_device *netdev = adapter->netdev;
1101 struct sk_buff *skb;
1102
1103 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1104 if (unlikely(!skb)) {
1105 rx_stats(rxo)->rx_drops_no_skbs++;
1106 be_rx_compl_discard(adapter, rxo, rxcp);
1107 return;
1108 }
1109
1110 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1111
1112 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1113 skb->ip_summed = CHECKSUM_UNNECESSARY;
1114 else
1115 skb_checksum_none_assert(skb);
1116
1117 skb->truesize = skb->len + sizeof(struct sk_buff);
1118 skb->protocol = eth_type_trans(skb, netdev);
1119 if (adapter->netdev->features & NETIF_F_RXHASH)
1120 skb->rxhash = rxcp->rss_hash;
1121
1123 if (unlikely(rxcp->vlanf))
1124 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1125
1126 netif_receive_skb(skb);
1127}
1128
1129/* Process the RX completion indicated by rxcp when GRO is enabled */
1130static void be_rx_compl_process_gro(struct be_adapter *adapter,
1131 struct be_rx_obj *rxo,
1132 struct be_rx_compl_info *rxcp)
1133{
1134 struct be_rx_page_info *page_info;
1135 struct sk_buff *skb = NULL;
1136 struct be_queue_info *rxq = &rxo->q;
1137 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1138 u16 remaining, curr_frag_len;
1139 u16 i, j;
1140
1141 skb = napi_get_frags(&eq_obj->napi);
1142 if (!skb) {
1143 be_rx_compl_discard(adapter, rxo, rxcp);
1144 return;
1145 }
1146
1147 remaining = rxcp->pkt_size;
1148 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1149 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1150
1151 curr_frag_len = min(remaining, rx_frag_size);
1152
1153 /* Coalesce all frags from the same physical page in one slot */
1154 if (i == 0 || page_info->page_offset == 0) {
1155 /* First frag or Fresh page */
1156 j++;
1157 skb_shinfo(skb)->frags[j].page = page_info->page;
1158 skb_shinfo(skb)->frags[j].page_offset =
1159 page_info->page_offset;
1160 skb_shinfo(skb)->frags[j].size = 0;
1161 } else {
1162 put_page(page_info->page);
1163 }
1164 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1165
1166 remaining -= curr_frag_len;
1167 index_inc(&rxcp->rxq_idx, rxq->len);
1168 memset(page_info, 0, sizeof(*page_info));
1169 }
1170 BUG_ON(j > MAX_SKB_FRAGS);
1171
1172 skb_shinfo(skb)->nr_frags = j + 1;
1173 skb->len = rxcp->pkt_size;
1174 skb->data_len = rxcp->pkt_size;
1175 skb->truesize += rxcp->pkt_size;
1176 skb->ip_summed = CHECKSUM_UNNECESSARY;
1177 if (adapter->netdev->features & NETIF_F_RXHASH)
1178 skb->rxhash = rxcp->rss_hash;
1179
1180 if (unlikely(rxcp->vlanf))
1181 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1182
1183 napi_gro_frags(&eq_obj->napi);
1184}
1185
1186static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1187 struct be_eth_rx_compl *compl,
1188 struct be_rx_compl_info *rxcp)
1189{
1190 rxcp->pkt_size =
1191 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1192 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1193 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1194 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1195 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1196 rxcp->ip_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1198 rxcp->l4_csum =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1200 rxcp->ipv6 =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1202 rxcp->rxq_idx =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1204 rxcp->num_rcvd =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1206 rxcp->pkt_type =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1208 rxcp->rss_hash =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1210 if (rxcp->vlanf) {
1211 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1212 compl);
1213 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1214 compl);
1215 }
1216 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1217}
1218
1219static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1220 struct be_eth_rx_compl *compl,
1221 struct be_rx_compl_info *rxcp)
1222{
1223 rxcp->pkt_size =
1224 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1225 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1226 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1227 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1228 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1229 rxcp->ip_csum =
1230 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1231 rxcp->l4_csum =
1232 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1233 rxcp->ipv6 =
1234 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1235 rxcp->rxq_idx =
1236 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1237 rxcp->num_rcvd =
1238 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1239 rxcp->pkt_type =
1240 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1241 rxcp->rss_hash =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1243 if (rxcp->vlanf) {
1244 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1245 compl);
1246 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1247 compl);
1248 }
1249 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1250}
1251
1252static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1253{
1254 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1255 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1256 struct be_adapter *adapter = rxo->adapter;
1257
1258 /* For checking the valid bit it is OK to use either definition, as the
1259 * valid bit is at the same position in both v0 and v1 Rx compl */
1260 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1261 return NULL;
1262
1263 rmb();
1264 be_dws_le_to_cpu(compl, sizeof(*compl));
1265
1266 if (adapter->be3_native)
1267 be_parse_rx_compl_v1(adapter, compl, rxcp);
1268 else
1269 be_parse_rx_compl_v0(adapter, compl, rxcp);
1270
1271 if (rxcp->vlanf) {
1272 /* vlanf could be wrongly set in some cards.
1273 * ignore if vtm is not set */
1274 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1275 rxcp->vlanf = 0;
1276
1277 if (!lancer_chip(adapter))
1278 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1279
1280 if (((adapter->pvid & VLAN_VID_MASK) ==
1281 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1282 !adapter->vlan_tag[rxcp->vlan_tag])
1283 rxcp->vlanf = 0;
1284 }
1285
1286 /* As the compl has been parsed, reset it; we won't touch it again */
1287 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1288
1289 queue_tail_inc(&rxo->cq);
1290 return rxcp;
1291}
1292
1293static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1294{
1295 u32 order = get_order(size);
1296
1297 if (order > 0)
1298 gfp |= __GFP_COMP;
1299 return alloc_pages(gfp, order);
1300}
1301
1302/*
1303 * Allocate a page, split it to fragments of size rx_frag_size and post as
1304 * receive buffers to BE
1305 */
1306static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1307{
1308 struct be_adapter *adapter = rxo->adapter;
1309 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1310 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1311 struct be_queue_info *rxq = &rxo->q;
1312 struct page *pagep = NULL;
1313 struct be_eth_rx_d *rxd;
1314 u64 page_dmaaddr = 0, frag_dmaaddr;
1315 u32 posted, page_offset = 0;
1316
1317 page_info = &rxo->page_info_tbl[rxq->head];
1318 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1319 if (!pagep) {
1320 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1321 if (unlikely(!pagep)) {
1322 rx_stats(rxo)->rx_post_fail++;
1323 break;
1324 }
1325 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1326 0, adapter->big_page_size,
1327 DMA_FROM_DEVICE);
1328 page_info->page_offset = 0;
1329 } else {
1330 get_page(pagep);
1331 page_info->page_offset = page_offset + rx_frag_size;
1332 }
1333 page_offset = page_info->page_offset;
1334 page_info->page = pagep;
1335 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1336 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1337
1338 rxd = queue_head_node(rxq);
1339 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1340 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1341
1342 /* Any space left in the current big page for another frag? */
1343 if ((page_offset + rx_frag_size + rx_frag_size) >
1344 adapter->big_page_size) {
1345 pagep = NULL;
1346 page_info->last_page_user = true;
1347 }
1348
1349 prev_page_info = page_info;
1350 queue_head_inc(rxq);
1351 page_info = &page_info_tbl[rxq->head];
1352 }
1353 if (pagep)
1354 prev_page_info->last_page_user = true;
1355
1356 if (posted) {
1357 atomic_add(posted, &rxq->used);
1358 be_rxq_notify(adapter, rxq->id, posted);
1359 } else if (atomic_read(&rxq->used) == 0) {
1360 /* Let be_worker replenish when memory is available */
1361 rxo->rx_post_starved = true;
1362 }
1363}
1364
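/* Worked example (assuming 4K pages and the default rx_frag_size of
 * 2048): big_page_size is 4096, so each page yields two frags at
 * offsets 0 and 2048; the second is flagged last_page_user so that
 * get_rx_page_info() unmaps the page only after both frags are used.
 */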
1365static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1366{
1367 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1368
1369 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1370 return NULL;
1371
1372 rmb();
1373 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1374
1375 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1376
1377 queue_tail_inc(tx_cq);
1378 return txcp;
1379}
1380
1381static u16 be_tx_compl_process(struct be_adapter *adapter,
1382 struct be_tx_obj *txo, u16 last_index)
1383{
1384 struct be_queue_info *txq = &txo->q;
1385 struct be_eth_wrb *wrb;
1386 struct sk_buff **sent_skbs = txo->sent_skb_list;
1387 struct sk_buff *sent_skb;
1388 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1389 bool unmap_skb_hdr = true;
1390
1391 sent_skb = sent_skbs[txq->tail];
1392 BUG_ON(!sent_skb);
1393 sent_skbs[txq->tail] = NULL;
1394
1395 /* skip header wrb */
1396 queue_tail_inc(txq);
1397
1398 do {
1399 cur_index = txq->tail;
1400 wrb = queue_tail_node(txq);
1401 unmap_tx_frag(&adapter->pdev->dev, wrb,
1402 (unmap_skb_hdr && skb_headlen(sent_skb)));
1403 unmap_skb_hdr = false;
1404
1405 num_wrbs++;
1406 queue_tail_inc(txq);
1407 } while (cur_index != last_index);
1408
1409 kfree_skb(sent_skb);
1410 return num_wrbs;
1411}
1412
1413static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1414{
1415 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1416
1417 if (!eqe->evt)
1418 return NULL;
1419
1420 rmb();
1421 eqe->evt = le32_to_cpu(eqe->evt);
1422 queue_tail_inc(&eq_obj->q);
1423 return eqe;
1424}
1425
1426static int event_handle(struct be_adapter *adapter,
1427 struct be_eq_obj *eq_obj,
1428 bool rearm)
1429{
1430 struct be_eq_entry *eqe;
1431 u16 num = 0;
1432
1433 while ((eqe = event_get(eq_obj)) != NULL) {
1434 eqe->evt = 0;
1435 num++;
1436 }
1437
1438 /* Deal with any spurious interrupts that come
1439 * without events
1440 */
1441 if (!num)
1442 rearm = true;
1443
1444 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1445 if (num)
1446 napi_schedule(&eq_obj->napi);
1447
1448 return num;
1449}
1450
1451/* Just read and notify events without processing them.
1452 * Used at the time of destroying event queues */
1453static void be_eq_clean(struct be_adapter *adapter,
1454 struct be_eq_obj *eq_obj)
1455{
1456 struct be_eq_entry *eqe;
1457 u16 num = 0;
1458
1459 while ((eqe = event_get(eq_obj)) != NULL) {
1460 eqe->evt = 0;
1461 num++;
1462 }
1463
1464 if (num)
1465 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1466}
1467
1468static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1469{
1470 struct be_rx_page_info *page_info;
1471 struct be_queue_info *rxq = &rxo->q;
1472 struct be_queue_info *rx_cq = &rxo->cq;
1473 struct be_rx_compl_info *rxcp;
1474 u16 tail;
1475
1476 /* First cleanup pending rx completions */
1477 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1478 be_rx_compl_discard(adapter, rxo, rxcp);
1479 be_cq_notify(adapter, rx_cq->id, false, 1);
1480 }
1481
1482 /* Then free posted rx buffers that were not used */
1483 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1484 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1485 page_info = get_rx_page_info(adapter, rxo, tail);
1486 put_page(page_info->page);
1487 memset(page_info, 0, sizeof(*page_info));
1488 }
1489 BUG_ON(atomic_read(&rxq->used));
1490 rxq->tail = rxq->head = 0;
1491}
1492
1493static void be_tx_compl_clean(struct be_adapter *adapter,
1494 struct be_tx_obj *txo)
1495{
1496 struct be_queue_info *tx_cq = &txo->cq;
1497 struct be_queue_info *txq = &txo->q;
1498 struct be_eth_tx_compl *txcp;
1499 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1500 struct sk_buff **sent_skbs = txo->sent_skb_list;
1501 struct sk_buff *sent_skb;
1502 bool dummy_wrb;
1503
1504 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1505 do {
1506 while ((txcp = be_tx_compl_get(tx_cq))) {
1507 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1508 wrb_index, txcp);
1509 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1510 cmpl++;
1511 }
1512 if (cmpl) {
1513 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1514 atomic_sub(num_wrbs, &txq->used);
1515 cmpl = 0;
1516 num_wrbs = 0;
1517 }
1518
1519 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1520 break;
1521
1522 mdelay(1);
1523 } while (true);
1524
1525 if (atomic_read(&txq->used))
1526 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1527 atomic_read(&txq->used));
1528
1529 /* free posted tx for which compls will never arrive */
1530 while (atomic_read(&txq->used)) {
1531 sent_skb = sent_skbs[txq->tail];
1532 end_idx = txq->tail;
1533 index_adv(&end_idx,
1534 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1535 txq->len);
1536 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1537 atomic_sub(num_wrbs, &txq->used);
1538 }
1539}
1540
1541static void be_mcc_queues_destroy(struct be_adapter *adapter)
1542{
1543 struct be_queue_info *q;
1544
1545 q = &adapter->mcc_obj.q;
1546 if (q->created)
1547 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1548 be_queue_free(adapter, q);
1549
1550 q = &adapter->mcc_obj.cq;
1551 if (q->created)
1552 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1553 be_queue_free(adapter, q);
1554}
1555
1556/* Must be called only after TX qs are created as MCC shares TX EQ */
1557static int be_mcc_queues_create(struct be_adapter *adapter)
1558{
1559 struct be_queue_info *q, *cq;
1560
1561 /* Alloc MCC compl queue */
1562 cq = &adapter->mcc_obj.cq;
1563 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1564 sizeof(struct be_mcc_compl)))
1565 goto err;
1566
1567 /* Ask BE to create MCC compl queue; share TX's eq */
1568 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1569 goto mcc_cq_free;
1570
1571 /* Alloc MCC queue */
1572 q = &adapter->mcc_obj.q;
1573 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1574 goto mcc_cq_destroy;
1575
1576 /* Ask BE to create MCC queue */
1577 if (be_cmd_mccq_create(adapter, q, cq))
1578 goto mcc_q_free;
1579
1580 return 0;
1581
1582mcc_q_free:
1583 be_queue_free(adapter, q);
1584mcc_cq_destroy:
1585 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1586mcc_cq_free:
1587 be_queue_free(adapter, cq);
1588err:
1589 return -1;
1590}
1591
1592static void be_tx_queues_destroy(struct be_adapter *adapter)
1593{
1594 struct be_queue_info *q;
1595 struct be_tx_obj *txo;
1596 u8 i;
1597
1598 for_all_tx_queues(adapter, txo, i) {
1599 q = &txo->q;
1600 if (q->created)
1601 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1602 be_queue_free(adapter, q);
1603
1604 q = &txo->cq;
1605 if (q->created)
1606 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1607 be_queue_free(adapter, q);
1608 }
1609
1610 /* Clear any residual events */
1611 be_eq_clean(adapter, &adapter->tx_eq);
1612
1613 q = &adapter->tx_eq.q;
1614 if (q->created)
1615 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1616 be_queue_free(adapter, q);
1617}
1618
1619/* One TX event queue is shared by all TX compl qs */
1620static int be_tx_queues_create(struct be_adapter *adapter)
1621{
1622 struct be_queue_info *eq, *q, *cq;
1623 struct be_tx_obj *txo;
1624 u8 i;
1625
1626 adapter->tx_eq.max_eqd = 0;
1627 adapter->tx_eq.min_eqd = 0;
1628 adapter->tx_eq.cur_eqd = 96;
1629 adapter->tx_eq.enable_aic = false;
1630
1631 eq = &adapter->tx_eq.q;
1632 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1633 sizeof(struct be_eq_entry)))
1634 return -1;
1635
1636 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1637 goto err;
1638 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1639
1640 for_all_tx_queues(adapter, txo, i) {
1641 cq = &txo->cq;
1642 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1643 sizeof(struct be_eth_tx_compl)))
1644 goto err;
1645
1646 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1647 goto err;
1648
1649 q = &txo->q;
1650 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1651 sizeof(struct be_eth_wrb)))
1652 goto err;
1653
1654 if (be_cmd_txq_create(adapter, q, cq))
1655 goto err;
1656 }
1657 return 0;
1658
1659err:
1660 be_tx_queues_destroy(adapter);
1661 return -1;
1662}
1663
1664static void be_rx_queues_destroy(struct be_adapter *adapter)
1665{
1666 struct be_queue_info *q;
1667 struct be_rx_obj *rxo;
1668 int i;
1669
1670 for_all_rx_queues(adapter, rxo, i) {
1671 be_queue_free(adapter, &rxo->q);
1672
1673 q = &rxo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677
1678 q = &rxo->rx_eq.q;
1679 if (q->created)
1680 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1681 be_queue_free(adapter, q);
1682 }
1683}
1684
1685static u32 be_num_rxqs_want(struct be_adapter *adapter)
1686{
1687 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1688 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1689 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1690 } else {
1691 dev_warn(&adapter->pdev->dev,
1692 "No support for multiple RX queues\n");
1693 return 1;
1694 }
1695}
1696
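/* Example: on an RSS-capable function without SR-IOV this asks for
 * 1 + MAX_RSS_QS rings (one default non-RSS queue plus RSS queues);
 * be_rx_queues_create() still caps the count by the number of MSI-X
 * vectors actually granted.
 */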
1697static int be_rx_queues_create(struct be_adapter *adapter)
1698{
1699 struct be_queue_info *eq, *q, *cq;
1700 struct be_rx_obj *rxo;
1701 int rc, i;
1702
1703 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1704 msix_enabled(adapter) ?
1705 adapter->num_msix_vec - 1 : 1);
1706 if (adapter->num_rx_qs != MAX_RX_QS)
1707 dev_warn(&adapter->pdev->dev,
1708 "Can create only %d RX queues\n", adapter->num_rx_qs);
1709
1710 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1711 for_all_rx_queues(adapter, rxo, i) {
1712 rxo->adapter = adapter;
1713 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1714 rxo->rx_eq.enable_aic = true;
1715
1716 /* EQ */
1717 eq = &rxo->rx_eq.q;
1718 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1719 sizeof(struct be_eq_entry));
1720 if (rc)
1721 goto err;
1722
1723 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1724 if (rc)
1725 goto err;
1726
1727 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1728
1729 /* CQ */
1730 cq = &rxo->cq;
1731 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1732 sizeof(struct be_eth_rx_compl));
1733 if (rc)
1734 goto err;
1735
1736 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1737 if (rc)
1738 goto err;
1739
1740 /* Rx Q - will be created in be_open() */
1741 q = &rxo->q;
1742 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1743 sizeof(struct be_eth_rx_d));
1744 if (rc)
1745 goto err;
1746
1747 }
1748
1749 return 0;
1750err:
1751 be_rx_queues_destroy(adapter);
1752 return -1;
1753}
1754
1755static bool event_peek(struct be_eq_obj *eq_obj)
1756{
1757 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1758
1759 return eqe->evt != 0;
1762}
1763
1764static irqreturn_t be_intx(int irq, void *dev)
1765{
1766 struct be_adapter *adapter = dev;
1767 struct be_rx_obj *rxo;
1768 int isr, i, tx = 0, rx = 0;
1769
1770 if (lancer_chip(adapter)) {
1771 if (event_peek(&adapter->tx_eq))
1772 tx = event_handle(adapter, &adapter->tx_eq, false);
1773 for_all_rx_queues(adapter, rxo, i) {
1774 if (event_peek(&rxo->rx_eq))
1775 rx |= event_handle(adapter, &rxo->rx_eq, true);
1776 }
1777
1778 if (!(tx || rx))
1779 return IRQ_NONE;
1780
1781 } else {
1782 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1783 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1784 if (!isr)
1785 return IRQ_NONE;
1786
1787 if ((1 << adapter->tx_eq.eq_idx & isr))
1788 event_handle(adapter, &adapter->tx_eq, false);
1789
1790 for_all_rx_queues(adapter, rxo, i) {
1791 if ((1 << rxo->rx_eq.eq_idx & isr))
1792 event_handle(adapter, &rxo->rx_eq, true);
1793 }
1794 }
1795
1796 return IRQ_HANDLED;
1797}
1798
1799static irqreturn_t be_msix_rx(int irq, void *dev)
1800{
1801 struct be_rx_obj *rxo = dev;
1802 struct be_adapter *adapter = rxo->adapter;
1803
1804 event_handle(adapter, &rxo->rx_eq, true);
1805
1806 return IRQ_HANDLED;
1807}
1808
1809static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1810{
1811 struct be_adapter *adapter = dev;
1812
1813 event_handle(adapter, &adapter->tx_eq, false);
1814
1815 return IRQ_HANDLED;
1816}
1817
1818static inline bool do_gro(struct be_rx_compl_info *rxcp)
1819{
1820 return rxcp->tcpf && !rxcp->err;
1821}
1822
1823static int be_poll_rx(struct napi_struct *napi, int budget)
1824{
1825 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1826 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1827 struct be_adapter *adapter = rxo->adapter;
1828 struct be_queue_info *rx_cq = &rxo->cq;
1829 struct be_rx_compl_info *rxcp;
1830 u32 work_done;
1831
1832 rx_stats(rxo)->rx_polls++;
1833 for (work_done = 0; work_done < budget; work_done++) {
1834 rxcp = be_rx_compl_get(rxo);
1835 if (!rxcp)
1836 break;
1837
1838 /* Is it a flush compl that has no data? */
1839 if (unlikely(rxcp->num_rcvd == 0))
1840 goto loop_continue;
1841
1842 /* Discard compl with partial DMA Lancer B0 */
1843 if (unlikely(!rxcp->pkt_size)) {
1844 be_rx_compl_discard(adapter, rxo, rxcp);
1845 goto loop_continue;
1846 }
1847
1848 /* On BE drop pkts that arrive due to imperfect filtering in
1849 * promiscuous mode on some SKUs
1850 */
1851 if (unlikely(rxcp->port != adapter->port_num &&
1852 !lancer_chip(adapter))) {
1853 be_rx_compl_discard(adapter, rxo, rxcp);
1854 goto loop_continue;
1855 }
1856
1857 if (do_gro(rxcp))
1858 be_rx_compl_process_gro(adapter, rxo, rxcp);
1859 else
1860 be_rx_compl_process(adapter, rxo, rxcp);
1861loop_continue:
1862 be_rx_stats_update(rxo, rxcp);
1863 }
1864
1865 /* Refill the queue */
1866 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1867 be_post_rx_frags(rxo, GFP_ATOMIC);
1868
1869 /* All consumed */
1870 if (work_done < budget) {
1871 napi_complete(napi);
1872 be_cq_notify(adapter, rx_cq->id, true, work_done);
1873 } else {
1874 /* More to be consumed; continue with interrupts disabled */
1875 be_cq_notify(adapter, rx_cq->id, false, work_done);
1876 }
1877 return work_done;
1878}
1879
1880/* As TX and MCC share the same EQ check for both TX and MCC completions.
1881 * For TX/MCC we don't honour budget; consume everything
1882 */
1883static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1884{
1885 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1886 struct be_adapter *adapter =
1887 container_of(tx_eq, struct be_adapter, tx_eq);
1888 struct be_tx_obj *txo;
1889 struct be_eth_tx_compl *txcp;
1890 int tx_compl, mcc_compl, status = 0;
1891 u8 i;
1892 u16 num_wrbs;
1893
1894 for_all_tx_queues(adapter, txo, i) {
1895 tx_compl = 0;
1896 num_wrbs = 0;
1897 while ((txcp = be_tx_compl_get(&txo->cq))) {
1898 num_wrbs += be_tx_compl_process(adapter, txo,
1899 AMAP_GET_BITS(struct amap_eth_tx_compl,
1900 wrb_index, txcp));
1901 tx_compl++;
1902 }
1903 if (tx_compl) {
1904 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1905
1906 atomic_sub(num_wrbs, &txo->q.used);
1907
1908 /* As Tx wrbs have been freed up, wake up netdev queue
1909 * if it was stopped due to lack of tx wrbs. */
1910 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1911 atomic_read(&txo->q.used) < txo->q.len / 2) {
1912 netif_wake_subqueue(adapter->netdev, i);
1913 }
1914
1915 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1916 tx_stats(txo)->tx_compl += tx_compl;
1917 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1918 }
1919 }
1920
1921 mcc_compl = be_process_mcc(adapter, &status);
1922
1923 if (mcc_compl) {
1924 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1925 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1926 }
1927
1928 napi_complete(napi);
1929
1930 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1931 adapter->drv_stats.tx_events++;
1932 return 1;
1933}
1934
1935void be_detect_dump_ue(struct be_adapter *adapter)
1936{
1937 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1938 u32 i;
1939
1940 pci_read_config_dword(adapter->pdev,
1941 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1942 pci_read_config_dword(adapter->pdev,
1943 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1944 pci_read_config_dword(adapter->pdev,
1945 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1946 pci_read_config_dword(adapter->pdev,
1947 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1948
1949 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1950 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1951
1952 if (ue_status_lo || ue_status_hi) {
1953 adapter->ue_detected = true;
1954 adapter->eeh_err = true;
1955 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1956 }
1957
1958 if (ue_status_lo) {
1959 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1960 if (ue_status_lo & 1)
1961 dev_err(&adapter->pdev->dev,
1962 "UE: %s bit set\n", ue_status_low_desc[i]);
1963 }
1964 }
1965 if (ue_status_hi) {
1966 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1967 if (ue_status_hi & 1)
1968 dev_err(&adapter->pdev->dev,
1969 "UE: %s bit set\n", ue_status_hi_desc[i]);
1970 }
1971 }
1973}
1974
1975static void be_worker(struct work_struct *work)
1976{
1977 struct be_adapter *adapter =
1978 container_of(work, struct be_adapter, work.work);
1979 struct be_rx_obj *rxo;
1980 int i;
1981
1982 if (!adapter->ue_detected && !lancer_chip(adapter))
1983 be_detect_dump_ue(adapter);
1984
1985 /* when interrupts are not yet enabled, just reap any pending
1986 * mcc completions */
1987 if (!netif_running(adapter->netdev)) {
1988 int mcc_compl, status = 0;
1989
1990 mcc_compl = be_process_mcc(adapter, &status);
1991
1992 if (mcc_compl) {
1993 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1994 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1995 }
1996
1997 goto reschedule;
1998 }
1999
2000 if (!adapter->stats_cmd_sent) {
2001 if (lancer_chip(adapter))
2002 lancer_cmd_get_pport_stats(adapter,
2003 &adapter->stats_cmd);
2004 else
2005 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2006 }
2007
2008 for_all_rx_queues(adapter, rxo, i) {
2009 be_rx_eqd_update(adapter, rxo);
2010
2011 if (rxo->rx_post_starved) {
2012 rxo->rx_post_starved = false;
2013 be_post_rx_frags(rxo, GFP_KERNEL);
2014 }
2015 }
2016
2017reschedule:
2018 adapter->work_counter++;
2019 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2020}
2021
2022static void be_msix_disable(struct be_adapter *adapter)
2023{
2024 if (msix_enabled(adapter)) {
2025 pci_disable_msix(adapter->pdev);
2026 adapter->num_msix_vec = 0;
2027 }
2028}
2029
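/* Ask for one MSI-X vector per desired RX queue plus one for TX/MCC. If
 * the full set cannot be granted, retry with whatever count the device
 * reports it can support, provided it meets the Rx + Tx minimum.
 */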
2030static void be_msix_enable(struct be_adapter *adapter)
2031{
2032#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2033 int i, status, num_vec;
2034
2035 num_vec = be_num_rxqs_want(adapter) + 1;
2036
2037 for (i = 0; i < num_vec; i++)
2038 adapter->msix_entries[i].entry = i;
2039
2040 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2041 if (status == 0) {
2042 goto done;
2043 } else if (status >= BE_MIN_MSIX_VECTORS) {
2044 num_vec = status;
2045 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2046 num_vec) == 0)
2047 goto done;
2048 }
2049 return;
2050done:
2051 adapter->num_msix_vec = num_vec;
2052 return;
2053}
2054
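/* On a PF with num_vfs requested, enable SR-IOV; the VF count is capped
 * at the TotalVFs value read from the device's SR-IOV capability.
 */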
2055static void be_sriov_enable(struct be_adapter *adapter)
2056{
2057 be_check_sriov_fn_type(adapter);
2058#ifdef CONFIG_PCI_IOV
2059 if (be_physfn(adapter) && num_vfs) {
2060 int status, pos;
2061 u16 nvfs;
2062
2063 pos = pci_find_ext_capability(adapter->pdev,
2064 PCI_EXT_CAP_ID_SRIOV);
2065 pci_read_config_word(adapter->pdev,
2066 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2067
2068 if (num_vfs > nvfs) {
2069 dev_info(&adapter->pdev->dev,
2070 "Device supports %d VFs and not %d\n",
2071 nvfs, num_vfs);
2072 num_vfs = nvfs;
2073 }
2074
2075 status = pci_enable_sriov(adapter->pdev, num_vfs);
2076 adapter->sriov_enabled = status ? false : true;
2077 }
2078#endif
2079}
2080
2081static void be_sriov_disable(struct be_adapter *adapter)
2082{
2083#ifdef CONFIG_PCI_IOV
2084 if (adapter->sriov_enabled) {
2085 pci_disable_sriov(adapter->pdev);
2086 adapter->sriov_enabled = false;
2087 }
2088#endif
2089}
2090
2091static inline int be_msix_vec_get(struct be_adapter *adapter,
2092 struct be_eq_obj *eq_obj)
2093{
2094 return adapter->msix_entries[eq_obj->eq_idx].vector;
2095}
2096
2097static int be_request_irq(struct be_adapter *adapter,
2098 struct be_eq_obj *eq_obj,
2099 void *handler, char *desc, void *context)
2100{
2101 struct net_device *netdev = adapter->netdev;
2102 int vec;
2103
2104 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2105 vec = be_msix_vec_get(adapter, eq_obj);
2106 return request_irq(vec, handler, 0, eq_obj->desc, context);
2107}
2108
2109static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2110 void *context)
2111{
2112 int vec = be_msix_vec_get(adapter, eq_obj);
2113 free_irq(vec, context);
2114}
2115
2116static int be_msix_register(struct be_adapter *adapter)
2117{
2118 struct be_rx_obj *rxo;
2119 int status, i;
2120 char qname[10];
2121
2122 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2123 adapter);
2124 if (status)
2125 goto err;
2126
2127 for_all_rx_queues(adapter, rxo, i) {
2128 sprintf(qname, "rxq%d", i);
2129 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2130 qname, rxo);
2131 if (status)
2132 goto err_msix;
2133 }
2134
2135 return 0;
2136
2137err_msix:
2138 be_free_irq(adapter, &adapter->tx_eq, adapter);
2139
2140 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2141 be_free_irq(adapter, &rxo->rx_eq, rxo);
2142
2143err:
2144 dev_warn(&adapter->pdev->dev,
2145 "MSIX Request IRQ failed - err %d\n", status);
2146 be_msix_disable(adapter);
2147 return status;
2148}
2149
2150static int be_irq_register(struct be_adapter *adapter)
2151{
2152 struct net_device *netdev = adapter->netdev;
2153 int status;
2154
2155 if (msix_enabled(adapter)) {
2156 status = be_msix_register(adapter);
2157 if (status == 0)
2158 goto done;
2159 /* INTx is not supported for VF */
2160 if (!be_physfn(adapter))
2161 return status;
2162 }
2163
2164 /* INTx */
2165 netdev->irq = adapter->pdev->irq;
2166 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2167 adapter);
2168 if (status) {
2169 dev_err(&adapter->pdev->dev,
2170 "INTx request IRQ failed - err %d\n", status);
2171 return status;
2172 }
2173done:
2174 adapter->isr_registered = true;
2175 return 0;
2176}
2177
2178static void be_irq_unregister(struct be_adapter *adapter)
2179{
2180 struct net_device *netdev = adapter->netdev;
2181 struct be_rx_obj *rxo;
2182 int i;
2183
2184 if (!adapter->isr_registered)
2185 return;
2186
2187 /* INTx */
2188 if (!msix_enabled(adapter)) {
2189 free_irq(netdev->irq, adapter);
2190 goto done;
2191 }
2192
2193 /* MSIx */
2194 be_free_irq(adapter, &adapter->tx_eq, adapter);
2195
2196 for_all_rx_queues(adapter, rxo, i)
2197 be_free_irq(adapter, &rxo->rx_eq, rxo);
2198
2199done:
2200 adapter->isr_registered = false;
2201}
2202
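/* Destroy the RX rings, drain any buffers still posted to them and clear
 * residual events left on the RX event queues.
 */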
2203static void be_rx_queues_clear(struct be_adapter *adapter)
2204{
2205 struct be_queue_info *q;
2206 struct be_rx_obj *rxo;
2207 int i;
2208
2209 for_all_rx_queues(adapter, rxo, i) {
2210 q = &rxo->q;
2211 if (q->created) {
2212 be_cmd_rxq_destroy(adapter, q);
2213 /* After the rxq is invalidated, wait for a grace time
2214 * of 1ms for all dma to end and the flush compl to
2215 * arrive
2216 */
2217 mdelay(1);
2218 be_rx_q_clean(adapter, rxo);
2219 }
2220
2221 /* Clear any residual events */
2222 q = &rxo->rx_eq.q;
2223 if (q->created)
2224 be_eq_clean(adapter, &rxo->rx_eq);
2225 }
2226}
2227
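/* ndo_stop: quiesce the interface. Disables async MCC processing and
 * interrupts, stops NAPI, synchronizes and frees the IRQs, then drains
 * pending TX completions and clears the RX queues.
 */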
2228static int be_close(struct net_device *netdev)
2229{
2230 struct be_adapter *adapter = netdev_priv(netdev);
2231 struct be_rx_obj *rxo;
2232 struct be_tx_obj *txo;
2233 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2234 int vec, i;
2235
2236 be_async_mcc_disable(adapter);
2237
2238 if (!lancer_chip(adapter))
2239 be_intr_set(adapter, false);
2240
2241 for_all_rx_queues(adapter, rxo, i)
2242 napi_disable(&rxo->rx_eq.napi);
2243
2244 napi_disable(&tx_eq->napi);
2245
2246 if (lancer_chip(adapter)) {
2247 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2248 for_all_rx_queues(adapter, rxo, i)
2249 be_cq_notify(adapter, rxo->cq.id, false, 0);
2250 for_all_tx_queues(adapter, txo, i)
2251 be_cq_notify(adapter, txo->cq.id, false, 0);
2252 }
2253
2254 if (msix_enabled(adapter)) {
2255 vec = be_msix_vec_get(adapter, tx_eq);
2256 synchronize_irq(vec);
2257
2258 for_all_rx_queues(adapter, rxo, i) {
2259 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2260 synchronize_irq(vec);
2261 }
2262 } else {
2263 synchronize_irq(netdev->irq);
2264 }
2265 be_irq_unregister(adapter);
2266
2267 /* Wait for all pending tx completions to arrive so that
2268 * all tx skbs are freed.
2269 */
2270 for_all_tx_queues(adapter, txo, i)
2271 be_tx_compl_clean(adapter, txo);
2272
2273 be_rx_queues_clear(adapter);
2274 return 0;
2275}
2276
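/* Create the RX rings (ring 0 is the default ring, the rest are RSS
 * rings), program the RSS table when multiple rings are in use, then post
 * the initial RX buffers and enable NAPI on each ring.
 */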
2277static int be_rx_queues_setup(struct be_adapter *adapter)
2278{
2279 struct be_rx_obj *rxo;
2280 int rc, i;
2281 u8 rsstable[MAX_RSS_QS];
2282
2283 for_all_rx_queues(adapter, rxo, i) {
2284 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2285 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2286 adapter->if_handle,
2287 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2288 if (rc)
2289 return rc;
2290 }
2291
2292 if (be_multi_rxq(adapter)) {
2293 for_all_rss_queues(adapter, rxo, i)
2294 rsstable[i] = rxo->rss_id;
2295
2296 rc = be_cmd_rss_config(adapter, rsstable,
2297 adapter->num_rx_qs - 1);
2298 if (rc)
2299 return rc;
2300 }
2301
2302 /* First time posting */
2303 for_all_rx_queues(adapter, rxo, i) {
2304 be_post_rx_frags(rxo, GFP_KERNEL);
2305 napi_enable(&rxo->rx_eq.napi);
2306 }
2307 return 0;
2308}
2309
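/* ndo_open: the counterpart of be_close(). Sets up the RX queues,
 * registers IRQs, arms the event and completion queues and re-enables
 * async MCC processing; the PF also restores VLAN and flow-control
 * settings.
 */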
2310static int be_open(struct net_device *netdev)
2311{
2312 struct be_adapter *adapter = netdev_priv(netdev);
2313 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2314 struct be_rx_obj *rxo;
2315 int status, i;
2316
2317 status = be_rx_queues_setup(adapter);
2318 if (status)
2319 goto err;
2320
2321 napi_enable(&tx_eq->napi);
2322
2323 be_irq_register(adapter);
2324
2325 if (!lancer_chip(adapter))
2326 be_intr_set(adapter, true);
2327
2328 /* The evt queues are created in unarmed state; arm them */
2329 for_all_rx_queues(adapter, rxo, i) {
2330 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2331 be_cq_notify(adapter, rxo->cq.id, true, 0);
2332 }
2333 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2334
2335 /* Now that interrupts are on we can process async mcc */
2336 be_async_mcc_enable(adapter);
2337
2338 if (be_physfn(adapter)) {
2339 status = be_vid_config(adapter, false, 0);
2340 if (status)
2341 goto err;
2342
2343 status = be_cmd_set_flow_control(adapter,
2344 adapter->tx_fc, adapter->rx_fc);
2345 if (status)
2346 goto err;
2347 }
2348
2349 return 0;
2350err:
2351 be_close(adapter->netdev);
2352 return -EIO;
2353}
2354
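/* Program the magic-packet Wake-on-LAN filter (an all-zero MAC clears it
 * on disable) and set the PCI wake capability for D3hot/D3cold to match.
 */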
2355static int be_setup_wol(struct be_adapter *adapter, bool enable)
2356{
2357 struct be_dma_mem cmd;
2358 int status = 0;
2359 u8 mac[ETH_ALEN];
2360
2361 memset(mac, 0, ETH_ALEN);
2362
2363 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2364 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2365 GFP_KERNEL);
2366 if (cmd.va == NULL)
2367 return -1;
2368 memset(cmd.va, 0, cmd.size);
2369
2370 if (enable) {
2371 status = pci_write_config_dword(adapter->pdev,
2372 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2373 if (status) {
2374 dev_err(&adapter->pdev->dev,
2375				"Could not enable Wake-on-LAN\n");
2376 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2377 cmd.dma);
2378 return status;
2379 }
2380 status = be_cmd_enable_magic_wol(adapter,
2381 adapter->netdev->dev_addr, &cmd);
2382 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2383 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2384 } else {
2385 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2386 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2387 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2388 }
2389
2390 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2391 return status;
2392}
2393
2394/*
2395 * Generate a seed MAC address from the PF MAC address using jhash.
2396 * MAC addresses for the VFs are assigned incrementally, starting from the
2397 * seed. These addresses are programmed into the ASIC by the PF; each VF
2398 * driver queries its own MAC address during probe.
2399 */
2400static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2401{
2402 u32 vf = 0;
2403 int status = 0;
2404 u8 mac[ETH_ALEN];
2405
2406 be_vf_eth_addr_generate(adapter, mac);
2407
2408 for (vf = 0; vf < num_vfs; vf++) {
2409 status = be_cmd_pmac_add(adapter, mac,
2410 adapter->vf_cfg[vf].vf_if_handle,
2411 &adapter->vf_cfg[vf].vf_pmac_id,
2412 vf + 1);
2413 if (status)
2414 dev_err(&adapter->pdev->dev,
2415				"MAC address add failed for VF %d\n", vf);
2416 else
2417 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2418
2419 mac[5] += 1;
2420 }
2421 return status;
2422}
2423
2424static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2425{
2426 u32 vf;
2427
2428 for (vf = 0; vf < num_vfs; vf++) {
2429 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2430 be_cmd_pmac_del(adapter,
2431 adapter->vf_cfg[vf].vf_if_handle,
2432 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2433 }
2434}
2435
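/* One-time interface setup: create the interface object (with RSS and
 * promiscuous capabilities on the PF), the VF interfaces when SR-IOV is
 * enabled, and the TX/RX/MCC queues, unwinding everything on failure.
 */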
2436static int be_setup(struct be_adapter *adapter)
2437{
2438 struct net_device *netdev = adapter->netdev;
2439 u32 cap_flags, en_flags, vf = 0;
2440 int status;
2441 u8 mac[ETH_ALEN];
2442
2443 be_cmd_req_native_mode(adapter);
2444
2445 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2446 BE_IF_FLAGS_BROADCAST |
2447 BE_IF_FLAGS_MULTICAST;
2448
2449 if (be_physfn(adapter)) {
2450 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2451 BE_IF_FLAGS_PROMISCUOUS |
2452 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2453 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2454
2455 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2456 cap_flags |= BE_IF_FLAGS_RSS;
2457 en_flags |= BE_IF_FLAGS_RSS;
2458 }
2459 }
2460
2461 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2462 netdev->dev_addr, false/* pmac_invalid */,
2463 &adapter->if_handle, &adapter->pmac_id, 0);
2464 if (status != 0)
2465 goto do_none;
2466
2467 if (be_physfn(adapter)) {
2468 if (adapter->sriov_enabled) {
2469 while (vf < num_vfs) {
2470 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2471 BE_IF_FLAGS_BROADCAST;
2472 status = be_cmd_if_create(adapter, cap_flags,
2473 en_flags, mac, true,
2474 &adapter->vf_cfg[vf].vf_if_handle,
2475 NULL, vf+1);
2476 if (status) {
2477 dev_err(&adapter->pdev->dev,
2478 "Interface Create failed for VF %d\n",
2479 vf);
2480 goto if_destroy;
2481 }
2482 adapter->vf_cfg[vf].vf_pmac_id =
2483 BE_INVALID_PMAC_ID;
2484 vf++;
2485 }
2486 }
2487 } else {
2488 status = be_cmd_mac_addr_query(adapter, mac,
2489 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2490 if (!status) {
2491 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2492 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2493 }
2494 }
2495
2496 status = be_tx_queues_create(adapter);
2497 if (status != 0)
2498 goto if_destroy;
2499
2500 status = be_rx_queues_create(adapter);
2501 if (status != 0)
2502 goto tx_qs_destroy;
2503
2504 /* Allow all priorities by default. A GRP5 evt may modify this */
2505 adapter->vlan_prio_bmap = 0xff;
2506
2507 status = be_mcc_queues_create(adapter);
2508 if (status != 0)
2509 goto rx_qs_destroy;
2510
2511 adapter->link_speed = -1;
2512
2513 return 0;
2514
2515rx_qs_destroy:
2516 be_rx_queues_destroy(adapter);
2517tx_qs_destroy:
2518 be_tx_queues_destroy(adapter);
2519if_destroy:
2520 if (be_physfn(adapter) && adapter->sriov_enabled)
2521 for (vf = 0; vf < num_vfs; vf++)
2522 if (adapter->vf_cfg[vf].vf_if_handle)
2523 be_cmd_if_destroy(adapter,
2524 adapter->vf_cfg[vf].vf_if_handle,
2525 vf + 1);
2526 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2527do_none:
2528 return status;
2529}
2530
2531static int be_clear(struct be_adapter *adapter)
2532{
2533 int vf;
2534
2535 if (be_physfn(adapter) && adapter->sriov_enabled)
2536 be_vf_eth_addr_rem(adapter);
2537
2538 be_mcc_queues_destroy(adapter);
2539 be_rx_queues_destroy(adapter);
2540 be_tx_queues_destroy(adapter);
2541 adapter->eq_next_idx = 0;
2542
2543 if (be_physfn(adapter) && adapter->sriov_enabled)
2544 for (vf = 0; vf < num_vfs; vf++)
2545 if (adapter->vf_cfg[vf].vf_if_handle)
2546 be_cmd_if_destroy(adapter,
2547 adapter->vf_cfg[vf].vf_if_handle,
2548 vf + 1);
2549
2550 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2551
2552 adapter->be3_native = 0;
2553
2554 /* tell fw we're done with firing cmds */
2555 be_cmd_fw_clean(adapter);
2556 return 0;
2557}
2558
2559
2560#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
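/* Returns true only when the CRC recorded in flash differs from the CRC
 * of the redboot section in the new FW image, i.e. when redboot actually
 * needs to be rewritten.
 */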
2561static bool be_flash_redboot(struct be_adapter *adapter,
2562 const u8 *p, u32 img_start, int image_size,
2563 int hdr_size)
2564{
2565 u32 crc_offset;
2566 u8 flashed_crc[4];
2567 int status;
2568
2569 crc_offset = hdr_size + img_start + image_size - 4;
2570
2571 p += crc_offset;
2572
2573 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2574 (image_size - 4));
2575 if (status) {
2576 dev_err(&adapter->pdev->dev,
2577 "could not get crc from flash, not flashing redboot\n");
2578 return false;
2579 }
2580
2581	/* Update redboot only if the CRC does not match */
2582 if (!memcmp(flashed_crc, p, 4))
2583 return false;
2584 else
2585 return true;
2586}
2587
2588static bool phy_flashing_required(struct be_adapter *adapter)
2589{
2590 int status = 0;
2591 struct be_phy_info phy_info;
2592
2593 status = be_cmd_get_phy_info(adapter, &phy_info);
2594 if (status)
2595 return false;
2596 if ((phy_info.phy_type == TN_8022) &&
2597 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2598 return true;
2599 }
2600 return false;
2601}
2602
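/* Walk the per-generation flash layout table and write each section of
 * the UFI image to its region in 32KB chunks: intermediate chunks use a
 * SAVE op, the final chunk a FLASH op. Redboot is skipped when its CRC
 * already matches, PHY FW when no flashable PHY is present, and NCSI FW
 * on firmware versions older than 3.102.148.0.
 */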
2603static int be_flash_data(struct be_adapter *adapter,
2604 const struct firmware *fw,
2605 struct be_dma_mem *flash_cmd, int num_of_images)
2606{
2608 int status = 0, i, filehdr_size = 0;
2609 u32 total_bytes = 0, flash_op;
2610 int num_bytes;
2611 const u8 *p = fw->data;
2612 struct be_cmd_write_flashrom *req = flash_cmd->va;
2613 const struct flash_comp *pflashcomp;
2614 int num_comp;
2615
2616 static const struct flash_comp gen3_flash_types[10] = {
2617 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2618 FLASH_IMAGE_MAX_SIZE_g3},
2619 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2620 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2621 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2622 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2623 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2624 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2625 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2626 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2627 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2628 FLASH_IMAGE_MAX_SIZE_g3},
2629 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2630 FLASH_IMAGE_MAX_SIZE_g3},
2631 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2632 FLASH_IMAGE_MAX_SIZE_g3},
2633 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2634 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2635 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2636 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2637 };
2638 static const struct flash_comp gen2_flash_types[8] = {
2639 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2640 FLASH_IMAGE_MAX_SIZE_g2},
2641 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2642 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2643 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2644 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2645 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2646 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2647 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2648 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2649 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2650 FLASH_IMAGE_MAX_SIZE_g2},
2651 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2652 FLASH_IMAGE_MAX_SIZE_g2},
2653 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2654 FLASH_IMAGE_MAX_SIZE_g2}
2655 };
2656
2657 if (adapter->generation == BE_GEN3) {
2658 pflashcomp = gen3_flash_types;
2659 filehdr_size = sizeof(struct flash_file_hdr_g3);
2660 num_comp = ARRAY_SIZE(gen3_flash_types);
2661 } else {
2662 pflashcomp = gen2_flash_types;
2663 filehdr_size = sizeof(struct flash_file_hdr_g2);
2664 num_comp = ARRAY_SIZE(gen2_flash_types);
2665 }
2666 for (i = 0; i < num_comp; i++) {
2667 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2668 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2669 continue;
2670 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2671 if (!phy_flashing_required(adapter))
2672 continue;
2673 }
2674 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2675 (!be_flash_redboot(adapter, fw->data,
2676 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2677 (num_of_images * sizeof(struct image_hdr)))))
2678 continue;
2679 p = fw->data;
2680 p += filehdr_size + pflashcomp[i].offset
2681 + (num_of_images * sizeof(struct image_hdr));
2682 if (p + pflashcomp[i].size > fw->data + fw->size)
2683 return -1;
2684 total_bytes = pflashcomp[i].size;
2685 while (total_bytes) {
2686 if (total_bytes > 32*1024)
2687 num_bytes = 32*1024;
2688 else
2689 num_bytes = total_bytes;
2690 total_bytes -= num_bytes;
2691 if (!total_bytes) {
2692 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2693 flash_op = FLASHROM_OPER_PHY_FLASH;
2694 else
2695 flash_op = FLASHROM_OPER_FLASH;
2696 } else {
2697 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2698 flash_op = FLASHROM_OPER_PHY_SAVE;
2699 else
2700 flash_op = FLASHROM_OPER_SAVE;
2701 }
2702 memcpy(req->params.data_buf, p, num_bytes);
2703 p += num_bytes;
2704 status = be_cmd_write_flashrom(adapter, flash_cmd,
2705 pflashcomp[i].optype, flash_op, num_bytes);
2706 if (status) {
2707 if ((status == ILLEGAL_IOCTL_REQ) &&
2708 (pflashcomp[i].optype ==
2709 IMG_TYPE_PHY_FW))
2710 break;
2711 dev_err(&adapter->pdev->dev,
2712 "cmd to write to flash rom failed.\n");
2713 return -1;
2714 }
2715 }
2716 }
2717 return 0;
2718}
2719
2720static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2721{
2722 if (fhdr == NULL)
2723 return 0;
2724 if (fhdr->build[0] == '3')
2725 return BE_GEN3;
2726 else if (fhdr->build[0] == '2')
2727 return BE_GEN2;
2728 else
2729 return 0;
2730}
2731
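/* Lancer flashes via the write_object command rather than flashrom ops:
 * the image (which must be 4-byte aligned) is streamed to the "/prg"
 * object in 32KB chunks and finally committed with a zero-length write.
 */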
2732static int lancer_fw_download(struct be_adapter *adapter,
2733 const struct firmware *fw)
2734{
2735#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2736#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2737 struct be_dma_mem flash_cmd;
2738 const u8 *data_ptr = NULL;
2739 u8 *dest_image_ptr = NULL;
2740 size_t image_size = 0;
2741 u32 chunk_size = 0;
2742 u32 data_written = 0;
2743 u32 offset = 0;
2744 int status = 0;
2745 u8 add_status = 0;
2746
2747 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2748 dev_err(&adapter->pdev->dev,
2749			"FW image not properly aligned. "
2750			"Length must be 4-byte aligned.\n");
2751 status = -EINVAL;
2752 goto lancer_fw_exit;
2753 }
2754
2755 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2756 + LANCER_FW_DOWNLOAD_CHUNK;
2757 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2758 &flash_cmd.dma, GFP_KERNEL);
2759 if (!flash_cmd.va) {
2760 status = -ENOMEM;
2761 dev_err(&adapter->pdev->dev,
2762 "Memory allocation failure while flashing\n");
2763 goto lancer_fw_exit;
2764 }
2765
2766 dest_image_ptr = flash_cmd.va +
2767 sizeof(struct lancer_cmd_req_write_object);
2768 image_size = fw->size;
2769 data_ptr = fw->data;
2770
2771 while (image_size) {
2772 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2773
2774 /* Copy the image chunk content. */
2775 memcpy(dest_image_ptr, data_ptr, chunk_size);
2776
2777 status = lancer_cmd_write_object(adapter, &flash_cmd,
2778 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2779 &data_written, &add_status);
2780
2781 if (status)
2782 break;
2783
2784 offset += data_written;
2785 data_ptr += data_written;
2786 image_size -= data_written;
2787 }
2788
2789 if (!status) {
2790 /* Commit the FW written */
2791 status = lancer_cmd_write_object(adapter, &flash_cmd,
2792 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2793 &data_written, &add_status);
2794 }
2795
2796 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2797 flash_cmd.dma);
2798 if (status) {
2799 dev_err(&adapter->pdev->dev,
2800 "Firmware load error. "
2801 "Status code: 0x%x Additional Status: 0x%x\n",
2802 status, add_status);
2803 goto lancer_fw_exit;
2804 }
2805
2806 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2807lancer_fw_exit:
2808 return status;
2809}
2810
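/* BE2/BE3 flashing: verify that the UFI file generation matches the
 * adapter generation, then flash the image sections (for gen3 UFIs, only
 * images with id 1) through be_flash_data().
 */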
2811static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2812{
2813 struct flash_file_hdr_g2 *fhdr;
2814 struct flash_file_hdr_g3 *fhdr3;
2815 struct image_hdr *img_hdr_ptr = NULL;
2816 struct be_dma_mem flash_cmd;
2817 const u8 *p;
2818 int status = 0, i = 0, num_imgs = 0;
2819
2820 p = fw->data;
2821 fhdr = (struct flash_file_hdr_g2 *) p;
2822
2823 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2824 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2825 &flash_cmd.dma, GFP_KERNEL);
2826 if (!flash_cmd.va) {
2827 status = -ENOMEM;
2828 dev_err(&adapter->pdev->dev,
2829 "Memory allocation failure while flashing\n");
2830 goto be_fw_exit;
2831 }
2832
2833 if ((adapter->generation == BE_GEN3) &&
2834 (get_ufigen_type(fhdr) == BE_GEN3)) {
2835 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2836 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2837 for (i = 0; i < num_imgs; i++) {
2838 img_hdr_ptr = (struct image_hdr *) (fw->data +
2839 (sizeof(struct flash_file_hdr_g3) +
2840 i * sizeof(struct image_hdr)));
2841 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2842 status = be_flash_data(adapter, fw, &flash_cmd,
2843 num_imgs);
2844 }
2845 } else if ((adapter->generation == BE_GEN2) &&
2846 (get_ufigen_type(fhdr) == BE_GEN2)) {
2847 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2848 } else {
2849 dev_err(&adapter->pdev->dev,
2850 "UFI and Interface are not compatible for flashing\n");
2851 status = -1;
2852 }
2853
2854 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2855 flash_cmd.dma);
2856 if (status) {
2857 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2858 goto be_fw_exit;
2859 }
2860
2861 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2862
2863be_fw_exit:
2864 return status;
2865}
2866
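/* Entry point for firmware flashing (e.g. via ethtool). Flashing is
 * refused while the interface is down; the image is then handed to the
 * Lancer or BE flashing path as appropriate.
 */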
2867int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2868{
2869 const struct firmware *fw;
2870 int status;
2871
2872 if (!netif_running(adapter->netdev)) {
2873 dev_err(&adapter->pdev->dev,
2874 "Firmware load not allowed (interface is down)\n");
2875 return -1;
2876 }
2877
2878 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2879 if (status)
2880 goto fw_exit;
2881
2882 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2883
2884 if (lancer_chip(adapter))
2885 status = lancer_fw_download(adapter, fw);
2886 else
2887 status = be_fw_download(adapter, fw);
2888
2889fw_exit:
2890 release_firmware(fw);
2891 return status;
2892}
2893
2894static struct net_device_ops be_netdev_ops = {
2895 .ndo_open = be_open,
2896 .ndo_stop = be_close,
2897 .ndo_start_xmit = be_xmit,
2898 .ndo_set_rx_mode = be_set_multicast_list,
2899 .ndo_set_mac_address = be_mac_addr_set,
2900 .ndo_change_mtu = be_change_mtu,
2901 .ndo_get_stats64 = be_get_stats64,
2902 .ndo_validate_addr = eth_validate_addr,
2903 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2904 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2905 .ndo_set_vf_mac = be_set_vf_mac,
2906 .ndo_set_vf_vlan = be_set_vf_vlan,
2907 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2908 .ndo_get_vf_config = be_get_vf_config
2909};
2910
2911static void be_netdev_init(struct net_device *netdev)
2912{
2913 struct be_adapter *adapter = netdev_priv(netdev);
2914 struct be_rx_obj *rxo;
2915 int i;
2916
2917 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2918 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2919 NETIF_F_HW_VLAN_TX;
2920 if (be_multi_rxq(adapter))
2921 netdev->hw_features |= NETIF_F_RXHASH;
2922
2923 netdev->features |= netdev->hw_features |
2924 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2925
2926 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2927 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2928
2929 netdev->flags |= IFF_MULTICAST;
2930
2931 /* Default settings for Rx and Tx flow control */
2932 adapter->rx_fc = true;
2933 adapter->tx_fc = true;
2934
2935 netif_set_gso_max_size(netdev, 65535);
2936
2937 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2938
2939 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2940
2941 for_all_rx_queues(adapter, rxo, i)
2942 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2943 BE_NAPI_WEIGHT);
2944
2945 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2946 BE_NAPI_WEIGHT);
2947}
2948
2949static void be_unmap_pci_bars(struct be_adapter *adapter)
2950{
2951 if (adapter->csr)
2952 iounmap(adapter->csr);
2953 if (adapter->db)
2954 iounmap(adapter->db);
2955 if (adapter->pcicfg && be_physfn(adapter))
2956 iounmap(adapter->pcicfg);
2957}
2958
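/* Map the control BARs. Lancer exposes a single doorbell BAR; on BE2/BE3
 * the CSR (PF only), doorbell and PCI config BARs are mapped, with BAR
 * numbers that differ per generation and per PF/VF. A VF derives its
 * pcicfg area from the doorbell mapping.
 */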
2959static int be_map_pci_bars(struct be_adapter *adapter)
2960{
2961 u8 __iomem *addr;
2962 int pcicfg_reg, db_reg;
2963
2964 if (lancer_chip(adapter)) {
2965 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2966 pci_resource_len(adapter->pdev, 0));
2967 if (addr == NULL)
2968 return -ENOMEM;
2969 adapter->db = addr;
2970 return 0;
2971 }
2972
2973 if (be_physfn(adapter)) {
2974 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2975 pci_resource_len(adapter->pdev, 2));
2976 if (addr == NULL)
2977 return -ENOMEM;
2978 adapter->csr = addr;
2979 }
2980
2981 if (adapter->generation == BE_GEN2) {
2982 pcicfg_reg = 1;
2983 db_reg = 4;
2984 } else {
2985 pcicfg_reg = 0;
2986 if (be_physfn(adapter))
2987 db_reg = 4;
2988 else
2989 db_reg = 0;
2990 }
2991 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2992 pci_resource_len(adapter->pdev, db_reg));
2993 if (addr == NULL)
2994 goto pci_map_err;
2995 adapter->db = addr;
2996
2997 if (be_physfn(adapter)) {
2998 addr = ioremap_nocache(
2999 pci_resource_start(adapter->pdev, pcicfg_reg),
3000 pci_resource_len(adapter->pdev, pcicfg_reg));
3001 if (addr == NULL)
3002 goto pci_map_err;
3003 adapter->pcicfg = addr;
3004 } else
3005 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3006
3007 return 0;
3008pci_map_err:
3009 be_unmap_pci_bars(adapter);
3010 return -ENOMEM;
3011}
3012
3013
3014static void be_ctrl_cleanup(struct be_adapter *adapter)
3015{
3016 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3017
3018 be_unmap_pci_bars(adapter);
3019
3020 if (mem->va)
3021 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3022 mem->dma);
3023
3024 mem = &adapter->rx_filter;
3025 if (mem->va)
3026 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3027 mem->dma);
3028}
3029
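/* Map the BARs and allocate the control-path DMA memory: the 16-byte
 * aligned mailbox used for mailbox/MCC commands and the RX filter
 * command buffer; also initializes the related locks and completion.
 */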
3030static int be_ctrl_init(struct be_adapter *adapter)
3031{
3032 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3033 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3034 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3035 int status;
3036
3037 status = be_map_pci_bars(adapter);
3038 if (status)
3039 goto done;
3040
3041 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3042 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3043 mbox_mem_alloc->size,
3044 &mbox_mem_alloc->dma,
3045 GFP_KERNEL);
3046 if (!mbox_mem_alloc->va) {
3047 status = -ENOMEM;
3048 goto unmap_pci_bars;
3049 }
3050 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3051 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3052 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3053 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3054
3055 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3056 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3057 &rx_filter->dma, GFP_KERNEL);
3058 if (rx_filter->va == NULL) {
3059 status = -ENOMEM;
3060 goto free_mbox;
3061 }
3062 memset(rx_filter->va, 0, rx_filter->size);
3063
3064 mutex_init(&adapter->mbox_lock);
3065 spin_lock_init(&adapter->mcc_lock);
3066 spin_lock_init(&adapter->mcc_cq_lock);
3067
3068 init_completion(&adapter->flash_compl);
3069 pci_save_state(adapter->pdev);
3070 return 0;
3071
3072free_mbox:
3073 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3074 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3075
3076unmap_pci_bars:
3077 be_unmap_pci_bars(adapter);
3078
3079done:
3080 return status;
3081}
3082
3083static void be_stats_cleanup(struct be_adapter *adapter)
3084{
3085 struct be_dma_mem *cmd = &adapter->stats_cmd;
3086
3087 if (cmd->va)
3088 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3089 cmd->va, cmd->dma);
3090}
3091
3092static int be_stats_init(struct be_adapter *adapter)
3093{
3094 struct be_dma_mem *cmd = &adapter->stats_cmd;
3095
3096 if (adapter->generation == BE_GEN2) {
3097 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3098 } else {
3099 if (lancer_chip(adapter))
3100 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3101 else
3102 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3103 }
3104 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3105 GFP_KERNEL);
3106 if (cmd->va == NULL)
3107 return -1;
3108 memset(cmd->va, 0, cmd->size);
3109 return 0;
3110}
3111
3112static void __devexit be_remove(struct pci_dev *pdev)
3113{
3114 struct be_adapter *adapter = pci_get_drvdata(pdev);
3115
3116 if (!adapter)
3117 return;
3118
3119 cancel_delayed_work_sync(&adapter->work);
3120
3121 unregister_netdev(adapter->netdev);
3122
3123 be_clear(adapter);
3124
3125 be_stats_cleanup(adapter);
3126
3127 be_ctrl_cleanup(adapter);
3128
3129 kfree(adapter->vf_cfg);
3130 be_sriov_disable(adapter);
3131
3132 be_msix_disable(adapter);
3133
3134 pci_set_drvdata(pdev, NULL);
3135 pci_release_regions(pdev);
3136 pci_disable_device(pdev);
3137
3138 free_netdev(adapter->netdev);
3139}
3140
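/* Read the initial configuration from firmware: FW version, port number,
 * function mode and capabilities, the permanent MAC address (on the PF,
 * or on any function for Lancer), the supported VLAN count and controller
 * attributes; finally decide how many TX queues to use.
 */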
3141static int be_get_config(struct be_adapter *adapter)
3142{
3143 int status;
3144 u8 mac[ETH_ALEN];
3145
3146 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3147 if (status)
3148 return status;
3149
3150 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3151 &adapter->function_mode, &adapter->function_caps);
3152 if (status)
3153 return status;
3154
3155 memset(mac, 0, ETH_ALEN);
3156
3157	/* A default permanent address is given to each VF for Lancer */
3158 if (be_physfn(adapter) || lancer_chip(adapter)) {
3159 status = be_cmd_mac_addr_query(adapter, mac,
3160 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3161
3162 if (status)
3163 return status;
3164
3165 if (!is_valid_ether_addr(mac))
3166 return -EADDRNOTAVAIL;
3167
3168 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3169 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3170 }
3171
3172 if (adapter->function_mode & 0x400)
3173 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3174 else
3175 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3176
3177 status = be_cmd_get_cntl_attributes(adapter);
3178 if (status)
3179 return status;
3180
3181 if ((num_vfs && adapter->sriov_enabled) ||
3182 (adapter->function_mode & 0x400) ||
3183 lancer_chip(adapter) || !be_physfn(adapter)) {
3184 adapter->num_tx_qs = 1;
3185 netif_set_real_num_tx_queues(adapter->netdev,
3186 adapter->num_tx_qs);
3187 } else {
3188 adapter->num_tx_qs = MAX_TX_QS;
3189 }
3190
3191 return 0;
3192}
3193
3194static int be_dev_family_check(struct be_adapter *adapter)
3195{
3196 struct pci_dev *pdev = adapter->pdev;
3197 u32 sli_intf = 0, if_type;
3198
3199 switch (pdev->device) {
3200 case BE_DEVICE_ID1:
3201 case OC_DEVICE_ID1:
3202 adapter->generation = BE_GEN2;
3203 break;
3204 case BE_DEVICE_ID2:
3205 case OC_DEVICE_ID2:
3206 adapter->generation = BE_GEN3;
3207 break;
3208 case OC_DEVICE_ID3:
3209 case OC_DEVICE_ID4:
3210 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3211 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3212 SLI_INTF_IF_TYPE_SHIFT;
3213
3214 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3215 if_type != 0x02) {
3216 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3217 return -EINVAL;
3218 }
3219 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3220 SLI_INTF_FAMILY_SHIFT);
3221 adapter->generation = BE_GEN3;
3222 break;
3223 default:
3224 adapter->generation = 0;
3225 }
3226 return 0;
3227}
3228
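/* Poll the SLIPORT status register until the ready bit is set, giving up
 * after 500 polls of 20ms (roughly 10 seconds).
 */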
3229static int lancer_wait_ready(struct be_adapter *adapter)
3230{
3231#define SLIPORT_READY_TIMEOUT 500
3232 u32 sliport_status;
3233 int status = 0, i;
3234
3235 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3236 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3237 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3238 break;
3239
3240 msleep(20);
3241 }
3242
3243 if (i == SLIPORT_READY_TIMEOUT)
3244 status = -1;
3245
3246 return status;
3247}
3248
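/* Wait for the Lancer port to become ready. If it reports an error that
 * requires a reset, request one through the SLIPORT control register and
 * verify that the port recovers.
 */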
3249static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3250{
3251 int status;
3252 u32 sliport_status, err, reset_needed;
3253 status = lancer_wait_ready(adapter);
3254 if (!status) {
3255 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3256 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3257 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3258 if (err && reset_needed) {
3259 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3260 adapter->db + SLIPORT_CONTROL_OFFSET);
3261
3262			/* check if the adapter has corrected the error */
3263 status = lancer_wait_ready(adapter);
3264 sliport_status = ioread32(adapter->db +
3265 SLIPORT_STATUS_OFFSET);
3266 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3267 SLIPORT_STATUS_RN_MASK);
3268 if (status || sliport_status)
3269 status = -1;
3270 } else if (err || reset_needed) {
3271 status = -1;
3272 }
3273 }
3274 return status;
3275}
3276
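/* PCI probe: enable and map the device, bring the function to a known
 * state (POST, fw_init, function reset), read its configuration, enable
 * MSI-X and SR-IOV, create the queues and register the netdev.
 */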
3277static int __devinit be_probe(struct pci_dev *pdev,
3278 const struct pci_device_id *pdev_id)
3279{
3280 int status = 0;
3281 struct be_adapter *adapter;
3282 struct net_device *netdev;
3283
3284 status = pci_enable_device(pdev);
3285 if (status)
3286 goto do_none;
3287
3288 status = pci_request_regions(pdev, DRV_NAME);
3289 if (status)
3290 goto disable_dev;
3291 pci_set_master(pdev);
3292
3293 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3294 if (netdev == NULL) {
3295 status = -ENOMEM;
3296 goto rel_reg;
3297 }
3298 adapter = netdev_priv(netdev);
3299 adapter->pdev = pdev;
3300 pci_set_drvdata(pdev, adapter);
3301
3302 status = be_dev_family_check(adapter);
3303 if (status)
3304 goto free_netdev;
3305
3306 adapter->netdev = netdev;
3307 SET_NETDEV_DEV(netdev, &pdev->dev);
3308
3309 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3310 if (!status) {
3311 netdev->features |= NETIF_F_HIGHDMA;
3312 } else {
3313 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3314 if (status) {
3315 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3316 goto free_netdev;
3317 }
3318 }
3319
3320 be_sriov_enable(adapter);
3321 if (adapter->sriov_enabled) {
3322 adapter->vf_cfg = kcalloc(num_vfs,
3323 sizeof(struct be_vf_cfg), GFP_KERNEL);
3324		status = -ENOMEM;
3325 if (!adapter->vf_cfg)
3326 goto free_netdev;
3327 }
3328
3329 status = be_ctrl_init(adapter);
3330 if (status)
3331 goto free_vf_cfg;
3332
3333 if (lancer_chip(adapter)) {
3334 status = lancer_test_and_set_rdy_state(adapter);
3335 if (status) {
3336			dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3337 goto ctrl_clean;
3338 }
3339 }
3340
3341 /* sync up with fw's ready state */
3342 if (be_physfn(adapter)) {
3343 status = be_cmd_POST(adapter);
3344 if (status)
3345 goto ctrl_clean;
3346 }
3347
3348 /* tell fw we're ready to fire cmds */
3349 status = be_cmd_fw_init(adapter);
3350 if (status)
3351 goto ctrl_clean;
3352
3353 status = be_cmd_reset_function(adapter);
3354 if (status)
3355 goto ctrl_clean;
3356
3357 status = be_stats_init(adapter);
3358 if (status)
3359 goto ctrl_clean;
3360
3361 status = be_get_config(adapter);
3362 if (status)
3363 goto stats_clean;
3364
3365 /* The INTR bit may be set in the card when probed by a kdump kernel
3366 * after a crash.
3367 */
3368 if (!lancer_chip(adapter))
3369 be_intr_set(adapter, false);
3370
3371 be_msix_enable(adapter);
3372
3373 INIT_DELAYED_WORK(&adapter->work, be_worker);
3374
3375 status = be_setup(adapter);
3376 if (status)
3377 goto msix_disable;
3378
3379 be_netdev_init(netdev);
3380 status = register_netdev(netdev);
3381 if (status != 0)
3382 goto unsetup;
3383
3384 if (be_physfn(adapter) && adapter->sriov_enabled) {
3385 u8 mac_speed;
3386 u16 vf, lnk_speed;
3387
3388 if (!lancer_chip(adapter)) {
3389 status = be_vf_eth_addr_config(adapter);
3390 if (status)
3391 goto unreg_netdev;
3392 }
3393
3394 for (vf = 0; vf < num_vfs; vf++) {
3395 status = be_cmd_link_status_query(adapter, &mac_speed,
3396 &lnk_speed, vf + 1);
3397 if (!status)
3398 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3399 else
3400 goto unreg_netdev;
3401 }
3402 }
3403
3404 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3405
3406 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3407 return 0;
3408
3409unreg_netdev:
3410 unregister_netdev(netdev);
3411unsetup:
3412 be_clear(adapter);
3413msix_disable:
3414 be_msix_disable(adapter);
3415stats_clean:
3416 be_stats_cleanup(adapter);
3417ctrl_clean:
3418 be_ctrl_cleanup(adapter);
3419free_vf_cfg:
3420 kfree(adapter->vf_cfg);
3421free_netdev:
3422 be_sriov_disable(adapter);
3423 free_netdev(netdev);
3424 pci_set_drvdata(pdev, NULL);
3425rel_reg:
3426 pci_release_regions(pdev);
3427disable_dev:
3428 pci_disable_device(pdev);
3429do_none:
3430 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3431 return status;
3432}
3433
3434static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3435{
3436 struct be_adapter *adapter = pci_get_drvdata(pdev);
3437 struct net_device *netdev = adapter->netdev;
3438
3439 cancel_delayed_work_sync(&adapter->work);
3440 if (adapter->wol)
3441 be_setup_wol(adapter, true);
3442
3443 netif_device_detach(netdev);
3444 if (netif_running(netdev)) {
3445 rtnl_lock();
3446 be_close(netdev);
3447 rtnl_unlock();
3448 }
3449 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3450 be_clear(adapter);
3451
3452 be_msix_disable(adapter);
3453 pci_save_state(pdev);
3454 pci_disable_device(pdev);
3455 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3456 return 0;
3457}
3458
3459static int be_resume(struct pci_dev *pdev)
3460{
3461 int status = 0;
3462 struct be_adapter *adapter = pci_get_drvdata(pdev);
3463 struct net_device *netdev = adapter->netdev;
3464
3465 netif_device_detach(netdev);
3466
3467 status = pci_enable_device(pdev);
3468 if (status)
3469 return status;
3470
3471 pci_set_power_state(pdev, 0);
3472 pci_restore_state(pdev);
3473
3474 be_msix_enable(adapter);
3475 /* tell fw we're ready to fire cmds */
3476 status = be_cmd_fw_init(adapter);
3477 if (status)
3478 return status;
3479
3480 be_setup(adapter);
3481 if (netif_running(netdev)) {
3482 rtnl_lock();
3483 be_open(netdev);
3484 rtnl_unlock();
3485 }
3486 netif_device_attach(netdev);
3487
3488 if (adapter->wol)
3489 be_setup_wol(adapter, false);
3490
3491 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3492 return 0;
3493}
3494
3495/*
3496 * An FLR will stop BE from DMAing any data.
3497 */
3498static void be_shutdown(struct pci_dev *pdev)
3499{
3500 struct be_adapter *adapter = pci_get_drvdata(pdev);
3501
3502 if (!adapter)
3503 return;
3504
3505 cancel_delayed_work_sync(&adapter->work);
3506
3507 netif_device_detach(adapter->netdev);
3508
3509 if (adapter->wol)
3510 be_setup_wol(adapter, true);
3511
3512 be_cmd_reset_function(adapter);
3513
3514 pci_disable_device(pdev);
3515}
3516
3517static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3518 pci_channel_state_t state)
3519{
3520 struct be_adapter *adapter = pci_get_drvdata(pdev);
3521 struct net_device *netdev = adapter->netdev;
3522
3523 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3524
3525 adapter->eeh_err = true;
3526
3527 netif_device_detach(netdev);
3528
3529 if (netif_running(netdev)) {
3530 rtnl_lock();
3531 be_close(netdev);
3532 rtnl_unlock();
3533 }
3534 be_clear(adapter);
3535
3536 if (state == pci_channel_io_perm_failure)
3537 return PCI_ERS_RESULT_DISCONNECT;
3538
3539 pci_disable_device(pdev);
3540
3541 return PCI_ERS_RESULT_NEED_RESET;
3542}
3543
3544static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3545{
3546 struct be_adapter *adapter = pci_get_drvdata(pdev);
3547 int status;
3548
3549 dev_info(&adapter->pdev->dev, "EEH reset\n");
3550 adapter->eeh_err = false;
3551
3552 status = pci_enable_device(pdev);
3553 if (status)
3554 return PCI_ERS_RESULT_DISCONNECT;
3555
3556 pci_set_master(pdev);
3557 pci_set_power_state(pdev, 0);
3558 pci_restore_state(pdev);
3559
3560 /* Check if card is ok and fw is ready */
3561 status = be_cmd_POST(adapter);
3562 if (status)
3563 return PCI_ERS_RESULT_DISCONNECT;
3564
3565 return PCI_ERS_RESULT_RECOVERED;
3566}
3567
3568static void be_eeh_resume(struct pci_dev *pdev)
3569{
3570 int status = 0;
3571 struct be_adapter *adapter = pci_get_drvdata(pdev);
3572 struct net_device *netdev = adapter->netdev;
3573
3574 dev_info(&adapter->pdev->dev, "EEH resume\n");
3575
3576 pci_save_state(pdev);
3577
3578 /* tell fw we're ready to fire cmds */
3579 status = be_cmd_fw_init(adapter);
3580 if (status)
3581 goto err;
3582
3583 status = be_setup(adapter);
3584 if (status)
3585 goto err;
3586
3587 if (netif_running(netdev)) {
3588 status = be_open(netdev);
3589 if (status)
3590 goto err;
3591 }
3592 netif_device_attach(netdev);
3593 return;
3594err:
3595 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3596}
3597
3598static struct pci_error_handlers be_eeh_handlers = {
3599 .error_detected = be_eeh_err_detected,
3600 .slot_reset = be_eeh_reset,
3601 .resume = be_eeh_resume,
3602};
3603
3604static struct pci_driver be_driver = {
3605 .name = DRV_NAME,
3606 .id_table = be_dev_ids,
3607 .probe = be_probe,
3608 .remove = be_remove,
3609 .suspend = be_suspend,
3610 .resume = be_resume,
3611 .shutdown = be_shutdown,
3612 .err_handler = &be_eeh_handlers
3613};
3614
3615static int __init be_init_module(void)
3616{
3617 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3618 rx_frag_size != 2048) {
3619 printk(KERN_WARNING DRV_NAME
3620 " : Module param rx_frag_size must be 2048/4096/8192."
3621 " Using 2048\n");
3622 rx_frag_size = 2048;
3623 }
3624
3625 return pci_register_driver(&be_driver);
3626}
3627module_init(be_init_module);
3628
3629static void __exit be_exit_module(void)
3630{
3631 pci_unregister_driver(&be_driver);
3632}
3633module_exit(be_exit_module);