path: root/drivers/net/ethernet/qlogic/qlcnic
author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/net/ethernet/qlogic/qlcnic
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/net/ethernet/qlogic/qlcnic')
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile            |    9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h            | 1503
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c        | 1240
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c    | 1395
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h        | 1026
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c         | 1364
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c       | 1332
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c         | 1309
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c       | 3030
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c   |  628
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c      |  960
11 files changed, 0 insertions(+), 13796 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
deleted file mode 100644
index c4b8ced8382..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
9 qlcnic_sysfs.o qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
deleted file mode 100644
index bc7ec64e9c7..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ /dev/null
@@ -1,1503 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef _QLCNIC_H_
9#define _QLCNIC_H_
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/ioport.h>
15#include <linux/pci.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/ip.h>
19#include <linux/in.h>
20#include <linux/tcp.h>
21#include <linux/skbuff.h>
22#include <linux/firmware.h>
23
24#include <linux/ethtool.h>
25#include <linux/mii.h>
26#include <linux/timer.h>
27
28#include <linux/vmalloc.h>
29
30#include <linux/io.h>
31#include <asm/byteorder.h>
32#include <linux/bitops.h>
33#include <linux/if_vlan.h>
34
35#include "qlcnic_hdr.h"
36
37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 30
40#define QLCNIC_LINUX_VERSIONID "5.0.30"
41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
44
45#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
46#define _major(v) (((v) >> 24) & 0xff)
47#define _minor(v) (((v) >> 16) & 0xff)
48#define _build(v) ((v) & 0xffff)
49
50/* version in image has weird encoding:
51 * 7:0 - major
52 * 15:8 - minor
53 * 31:16 - build (little endian)
54 */
55#define QLCNIC_DECODE_VERSION(v) \
56 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
57
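/* Editorial illustration (not part of the original header; the image word is
 * made up): a worked decode of the encoding described above. */
	u32 img_ver = 0x001e0005;			/* build 30, minor 0, major 5 */
	u32 drv_ver = QLCNIC_DECODE_VERSION(img_ver);	/* QLCNIC_VERSION_CODE(5, 0, 30), i.e. "5.0.30" */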
58#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
59#define QLCNIC_NUM_FLASH_SECTORS (64)
60#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
61#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
62 * QLCNIC_FLASH_SECTOR_SIZE)
63
64#define RCV_DESC_RINGSIZE(rds_ring) \
65 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
66#define RCV_BUFF_RINGSIZE(rds_ring) \
67 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
68#define STATUS_DESC_RINGSIZE(sds_ring) \
69 (sizeof(struct status_desc) * (sds_ring)->num_desc)
70#define TX_BUFF_RINGSIZE(tx_ring) \
71 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
72#define TX_DESC_RINGSIZE(tx_ring) \
73 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
74
75#define QLCNIC_P3P_A0 0x50
76#define QLCNIC_P3P_C0 0x58
77
78#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
79
80#define FIRST_PAGE_GROUP_START 0
81#define FIRST_PAGE_GROUP_END 0x100000
82
83#define P3P_MAX_MTU (9600)
84#define P3P_MIN_MTU (68)
85#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
86
87#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
88#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
89#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
90#define QLCNIC_LRO_BUFFER_EXTRA 2048
91
92/* Tx defines */
93#define QLCNIC_MAX_FRAGS_PER_TX 14
94#define MAX_TSO_HEADER_DESC 2
95#define MGMT_CMD_DESC_RESV 4
96#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
97 + MGMT_CMD_DESC_RESV)
98#define QLCNIC_MAX_TX_TIMEOUTS 2
99
100/*
101 * Following are the states of the Phantom. Phantom will set them and
102 * Host will read to check if the fields are correct.
103 */
104#define PHAN_INITIALIZE_FAILED 0xffff
105#define PHAN_INITIALIZE_COMPLETE 0xff01
106
107/* Host writes the following to notify that it has done the init-handshake */
108#define PHAN_INITIALIZE_ACK 0xf00f
109#define PHAN_PEG_RCV_INITIALIZED 0xff01
110
111#define NUM_RCV_DESC_RINGS 3
112
113#define RCV_RING_NORMAL 0
114#define RCV_RING_JUMBO 1
115
116#define MIN_CMD_DESCRIPTORS 64
117#define MIN_RCV_DESCRIPTORS 64
118#define MIN_JUMBO_DESCRIPTORS 32
119
120#define MAX_CMD_DESCRIPTORS 1024
121#define MAX_RCV_DESCRIPTORS_1G 4096
122#define MAX_RCV_DESCRIPTORS_10G 8192
123#define MAX_RCV_DESCRIPTORS_VF 2048
124#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
125#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
126
127#define DEFAULT_RCV_DESCRIPTORS_1G 2048
128#define DEFAULT_RCV_DESCRIPTORS_10G 4096
129#define DEFAULT_RCV_DESCRIPTORS_VF 1024
130#define MAX_RDS_RINGS 2
131
132#define get_next_index(index, length) \
133 (((index) + 1) & ((length) - 1))
134
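/* Editorial illustration (not part of the original header): the mask in
 * get_next_index() assumes a power-of-two ring length, so the index wraps. */
	u32 idx = 1023;
	idx = get_next_index(idx, 1024);	/* (1023 + 1) & 1023 == 0 */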
135/*
136 * Following data structures describe the descriptors that will be used.
137 * Added fields of tcpHdrSize and ipHdrSize. The driver needs to fill them
138 * only when doing LSO (packets above the standard 1500-byte size).
139 */
140struct cmd_desc_type0 {
141 u8 tcp_hdr_offset; /* For LSO only */
142 u8 ip_hdr_offset; /* For LSO only */
143 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
144 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
145
146 __le64 addr_buffer2;
147
148 __le16 reference_handle;
149 __le16 mss;
150 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
151 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
152 __le16 conn_id; /* IPSec offload only */
153
154 __le64 addr_buffer3;
155 __le64 addr_buffer1;
156
157 __le16 buffer_length[4];
158
159 __le64 addr_buffer4;
160
161 u8 eth_addr[ETH_ALEN];
162 __le16 vlan_TCI;
163
164} __attribute__ ((aligned(64)));
165
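/* Editorial illustration (not part of the original header; opcode and flag
 * values are made up): packing a descriptor per the bit layouts noted in the
 * field comments above -- a 1514-byte frame split over two fragments. */
	struct cmd_desc_type0 desc;
	desc.nfrags__length = cpu_to_le32((1514 << 8) | 2);	/* 31:8 total len, 7:0 frag count */
	desc.flags_opcode = cpu_to_le16((0x0a << 7) | 0x01);	/* 12:7 opcode, 6:0 flags */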
166/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
167struct rcv_desc {
168 __le16 reference_handle;
169 __le16 reserved;
170 __le32 buffer_length; /* allocated buffer length (usually 2K) */
171 __le64 addr_buffer;
172} __packed;
173
174struct status_desc {
175 __le64 status_desc_data[2];
176} __attribute__ ((aligned(16)));
177
178/* UNIFIED ROMIMAGE */
179#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
180#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
181#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
182#define QLCNIC_UNI_DIR_SECT_FW 0x7
183
184/*Offsets */
185#define QLCNIC_UNI_CHIP_REV_OFF 10
186#define QLCNIC_UNI_FLAGS_OFF 11
187#define QLCNIC_UNI_BIOS_VERSION_OFF 12
188#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
189#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
190
191struct uni_table_desc{
192 __le32 findex;
193 __le32 num_entries;
194 __le32 entry_size;
195 __le32 reserved[5];
196};
197
198struct uni_data_desc{
199 __le32 findex;
200 __le32 size;
201 __le32 reserved[5];
202};
203
204/* Flash Defines and Structures */
205#define QLCNIC_FLT_LOCATION 0x3F1000
206#define QLCNIC_B0_FW_IMAGE_REGION 0x74
207#define QLCNIC_C0_FW_IMAGE_REGION 0x97
208#define QLCNIC_BOOTLD_REGION 0X72
209struct qlcnic_flt_header {
210 u16 version;
211 u16 len;
212 u16 checksum;
213 u16 reserved;
214};
215
216struct qlcnic_flt_entry {
217 u8 region;
218 u8 reserved0;
219 u8 attrib;
220 u8 reserved1;
221 u32 size;
222 u32 start_addr;
223 u32 end_addr;
224};
225
226/* Magic number to let user know flash is programmed */
227#define QLCNIC_BDINFO_MAGIC 0x12345678
228
229#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
230#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
231#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
232#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
233#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
234#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
235#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
236#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
237#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
238#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
239#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
240#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
241#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
242#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
243
244#define QLCNIC_MSIX_TABLE_OFFSET 0x44
245
246/* Flash memory map */
247#define QLCNIC_BRDCFG_START 0x4000 /* board config */
248#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
249#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
250#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
251
252#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
253#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
254#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
255#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
256
257#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
258#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
259
260#define QLCNIC_FW_MIN_SIZE (0x3fffff)
261#define QLCNIC_UNIFIED_ROMIMAGE 0
262#define QLCNIC_FLASH_ROMIMAGE 1
263#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
264
265#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
266#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
267
268extern char qlcnic_driver_name[];
269
270/* Number of status descriptors to handle per interrupt */
271#define MAX_STATUS_HANDLE (64)
272
273/*
274 * qlcnic_skb_frag{} contains mapping info for each SG list entry. This
275 * has to be freed when DMA is complete. This is part of qlcnic_cmd_buffer{}.
276 */
277struct qlcnic_skb_frag {
278 u64 dma;
279 u64 length;
280};
281
282/* Following defines are for the state of the buffers */
283#define QLCNIC_BUFFER_FREE 0
284#define QLCNIC_BUFFER_BUSY 1
285
286/*
287 * There will be one qlcnic_buffer per skb packet. These will be
288 * used to save the dma info for pci_unmap_page()
289 */
290struct qlcnic_cmd_buffer {
291 struct sk_buff *skb;
292 struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
293 u32 frag_count;
294};
295
296/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
297struct qlcnic_rx_buffer {
298 u16 ref_handle;
299 struct sk_buff *skb;
300 struct list_head list;
301 u64 dma;
302};
303
304/* Board types */
305#define QLCNIC_GBE 0x01
306#define QLCNIC_XGBE 0x02
307
308/*
309 * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and
310 * are adjusted based on the configured MTU.
311 */
312#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
313#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
314
315#define QLCNIC_INTR_DEFAULT 0x04
316#define QLCNIC_CONFIG_INTR_COALESCE 3
317
318struct qlcnic_nic_intr_coalesce {
319 u8 type;
320 u8 sts_ring_mask;
321 u16 rx_packets;
322 u16 rx_time_us;
323 u16 flag;
324 u32 timer_out;
325};
326
327struct qlcnic_dump_template_hdr {
328 u32 type;
329 u32 offset;
330 u32 size;
331 u32 cap_mask;
332 u32 num_entries;
333 u32 version;
334 u32 timestamp;
335 u32 checksum;
336 u32 drv_cap_mask;
337 u32 sys_info[3];
338 u32 saved_state[16];
339 u32 cap_sizes[8];
340 u32 rsvd[0];
341};
342
343struct qlcnic_fw_dump {
344 u8 clr; /* flag to indicate if dump is cleared */
345 u8 enable; /* enable/disable dump */
346 u32 size; /* total size of the dump */
347 void *data; /* dump data area */
348 struct qlcnic_dump_template_hdr *tmpl_hdr;
349};
350
351/*
352 * One hardware_context{} per adapter
353 * contains interrupt info as well as shared hardware info.
354 */
355struct qlcnic_hardware_context {
356 void __iomem *pci_base0;
357 void __iomem *ocm_win_crb;
358
359 unsigned long pci_len0;
360
361 rwlock_t crb_lock;
362 struct mutex mem_lock;
363
364 u8 revision_id;
365 u8 pci_func;
366 u8 linkup;
367 u8 loopback_state;
368 u8 beacon_state;
369 u8 has_link_events;
370 u8 fw_type;
371 u8 physical_port;
372 u8 reset_context;
373 u8 msix_supported;
374 u8 max_mac_filters;
375 u8 mc_enabled;
376 u8 max_mc_count;
377 u8 diag_test;
378 u8 num_msix;
379 u8 nic_mode;
380 char diag_cnt;
381
382 u16 port_type;
383 u16 board_type;
384
385 u16 link_speed;
386 u16 link_duplex;
387 u16 link_autoneg;
388 u16 module_type;
389
390 u16 op_mode;
391 u16 switch_mode;
392 u16 max_tx_ques;
393 u16 max_rx_ques;
394 u16 max_mtu;
395 u32 msg_enable;
396 u16 act_pci_func;
397
398 u32 capabilities;
399 u32 temp;
400 u32 int_vec_bit;
401 u32 fw_hal_version;
402 struct qlcnic_hardware_ops *hw_ops;
403 struct qlcnic_nic_intr_coalesce coal;
404 struct qlcnic_fw_dump fw_dump;
405};
406
407struct qlcnic_adapter_stats {
408 u64 xmitcalled;
409 u64 xmitfinished;
410 u64 rxdropped;
411 u64 txdropped;
412 u64 csummed;
413 u64 rx_pkts;
414 u64 lro_pkts;
415 u64 rxbytes;
416 u64 txbytes;
417 u64 lrobytes;
418 u64 lso_frames;
419 u64 xmit_on;
420 u64 xmit_off;
421 u64 skb_alloc_failure;
422 u64 null_rxbuf;
423 u64 rx_dma_map_error;
424 u64 tx_dma_map_error;
425};
426
427/*
428 * Rcv Descriptor Context. One such per Rcv Descriptor. There may
429 * be one Rcv Descriptor ring for normal packets, one for jumbo, and possibly others.
430 */
431struct qlcnic_host_rds_ring {
432 void __iomem *crb_rcv_producer;
433 struct rcv_desc *desc_head;
434 struct qlcnic_rx_buffer *rx_buf_arr;
435 u32 num_desc;
436 u32 producer;
437 u32 dma_size;
438 u32 skb_size;
439 u32 flags;
440 struct list_head free_list;
441 spinlock_t lock;
442 dma_addr_t phys_addr;
443} ____cacheline_internodealigned_in_smp;
444
445struct qlcnic_host_sds_ring {
446 u32 consumer;
447 u32 num_desc;
448 void __iomem *crb_sts_consumer;
449
450 struct status_desc *desc_head;
451 struct qlcnic_adapter *adapter;
452 struct napi_struct napi;
453 struct list_head free_list[NUM_RCV_DESC_RINGS];
454
455 void __iomem *crb_intr_mask;
456 int irq;
457
458 dma_addr_t phys_addr;
459 char name[IFNAMSIZ+4];
460} ____cacheline_internodealigned_in_smp;
461
462struct qlcnic_host_tx_ring {
463 u16 ctx_id;
464 u32 producer;
465 u32 sw_consumer;
466 u32 num_desc;
467 void __iomem *crb_cmd_producer;
468 struct cmd_desc_type0 *desc_head;
469 struct qlcnic_cmd_buffer *cmd_buf_arr;
470 __le32 *hw_consumer;
471
472 dma_addr_t phys_addr;
473 dma_addr_t hw_cons_phys_addr;
474 struct netdev_queue *txq;
475} ____cacheline_internodealigned_in_smp;
476
477/*
478 * Receive context. There is one such structure per instance of the
479 * receive processing. Any state information that is relevant to
480 * the receive path must be in this structure. The global data may be
481 * present elsewhere.
482 */
483struct qlcnic_recv_context {
484 struct qlcnic_host_rds_ring *rds_rings;
485 struct qlcnic_host_sds_ring *sds_rings;
486 u32 state;
487 u16 context_id;
488 u16 virt_port;
489
490};
491
492/* HW context creation */
493
494#define QLCNIC_OS_CRB_RETRY_COUNT 4000
495#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
496 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
497
498#define QLCNIC_CDRP_CMD_BIT 0x80000000
499
500/*
501 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
502 * in the crb QLCNIC_CDRP_CRB_OFFSET.
503 */
504#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
505#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
506
507#define QLCNIC_CDRP_RSP_OK 0x00000001
508#define QLCNIC_CDRP_RSP_FAIL 0x00000002
509#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
510
511/*
512 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
513 * the crb QLCNIC_CDRP_CRB_OFFSET.
514 */
515#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
516#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
517
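/* Editorial illustration (not part of the original header): a command word is
 * formed by setting bit 31; firmware clears it in its reply, so the IS_RSP()
 * test tells the two apart. SET_MTU is defined a few lines below. */
	u32 cmd_word = QLCNIC_CDRP_FORM_CMD(QLCNIC_CDRP_CMD_SET_MTU);	/* 0x80000012 */
	int got_reply = QLCNIC_CDRP_IS_RSP(QLCNIC_CDRP_RSP_OK);	/* non-zero */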
518#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
519#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
520#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
521#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
522#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
523#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
524#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
525#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
526#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
527#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
528#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
529#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
530#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
531#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
532#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
533#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
534#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
535#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
536#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
537#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
538
539#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
540#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
541#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
542#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
543#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
544#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
545#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
546#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
547#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
548#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
549#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
550#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
551#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
552#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
553
554#define QLCNIC_RCODE_SUCCESS 0
555#define QLCNIC_RCODE_INVALID_ARGS 6
556#define QLCNIC_RCODE_NOT_SUPPORTED 9
557#define QLCNIC_RCODE_NOT_PERMITTED 10
558#define QLCNIC_RCODE_NOT_IMPL 15
559#define QLCNIC_RCODE_INVALID 16
560#define QLCNIC_RCODE_TIMEOUT 17
561#define QLCNIC_DESTROY_CTX_RESET 0
562
563/*
564 * Capabilities Announced
565 */
566#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
567#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
568#define QLCNIC_CAP0_LSO (1 << 6)
569#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
570#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
571#define QLCNIC_CAP0_VALIDOFF (1 << 11)
572#define QLCNIC_CAP0_LRO_MSS (1 << 21)
573
574/*
575 * Context state
576 */
577#define QLCNIC_HOST_CTX_STATE_FREED 0
578#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
579
580/*
581 * Rx context
582 */
583
584struct qlcnic_hostrq_sds_ring {
585 __le64 host_phys_addr; /* Ring base addr */
586 __le32 ring_size; /* Ring entries */
587 __le16 msi_index;
588 __le16 rsvd; /* Padding */
589} __packed;
590
591struct qlcnic_hostrq_rds_ring {
592 __le64 host_phys_addr; /* Ring base addr */
593 __le64 buff_size; /* Packet buffer size */
594 __le32 ring_size; /* Ring entries */
595 __le32 ring_kind; /* Class of ring */
596} __packed;
597
598struct qlcnic_hostrq_rx_ctx {
599 __le64 host_rsp_dma_addr; /* Response dma'd here */
600 __le32 capabilities[4]; /* Flag bit vector */
601 __le32 host_int_crb_mode; /* Interrupt crb usage */
602 __le32 host_rds_crb_mode; /* RDS crb usage */
603 /* These ring offsets are relative to data[0] below */
604 __le32 rds_ring_offset; /* Offset to RDS config */
605 __le32 sds_ring_offset; /* Offset to SDS config */
606 __le16 num_rds_rings; /* Count of RDS rings */
607 __le16 num_sds_rings; /* Count of SDS rings */
608 __le16 valid_field_offset;
609 u8 txrx_sds_binding;
610 u8 msix_handler;
611 u8 reserved[128]; /* reserve space for future expansion*/
612 /* MUST BE 64-bit aligned.
613 The following is packed:
614 - N hostrq_rds_rings
615 - N hostrq_sds_rings */
616 char data[0];
617} __packed;
618
619struct qlcnic_cardrsp_rds_ring{
620 __le32 host_producer_crb; /* Crb to use */
621 __le32 rsvd1; /* Padding */
622} __packed;
623
624struct qlcnic_cardrsp_sds_ring {
625 __le32 host_consumer_crb; /* Crb to use */
626 __le32 interrupt_crb; /* Crb to use */
627} __packed;
628
629struct qlcnic_cardrsp_rx_ctx {
630 /* These ring offsets are relative to data[0] below */
631 __le32 rds_ring_offset; /* Offset to RDS config */
632 __le32 sds_ring_offset; /* Offset to SDS config */
633 __le32 host_ctx_state; /* Starting State */
634 __le32 num_fn_per_port; /* How many PCI fn share the port */
635 __le16 num_rds_rings; /* Count of RDS rings */
636 __le16 num_sds_rings; /* Count of SDS rings */
637 __le16 context_id; /* Handle for context */
638 u8 phys_port; /* Physical id of port */
639 u8 virt_port; /* Virtual/Logical id of port */
640 u8 reserved[128]; /* save space for future expansion */
641 /* MUST BE 64-bit aligned.
642 The following is packed:
643 - N cardrsp_rds_rings
644 - N cardrs_sds_rings */
645 char data[0];
646} __packed;
647
648#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
649 (sizeof(HOSTRQ_RX) + \
650 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
651 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
652
653#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
654 (sizeof(CARDRSP_RX) + \
655 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
656 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
657
658/*
659 * Tx context
660 */
661
662struct qlcnic_hostrq_cds_ring {
663 __le64 host_phys_addr; /* Ring base addr */
664 __le32 ring_size; /* Ring entries */
665 __le32 rsvd; /* Padding */
666} __packed;
667
668struct qlcnic_hostrq_tx_ctx {
669 __le64 host_rsp_dma_addr; /* Response dma'd here */
670 __le64 cmd_cons_dma_addr; /* */
671 __le64 dummy_dma_addr; /* */
672 __le32 capabilities[4]; /* Flag bit vector */
673 __le32 host_int_crb_mode; /* Interrupt crb usage */
674 __le32 rsvd1; /* Padding */
675 __le16 rsvd2; /* Padding */
676 __le16 interrupt_ctl;
677 __le16 msi_index;
678 __le16 rsvd3; /* Padding */
679 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
680 u8 reserved[128]; /* future expansion */
681} __packed;
682
683struct qlcnic_cardrsp_cds_ring {
684 __le32 host_producer_crb; /* Crb to use */
685 __le32 interrupt_crb; /* Crb to use */
686} __packed;
687
688struct qlcnic_cardrsp_tx_ctx {
689 __le32 host_ctx_state; /* Starting state */
690 __le16 context_id; /* Handle for context */
691 u8 phys_port; /* Physical id of port */
692 u8 virt_port; /* Virtual/Logical id of port */
693 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
694 u8 reserved[128]; /* future expansion */
695} __packed;
696
697#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
698#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
699
700/* CRB */
701
702#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
703#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
704#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
705#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
706
707#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
708#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
709#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
710#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
711#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
712
713
714/* MAC */
715
716#define MC_COUNT_P3P 38
717
718#define QLCNIC_MAC_NOOP 0
719#define QLCNIC_MAC_ADD 1
720#define QLCNIC_MAC_DEL 2
721#define QLCNIC_MAC_VLAN_ADD 3
722#define QLCNIC_MAC_VLAN_DEL 4
723
724struct qlcnic_mac_list_s {
725 struct list_head list;
726 uint8_t mac_addr[ETH_ALEN+2];
727};
728
729#define QLCNIC_HOST_REQUEST 0x13
730#define QLCNIC_REQUEST 0x14
731
732#define QLCNIC_MAC_EVENT 0x1
733
734#define QLCNIC_IP_UP 2
735#define QLCNIC_IP_DOWN 3
736
737#define QLCNIC_ILB_MODE 0x1
738#define QLCNIC_ELB_MODE 0x2
739
740#define QLCNIC_LINKEVENT 0x1
741#define QLCNIC_LB_RESPONSE 0x2
742#define QLCNIC_IS_LB_CONFIGURED(VAL) \
743 (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
744
745/*
746 * Driver --> Firmware
747 */
748#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
749#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
750#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
751#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
752#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
753#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
754
755#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
756#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
757#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
758#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
759
760/*
761 * Firmware --> Driver
762 */
763
764#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
765#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
766
767#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
768#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
769#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
770
771#define QLCNIC_LRO_REQUEST_CLEANUP 4
772
773/* Capabilities received */
774#define QLCNIC_FW_CAPABILITY_TSO BIT_1
775#define QLCNIC_FW_CAPABILITY_BDG BIT_8
776#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
777#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
778#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
779#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
780
781#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
782
783/* module types */
784#define LINKEVENT_MODULE_NOT_PRESENT 1
785#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
786#define LINKEVENT_MODULE_OPTICAL_SRLR 3
787#define LINKEVENT_MODULE_OPTICAL_LRM 4
788#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
789#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
790#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
791#define LINKEVENT_MODULE_TWINAX 8
792
793#define LINKSPEED_10GBPS 10000
794#define LINKSPEED_1GBPS 1000
795#define LINKSPEED_100MBPS 100
796#define LINKSPEED_10MBPS 10
797
798#define LINKSPEED_ENCODED_10MBPS 0
799#define LINKSPEED_ENCODED_100MBPS 1
800#define LINKSPEED_ENCODED_1GBPS 2
801
802#define LINKEVENT_AUTONEG_DISABLED 0
803#define LINKEVENT_AUTONEG_ENABLED 1
804
805#define LINKEVENT_HALF_DUPLEX 0
806#define LINKEVENT_FULL_DUPLEX 1
807
808#define LINKEVENT_LINKSPEED_MBPS 0
809#define LINKEVENT_LINKSPEED_ENCODED 1
810
811/* firmware response header:
812 * 63:58 - message type
813 * 57:56 - owner
814 * 55:53 - desc count
815 * 52:48 - reserved
816 * 47:40 - completion id
817 * 39:32 - opcode
818 * 31:16 - error code
819 * 15:00 - reserved
820 */
821#define qlcnic_get_nic_msg_opcode(msg_hdr) \
822 ((msg_hdr >> 32) & 0xFF)
823
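/* Editorial illustration (not part of the original header; the header value is
 * made up): pulling the opcode out of bits 39:32 with the macro above. */
	u64 msg_hdr = 0x8fULL << 32;
	u8 opcode = qlcnic_get_nic_msg_opcode(msg_hdr);	/* 0x8f, CONFIG_LOOPBACK */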
824struct qlcnic_fw_msg {
825 union {
826 struct {
827 u64 hdr;
828 u64 body[7];
829 };
830 u64 words[8];
831 };
832};
833
834struct qlcnic_nic_req {
835 __le64 qhdr;
836 __le64 req_hdr;
837 __le64 words[6];
838} __packed;
839
840struct qlcnic_mac_req {
841 u8 op;
842 u8 tag;
843 u8 mac_addr[6];
844};
845
846struct qlcnic_vlan_req {
847 __le16 vlan_id;
848 __le16 rsvd[3];
849} __packed;
850
851struct qlcnic_ipaddr {
852 __be32 ipv4;
853 __be32 ipv6[4];
854};
855
856#define QLCNIC_MSI_ENABLED 0x02
857#define QLCNIC_MSIX_ENABLED 0x04
858#define QLCNIC_LRO_ENABLED 0x08
859#define QLCNIC_LRO_DISABLED 0x00
860#define QLCNIC_BRIDGE_ENABLED 0X10
861#define QLCNIC_DIAG_ENABLED 0x20
862#define QLCNIC_ESWITCH_ENABLED 0x40
863#define QLCNIC_ADAPTER_INITIALIZED 0x80
864#define QLCNIC_TAGGING_ENABLED 0x100
865#define QLCNIC_MACSPOOF 0x200
866#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
867#define QLCNIC_PROMISC_DISABLED 0x800
868#define QLCNIC_NEED_FLR 0x1000
869#define QLCNIC_FW_RESET_OWNER 0x2000
870#define QLCNIC_FW_HANG 0x4000
871#define QLCNIC_FW_LRO_MSS_CAP 0x8000
872#define QLCNIC_IS_MSI_FAMILY(adapter) \
873 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
874
875#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
876#define QLCNIC_MSIX_TBL_SPACE 8192
877#define QLCNIC_PCI_REG_MSIX_TBL 0x44
878#define QLCNIC_MSIX_TBL_PGSIZE 4096
879
880#define QLCNIC_NETDEV_WEIGHT 128
881#define QLCNIC_ADAPTER_UP_MAGIC 777
882
883#define __QLCNIC_FW_ATTACHED 0
884#define __QLCNIC_DEV_UP 1
885#define __QLCNIC_RESETTING 2
886#define __QLCNIC_START_FW 4
887#define __QLCNIC_AER 5
888#define __QLCNIC_DIAG_RES_ALLOC 6
889#define __QLCNIC_LED_ENABLE 7
890
891#define QLCNIC_INTERRUPT_TEST 1
892#define QLCNIC_LOOPBACK_TEST 2
893#define QLCNIC_LED_TEST 3
894
895#define QLCNIC_FILTER_AGE 80
896#define QLCNIC_READD_AGE 20
897#define QLCNIC_LB_MAX_FILTERS 64
898
899/* QLCNIC Driver Error Code */
900#define QLCNIC_FW_NOT_RESPOND 51
901#define QLCNIC_TEST_IN_PROGRESS 52
902#define QLCNIC_UNDEFINED_ERROR 53
903#define QLCNIC_LB_CABLE_NOT_CONN 54
904
905struct qlcnic_filter {
906 struct hlist_node fnode;
907 u8 faddr[ETH_ALEN];
908 __le16 vlan_id;
909 unsigned long ftime;
910};
911
912struct qlcnic_filter_hash {
913 struct hlist_head *fhead;
914 u8 fnum;
915 u8 fmax;
916};
917
918struct qlcnic_adapter {
919 struct qlcnic_hardware_context *ahw;
920 struct qlcnic_recv_context *recv_ctx;
921 struct qlcnic_host_tx_ring *tx_ring;
922 struct net_device *netdev;
923 struct pci_dev *pdev;
924
925 unsigned long state;
926 u32 flags;
927
928 int max_drv_tx_rings;
929 u16 num_txd;
930 u16 num_rxd;
931 u16 num_jumbo_rxd;
932 u16 max_rxd;
933 u16 max_jumbo_rxd;
934
935 u8 max_rds_rings;
936 u8 max_sds_rings;
937 u8 portnum;
938
939 u8 fw_wait_cnt;
940 u8 fw_fail_cnt;
941 u8 tx_timeo_cnt;
942 u8 need_fw_reset;
943
944 u16 is_up;
945 u16 pvid;
946
947 u32 irq;
948 u32 heartbeat;
949
950 u8 dev_state;
951 u8 reset_ack_timeo;
952 u8 dev_init_timeo;
953
954 u8 mac_addr[ETH_ALEN];
955
956 u64 dev_rst_time;
957 u8 mac_learn;
958 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
959 struct qlcnic_npar_info *npars;
960 struct qlcnic_eswitch *eswitch;
961 struct qlcnic_nic_template *nic_ops;
962
963 struct qlcnic_adapter_stats stats;
964 struct list_head mac_list;
965
966 void __iomem *tgt_mask_reg;
967 void __iomem *tgt_status_reg;
968 void __iomem *crb_int_state_reg;
969 void __iomem *isr_int_vec;
970
971 struct msix_entry *msix_entries;
972 struct delayed_work fw_work;
973
974 struct qlcnic_filter_hash fhash;
975
976 spinlock_t tx_clean_lock;
977 spinlock_t mac_learn_lock;
978 u32 file_prd_off; /*File fw product offset*/
979 u32 fw_version;
980 const struct firmware *fw;
981};
982
983struct qlcnic_info_le {
984 __le16 pci_func;
985 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
986 __le16 phys_port;
987 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
988
989 __le32 capabilities;
990 u8 max_mac_filters;
991 u8 reserved1;
992 __le16 max_mtu;
993
994 __le16 max_tx_ques;
995 __le16 max_rx_ques;
996 __le16 min_tx_bw;
997 __le16 max_tx_bw;
998 u8 reserved2[104];
999} __packed;
1000
1001struct qlcnic_info {
1002 u16 pci_func;
1003 u16 op_mode;
1004 u16 phys_port;
1005 u16 switch_mode;
1006 u32 capabilities;
1007 u8 max_mac_filters;
1008 u8 reserved1;
1009 u16 max_mtu;
1010 u16 max_tx_ques;
1011 u16 max_rx_ques;
1012 u16 min_tx_bw;
1013 u16 max_tx_bw;
1014};
1015
1016struct qlcnic_pci_info_le {
1017 __le16 id; /* pci function id */
1018 __le16 active; /* 1 = Enabled */
1019 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
1020 __le16 default_port; /* default port number */
1021
1022 __le16 tx_min_bw; /* Multiple of 100 Mbps */
1023 __le16 tx_max_bw;
1024 __le16 reserved1[2];
1025
1026 u8 mac[ETH_ALEN];
1027 u8 reserved2[106];
1028} __packed;
1029
1030struct qlcnic_pci_info {
1031 u16 id;
1032 u16 active;
1033 u16 type;
1034 u16 default_port;
1035 u16 tx_min_bw;
1036 u16 tx_max_bw;
1037 u8 mac[ETH_ALEN];
1038};
1039
1040struct qlcnic_npar_info {
1041 u16 pvid;
1042 u16 min_bw;
1043 u16 max_bw;
1044 u8 phy_port;
1045 u8 type;
1046 u8 active;
1047 u8 enable_pm;
1048 u8 dest_npar;
1049 u8 discard_tagged;
1050 u8 mac_override;
1051 u8 mac_anti_spoof;
1052 u8 promisc_mode;
1053 u8 offload_flags;
1054 u8 pci_func;
1055};
1056
1057struct qlcnic_eswitch {
1058 u8 port;
1059 u8 active_vports;
1060 u8 active_vlans;
1061 u8 active_ucast_filters;
1062 u8 max_ucast_filters;
1063 u8 max_active_vlans;
1064
1065 u32 flags;
1066#define QLCNIC_SWITCH_ENABLE BIT_1
1067#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
1068#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
1069#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
1070};
1071
1072
1073/* Return codes for Error handling */
1074#define QL_STATUS_INVALID_PARAM -1
1075
1076#define MAX_BW 100 /* % of link speed */
1077#define MAX_VLAN_ID 4095
1078#define MIN_VLAN_ID 2
1079#define DEFAULT_MAC_LEARN 1
1080
1081#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1082#define IS_VALID_BW(bw) (bw <= MAX_BW)
1083
1084struct qlcnic_pci_func_cfg {
1085 u16 func_type;
1086 u16 min_bw;
1087 u16 max_bw;
1088 u16 port_num;
1089 u8 pci_func;
1090 u8 func_state;
1091 u8 def_mac_addr[6];
1092};
1093
1094struct qlcnic_npar_func_cfg {
1095 u32 fw_capab;
1096 u16 port_num;
1097 u16 min_bw;
1098 u16 max_bw;
1099 u16 max_tx_queues;
1100 u16 max_rx_queues;
1101 u8 pci_func;
1102 u8 op_mode;
1103};
1104
1105struct qlcnic_pm_func_cfg {
1106 u8 pci_func;
1107 u8 action;
1108 u8 dest_npar;
1109 u8 reserved[5];
1110};
1111
1112struct qlcnic_esw_func_cfg {
1113 u16 vlan_id;
1114 u8 op_mode;
1115 u8 op_type;
1116 u8 pci_func;
1117 u8 host_vlan_tag;
1118 u8 promisc_mode;
1119 u8 discard_tagged;
1120 u8 mac_override;
1121 u8 mac_anti_spoof;
1122 u8 offload_flags;
1123 u8 reserved[5];
1124};
1125
1126#define QLCNIC_STATS_VERSION 1
1127#define QLCNIC_STATS_PORT 1
1128#define QLCNIC_STATS_ESWITCH 2
1129#define QLCNIC_QUERY_RX_COUNTER 0
1130#define QLCNIC_QUERY_TX_COUNTER 1
1131#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
1132#define QLCNIC_FILL_STATS(VAL1) \
1133 (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
1134#define QLCNIC_MAC_STATS 1
1135#define QLCNIC_ESW_STATS 2
1136
1137#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
1138do { \
1139 if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
1140 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1141 (VAL1) = (VAL2); \
1142 else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
1143 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1144 (VAL1) += (VAL2); \
1145} while (0)
1146
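/* Editorial illustration (not part of the original header): the macro above
 * adopts the first valid sample, accumulates subsequent ones, and leaves the
 * total untouched when the new sample is QLCNIC_STATS_NOT_AVAIL. */
	u64 total = QLCNIC_STATS_NOT_AVAIL, sample = 7;
	QLCNIC_ADD_ESW_STATS(total, sample);			/* total = 7 */
	QLCNIC_ADD_ESW_STATS(total, sample);			/* total = 14 */
	QLCNIC_ADD_ESW_STATS(total, QLCNIC_STATS_NOT_AVAIL);	/* total unchanged */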
1147struct qlcnic_mac_statistics_le {
1148 __le64 mac_tx_frames;
1149 __le64 mac_tx_bytes;
1150 __le64 mac_tx_mcast_pkts;
1151 __le64 mac_tx_bcast_pkts;
1152 __le64 mac_tx_pause_cnt;
1153 __le64 mac_tx_ctrl_pkt;
1154 __le64 mac_tx_lt_64b_pkts;
1155 __le64 mac_tx_lt_127b_pkts;
1156 __le64 mac_tx_lt_255b_pkts;
1157 __le64 mac_tx_lt_511b_pkts;
1158 __le64 mac_tx_lt_1023b_pkts;
1159 __le64 mac_tx_lt_1518b_pkts;
1160 __le64 mac_tx_gt_1518b_pkts;
1161 __le64 rsvd1[3];
1162
1163 __le64 mac_rx_frames;
1164 __le64 mac_rx_bytes;
1165 __le64 mac_rx_mcast_pkts;
1166 __le64 mac_rx_bcast_pkts;
1167 __le64 mac_rx_pause_cnt;
1168 __le64 mac_rx_ctrl_pkt;
1169 __le64 mac_rx_lt_64b_pkts;
1170 __le64 mac_rx_lt_127b_pkts;
1171 __le64 mac_rx_lt_255b_pkts;
1172 __le64 mac_rx_lt_511b_pkts;
1173 __le64 mac_rx_lt_1023b_pkts;
1174 __le64 mac_rx_lt_1518b_pkts;
1175 __le64 mac_rx_gt_1518b_pkts;
1176 __le64 rsvd2[3];
1177
1178 __le64 mac_rx_length_error;
1179 __le64 mac_rx_length_small;
1180 __le64 mac_rx_length_large;
1181 __le64 mac_rx_jabber;
1182 __le64 mac_rx_dropped;
1183 __le64 mac_rx_crc_error;
1184 __le64 mac_align_error;
1185} __packed;
1186
1187struct qlcnic_mac_statistics {
1188 u64 mac_tx_frames;
1189 u64 mac_tx_bytes;
1190 u64 mac_tx_mcast_pkts;
1191 u64 mac_tx_bcast_pkts;
1192 u64 mac_tx_pause_cnt;
1193 u64 mac_tx_ctrl_pkt;
1194 u64 mac_tx_lt_64b_pkts;
1195 u64 mac_tx_lt_127b_pkts;
1196 u64 mac_tx_lt_255b_pkts;
1197 u64 mac_tx_lt_511b_pkts;
1198 u64 mac_tx_lt_1023b_pkts;
1199 u64 mac_tx_lt_1518b_pkts;
1200 u64 mac_tx_gt_1518b_pkts;
1201 u64 rsvd1[3];
1202 u64 mac_rx_frames;
1203 u64 mac_rx_bytes;
1204 u64 mac_rx_mcast_pkts;
1205 u64 mac_rx_bcast_pkts;
1206 u64 mac_rx_pause_cnt;
1207 u64 mac_rx_ctrl_pkt;
1208 u64 mac_rx_lt_64b_pkts;
1209 u64 mac_rx_lt_127b_pkts;
1210 u64 mac_rx_lt_255b_pkts;
1211 u64 mac_rx_lt_511b_pkts;
1212 u64 mac_rx_lt_1023b_pkts;
1213 u64 mac_rx_lt_1518b_pkts;
1214 u64 mac_rx_gt_1518b_pkts;
1215 u64 rsvd2[3];
1216 u64 mac_rx_length_error;
1217 u64 mac_rx_length_small;
1218 u64 mac_rx_length_large;
1219 u64 mac_rx_jabber;
1220 u64 mac_rx_dropped;
1221 u64 mac_rx_crc_error;
1222 u64 mac_align_error;
1223};
1224
1225struct qlcnic_esw_stats_le {
1226 __le16 context_id;
1227 __le16 version;
1228 __le16 size;
1229 __le16 unused;
1230 __le64 unicast_frames;
1231 __le64 multicast_frames;
1232 __le64 broadcast_frames;
1233 __le64 dropped_frames;
1234 __le64 errors;
1235 __le64 local_frames;
1236 __le64 numbytes;
1237 __le64 rsvd[3];
1238} __packed;
1239
1240struct __qlcnic_esw_statistics {
1241 u16 context_id;
1242 u16 version;
1243 u16 size;
1244 u16 unused;
1245 u64 unicast_frames;
1246 u64 multicast_frames;
1247 u64 broadcast_frames;
1248 u64 dropped_frames;
1249 u64 errors;
1250 u64 local_frames;
1251 u64 numbytes;
1252 u64 rsvd[3];
1253};
1254
1255struct qlcnic_esw_statistics {
1256 struct __qlcnic_esw_statistics rx;
1257 struct __qlcnic_esw_statistics tx;
1258};
1259
1260#define QLCNIC_DUMP_MASK_DEF 0x1f
1261#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1262#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1263#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
1264#define QLCNIC_FORCE_FW_RESET 0xdeaddead
1265#define QLCNIC_SET_QUIESCENT 0xadd00010
1266#define QLCNIC_RESET_QUIESCENT 0xadd00020
1267
1268struct _cdrp_cmd {
1269 u32 cmd;
1270 u32 arg1;
1271 u32 arg2;
1272 u32 arg3;
1273};
1274
1275struct qlcnic_cmd_args {
1276 struct _cdrp_cmd req;
1277 struct _cdrp_cmd rsp;
1278};
1279
1280int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
1281int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
1282
1283int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
1284int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
1285int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
1286int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
1287void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
1288void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
1289
1290#define ADDR_IN_RANGE(addr, low, high) \
1291 (((addr) < (high)) && ((addr) >= (low)))
1292
1293#define QLCRD32(adapter, off) \
1294 (qlcnic_hw_read_wx_2M(adapter, off))
1295#define QLCWR32(adapter, off, val) \
1296 (qlcnic_hw_write_wx_2M(adapter, off, val))
1297
1298int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
1299void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1300
1301#define qlcnic_rom_lock(a) \
1302 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
1303#define qlcnic_rom_unlock(a) \
1304 qlcnic_pcie_sem_unlock((a), 2)
1305#define qlcnic_phy_lock(a) \
1306 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1307#define qlcnic_phy_unlock(a) \
1308 qlcnic_pcie_sem_unlock((a), 3)
1309#define qlcnic_api_lock(a) \
1310 qlcnic_pcie_sem_lock((a), 5, 0)
1311#define qlcnic_api_unlock(a) \
1312 qlcnic_pcie_sem_unlock((a), 5)
1313#define qlcnic_sw_lock(a) \
1314 qlcnic_pcie_sem_lock((a), 6, 0)
1315#define qlcnic_sw_unlock(a) \
1316 qlcnic_pcie_sem_unlock((a), 6)
1317#define crb_win_lock(a) \
1318 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1319#define crb_win_unlock(a) \
1320 qlcnic_pcie_sem_unlock((a), 7)
1321
1322#define __QLCNIC_MAX_LED_RATE 0xf
1323#define __QLCNIC_MAX_LED_STATE 0x2
1324
1325#define MAX_CTL_CHECK 1000
1326
1327int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1328int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1329int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1330void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1331void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1332int qlcnic_dump_fw(struct qlcnic_adapter *);
1333
1334/* Functions from qlcnic_init.c */
1335int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1336int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1337void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1338void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1339int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1340int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1341int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
1342
1343int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
1344int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1345 u8 *bytes, size_t size);
1346int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1347void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1348
1349void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
1350
1351int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1352void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1353
1354int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
1355void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
1356
1357void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1358void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1359void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1360
1361int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1362void qlcnic_watchdog_task(struct work_struct *work);
1363void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1364 struct qlcnic_host_rds_ring *rds_ring);
1365int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1366void qlcnic_set_multi(struct net_device *netdev);
1367void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1368int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1369int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1370int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1371int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
1372int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1373void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1374
1375int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1376int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1377netdev_features_t qlcnic_fix_features(struct net_device *netdev,
1378 netdev_features_t features);
1379int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
1380int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1381int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1382int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1383void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
1384void qlcnic_fetch_mac(u32, u32, u8, u8 *);
1385void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1386void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
1387int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
1388
1389/* Functions from qlcnic_ethtool.c */
1390int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
1391
1392/* Functions from qlcnic_main.c */
1393int qlcnic_reset_context(struct qlcnic_adapter *);
1394void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
1395void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1396int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1397netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1398int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
1399int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
1400void qlcnic_dev_request_reset(struct qlcnic_adapter *);
1401void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1402
1403/* Management functions */
1404int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
1405int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
1406int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
1407int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
1408
1409/* eSwitch management functions */
1410int qlcnic_config_switch_port(struct qlcnic_adapter *,
1411 struct qlcnic_esw_func_cfg *);
1412int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
1413 struct qlcnic_esw_func_cfg *);
1414int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1415int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1416 struct __qlcnic_esw_statistics *);
1417int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1418 struct __qlcnic_esw_statistics *);
1419int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1420int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
1421extern int qlcnic_config_tso;
1422
1423int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
1424void qlcnic_napi_del(struct qlcnic_adapter *adapter);
1425void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
1426void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
1427int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
1428void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
1429void qlcnic_free_tx_rings(struct qlcnic_adapter *);
1430int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
1431
1432void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
1433void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
1434void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
1435void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
1436int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
1437int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
1438void qlcnic_set_vlan_config(struct qlcnic_adapter *,
1439 struct qlcnic_esw_func_cfg *);
1440void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
1441 struct qlcnic_esw_func_cfg *);
1442
1443/*
1444 * QLOGIC Board information
1445 */
1446
1447#define QLCNIC_MAX_BOARD_NAME_LEN 100
1448struct qlcnic_board_info {
1449 unsigned short vendor;
1450 unsigned short device;
1451 unsigned short sub_vendor;
1452 unsigned short sub_device;
1453 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1454};
1455
1456static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1457{
1458 if (likely(tx_ring->producer < tx_ring->sw_consumer))
1459 return tx_ring->sw_consumer - tx_ring->producer;
1460 else
1461 return tx_ring->sw_consumer + tx_ring->num_desc -
1462 tx_ring->producer;
1463}
1464
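/* Editorial note (not part of the original header), a worked example of the
 * arithmetic above with made-up numbers: num_desc = 1024, sw_consumer = 4,
 * producer = 10 takes the else branch and yields 4 + 1024 - 10 = 1018 free
 * descriptors; when producer == sw_consumer the full num_desc is reported. */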
1465static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
1466{
1467 writel(0, sds_ring->crb_intr_mask);
1468}
1469
1470static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1471{
1472 struct qlcnic_adapter *adapter = sds_ring->adapter;
1473
1474 writel(0x1, sds_ring->crb_intr_mask);
1475
1476 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1477 writel(0xfbff, adapter->tgt_mask_reg);
1478}
1479
1480extern const struct ethtool_ops qlcnic_ethtool_ops;
1481extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1482
1483struct qlcnic_nic_template {
1484 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1485 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1486 int (*start_firmware) (struct qlcnic_adapter *);
1487};
1488
1489#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1490 if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
1491 printk(KERN_INFO "%s: %s: " _fmt, \
1492 dev_name(&adapter->pdev->dev), \
1493 __func__, ##_args); \
1494 } while (0)
1495
1496#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
1497static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1498{
1499 unsigned short device = adapter->pdev->device;
1500 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
1501}
1502
1503#endif /* _QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
deleted file mode 100644
index b14b8f0787e..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ /dev/null
@@ -1,1240 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic.h"
9
10static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
11{
12 int i;
13
14 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
15 if (adapter->npars[i].pci_func == pci_func)
16 return i;
17 }
18
19 return -1;
20}
21
22static u32
23qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
24{
25 u32 rsp;
26 int timeout = 0;
27
28 do {
29 /* give at least 1ms for firmware to respond */
30 mdelay(1);
31
32 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
33 return QLCNIC_CDRP_RSP_TIMEOUT;
34
35 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
36 } while (!QLCNIC_CDRP_IS_RSP(rsp));
37
38 return rsp;
39}
40
41void
42qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
43{
44 u32 rsp;
45 u32 signature;
46 struct pci_dev *pdev = adapter->pdev;
47 struct qlcnic_hardware_context *ahw = adapter->ahw;
48
49 signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
50 adapter->ahw->fw_hal_version);
51
52 /* Acquire semaphore before accessing CRB */
53 if (qlcnic_api_lock(adapter)) {
54 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
55 return;
56 }
57
58 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
59 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
60 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
61 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
62 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
63 QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
64
65 rsp = qlcnic_poll_rsp(adapter);
66
67 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
68 dev_err(&pdev->dev, "CDRP response timeout.\n");
69 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
70 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
71 cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
72 switch (cmd->rsp.cmd) {
73 case QLCNIC_RCODE_INVALID_ARGS:
74 dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
75 cmd->rsp.cmd);
76 break;
77 case QLCNIC_RCODE_NOT_SUPPORTED:
78 case QLCNIC_RCODE_NOT_IMPL:
79 dev_err(&pdev->dev,
80 "CDRP command not supported: 0x%x.\n",
81 cmd->rsp.cmd);
82 break;
83 case QLCNIC_RCODE_NOT_PERMITTED:
84 dev_err(&pdev->dev,
85 "CDRP requested action not permitted: 0x%x.\n",
86 cmd->rsp.cmd);
87 break;
88 case QLCNIC_RCODE_INVALID:
89 dev_err(&pdev->dev,
90 "CDRP invalid or unknown cmd received: 0x%x.\n",
91 cmd->rsp.cmd);
92 break;
93 case QLCNIC_RCODE_TIMEOUT:
94 dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
95 cmd->rsp.cmd);
96 break;
97 default:
98 dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
99 cmd->rsp.cmd);
100 }
101 } else if (rsp == QLCNIC_CDRP_RSP_OK) {
102 cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
103 if (cmd->rsp.arg2)
104 cmd->rsp.arg2 = QLCRD32(adapter,
105 QLCNIC_ARG2_CRB_OFFSET);
106 if (cmd->rsp.arg3)
107 cmd->rsp.arg3 = QLCRD32(adapter,
108 QLCNIC_ARG3_CRB_OFFSET);
109 }
110 if (cmd->rsp.arg1)
111 cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
112
113 /* Release semaphore */
114 qlcnic_api_unlock(adapter);
115
116}
117
118static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
119{
120 uint64_t sum = 0;
121 int count = temp_size / sizeof(uint32_t);
122 while (count-- > 0)
123 sum += *temp_buffer++;
124 while (sum >> 32)
125 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
126 return ~sum;
127}
128
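/* Editorial note (not part of the original file): the 64-bit accumulation is
 * folded back into 32 bits with end-around carry and complemented; the caller
 * below recomputes it over the whole template header, checksum word included,
 * and treats any non-zero result as a corrupt template. */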
129int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
130{
131 int err, i;
132 void *tmp_addr;
133 u32 temp_size, version, csum, *template;
134 __le32 *tmp_buf;
135 struct qlcnic_cmd_args cmd;
136 struct qlcnic_hardware_context *ahw;
137 struct qlcnic_dump_template_hdr *tmpl_hdr;
138 dma_addr_t tmp_addr_t = 0;
139
140 ahw = adapter->ahw;
141 memset(&cmd, 0, sizeof(cmd));
142 cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
143 memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
144 qlcnic_issue_cmd(adapter, &cmd);
145 if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
146 dev_info(&adapter->pdev->dev,
147 "Can't get template size %d\n", cmd.rsp.cmd);
148 err = -EIO;
149 return err;
150 }
151 temp_size = cmd.rsp.arg2;
152 version = cmd.rsp.arg3;
153 dev_info(&adapter->pdev->dev,
154 "minidump template version = 0x%x\n", version);
155 if (!temp_size)
156 return -EIO;
157
158 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
159 &tmp_addr_t, GFP_KERNEL);
160 if (!tmp_addr) {
161 dev_err(&adapter->pdev->dev,
162 "Can't get memory for FW dump template\n");
163 return -ENOMEM;
164 }
165 memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
166 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
167 cmd.req.arg1 = LSD(tmp_addr_t);
168 cmd.req.arg2 = MSD(tmp_addr_t);
169 cmd.req.arg3 = temp_size;
170 qlcnic_issue_cmd(adapter, &cmd);
171
172 err = cmd.rsp.cmd;
173 if (err != QLCNIC_RCODE_SUCCESS) {
174 dev_err(&adapter->pdev->dev,
175 "Failed to get mini dump template header %d\n", err);
176 err = -EIO;
177 goto error;
178 }
179 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
180 if (!ahw->fw_dump.tmpl_hdr) {
181 err = -EIO;
182 goto error;
183 }
184 tmp_buf = tmp_addr;
185 template = (u32 *) ahw->fw_dump.tmpl_hdr;
186 for (i = 0; i < temp_size/sizeof(u32); i++)
187 *template++ = __le32_to_cpu(*tmp_buf++);
188
189 csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
190 if (csum) {
191 dev_err(&adapter->pdev->dev,
192 "Template header checksum validation failed\n");
193 err = -EIO;
194 goto error;
195 }
196
197 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
198 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
199 ahw->fw_dump.enable = 1;
200error:
201 dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
202 return err;
203}
204
205int
206qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
207{
208 struct qlcnic_cmd_args cmd;
209 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
210
211 memset(&cmd, 0, sizeof(cmd));
212 cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
213 cmd.req.arg1 = recv_ctx->context_id;
214 cmd.req.arg2 = mtu;
215 cmd.req.arg3 = 0;
216 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
217 qlcnic_issue_cmd(adapter, &cmd);
218 if (cmd.rsp.cmd) {
219 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
220 return -EIO;
221 }
222 }
223
224 return 0;
225}
226
227static int
228qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
229{
230 void *addr;
231 struct qlcnic_hostrq_rx_ctx *prq;
232 struct qlcnic_cardrsp_rx_ctx *prsp;
233 struct qlcnic_hostrq_rds_ring *prq_rds;
234 struct qlcnic_hostrq_sds_ring *prq_sds;
235 struct qlcnic_cardrsp_rds_ring *prsp_rds;
236 struct qlcnic_cardrsp_sds_ring *prsp_sds;
237 struct qlcnic_host_rds_ring *rds_ring;
238 struct qlcnic_host_sds_ring *sds_ring;
239 struct qlcnic_cmd_args cmd;
240
241 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
242 u64 phys_addr;
243
244 u8 i, nrds_rings, nsds_rings;
245 size_t rq_size, rsp_size;
246 u32 cap, reg, val, reg2;
247 int err;
248 u16 temp;
249
250 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
251
252 nrds_rings = adapter->max_rds_rings;
253 nsds_rings = adapter->max_sds_rings;
254
255 rq_size =
256 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
257 nsds_rings);
258 rsp_size =
259 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
260 nsds_rings);
261
262 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
263 &hostrq_phys_addr, GFP_KERNEL);
264 if (addr == NULL)
265 return -ENOMEM;
266 prq = addr;
267
268 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
269 &cardrsp_phys_addr, GFP_KERNEL);
270 if (addr == NULL) {
271 err = -ENOMEM;
272 goto out_free_rq;
273 }
274 prsp = addr;
275
276 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
277
278 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
279 | QLCNIC_CAP0_VALIDOFF);
280 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
281
282 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
283 cap |= QLCNIC_CAP0_LRO_MSS;
284
285 temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
286 prq->valid_field_offset = cpu_to_le16(temp);
287 prq->txrx_sds_binding = nsds_rings - 1;
288
289 prq->capabilities[0] = cpu_to_le32(cap);
290 prq->host_int_crb_mode =
291 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
292 prq->host_rds_crb_mode =
293 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
294
295 prq->num_rds_rings = cpu_to_le16(nrds_rings);
296 prq->num_sds_rings = cpu_to_le16(nsds_rings);
297 prq->rds_ring_offset = 0;
298
299 val = le32_to_cpu(prq->rds_ring_offset) +
300 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
301 prq->sds_ring_offset = cpu_to_le32(val);
302
303 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
304 le32_to_cpu(prq->rds_ring_offset));
305
306 for (i = 0; i < nrds_rings; i++) {
307
308 rds_ring = &recv_ctx->rds_rings[i];
309 rds_ring->producer = 0;
310
311 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
312 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
313 prq_rds[i].ring_kind = cpu_to_le32(i);
314 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
315 }
316
317 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
318 le32_to_cpu(prq->sds_ring_offset));
319
320 for (i = 0; i < nsds_rings; i++) {
321
322 sds_ring = &recv_ctx->sds_rings[i];
323 sds_ring->consumer = 0;
324 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
325
326 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
327 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
328 prq_sds[i].msi_index = cpu_to_le16(i);
329 }
330
331 phys_addr = hostrq_phys_addr;
332 memset(&cmd, 0, sizeof(cmd));
333 cmd.req.arg1 = (u32) (phys_addr >> 32);
334 cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
335 cmd.req.arg3 = rq_size;
336 cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
337 qlcnic_issue_cmd(adapter, &cmd);
338 err = cmd.rsp.cmd;
339 if (err) {
340 dev_err(&adapter->pdev->dev,
341			"Failed to create rx ctx in firmware %d\n", err);
342 goto out_free_rsp;
343 }
344
345
346 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
347 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
348
349 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
350 rds_ring = &recv_ctx->rds_rings[i];
351
352 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
353 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
354 }
355
356 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
357 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
358
359 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
360 sds_ring = &recv_ctx->sds_rings[i];
361
362 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
363 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
364
365 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
366 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
367 }
368
369 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
370 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
371 recv_ctx->virt_port = prsp->virt_port;
372
373out_free_rsp:
374 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
375 cardrsp_phys_addr);
376out_free_rq:
377 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
378 return err;
379}
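/*
 * Editorial sketch of the variable-length request layout built above
 * (hypothetical types): the host request is a fixed header followed by one
 * descriptor per RDS/SDS ring, so both the allocation size and the ring
 * offsets are derived from the ring counts, mirroring how SIZEOF_HOSTRQ_RX()
 * and rds_ring_offset/sds_ring_offset are used in the real code.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

struct toy_rds_desc { uint64_t phys; uint32_t size; uint32_t kind; };
struct toy_sds_desc { uint64_t phys; uint32_t size; uint16_t msi; };

struct toy_rx_request {
	uint32_t num_rds;
	uint32_t num_sds;
	uint32_t rds_off;		/* byte offset of first RDS desc in data[] */
	uint32_t sds_off;		/* byte offset of first SDS desc in data[] */
	uint8_t  data[];		/* ring descriptors follow the header */
};

static struct toy_rx_request *toy_build_rx_request(uint8_t nrds, uint8_t nsds)
{
	size_t size = sizeof(struct toy_rx_request) +
		      nrds * sizeof(struct toy_rds_desc) +
		      nsds * sizeof(struct toy_sds_desc);
	struct toy_rx_request *rq = calloc(1, size);

	if (!rq)
		return NULL;
	rq->num_rds = nrds;
	rq->num_sds = nsds;
	rq->rds_off = 0;
	rq->sds_off = nrds * sizeof(struct toy_rds_desc);
	return rq;
}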
380
381static void
382qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
383{
384 struct qlcnic_cmd_args cmd;
385 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
386
387 memset(&cmd, 0, sizeof(cmd));
388 cmd.req.arg1 = recv_ctx->context_id;
389 cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
390 cmd.req.arg3 = 0;
391 cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
392 qlcnic_issue_cmd(adapter, &cmd);
393 if (cmd.rsp.cmd)
394 dev_err(&adapter->pdev->dev,
395 "Failed to destroy rx ctx in firmware\n");
396
397 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
398}
399
400static int
401qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
402{
403 struct qlcnic_hostrq_tx_ctx *prq;
404 struct qlcnic_hostrq_cds_ring *prq_cds;
405 struct qlcnic_cardrsp_tx_ctx *prsp;
406 void *rq_addr, *rsp_addr;
407 size_t rq_size, rsp_size;
408 u32 temp;
409 struct qlcnic_cmd_args cmd;
410 int err;
411 u64 phys_addr;
412 dma_addr_t rq_phys_addr, rsp_phys_addr;
413 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
414
415 /* reset host resources */
416 tx_ring->producer = 0;
417 tx_ring->sw_consumer = 0;
418 *(tx_ring->hw_consumer) = 0;
419
420 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
421 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
422 &rq_phys_addr, GFP_KERNEL);
423 if (!rq_addr)
424 return -ENOMEM;
425
426 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
427 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
428 &rsp_phys_addr, GFP_KERNEL);
429 if (!rsp_addr) {
430 err = -ENOMEM;
431 goto out_free_rq;
432 }
433
434 memset(rq_addr, 0, rq_size);
435 prq = rq_addr;
436
437 memset(rsp_addr, 0, rsp_size);
438 prsp = rsp_addr;
439
440 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
441
442 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
443 QLCNIC_CAP0_LSO);
444 prq->capabilities[0] = cpu_to_le32(temp);
445
446 prq->host_int_crb_mode =
447 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
448
449 prq->interrupt_ctl = 0;
450 prq->msi_index = 0;
451 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
452
453 prq_cds = &prq->cds_ring;
454
455 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
456 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
457
458 phys_addr = rq_phys_addr;
459 memset(&cmd, 0, sizeof(cmd));
460 cmd.req.arg1 = (u32)(phys_addr >> 32);
461 cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
462 cmd.req.arg3 = rq_size;
463 cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
464 qlcnic_issue_cmd(adapter, &cmd);
465 err = cmd.rsp.cmd;
466
467 if (err == QLCNIC_RCODE_SUCCESS) {
468 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
469 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
470
471 adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
472 } else {
473 dev_err(&adapter->pdev->dev,
474			"Failed to create tx ctx in firmware %d\n", err);
475 err = -EIO;
476 }
477
478 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
479 rsp_phys_addr);
480
481out_free_rq:
482 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
483
484 return err;
485}
486
487static void
488qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
489{
490 struct qlcnic_cmd_args cmd;
491
492 memset(&cmd, 0, sizeof(cmd));
493 cmd.req.arg1 = adapter->tx_ring->ctx_id;
494 cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
495 cmd.req.arg3 = 0;
496 cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
497 qlcnic_issue_cmd(adapter, &cmd);
498 if (cmd.rsp.cmd)
499 dev_err(&adapter->pdev->dev,
500 "Failed to destroy tx ctx in firmware\n");
501}
502
503int
504qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
505{
506 struct qlcnic_cmd_args cmd;
507
508 memset(&cmd, 0, sizeof(cmd));
509 cmd.req.arg1 = config;
510 cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
511 qlcnic_issue_cmd(adapter, &cmd);
512
513 return cmd.rsp.cmd;
514}
515
516int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
517{
518 void *addr;
519 int err;
520 int ring;
521 struct qlcnic_recv_context *recv_ctx;
522 struct qlcnic_host_rds_ring *rds_ring;
523 struct qlcnic_host_sds_ring *sds_ring;
524 struct qlcnic_host_tx_ring *tx_ring;
525
526 struct pci_dev *pdev = adapter->pdev;
527
528 recv_ctx = adapter->recv_ctx;
529 tx_ring = adapter->tx_ring;
530
531 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
532 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
533 if (tx_ring->hw_consumer == NULL) {
534 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
535 return -ENOMEM;
536 }
537
538 /* cmd desc ring */
539 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
540 &tx_ring->phys_addr, GFP_KERNEL);
541
542 if (addr == NULL) {
543 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
544 err = -ENOMEM;
545 goto err_out_free;
546 }
547
548 tx_ring->desc_head = addr;
549
550 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
551 rds_ring = &recv_ctx->rds_rings[ring];
552 addr = dma_alloc_coherent(&adapter->pdev->dev,
553 RCV_DESC_RINGSIZE(rds_ring),
554 &rds_ring->phys_addr, GFP_KERNEL);
555 if (addr == NULL) {
556 dev_err(&pdev->dev,
557 "failed to allocate rds ring [%d]\n", ring);
558 err = -ENOMEM;
559 goto err_out_free;
560 }
561 rds_ring->desc_head = addr;
562
563 }
564
565 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
566 sds_ring = &recv_ctx->sds_rings[ring];
567
568 addr = dma_alloc_coherent(&adapter->pdev->dev,
569 STATUS_DESC_RINGSIZE(sds_ring),
570 &sds_ring->phys_addr, GFP_KERNEL);
571 if (addr == NULL) {
572 dev_err(&pdev->dev,
573 "failed to allocate sds ring [%d]\n", ring);
574 err = -ENOMEM;
575 goto err_out_free;
576 }
577 sds_ring->desc_head = addr;
578 }
579
580 return 0;
581
582err_out_free:
583 qlcnic_free_hw_resources(adapter);
584 return err;
585}
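/*
 * Editorial sketch of the allocate-all-or-unwind pattern above: every
 * allocation failure jumps to one cleanup label, and the cleanup routine
 * (like qlcnic_free_hw_resources()) only releases pointers that are
 * non-NULL, so it is safe to call on a partially built object. The names
 * and plain calloc/free below are illustrative stand-ins for the
 * dma_alloc_coherent()/dma_free_coherent() pairs in the driver.
 */
#include <stdlib.h>
#include <string.h>

struct toy_rings { void *tx; void *rx; void *status; };

static void toy_free_rings(struct toy_rings *r)
{
	free(r->status);	r->status = NULL;
	free(r->rx);		r->rx = NULL;
	free(r->tx);		r->tx = NULL;
}

static int toy_alloc_rings(struct toy_rings *r)
{
	memset(r, 0, sizeof(*r));	/* so partial cleanup stays safe */

	r->tx = calloc(1, 4096);
	if (!r->tx)
		goto err_out_free;
	r->rx = calloc(1, 4096);
	if (!r->rx)
		goto err_out_free;
	r->status = calloc(1, 4096);
	if (!r->status)
		goto err_out_free;
	return 0;

err_out_free:
	toy_free_rings(r);
	return -1;
}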
586
587
588int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
589{
590 int err;
591
592 if (adapter->flags & QLCNIC_NEED_FLR) {
593 pci_reset_function(adapter->pdev);
594 adapter->flags &= ~QLCNIC_NEED_FLR;
595 }
596
597 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
598 if (err)
599 return err;
600
601 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
602 if (err) {
603 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
604 return err;
605 }
606
607 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
608 return 0;
609}
610
611void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
612{
613 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
614 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
615 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
616
617 /* Allow dma queues to drain after context reset */
618 mdelay(20);
619 }
620}
621
622void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
623{
624 struct qlcnic_recv_context *recv_ctx;
625 struct qlcnic_host_rds_ring *rds_ring;
626 struct qlcnic_host_sds_ring *sds_ring;
627 struct qlcnic_host_tx_ring *tx_ring;
628 int ring;
629
630 recv_ctx = adapter->recv_ctx;
631
632 tx_ring = adapter->tx_ring;
633 if (tx_ring->hw_consumer != NULL) {
634 dma_free_coherent(&adapter->pdev->dev,
635 sizeof(u32),
636 tx_ring->hw_consumer,
637 tx_ring->hw_cons_phys_addr);
638 tx_ring->hw_consumer = NULL;
639 }
640
641 if (tx_ring->desc_head != NULL) {
642 dma_free_coherent(&adapter->pdev->dev,
643 TX_DESC_RINGSIZE(tx_ring),
644 tx_ring->desc_head, tx_ring->phys_addr);
645 tx_ring->desc_head = NULL;
646 }
647
648 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
649 rds_ring = &recv_ctx->rds_rings[ring];
650
651 if (rds_ring->desc_head != NULL) {
652 dma_free_coherent(&adapter->pdev->dev,
653 RCV_DESC_RINGSIZE(rds_ring),
654 rds_ring->desc_head,
655 rds_ring->phys_addr);
656 rds_ring->desc_head = NULL;
657 }
658 }
659
660 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
661 sds_ring = &recv_ctx->sds_rings[ring];
662
663 if (sds_ring->desc_head != NULL) {
664 dma_free_coherent(&adapter->pdev->dev,
665 STATUS_DESC_RINGSIZE(sds_ring),
666 sds_ring->desc_head,
667 sds_ring->phys_addr);
668 sds_ring->desc_head = NULL;
669 }
670 }
671}
672
673
674/* Get MAC address of a NIC partition */
675int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
676{
677 int err;
678 struct qlcnic_cmd_args cmd;
679
680 memset(&cmd, 0, sizeof(cmd));
681 cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
682 cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
683 cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
684 qlcnic_issue_cmd(adapter, &cmd);
685 err = cmd.rsp.cmd;
686
687 if (err == QLCNIC_RCODE_SUCCESS)
688 qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
689 else {
690 dev_err(&adapter->pdev->dev,
691			"Failed to get mac address %d\n", err);
692 err = -EIO;
693 }
694
695 return err;
696}
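/*
 * qlcnic_fetch_mac() is defined elsewhere in the driver; conceptually it
 * reassembles the 6-byte MAC address from the two 32-bit mailbox response
 * words. The sketch below shows one plausible unpacking (highest-order
 * bytes first); the actual byte order is whatever the firmware defines,
 * so treat this purely as an illustration.
 */
#include <stdint.h>

static void toy_fetch_mac(uint32_t lo, uint32_t hi, uint8_t *mac)
{
	int i;

	for (i = 0; i < 2; i++)			/* mac[0..1] from the high word */
		mac[i] = (uint8_t)(hi >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)			/* mac[2..5] from the low word */
		mac[i] = (uint8_t)(lo >> ((5 - i) * 8));
}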
697
698/* Get info of a NIC partition */
699int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
700 struct qlcnic_info *npar_info, u8 func_id)
701{
702 int err;
703 dma_addr_t nic_dma_t;
704 struct qlcnic_info_le *nic_info;
705 void *nic_info_addr;
706 struct qlcnic_cmd_args cmd;
707 size_t nic_size = sizeof(struct qlcnic_info_le);
708
709 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
710 &nic_dma_t, GFP_KERNEL);
711 if (!nic_info_addr)
712 return -ENOMEM;
713 memset(nic_info_addr, 0, nic_size);
714
715 nic_info = nic_info_addr;
716 memset(&cmd, 0, sizeof(cmd));
717 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
718 cmd.req.arg1 = MSD(nic_dma_t);
719 cmd.req.arg2 = LSD(nic_dma_t);
720 cmd.req.arg3 = (func_id << 16 | nic_size);
721 qlcnic_issue_cmd(adapter, &cmd);
722 err = cmd.rsp.cmd;
723
724 if (err == QLCNIC_RCODE_SUCCESS) {
725 npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
726 npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
727 npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
728 npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
729 npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
730 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
731 npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
732 npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
733 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
734 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
735
736 dev_info(&adapter->pdev->dev,
737 "phy port: %d switch_mode: %d,\n"
738 "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
739 "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
740 npar_info->phys_port, npar_info->switch_mode,
741 npar_info->max_tx_ques, npar_info->max_rx_ques,
742 npar_info->min_tx_bw, npar_info->max_tx_bw,
743 npar_info->max_mtu, npar_info->capabilities);
744 } else {
745 dev_err(&adapter->pdev->dev,
746			"Failed to get nic info %d\n", err);
747 err = -EIO;
748 }
749
750 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
751 nic_dma_t);
752 return err;
753}
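/*
 * Userspace sketch of the endianness handling above: the firmware fills a
 * packed little-endian structure in DMA memory and each field is converted
 * to host order before use. le16toh()/le32toh() from <endian.h> play the
 * role of le16_to_cpu()/le32_to_cpu(); the structure here is a trimmed,
 * hypothetical stand-in for struct qlcnic_info_le.
 */
#include <stdint.h>
#include <endian.h>

struct toy_nic_info_le {
	uint16_t pci_func;
	uint16_t op_mode;
	uint32_t capabilities;
	uint16_t max_mtu;
} __attribute__((packed));

struct toy_nic_info {
	uint16_t pci_func;
	uint16_t op_mode;
	uint32_t capabilities;
	uint16_t max_mtu;
};

static void toy_nic_info_to_host(const struct toy_nic_info_le *le,
				 struct toy_nic_info *host)
{
	host->pci_func     = le16toh(le->pci_func);
	host->op_mode      = le16toh(le->op_mode);
	host->capabilities = le32toh(le->capabilities);
	host->max_mtu      = le16toh(le->max_mtu);
}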
754
755/* Configure a NIC partition */
756int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
757{
758 int err = -EIO;
759 dma_addr_t nic_dma_t;
760 void *nic_info_addr;
761 struct qlcnic_cmd_args cmd;
762 struct qlcnic_info_le *nic_info;
763 size_t nic_size = sizeof(struct qlcnic_info_le);
764
765 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
766 return err;
767
768 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
769 &nic_dma_t, GFP_KERNEL);
770 if (!nic_info_addr)
771 return -ENOMEM;
772
773 memset(nic_info_addr, 0, nic_size);
774 nic_info = nic_info_addr;
775
776 nic_info->pci_func = cpu_to_le16(nic->pci_func);
777 nic_info->op_mode = cpu_to_le16(nic->op_mode);
778 nic_info->phys_port = cpu_to_le16(nic->phys_port);
779 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
780 nic_info->capabilities = cpu_to_le32(nic->capabilities);
781 nic_info->max_mac_filters = nic->max_mac_filters;
782 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
783 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
784 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
785 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
786
787 memset(&cmd, 0, sizeof(cmd));
788 cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
789 cmd.req.arg1 = MSD(nic_dma_t);
790 cmd.req.arg2 = LSD(nic_dma_t);
791 cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
792 qlcnic_issue_cmd(adapter, &cmd);
793 err = cmd.rsp.cmd;
794
795 if (err != QLCNIC_RCODE_SUCCESS) {
796 dev_err(&adapter->pdev->dev,
797			"Failed to set nic info %d\n", err);
798 err = -EIO;
799 }
800
801 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
802 nic_dma_t);
803 return err;
804}
805
806/* Get PCI Info of a partition */
807int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
808 struct qlcnic_pci_info *pci_info)
809{
810 int err = 0, i;
811 struct qlcnic_cmd_args cmd;
812 dma_addr_t pci_info_dma_t;
813 struct qlcnic_pci_info_le *npar;
814 void *pci_info_addr;
815 size_t npar_size = sizeof(struct qlcnic_pci_info_le);
816 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
817
818 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
819 &pci_info_dma_t, GFP_KERNEL);
820 if (!pci_info_addr)
821 return -ENOMEM;
822 memset(pci_info_addr, 0, pci_size);
823
824 npar = pci_info_addr;
825 memset(&cmd, 0, sizeof(cmd));
826 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
827 cmd.req.arg1 = MSD(pci_info_dma_t);
828 cmd.req.arg2 = LSD(pci_info_dma_t);
829 cmd.req.arg3 = pci_size;
830 qlcnic_issue_cmd(adapter, &cmd);
831 err = cmd.rsp.cmd;
832
833 adapter->ahw->act_pci_func = 0;
834 if (err == QLCNIC_RCODE_SUCCESS) {
835 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
836 pci_info->id = le16_to_cpu(npar->id);
837 pci_info->active = le16_to_cpu(npar->active);
838 pci_info->type = le16_to_cpu(npar->type);
839 if (pci_info->type == QLCNIC_TYPE_NIC)
840 adapter->ahw->act_pci_func++;
841 pci_info->default_port =
842 le16_to_cpu(npar->default_port);
843 pci_info->tx_min_bw =
844 le16_to_cpu(npar->tx_min_bw);
845 pci_info->tx_max_bw =
846 le16_to_cpu(npar->tx_max_bw);
847 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
848 }
849 } else {
850 dev_err(&adapter->pdev->dev,
851			"Failed to get PCI Info %d\n", err);
852 err = -EIO;
853 }
854
855 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
856 pci_info_dma_t);
857 return err;
858}
859
860/* Configure eSwitch for port mirroring */
861int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
862 u8 enable_mirroring, u8 pci_func)
863{
864 int err = -EIO;
865 u32 arg1;
866 struct qlcnic_cmd_args cmd;
867
868 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
869 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
870 return err;
871
872 arg1 = id | (enable_mirroring ? BIT_4 : 0);
873 arg1 |= pci_func << 8;
874
875 memset(&cmd, 0, sizeof(cmd));
876 cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
877 cmd.req.arg1 = arg1;
878 qlcnic_issue_cmd(adapter, &cmd);
879 err = cmd.rsp.cmd;
880
881 if (err != QLCNIC_RCODE_SUCCESS) {
882 dev_err(&adapter->pdev->dev,
883			"Failed to configure port mirroring for func %d on eswitch %d\n",
884 pci_func, id);
885 } else {
886 dev_info(&adapter->pdev->dev,
887			"Configured eSwitch %d for port mirroring of func %d\n",
888 id, pci_func);
889 }
890
891 return err;
892}
893
894int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
895 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
896
897 size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
898 struct qlcnic_esw_stats_le *stats;
899 dma_addr_t stats_dma_t;
900 void *stats_addr;
901 u32 arg1;
902 struct qlcnic_cmd_args cmd;
903 int err;
904
905 if (esw_stats == NULL)
906 return -ENOMEM;
907
908 if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
909 (func != adapter->ahw->pci_func)) {
910 dev_err(&adapter->pdev->dev,
911			"Not privileged to query stats for func=%d\n", func);
912 return -EIO;
913 }
914
915 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
916 &stats_dma_t, GFP_KERNEL);
917 if (!stats_addr) {
918 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
919 return -ENOMEM;
920 }
921 memset(stats_addr, 0, stats_size);
922
923 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
924 arg1 |= rx_tx << 15 | stats_size << 16;
925
926 memset(&cmd, 0, sizeof(cmd));
927 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
928 cmd.req.arg1 = arg1;
929 cmd.req.arg2 = MSD(stats_dma_t);
930 cmd.req.arg3 = LSD(stats_dma_t);
931 qlcnic_issue_cmd(adapter, &cmd);
932 err = cmd.rsp.cmd;
933
934 if (!err) {
935 stats = stats_addr;
936 esw_stats->context_id = le16_to_cpu(stats->context_id);
937 esw_stats->version = le16_to_cpu(stats->version);
938 esw_stats->size = le16_to_cpu(stats->size);
939 esw_stats->multicast_frames =
940 le64_to_cpu(stats->multicast_frames);
941 esw_stats->broadcast_frames =
942 le64_to_cpu(stats->broadcast_frames);
943 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
944 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
945 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
946 esw_stats->errors = le64_to_cpu(stats->errors);
947 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
948 }
949
950 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
951 stats_dma_t);
952 return err;
953}
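/*
 * The 32-bit arg1 word built above multiplexes several small fields. The
 * helper below repeats the same packing with the shift positions taken
 * from the code (func/port in bits 0-7, stats version at bit 8, query type
 * at bit 12, rx/tx selector at bit 15, buffer size from bit 16 up); the
 * function name and parameter widths are editorial choices, and callers
 * are expected to keep each value within its field.
 */
#include <stdint.h>

static uint32_t toy_pack_stats_arg(uint8_t func, uint8_t version,
				   uint8_t type, uint8_t rx_tx, uint16_t size)
{
	return (uint32_t)func |
	       ((uint32_t)version << 8) |
	       ((uint32_t)type << 12) |
	       ((uint32_t)rx_tx << 15) |
	       ((uint32_t)size << 16);
}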
954
955/* This routine will retrieve the MAC statistics from firmware */
956int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
957 struct qlcnic_mac_statistics *mac_stats)
958{
959 struct qlcnic_mac_statistics_le *stats;
960 struct qlcnic_cmd_args cmd;
961 size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
962 dma_addr_t stats_dma_t;
963 void *stats_addr;
964 int err;
965
966 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
967 &stats_dma_t, GFP_KERNEL);
968 if (!stats_addr) {
969 dev_err(&adapter->pdev->dev,
970 "%s: Unable to allocate memory.\n", __func__);
971 return -ENOMEM;
972 }
973 memset(stats_addr, 0, stats_size);
974 memset(&cmd, 0, sizeof(cmd));
975 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
976 cmd.req.arg1 = stats_size << 16;
977 cmd.req.arg2 = MSD(stats_dma_t);
978 cmd.req.arg3 = LSD(stats_dma_t);
979
980 qlcnic_issue_cmd(adapter, &cmd);
981 err = cmd.rsp.cmd;
982
983 if (!err) {
984 stats = stats_addr;
985 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
986 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
987 mac_stats->mac_tx_mcast_pkts =
988 le64_to_cpu(stats->mac_tx_mcast_pkts);
989 mac_stats->mac_tx_bcast_pkts =
990 le64_to_cpu(stats->mac_tx_bcast_pkts);
991 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
992 mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
993 mac_stats->mac_rx_mcast_pkts =
994 le64_to_cpu(stats->mac_rx_mcast_pkts);
995 mac_stats->mac_rx_length_error =
996 le64_to_cpu(stats->mac_rx_length_error);
997 mac_stats->mac_rx_length_small =
998 le64_to_cpu(stats->mac_rx_length_small);
999 mac_stats->mac_rx_length_large =
1000 le64_to_cpu(stats->mac_rx_length_large);
1001 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
1002 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
1003 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
1004 }
1005
1006 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
1007 stats_dma_t);
1008 return err;
1009}
1010
1011int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1012 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
1013
1014 struct __qlcnic_esw_statistics port_stats;
1015 u8 i;
1016 int ret = -EIO;
1017
1018 if (esw_stats == NULL)
1019 return -ENOMEM;
1020 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1021 return -EIO;
1022 if (adapter->npars == NULL)
1023 return -EIO;
1024
1025	memset(esw_stats, 0, sizeof(*esw_stats));
1026 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
1027 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
1028 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
1029 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
1030 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
1031 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
1032 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
1033 esw_stats->context_id = eswitch;
1034
1035 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
1036 if (adapter->npars[i].phy_port != eswitch)
1037 continue;
1038
1039 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1040 if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
1041 rx_tx, &port_stats))
1042 continue;
1043
1044 esw_stats->size = port_stats.size;
1045 esw_stats->version = port_stats.version;
1046 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
1047 port_stats.unicast_frames);
1048 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
1049 port_stats.multicast_frames);
1050 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
1051 port_stats.broadcast_frames);
1052 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
1053 port_stats.dropped_frames);
1054 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
1055 port_stats.errors);
1056 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
1057 port_stats.local_frames);
1058 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
1059 port_stats.numbytes);
1060 ret = 0;
1061 }
1062 return ret;
1063}
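/*
 * QLCNIC_ADD_ESW_STATS() is not defined in this hunk; for the aggregation
 * above to make sense it presumably has to treat the "statistic not
 * available" sentinel as "start from zero" rather than adding to it. A
 * plausible sketch of that behaviour (the sentinel value is illustrative):
 */
#include <stdint.h>

#define TOY_STATS_NOT_AVAIL	0xffffffffffffffffULL

static void toy_add_esw_stat(uint64_t *total, uint64_t sample)
{
	if (sample == TOY_STATS_NOT_AVAIL)
		return;				/* nothing to add for this port */
	if (*total == TOY_STATS_NOT_AVAIL)
		*total = sample;		/* first valid sample replaces the sentinel */
	else
		*total += sample;
}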
1064
1065int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1066 const u8 port, const u8 rx_tx)
1067{
1068
1069 u32 arg1;
1070 struct qlcnic_cmd_args cmd;
1071
1072 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1073 return -EIO;
1074
1075 if (func_esw == QLCNIC_STATS_PORT) {
1076 if (port >= QLCNIC_MAX_PCI_FUNC)
1077 goto err_ret;
1078 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
1079 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
1080 goto err_ret;
1081 } else {
1082 goto err_ret;
1083 }
1084
1085 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
1086 goto err_ret;
1087
1088 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1089 arg1 |= BIT_14 | rx_tx << 15;
1090
1091 memset(&cmd, 0, sizeof(cmd));
1092 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
1093 cmd.req.arg1 = arg1;
1094 qlcnic_issue_cmd(adapter, &cmd);
1095 return cmd.rsp.cmd;
1096
1097err_ret:
1098	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d "
1099		"rx_tx=%d\n", func_esw, port, rx_tx);
1100 return -EIO;
1101}
1102
1103static int
1104__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1105 u32 *arg1, u32 *arg2)
1106{
1107 int err = -EIO;
1108 struct qlcnic_cmd_args cmd;
1109 u8 pci_func;
1110 pci_func = (*arg1 >> 8);
1111
1112 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
1113 cmd.req.arg1 = *arg1;
1114 cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
1115 qlcnic_issue_cmd(adapter, &cmd);
1116 *arg1 = cmd.rsp.arg1;
1117 *arg2 = cmd.rsp.arg2;
1118 err = cmd.rsp.cmd;
1119
1120 if (err == QLCNIC_RCODE_SUCCESS) {
1121 dev_info(&adapter->pdev->dev,
1122 "eSwitch port config for pci func %d\n", pci_func);
1123 } else {
1124 dev_err(&adapter->pdev->dev,
1125 "Failed to get eswitch port config for pci func %d\n",
1126 pci_func);
1127 }
1128 return err;
1129}
1130/* Configure eSwitch port
1131 * op_mode = 0 for setting default port behavior
1132 * op_mode = 1 for setting vlan id
1133 * op_mode = 2 for deleting vlan id
1134 * op_type = 0 for vlan_id
1135 * op_type = 1 for port vlan_id
1136 */
1137int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1138 struct qlcnic_esw_func_cfg *esw_cfg)
1139{
1140 int err = -EIO, index;
1141 u32 arg1, arg2 = 0;
1142 struct qlcnic_cmd_args cmd;
1143 u8 pci_func;
1144
1145 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1146 return err;
1147 pci_func = esw_cfg->pci_func;
1148 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1149 if (index < 0)
1150 return err;
1151 arg1 = (adapter->npars[index].phy_port & BIT_0);
1152 arg1 |= (pci_func << 8);
1153
1154 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1155 return err;
1156 arg1 &= ~(0x0ff << 8);
1157 arg1 |= (pci_func << 8);
1158 arg1 &= ~(BIT_2 | BIT_3);
1159 switch (esw_cfg->op_mode) {
1160 case QLCNIC_PORT_DEFAULTS:
1161 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1162 arg2 |= (BIT_0 | BIT_1);
1163 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1164 arg2 |= (BIT_2 | BIT_3);
1165 if (!(esw_cfg->discard_tagged))
1166 arg1 &= ~BIT_4;
1167 if (!(esw_cfg->promisc_mode))
1168 arg1 &= ~BIT_6;
1169 if (!(esw_cfg->mac_override))
1170 arg1 &= ~BIT_7;
1171 if (!(esw_cfg->mac_anti_spoof))
1172 arg2 &= ~BIT_0;
1173 if (!(esw_cfg->offload_flags & BIT_0))
1174 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1175 if (!(esw_cfg->offload_flags & BIT_1))
1176 arg2 &= ~BIT_2;
1177 if (!(esw_cfg->offload_flags & BIT_2))
1178 arg2 &= ~BIT_3;
1179 break;
1180 case QLCNIC_ADD_VLAN:
1181 arg1 |= (BIT_2 | BIT_5);
1182 arg1 |= (esw_cfg->vlan_id << 16);
1183 break;
1184 case QLCNIC_DEL_VLAN:
1185 arg1 |= (BIT_3 | BIT_5);
1186 arg1 &= ~(0x0ffff << 16);
1187 break;
1188 default:
1189 return err;
1190 }
1191
1192 memset(&cmd, 0, sizeof(cmd));
1193 cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
1194 cmd.req.arg1 = arg1;
1195 cmd.req.arg2 = arg2;
1196 qlcnic_issue_cmd(adapter, &cmd);
1197
1198 err = cmd.rsp.cmd;
1199 if (err != QLCNIC_RCODE_SUCCESS) {
1200 dev_err(&adapter->pdev->dev,
1201 "Failed to configure eswitch pci func %d\n", pci_func);
1202 } else {
1203 dev_info(&adapter->pdev->dev,
1204 "Configured eSwitch for pci func %d\n", pci_func);
1205 }
1206
1207 return err;
1208}
1209
1210int
1211qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1212 struct qlcnic_esw_func_cfg *esw_cfg)
1213{
1214 u32 arg1, arg2;
1215 int index;
1216 u8 phy_port;
1217
1218 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
1219 index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
1220 if (index < 0)
1221 return -EIO;
1222 phy_port = adapter->npars[index].phy_port;
1223 } else {
1224 phy_port = adapter->ahw->physical_port;
1225 }
1226 arg1 = phy_port;
1227 arg1 |= (esw_cfg->pci_func << 8);
1228 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1229 return -EIO;
1230
1231 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1232 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1233 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1234 esw_cfg->mac_override = !!(arg1 & BIT_7);
1235 esw_cfg->vlan_id = LSW(arg1 >> 16);
1236 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1237 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
1238
1239 return 0;
1240}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
deleted file mode 100644
index 74b98110c5b..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ /dev/null
@@ -1,1395 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/types.h>
9#include <linux/delay.h>
10#include <linux/pci.h>
11#include <linux/io.h>
12#include <linux/netdevice.h>
13#include <linux/ethtool.h>
14
15#include "qlcnic.h"
16
17struct qlcnic_stats {
18 char stat_string[ETH_GSTRING_LEN];
19 int sizeof_stat;
20 int stat_offset;
21};
22
23#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
24#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
25
26static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
27 {"xmit_called",
28 QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
29 {"xmit_finished",
30 QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
31 {"rx_dropped",
32 QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
33 {"tx_dropped",
34 QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
35 {"csummed",
36 QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
37 {"rx_pkts",
38 QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
39 {"lro_pkts",
40 QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
41 {"rx_bytes",
42 QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
43 {"tx_bytes",
44 QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
45 {"lrobytes",
46 QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
47 {"lso_frames",
48 QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
49 {"xmit_on",
50 QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
51 {"xmit_off",
52 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
53 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
54 QLC_OFF(stats.skb_alloc_failure)},
55 {"null rxbuf",
56 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
57 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
58 QLC_OFF(stats.rx_dma_map_error)},
59 {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
60 QLC_OFF(stats.tx_dma_map_error)},
61
62};
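/*
 * The table above pairs each ethtool string with the size and offset of the
 * matching counter inside struct qlcnic_adapter, so a single generic loop
 * (see qlcnic_get_ethtool_stats() later in this file) can emit them all.
 * A standalone sketch of the same offsetof/sizeof technique, with a
 * hypothetical structure and table:
 */
#include <stdint.h>
#include <stddef.h>

struct toy_stats { uint64_t tx_ok; uint32_t rx_drop; };
struct toy_adapter { struct toy_stats stats; };

struct toy_stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define TOY_STAT(m)	{ #m, sizeof(((struct toy_adapter *)0)->m), \
			  offsetof(struct toy_adapter, m) }

static const struct toy_stat_desc toy_stat_table[] = {
	TOY_STAT(stats.tx_ok),
	TOY_STAT(stats.rx_drop),
};

static void toy_fill_stats(const struct toy_adapter *a, uint64_t *out)
{
	size_t i;

	for (i = 0; i < sizeof(toy_stat_table) / sizeof(toy_stat_table[0]); i++) {
		const char *p = (const char *)a + toy_stat_table[i].offset;

		out[i] = (toy_stat_table[i].size == sizeof(uint64_t)) ?
			 *(const uint64_t *)p : *(const uint32_t *)p;
	}
}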
63
64static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
65 "rx unicast frames",
66 "rx multicast frames",
67 "rx broadcast frames",
68 "rx dropped frames",
69 "rx errors",
70 "rx local frames",
71 "rx numbytes",
72 "tx unicast frames",
73 "tx multicast frames",
74 "tx broadcast frames",
75 "tx dropped frames",
76 "tx errors",
77 "tx local frames",
78 "tx numbytes",
79};
80
81static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
82 "mac_tx_frames",
83 "mac_tx_bytes",
84 "mac_tx_mcast_pkts",
85 "mac_tx_bcast_pkts",
86 "mac_tx_pause_cnt",
87 "mac_tx_ctrl_pkt",
88 "mac_tx_lt_64b_pkts",
89 "mac_tx_lt_127b_pkts",
90 "mac_tx_lt_255b_pkts",
91 "mac_tx_lt_511b_pkts",
92 "mac_tx_lt_1023b_pkts",
93 "mac_tx_lt_1518b_pkts",
94 "mac_tx_gt_1518b_pkts",
95 "mac_rx_frames",
96 "mac_rx_bytes",
97 "mac_rx_mcast_pkts",
98 "mac_rx_bcast_pkts",
99 "mac_rx_pause_cnt",
100 "mac_rx_ctrl_pkt",
101 "mac_rx_lt_64b_pkts",
102 "mac_rx_lt_127b_pkts",
103 "mac_rx_lt_255b_pkts",
104 "mac_rx_lt_511b_pkts",
105 "mac_rx_lt_1023b_pkts",
106 "mac_rx_lt_1518b_pkts",
107 "mac_rx_gt_1518b_pkts",
108 "mac_rx_length_error",
109 "mac_rx_length_small",
110 "mac_rx_length_large",
111 "mac_rx_jabber",
112 "mac_rx_dropped",
113 "mac_rx_crc_error",
114 "mac_align_error",
115};
116
117#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
118#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
119#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
120#define QLCNIC_TOTAL_STATS_LEN	(QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN)
121
122static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
123 "Register_Test_on_offline",
124 "Link_Test_on_offline",
125 "Interrupt_Test_offline",
126 "Internal_Loopback_offline",
127 "External_Loopback_offline"
128};
129
130#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
131
132#define QLCNIC_RING_REGS_COUNT 20
133#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
134#define QLCNIC_MAX_EEPROM_LEN 1024
135
136static const u32 diag_registers[] = {
137 CRB_CMDPEG_STATE,
138 CRB_RCVPEG_STATE,
139 CRB_XG_STATE_P3P,
140 CRB_FW_CAPABILITIES_1,
141 ISR_INT_STATE_REG,
142 QLCNIC_CRB_DRV_ACTIVE,
143 QLCNIC_CRB_DEV_STATE,
144 QLCNIC_CRB_DRV_STATE,
145 QLCNIC_CRB_DRV_SCRATCH,
146 QLCNIC_CRB_DEV_PARTITION_INFO,
147 QLCNIC_CRB_DRV_IDC_VER,
148 QLCNIC_PEG_ALIVE_COUNTER,
149 QLCNIC_PEG_HALT_STATUS1,
150 QLCNIC_PEG_HALT_STATUS2,
151 QLCNIC_CRB_PEG_NET_0+0x3c,
152 QLCNIC_CRB_PEG_NET_1+0x3c,
153 QLCNIC_CRB_PEG_NET_2+0x3c,
154 QLCNIC_CRB_PEG_NET_4+0x3c,
155 -1
156};
157
158#define QLCNIC_MGMT_API_VERSION 2
159#define QLCNIC_DEV_INFO_SIZE 1
160#define QLCNIC_ETHTOOL_REGS_VER 2
161static int qlcnic_get_regs_len(struct net_device *dev)
162{
163 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
164 QLCNIC_DEV_INFO_SIZE + 1;
165}
166
167static int qlcnic_get_eeprom_len(struct net_device *dev)
168{
169 return QLCNIC_FLASH_TOTAL_SIZE;
170}
171
172static void
173qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
174{
175 struct qlcnic_adapter *adapter = netdev_priv(dev);
176 u32 fw_major, fw_minor, fw_build;
177
178 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
179 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
180 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
181 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
182 "%d.%d.%d", fw_major, fw_minor, fw_build);
183
184 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
185 sizeof(drvinfo->bus_info));
186 strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
187 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
188 sizeof(drvinfo->version));
189}
190
191static int
192qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
193{
194 struct qlcnic_adapter *adapter = netdev_priv(dev);
195 int check_sfp_module = 0;
196
197 /* read which mode */
198 if (adapter->ahw->port_type == QLCNIC_GBE) {
199 ecmd->supported = (SUPPORTED_10baseT_Half |
200 SUPPORTED_10baseT_Full |
201 SUPPORTED_100baseT_Half |
202 SUPPORTED_100baseT_Full |
203 SUPPORTED_1000baseT_Half |
204 SUPPORTED_1000baseT_Full);
205
206 ecmd->advertising = (ADVERTISED_100baseT_Half |
207 ADVERTISED_100baseT_Full |
208 ADVERTISED_1000baseT_Half |
209 ADVERTISED_1000baseT_Full);
210
211 ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
212 ecmd->duplex = adapter->ahw->link_duplex;
213 ecmd->autoneg = adapter->ahw->link_autoneg;
214
215 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
216 u32 val;
217
218 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
219 if (val == QLCNIC_PORT_MODE_802_3_AP) {
220 ecmd->supported = SUPPORTED_1000baseT_Full;
221 ecmd->advertising = ADVERTISED_1000baseT_Full;
222 } else {
223 ecmd->supported = SUPPORTED_10000baseT_Full;
224 ecmd->advertising = ADVERTISED_10000baseT_Full;
225 }
226
227 if (netif_running(dev) && adapter->ahw->has_link_events) {
228 ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
229 ecmd->autoneg = adapter->ahw->link_autoneg;
230 ecmd->duplex = adapter->ahw->link_duplex;
231 goto skip;
232 }
233
234 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
235 ecmd->duplex = DUPLEX_UNKNOWN;
236 ecmd->autoneg = AUTONEG_DISABLE;
237 } else
238 return -EIO;
239
240skip:
241 ecmd->phy_address = adapter->ahw->physical_port;
242 ecmd->transceiver = XCVR_EXTERNAL;
243
244 switch (adapter->ahw->board_type) {
245 case QLCNIC_BRDTYPE_P3P_REF_QG:
246 case QLCNIC_BRDTYPE_P3P_4_GB:
247 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
248
249 ecmd->supported |= SUPPORTED_Autoneg;
250 ecmd->advertising |= ADVERTISED_Autoneg;
251 case QLCNIC_BRDTYPE_P3P_10G_CX4:
252 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
253 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
254 ecmd->supported |= SUPPORTED_TP;
255 ecmd->advertising |= ADVERTISED_TP;
256 ecmd->port = PORT_TP;
257 ecmd->autoneg = adapter->ahw->link_autoneg;
258 break;
259 case QLCNIC_BRDTYPE_P3P_IMEZ:
260 case QLCNIC_BRDTYPE_P3P_XG_LOM:
261 case QLCNIC_BRDTYPE_P3P_HMEZ:
262 ecmd->supported |= SUPPORTED_MII;
263 ecmd->advertising |= ADVERTISED_MII;
264 ecmd->port = PORT_MII;
265 ecmd->autoneg = AUTONEG_DISABLE;
266 break;
267 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
268 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
269 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
270 ecmd->advertising |= ADVERTISED_TP;
271 ecmd->supported |= SUPPORTED_TP;
272 check_sfp_module = netif_running(dev) &&
273 adapter->ahw->has_link_events;
274 case QLCNIC_BRDTYPE_P3P_10G_XFP:
275 ecmd->supported |= SUPPORTED_FIBRE;
276 ecmd->advertising |= ADVERTISED_FIBRE;
277 ecmd->port = PORT_FIBRE;
278 ecmd->autoneg = AUTONEG_DISABLE;
279 break;
280 case QLCNIC_BRDTYPE_P3P_10G_TP:
281 if (adapter->ahw->port_type == QLCNIC_XGBE) {
282 ecmd->autoneg = AUTONEG_DISABLE;
283 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
284 ecmd->advertising |=
285 (ADVERTISED_FIBRE | ADVERTISED_TP);
286 ecmd->port = PORT_FIBRE;
287 check_sfp_module = netif_running(dev) &&
288 adapter->ahw->has_link_events;
289 } else {
290 ecmd->autoneg = AUTONEG_ENABLE;
291 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
292 ecmd->advertising |=
293 (ADVERTISED_TP | ADVERTISED_Autoneg);
294 ecmd->port = PORT_TP;
295 }
296 break;
297 default:
298 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
299 adapter->ahw->board_type);
300 return -EIO;
301 }
302
303 if (check_sfp_module) {
304 switch (adapter->ahw->module_type) {
305 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
306 case LINKEVENT_MODULE_OPTICAL_SRLR:
307 case LINKEVENT_MODULE_OPTICAL_LRM:
308 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
309 ecmd->port = PORT_FIBRE;
310 break;
311 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
312 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
313 case LINKEVENT_MODULE_TWINAX:
314 ecmd->port = PORT_TP;
315 break;
316 default:
317 ecmd->port = PORT_OTHER;
318 }
319 }
320
321 return 0;
322}
323
324static int
325qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
326{
327 u32 config = 0;
328 u32 ret = 0;
329 struct qlcnic_adapter *adapter = netdev_priv(dev);
330
331 if (adapter->ahw->port_type != QLCNIC_GBE)
332 return -EOPNOTSUPP;
333
334 /* read which mode */
335 if (ecmd->duplex)
336 config |= 0x1;
337
338 if (ecmd->autoneg)
339 config |= 0x2;
340
341 switch (ethtool_cmd_speed(ecmd)) {
342 case SPEED_10:
343 config |= (0 << 8);
344 break;
345 case SPEED_100:
346 config |= (1 << 8);
347 break;
348 case SPEED_1000:
349 config |= (10 << 8);
350 break;
351 default:
352 return -EIO;
353 }
354
355 ret = qlcnic_fw_cmd_set_port(adapter, config);
356
357 if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
358 return -EOPNOTSUPP;
359 else if (ret)
360 return -EIO;
361
362 adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
363 adapter->ahw->link_duplex = ecmd->duplex;
364 adapter->ahw->link_autoneg = ecmd->autoneg;
365
366 if (!netif_running(dev))
367 return 0;
368
369 dev->netdev_ops->ndo_stop(dev);
370 return dev->netdev_ops->ndo_open(dev);
371}
372
373static void
374qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
375{
376 struct qlcnic_adapter *adapter = netdev_priv(dev);
377 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
378 struct qlcnic_host_sds_ring *sds_ring;
379 u32 *regs_buff = p;
380 int ring, i = 0, j = 0;
381
382 memset(p, 0, qlcnic_get_regs_len(dev));
383 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
384 (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
385
386 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
387 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
388
389 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
390 regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
391
392 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
393 return;
394
395 regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
396
397 regs_buff[i++] = 1; /* No. of tx ring */
398 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
399 regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
400
401 regs_buff[i++] = 2; /* No. of rx ring */
402 regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
403 regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
404
405 regs_buff[i++] = adapter->max_sds_rings;
406
407 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
408 sds_ring = &(recv_ctx->sds_rings[ring]);
409 regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
410 }
411}
412
413static u32 qlcnic_test_link(struct net_device *dev)
414{
415 struct qlcnic_adapter *adapter = netdev_priv(dev);
416 u32 val;
417
418 val = QLCRD32(adapter, CRB_XG_STATE_P3P);
419 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
420 return (val == XG_LINK_UP_P3P) ? 0 : 1;
421}
422
423static int
424qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
425 u8 *bytes)
426{
427 struct qlcnic_adapter *adapter = netdev_priv(dev);
428 int offset;
429 int ret;
430
431 if (eeprom->len == 0)
432 return -EINVAL;
433
434 eeprom->magic = (adapter->pdev)->vendor |
435 ((adapter->pdev)->device << 16);
436 offset = eeprom->offset;
437
438 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
439 eeprom->len);
440 if (ret < 0)
441 return ret;
442
443 return 0;
444}
445
446static void
447qlcnic_get_ringparam(struct net_device *dev,
448 struct ethtool_ringparam *ring)
449{
450 struct qlcnic_adapter *adapter = netdev_priv(dev);
451
452 ring->rx_pending = adapter->num_rxd;
453 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
454 ring->tx_pending = adapter->num_txd;
455
456 ring->rx_max_pending = adapter->max_rxd;
457 ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
458 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
459}
460
461static u32
462qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
463{
464 u32 num_desc;
465 num_desc = max(val, min);
466 num_desc = min(num_desc, max);
467 num_desc = roundup_pow_of_two(num_desc);
468
469 if (val != num_desc) {
470 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
471 qlcnic_driver_name, r_name, num_desc, val);
472 }
473
474 return num_desc;
475}
476
477static int
478qlcnic_set_ringparam(struct net_device *dev,
479 struct ethtool_ringparam *ring)
480{
481 struct qlcnic_adapter *adapter = netdev_priv(dev);
482 u16 num_rxd, num_jumbo_rxd, num_txd;
483
484 if (ring->rx_mini_pending)
485 return -EOPNOTSUPP;
486
487 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
488 MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
489
490 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
491 MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
492 "rx jumbo");
493
494 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
495 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
496
497 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
498 num_jumbo_rxd == adapter->num_jumbo_rxd)
499 return 0;
500
501 adapter->num_rxd = num_rxd;
502 adapter->num_jumbo_rxd = num_jumbo_rxd;
503 adapter->num_txd = num_txd;
504
505 return qlcnic_reset_context(adapter);
506}
507
508static void qlcnic_get_channels(struct net_device *dev,
509 struct ethtool_channels *channel)
510{
511 int min;
512 struct qlcnic_adapter *adapter = netdev_priv(dev);
513
514 min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
515 channel->max_rx = rounddown_pow_of_two(min);
516 channel->max_tx = adapter->ahw->max_tx_ques;
517
518 channel->rx_count = adapter->max_sds_rings;
519 channel->tx_count = adapter->ahw->max_tx_ques;
520}
521
522static int qlcnic_set_channels(struct net_device *dev,
523 struct ethtool_channels *channel)
524{
525 struct qlcnic_adapter *adapter = netdev_priv(dev);
526 int err;
527
528 if (channel->other_count || channel->combined_count ||
529 channel->tx_count != channel->max_tx)
530 return -EINVAL;
531
532 err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
533 if (err)
534 return err;
535
536 err = qlcnic_set_max_rss(adapter, channel->rx_count);
537 netdev_info(dev, "allocated 0x%x sds rings\n",
538 adapter->max_sds_rings);
539 return err;
540}
541
542static void
543qlcnic_get_pauseparam(struct net_device *netdev,
544 struct ethtool_pauseparam *pause)
545{
546 struct qlcnic_adapter *adapter = netdev_priv(netdev);
547 int port = adapter->ahw->physical_port;
548 __u32 val;
549
550 if (adapter->ahw->port_type == QLCNIC_GBE) {
551 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
552 return;
553 /* get flow control settings */
554 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
555 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
556 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
557 switch (port) {
558 case 0:
559 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
560 break;
561 case 1:
562 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
563 break;
564 case 2:
565 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
566 break;
567 case 3:
568 default:
569 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
570 break;
571 }
572 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
573 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
574 return;
575 pause->rx_pause = 1;
576 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
577 if (port == 0)
578 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
579 else
580 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
581 } else {
582 dev_err(&netdev->dev, "Unknown board type: %x\n",
583 adapter->ahw->port_type);
584 }
585}
586
587static int
588qlcnic_set_pauseparam(struct net_device *netdev,
589 struct ethtool_pauseparam *pause)
590{
591 struct qlcnic_adapter *adapter = netdev_priv(netdev);
592 int port = adapter->ahw->physical_port;
593 __u32 val;
594
595 /* read mode */
596 if (adapter->ahw->port_type == QLCNIC_GBE) {
597 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
598 return -EIO;
599 /* set flow control */
600 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
601
602 if (pause->rx_pause)
603 qlcnic_gb_rx_flowctl(val);
604 else
605 qlcnic_gb_unset_rx_flowctl(val);
606
607 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
608 val);
609 /* set autoneg */
610 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
611 switch (port) {
612 case 0:
613 if (pause->tx_pause)
614 qlcnic_gb_unset_gb0_mask(val);
615 else
616 qlcnic_gb_set_gb0_mask(val);
617 break;
618 case 1:
619 if (pause->tx_pause)
620 qlcnic_gb_unset_gb1_mask(val);
621 else
622 qlcnic_gb_set_gb1_mask(val);
623 break;
624 case 2:
625 if (pause->tx_pause)
626 qlcnic_gb_unset_gb2_mask(val);
627 else
628 qlcnic_gb_set_gb2_mask(val);
629 break;
630 case 3:
631 default:
632 if (pause->tx_pause)
633 qlcnic_gb_unset_gb3_mask(val);
634 else
635 qlcnic_gb_set_gb3_mask(val);
636 break;
637 }
638 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
639 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
640 if (!pause->rx_pause || pause->autoneg)
641 return -EOPNOTSUPP;
642
643 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
644 return -EIO;
645
646 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
647 if (port == 0) {
648 if (pause->tx_pause)
649 qlcnic_xg_unset_xg0_mask(val);
650 else
651 qlcnic_xg_set_xg0_mask(val);
652 } else {
653 if (pause->tx_pause)
654 qlcnic_xg_unset_xg1_mask(val);
655 else
656 qlcnic_xg_set_xg1_mask(val);
657 }
658 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
659 } else {
660 dev_err(&netdev->dev, "Unknown board type: %x\n",
661 adapter->ahw->port_type);
662 }
663 return 0;
664}
665
666static int qlcnic_reg_test(struct net_device *dev)
667{
668 struct qlcnic_adapter *adapter = netdev_priv(dev);
669 u32 data_read;
670
671 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
672 if ((data_read & 0xffff) != adapter->pdev->vendor)
673 return 1;
674
675 return 0;
676}
677
678static int qlcnic_get_sset_count(struct net_device *dev, int sset)
679{
680 struct qlcnic_adapter *adapter = netdev_priv(dev);
681 switch (sset) {
682 case ETH_SS_TEST:
683 return QLCNIC_TEST_LEN;
684 case ETH_SS_STATS:
685 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
686 return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
687 return QLCNIC_TOTAL_STATS_LEN;
688 default:
689 return -EOPNOTSUPP;
690 }
691}
692
693static int qlcnic_irq_test(struct net_device *netdev)
694{
695 struct qlcnic_adapter *adapter = netdev_priv(netdev);
696 int max_sds_rings = adapter->max_sds_rings;
697 int ret;
698 struct qlcnic_cmd_args cmd;
699
700 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
701 return -EIO;
702
703 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
704 if (ret)
705 goto clear_it;
706
707 adapter->ahw->diag_cnt = 0;
708 memset(&cmd, 0, sizeof(cmd));
709 cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
710 cmd.req.arg1 = adapter->ahw->pci_func;
711 qlcnic_issue_cmd(adapter, &cmd);
712 ret = cmd.rsp.cmd;
713
714 if (ret)
715 goto done;
716
717 msleep(10);
718
719 ret = !adapter->ahw->diag_cnt;
720
721done:
722 qlcnic_diag_free_res(netdev, max_sds_rings);
723
724clear_it:
725 adapter->max_sds_rings = max_sds_rings;
726 clear_bit(__QLCNIC_RESETTING, &adapter->state);
727 return ret;
728}
729
730#define QLCNIC_ILB_PKT_SIZE 64
731#define QLCNIC_NUM_ILB_PKT 16
732#define QLCNIC_ILB_MAX_RCV_LOOP 10
733
734static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
735{
736 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
737
738 memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
739
740 memcpy(data, mac, ETH_ALEN);
741 memcpy(data + ETH_ALEN, mac, ETH_ALEN);
742
743 memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
744}
745
746int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
747{
748 unsigned char buff[QLCNIC_ILB_PKT_SIZE];
749 qlcnic_create_loopback_buff(buff, mac);
750 return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
751}
752
753static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
754{
755 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
756 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
757 struct sk_buff *skb;
758 int i, loop, cnt = 0;
759
760 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
761 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
762 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
763 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
764
765 adapter->ahw->diag_cnt = 0;
766 qlcnic_xmit_frame(skb, adapter->netdev);
767
768 loop = 0;
769 do {
770 msleep(1);
771 qlcnic_process_rcv_ring_diag(sds_ring);
772 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
773 break;
774 } while (!adapter->ahw->diag_cnt);
775
776 dev_kfree_skb_any(skb);
777
778 if (!adapter->ahw->diag_cnt)
779 QLCDB(adapter, DRV,
780 "LB Test: packet #%d was not received\n", i + 1);
781 else
782 cnt++;
783 }
784 if (cnt != i) {
785 dev_warn(&adapter->pdev->dev, "LB Test failed\n");
786 if (mode != QLCNIC_ILB_MODE) {
787 dev_warn(&adapter->pdev->dev,
788				 "WARNING: Please make sure the external "
789 "loopback connector is plugged in\n");
790 }
791 return -1;
792 }
793 return 0;
794}
795
796static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
797{
798 struct qlcnic_adapter *adapter = netdev_priv(netdev);
799 int max_sds_rings = adapter->max_sds_rings;
800 struct qlcnic_host_sds_ring *sds_ring;
801 int loop = 0;
802 int ret;
803
804 if (!(adapter->ahw->capabilities &
805 QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
806 netdev_info(netdev, "Firmware is not loopback test capable\n");
807 return -EOPNOTSUPP;
808 }
809
810 QLCDB(adapter, DRV, "%s loopback test in progress\n",
811 mode == QLCNIC_ILB_MODE ? "internal" : "external");
812 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
813		netdev_warn(netdev, "Loopback test not supported for "
814			    "non-privileged function\n");
815 return 0;
816 }
817
818 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
819 return -EBUSY;
820
821 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
822 if (ret)
823 goto clear_it;
824
825 sds_ring = &adapter->recv_ctx->sds_rings[0];
826
827 ret = qlcnic_set_lb_mode(adapter, mode);
828 if (ret)
829 goto free_res;
830
831 adapter->ahw->diag_cnt = 0;
832 do {
833 msleep(500);
834 qlcnic_process_rcv_ring_diag(sds_ring);
835 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
836			netdev_info(netdev, "firmware didn't respond to loopback"
837 " configure request\n");
838 ret = -QLCNIC_FW_NOT_RESPOND;
839 goto free_res;
840 } else if (adapter->ahw->diag_cnt) {
841 ret = adapter->ahw->diag_cnt;
842 goto free_res;
843 }
844 } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
845
846 ret = qlcnic_do_lb_test(adapter, mode);
847
848 qlcnic_clear_lb_mode(adapter);
849
850 free_res:
851 qlcnic_diag_free_res(netdev, max_sds_rings);
852
853 clear_it:
854 adapter->max_sds_rings = max_sds_rings;
855 clear_bit(__QLCNIC_RESETTING, &adapter->state);
856 return ret;
857}
858
859static void
860qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
861 u64 *data)
862{
863 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
864
865 data[0] = qlcnic_reg_test(dev);
866 if (data[0])
867 eth_test->flags |= ETH_TEST_FL_FAILED;
868
869 data[1] = (u64) qlcnic_test_link(dev);
870 if (data[1])
871 eth_test->flags |= ETH_TEST_FL_FAILED;
872
873 if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
874 data[2] = qlcnic_irq_test(dev);
875 if (data[2])
876 eth_test->flags |= ETH_TEST_FL_FAILED;
877
878 data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
879 if (data[3])
880 eth_test->flags |= ETH_TEST_FL_FAILED;
881 if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
882 data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
883 if (data[4])
884 eth_test->flags |= ETH_TEST_FL_FAILED;
885 eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
886 }
887 }
888}
889
890static void
891qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
892{
893 struct qlcnic_adapter *adapter = netdev_priv(dev);
894 int index, i, j;
895
896 switch (stringset) {
897 case ETH_SS_TEST:
898 memcpy(data, *qlcnic_gstrings_test,
899 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
900 break;
901 case ETH_SS_STATS:
902 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
903 memcpy(data + index * ETH_GSTRING_LEN,
904 qlcnic_gstrings_stats[index].stat_string,
905 ETH_GSTRING_LEN);
906 }
907 for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
908 memcpy(data + index * ETH_GSTRING_LEN,
909 qlcnic_mac_stats_strings[j],
910 ETH_GSTRING_LEN);
911 }
912 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
913 return;
914 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
915 memcpy(data + index * ETH_GSTRING_LEN,
916 qlcnic_device_gstrings_stats[i],
917 ETH_GSTRING_LEN);
918 }
919 }
920}
921
922static void
923qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
924{
925 int ind = *index;
926
927 if (type == QLCNIC_MAC_STATS) {
928 struct qlcnic_mac_statistics *mac_stats =
929 (struct qlcnic_mac_statistics *)stats;
930 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
931 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
932 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
933 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
934 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
935 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
936 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
937 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
938 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
939 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
940 data[ind++] =
941 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
942 data[ind++] =
943 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
944 data[ind++] =
945 QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
946 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
947 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
948 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
949 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
950 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
951 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
952 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
953 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
954 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
955 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
956 data[ind++] =
957 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
958 data[ind++] =
959 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
960 data[ind++] =
961 QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
962 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
963 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
964 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
965 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
966 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
967 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
968 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
969 } else if (type == QLCNIC_ESW_STATS) {
970 struct __qlcnic_esw_statistics *esw_stats =
971 (struct __qlcnic_esw_statistics *)stats;
972 data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
973 data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
974 data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
975 data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
976 data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
977 data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
978 data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
979 }
980
981 *index = ind;
982}
983
984static void
985qlcnic_get_ethtool_stats(struct net_device *dev,
986 struct ethtool_stats *stats, u64 * data)
987{
988 struct qlcnic_adapter *adapter = netdev_priv(dev);
989 struct qlcnic_esw_statistics port_stats;
990 struct qlcnic_mac_statistics mac_stats;
991 int index, ret;
992
993 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
994 char *p =
995 (char *)adapter +
996 qlcnic_gstrings_stats[index].stat_offset;
 997 		data[index] =
 998 		    (qlcnic_gstrings_stats[index].sizeof_stat ==
 999 		     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1000 }
1001
1002 /* Retrieve MAC statistics from firmware */
1003 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
1004 qlcnic_get_mac_stats(adapter, &mac_stats);
1005 qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
1006
1007 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
1008 return;
1009
1010 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
1011 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
1012 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
1013 if (ret)
1014 return;
1015
1016 qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
1017
1018 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
1019 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
1020 if (ret)
1021 return;
1022
1023 qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
1024}
1025
1026static int qlcnic_set_led(struct net_device *dev,
1027 enum ethtool_phys_id_state state)
1028{
1029 struct qlcnic_adapter *adapter = netdev_priv(dev);
1030 int max_sds_rings = adapter->max_sds_rings;
1031 int err = -EIO, active = 1;
1032
1033 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1034 		netdev_warn(dev, "LED test not supported for "
1035 			"non-privileged function\n");
1036 return -EOPNOTSUPP;
1037 }
1038
1039 switch (state) {
1040 case ETHTOOL_ID_ACTIVE:
1041 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
1042 return -EBUSY;
1043
1044 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1045 break;
1046
1047 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1048 if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
1049 break;
1050 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
1051 }
1052
1053 if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
1054 err = 0;
1055 break;
1056 }
1057
1058 dev_err(&adapter->pdev->dev,
1059 "Failed to set LED blink state.\n");
1060 break;
1061
1062 case ETHTOOL_ID_INACTIVE:
1063 active = 0;
1064
1065 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1066 break;
1067
1068 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1069 if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
1070 break;
1071 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
1072 }
1073
1074 if (adapter->nic_ops->config_led(adapter, 0, 0xf))
1075 dev_err(&adapter->pdev->dev,
1076 "Failed to reset LED blink state.\n");
1077
1078 break;
1079
1080 default:
1081 return -EINVAL;
1082 }
1083
1084 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
1085 qlcnic_diag_free_res(dev, max_sds_rings);
1086
1087 if (!active || err)
1088 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
1089
1090 return err;
1091}
1092
1093static void
1094qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1095{
1096 struct qlcnic_adapter *adapter = netdev_priv(dev);
1097 u32 wol_cfg;
1098
1099 wol->supported = 0;
1100 wol->wolopts = 0;
1101
1102 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1103 if (wol_cfg & (1UL << adapter->portnum))
1104 wol->supported |= WAKE_MAGIC;
1105
1106 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1107 if (wol_cfg & (1UL << adapter->portnum))
1108 wol->wolopts |= WAKE_MAGIC;
1109}
1110
1111static int
1112qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1113{
1114 struct qlcnic_adapter *adapter = netdev_priv(dev);
1115 u32 wol_cfg;
1116
1117 if (wol->wolopts & ~WAKE_MAGIC)
1118 return -EOPNOTSUPP;
1119
1120 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1121 if (!(wol_cfg & (1 << adapter->portnum)))
1122 return -EOPNOTSUPP;
1123
1124 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1125 if (wol->wolopts & WAKE_MAGIC)
1126 wol_cfg |= 1UL << adapter->portnum;
1127 else
1128 wol_cfg &= ~(1UL << adapter->portnum);
1129
1130 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
1131
1132 return 0;
1133}
1134
1135/*
1136 * Set the coalescing parameters. Currently only normal is supported.
1137 * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
1138 * firmware coalescing to default.
1139 */
1140static int qlcnic_set_intr_coalesce(struct net_device *netdev,
1141 struct ethtool_coalesce *ethcoal)
1142{
1143 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1144
1145 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
1146 return -EINVAL;
1147
1148 /*
1149 * Return Error if unsupported values or
1150 * unsupported parameters are set.
1151 */
1152 if (ethcoal->rx_coalesce_usecs > 0xffff ||
1153 ethcoal->rx_max_coalesced_frames > 0xffff ||
1154 ethcoal->tx_coalesce_usecs ||
1155 ethcoal->tx_max_coalesced_frames ||
1156 ethcoal->rx_coalesce_usecs_irq ||
1157 ethcoal->rx_max_coalesced_frames_irq ||
1158 ethcoal->tx_coalesce_usecs_irq ||
1159 ethcoal->tx_max_coalesced_frames_irq ||
1160 ethcoal->stats_block_coalesce_usecs ||
1161 ethcoal->use_adaptive_rx_coalesce ||
1162 ethcoal->use_adaptive_tx_coalesce ||
1163 ethcoal->pkt_rate_low ||
1164 ethcoal->rx_coalesce_usecs_low ||
1165 ethcoal->rx_max_coalesced_frames_low ||
1166 ethcoal->tx_coalesce_usecs_low ||
1167 ethcoal->tx_max_coalesced_frames_low ||
1168 ethcoal->pkt_rate_high ||
1169 ethcoal->rx_coalesce_usecs_high ||
1170 ethcoal->rx_max_coalesced_frames_high ||
1171 ethcoal->tx_coalesce_usecs_high ||
1172 ethcoal->tx_max_coalesced_frames_high)
1173 return -EINVAL;
1174
1175 if (!ethcoal->rx_coalesce_usecs ||
1176 !ethcoal->rx_max_coalesced_frames) {
1177 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1178 adapter->ahw->coal.rx_time_us =
1179 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1180 adapter->ahw->coal.rx_packets =
1181 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1182 } else {
1183 adapter->ahw->coal.flag = 0;
1184 adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
1185 adapter->ahw->coal.rx_packets =
1186 ethcoal->rx_max_coalesced_frames;
1187 }
1188
1189 qlcnic_config_intr_coalesce(adapter);
1190
1191 return 0;
1192}
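For illustration, a minimal userspace sketch (not part of this driver; the interface name "eth0" and the chosen values are assumptions) of how coalescing settings reach qlcnic_set_intr_coalesce(): the standard ETHTOOL_SCOALESCE command issued through the SIOCETHTOOL ioctl.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ecoal, 0, sizeof(ecoal));
	ecoal.cmd = ETHTOOL_SCOALESCE;
	ecoal.rx_coalesce_usecs = 3;		/* must fit in 16 bits */
	ecoal.rx_max_coalesced_frames = 64;	/* must fit in 16 bits */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&ecoal;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}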
1193
1194static int qlcnic_get_intr_coalesce(struct net_device *netdev,
1195 struct ethtool_coalesce *ethcoal)
1196{
1197 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1198
1199 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1200 return -EINVAL;
1201
1202 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
1203 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
1204
1205 return 0;
1206}
1207
1208static u32 qlcnic_get_msglevel(struct net_device *netdev)
1209{
1210 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1211
1212 return adapter->ahw->msg_enable;
1213}
1214
1215static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1216{
1217 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1218
1219 adapter->ahw->msg_enable = msglvl;
1220}
1221
1222static int
1223qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1224{
1225 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1226 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1227
1228 if (!fw_dump->tmpl_hdr) {
1229 netdev_err(adapter->netdev, "FW Dump not supported\n");
1230 return -ENOTSUPP;
1231 }
1232
1233 if (fw_dump->clr)
1234 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
1235 else
1236 dump->len = 0;
1237
1238 if (!fw_dump->enable)
1239 dump->flag = ETH_FW_DUMP_DISABLE;
1240 else
1241 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1242
1243 dump->version = adapter->fw_version;
1244 return 0;
1245}
1246
1247static int
1248qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1249 void *buffer)
1250{
1251 int i, copy_sz;
1252 u32 *hdr_ptr;
1253 __le32 *data;
1254 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1255 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1256
1257 if (!fw_dump->tmpl_hdr) {
1258 netdev_err(netdev, "FW Dump not supported\n");
1259 return -ENOTSUPP;
1260 }
1261
1262 if (!fw_dump->clr) {
1263 netdev_info(netdev, "Dump not available\n");
1264 return -EINVAL;
1265 }
1266 /* Copy template header first */
1267 copy_sz = fw_dump->tmpl_hdr->size;
1268 hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
1269 data = buffer;
1270 for (i = 0; i < copy_sz/sizeof(u32); i++)
1271 *data++ = cpu_to_le32(*hdr_ptr++);
1272
1273 /* Copy captured dump data */
1274 memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
1275 dump->len = copy_sz + fw_dump->size;
1276 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1277
1278 /* Free dump area once data has been captured */
1279 vfree(fw_dump->data);
1280 fw_dump->data = NULL;
1281 fw_dump->clr = 0;
1282 	netdev_info(netdev, "FW dump extracted successfully\n");
1283 return 0;
1284}
1285
1286static int
1287qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1288{
1289 int i;
1290 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1291 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1292 u32 state;
1293
1294 switch (val->flag) {
1295 case QLCNIC_FORCE_FW_DUMP_KEY:
1296 if (!fw_dump->tmpl_hdr) {
1297 netdev_err(netdev, "FW dump not supported\n");
1298 return -ENOTSUPP;
1299 }
1300 if (!fw_dump->enable) {
1301 netdev_info(netdev, "FW dump not enabled\n");
1302 return 0;
1303 }
1304 if (fw_dump->clr) {
1305 netdev_info(netdev,
1306 "Previous dump not cleared, not forcing dump\n");
1307 return 0;
1308 }
1309 netdev_info(netdev, "Forcing a FW dump\n");
1310 qlcnic_dev_request_reset(adapter);
1311 break;
1312 case QLCNIC_DISABLE_FW_DUMP:
1313 if (fw_dump->enable && fw_dump->tmpl_hdr) {
1314 netdev_info(netdev, "Disabling FW dump\n");
1315 fw_dump->enable = 0;
1316 }
1317 return 0;
1318 case QLCNIC_ENABLE_FW_DUMP:
1319 if (!fw_dump->tmpl_hdr) {
1320 netdev_err(netdev, "FW dump not supported\n");
1321 return -ENOTSUPP;
1322 }
1323 if (!fw_dump->enable) {
1324 netdev_info(netdev, "Enabling FW dump\n");
1325 fw_dump->enable = 1;
1326 }
1327 return 0;
1328 case QLCNIC_FORCE_FW_RESET:
1329 netdev_info(netdev, "Forcing a FW reset\n");
1330 qlcnic_dev_request_reset(adapter);
1331 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
1332 return 0;
1333 case QLCNIC_SET_QUIESCENT:
1334 case QLCNIC_RESET_QUIESCENT:
1335 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1336 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
1337 netdev_info(netdev, "Device in FAILED state\n");
1338 return 0;
1339 default:
1340 if (!fw_dump->tmpl_hdr) {
1341 netdev_err(netdev, "FW dump not supported\n");
1342 return -ENOTSUPP;
1343 }
1344 for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
1345 if (val->flag == FW_DUMP_LEVELS[i]) {
1346 fw_dump->tmpl_hdr->drv_cap_mask =
1347 val->flag;
1348 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1349 fw_dump->tmpl_hdr->drv_cap_mask);
1350 return 0;
1351 }
1352 }
1353 netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
1354 return -EINVAL;
1355 }
1356 return 0;
1357}
1358
1359const struct ethtool_ops qlcnic_ethtool_ops = {
1360 .get_settings = qlcnic_get_settings,
1361 .set_settings = qlcnic_set_settings,
1362 .get_drvinfo = qlcnic_get_drvinfo,
1363 .get_regs_len = qlcnic_get_regs_len,
1364 .get_regs = qlcnic_get_regs,
1365 .get_link = ethtool_op_get_link,
1366 .get_eeprom_len = qlcnic_get_eeprom_len,
1367 .get_eeprom = qlcnic_get_eeprom,
1368 .get_ringparam = qlcnic_get_ringparam,
1369 .set_ringparam = qlcnic_set_ringparam,
1370 .get_channels = qlcnic_get_channels,
1371 .set_channels = qlcnic_set_channels,
1372 .get_pauseparam = qlcnic_get_pauseparam,
1373 .set_pauseparam = qlcnic_set_pauseparam,
1374 .get_wol = qlcnic_get_wol,
1375 .set_wol = qlcnic_set_wol,
1376 .self_test = qlcnic_diag_test,
1377 .get_strings = qlcnic_get_strings,
1378 .get_ethtool_stats = qlcnic_get_ethtool_stats,
1379 .get_sset_count = qlcnic_get_sset_count,
1380 .get_coalesce = qlcnic_get_intr_coalesce,
1381 .set_coalesce = qlcnic_set_intr_coalesce,
1382 .set_phys_id = qlcnic_set_led,
1383 .set_msglevel = qlcnic_set_msglevel,
1384 .get_msglevel = qlcnic_get_msglevel,
1385 .get_dump_flag = qlcnic_get_dump_flag,
1386 .get_dump_data = qlcnic_get_dump_data,
1387 .set_dump = qlcnic_set_dump,
1388};
1389
1390const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1391 .get_settings = qlcnic_get_settings,
1392 .get_drvinfo = qlcnic_get_drvinfo,
1393 .set_msglevel = qlcnic_set_msglevel,
1394 .get_msglevel = qlcnic_get_msglevel,
1395};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
deleted file mode 100644
index 49cc1ac4f05..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ /dev/null
@@ -1,1026 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef __QLCNIC_HDR_H_
9#define __QLCNIC_HDR_H_
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13
14/*
15 * The basic unit of access when reading/writing control registers.
16 */
17
18enum {
19 QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
20 QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
21 QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
22 QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
23 QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
24 QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
25 QLCNIC_HW_H6_CH_HUB_ADR = 0x08
26};
27
28/* Hub 0 */
29enum {
30 QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
31 QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
32};
33
34/* Hub 1 */
35enum {
36 QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
37 QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
38 QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
39 QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
40 QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
41 QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
42 QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
43 QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
44 QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
45 QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
46 QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
47 QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
48 QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
49 QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
50 QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
51 QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
52};
53
54/* Hub 2 */
55enum {
56 QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
57 QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
58 QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
59
60 QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
61 QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
62 QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
63 QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
64 QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
65 QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
66 QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
67 QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
68 QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
69 QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
70 QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
71 QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
72 QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
73};
74
75/* Hub 3 */
76enum {
77 QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
78 QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
79 QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
80 QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
81};
82
83/* Hub 4 */
84enum {
85 QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
86 QLCNIC_HW_PEGN1_CRB_AGT_ADR,
87 QLCNIC_HW_PEGN2_CRB_AGT_ADR,
88 QLCNIC_HW_PEGN3_CRB_AGT_ADR,
89 QLCNIC_HW_PEGNI_CRB_AGT_ADR,
90 QLCNIC_HW_PEGND_CRB_AGT_ADR,
91 QLCNIC_HW_PEGNC_CRB_AGT_ADR,
92 QLCNIC_HW_PEGR0_CRB_AGT_ADR,
93 QLCNIC_HW_PEGR1_CRB_AGT_ADR,
94 QLCNIC_HW_PEGR2_CRB_AGT_ADR,
95 QLCNIC_HW_PEGR3_CRB_AGT_ADR,
96 QLCNIC_HW_PEGN4_CRB_AGT_ADR
97};
98
99/* Hub 5 */
100enum {
101 QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
102 QLCNIC_HW_PEGS1_CRB_AGT_ADR,
103 QLCNIC_HW_PEGS2_CRB_AGT_ADR,
104 QLCNIC_HW_PEGS3_CRB_AGT_ADR,
105 QLCNIC_HW_PEGSI_CRB_AGT_ADR,
106 QLCNIC_HW_PEGSD_CRB_AGT_ADR,
107 QLCNIC_HW_PEGSC_CRB_AGT_ADR
108};
109
110/* Hub 6 */
111enum {
112 QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
113 QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
114 QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
115 QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
116 QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
117 QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
118 QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
119 QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
120 QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
121};
122
123/* Floaters - non-existent modules */
124#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
125
126/* This field defines PCI/X adr [25:20] of agents on the CRB */
127enum {
128 QLCNIC_HW_PX_MAP_CRB_PH = 0,
129 QLCNIC_HW_PX_MAP_CRB_PS,
130 QLCNIC_HW_PX_MAP_CRB_MN,
131 QLCNIC_HW_PX_MAP_CRB_MS,
132 QLCNIC_HW_PX_MAP_CRB_PGR1,
133 QLCNIC_HW_PX_MAP_CRB_SRE,
134 QLCNIC_HW_PX_MAP_CRB_NIU,
135 QLCNIC_HW_PX_MAP_CRB_QMN,
136 QLCNIC_HW_PX_MAP_CRB_SQN0,
137 QLCNIC_HW_PX_MAP_CRB_SQN1,
138 QLCNIC_HW_PX_MAP_CRB_SQN2,
139 QLCNIC_HW_PX_MAP_CRB_SQN3,
140 QLCNIC_HW_PX_MAP_CRB_QMS,
141 QLCNIC_HW_PX_MAP_CRB_SQS0,
142 QLCNIC_HW_PX_MAP_CRB_SQS1,
143 QLCNIC_HW_PX_MAP_CRB_SQS2,
144 QLCNIC_HW_PX_MAP_CRB_SQS3,
145 QLCNIC_HW_PX_MAP_CRB_PGN0,
146 QLCNIC_HW_PX_MAP_CRB_PGN1,
147 QLCNIC_HW_PX_MAP_CRB_PGN2,
148 QLCNIC_HW_PX_MAP_CRB_PGN3,
149 QLCNIC_HW_PX_MAP_CRB_PGND,
150 QLCNIC_HW_PX_MAP_CRB_PGNI,
151 QLCNIC_HW_PX_MAP_CRB_PGS0,
152 QLCNIC_HW_PX_MAP_CRB_PGS1,
153 QLCNIC_HW_PX_MAP_CRB_PGS2,
154 QLCNIC_HW_PX_MAP_CRB_PGS3,
155 QLCNIC_HW_PX_MAP_CRB_PGSD,
156 QLCNIC_HW_PX_MAP_CRB_PGSI,
157 QLCNIC_HW_PX_MAP_CRB_SN,
158 QLCNIC_HW_PX_MAP_CRB_PGR2,
159 QLCNIC_HW_PX_MAP_CRB_EG,
160 QLCNIC_HW_PX_MAP_CRB_PH2,
161 QLCNIC_HW_PX_MAP_CRB_PS2,
162 QLCNIC_HW_PX_MAP_CRB_CAM,
163 QLCNIC_HW_PX_MAP_CRB_CAS0,
164 QLCNIC_HW_PX_MAP_CRB_CAS1,
165 QLCNIC_HW_PX_MAP_CRB_CAS2,
166 QLCNIC_HW_PX_MAP_CRB_C2C0,
167 QLCNIC_HW_PX_MAP_CRB_C2C1,
168 QLCNIC_HW_PX_MAP_CRB_TIMR,
169 QLCNIC_HW_PX_MAP_CRB_PGR3,
170 QLCNIC_HW_PX_MAP_CRB_RPMX1,
171 QLCNIC_HW_PX_MAP_CRB_RPMX2,
172 QLCNIC_HW_PX_MAP_CRB_RPMX3,
173 QLCNIC_HW_PX_MAP_CRB_RPMX4,
174 QLCNIC_HW_PX_MAP_CRB_RPMX5,
175 QLCNIC_HW_PX_MAP_CRB_RPMX6,
176 QLCNIC_HW_PX_MAP_CRB_RPMX7,
177 QLCNIC_HW_PX_MAP_CRB_XDMA,
178 QLCNIC_HW_PX_MAP_CRB_I2Q,
179 QLCNIC_HW_PX_MAP_CRB_ROMUSB,
180 QLCNIC_HW_PX_MAP_CRB_CAS3,
181 QLCNIC_HW_PX_MAP_CRB_RPMX0,
182 QLCNIC_HW_PX_MAP_CRB_RPMX8,
183 QLCNIC_HW_PX_MAP_CRB_RPMX9,
184 QLCNIC_HW_PX_MAP_CRB_OCM0,
185 QLCNIC_HW_PX_MAP_CRB_OCM1,
186 QLCNIC_HW_PX_MAP_CRB_SMB,
187 QLCNIC_HW_PX_MAP_CRB_I2C0,
188 QLCNIC_HW_PX_MAP_CRB_I2C1,
189 QLCNIC_HW_PX_MAP_CRB_LPC,
190 QLCNIC_HW_PX_MAP_CRB_PGNC,
191 QLCNIC_HW_PX_MAP_CRB_PGR0
192};
193
194#define BIT_0 0x1
195#define BIT_1 0x2
196#define BIT_2 0x4
197#define BIT_3 0x8
198#define BIT_4 0x10
199#define BIT_5 0x20
200#define BIT_6 0x40
201#define BIT_7 0x80
202#define BIT_8 0x100
203#define BIT_9 0x200
204#define BIT_10 0x400
205#define BIT_11 0x800
206#define BIT_12 0x1000
207#define BIT_13 0x2000
208#define BIT_14 0x4000
209#define BIT_15 0x8000
210#define BIT_16 0x10000
211#define BIT_17 0x20000
212#define BIT_18 0x40000
213#define BIT_19 0x80000
214#define BIT_20 0x100000
215#define BIT_21 0x200000
216#define BIT_22 0x400000
217#define BIT_23 0x800000
218#define BIT_24 0x1000000
219#define BIT_25 0x2000000
220#define BIT_26 0x4000000
221#define BIT_27 0x8000000
222#define BIT_28 0x10000000
223#define BIT_29 0x20000000
224#define BIT_30 0x40000000
225#define BIT_31 0x80000000
226
227/* This field defines CRB adr [31:20] of the agents */
228
229#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
230 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
231#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
232 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
233#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
234 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
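A worked expansion of one of these macros (a sketch, using the hub/agent values from the enums above) makes the encoding concrete: the hub number is shifted above bit 7 of the agent address.

/* QLCNIC_HW_CRB_HUB_AGT_ADR_MN
 *   = (QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR
 *   = (0x05 << 7) | 0x15
 *   =  0x280      | 0x15
 *   =  0x295
 */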
235
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
250#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
251 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
252#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
253 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
254#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
255 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
256#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
257 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
259 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
261 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
263 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
265 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
266
267#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
268 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
269#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
270 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
271#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
272 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
273
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
290#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
291 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
292#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
293 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
294#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
295 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
296#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
297 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
298#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
299 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
300#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
301 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
302#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
303 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
304#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
305 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
306
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
315#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
316 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
317#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
318 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
319#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
320 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
321#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
322 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
323#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
324 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
325#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
326 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
327#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
328 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
329#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
330 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
331
332#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
333 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
334#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
335 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
336#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
337 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
338#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
339 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
340#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
341 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
342#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
343 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
344#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
345 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
346
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
349#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
350 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
351#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
352 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
353#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
354 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
355#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
356 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
357#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
358 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
359#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
360 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
361#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
362 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
363#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
364 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
365
366#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
367
368#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
369
370#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
371#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
372
373#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
374#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
375#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
376#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
377#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
378#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
379#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
380
381#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
382
383#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
384#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
385#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
386#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
387#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
388#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
389
390/* Lock IDs for ROM lock */
391#define ROM_LOCK_DRIVER 0x0d417340
392
393/******************************************************************************
394*
395* Definitions specific to M25P flash
396*
397*******************************************************************************
398*/
399
400/* all are 1MB windows */
401
402#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
403#define QLCNIC_PCI_CRB_WINDOW(A) \
404 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
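As a worked example (a sketch using values defined elsewhere in this header): QLCNIC_PCI_CRBSPACE is 0x06000000 and QLCNIC_HW_PX_MAP_CRB_NIU is index 6 in the PCI/X map enum above, so each agent gets its own 1MB window.

/* QLCNIC_CRB_NIU = QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
 *               = 0x06000000 + 6 * 0x00100000
 *               = 0x06600000
 */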
405
406#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
407#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
408#define QLCNIC_CRB_ROMUSB \
409 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
410#define QLCNIC_CRB_EPG QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG)
411#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
412#define QLCNIC_CRB_TIMER QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR)
413#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
414#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
415#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
416
417#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
418#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
419#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
420#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
421#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
422#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
423#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
424#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
425#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
426#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
427#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
428
429#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
430#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
431
432#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
433#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
434#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
435#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
436#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
437#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
438#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
439#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
440#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
441#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
442#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
443#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
444#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
445#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
446#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
447#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
448#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
449#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
450#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
451
452#define QLCNIC_PCI_MN_2M (0)
453#define QLCNIC_PCI_MS_2M (0x80000)
454#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
455#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
456#define QLCNIC_PCI_CAMQM (0x04800000UL)
457#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
458#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
459#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
460
461#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
462
463#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
464#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
465#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
466#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
467#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
468#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
469#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
470#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
471
472/*
473 * Register offsets for MN
474 */
475#define QLCNIC_MIU_CONTROL (0x000)
476#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
477
478/* 200ms delay in each loop */
479#define QLCNIC_NIU_PHY_WAITLEN 200000
480/* 10 seconds before we give up */
481#define QLCNIC_NIU_PHY_WAITMAX 50
482#define QLCNIC_NIU_MAX_GBE_PORTS 4
483#define QLCNIC_NIU_MAX_XG_PORTS 2
484
485#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
486#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
487#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
488
489#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
490 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
491#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
492 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
493
494
495#define TEST_AGT_CTRL (0x00)
496
497#define TA_CTL_START BIT_0
498#define TA_CTL_ENABLE BIT_1
499#define TA_CTL_WRITE BIT_2
500#define TA_CTL_BUSY BIT_3
501
502/*
503 * Register offsets for MN
504 */
505#define MIU_TEST_AGT_BASE (0x90)
506
507#define MIU_TEST_AGT_ADDR_LO (0x04)
508#define MIU_TEST_AGT_ADDR_HI (0x08)
509#define MIU_TEST_AGT_WRDATA_LO (0x10)
510#define MIU_TEST_AGT_WRDATA_HI (0x14)
511#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
512#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
513#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
514#define MIU_TEST_AGT_RDDATA_LO (0x18)
515#define MIU_TEST_AGT_RDDATA_HI (0x1c)
516#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
517#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
518#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
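The indexed forms simply recover the four fixed offsets defined above; a worked check (sketch):

/* MIU_TEST_AGT_WRDATA(0) = 0x10  (== MIU_TEST_AGT_WRDATA_LO)
 * MIU_TEST_AGT_WRDATA(1) = 0x14  (== MIU_TEST_AGT_WRDATA_HI)
 * MIU_TEST_AGT_WRDATA(2) = 0x20  (== MIU_TEST_AGT_WRDATA_UPPER_LO)
 * MIU_TEST_AGT_WRDATA(3) = 0x24  (== MIU_TEST_AGT_WRDATA_UPPER_HI)
 * MIU_TEST_AGT_RDDATA(i) follows the same pattern starting at 0x18.
 */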
519
520#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
521#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
522
523/*
524 * Register offsets for MS
525 */
526#define SIU_TEST_AGT_BASE (0x60)
527
528#define SIU_TEST_AGT_ADDR_LO (0x04)
529#define SIU_TEST_AGT_ADDR_HI (0x18)
530#define SIU_TEST_AGT_WRDATA_LO (0x08)
531#define SIU_TEST_AGT_WRDATA_HI (0x0c)
532#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
533#define SIU_TEST_AGT_RDDATA_LO (0x10)
534#define SIU_TEST_AGT_RDDATA_HI (0x14)
535#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
536
537#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
538#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
539
540/* XG Link status */
541#define XG_LINK_UP 0x10
542#define XG_LINK_DOWN 0x20
543
544#define XG_LINK_UP_P3P 0x01
545#define XG_LINK_DOWN_P3P 0x02
546#define XG_LINK_STATE_P3P_MASK 0xf
547#define XG_LINK_STATE_P3P(pcifn, val) \
548 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
549
550#define P3P_LINK_SPEED_MHZ 100
551#define P3P_LINK_SPEED_MASK 0xff
552#define P3P_LINK_SPEED_REG(pcifn) \
553 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
554#define P3P_LINK_SPEED_VAL(pcifn, reg) \
555 (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
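A short decoding sketch (the register values below are made up for illustration): CRB_XG_STATE_P3P packs one 4-bit link state per PCI function, and the link-speed registers pack one byte per function, scaled by P3P_LINK_SPEED_MHZ.

/* Link state: state_reg = 0x21
 *   XG_LINK_STATE_P3P(0, 0x21) = 0x1  -> XG_LINK_UP_P3P
 *   XG_LINK_STATE_P3P(1, 0x21) = 0x2  -> XG_LINK_DOWN_P3P
 *
 * Link speed: speed_reg = 0x00006464
 *   P3P_LINK_SPEED_VAL(0, 0x6464) = 0x64 = 100
 *   speed = 100 * P3P_LINK_SPEED_MHZ = 10000 Mb/s (10G)
 */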
556
557#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
558#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
559#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
560#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
561#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
562#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
563#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
564#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
565
566#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
567#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
568#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
569#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
570
571#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
572#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
573#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
574#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
575#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
576
577#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
578#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
579
580#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
581#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
582#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
583
584#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
585
586#define CRB_V2P_0 (QLCNIC_REG(0x290))
587#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
588#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
589
590#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
591#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
592#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
593
594/*
595 * CrbPortPhanCntrHi/Lo is used to pass the HostPhantomIndex address,
596 * which can be read by the Phantom host to get producer/consumer indexes from
597 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
598 * registers will be used for the addresses of the ring's shared memory
599 * on the Phantom.
600 */
601
602#define qlcnic_get_temp_val(x) ((x) >> 16)
603#define qlcnic_get_temp_state(x) ((x) & 0xffff)
604#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
605
606/*
607 * Temperature control.
608 */
609enum {
610 QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
611 QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
612 QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
613};
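A worked round trip through the temperature helpers above (a sketch; the 55 degree reading is made up):

/* u32 temp  = qlcnic_encode_temp(55, QLCNIC_TEMP_NORMAL);   temp  == 0x00370001
 * u32 val   = qlcnic_get_temp_val(temp);                    val   == 55
 * u32 state = qlcnic_get_temp_state(temp);                  state == QLCNIC_TEMP_NORMAL
 */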
614
615
616/* Lock IDs for PHY lock */
617#define PHY_LOCK_DRIVER 0x44524956
618
619/* Used for PS PCI Memory access */
620#define PCIX_PS_OP_ADDR_LO (0x10000)
621/* via CRB (PS side only) */
622#define PCIX_PS_OP_ADDR_HI (0x10004)
623
624#define PCIX_INT_VECTOR (0x10100)
625#define PCIX_INT_MASK (0x10104)
626
627#define PCIX_OCM_WINDOW (0x10800)
628#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
629
630#define PCIX_TARGET_STATUS (0x10118)
631#define PCIX_TARGET_STATUS_F1 (0x10160)
632#define PCIX_TARGET_STATUS_F2 (0x10164)
633#define PCIX_TARGET_STATUS_F3 (0x10168)
634#define PCIX_TARGET_STATUS_F4 (0x10360)
635#define PCIX_TARGET_STATUS_F5 (0x10364)
636#define PCIX_TARGET_STATUS_F6 (0x10368)
637#define PCIX_TARGET_STATUS_F7 (0x1036c)
638
639#define PCIX_TARGET_MASK (0x10128)
640#define PCIX_TARGET_MASK_F1 (0x10170)
641#define PCIX_TARGET_MASK_F2 (0x10174)
642#define PCIX_TARGET_MASK_F3 (0x10178)
643#define PCIX_TARGET_MASK_F4 (0x10370)
644#define PCIX_TARGET_MASK_F5 (0x10374)
645#define PCIX_TARGET_MASK_F6 (0x10378)
646#define PCIX_TARGET_MASK_F7 (0x1037c)
647
648#define PCIX_MSI_F(i) (0x13000+((i)*4))
649
650#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
651#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
652#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
653
654#define PCIE_SEM0_LOCK (0x1c000)
655#define PCIE_SEM0_UNLOCK (0x1c004)
656#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
657#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
658
659#define PCIE_SETUP_FUNCTION (0x12040)
660#define PCIE_SETUP_FUNCTION2 (0x12048)
661#define PCIE_MISCCFG_RC (0x1206c)
662#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
663#define PCIE_CHICKEN3 (0x120c8)
664
665#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
666#define PCIE_MAX_MASTER_SPLIT (0x14048)
667
668#define QLCNIC_PORT_MODE_NONE 0
669#define QLCNIC_PORT_MODE_XG 1
670#define QLCNIC_PORT_MODE_GB 2
671#define QLCNIC_PORT_MODE_802_3_AP 3
672#define QLCNIC_PORT_MODE_AUTO_NEG 4
673#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
674#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
675#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
676#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
677
678#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
679#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
680
681#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
682#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
683
684#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
685#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
686#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
687#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
688#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
689#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
690
691#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
692#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
693#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
694#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
695#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
696#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
697#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
698
699/* Device State */
700#define QLCNIC_DEV_COLD 0x1
701#define QLCNIC_DEV_INITIALIZING 0x2
702#define QLCNIC_DEV_READY 0x3
703#define QLCNIC_DEV_NEED_RESET 0x4
704#define QLCNIC_DEV_NEED_QUISCENT 0x5
705#define QLCNIC_DEV_FAILED 0x6
706#define QLCNIC_DEV_QUISCENT 0x7
707
708#define QLCNIC_DEV_BADBAD 0xbad0bad0
709
710#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
711#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
712#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
713
714#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
715#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
716#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
717#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
718#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
719#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
720
721#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
722#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
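These macros carve the 32-bit per-device state registers into one 4-bit slot per PCI function; a worked sketch for function 2:

/* u32 val = 0;
 * QLC_DEV_SET_RST_RDY(val, 2);     sets 1 << 8       -> val == 0x00000100
 * QLC_DEV_SET_QSCNT_RDY(val, 2);   ORs in 2 << 8     -> val == 0x00000300
 * QLC_DEV_CLR_RST_QSCNT(val, 2);   clears bits 9:8   -> val == 0x00000000
 * QLC_DEV_GET_DRV(QLC_DEV_DRV_DEFAULT, 2) == 0x1     (one nibble per function)
 */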
723
724#define QLCNIC_TYPE_NIC 1
725#define QLCNIC_TYPE_FCOE 2
726#define QLCNIC_TYPE_ISCSI 3
727
728#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
729#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
730#define QLCNIC_RCODE_FATAL_ERROR BIT_31
731#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
732#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
733#define QLCNIC_FWERROR_FAN_FAILURE 0x16
734
735#define FW_POLL_DELAY (1 * HZ)
736#define FW_FAIL_THRESH 2
737
738#define QLCNIC_RESET_TIMEOUT_SECS 10
739#define QLCNIC_INIT_TIMEOUT_SECS 30
740#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
741#define QLCNIC_RCVPEG_CHECK_DELAY 10
742#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
743#define QLCNIC_CMDPEG_CHECK_DELAY 500
744#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
745#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
746
747#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
748#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
749
750/*
751 * PCI Interrupt Vector Values.
752 */
753#define PCIX_INT_VECTOR_BIT_F0 0x0080
754#define PCIX_INT_VECTOR_BIT_F1 0x0100
755#define PCIX_INT_VECTOR_BIT_F2 0x0200
756#define PCIX_INT_VECTOR_BIT_F3 0x0400
757#define PCIX_INT_VECTOR_BIT_F4 0x0800
758#define PCIX_INT_VECTOR_BIT_F5 0x1000
759#define PCIX_INT_VECTOR_BIT_F6 0x2000
760#define PCIX_INT_VECTOR_BIT_F7 0x4000
761
762struct qlcnic_legacy_intr_set {
763 u32 int_vec_bit;
764 u32 tgt_status_reg;
765 u32 tgt_mask_reg;
766 u32 pci_int_reg;
767};
768
769#define QLCNIC_FW_API 0x1b216c
770#define QLCNIC_DRV_OP_MODE 0x1b2170
771#define QLCNIC_MSIX_BASE 0x132110
772#define QLCNIC_MAX_PCI_FUNC 8
773#define QLCNIC_MAX_VLAN_FILTERS 64
774
775/* FW dump defines */
776#define MIU_TEST_CTR 0x41000090
777#define MIU_TEST_ADDR_LO 0x41000094
778#define MIU_TEST_ADDR_HI 0x41000098
779#define FLASH_ROM_WINDOW 0x42110030
780#define FLASH_ROM_DATA 0x42150000
781
782
783static const u32 FW_DUMP_LEVELS[] = {
784 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
785
786static const u32 MIU_TEST_READ_DATA[] = {
787 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
788
789#define QLCNIC_FW_DUMP_REG1 0x00130060
790#define QLCNIC_FW_DUMP_REG2 0x001e0000
791#define QLCNIC_FLASH_SEM2_LK 0x0013C010
792#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
793#define QLCNIC_FLASH_LOCK_ID 0x001B2100
794
795/* PCI function operational mode */
796enum {
797 QLCNIC_MGMT_FUNC = 0,
798 QLCNIC_PRIV_FUNC = 1,
799 QLCNIC_NON_PRIV_FUNC = 2
800};
801
802enum {
803 QLCNIC_PORT_DEFAULTS = 0,
804 QLCNIC_ADD_VLAN = 1,
805 QLCNIC_DEL_VLAN = 2
806};
807
808#define QLC_DEV_DRV_DEFAULT 0x11111111
809
810#define LSB(x) ((uint8_t)(x))
811#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
812
813#define LSW(x) ((uint16_t)((uint32_t)(x)))
814#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
815
816#define LSD(x) ((uint32_t)((uint64_t)(x)))
817#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
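A worked sketch of the byte/word/dword extraction helpers above:

/* For x = 0x12345678:
 *   LSW(x) == 0x5678,  MSW(x) == 0x1234
 *   LSB(LSW(x)) == 0x78,  MSB(LSW(x)) == 0x56
 * For y = 0x1122334455667788ULL:
 *   LSD(y) == 0x55667788,  MSD(y) == 0x11223344
 */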
818
819#define QLCNIC_MS_CTRL 0x41000090
820#define QLCNIC_MS_ADDR_LO 0x41000094
821#define QLCNIC_MS_ADDR_HI 0x41000098
822#define QLCNIC_MS_WRTDATA_LO 0x410000A0
823#define QLCNIC_MS_WRTDATA_HI 0x410000A4
824#define QLCNIC_MS_WRTDATA_ULO 0x410000B0
825#define QLCNIC_MS_WRTDATA_UHI 0x410000B4
826#define QLCNIC_MS_RDDATA_LO 0x410000A8
827#define QLCNIC_MS_RDDATA_HI 0x410000AC
828#define QLCNIC_MS_RDDATA_ULO 0x410000B8
829#define QLCNIC_MS_RDDATA_UHI 0x410000BC
830
831#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
832#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
833#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
834
835#define QLCNIC_LEGACY_INTR_CONFIG \
836{ \
837 { \
838 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
839 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
840 .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \
841 \
842 { \
843 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
844 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
845 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \
846 \
847 { \
848 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
849 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
850 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \
851 \
852 { \
853 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
854 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
855 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \
856 \
857 { \
858 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
859 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
860 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \
861 \
862 { \
863 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
864 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
865 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \
866 \
867 { \
868 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
869 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
870 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \
871 \
872 { \
873 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
874 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
875 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \
876}
877
878/* NIU REGS */
879
880#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
881
882/*
883 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
884 *
885 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
886 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
887 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
888 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
889 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
890 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
891 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
892 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
893 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
894 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
895 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
896 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
897 */
898#define qlcnic_gb_rx_flowctl(config_word) \
899 ((config_word) |= 1 << 5)
900#define qlcnic_gb_get_rx_flowctl(config_word) \
901 _qlcnic_crb_get_bit((config_word), 5)
902#define qlcnic_gb_unset_rx_flowctl(config_word) \
903 ((config_word) &= ~(1 << 5))
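A small sketch of how these helpers manipulate bit 5 (rx_flowctl) of the per-port config word:

/* u32 cfg = 0;
 * qlcnic_gb_rx_flowctl(cfg);             cfg == 0x20 (bit 5 set)
 * qlcnic_gb_get_rx_flowctl(cfg) == 1;
 * qlcnic_gb_unset_rx_flowctl(cfg);       cfg == 0x00
 */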
904
905/*
906 * NIU GB Pause Ctl Register
907 */
908
909#define qlcnic_gb_set_gb0_mask(config_word) \
910 ((config_word) |= 1 << 0)
911#define qlcnic_gb_set_gb1_mask(config_word) \
912 ((config_word) |= 1 << 2)
913#define qlcnic_gb_set_gb2_mask(config_word) \
914 ((config_word) |= 1 << 4)
915#define qlcnic_gb_set_gb3_mask(config_word) \
916 ((config_word) |= 1 << 6)
917
918#define qlcnic_gb_get_gb0_mask(config_word) \
919 _qlcnic_crb_get_bit((config_word), 0)
920#define qlcnic_gb_get_gb1_mask(config_word) \
921 _qlcnic_crb_get_bit((config_word), 2)
922#define qlcnic_gb_get_gb2_mask(config_word) \
923 _qlcnic_crb_get_bit((config_word), 4)
924#define qlcnic_gb_get_gb3_mask(config_word) \
925 _qlcnic_crb_get_bit((config_word), 6)
926
927#define qlcnic_gb_unset_gb0_mask(config_word) \
928 ((config_word) &= ~(1 << 0))
929#define qlcnic_gb_unset_gb1_mask(config_word) \
930 ((config_word) &= ~(1 << 2))
931#define qlcnic_gb_unset_gb2_mask(config_word) \
932 ((config_word) &= ~(1 << 4))
933#define qlcnic_gb_unset_gb3_mask(config_word) \
934 ((config_word) &= ~(1 << 6))
935
936/*
937 * NIU XG Pause Ctl Register
938 *
939 * Bit 0 : xg0_mask => 1:disable tx pause frames
940 * Bit 1 : xg0_request => 1:request single pause frame
941 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
942 * Bit 3 : xg1_mask => 1:disable tx pause frames
943 * Bit 4 : xg1_request => 1:request single pause frame
944 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
945 */
946
947#define qlcnic_xg_set_xg0_mask(config_word) \
948 ((config_word) |= 1 << 0)
949#define qlcnic_xg_set_xg1_mask(config_word) \
950 ((config_word) |= 1 << 3)
951
952#define qlcnic_xg_get_xg0_mask(config_word) \
953 _qlcnic_crb_get_bit((config_word), 0)
954#define qlcnic_xg_get_xg1_mask(config_word) \
955 _qlcnic_crb_get_bit((config_word), 3)
956
957#define qlcnic_xg_unset_xg0_mask(config_word) \
958 ((config_word) &= ~(1 << 0))
959#define qlcnic_xg_unset_xg1_mask(config_word) \
960 ((config_word) &= ~(1 << 3))
961
973/*
974 * PHY-Specific MII control/status registers.
975 */
976#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
977#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
978
979/*
980 * PHY-Specific Status Register (reg 17).
981 *
982 * Bit 0 : jabber => 1:jabber detected, 0:not
983 * Bit 1 : polarity => 1:polarity reversed, 0:normal
984 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
985 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
986 * Bit 4 : energydetect => 1:sleep, 0:active
987 * Bit 5 : downshift => 1:downshift, 0:no downshift
988 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
989 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
990 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
991 * Bit 10 : link => 1:link up, 0:link down
992 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
993 * Bit 12 : pagercvd => 1:page received, 0:page not received
994 * Bit 13 : duplex => 1:full duplex, 0:half duplex
995 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
996 */
997
998#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
999
1000#define qlcnic_set_phy_speed(config_word, val) \
1001 ((config_word) |= ((val & 0x03) << 14))
1002#define qlcnic_set_phy_duplex(config_word) \
1003 ((config_word) |= 1 << 13)
1004#define qlcnic_clear_phy_duplex(config_word) \
1005 ((config_word) &= ~(1 << 13))
1006
1007#define qlcnic_get_phy_link(config_word) \
1008 _qlcnic_crb_get_bit(config_word, 10)
1009#define qlcnic_get_phy_duplex(config_word) \
1010 _qlcnic_crb_get_bit(config_word, 13)
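A worked decode of the PHY-specific status register layout documented above (the value 0xA400 is made up for illustration):

/* status = 0xA400:
 *   qlcnic_get_phy_speed(status)  == 2  -> 1000 Mb/s
 *   qlcnic_get_phy_duplex(status) == 1  -> full duplex
 *   qlcnic_get_phy_link(status)   == 1  -> link up
 */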
1011
1012#define QLCNIC_NIU_NON_PROMISC_MODE 0
1013#define QLCNIC_NIU_PROMISC_MODE 1
1014#define QLCNIC_NIU_ALLMULTI_MODE 2
1015
1016struct crb_128M_2M_sub_block_map {
1017 unsigned valid;
1018 unsigned start_128M;
1019 unsigned end_128M;
1020 unsigned start_2M;
1021};
1022
1023struct crb_128M_2M_block_map{
1024 struct crb_128M_2M_sub_block_map sub_block[16];
1025};
1026#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
deleted file mode 100644
index 7a6d5ebe4e0..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ /dev/null
@@ -1,1364 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic.h"
9#include "qlcnic_hdr.h"
10
11#include <linux/slab.h>
12#include <net/ip.h>
13#include <linux/bitops.h>
14
15#define MASK(n) ((1ULL<<(n))-1)
16#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
17
18#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
19
20#define CRB_BLK(off) ((off >> 20) & 0x3f)
21#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
22#define CRB_WINDOW_2M (0x130060)
23#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
24#define CRB_INDIRECT_2M (0x1e0000UL)
25
26struct qlcnic_ms_reg_ctrl {
27 u32 ocm_window;
28 u32 control;
29 u32 hi;
30 u32 low;
31 u32 rd[4];
32 u32 wd[4];
33 u64 off;
34};
35
36#ifndef readq
37static inline u64 readq(void __iomem *addr)
38{
39 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
40}
41#endif
42
43#ifndef writeq
44static inline void writeq(u64 val, void __iomem *addr)
45{
46 writel(((u32) (val)), (addr));
47 writel(((u32) (val >> 32)), (addr + 4));
48}
49#endif
50
51static struct crb_128M_2M_block_map
52crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
53 {{{0, 0, 0, 0} } }, /* 0: PCI */
54 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
55 {1, 0x0110000, 0x0120000, 0x130000},
56 {1, 0x0120000, 0x0122000, 0x124000},
57 {1, 0x0130000, 0x0132000, 0x126000},
58 {1, 0x0140000, 0x0142000, 0x128000},
59 {1, 0x0150000, 0x0152000, 0x12a000},
60 {1, 0x0160000, 0x0170000, 0x110000},
61 {1, 0x0170000, 0x0172000, 0x12e000},
62 {0, 0x0000000, 0x0000000, 0x000000},
63 {0, 0x0000000, 0x0000000, 0x000000},
64 {0, 0x0000000, 0x0000000, 0x000000},
65 {0, 0x0000000, 0x0000000, 0x000000},
66 {0, 0x0000000, 0x0000000, 0x000000},
67 {0, 0x0000000, 0x0000000, 0x000000},
68 {1, 0x01e0000, 0x01e0800, 0x122000},
69 {0, 0x0000000, 0x0000000, 0x000000} } },
70 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
71 {{{0, 0, 0, 0} } }, /* 3: */
72 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
73 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
74 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
75 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
76 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
77 {0, 0x0000000, 0x0000000, 0x000000},
78 {0, 0x0000000, 0x0000000, 0x000000},
79 {0, 0x0000000, 0x0000000, 0x000000},
80 {0, 0x0000000, 0x0000000, 0x000000},
81 {0, 0x0000000, 0x0000000, 0x000000},
82 {0, 0x0000000, 0x0000000, 0x000000},
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {0, 0x0000000, 0x0000000, 0x000000},
89 {0, 0x0000000, 0x0000000, 0x000000},
90 {0, 0x0000000, 0x0000000, 0x000000},
91 {1, 0x08f0000, 0x08f2000, 0x172000} } },
92 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
93 {0, 0x0000000, 0x0000000, 0x000000},
94 {0, 0x0000000, 0x0000000, 0x000000},
95 {0, 0x0000000, 0x0000000, 0x000000},
96 {0, 0x0000000, 0x0000000, 0x000000},
97 {0, 0x0000000, 0x0000000, 0x000000},
98 {0, 0x0000000, 0x0000000, 0x000000},
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {1, 0x09f0000, 0x09f2000, 0x176000} } },
108 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {0, 0x0000000, 0x0000000, 0x000000},
112 {0, 0x0000000, 0x0000000, 0x000000},
113 {0, 0x0000000, 0x0000000, 0x000000},
114 {0, 0x0000000, 0x0000000, 0x000000},
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
124 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {0, 0x0000000, 0x0000000, 0x000000},
128 {0, 0x0000000, 0x0000000, 0x000000},
129 {0, 0x0000000, 0x0000000, 0x000000},
130 {0, 0x0000000, 0x0000000, 0x000000},
131 {0, 0x0000000, 0x0000000, 0x000000},
132 {0, 0x0000000, 0x0000000, 0x000000},
133 {0, 0x0000000, 0x0000000, 0x000000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
140 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
141 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
142 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
143 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
144 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
145 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
146 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
147 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
148 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
149 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
150 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
151 {{{0, 0, 0, 0} } }, /* 23: */
152 {{{0, 0, 0, 0} } }, /* 24: */
153 {{{0, 0, 0, 0} } }, /* 25: */
154 {{{0, 0, 0, 0} } }, /* 26: */
155 {{{0, 0, 0, 0} } }, /* 27: */
156 {{{0, 0, 0, 0} } }, /* 28: */
157 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
158 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
159 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
160 {{{0} } }, /* 32: PCI */
161 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
162 {1, 0x2110000, 0x2120000, 0x130000},
163 {1, 0x2120000, 0x2122000, 0x124000},
164 {1, 0x2130000, 0x2132000, 0x126000},
165 {1, 0x2140000, 0x2142000, 0x128000},
166 {1, 0x2150000, 0x2152000, 0x12a000},
167 {1, 0x2160000, 0x2170000, 0x110000},
168 {1, 0x2170000, 0x2172000, 0x12e000},
169 {0, 0x0000000, 0x0000000, 0x000000},
170 {0, 0x0000000, 0x0000000, 0x000000},
171 {0, 0x0000000, 0x0000000, 0x000000},
172 {0, 0x0000000, 0x0000000, 0x000000},
173 {0, 0x0000000, 0x0000000, 0x000000},
174 {0, 0x0000000, 0x0000000, 0x000000},
175 {0, 0x0000000, 0x0000000, 0x000000},
176 {0, 0x0000000, 0x0000000, 0x000000} } },
177 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
178 {{{0} } }, /* 35: */
179 {{{0} } }, /* 36: */
180 {{{0} } }, /* 37: */
181 {{{0} } }, /* 38: */
182 {{{0} } }, /* 39: */
183 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
184 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
185 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
186 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
187 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
188 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
189 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
190 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
191 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
192 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
193 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
194 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
195 {{{0} } }, /* 52: */
196 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
197 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
198 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
199 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
200 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
201 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
202 {{{0} } }, /* 59: I2C0 */
203 {{{0} } }, /* 60: I2C1 */
204 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
205 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
206 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
207};
208
209/*
210 * top 12 bits of crb internal address (hub, agent)
211 */
212static const unsigned crb_hub_agt[64] = {
213 0,
214 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
215 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
216 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
217 0,
218 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
219 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
220 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
221 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
222 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
223 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
224 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
225 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
226 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
227 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
228 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
229 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
230 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
231 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
232 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
233 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
235 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
237 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
238 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
239 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
240 0,
241 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
242 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
243 0,
244 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
245 0,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
248 0,
249 0,
250 0,
251 0,
252 0,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
254 0,
255 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
260 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
263 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
264 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
265 0,
266 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
267 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
268 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
269 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
270 0,
271 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
272 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
273 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
274 0,
275 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
276 0,
277};
278
279static const u32 msi_tgt_status[8] = {
280 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
281 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
282 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
283 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
284};
285
286/* PCI Windowing for DDR regions. */
287
288#define QLCNIC_PCIE_SEM_TIMEOUT 10000
289
290static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
291{
292 u32 dest;
293 void __iomem *val;
294
295 dest = addr & 0xFFFF0000;
296 val = bar0 + QLCNIC_FW_DUMP_REG1;
297 writel(dest, val);
298 readl(val);
299 val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
300 *data = readl(val);
301}
302
303static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
304{
305 u32 dest;
306 void __iomem *val;
307
308 dest = addr & 0xFFFF0000;
309 val = bar0 + QLCNIC_FW_DUMP_REG1;
310 writel(dest, val);
311 readl(val);
312 val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
313 writel(data, val);
314 readl(val);
315}
316
317int
318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
319{
320 int done = 0, timeout = 0;
321
322 while (!done) {
323 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
324 if (done == 1)
325 break;
326 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
327 dev_err(&adapter->pdev->dev,
328 "Failed to acquire sem=%d lock; holdby=%d\n",
329 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
330 return -EIO;
331 }
332 msleep(1);
333 }
334
335 if (id_reg)
336 QLCWR32(adapter, id_reg, adapter->portnum);
337
338 return 0;
339}
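/*
 * Standalone sketch (hypothetical helper, not from the driver): the
 * hardware-semaphore acquire above is a bounded poll -- read the lock
 * register until it reports 1 or the retry budget runs out, sleeping
 * about a millisecond between attempts.
 */
#include <stdbool.h>

static int poll_until(bool (*acquired)(void *ctx), void *ctx,
		      int max_tries, void (*wait_one)(void))
{
	int tries;

	for (tries = 0; tries < max_tries; tries++) {
		if (acquired(ctx))
			return 0;
		wait_one();		/* e.g. msleep(1) in the driver */
	}
	return -1;			/* timed out */
}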
340
341void
342qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
343{
344 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
345}
346
347static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
348{
349 u32 data;
350
351 if (qlcnic_82xx_check(adapter))
352 qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
353 else
354 return -EIO;
355 return data;
356}
357
358static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
359{
360 if (qlcnic_82xx_check(adapter))
361 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
362}
363
364static int
365qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
366 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
367{
368 u32 i, producer;
369 struct qlcnic_cmd_buffer *pbuf;
370 struct cmd_desc_type0 *cmd_desc;
371 struct qlcnic_host_tx_ring *tx_ring;
372
373 i = 0;
374
375 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
376 return -EIO;
377
378 tx_ring = adapter->tx_ring;
379 __netif_tx_lock_bh(tx_ring->txq);
380
381 producer = tx_ring->producer;
382
383 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
384 netif_tx_stop_queue(tx_ring->txq);
385 smp_mb();
386 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
387 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
388 netif_tx_wake_queue(tx_ring->txq);
389 } else {
390 adapter->stats.xmit_off++;
391 __netif_tx_unlock_bh(tx_ring->txq);
392 return -EBUSY;
393 }
394 }
395
396 do {
397 cmd_desc = &cmd_desc_arr[i];
398
399 pbuf = &tx_ring->cmd_buf_arr[producer];
400 pbuf->skb = NULL;
401 pbuf->frag_count = 0;
402
403 memcpy(&tx_ring->desc_head[producer],
404 cmd_desc, sizeof(struct cmd_desc_type0));
405
406 producer = get_next_index(producer, tx_ring->num_desc);
407 i++;
408
409 } while (i != nr_desc);
410
411 tx_ring->producer = producer;
412
413 qlcnic_update_cmd_producer(tx_ring);
414
415 __netif_tx_unlock_bh(tx_ring->txq);
416
417 return 0;
418}
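/*
 * Standalone sketch (generic ring arithmetic, not the driver's helpers):
 * a producer index that wraps at num_desc and a conservative free-slot
 * count -- the kind of check qlcnic_send_cmd_descs() performs with
 * qlcnic_tx_avail()/get_next_index() before copying descriptors in.
 */
#include <stdint.h>

static uint32_t ring_next(uint32_t idx, uint32_t num_desc)
{
	return (idx + 1 == num_desc) ? 0 : idx + 1;
}

static uint32_t ring_free(uint32_t producer, uint32_t consumer,
			  uint32_t num_desc)
{
	/* one slot stays empty so producer == consumer means "empty" */
	return (consumer + num_desc - producer - 1) % num_desc;
}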
419
420static int
421qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
422 __le16 vlan_id, unsigned op)
423{
424 struct qlcnic_nic_req req;
425 struct qlcnic_mac_req *mac_req;
426 struct qlcnic_vlan_req *vlan_req;
427 u64 word;
428
429 memset(&req, 0, sizeof(struct qlcnic_nic_req));
430 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
431
432 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
433 req.req_hdr = cpu_to_le64(word);
434
435 mac_req = (struct qlcnic_mac_req *)&req.words[0];
436 mac_req->op = op;
437 memcpy(mac_req->mac_addr, addr, 6);
438
439 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
440 vlan_req->vlan_id = vlan_id;
441
442 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
443}
444
445static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
446{
447 struct list_head *head;
448 struct qlcnic_mac_list_s *cur;
449
450 /* look up if already exists */
451 list_for_each(head, &adapter->mac_list) {
452 cur = list_entry(head, struct qlcnic_mac_list_s, list);
453 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
454 return 0;
455 }
456
457 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
458 if (cur == NULL) {
459 dev_err(&adapter->netdev->dev,
460 "failed to add mac address filter\n");
461 return -ENOMEM;
462 }
463 memcpy(cur->mac_addr, addr, ETH_ALEN);
464
465 if (qlcnic_sre_macaddr_change(adapter,
466 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
467 kfree(cur);
468 return -EIO;
469 }
470
471 list_add_tail(&cur->list, &adapter->mac_list);
472 return 0;
473}
474
475void qlcnic_set_multi(struct net_device *netdev)
476{
477 struct qlcnic_adapter *adapter = netdev_priv(netdev);
478 struct netdev_hw_addr *ha;
479 static const u8 bcast_addr[ETH_ALEN] = {
480 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
481 };
482 u32 mode = VPORT_MISS_MODE_DROP;
483
484 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
485 return;
486
487 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
488 qlcnic_nic_add_mac(adapter, bcast_addr);
489
490 if (netdev->flags & IFF_PROMISC) {
491 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
492 mode = VPORT_MISS_MODE_ACCEPT_ALL;
493 goto send_fw_cmd;
494 }
495
496 if ((netdev->flags & IFF_ALLMULTI) ||
497 (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) {
498 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
499 goto send_fw_cmd;
500 }
501
502 if (!netdev_mc_empty(netdev)) {
503 netdev_for_each_mc_addr(ha, netdev) {
504 qlcnic_nic_add_mac(adapter, ha->addr);
505 }
506 }
507
508send_fw_cmd:
509 if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
510 qlcnic_alloc_lb_filters_mem(adapter);
511 adapter->mac_learn = 1;
512 } else {
513 adapter->mac_learn = 0;
514 }
515
516 qlcnic_nic_set_promisc(adapter, mode);
517}
518
519int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
520{
521 struct qlcnic_nic_req req;
522 u64 word;
523
524 memset(&req, 0, sizeof(struct qlcnic_nic_req));
525
526 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
527
528 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
529 ((u64)adapter->portnum << 16);
530 req.req_hdr = cpu_to_le64(word);
531
532 req.words[0] = cpu_to_le64(mode);
533
534 return qlcnic_send_cmd_descs(adapter,
535 (struct cmd_desc_type0 *)&req, 1);
536}
537
538void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
539{
540 struct qlcnic_mac_list_s *cur;
541 struct list_head *head = &adapter->mac_list;
542
543 while (!list_empty(head)) {
544 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
545 qlcnic_sre_macaddr_change(adapter,
546 cur->mac_addr, 0, QLCNIC_MAC_DEL);
547 list_del(&cur->list);
548 kfree(cur);
549 }
550}
551
552void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
553{
554 struct qlcnic_filter *tmp_fil;
555 struct hlist_node *tmp_hnode, *n;
556 struct hlist_head *head;
557 int i;
558
559 for (i = 0; i < adapter->fhash.fmax; i++) {
560 head = &(adapter->fhash.fhead[i]);
561
562 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
563 {
564 if (jiffies >
565 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
566 qlcnic_sre_macaddr_change(adapter,
567 tmp_fil->faddr, tmp_fil->vlan_id,
568 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
569 QLCNIC_MAC_DEL);
570 spin_lock_bh(&adapter->mac_learn_lock);
571 adapter->fhash.fnum--;
572 hlist_del(&tmp_fil->fnode);
573 spin_unlock_bh(&adapter->mac_learn_lock);
574 kfree(tmp_fil);
575 }
576 }
577 }
578}
579
580void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
581{
582 struct qlcnic_filter *tmp_fil;
583 struct hlist_node *tmp_hnode, *n;
584 struct hlist_head *head;
585 int i;
586
587 for (i = 0; i < adapter->fhash.fmax; i++) {
588 head = &(adapter->fhash.fhead[i]);
589
590 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
591 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
592 tmp_fil->vlan_id, tmp_fil->vlan_id ?
593 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
594 spin_lock_bh(&adapter->mac_learn_lock);
595 adapter->fhash.fnum--;
596 hlist_del(&tmp_fil->fnode);
597 spin_unlock_bh(&adapter->mac_learn_lock);
598 kfree(tmp_fil);
599 }
600 }
601}
602
603static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
604{
605 struct qlcnic_nic_req req;
606 int rv;
607
608 memset(&req, 0, sizeof(struct qlcnic_nic_req));
609
610 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
611 req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
612 ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
613
614 req.words[0] = cpu_to_le64(flag);
615
616 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
617 if (rv != 0)
618 dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
619 flag ? "Set" : "Reset");
620 return rv;
621}
622
623int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
624{
625 if (qlcnic_set_fw_loopback(adapter, mode))
626 return -EIO;
627
628 if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
629 qlcnic_set_fw_loopback(adapter, 0);
630 return -EIO;
631 }
632
633 msleep(1000);
634 return 0;
635}
636
637void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
638{
639 int mode = VPORT_MISS_MODE_DROP;
640 struct net_device *netdev = adapter->netdev;
641
642 qlcnic_set_fw_loopback(adapter, 0);
643
644 if (netdev->flags & IFF_PROMISC)
645 mode = VPORT_MISS_MODE_ACCEPT_ALL;
646 else if (netdev->flags & IFF_ALLMULTI)
647 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
648
649 qlcnic_nic_set_promisc(adapter, mode);
650 msleep(1000);
651}
652
653/*
654 * Send the interrupt coalescing parameter set by ethtool to the card.
655 */
656int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
657{
658 struct qlcnic_nic_req req;
659 int rv;
660
661 memset(&req, 0, sizeof(struct qlcnic_nic_req));
662
663 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
664
665 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
666 ((u64) adapter->portnum << 16));
667
668 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
669 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
670 ((u64) adapter->ahw->coal.rx_time_us) << 16);
671 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
672 ((u64) adapter->ahw->coal.type) << 32 |
673 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
674 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
675 if (rv != 0)
676 dev_err(&adapter->netdev->dev,
677 "Could not send interrupt coalescing parameters\n");
678 return rv;
679}
680
681int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
682{
683 struct qlcnic_nic_req req;
684 u64 word;
685 int rv;
686
687 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
688 return 0;
689
690 memset(&req, 0, sizeof(struct qlcnic_nic_req));
691
692 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
693
694 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
695 req.req_hdr = cpu_to_le64(word);
696
697 req.words[0] = cpu_to_le64(enable);
698
699 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
700 if (rv != 0)
701 dev_err(&adapter->netdev->dev,
702 "Could not send configure hw lro request\n");
703
704 return rv;
705}
706
707int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
708{
709 struct qlcnic_nic_req req;
710 u64 word;
711 int rv;
712
713 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
714 return 0;
715
716 memset(&req, 0, sizeof(struct qlcnic_nic_req));
717
718 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
719
720 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
721 ((u64)adapter->portnum << 16);
722 req.req_hdr = cpu_to_le64(word);
723
724 req.words[0] = cpu_to_le64(enable);
725
726 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
727 if (rv != 0)
728 dev_err(&adapter->netdev->dev,
729 "Could not send configure bridge mode request\n");
730
731 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
732
733 return rv;
734}
735
736
737#define RSS_HASHTYPE_IP_TCP 0x3
738
739int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
740{
741 struct qlcnic_nic_req req;
742 u64 word;
743 int i, rv;
744
745 static const u64 key[] = {
746 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
747 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
748 0x255b0ec26d5a56daULL
749 };
750
751 memset(&req, 0, sizeof(struct qlcnic_nic_req));
752 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
753
754 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
755 req.req_hdr = cpu_to_le64(word);
756
757 /*
758 * RSS request:
759 * bits 3-0: hash_method
760 * 5-4: hash_type_ipv4
761 * 7-6: hash_type_ipv6
762 * 8: enable
763 * 9: use indirection table
764 * 47-10: reserved
765 * 63-48: indirection table mask
766 */
767 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
768 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
769 ((u64)(enable & 0x1) << 8) |
770 ((0x7ULL) << 48);
771 req.words[0] = cpu_to_le64(word);
772 for (i = 0; i < 5; i++)
773 req.words[i+1] = cpu_to_le64(key[i]);
774
775 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
776 if (rv != 0)
777 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
778
779 return rv;
780}
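/*
 * Standalone sketch (not from the driver): packing the 64-bit RSS request
 * word exactly as the bit layout documented above describes, using the
 * values qlcnic_config_rss() sends (TCP/IP hashing, enable = 1, 8-entry
 * indirection table mask).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hash_type = 0x3;	/* RSS_HASHTYPE_IP_TCP */
	int enable = 1;
	uint64_t word;

	word = ((hash_type & 0x3) << 4) |		/* hash_type_ipv4, bits 5:4 */
	       ((hash_type & 0x3) << 6) |		/* hash_type_ipv6, bits 7:6 */
	       ((uint64_t)(enable & 0x1) << 8) |	/* enable, bit 8            */
	       (0x7ULL << 48);				/* indirection table mask   */

	printf("rss word = 0x%016llx\n", (unsigned long long)word);
	return 0;	/* prints rss word = 0x00070000000001f0 */
}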
781
782int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
783{
784 struct qlcnic_nic_req req;
785 struct qlcnic_ipaddr *ipa;
786 u64 word;
787 int rv;
788
789 memset(&req, 0, sizeof(struct qlcnic_nic_req));
790 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
791
792 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
793 req.req_hdr = cpu_to_le64(word);
794
795 req.words[0] = cpu_to_le64(cmd);
796 ipa = (struct qlcnic_ipaddr *)&req.words[1];
797 ipa->ipv4 = ip;
798
799 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
800 if (rv != 0)
801 dev_err(&adapter->netdev->dev,
802 "could not notify %s IP 0x%x reuqest\n",
803 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
804
805 return rv;
806}
807
808int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
809{
810 struct qlcnic_nic_req req;
811 u64 word;
812 int rv;
813
814 memset(&req, 0, sizeof(struct qlcnic_nic_req));
815 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
816
817 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
818 req.req_hdr = cpu_to_le64(word);
819 req.words[0] = cpu_to_le64(enable | (enable << 8));
820
821 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
822 if (rv != 0)
823 dev_err(&adapter->netdev->dev,
824 "could not configure link notification\n");
825
826 return rv;
827}
828
829int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
830{
831 struct qlcnic_nic_req req;
832 u64 word;
833 int rv;
834
835 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
836 return 0;
837
838 memset(&req, 0, sizeof(struct qlcnic_nic_req));
839 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
840
841 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
842 ((u64)adapter->portnum << 16) |
843 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
844
845 req.req_hdr = cpu_to_le64(word);
846
847 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
848 if (rv != 0)
849 dev_err(&adapter->netdev->dev,
850 "could not cleanup lro flows\n");
851
852 return rv;
853}
854
855/*
856 * qlcnic_change_mtu - Change the Maximum Transfer Unit
857 * @returns 0 on success, negative on failure
858 */
859
860int qlcnic_change_mtu(struct net_device *netdev, int mtu)
861{
862 struct qlcnic_adapter *adapter = netdev_priv(netdev);
863 int rc = 0;
864
865 if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
866 dev_err(&adapter->netdev->dev, "supported MTU range is %d to"
867 " %d bytes\n", P3P_MIN_MTU, P3P_MAX_MTU);
868 return -EINVAL;
869 }
870
871 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
872
873 if (!rc)
874 netdev->mtu = mtu;
875
876 return rc;
877}
878
879
880netdev_features_t qlcnic_fix_features(struct net_device *netdev,
881 netdev_features_t features)
882{
883 struct qlcnic_adapter *adapter = netdev_priv(netdev);
884
885 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
886 netdev_features_t changed = features ^ netdev->features;
887 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
888 }
889
890 if (!(features & NETIF_F_RXCSUM))
891 features &= ~NETIF_F_LRO;
892
893 return features;
894}
895
896
897int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
898{
899 struct qlcnic_adapter *adapter = netdev_priv(netdev);
900 netdev_features_t changed = netdev->features ^ features;
901 int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
902
903 if (!(changed & NETIF_F_LRO))
904 return 0;
905
906 netdev->features = features ^ NETIF_F_LRO;
907
908 if (qlcnic_config_hw_lro(adapter, hw_lro))
909 return -EIO;
910
911 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
912 return -EIO;
913
914 return 0;
915}
916
917/*
918 * Translate a CRB offset in the 128M pci map to an address in the 2M bar.
919 * In: 'off' is the offset from the base of the 128M pci map.
920 * Returns < 0 if 'off' is not a valid CRB offset,
921 * 0 if the offset is directly mapped; '*addr' is set to the 2M
922 * address and no window access is needed,
923 * 1 if CRB window access is needed; '*addr' is set to the indirect
924 * window address and the CRB window must be programmed first.
925 */
926static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
927 ulong off, void __iomem **addr)
928{
929 const struct crb_128M_2M_sub_block_map *m;
930
931 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
932 return -EINVAL;
933
934 off -= QLCNIC_PCI_CRBSPACE;
935
936 /*
937 * Try direct map
938 */
939 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
940
941 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
942 *addr = ahw->pci_base0 + m->start_2M +
943 (off - m->start_128M);
944 return 0;
945 }
946
947 /*
948 * Not in direct map, use crb window
949 */
950 *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
951 return 1;
952}
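/*
 * Standalone sketch (simplified, hypothetical types): the decision made by
 * qlcnic_pci_get_crb_addr_2M() above.  If the 128M offset falls inside a
 * mapped sub-block the 2M address is computed directly; otherwise the
 * indirect window at CRB_INDIRECT_2M is used and the CRB window register
 * must be programmed first.
 */
#include <stdbool.h>
#include <stdint.h>

struct sub_block {			/* stand-in for crb_128M_2M_sub_block_map */
	bool valid;
	uint32_t start_128M, end_128M, start_2M;
};

static int crb_addr_2M(const struct sub_block *m, uint32_t off,
		       uint32_t *bar_off)	/* offset into the 2M bar */
{
	if (m->valid && m->start_128M <= off && off < m->end_128M) {
		*bar_off = m->start_2M + (off - m->start_128M);
		return 0;			/* direct mapping */
	}
	*bar_off = 0x1e0000 + (off & 0xffff);	/* CRB_INDIRECT_2M window */
	return 1;				/* window access needed */
}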
953
954/*
955 * In: 'off' is an offset from CRB space in the 128M pci map.
956 * Programs the CRB window register so that indirect accesses through
957 * the 2M bar reach 'off'.  Caller must hold the crb window lock.
958 */
959static int
960qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
961{
962 u32 window;
963 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
964
965 off -= QLCNIC_PCI_CRBSPACE;
966
967 window = CRB_HI(off);
968 if (window == 0) {
969 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
970 return -EIO;
971 }
972
973 writel(window, addr);
974 if (readl(addr) != window) {
975 if (printk_ratelimit())
976 dev_warn(&adapter->pdev->dev,
977 "failed to set CRB window to %d off 0x%lx\n",
978 window, off);
979 return -EIO;
980 }
981 return 0;
982}
983
984int
985qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
986{
987 unsigned long flags;
988 int rv;
989 void __iomem *addr = NULL;
990
991 rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
992
993 if (rv == 0) {
994 writel(data, addr);
995 return 0;
996 }
997
998 if (rv > 0) {
999 /* indirect access */
1000 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
1001 crb_win_lock(adapter);
1002 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
1003 if (!rv)
1004 writel(data, addr);
1005 crb_win_unlock(adapter);
1006 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
1007 return rv;
1008 }
1009
1010 dev_err(&adapter->pdev->dev,
1011 "%s: invalid offset: 0x%016lx\n", __func__, off);
1012 dump_stack();
1013 return -EIO;
1014}
1015
1016int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
1017{
1018 unsigned long flags;
1019 int rv;
1020 u32 data = -1;
1021 void __iomem *addr = NULL;
1022
1023 rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
1024
1025 if (rv == 0)
1026 return readl(addr);
1027
1028 if (rv > 0) {
1029 /* indirect access */
1030 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
1031 crb_win_lock(adapter);
1032 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
1033 data = readl(addr);
1034 crb_win_unlock(adapter);
1035 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
1036 return data;
1037 }
1038
1039 dev_err(&adapter->pdev->dev,
1040 "%s: invalid offset: 0x%016lx\n", __func__, off);
1041 dump_stack();
1042 return -1;
1043}
1044
1045
1046void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
1047 u32 offset)
1048{
1049 void __iomem *addr = NULL;
1050
1051 WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
1052
1053 return addr;
1054}
1055
1056static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
1057 u32 window, u64 off, u64 *data, int op)
1058{
1059 void __iomem *addr;
1060 u32 start;
1061
1062 mutex_lock(&adapter->ahw->mem_lock);
1063
1064 writel(window, adapter->ahw->ocm_win_crb);
1065 /* read back to flush */
1066 readl(adapter->ahw->ocm_win_crb);
1067 start = QLCNIC_PCI_OCM0_2M + off;
1068
1069 addr = adapter->ahw->pci_base0 + start;
1070
1071 if (op == 0) /* read */
1072 *data = readq(addr);
1073 else /* write */
1074 writeq(*data, addr);
1075
1076 /* Set window to 0 */
1077 writel(0, adapter->ahw->ocm_win_crb);
1078 readl(adapter->ahw->ocm_win_crb);
1079
1080 mutex_unlock(&adapter->ahw->mem_lock);
1081 return 0;
1082}
1083
1084void
1085qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1086{
1087 void __iomem *addr = adapter->ahw->pci_base0 +
1088 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1089
1090 mutex_lock(&adapter->ahw->mem_lock);
1091 *data = readq(addr);
1092 mutex_unlock(&adapter->ahw->mem_lock);
1093}
1094
1095void
1096qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1097{
1098 void __iomem *addr = adapter->ahw->pci_base0 +
1099 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1100
1101 mutex_lock(&adapter->ahw->mem_lock);
1102 writeq(data, addr);
1103 mutex_unlock(&adapter->ahw->mem_lock);
1104}
1105
1106
1107
1108/* Set MS memory control data for different adapters */
1109static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
1110 struct qlcnic_ms_reg_ctrl *ms)
1111{
1112 ms->control = QLCNIC_MS_CTRL;
1113 ms->low = QLCNIC_MS_ADDR_LO;
1114 ms->hi = QLCNIC_MS_ADDR_HI;
1115 if (off & 0xf) {
1116 ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
1117 ms->rd[0] = QLCNIC_MS_RDDATA_LO;
1118 ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
1119 ms->rd[1] = QLCNIC_MS_RDDATA_HI;
1120 ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
1121 ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
1122 ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
1123 ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
1124 } else {
1125 ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
1126 ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
1127 ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
1128 ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
1129 ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
1130 ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
1131 ms->rd[2] = QLCNIC_MS_RDDATA_LO;
1132 ms->rd[3] = QLCNIC_MS_RDDATA_HI;
1133 }
1134
1135 ms->ocm_window = OCM_WIN_P3P(off);
1136 ms->off = GET_MEM_OFFS_2M(off);
1137}
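/*
 * Standalone sketch (one reading of the selection above, not from the
 * driver): the MS agent moves 16-byte lines, so for an 8-byte aligned
 * offset (off & 0xf) is either 0 or 8.  It selects whether the target
 * qword is reached through the LO/HI or the ULO/UHI data registers,
 * while the untouched half of the line is preserved through the other
 * pair during the read-modify-write.
 */
#include <stdint.h>

static void ms_line_and_half(uint64_t off, uint64_t *line, int *upper_qword)
{
	*line = off & ~0xfULL;			/* 16-byte line programmed as the address */
	*upper_qword = (off & 0xf) != 0;	/* which qword the caller targets */
}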
1138
1139int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1140{
1141 int j, ret = 0;
1142 u32 temp, off8;
1143 struct qlcnic_ms_reg_ctrl ms;
1144
1145 /* Only 64-bit aligned access */
1146 if (off & 7)
1147 return -EIO;
1148
1149 memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
1150 if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1151 QLCNIC_ADDR_QDR_NET_MAX) ||
1152 ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
1153 QLCNIC_ADDR_DDR_NET_MAX)))
1154 return -EIO;
1155
1156 qlcnic_set_ms_controls(adapter, off, &ms);
1157
1158 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1159 return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
1160 ms.off, &data, 1);
1161
1162 off8 = off & ~0xf;
1163
1164 mutex_lock(&adapter->ahw->mem_lock);
1165
1166 qlcnic_ind_wr(adapter, ms.low, off8);
1167 qlcnic_ind_wr(adapter, ms.hi, 0);
1168
1169 qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
1170 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
1171
1172 for (j = 0; j < MAX_CTL_CHECK; j++) {
1173 temp = qlcnic_ind_rd(adapter, ms.control);
1174 if ((temp & TA_CTL_BUSY) == 0)
1175 break;
1176 }
1177
1178 if (j >= MAX_CTL_CHECK) {
1179 ret = -EIO;
1180 goto done;
1181 }
1182
1183 /* This is the modify part of read-modify-write */
1184 qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
1185 qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
1186 /* This is the write part of read-modify-write */
1187 qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
1188 qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
1189
1190 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
1191 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
1192
1193 for (j = 0; j < MAX_CTL_CHECK; j++) {
1194 temp = qlcnic_ind_rd(adapter, ms.control);
1195 if ((temp & TA_CTL_BUSY) == 0)
1196 break;
1197 }
1198
1199 if (j >= MAX_CTL_CHECK) {
1200 if (printk_ratelimit())
1201 dev_err(&adapter->pdev->dev,
1202 "failed to write through agent\n");
1203 ret = -EIO;
1204 } else
1205 ret = 0;
1206
1207done:
1208 mutex_unlock(&adapter->ahw->mem_lock);
1209
1210 return ret;
1211}
1212
1213int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1214{
1215 int j, ret;
1216 u32 temp, off8;
1217 u64 val;
1218 struct qlcnic_ms_reg_ctrl ms;
1219
1220 /* Only 64-bit aligned access */
1221 if (off & 7)
1222 return -EIO;
1223 if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1224 QLCNIC_ADDR_QDR_NET_MAX) ||
1225 ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
1226 QLCNIC_ADDR_DDR_NET_MAX)))
1227 return -EIO;
1228
1229 memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
1230 qlcnic_set_ms_controls(adapter, off, &ms);
1231
1232 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1233 return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
1234 ms.off, data, 0);
1235
1236 mutex_lock(&adapter->ahw->mem_lock);
1237
1238 off8 = off & ~0xf;
1239
1240 qlcnic_ind_wr(adapter, ms.low, off8);
1241 qlcnic_ind_wr(adapter, ms.hi, 0);
1242
1243 qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
1244 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
1245
1246 for (j = 0; j < MAX_CTL_CHECK; j++) {
1247 temp = qlcnic_ind_rd(adapter, ms.control);
1248 if ((temp & TA_CTL_BUSY) == 0)
1249 break;
1250 }
1251
1252 if (j >= MAX_CTL_CHECK) {
1253 if (printk_ratelimit())
1254 dev_err(&adapter->pdev->dev,
1255 "failed to read through agent\n");
1256 ret = -EIO;
1257 } else {
1258
1259 temp = qlcnic_ind_rd(adapter, ms.rd[3]);
1260 val = (u64)temp << 32;
1261 val |= qlcnic_ind_rd(adapter, ms.rd[2]);
1262 *data = val;
1263 ret = 0;
1264 }
1265
1266 mutex_unlock(&adapter->ahw->mem_lock);
1267
1268 return ret;
1269}
1270
1271int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1272{
1273 int offset, board_type, magic;
1274 struct pci_dev *pdev = adapter->pdev;
1275
1276 offset = QLCNIC_FW_MAGIC_OFFSET;
1277 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1278 return -EIO;
1279
1280 if (magic != QLCNIC_BDINFO_MAGIC) {
1281 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1282 magic);
1283 return -EIO;
1284 }
1285
1286 offset = QLCNIC_BRDTYPE_OFFSET;
1287 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1288 return -EIO;
1289
1290 adapter->ahw->board_type = board_type;
1291
1292 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1293 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1294 if ((gpio & 0x8000) == 0)
1295 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1296 }
1297
1298 switch (board_type) {
1299 case QLCNIC_BRDTYPE_P3P_HMEZ:
1300 case QLCNIC_BRDTYPE_P3P_XG_LOM:
1301 case QLCNIC_BRDTYPE_P3P_10G_CX4:
1302 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1303 case QLCNIC_BRDTYPE_P3P_IMEZ:
1304 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1305 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1306 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1307 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1308 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1309 adapter->ahw->port_type = QLCNIC_XGBE;
1310 break;
1311 case QLCNIC_BRDTYPE_P3P_REF_QG:
1312 case QLCNIC_BRDTYPE_P3P_4_GB:
1313 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1314 adapter->ahw->port_type = QLCNIC_GBE;
1315 break;
1316 case QLCNIC_BRDTYPE_P3P_10G_TP:
1317 adapter->ahw->port_type = (adapter->portnum < 2) ?
1318 QLCNIC_XGBE : QLCNIC_GBE;
1319 break;
1320 default:
1321 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1322 adapter->ahw->port_type = QLCNIC_XGBE;
1323 break;
1324 }
1325
1326 return 0;
1327}
1328
1329int
1330qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1331{
1332 u32 wol_cfg;
1333
1334 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1335 if (wol_cfg & (1UL << adapter->portnum)) {
1336 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1337 if (wol_cfg & (1 << adapter->portnum))
1338 return 1;
1339 }
1340
1341 return 0;
1342}
1343
1344int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1345{
1346 struct qlcnic_nic_req req;
1347 int rv;
1348 u64 word;
1349
1350 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1351 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1352
1353 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1354 req.req_hdr = cpu_to_le64(word);
1355
1356 req.words[0] = cpu_to_le64((u64)rate << 32);
1357 req.words[1] = cpu_to_le64(state);
1358
1359 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1360 if (rv)
1361 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1362
1363 return rv;
1364}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
deleted file mode 100644
index de79cde233d..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ /dev/null
@@ -1,1332 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/netdevice.h>
9#include <linux/delay.h>
10#include <linux/slab.h>
11#include <linux/if_vlan.h>
12#include "qlcnic.h"
13
14struct crb_addr_pair {
15 u32 addr;
16 u32 data;
17};
18
19#define QLCNIC_MAX_CRB_XFORM 60
20static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
21
22#define crb_addr_transform(name) \
23 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
24 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
25
26#define QLCNIC_ADDR_ERROR (0xffffffff)
27
28static int
29qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
30
31static void crb_addr_transform_setup(void)
32{
33 crb_addr_transform(XDMA);
34 crb_addr_transform(TIMR);
35 crb_addr_transform(SRE);
36 crb_addr_transform(SQN3);
37 crb_addr_transform(SQN2);
38 crb_addr_transform(SQN1);
39 crb_addr_transform(SQN0);
40 crb_addr_transform(SQS3);
41 crb_addr_transform(SQS2);
42 crb_addr_transform(SQS1);
43 crb_addr_transform(SQS0);
44 crb_addr_transform(RPMX7);
45 crb_addr_transform(RPMX6);
46 crb_addr_transform(RPMX5);
47 crb_addr_transform(RPMX4);
48 crb_addr_transform(RPMX3);
49 crb_addr_transform(RPMX2);
50 crb_addr_transform(RPMX1);
51 crb_addr_transform(RPMX0);
52 crb_addr_transform(ROMUSB);
53 crb_addr_transform(SN);
54 crb_addr_transform(QMN);
55 crb_addr_transform(QMS);
56 crb_addr_transform(PGNI);
57 crb_addr_transform(PGND);
58 crb_addr_transform(PGN3);
59 crb_addr_transform(PGN2);
60 crb_addr_transform(PGN1);
61 crb_addr_transform(PGN0);
62 crb_addr_transform(PGSI);
63 crb_addr_transform(PGSD);
64 crb_addr_transform(PGS3);
65 crb_addr_transform(PGS2);
66 crb_addr_transform(PGS1);
67 crb_addr_transform(PGS0);
68 crb_addr_transform(PS);
69 crb_addr_transform(PH);
70 crb_addr_transform(NIU);
71 crb_addr_transform(I2Q);
72 crb_addr_transform(EG);
73 crb_addr_transform(MN);
74 crb_addr_transform(MS);
75 crb_addr_transform(CAS2);
76 crb_addr_transform(CAS1);
77 crb_addr_transform(CAS0);
78 crb_addr_transform(CAM);
79 crb_addr_transform(C2C1);
80 crb_addr_transform(C2C0);
81 crb_addr_transform(SMB);
82 crb_addr_transform(OCM0);
83 crb_addr_transform(I2C0);
84}
85
86void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
87{
88 struct qlcnic_recv_context *recv_ctx;
89 struct qlcnic_host_rds_ring *rds_ring;
90 struct qlcnic_rx_buffer *rx_buf;
91 int i, ring;
92
93 recv_ctx = adapter->recv_ctx;
94 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
95 rds_ring = &recv_ctx->rds_rings[ring];
96 for (i = 0; i < rds_ring->num_desc; ++i) {
97 rx_buf = &(rds_ring->rx_buf_arr[i]);
98 if (rx_buf->skb == NULL)
99 continue;
100
101 pci_unmap_single(adapter->pdev,
102 rx_buf->dma,
103 rds_ring->dma_size,
104 PCI_DMA_FROMDEVICE);
105
106 dev_kfree_skb_any(rx_buf->skb);
107 }
108 }
109}
110
111void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
112{
113 struct qlcnic_recv_context *recv_ctx;
114 struct qlcnic_host_rds_ring *rds_ring;
115 struct qlcnic_rx_buffer *rx_buf;
116 int i, ring;
117
118 recv_ctx = adapter->recv_ctx;
119 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
120 rds_ring = &recv_ctx->rds_rings[ring];
121
122 INIT_LIST_HEAD(&rds_ring->free_list);
123
124 rx_buf = rds_ring->rx_buf_arr;
125 for (i = 0; i < rds_ring->num_desc; i++) {
126 list_add_tail(&rx_buf->list,
127 &rds_ring->free_list);
128 rx_buf++;
129 }
130 }
131}
132
133void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
134{
135 struct qlcnic_cmd_buffer *cmd_buf;
136 struct qlcnic_skb_frag *buffrag;
137 int i, j;
138 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
139
140 cmd_buf = tx_ring->cmd_buf_arr;
141 for (i = 0; i < tx_ring->num_desc; i++) {
142 buffrag = cmd_buf->frag_array;
143 if (buffrag->dma) {
144 pci_unmap_single(adapter->pdev, buffrag->dma,
145 buffrag->length, PCI_DMA_TODEVICE);
146 buffrag->dma = 0ULL;
147 }
148 for (j = 0; j < cmd_buf->frag_count; j++) {
149 buffrag++;
150 if (buffrag->dma) {
151 pci_unmap_page(adapter->pdev, buffrag->dma,
152 buffrag->length,
153 PCI_DMA_TODEVICE);
154 buffrag->dma = 0ULL;
155 }
156 }
157 if (cmd_buf->skb) {
158 dev_kfree_skb_any(cmd_buf->skb);
159 cmd_buf->skb = NULL;
160 }
161 cmd_buf++;
162 }
163}
164
165void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
166{
167 struct qlcnic_recv_context *recv_ctx;
168 struct qlcnic_host_rds_ring *rds_ring;
169 struct qlcnic_host_tx_ring *tx_ring;
170 int ring;
171
172 recv_ctx = adapter->recv_ctx;
173
174 if (recv_ctx->rds_rings == NULL)
175 goto skip_rds;
176
177 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
178 rds_ring = &recv_ctx->rds_rings[ring];
179 vfree(rds_ring->rx_buf_arr);
180 rds_ring->rx_buf_arr = NULL;
181 }
182 kfree(recv_ctx->rds_rings);
183
184skip_rds:
185 if (adapter->tx_ring == NULL)
186 return;
187
188 tx_ring = adapter->tx_ring;
189 vfree(tx_ring->cmd_buf_arr);
190 tx_ring->cmd_buf_arr = NULL;
191 kfree(adapter->tx_ring);
192 adapter->tx_ring = NULL;
193}
194
195int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
196{
197 struct qlcnic_recv_context *recv_ctx;
198 struct qlcnic_host_rds_ring *rds_ring;
199 struct qlcnic_host_sds_ring *sds_ring;
200 struct qlcnic_host_tx_ring *tx_ring;
201 struct qlcnic_rx_buffer *rx_buf;
202 int ring, i, size;
203
204 struct qlcnic_cmd_buffer *cmd_buf_arr;
205 struct net_device *netdev = adapter->netdev;
206
207 size = sizeof(struct qlcnic_host_tx_ring);
208 tx_ring = kzalloc(size, GFP_KERNEL);
209 if (tx_ring == NULL) {
210 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
211 return -ENOMEM;
212 }
213 adapter->tx_ring = tx_ring;
214
215 tx_ring->num_desc = adapter->num_txd;
216 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
217
218 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
219 if (cmd_buf_arr == NULL) {
220 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
221 goto err_out;
222 }
223 tx_ring->cmd_buf_arr = cmd_buf_arr;
224
225 recv_ctx = adapter->recv_ctx;
226
227 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
228 rds_ring = kzalloc(size, GFP_KERNEL);
229 if (rds_ring == NULL) {
230 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
231 goto err_out;
232 }
233 recv_ctx->rds_rings = rds_ring;
234
235 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
236 rds_ring = &recv_ctx->rds_rings[ring];
237 switch (ring) {
238 case RCV_RING_NORMAL:
239 rds_ring->num_desc = adapter->num_rxd;
240 rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
241 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
242 break;
243
244 case RCV_RING_JUMBO:
245 rds_ring->num_desc = adapter->num_jumbo_rxd;
246 rds_ring->dma_size =
247 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
248
249 if (adapter->ahw->capabilities &
250 QLCNIC_FW_CAPABILITY_HW_LRO)
251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
252
253 rds_ring->skb_size =
254 rds_ring->dma_size + NET_IP_ALIGN;
255 break;
256 }
257 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
258 if (rds_ring->rx_buf_arr == NULL) {
259 dev_err(&netdev->dev, "Failed to allocate "
260 "rx buffer ring %d\n", ring);
261 goto err_out;
262 }
263 INIT_LIST_HEAD(&rds_ring->free_list);
264 /*
265 * Now go through all of them, set reference handles
266 * and put them in the queues.
267 */
268 rx_buf = rds_ring->rx_buf_arr;
269 for (i = 0; i < rds_ring->num_desc; i++) {
270 list_add_tail(&rx_buf->list,
271 &rds_ring->free_list);
272 rx_buf->ref_handle = i;
273 rx_buf++;
274 }
275 spin_lock_init(&rds_ring->lock);
276 }
277
278 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
279 sds_ring = &recv_ctx->sds_rings[ring];
280 sds_ring->irq = adapter->msix_entries[ring].vector;
281 sds_ring->adapter = adapter;
282 sds_ring->num_desc = adapter->num_rxd;
283
284 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
285 INIT_LIST_HEAD(&sds_ring->free_list[i]);
286 }
287
288 return 0;
289
290err_out:
291 qlcnic_free_sw_resources(adapter);
292 return -ENOMEM;
293}
294
295/*
296 * Utility to translate from internal Phantom CRB address
297 * to external PCI CRB address.
298 */
299static u32 qlcnic_decode_crb_addr(u32 addr)
300{
301 int i;
302 u32 base_addr, offset, pci_base;
303
304 crb_addr_transform_setup();
305
306 pci_base = QLCNIC_ADDR_ERROR;
307 base_addr = addr & 0xfff00000;
308 offset = addr & 0x000fffff;
309
310 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
311 if (crb_addr_xform[i] == base_addr) {
312 pci_base = i << 20;
313 break;
314 }
315 }
316 if (pci_base == QLCNIC_ADDR_ERROR)
317 return pci_base;
318 else
319 return pci_base + offset;
320}
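/*
 * Standalone sketch (not from the driver): the decode above is a reverse
 * lookup.  crb_addr_xform[] maps a PCI CRB window index to the internal
 * hub/agent id shifted into bits 31:20, and qlcnic_decode_crb_addr() scans
 * that table to turn an internal address back into window * 1M + offset.
 */
#include <stdint.h>

#define N_WINDOWS 60			/* mirrors QLCNIC_MAX_CRB_XFORM */

static uint32_t decode_crb(const uint32_t xform[N_WINDOWS], uint32_t addr)
{
	uint32_t base = addr & 0xfff00000u;
	uint32_t off  = addr & 0x000fffffu;
	int i;

	for (i = 0; i < N_WINDOWS; i++)
		if (xform[i] == base)
			return ((uint32_t)i << 20) + off;	/* PCI CRB offset */
	return 0xffffffffu;					/* QLCNIC_ADDR_ERROR */
}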
321
322#define QLCNIC_MAX_ROM_WAIT_USEC 100
323
324static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
325{
326 long timeout = 0;
327 long done = 0;
328
329 cond_resched();
330
331 while (done == 0) {
332 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
333 done &= 2;
334 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
335 dev_err(&adapter->pdev->dev,
336 "Timeout reached waiting for rom done");
337 return -EIO;
338 }
339 udelay(1);
340 }
341 return 0;
342}
343
344static int do_rom_fast_read(struct qlcnic_adapter *adapter,
345 u32 addr, u32 *valp)
346{
347 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
348 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
349 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
350 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
351 if (qlcnic_wait_rom_done(adapter)) {
352 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
353 return -EIO;
354 }
355 /* reset abyte_cnt and dummy_byte_cnt */
356 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
357 udelay(10);
358 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
359
360 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
361 return 0;
362}
363
364static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
365 u8 *bytes, size_t size)
366{
367 int addridx;
368 int ret = 0;
369
370 for (addridx = addr; addridx < (addr + size); addridx += 4) {
371 int v;
372 ret = do_rom_fast_read(adapter, addridx, &v);
373 if (ret != 0)
374 break;
375 *(__le32 *)bytes = cpu_to_le32(v);
376 bytes += 4;
377 }
378
379 return ret;
380}
381
382int
383qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
384 u8 *bytes, size_t size)
385{
386 int ret;
387
388 ret = qlcnic_rom_lock(adapter);
389 if (ret < 0)
390 return ret;
391
392 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
393
394 qlcnic_rom_unlock(adapter);
395 return ret;
396}
397
398int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
399{
400 int ret;
401
402 if (qlcnic_rom_lock(adapter) != 0)
403 return -EIO;
404
405 ret = do_rom_fast_read(adapter, addr, valp);
406 qlcnic_rom_unlock(adapter);
407 return ret;
408}
409
410int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
411{
412 int addr, val;
413 int i, n, init_delay;
414 struct crb_addr_pair *buf;
415 unsigned offset;
416 u32 off;
417 struct pci_dev *pdev = adapter->pdev;
418
419 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
420 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
421
422 /* Halt all the individual PEGs and other blocks */
423 /* disable all I2Q */
424 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0);
425 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0);
426 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0);
427 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0);
428 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0);
429 QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0);
430
431 /* disable all niu interrupts */
432 QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff);
433 /* disable xge rx/tx */
434 QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00);
435 /* disable xg1 rx/tx */
436 QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00);
437 /* disable sideband mac */
438 QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00);
439 /* disable ap0 mac */
440 QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00);
441 /* disable ap1 mac */
442 QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
443
444 /* halt sre */
445 val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
446 QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
447
448 /* halt epg */
449 QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1);
450
451 /* halt timers */
452 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0);
453 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0);
454 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0);
455 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0);
456 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0);
457 QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0);
458 /* halt pegs */
459 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1);
460 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1);
461 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1);
462 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1);
463 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
464 msleep(20);
465
466 qlcnic_rom_unlock(adapter);
467 /* big hammer reset; don't reset the CAM block */
468 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
469
470 /* Init HW CRB block */
471 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
472 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
473 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
474 return -EIO;
475 }
476 offset = n & 0xffffU;
477 n = (n >> 16) & 0xffffU;
478
479 if (n >= 1024) {
480 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
481 return -EIO;
482 }
483
484 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
485 if (buf == NULL) {
486 dev_err(&pdev->dev, "Unable to allocate memory for ROM read\n");
487 return -ENOMEM;
488 }
489
490 for (i = 0; i < n; i++) {
491 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
492 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
493 kfree(buf);
494 return -EIO;
495 }
496
497 buf[i].addr = addr;
498 buf[i].data = val;
499 }
500
501 for (i = 0; i < n; i++) {
502
503 off = qlcnic_decode_crb_addr(buf[i].addr);
504 if (off == QLCNIC_ADDR_ERROR) {
505 dev_err(&pdev->dev, "CRB init value out of range %x\n",
506 buf[i].addr);
507 continue;
508 }
509 off += QLCNIC_PCI_CRBSPACE;
510
511 if (off & 1)
512 continue;
513
514 /* skipping cold reboot MAGIC */
515 if (off == QLCNIC_CAM_RAM(0x1fc))
516 continue;
517 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
518 continue;
519 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
520 continue;
521 if (off == (ROMUSB_GLB + 0xa8))
522 continue;
523 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
524 continue;
525 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
526 continue;
527 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
528 continue;
529 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
530 continue;
531 /* skip the function enable register */
532 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
533 continue;
534 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
535 continue;
536 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
537 continue;
538
539 init_delay = 1;
540 /* After writing this register, HW needs time for CRB */
541 /* to quiet down (else crb_window returns 0xffffffff) */
542 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
543 init_delay = 1000;
544
545 QLCWR32(adapter, off, buf[i].data);
546
547 msleep(init_delay);
548 }
549 kfree(buf);
550
551 /* Initialize protocol process engine */
552 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
553 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
554 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
555 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
556 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
557 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
558 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
559 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
560 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
561 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
562 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
563 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
564 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
565 msleep(1);
566
567 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
568 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
569
570 return 0;
571}
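/*
 * Standalone sketch (hypothetical value, not from the driver): the second
 * ROM word read by qlcnic_pinit_from_rom() packs the CRB init table
 * location -- low 16 bits are the table offset in 4-byte words, high 16
 * bits are the number of addr/data pairs (must be below 1024).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word   = 0x00400010;			/* hypothetical ROM word */
	uint32_t offset = word & 0xffffu;		/* table offset          */
	uint32_t count  = (word >> 16) & 0xffffu;	/* number of entries     */

	printf("offset=%u count=%u\n", offset, count);	/* offset=16 count=64 */
	return 0;
}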
572
573static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
574{
575 u32 val;
576 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
577
578 do {
579 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
580
581 switch (val) {
582 case PHAN_INITIALIZE_COMPLETE:
583 case PHAN_INITIALIZE_ACK:
584 return 0;
585 case PHAN_INITIALIZE_FAILED:
586 goto out_err;
587 default:
588 break;
589 }
590
591 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
592
593 } while (--retries);
594
595 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
596
597out_err:
598 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
599 "complete, state: 0x%x.\n", val);
600 return -EIO;
601}
602
603static int
604qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
605{
606 u32 val;
607 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
608
609 do {
610 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
611
612 if (val == PHAN_PEG_RCV_INITIALIZED)
613 return 0;
614
615 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
616
617 } while (--retries);
618
619 if (!retries) {
620 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
621 "complete, state: 0x%x.\n", val);
622 return -EIO;
623 }
624
625 return 0;
626}
627
628int
629qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
630{
631 int err;
632
633 err = qlcnic_cmd_peg_ready(adapter);
634 if (err)
635 return err;
636
637 err = qlcnic_receive_peg_ready(adapter);
638 if (err)
639 return err;
640
641 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
642
643 return err;
644}
645
646int
647qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
648
649 int timeo;
650 u32 val;
651
652 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
653 val = QLC_DEV_GET_DRV(val, adapter->portnum);
654 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
655 dev_err(&adapter->pdev->dev,
656 "Not an Ethernet NIC func=%u\n", val);
657 return -EIO;
658 }
659 adapter->ahw->physical_port = (val >> 2);
660 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
661 timeo = QLCNIC_INIT_TIMEOUT_SECS;
662
663 adapter->dev_init_timeo = timeo;
664
665 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
666 timeo = QLCNIC_RESET_TIMEOUT_SECS;
667
668 adapter->reset_ack_timeo = timeo;
669
670 return 0;
671}
672
673static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
674 struct qlcnic_flt_entry *region_entry)
675{
676 struct qlcnic_flt_header flt_hdr;
677 struct qlcnic_flt_entry *flt_entry;
678 int i = 0, ret;
679 u32 entry_size;
680
681 memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
682 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
683 (u8 *)&flt_hdr,
684 sizeof(struct qlcnic_flt_header));
685 if (ret) {
686 dev_warn(&adapter->pdev->dev,
687 "error reading flash layout header\n");
688 return -EIO;
689 }
690
691 entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
692 flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
693 if (flt_entry == NULL) {
694 dev_warn(&adapter->pdev->dev, "error allocating memory\n");
695 return -EIO;
696 }
697
698 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
699 sizeof(struct qlcnic_flt_header),
700 (u8 *)flt_entry, entry_size);
701 if (ret) {
702 dev_warn(&adapter->pdev->dev,
703 "error reading flash layout entries\n");
704 goto err_out;
705 }
706
707 while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
708 if (flt_entry[i].region == region)
709 break;
710 i++;
711 }
712 if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
713 dev_warn(&adapter->pdev->dev,
714 "region=%x not found in %d regions\n", region, i);
715 ret = -EIO;
716 goto err_out;
717 }
718 memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
719
720err_out:
721 vfree(flt_entry);
722 return ret;
723}
724
725int
726qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
727{
728 struct qlcnic_flt_entry fw_entry;
729 u32 ver = -1, min_ver;
730 int ret;
731
732 if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
733 ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
734 &fw_entry);
735 else
736 ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
737 &fw_entry);
738
739 if (!ret)
740 /* bytes 0-3: signature, bytes 4-7: fw version */
741 qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
742 (int *)&ver);
743 else
744 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
745 (int *)&ver);
746
747 ver = QLCNIC_DECODE_VERSION(ver);
748 min_ver = QLCNIC_MIN_FW_VERSION;
749
750 if (ver < min_ver) {
751 dev_err(&adapter->pdev->dev,
752				"firmware version %d.%d.%d unsupported. "
753 "Min supported version %d.%d.%d\n",
754 _major(ver), _minor(ver), _build(ver),
755 _major(min_ver), _minor(min_ver), _build(min_ver));
756 return -EINVAL;
757 }
758
759 return 0;
760}
761
762static int
763qlcnic_has_mn(struct qlcnic_adapter *adapter)
764{
765 u32 capability;
766 capability = 0;
767
768 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
769 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
770 return 1;
771
772 return 0;
773}
774
775static
776struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
777{
778 u32 i, entries;
779 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
780 entries = le32_to_cpu(directory->num_entries);
781
782 for (i = 0; i < entries; i++) {
783
784 u32 offs = le32_to_cpu(directory->findex) +
785 i * le32_to_cpu(directory->entry_size);
786 u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
787
788 if (tab_type == section)
789 return (struct uni_table_desc *) &unirom[offs];
790 }
791
792 return NULL;
793}
794
795#define FILEHEADER_SIZE (14 * 4)
796
797static int
798qlcnic_validate_header(struct qlcnic_adapter *adapter)
799{
800 const u8 *unirom = adapter->fw->data;
801 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
802 u32 entries, entry_size, tab_size, fw_file_size;
803
804 fw_file_size = adapter->fw->size;
805
806 if (fw_file_size < FILEHEADER_SIZE)
807 return -EINVAL;
808
809 entries = le32_to_cpu(directory->num_entries);
810 entry_size = le32_to_cpu(directory->entry_size);
811 tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
812
813 if (fw_file_size < tab_size)
814 return -EINVAL;
815
816 return 0;
817}
818
819static int
820qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
821{
822 struct uni_table_desc *tab_desc;
823 struct uni_data_desc *descr;
824 u32 offs, tab_size, data_size, idx;
825 const u8 *unirom = adapter->fw->data;
826 __le32 temp;
827
828 temp = *((__le32 *)&unirom[adapter->file_prd_off] +
829 QLCNIC_UNI_BOOTLD_IDX_OFF);
830 idx = le32_to_cpu(temp);
831 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
832
833 if (!tab_desc)
834 return -EINVAL;
835
836 tab_size = le32_to_cpu(tab_desc->findex) +
837 le32_to_cpu(tab_desc->entry_size) * (idx + 1);
838
839 if (adapter->fw->size < tab_size)
840 return -EINVAL;
841
842 offs = le32_to_cpu(tab_desc->findex) +
843 le32_to_cpu(tab_desc->entry_size) * idx;
844 descr = (struct uni_data_desc *)&unirom[offs];
845
846 data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
847
848 if (adapter->fw->size < data_size)
849 return -EINVAL;
850
851 return 0;
852}
853
854static int
855qlcnic_validate_fw(struct qlcnic_adapter *adapter)
856{
857 struct uni_table_desc *tab_desc;
858 struct uni_data_desc *descr;
859 const u8 *unirom = adapter->fw->data;
860 u32 offs, tab_size, data_size, idx;
861 __le32 temp;
862
863 temp = *((__le32 *)&unirom[adapter->file_prd_off] +
864 QLCNIC_UNI_FIRMWARE_IDX_OFF);
865 idx = le32_to_cpu(temp);
866 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
867
868 if (!tab_desc)
869 return -EINVAL;
870
871 tab_size = le32_to_cpu(tab_desc->findex) +
872 le32_to_cpu(tab_desc->entry_size) * (idx + 1);
873
874 if (adapter->fw->size < tab_size)
875 return -EINVAL;
876
877 offs = le32_to_cpu(tab_desc->findex) +
878 le32_to_cpu(tab_desc->entry_size) * idx;
879 descr = (struct uni_data_desc *)&unirom[offs];
880 data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
881
882 if (adapter->fw->size < data_size)
883 return -EINVAL;
884
885 return 0;
886}
887
888static int
889qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
890{
891 struct uni_table_desc *ptab_descr;
892 const u8 *unirom = adapter->fw->data;
893 int mn_present = qlcnic_has_mn(adapter);
894 u32 entries, entry_size, tab_size, i;
895 __le32 temp;
896
897 ptab_descr = qlcnic_get_table_desc(unirom,
898 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
899 if (!ptab_descr)
900 return -EINVAL;
901
902 entries = le32_to_cpu(ptab_descr->num_entries);
903 entry_size = le32_to_cpu(ptab_descr->entry_size);
904 tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
905
906 if (adapter->fw->size < tab_size)
907 return -EINVAL;
908
909nomn:
910 for (i = 0; i < entries; i++) {
911
912 u32 flags, file_chiprev, offs;
913 u8 chiprev = adapter->ahw->revision_id;
914 u32 flagbit;
915
916 offs = le32_to_cpu(ptab_descr->findex) +
917 i * le32_to_cpu(ptab_descr->entry_size);
918 temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
919 flags = le32_to_cpu(temp);
920 temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
921 file_chiprev = le32_to_cpu(temp);
922
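		/* Product-table flag bit 1 marks images for MN-present adapters, bit 2 those without. */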
923 flagbit = mn_present ? 1 : 2;
924
925 if ((chiprev == file_chiprev) &&
926 ((1ULL << flagbit) & flags)) {
927 adapter->file_prd_off = offs;
928 return 0;
929 }
930 }
931 if (mn_present) {
932 mn_present = 0;
933 goto nomn;
934 }
935 return -EINVAL;
936}
937
938static int
939qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
940{
941 if (qlcnic_validate_header(adapter)) {
942 dev_err(&adapter->pdev->dev,
943 "unified image: header validation failed\n");
944 return -EINVAL;
945 }
946
947 if (qlcnic_validate_product_offs(adapter)) {
948 dev_err(&adapter->pdev->dev,
949 "unified image: product validation failed\n");
950 return -EINVAL;
951 }
952
953 if (qlcnic_validate_bootld(adapter)) {
954 dev_err(&adapter->pdev->dev,
955 "unified image: bootld validation failed\n");
956 return -EINVAL;
957 }
958
959 if (qlcnic_validate_fw(adapter)) {
960 dev_err(&adapter->pdev->dev,
961 "unified image: firmware validation failed\n");
962 return -EINVAL;
963 }
964
965 return 0;
966}
967
968static
969struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
970 u32 section, u32 idx_offset)
971{
972 const u8 *unirom = adapter->fw->data;
973 struct uni_table_desc *tab_desc;
974 u32 offs, idx;
975 __le32 temp;
976
977 temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
978 idx = le32_to_cpu(temp);
979
980 tab_desc = qlcnic_get_table_desc(unirom, section);
981
982 if (tab_desc == NULL)
983 return NULL;
984
985 offs = le32_to_cpu(tab_desc->findex) +
986 le32_to_cpu(tab_desc->entry_size) * idx;
987
988 return (struct uni_data_desc *)&unirom[offs];
989}
990
991static u8 *
992qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
993{
994 u32 offs = QLCNIC_BOOTLD_START;
995 struct uni_data_desc *data_desc;
996
997 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
998 QLCNIC_UNI_BOOTLD_IDX_OFF);
999
1000 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1001 offs = le32_to_cpu(data_desc->findex);
1002
1003 return (u8 *)&adapter->fw->data[offs];
1004}
1005
1006static u8 *
1007qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
1008{
1009 u32 offs = QLCNIC_IMAGE_START;
1010 struct uni_data_desc *data_desc;
1011
1012 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1013 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1014 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1015 offs = le32_to_cpu(data_desc->findex);
1016
1017 return (u8 *)&adapter->fw->data[offs];
1018}
1019
1020static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
1021{
1022 struct uni_data_desc *data_desc;
1023 const u8 *unirom = adapter->fw->data;
1024
1025 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1026 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1027
1028 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1029 return le32_to_cpu(data_desc->size);
1030 else
1031 return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
1032}
1033
1034static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
1035{
1036 struct uni_data_desc *fw_data_desc;
1037 const struct firmware *fw = adapter->fw;
1038 u32 major, minor, sub;
1039 __le32 version_offset;
1040 const u8 *ver_str;
1041 int i, ret;
1042
1043 if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
1044 version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
1045 return le32_to_cpu(version_offset);
1046 }
1047
1048 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1049 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1050 ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
1051 le32_to_cpu(fw_data_desc->size) - 17;
1052
1053 for (i = 0; i < 12; i++) {
1054 if (!strncmp(&ver_str[i], "REV=", 4)) {
1055 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
1056 &major, &minor, &sub);
1057 if (ret != 3)
1058 return 0;
1059 else
1060 return major + (minor << 8) + (sub << 16);
1061 }
1062 }
1063
1064 return 0;
1065}
1066
1067static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
1068{
1069 const struct firmware *fw = adapter->fw;
1070 u32 bios_ver, prd_off = adapter->file_prd_off;
1071 u8 *version_offset;
1072 __le32 temp;
1073
1074 if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
1075 version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
1076 return le32_to_cpu(*(__le32 *)version_offset);
1077 }
1078
1079 temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
1080 bios_ver = le32_to_cpu(temp);
1081
1082 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
1083}
1084
1085static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
1086{
1087 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
1088 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
1089
1090 qlcnic_pcie_sem_unlock(adapter, 2);
1091}
1092
1093static int
1094qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
1095{
1096 u32 heartbeat, ret = -EIO;
1097 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
1098
1099 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1100
1101 do {
1102 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
1103 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1104 if (heartbeat != adapter->heartbeat) {
1105 ret = QLCNIC_RCODE_SUCCESS;
1106 break;
1107 }
1108 } while (--retries);
1109
1110 return ret;
1111}
1112
1113int
1114qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1115{
1116 if ((adapter->flags & QLCNIC_FW_HANG) ||
1117			qlcnic_check_fw_heartbeat(adapter)) {
1118 qlcnic_rom_lock_recovery(adapter);
1119 return 1;
1120 }
1121
1122 if (adapter->need_fw_reset)
1123 return 1;
1124
1125 if (adapter->fw)
1126 return 1;
1127
1128 return 0;
1129}
1130
1131static const char *fw_name[] = {
1132 QLCNIC_UNIFIED_ROMIMAGE_NAME,
1133 QLCNIC_FLASH_ROMIMAGE_NAME,
1134};
1135
1136int
1137qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1138{
1139 __le64 *ptr64;
1140 u32 i, flashaddr, size;
1141 const struct firmware *fw = adapter->fw;
1142 struct pci_dev *pdev = adapter->pdev;
1143
1144 dev_info(&pdev->dev, "loading firmware from %s\n",
1145 fw_name[adapter->ahw->fw_type]);
1146
1147 if (fw) {
1148 u64 data;
1149
1150 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1151
1152 ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
1153 flashaddr = QLCNIC_BOOTLD_START;
1154
1155 for (i = 0; i < size; i++) {
1156 data = le64_to_cpu(ptr64[i]);
1157
1158 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
1159 return -EIO;
1160
1161 flashaddr += 8;
1162 }
1163
1164 size = qlcnic_get_fw_size(adapter) / 8;
1165
1166 ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
1167 flashaddr = QLCNIC_IMAGE_START;
1168
1169 for (i = 0; i < size; i++) {
1170 data = le64_to_cpu(ptr64[i]);
1171
1172 if (qlcnic_pci_mem_write_2M(adapter,
1173 flashaddr, data))
1174 return -EIO;
1175
1176 flashaddr += 8;
1177 }
1178
1179 size = qlcnic_get_fw_size(adapter) % 8;
1180 if (size) {
1181 data = le64_to_cpu(ptr64[i]);
1182
1183 if (qlcnic_pci_mem_write_2M(adapter,
1184 flashaddr, data))
1185 return -EIO;
1186 }
1187
1188 } else {
1189 u64 data;
1190 u32 hi, lo;
1191 int ret;
1192 struct qlcnic_flt_entry bootld_entry;
1193
1194 ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
1195 &bootld_entry);
1196 if (!ret) {
1197 size = bootld_entry.size / 8;
1198 flashaddr = bootld_entry.start_addr;
1199 } else {
1200 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1201 flashaddr = QLCNIC_BOOTLD_START;
1202 dev_info(&pdev->dev,
1203				"using legacy method to get flash fw region\n");
1204 }
1205
1206 for (i = 0; i < size; i++) {
1207 if (qlcnic_rom_fast_read(adapter,
1208 flashaddr, (int *)&lo) != 0)
1209 return -EIO;
1210 if (qlcnic_rom_fast_read(adapter,
1211 flashaddr + 4, (int *)&hi) != 0)
1212 return -EIO;
1213
1214 data = (((u64)hi << 32) | lo);
1215
1216 if (qlcnic_pci_mem_write_2M(adapter,
1217 flashaddr, data))
1218 return -EIO;
1219
1220 flashaddr += 8;
1221 }
1222 }
1223 msleep(1);
1224
1225 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
1226 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
1227 return 0;
1228}
1229
1230static int
1231qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1232{
1233 u32 val;
1234 u32 ver, bios, min_size;
1235 struct pci_dev *pdev = adapter->pdev;
1236 const struct firmware *fw = adapter->fw;
1237 u8 fw_type = adapter->ahw->fw_type;
1238
1239 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
1240 if (qlcnic_validate_unified_romimage(adapter))
1241 return -EINVAL;
1242
1243 min_size = QLCNIC_UNI_FW_MIN_SIZE;
1244 } else {
1245 val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
1246 if (val != QLCNIC_BDINFO_MAGIC)
1247 return -EINVAL;
1248
1249 min_size = QLCNIC_FW_MIN_SIZE;
1250 }
1251
1252 if (fw->size < min_size)
1253 return -EINVAL;
1254
1255 val = qlcnic_get_fw_version(adapter);
1256 ver = QLCNIC_DECODE_VERSION(val);
1257
1258 if (ver < QLCNIC_MIN_FW_VERSION) {
1259 dev_err(&pdev->dev,
1260 "%s: firmware version %d.%d.%d unsupported\n",
1261 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
1262 return -EINVAL;
1263 }
1264
1265 val = qlcnic_get_bios_version(adapter);
1266 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
1267 if (val != bios) {
1268 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
1269 fw_name[fw_type]);
1270 return -EINVAL;
1271 }
1272
1273 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1274 return 0;
1275}
1276
1277static void
1278qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1279{
1280 u8 fw_type;
1281
1282 switch (adapter->ahw->fw_type) {
1283 case QLCNIC_UNKNOWN_ROMIMAGE:
1284 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
1285 break;
1286
1287 case QLCNIC_UNIFIED_ROMIMAGE:
1288 default:
1289 fw_type = QLCNIC_FLASH_ROMIMAGE;
1290 break;
1291 }
1292
1293 adapter->ahw->fw_type = fw_type;
1294}
1295
1296
1297
1298void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
1299{
1300 struct pci_dev *pdev = adapter->pdev;
1301 int rc;
1302
1303 adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
1304
1305next:
1306 qlcnic_get_next_fwtype(adapter);
1307
1308 if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
1309 adapter->fw = NULL;
1310 } else {
1311 rc = request_firmware(&adapter->fw,
1312 fw_name[adapter->ahw->fw_type],
1313 &pdev->dev);
1314 if (rc != 0)
1315 goto next;
1316
1317 rc = qlcnic_validate_firmware(adapter);
1318 if (rc != 0) {
1319 release_firmware(adapter->fw);
1320 msleep(1);
1321 goto next;
1322 }
1323 }
1324}
1325
1326
1327void
1328qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1329{
1330 release_firmware(adapter->fw);
1331 adapter->fw = NULL;
1332}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
deleted file mode 100644
index 6f82812d0fa..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ /dev/null
@@ -1,1309 +0,0 @@
1#include <linux/netdevice.h>
2#include <linux/if_vlan.h>
3#include <net/ip.h>
4#include <linux/ipv6.h>
5
6#include "qlcnic.h"
7
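/* Builds a 6-bit hash from bits 16-18 and 40-42 of the source MAC address. */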
8#define QLCNIC_MAC_HASH(MAC)\
9 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
10
11#define TX_ETHER_PKT 0x01
12#define TX_TCP_PKT 0x02
13#define TX_UDP_PKT 0x03
14#define TX_IP_PKT 0x04
15#define TX_TCP_LSO 0x05
16#define TX_TCP_LSO6 0x06
17#define TX_TCPV6_PKT 0x0b
18#define TX_UDPV6_PKT 0x0c
19#define FLAGS_VLAN_TAGGED 0x10
20#define FLAGS_VLAN_OOB 0x40
21
22#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
23 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
24#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
25 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
26#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
27 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
28
29#define qlcnic_set_tx_port(_desc, _port) \
30 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
31
32#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
33 ((_desc)->flags_opcode |= \
34 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
35
36#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
37 ((_desc)->nfrags__length = \
38 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
39
40/* owner bits of status_desc */
41#define STATUS_OWNER_HOST (0x1ULL << 56)
42#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
43
44/* Status descriptor:
45 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
46 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
47 53-55 desc_cnt, 56-57 owner, 58-63 opcode
48 */
49#define qlcnic_get_sts_port(sts_data) \
50 ((sts_data) & 0x0F)
51#define qlcnic_get_sts_status(sts_data) \
52 (((sts_data) >> 4) & 0x0F)
53#define qlcnic_get_sts_type(sts_data) \
54 (((sts_data) >> 8) & 0x0F)
55#define qlcnic_get_sts_totallength(sts_data) \
56 (((sts_data) >> 12) & 0xFFFF)
57#define qlcnic_get_sts_refhandle(sts_data) \
58 (((sts_data) >> 28) & 0xFFFF)
59#define qlcnic_get_sts_prot(sts_data) \
60 (((sts_data) >> 44) & 0x0F)
61#define qlcnic_get_sts_pkt_offset(sts_data) \
62 (((sts_data) >> 48) & 0x1F)
63#define qlcnic_get_sts_desc_cnt(sts_data) \
64 (((sts_data) >> 53) & 0x7)
65#define qlcnic_get_sts_opcode(sts_data) \
66 (((sts_data) >> 58) & 0x03F)
67
68#define qlcnic_get_lro_sts_refhandle(sts_data) \
69 ((sts_data) & 0x07FFF)
70#define qlcnic_get_lro_sts_length(sts_data) \
71 (((sts_data) >> 16) & 0x0FFFF)
72#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
73 (((sts_data) >> 32) & 0x0FF)
74#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
75 (((sts_data) >> 40) & 0x0FF)
76#define qlcnic_get_lro_sts_timestamp(sts_data) \
77 (((sts_data) >> 48) & 0x1)
78#define qlcnic_get_lro_sts_type(sts_data) \
79 (((sts_data) >> 49) & 0x7)
80#define qlcnic_get_lro_sts_push_flag(sts_data) \
81 (((sts_data) >> 52) & 0x1)
82#define qlcnic_get_lro_sts_seq_number(sts_data) \
83 ((sts_data) & 0x0FFFFFFFF)
84#define qlcnic_get_lro_sts_mss(sts_data1) \
85	(((sts_data1) >> 32) & 0x0FFFF)
86
87/* opcode field in status_desc */
88#define QLCNIC_SYN_OFFLOAD 0x03
89#define QLCNIC_RXPKT_DESC 0x04
90#define QLCNIC_OLD_RXPKT_DESC 0x3f
91#define QLCNIC_RESPONSE_DESC 0x05
92#define QLCNIC_LRO_DESC 0x12
93
94/* for status field in status_desc */
95#define STATUS_CKSUM_LOOP 0
96#define STATUS_CKSUM_OK 2
97
98static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
99 u64 uaddr, __le16 vlan_id,
100 struct qlcnic_host_tx_ring *tx_ring)
101{
102 struct cmd_desc_type0 *hwdesc;
103 struct qlcnic_nic_req *req;
104 struct qlcnic_mac_req *mac_req;
105 struct qlcnic_vlan_req *vlan_req;
106 u32 producer;
107 u64 word;
108
109 producer = tx_ring->producer;
110 hwdesc = &tx_ring->desc_head[tx_ring->producer];
111
112 req = (struct qlcnic_nic_req *)hwdesc;
113 memset(req, 0, sizeof(struct qlcnic_nic_req));
114 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
115
116 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
117 req->req_hdr = cpu_to_le64(word);
118
119 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
120 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
121 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
122
123 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
124 vlan_req->vlan_id = vlan_id;
125
126 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
127 smp_mb();
128}
129
130static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring,
132 struct cmd_desc_type0 *first_desc,
133 struct sk_buff *skb)
134{
135 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
136 struct qlcnic_filter *fil, *tmp_fil;
137 struct hlist_node *tmp_hnode, *n;
138 struct hlist_head *head;
139 u64 src_addr = 0;
140 __le16 vlan_id = 0;
141 u8 hindex;
142
143 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
144 return;
145
146 if (adapter->fhash.fnum >= adapter->fhash.fmax)
147 return;
148
149 /* Only NPAR capable devices support vlan based learning*/
150 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
151 vlan_id = first_desc->vlan_TCI;
152 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
153 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
154 head = &(adapter->fhash.fhead[hindex]);
155
156 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
157 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
158 tmp_fil->vlan_id == vlan_id) {
159
160 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
161 qlcnic_change_filter(adapter, src_addr, vlan_id,
162 tx_ring);
163 tmp_fil->ftime = jiffies;
164 return;
165 }
166 }
167
168 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
169 if (!fil)
170 return;
171
172 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
173
174 fil->ftime = jiffies;
175 fil->vlan_id = vlan_id;
176 memcpy(fil->faddr, &src_addr, ETH_ALEN);
177
178 spin_lock(&adapter->mac_learn_lock);
179
180 hlist_add_head(&(fil->fnode), head);
181 adapter->fhash.fnum++;
182
183 spin_unlock(&adapter->mac_learn_lock);
184}
185
186static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
187 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
188{
189 u8 l4proto, opcode = 0, hdr_len = 0;
190 u16 flags = 0, vlan_tci = 0;
191 int copied, offset, copy_len, size;
192 struct cmd_desc_type0 *hwdesc;
193 struct vlan_ethhdr *vh;
194 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
195 u16 protocol = ntohs(skb->protocol);
196 u32 producer = tx_ring->producer;
197
198 if (protocol == ETH_P_8021Q) {
199 vh = (struct vlan_ethhdr *)skb->data;
200 flags = FLAGS_VLAN_TAGGED;
201 vlan_tci = ntohs(vh->h_vlan_TCI);
202 protocol = ntohs(vh->h_vlan_encapsulated_proto);
203 } else if (vlan_tx_tag_present(skb)) {
204 flags = FLAGS_VLAN_OOB;
205 vlan_tci = vlan_tx_tag_get(skb);
206 }
207 if (unlikely(adapter->pvid)) {
208 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
209 return -EIO;
210 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
211 goto set_flags;
212
213 flags = FLAGS_VLAN_OOB;
214 vlan_tci = adapter->pvid;
215 }
216set_flags:
217 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
218 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
219
220 if (*(skb->data) & BIT_0) {
221 flags |= BIT_0;
222 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
223 }
224 opcode = TX_ETHER_PKT;
225 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
226 skb_shinfo(skb)->gso_size > 0) {
227 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
228 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
229 first_desc->total_hdr_length = hdr_len;
230 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
231
232 /* For LSO, we need to copy the MAC/IP/TCP headers into
233 * the descriptor ring */
234 copied = 0;
235 offset = 2;
236
237 if (flags & FLAGS_VLAN_OOB) {
238 first_desc->total_hdr_length += VLAN_HLEN;
239 first_desc->tcp_hdr_offset = VLAN_HLEN;
240 first_desc->ip_hdr_offset = VLAN_HLEN;
241
242 /* Only in case of TSO on vlan device */
243 flags |= FLAGS_VLAN_TAGGED;
244
245 /* Create a TSO vlan header template for firmware */
246 hwdesc = &tx_ring->desc_head[producer];
247 tx_ring->cmd_buf_arr[producer].skb = NULL;
248
249 copy_len = min((int)sizeof(struct cmd_desc_type0) -
250 offset, hdr_len + VLAN_HLEN);
251
252 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
253 skb_copy_from_linear_data(skb, vh, 12);
254 vh->h_vlan_proto = htons(ETH_P_8021Q);
255 vh->h_vlan_TCI = htons(vlan_tci);
256
257 skb_copy_from_linear_data_offset(skb, 12,
258 (char *)vh + 16,
259 copy_len - 16);
260 copied = copy_len - VLAN_HLEN;
261 offset = 0;
262 producer = get_next_index(producer, tx_ring->num_desc);
263 }
264
265 while (copied < hdr_len) {
266 size = (int)sizeof(struct cmd_desc_type0) - offset;
267 copy_len = min(size, (hdr_len - copied));
268 hwdesc = &tx_ring->desc_head[producer];
269 tx_ring->cmd_buf_arr[producer].skb = NULL;
270 skb_copy_from_linear_data_offset(skb, copied,
271 (char *)hwdesc +
272 offset, copy_len);
273 copied += copy_len;
274 offset = 0;
275 producer = get_next_index(producer, tx_ring->num_desc);
276 }
277
278 tx_ring->producer = producer;
279 smp_mb();
280 adapter->stats.lso_frames++;
281
282 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
283 if (protocol == ETH_P_IP) {
284 l4proto = ip_hdr(skb)->protocol;
285
286 if (l4proto == IPPROTO_TCP)
287 opcode = TX_TCP_PKT;
288 else if (l4proto == IPPROTO_UDP)
289 opcode = TX_UDP_PKT;
290 } else if (protocol == ETH_P_IPV6) {
291 l4proto = ipv6_hdr(skb)->nexthdr;
292
293 if (l4proto == IPPROTO_TCP)
294 opcode = TX_TCPV6_PKT;
295 else if (l4proto == IPPROTO_UDP)
296 opcode = TX_UDPV6_PKT;
297 }
298 }
299 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
300 first_desc->ip_hdr_offset += skb_network_offset(skb);
301 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
302
303 return 0;
304}
305
306static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
307 struct qlcnic_cmd_buffer *pbuf)
308{
309 struct qlcnic_skb_frag *nf;
310 struct skb_frag_struct *frag;
311 int i, nr_frags;
312 dma_addr_t map;
313
314 nr_frags = skb_shinfo(skb)->nr_frags;
315 nf = &pbuf->frag_array[0];
316
317 map = pci_map_single(pdev, skb->data, skb_headlen(skb),
318 PCI_DMA_TODEVICE);
319 if (pci_dma_mapping_error(pdev, map))
320 goto out_err;
321
322 nf->dma = map;
323 nf->length = skb_headlen(skb);
324
325 for (i = 0; i < nr_frags; i++) {
326 frag = &skb_shinfo(skb)->frags[i];
327 nf = &pbuf->frag_array[i+1];
328 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
329 DMA_TO_DEVICE);
330 if (dma_mapping_error(&pdev->dev, map))
331 goto unwind;
332
333 nf->dma = map;
334 nf->length = skb_frag_size(frag);
335 }
336
337 return 0;
338
339unwind:
340 while (--i >= 0) {
341 nf = &pbuf->frag_array[i+1];
342 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
343 }
344
345 nf = &pbuf->frag_array[0];
346 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
347
348out_err:
349 return -ENOMEM;
350}
351
352static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
353 struct qlcnic_cmd_buffer *pbuf)
354{
355 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
356 int i, nr_frags = skb_shinfo(skb)->nr_frags;
357
358 for (i = 0; i < nr_frags; i++) {
359 nf = &pbuf->frag_array[i+1];
360 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
361 }
362
363 nf = &pbuf->frag_array[0];
364 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
365 pbuf->skb = NULL;
366}
367
368static inline void qlcnic_clear_cmddesc(u64 *desc)
369{
370 desc[0] = 0ULL;
371 desc[2] = 0ULL;
372 desc[7] = 0ULL;
373}
374
375netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
376{
377 struct qlcnic_adapter *adapter = netdev_priv(netdev);
378 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
379 struct qlcnic_cmd_buffer *pbuf;
380 struct qlcnic_skb_frag *buffrag;
381 struct cmd_desc_type0 *hwdesc, *first_desc;
382 struct pci_dev *pdev;
383 struct ethhdr *phdr;
384 int i, k, frag_count, delta = 0;
385 u32 producer, num_txd;
386
387 num_txd = tx_ring->num_desc;
388
389 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
390 netif_stop_queue(netdev);
391 return NETDEV_TX_BUSY;
392 }
393
394 if (adapter->flags & QLCNIC_MACSPOOF) {
395 phdr = (struct ethhdr *)skb->data;
396 if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
397 goto drop_packet;
398 }
399
400 frag_count = skb_shinfo(skb)->nr_frags + 1;
401 /* 14 frags supported for normal packet and
402 * 32 frags supported for TSO packet
403 */
404 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
405 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
406 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
407
408 if (!__pskb_pull_tail(skb, delta))
409 goto drop_packet;
410
411 frag_count = 1 + skb_shinfo(skb)->nr_frags;
412 }
413
414 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
415 netif_stop_queue(netdev);
416 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
417 netif_start_queue(netdev);
418 } else {
419 adapter->stats.xmit_off++;
420 return NETDEV_TX_BUSY;
421 }
422 }
423
424 producer = tx_ring->producer;
425 pbuf = &tx_ring->cmd_buf_arr[producer];
426 pdev = adapter->pdev;
427 first_desc = &tx_ring->desc_head[producer];
428 hwdesc = &tx_ring->desc_head[producer];
429 qlcnic_clear_cmddesc((u64 *)hwdesc);
430
431 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
432 adapter->stats.tx_dma_map_error++;
433 goto drop_packet;
434 }
435
436 pbuf->skb = skb;
437 pbuf->frag_count = frag_count;
438
439 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
440 qlcnic_set_tx_port(first_desc, adapter->portnum);
441
442 for (i = 0; i < frag_count; i++) {
443 k = i % 4;
444
445 if ((k == 0) && (i > 0)) {
446 /* move to next desc.*/
447 producer = get_next_index(producer, num_txd);
448 hwdesc = &tx_ring->desc_head[producer];
449 qlcnic_clear_cmddesc((u64 *)hwdesc);
450 tx_ring->cmd_buf_arr[producer].skb = NULL;
451 }
452
453 buffrag = &pbuf->frag_array[i];
454 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
455 switch (k) {
456 case 0:
457 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
458 break;
459 case 1:
460 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
461 break;
462 case 2:
463 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
464 break;
465 case 3:
466 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
467 break;
468 }
469 }
470
471 tx_ring->producer = get_next_index(producer, num_txd);
472 smp_mb();
473
474 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
475 goto unwind_buff;
476
477 if (adapter->mac_learn)
478 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
479
480 adapter->stats.txbytes += skb->len;
481 adapter->stats.xmitcalled++;
482
483 qlcnic_update_cmd_producer(tx_ring);
484
485 return NETDEV_TX_OK;
486
487unwind_buff:
488 qlcnic_unmap_buffers(pdev, skb, pbuf);
489drop_packet:
490 adapter->stats.txdropped++;
491 dev_kfree_skb_any(skb);
492 return NETDEV_TX_OK;
493}
494
495void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
496{
497 struct net_device *netdev = adapter->netdev;
498
499 if (adapter->ahw->linkup && !linkup) {
500 netdev_info(netdev, "NIC Link is down\n");
501 adapter->ahw->linkup = 0;
502 if (netif_running(netdev)) {
503 netif_carrier_off(netdev);
504 netif_stop_queue(netdev);
505 }
506 } else if (!adapter->ahw->linkup && linkup) {
507 netdev_info(netdev, "NIC Link is up\n");
508 adapter->ahw->linkup = 1;
509 if (netif_running(netdev)) {
510 netif_carrier_on(netdev);
511 netif_wake_queue(netdev);
512 }
513 }
514}
515
516static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
517 struct qlcnic_host_rds_ring *rds_ring,
518 struct qlcnic_rx_buffer *buffer)
519{
520 struct sk_buff *skb;
521 dma_addr_t dma;
522 struct pci_dev *pdev = adapter->pdev;
523
524 skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
525 if (!skb) {
526 adapter->stats.skb_alloc_failure++;
527 return -ENOMEM;
528 }
529
530 skb_reserve(skb, NET_IP_ALIGN);
531 dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
532 PCI_DMA_FROMDEVICE);
533
534 if (pci_dma_mapping_error(pdev, dma)) {
535 adapter->stats.rx_dma_map_error++;
536 dev_kfree_skb_any(skb);
537 return -ENOMEM;
538 }
539
540 buffer->skb = skb;
541 buffer->dma = dma;
542
543 return 0;
544}
545
546static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
547 struct qlcnic_host_rds_ring *rds_ring)
548{
549 struct rcv_desc *pdesc;
550 struct qlcnic_rx_buffer *buffer;
551 int count = 0;
552 uint32_t producer;
553 struct list_head *head;
554
555 if (!spin_trylock(&rds_ring->lock))
556 return;
557
558 producer = rds_ring->producer;
559 head = &rds_ring->free_list;
560
561 while (!list_empty(head)) {
562 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
563
564 if (!buffer->skb) {
565 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
566 break;
567 }
568
569 count++;
570 list_del(&buffer->list);
571
572 /* make a rcv descriptor */
573 pdesc = &rds_ring->desc_head[producer];
574 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
575 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
576 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
577 producer = get_next_index(producer, rds_ring->num_desc);
578 }
579
580 if (count) {
581 rds_ring->producer = producer;
582 writel((producer - 1) & (rds_ring->num_desc - 1),
583 rds_ring->crb_rcv_producer);
584 }
585
586 spin_unlock(&rds_ring->lock);
587}
588
589static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
590{
591 u32 sw_consumer, hw_consumer;
592 int i, done, count = 0;
593 struct qlcnic_cmd_buffer *buffer;
594 struct pci_dev *pdev = adapter->pdev;
595 struct net_device *netdev = adapter->netdev;
596 struct qlcnic_skb_frag *frag;
597 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
598
599 if (!spin_trylock(&adapter->tx_clean_lock))
600 return 1;
601
602 sw_consumer = tx_ring->sw_consumer;
603 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
604
605 while (sw_consumer != hw_consumer) {
606 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
607 if (buffer->skb) {
608 frag = &buffer->frag_array[0];
609 pci_unmap_single(pdev, frag->dma, frag->length,
610 PCI_DMA_TODEVICE);
611 frag->dma = 0ULL;
612 for (i = 1; i < buffer->frag_count; i++) {
613 frag++;
614 pci_unmap_page(pdev, frag->dma, frag->length,
615 PCI_DMA_TODEVICE);
616 frag->dma = 0ULL;
617 }
618
619 adapter->stats.xmitfinished++;
620 dev_kfree_skb_any(buffer->skb);
621 buffer->skb = NULL;
622 }
623
624 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
625 if (++count >= MAX_STATUS_HANDLE)
626 break;
627 }
628
629 if (count && netif_running(netdev)) {
630 tx_ring->sw_consumer = sw_consumer;
631
632 smp_mb();
633
634 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
635 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
636 netif_wake_queue(netdev);
637 adapter->stats.xmit_on++;
638 }
639 }
640 adapter->tx_timeo_cnt = 0;
641 }
642 /*
643 * If everything is freed up to consumer then check if the ring is full
644 * If the ring is full then check if more needs to be freed and
645 * schedule the call back again.
646 *
647 * This happens when there are 2 CPUs. One could be freeing and the
648 * other filling it. If the ring is full when we get out of here and
649 * the card has already interrupted the host then the host can miss the
650 * interrupt.
651 *
652 * There is still a possible race condition and the host could miss an
653 * interrupt. The card has to take care of this.
654 */
655 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
656 done = (sw_consumer == hw_consumer);
657
658 spin_unlock(&adapter->tx_clean_lock);
659
660 return done;
661}
662
663static int qlcnic_poll(struct napi_struct *napi, int budget)
664{
665 struct qlcnic_host_sds_ring *sds_ring;
666 struct qlcnic_adapter *adapter;
667 int tx_complete, work_done;
668
669 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
670 adapter = sds_ring->adapter;
671
672 tx_complete = qlcnic_process_cmd_ring(adapter);
673 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
674
675 if ((work_done < budget) && tx_complete) {
676 napi_complete(&sds_ring->napi);
677 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
678 qlcnic_enable_int(sds_ring);
679 }
680
681 return work_done;
682}
683
684static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
685{
686 struct qlcnic_host_sds_ring *sds_ring;
687 struct qlcnic_adapter *adapter;
688 int work_done;
689
690 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
691 adapter = sds_ring->adapter;
692
693 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
694
695 if (work_done < budget) {
696 napi_complete(&sds_ring->napi);
697 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
698 qlcnic_enable_int(sds_ring);
699 }
700
701 return work_done;
702}
703
704static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
705 struct qlcnic_fw_msg *msg)
706{
707 u32 cable_OUI;
708 u16 cable_len, link_speed;
709 u8 link_status, module, duplex, autoneg, lb_status = 0;
710 struct net_device *netdev = adapter->netdev;
711
712 adapter->ahw->has_link_events = 1;
713
714 cable_OUI = msg->body[1] & 0xffffffff;
715 cable_len = (msg->body[1] >> 32) & 0xffff;
716 link_speed = (msg->body[1] >> 48) & 0xffff;
717
718 link_status = msg->body[2] & 0xff;
719 duplex = (msg->body[2] >> 16) & 0xff;
720 autoneg = (msg->body[2] >> 24) & 0xff;
721 lb_status = (msg->body[2] >> 32) & 0x3;
722
723 module = (msg->body[2] >> 8) & 0xff;
724 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
725 dev_info(&netdev->dev,
726 "unsupported cable: OUI 0x%x, length %d\n",
727 cable_OUI, cable_len);
728 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
729 dev_info(&netdev->dev, "unsupported cable length %d\n",
730 cable_len);
731
732 if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
733 lb_status == QLCNIC_ELB_MODE))
734 adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
735
736 qlcnic_advert_link_change(adapter, link_status);
737
738 if (duplex == LINKEVENT_FULL_DUPLEX)
739 adapter->ahw->link_duplex = DUPLEX_FULL;
740 else
741 adapter->ahw->link_duplex = DUPLEX_HALF;
742
743 adapter->ahw->module_type = module;
744 adapter->ahw->link_autoneg = autoneg;
745
746 if (link_status) {
747 adapter->ahw->link_speed = link_speed;
748 } else {
749 adapter->ahw->link_speed = SPEED_UNKNOWN;
750 adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
751 }
752}
753
754static void qlcnic_handle_fw_message(int desc_cnt, int index,
755 struct qlcnic_host_sds_ring *sds_ring)
756{
757 struct qlcnic_fw_msg msg;
758 struct status_desc *desc;
759 struct qlcnic_adapter *adapter;
760 struct device *dev;
761 int i = 0, opcode, ret;
762
763 while (desc_cnt > 0 && i < 8) {
764 desc = &sds_ring->desc_head[index];
765 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
766 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
767
768 index = get_next_index(index, sds_ring->num_desc);
769 desc_cnt--;
770 }
771
772 adapter = sds_ring->adapter;
773 dev = &adapter->pdev->dev;
774 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
775
776 switch (opcode) {
777 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
778 qlcnic_handle_linkevent(adapter, &msg);
779 break;
780 case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
781 ret = (u32)(msg.body[1]);
782 switch (ret) {
783 case 0:
784 adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
785 break;
786 case 1:
787 dev_info(dev, "loopback already in progress\n");
788 adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
789 break;
790 case 2:
791 dev_info(dev, "loopback cable is not connected\n");
792 adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
793 break;
794 default:
795 dev_info(dev,
796 "loopback configure request failed, err %x\n",
797 ret);
798 adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
799 break;
800 }
801 break;
802 default:
803 break;
804 }
805}
806
807static struct sk_buff *
808qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
809 struct qlcnic_host_rds_ring *rds_ring, u16 index,
810 u16 cksum)
811{
812 struct qlcnic_rx_buffer *buffer;
813 struct sk_buff *skb;
814
815 buffer = &rds_ring->rx_buf_arr[index];
816
817 if (unlikely(buffer->skb == NULL)) {
818 WARN_ON(1);
819 return NULL;
820 }
821
822 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
823 PCI_DMA_FROMDEVICE);
824
825 skb = buffer->skb;
826
827 if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
828 (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
829 adapter->stats.csummed++;
830 skb->ip_summed = CHECKSUM_UNNECESSARY;
831 } else {
832 skb_checksum_none_assert(skb);
833 }
834
835 buffer->skb = NULL;
836
837 return skb;
838}
839
840static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
841 struct sk_buff *skb, u16 *vlan_tag)
842{
843 struct ethhdr *eth_hdr;
844
845 if (!__vlan_get_tag(skb, vlan_tag)) {
846 eth_hdr = (struct ethhdr *)skb->data;
847 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
848 skb_pull(skb, VLAN_HLEN);
849 }
850 if (!adapter->pvid)
851 return 0;
852
853 if (*vlan_tag == adapter->pvid) {
854 /* Outer vlan tag. Packet should follow non-vlan path */
855 *vlan_tag = 0xffff;
856 return 0;
857 }
858 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
859 return 0;
860
861 return -EINVAL;
862}
863
864static struct qlcnic_rx_buffer *
865qlcnic_process_rcv(struct qlcnic_adapter *adapter,
866 struct qlcnic_host_sds_ring *sds_ring, int ring,
867 u64 sts_data0)
868{
869 struct net_device *netdev = adapter->netdev;
870 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
871 struct qlcnic_rx_buffer *buffer;
872 struct sk_buff *skb;
873 struct qlcnic_host_rds_ring *rds_ring;
874 int index, length, cksum, pkt_offset;
875 u16 vid = 0xffff;
876
877 if (unlikely(ring >= adapter->max_rds_rings))
878 return NULL;
879
880 rds_ring = &recv_ctx->rds_rings[ring];
881
882 index = qlcnic_get_sts_refhandle(sts_data0);
883 if (unlikely(index >= rds_ring->num_desc))
884 return NULL;
885
886 buffer = &rds_ring->rx_buf_arr[index];
887 length = qlcnic_get_sts_totallength(sts_data0);
888 cksum = qlcnic_get_sts_status(sts_data0);
889 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
890
891 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
892 if (!skb)
893 return buffer;
894
895 if (length > rds_ring->skb_size)
896 skb_put(skb, rds_ring->skb_size);
897 else
898 skb_put(skb, length);
899
900 if (pkt_offset)
901 skb_pull(skb, pkt_offset);
902
903 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
904 adapter->stats.rxdropped++;
905 dev_kfree_skb(skb);
906 return buffer;
907 }
908
909 skb->protocol = eth_type_trans(skb, netdev);
910
911 if (vid != 0xffff)
912 __vlan_hwaccel_put_tag(skb, vid);
913
914 napi_gro_receive(&sds_ring->napi, skb);
915
916 adapter->stats.rx_pkts++;
917 adapter->stats.rxbytes += length;
918
919 return buffer;
920}
921
922#define QLC_TCP_HDR_SIZE 20
923#define QLC_TCP_TS_OPTION_SIZE 12
924#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
925
926static struct qlcnic_rx_buffer *
927qlcnic_process_lro(struct qlcnic_adapter *adapter,
928 int ring, u64 sts_data0, u64 sts_data1)
929{
930 struct net_device *netdev = adapter->netdev;
931 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
932 struct qlcnic_rx_buffer *buffer;
933 struct sk_buff *skb;
934 struct qlcnic_host_rds_ring *rds_ring;
935 struct iphdr *iph;
936 struct tcphdr *th;
937 bool push, timestamp;
938 int index, l2_hdr_offset, l4_hdr_offset;
939 u16 lro_length, length, data_offset, vid = 0xffff;
940 u32 seq_number;
941
942	if (unlikely(ring >= adapter->max_rds_rings))
943 return NULL;
944
945 rds_ring = &recv_ctx->rds_rings[ring];
946
947 index = qlcnic_get_lro_sts_refhandle(sts_data0);
948	if (unlikely(index >= rds_ring->num_desc))
949 return NULL;
950
951 buffer = &rds_ring->rx_buf_arr[index];
952
953 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
954 lro_length = qlcnic_get_lro_sts_length(sts_data0);
955 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
956 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
957 push = qlcnic_get_lro_sts_push_flag(sts_data0);
958 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
959
960 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
961 if (!skb)
962 return buffer;
963
964 if (timestamp)
965 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
966 else
967 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
968
969 skb_put(skb, lro_length + data_offset);
970 skb_pull(skb, l2_hdr_offset);
971
972 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
973 adapter->stats.rxdropped++;
974 dev_kfree_skb(skb);
975 return buffer;
976 }
977
978 skb->protocol = eth_type_trans(skb, netdev);
979 iph = (struct iphdr *)skb->data;
980 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
981 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
982 iph->tot_len = htons(length);
983 iph->check = 0;
984 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
985 th->psh = push;
986 th->seq = htonl(seq_number);
987 length = skb->len;
988
989 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
990 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
991
992 if (vid != 0xffff)
993 __vlan_hwaccel_put_tag(skb, vid);
994 netif_receive_skb(skb);
995
996 adapter->stats.lro_pkts++;
997 adapter->stats.lrobytes += length;
998
999 return buffer;
1000}
1001
1002int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1003{
1004 struct qlcnic_host_rds_ring *rds_ring;
1005 struct qlcnic_adapter *adapter = sds_ring->adapter;
1006 struct list_head *cur;
1007 struct status_desc *desc;
1008 struct qlcnic_rx_buffer *rxbuf;
1009 u64 sts_data0, sts_data1;
1010 __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
1011 int opcode, ring, desc_cnt, count = 0;
1012 u32 consumer = sds_ring->consumer;
1013
1014 while (count < max) {
1015 desc = &sds_ring->desc_head[consumer];
1016 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1017
1018 if (!(sts_data0 & STATUS_OWNER_HOST))
1019 break;
1020
1021 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1022 opcode = qlcnic_get_sts_opcode(sts_data0);
1023
1024 switch (opcode) {
1025 case QLCNIC_RXPKT_DESC:
1026 case QLCNIC_OLD_RXPKT_DESC:
1027 case QLCNIC_SYN_OFFLOAD:
1028 ring = qlcnic_get_sts_type(sts_data0);
1029 rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
1030 sts_data0);
1031 break;
1032 case QLCNIC_LRO_DESC:
1033 ring = qlcnic_get_lro_sts_type(sts_data0);
1034 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1035 rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
1036 sts_data1);
1037 break;
1038 case QLCNIC_RESPONSE_DESC:
1039 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
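			/* fall through: firmware response descriptors carry no rx buffer to recycle */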
1040 default:
1041 goto skip;
1042 }
1043
1044 WARN_ON(desc_cnt > 1);
1045
1046 if (likely(rxbuf))
1047 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1048 else
1049 adapter->stats.null_rxbuf++;
1050
1051skip:
1052 for (; desc_cnt > 0; desc_cnt--) {
1053 desc = &sds_ring->desc_head[consumer];
1054 desc->status_desc_data[0] = owner_phantom;
1055 consumer = get_next_index(consumer, sds_ring->num_desc);
1056 }
1057 count++;
1058 }
1059
1060 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1061 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1062
1063 if (!list_empty(&sds_ring->free_list[ring])) {
1064 list_for_each(cur, &sds_ring->free_list[ring]) {
1065 rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1066 list);
1067 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1068 }
1069 spin_lock(&rds_ring->lock);
1070 list_splice_tail_init(&sds_ring->free_list[ring],
1071 &rds_ring->free_list);
1072 spin_unlock(&rds_ring->lock);
1073 }
1074
1075 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1076 }
1077
1078 if (count) {
1079 sds_ring->consumer = consumer;
1080 writel(consumer, sds_ring->crb_sts_consumer);
1081 }
1082
1083 return count;
1084}
1085
1086void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1087 struct qlcnic_host_rds_ring *rds_ring)
1088{
1089 struct rcv_desc *pdesc;
1090 struct qlcnic_rx_buffer *buffer;
1091 int count = 0;
1092 u32 producer;
1093 struct list_head *head;
1094
1095 producer = rds_ring->producer;
1096 head = &rds_ring->free_list;
1097
1098 while (!list_empty(head)) {
1099
1100 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1101
1102 if (!buffer->skb) {
1103 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1104 break;
1105 }
1106
1107 count++;
1108 list_del(&buffer->list);
1109
1110 /* make a rcv descriptor */
1111 pdesc = &rds_ring->desc_head[producer];
1112 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1113 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1114 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1115 producer = get_next_index(producer, rds_ring->num_desc);
1116 }
1117
1118 if (count) {
1119 rds_ring->producer = producer;
1120 writel((producer-1) & (rds_ring->num_desc-1),
1121 rds_ring->crb_rcv_producer);
1122 }
1123}
1124
1125static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1126{
1127 int i;
1128 unsigned char *data = skb->data;
1129
1130	pr_info("\n");
1131 for (i = 0; i < skb->len; i++) {
1132 QLCDB(adapter, DRV, "%02x ", data[i]);
1133 if ((i & 0x0f) == 8)
1134			pr_info("\n");
1135 }
1136}
1137
1138static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
1139 u64 sts_data0)
1140{
1141 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1142 struct sk_buff *skb;
1143 struct qlcnic_host_rds_ring *rds_ring;
1144 int index, length, cksum, pkt_offset;
1145
1146 if (unlikely(ring >= adapter->max_rds_rings))
1147 return;
1148
1149 rds_ring = &recv_ctx->rds_rings[ring];
1150
1151 index = qlcnic_get_sts_refhandle(sts_data0);
1152 length = qlcnic_get_sts_totallength(sts_data0);
1153 if (unlikely(index >= rds_ring->num_desc))
1154 return;
1155
1156 cksum = qlcnic_get_sts_status(sts_data0);
1157 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1158
1159 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1160 if (!skb)
1161 return;
1162
1163 if (length > rds_ring->skb_size)
1164 skb_put(skb, rds_ring->skb_size);
1165 else
1166 skb_put(skb, length);
1167
1168 if (pkt_offset)
1169 skb_pull(skb, pkt_offset);
1170
1171 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1172 adapter->ahw->diag_cnt++;
1173 else
1174 dump_skb(skb, adapter);
1175
1176 dev_kfree_skb_any(skb);
1177 adapter->stats.rx_pkts++;
1178 adapter->stats.rxbytes += length;
1179
1180 return;
1181}
1182
1183void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1184{
1185 struct qlcnic_adapter *adapter = sds_ring->adapter;
1186 struct status_desc *desc;
1187 u64 sts_data0;
1188 int ring, opcode, desc_cnt;
1189
1190 u32 consumer = sds_ring->consumer;
1191
1192 desc = &sds_ring->desc_head[consumer];
1193 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1194
1195 if (!(sts_data0 & STATUS_OWNER_HOST))
1196 return;
1197
1198 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1199 opcode = qlcnic_get_sts_opcode(sts_data0);
1200 switch (opcode) {
1201 case QLCNIC_RESPONSE_DESC:
1202 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1203 break;
1204 default:
1205 ring = qlcnic_get_sts_type(sts_data0);
1206 qlcnic_process_rcv_diag(adapter, ring, sts_data0);
1207 break;
1208 }
1209
1210 for (; desc_cnt > 0; desc_cnt--) {
1211 desc = &sds_ring->desc_head[consumer];
1212 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1213 consumer = get_next_index(consumer, sds_ring->num_desc);
1214 }
1215
1216 sds_ring->consumer = consumer;
1217 writel(consumer, sds_ring->crb_sts_consumer);
1218}
1219
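/* Build the 6-byte MAC address from two 32-bit register words; alt_mac selects the alternate byte packing. */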
1220void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
1221{
1222 u32 mac_low, mac_high;
1223 int i;
1224
1225 mac_low = off1;
1226 mac_high = off2;
1227
1228 if (alt_mac) {
1229 mac_low |= (mac_low >> 16) | (mac_high << 16);
1230 mac_high >>= 16;
1231 }
1232
1233 for (i = 0; i < 2; i++)
1234 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1235 for (i = 2; i < 6; i++)
1236 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1237}
1238
1239int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
1240{
1241 int ring, max_sds_rings;
1242 struct qlcnic_host_sds_ring *sds_ring;
1243 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1244
1245 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1246 return -ENOMEM;
1247
1248 max_sds_rings = adapter->max_sds_rings;
1249
1250 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1251 sds_ring = &recv_ctx->sds_rings[ring];
1252
1253 if (ring == max_sds_rings - 1)
1254 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
1255 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1256 else
1257 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1258 QLCNIC_NETDEV_WEIGHT*2);
1259 }
1260
1261 return 0;
1262}
1263
1264void qlcnic_napi_del(struct qlcnic_adapter *adapter)
1265{
1266 int ring;
1267 struct qlcnic_host_sds_ring *sds_ring;
1268 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1269
1270 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1271 sds_ring = &recv_ctx->sds_rings[ring];
1272 netif_napi_del(&sds_ring->napi);
1273 }
1274
1275 qlcnic_free_sds_rings(adapter->recv_ctx);
1276}
1277
1278void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
1279{
1280 int ring;
1281 struct qlcnic_host_sds_ring *sds_ring;
1282 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1283
1284 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1285 return;
1286
1287 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1288 sds_ring = &recv_ctx->sds_rings[ring];
1289 napi_enable(&sds_ring->napi);
1290 qlcnic_enable_int(sds_ring);
1291 }
1292}
1293
1294void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
1295{
1296 int ring;
1297 struct qlcnic_host_sds_ring *sds_ring;
1298 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1299
1300 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1301 return;
1302
1303 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1304 sds_ring = &recv_ctx->sds_rings[ring];
1305 qlcnic_disable_int(sds_ring);
1306 napi_synchronize(&sds_ring->napi);
1307 napi_disable(&sds_ring->napi);
1308 }
1309}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
deleted file mode 100644
index d833f592789..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ /dev/null
@@ -1,3030 +0,0 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/slab.h>
9#include <linux/vmalloc.h>
10#include <linux/interrupt.h>
11
12#include "qlcnic.h"
13
14#include <linux/swab.h>
15#include <linux/dma-mapping.h>
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <linux/inetdevice.h>
19#include <linux/sysfs.h>
20#include <linux/aer.h>
21#include <linux/log2.h>
22
23MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
26MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
27
28char qlcnic_driver_name[] = "qlcnic";
29static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
30 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
31
32static struct workqueue_struct *qlcnic_wq;
33static int qlcnic_mac_learn;
34module_param(qlcnic_mac_learn, int, 0444);
35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36
37static int qlcnic_use_msi = 1;
38MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
39module_param_named(use_msi, qlcnic_use_msi, int, 0444);
40
41static int qlcnic_use_msi_x = 1;
42MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
43module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
44
45static int qlcnic_auto_fw_reset = 1;
46MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
47module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
48
49static int qlcnic_load_fw_file;
50MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
51module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
52
53static int qlcnic_config_npars;
54module_param(qlcnic_config_npars, int, 0444);
55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56
57static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
58static void qlcnic_remove(struct pci_dev *pdev);
59static int qlcnic_open(struct net_device *netdev);
60static int qlcnic_close(struct net_device *netdev);
61static void qlcnic_tx_timeout(struct net_device *netdev);
62static void qlcnic_attach_work(struct work_struct *work);
63static void qlcnic_fwinit_work(struct work_struct *work);
64static void qlcnic_fw_poll_work(struct work_struct *work);
65static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
66 work_func_t func, int delay);
67static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
68#ifdef CONFIG_NET_POLL_CONTROLLER
69static void qlcnic_poll_controller(struct net_device *netdev);
70#endif
71
72static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
73static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
74static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
75
76static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
77static irqreturn_t qlcnic_intr(int irq, void *data);
78static irqreturn_t qlcnic_msi_intr(int irq, void *data);
79static irqreturn_t qlcnic_msix_intr(int irq, void *data);
80
81static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
82static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
83static int qlcnic_start_firmware(struct qlcnic_adapter *);
84
85static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
86static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
87static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
88static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
89 struct qlcnic_esw_func_cfg *);
90static int qlcnic_vlan_rx_add(struct net_device *, u16);
91static int qlcnic_vlan_rx_del(struct net_device *, u16);
92
93#define QLCNIC_IS_TSO_CAPABLE(adapter) \
94 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
95
96/* PCI Device ID Table */
97#define ENTRY(device) \
98 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
99 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
100
101#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
102
103static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
104 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
105 {0,}
106};
107
108MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
109
110
111inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
112{
113 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
114}
115
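/* Per-function interrupt target status registers, indexed by PCI function number. */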
116static const u32 msi_tgt_status[8] = {
117 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
118 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
119 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
120 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
121};
122
123static const struct qlcnic_board_info qlcnic_boards[] = {
124 {0x1077, 0x8020, 0x1077, 0x203,
125 "8200 Series Single Port 10GbE Converged Network Adapter"
126 "(TCP/IP Networking)"},
127 {0x1077, 0x8020, 0x1077, 0x207,
128 "8200 Series Dual Port 10GbE Converged Network Adapter"
129 "(TCP/IP Networking)"},
130 {0x1077, 0x8020, 0x1077, 0x20b,
131 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
132 {0x1077, 0x8020, 0x1077, 0x20c,
133 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
134 {0x1077, 0x8020, 0x1077, 0x20f,
135 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
136 {0x1077, 0x8020, 0x103c, 0x3733,
137 "NC523SFP 10Gb 2-port Server Adapter"},
138 {0x1077, 0x8020, 0x103c, 0x3346,
139 "CN1000Q Dual Port Converged Network Adapter"},
140 {0x1077, 0x8020, 0x1077, 0x210,
141 "QME8242-k 10GbE Dual Port Mezzanine Card"},
142 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
143};
144
145#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
146
147static const
148struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
149
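/* Returns 0 on success, non-zero if the status ring array could not be allocated. */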
150int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
151{
152 int size = sizeof(struct qlcnic_host_sds_ring) * count;
153
154 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
155
156 return recv_ctx->sds_rings == NULL;
157}
158
159void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
160{
161 if (recv_ctx->sds_rings != NULL)
162 kfree(recv_ctx->sds_rings);
163
164 recv_ctx->sds_rings = NULL;
165}
166
167static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
168{
169 memset(&adapter->stats, 0, sizeof(adapter->stats));
170}
171
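/* Enable or disable MSI-X via the function's MSI-X capability control register. */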
172static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
173{
174 u32 control;
175 int pos;
176
177 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
178 if (pos) {
179 pci_read_config_dword(pdev, pos, &control);
180 if (enable)
181 control |= PCI_MSIX_FLAGS_ENABLE;
182 else
183 control = 0;
184 pci_write_config_dword(pdev, pos, control);
185 }
186}
187
188static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
189{
190 int i;
191
192 for (i = 0; i < count; i++)
193 adapter->msix_entries[i].entry = i;
194}
195
196static int
197qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
198{
199 u8 mac_addr[ETH_ALEN];
200 struct net_device *netdev = adapter->netdev;
201 struct pci_dev *pdev = adapter->pdev;
202
203 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
204 return -EIO;
205
206 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
207 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
208 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
209
210 /* set station address */
211
212 if (!is_valid_ether_addr(netdev->perm_addr))
213 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
214 netdev->dev_addr);
215
216 return 0;
217}
218
219static int qlcnic_set_mac(struct net_device *netdev, void *p)
220{
221 struct qlcnic_adapter *adapter = netdev_priv(netdev);
222 struct sockaddr *addr = p;
223
224 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
225 return -EOPNOTSUPP;
226
227 if (!is_valid_ether_addr(addr->sa_data))
228 return -EADDRNOTAVAIL;
229
230 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
231 netif_device_detach(netdev);
232 qlcnic_napi_disable(adapter);
233 }
234
235 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
236 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
237 qlcnic_set_multi(adapter->netdev);
238
239 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
240 netif_device_attach(netdev);
241 qlcnic_napi_enable(adapter);
242 }
243 return 0;
244}
245
246static const struct net_device_ops qlcnic_netdev_ops = {
247 .ndo_open = qlcnic_open,
248 .ndo_stop = qlcnic_close,
249 .ndo_start_xmit = qlcnic_xmit_frame,
250 .ndo_get_stats = qlcnic_get_stats,
251 .ndo_validate_addr = eth_validate_addr,
252 .ndo_set_rx_mode = qlcnic_set_multi,
253 .ndo_set_mac_address = qlcnic_set_mac,
254 .ndo_change_mtu = qlcnic_change_mtu,
255 .ndo_fix_features = qlcnic_fix_features,
256 .ndo_set_features = qlcnic_set_features,
257 .ndo_tx_timeout = qlcnic_tx_timeout,
258 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
259 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
260#ifdef CONFIG_NET_POLL_CONTROLLER
261 .ndo_poll_controller = qlcnic_poll_controller,
262#endif
263};
264
265static const struct net_device_ops qlcnic_netdev_failed_ops = {
266 .ndo_open = qlcnic_open,
267};
268
269static struct qlcnic_nic_template qlcnic_ops = {
270 .config_bridged_mode = qlcnic_config_bridged_mode,
271 .config_led = qlcnic_config_led,
272 .start_firmware = qlcnic_start_firmware
273};
274
275static struct qlcnic_nic_template qlcnic_vf_ops = {
276 .config_bridged_mode = qlcnicvf_config_bridged_mode,
277 .config_led = qlcnicvf_config_led,
278 .start_firmware = qlcnicvf_start_firmware
279};
280
281static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
282{
283 struct pci_dev *pdev = adapter->pdev;
284 int err = -1;
285
286 adapter->max_sds_rings = 1;
287 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
288 qlcnic_set_msix_bit(pdev, 0);
289
290 if (adapter->ahw->msix_supported) {
291 enable_msix:
292 qlcnic_init_msix_entries(adapter, num_msix);
293 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
294 if (err == 0) {
295 adapter->flags |= QLCNIC_MSIX_ENABLED;
296 qlcnic_set_msix_bit(pdev, 1);
297
298 adapter->max_sds_rings = num_msix;
299
300 dev_info(&pdev->dev, "using msi-x interrupts\n");
301 return err;
302 }
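 /*
 * A positive return value from pci_enable_msix() is the number of
 * vectors actually available; retry the request with the largest
 * power of two that fits.
 */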
303 if (err > 0) {
304 num_msix = rounddown_pow_of_two(err);
305 if (num_msix)
306 goto enable_msix;
307 }
308 }
309 return err;
310}
311
312static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
313{
314 u32 offset, mask_reg;
315 const struct qlcnic_legacy_intr_set *legacy_intrp;
316 struct qlcnic_hardware_context *ahw = adapter->ahw;
317 struct pci_dev *pdev = adapter->pdev;
318
319 if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
320 adapter->flags |= QLCNIC_MSI_ENABLED;
321 offset = msi_tgt_status[adapter->ahw->pci_func];
322 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
323 offset);
324 dev_info(&pdev->dev, "using msi interrupts\n");
325 adapter->msix_entries[0].vector = pdev->irq;
326 return;
327 }
328
329 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
330 adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
331 offset = legacy_intrp->tgt_status_reg;
332 adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
333 mask_reg = legacy_intrp->tgt_mask_reg;
334 adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
335 adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
336 adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
337 dev_info(&pdev->dev, "using legacy interrupts\n");
338 adapter->msix_entries[0].vector = pdev->irq;
339}
340
341static void
342qlcnic_setup_intr(struct qlcnic_adapter *adapter)
343{
344 int num_msix;
345
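 /* One status ring per MSI-X vector: min(online CPUs, default ring count), rounded down to a power of two. */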
346 if (adapter->ahw->msix_supported) {
347 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
348 QLCNIC_DEF_NUM_STS_DESC_RINGS));
349 } else
350 num_msix = 1;
351
352 if (!qlcnic_enable_msix(adapter, num_msix))
353 return;
354
355 qlcnic_enable_msi_legacy(adapter);
356}
357
358static void
359qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
360{
361 if (adapter->flags & QLCNIC_MSIX_ENABLED)
362 pci_disable_msix(adapter->pdev);
363 if (adapter->flags & QLCNIC_MSI_ENABLED)
364 pci_disable_msi(adapter->pdev);
365}
366
367static void
368qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
369{
370 if (adapter->ahw->pci_base0 != NULL)
371 iounmap(adapter->ahw->pci_base0);
372}
373
374static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
375{
376 struct qlcnic_pci_info *pci_info;
377 int i, ret = 0, j = 0;
378 u16 act_pci_func;
379 u8 pfn;
380
381 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
382 if (!pci_info)
383 return -ENOMEM;
384
385 ret = qlcnic_get_pci_info(adapter, pci_info);
386 if (ret)
387 goto err_pci_info;
388
389 act_pci_func = adapter->ahw->act_pci_func;
390
391 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
392 act_pci_func, GFP_KERNEL);
393 if (!adapter->npars) {
394 ret = -ENOMEM;
395 goto err_pci_info;
396 }
397
398 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
399 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
400 if (!adapter->eswitch) {
401 ret = -ENOMEM;
402 goto err_npars;
403 }
404
405 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
406 pfn = pci_info[i].id;
407
408 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
409 ret = QL_STATUS_INVALID_PARAM;
410 goto err_eswitch;
411 }
412
413 if (!pci_info[i].active ||
414 (pci_info[i].type != QLCNIC_TYPE_NIC))
415 continue;
416
417 adapter->npars[j].pci_func = pfn;
418 adapter->npars[j].active = (u8)pci_info[i].active;
419 adapter->npars[j].type = (u8)pci_info[i].type;
420 adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
421 adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
422 adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
423 j++;
424 }
425
426 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
427 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
428
429 kfree(pci_info);
430 return 0;
431
432err_eswitch:
433 kfree(adapter->eswitch);
434 adapter->eswitch = NULL;
435err_npars:
436 kfree(adapter->npars);
437 adapter->npars = NULL;
438err_pci_info:
439 kfree(pci_info);
440
441 return ret;
442}
443
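/* Write the per-function driver privilege nibbles into QLCNIC_DRV_OP_MODE. */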
444static int
445qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
446{
447 u8 id;
448 int i, ret = 1;
449 u32 data = QLCNIC_MGMT_FUNC;
450 struct qlcnic_hardware_context *ahw = adapter->ahw;
451
452 ret = qlcnic_api_lock(adapter);
453 if (ret)
454 goto err_lock;
455
456 if (qlcnic_config_npars) {
457 for (i = 0; i < ahw->act_pci_func; i++) {
458 id = adapter->npars[i].pci_func;
459 if (id == ahw->pci_func)
460 continue;
461 data |= (qlcnic_config_npars &
462 QLC_DEV_SET_DRV(0xf, id));
463 }
464 } else {
465 data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
466 data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
467 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
468 ahw->pci_func));
469 }
470 QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
471 qlcnic_api_unlock(adapter);
472err_lock:
473 return ret;
474}
475
476static void
477qlcnic_check_vf(struct qlcnic_adapter *adapter)
478{
479 void __iomem *msix_base_addr;
480 void __iomem *priv_op;
481 u32 func;
482 u32 msix_base;
483 u32 op_mode, priv_level;
484
485 /* Determine FW API version */
486 adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
487 QLCNIC_FW_API);
488
489 /* Find PCI function number */
490 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
491 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
492 msix_base = readl(msix_base_addr);
493 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
494 adapter->ahw->pci_func = func;
495
496 /* Determine function privilege level */
497 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
498 op_mode = readl(priv_op);
499 if (op_mode == QLC_DEV_DRV_DEFAULT)
500 priv_level = QLCNIC_MGMT_FUNC;
501 else
502 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
503
504 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
505 adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
506 dev_info(&adapter->pdev->dev,
507 "HAL Version: %d Non Privileged function\n",
508 adapter->ahw->fw_hal_version);
509 adapter->nic_ops = &qlcnic_vf_ops;
510 } else
511 adapter->nic_ops = &qlcnic_ops;
512}
513
514#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
515static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
516{
517 switch (dev_id) {
518 case PCI_DEVICE_ID_QLOGIC_QLE824X:
519 *bar = QLCNIC_82XX_BAR0_LENGTH;
520 break;
521 default:
522 *bar = 0;
523 }
524}
525
526static int qlcnic_setup_pci_map(struct pci_dev *pdev,
527 struct qlcnic_hardware_context *ahw)
528{
529 u32 offset;
530 void __iomem *mem_ptr0 = NULL;
531 unsigned long mem_len, pci_len0 = 0, bar0_len;
532
533 /* remap phys address */
534 mem_len = pci_resource_len(pdev, 0);
535
536 qlcnic_get_bar_length(pdev->device, &bar0_len);
537 if (mem_len >= bar0_len) {
538
539 mem_ptr0 = pci_ioremap_bar(pdev, 0);
540 if (mem_ptr0 == NULL) {
541 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
542 return -EIO;
543 }
544 pci_len0 = mem_len;
545 } else {
546 return -EIO;
547 }
548
549 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
550 ahw->pci_base0 = mem_ptr0;
551 ahw->pci_len0 = pci_len0;
552 offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
553 qlcnic_get_ioaddr(ahw, offset);
554
555 return 0;
556}
557
558static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
559{
560 struct pci_dev *pdev = adapter->pdev;
561 int i, found = 0;
562
563 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
564 if (qlcnic_boards[i].vendor == pdev->vendor &&
565 qlcnic_boards[i].device == pdev->device &&
566 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
567 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
 568 sprintf(name, "%pM: %s",
569 adapter->mac_addr,
570 qlcnic_boards[i].short_name);
571 found = 1;
572 break;
573 }
574
575 }
576
577 if (!found)
578 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
579}
580
581static void
582qlcnic_check_options(struct qlcnic_adapter *adapter)
583{
584 u32 fw_major, fw_minor, fw_build, prev_fw_version;
585 struct pci_dev *pdev = adapter->pdev;
586 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
587
588 prev_fw_version = adapter->fw_version;
589
590 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
591 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
592 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
593
594 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
595
596 if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
597 if (fw_dump->tmpl_hdr == NULL ||
598 adapter->fw_version > prev_fw_version) {
599 if (fw_dump->tmpl_hdr)
600 vfree(fw_dump->tmpl_hdr);
601 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
602 dev_info(&pdev->dev,
603 "Supports FW dump capability\n");
604 }
605 }
606
607 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
608 fw_major, fw_minor, fw_build);
609 if (adapter->ahw->port_type == QLCNIC_XGBE) {
610 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
611 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
612 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
613 } else {
614 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
615 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
616 }
617
618 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
619 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
620
621 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
622 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
623 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
624 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
625 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
626 }
627
628 adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
629
630 adapter->num_txd = MAX_CMD_DESCRIPTORS;
631
632 adapter->max_rds_rings = MAX_RDS_RINGS;
633}
634
635static int
636qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
637{
638 int err;
639 struct qlcnic_info nic_info;
640
641 memset(&nic_info, 0, sizeof(struct qlcnic_info));
642 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
643 if (err)
644 return err;
645
646 adapter->ahw->physical_port = (u8)nic_info.phys_port;
647 adapter->ahw->switch_mode = nic_info.switch_mode;
648 adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
649 adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
650 adapter->ahw->capabilities = nic_info.capabilities;
651 adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
652 adapter->ahw->max_mtu = nic_info.max_mtu;
653
654 if (adapter->ahw->capabilities & BIT_6)
655 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
656 else
657 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
658
659 return err;
660}
661
662void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
663 struct qlcnic_esw_func_cfg *esw_cfg)
664{
665 if (esw_cfg->discard_tagged)
666 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
667 else
668 adapter->flags |= QLCNIC_TAGGING_ENABLED;
669
670 if (esw_cfg->vlan_id)
671 adapter->pvid = esw_cfg->vlan_id;
672 else
673 adapter->pvid = 0;
674}
675
676static int
677qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
678{
679 struct qlcnic_adapter *adapter = netdev_priv(netdev);
680 set_bit(vid, adapter->vlans);
681 return 0;
682}
683
684static int
685qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
686{
687 struct qlcnic_adapter *adapter = netdev_priv(netdev);
688
689 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
690 clear_bit(vid, adapter->vlans);
691 return 0;
692}
693
694void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
695 struct qlcnic_esw_func_cfg *esw_cfg)
696{
697 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
698 QLCNIC_PROMISC_DISABLED);
699
700 if (esw_cfg->mac_anti_spoof)
701 adapter->flags |= QLCNIC_MACSPOOF;
702
703 if (!esw_cfg->mac_override)
704 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
705
706 if (!esw_cfg->promisc_mode)
707 adapter->flags |= QLCNIC_PROMISC_DISABLED;
708
709 qlcnic_set_netdev_features(adapter, esw_cfg);
710}
711
712static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
713{
714 struct qlcnic_esw_func_cfg esw_cfg;
715
716 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
717 return 0;
718
719 esw_cfg.pci_func = adapter->ahw->pci_func;
720 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
721 return -EIO;
722 qlcnic_set_vlan_config(adapter, &esw_cfg);
723 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
724
725 return 0;
726}
727
728static void
729qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
730 struct qlcnic_esw_func_cfg *esw_cfg)
731{
732 struct net_device *netdev = adapter->netdev;
733 netdev_features_t features, vlan_features;
734
735 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
736 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
737 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
738 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
739
740 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
741 features |= (NETIF_F_TSO | NETIF_F_TSO6);
742 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
743 }
744
745 if (netdev->features & NETIF_F_LRO)
746 features |= NETIF_F_LRO;
747
748 if (esw_cfg->offload_flags & BIT_0) {
749 netdev->features |= features;
750 if (!(esw_cfg->offload_flags & BIT_1))
751 netdev->features &= ~NETIF_F_TSO;
752 if (!(esw_cfg->offload_flags & BIT_2))
753 netdev->features &= ~NETIF_F_TSO6;
754 } else {
755 netdev->features &= ~features;
756 }
757
758 netdev->vlan_features = (features & vlan_features);
759}
760
761static int
762qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
763{
764 void __iomem *priv_op;
765 u32 op_mode, priv_level;
766 int err = 0;
767
768 err = qlcnic_initialize_nic(adapter);
769 if (err)
770 return err;
771
772 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
773 return 0;
774
775 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
776 op_mode = readl(priv_op);
777 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
778
779 if (op_mode == QLC_DEV_DRV_DEFAULT)
780 priv_level = QLCNIC_MGMT_FUNC;
781 else
782 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
783
784 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
785 if (priv_level == QLCNIC_MGMT_FUNC) {
786 adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
787 err = qlcnic_init_pci_info(adapter);
788 if (err)
789 return err;
790 /* Set privilege level for other functions */
791 qlcnic_set_function_modes(adapter);
792 dev_info(&adapter->pdev->dev,
793 "HAL Version: %d, Management function\n",
794 adapter->ahw->fw_hal_version);
795 } else if (priv_level == QLCNIC_PRIV_FUNC) {
796 adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
797 dev_info(&adapter->pdev->dev,
798 "HAL Version: %d, Privileged function\n",
799 adapter->ahw->fw_hal_version);
800 }
801 }
802
803 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
804
805 return err;
806}
807
808static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
809{
810 struct qlcnic_esw_func_cfg esw_cfg;
811 struct qlcnic_npar_info *npar;
812 u8 i;
813
814 if (adapter->need_fw_reset)
815 return 0;
816
817 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
818 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
819 esw_cfg.pci_func = adapter->npars[i].pci_func;
820 esw_cfg.mac_override = BIT_0;
821 esw_cfg.promisc_mode = BIT_0;
822 if (qlcnic_82xx_check(adapter)) {
823 esw_cfg.offload_flags = BIT_0;
824 if (QLCNIC_IS_TSO_CAPABLE(adapter))
825 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
826 }
827 if (qlcnic_config_switch_port(adapter, &esw_cfg))
828 return -EIO;
829 npar = &adapter->npars[i];
830 npar->pvid = esw_cfg.vlan_id;
831 npar->mac_override = esw_cfg.mac_override;
832 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
833 npar->discard_tagged = esw_cfg.discard_tagged;
834 npar->promisc_mode = esw_cfg.promisc_mode;
835 npar->offload_flags = esw_cfg.offload_flags;
836 }
837
838 return 0;
839}
840
841static int
842qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
843 struct qlcnic_npar_info *npar, int pci_func)
844{
845 struct qlcnic_esw_func_cfg esw_cfg;
846 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
847 esw_cfg.pci_func = pci_func;
848 esw_cfg.vlan_id = npar->pvid;
849 esw_cfg.mac_override = npar->mac_override;
850 esw_cfg.discard_tagged = npar->discard_tagged;
851 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
852 esw_cfg.offload_flags = npar->offload_flags;
853 esw_cfg.promisc_mode = npar->promisc_mode;
854 if (qlcnic_config_switch_port(adapter, &esw_cfg))
855 return -EIO;
856
857 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
858 if (qlcnic_config_switch_port(adapter, &esw_cfg))
859 return -EIO;
860
861 return 0;
862}
863
864static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
865{
866 int i, err;
867 struct qlcnic_npar_info *npar;
868 struct qlcnic_info nic_info;
869 u8 pci_func;
870
871 if (qlcnic_82xx_check(adapter))
872 if (!adapter->need_fw_reset)
873 return 0;
874
875 /* Set the NPAR config data after FW reset */
876 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
877 npar = &adapter->npars[i];
878 pci_func = npar->pci_func;
879 memset(&nic_info, 0, sizeof(struct qlcnic_info));
880 err = qlcnic_get_nic_info(adapter,
881 &nic_info, pci_func);
882 if (err)
883 return err;
884 nic_info.min_tx_bw = npar->min_bw;
885 nic_info.max_tx_bw = npar->max_bw;
886 err = qlcnic_set_nic_info(adapter, &nic_info);
887 if (err)
888 return err;
889
890 if (npar->enable_pm) {
891 err = qlcnic_config_port_mirroring(adapter,
892 npar->dest_npar, 1,
893 pci_func);
894 if (err)
895 return err;
896 }
897 err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
898 if (err)
899 return err;
900 }
901 return 0;
902}
903
904static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
905{
906 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
907 u32 npar_state;
908
909 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
910 return 0;
911
912 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
913 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
914 msleep(1000);
915 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
916 }
917 if (!npar_opt_timeo) {
918 dev_err(&adapter->pdev->dev,
919 "Waiting for NPAR state to opertional timeout\n");
920 return -EIO;
921 }
922 return 0;
923}
924
925static int
926qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
927{
928 int err;
929
930 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
931 adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
932 return 0;
933
934 err = qlcnic_set_default_offload_settings(adapter);
935 if (err)
936 return err;
937
938 err = qlcnic_reset_npar_config(adapter);
939 if (err)
940 return err;
941
942 qlcnic_dev_set_npar_ready(adapter);
943
944 return err;
945}
946
947static int
948qlcnic_start_firmware(struct qlcnic_adapter *adapter)
949{
950 int err;
951
952 err = qlcnic_can_start_firmware(adapter);
953 if (err < 0)
954 return err;
955 else if (!err)
956 goto check_fw_status;
957
958 if (qlcnic_load_fw_file)
959 qlcnic_request_firmware(adapter);
960 else {
961 err = qlcnic_check_flash_fw_ver(adapter);
962 if (err)
963 goto err_out;
964
965 adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
966 }
967
968 err = qlcnic_need_fw_reset(adapter);
969 if (err == 0)
970 goto check_fw_status;
971
972 err = qlcnic_pinit_from_rom(adapter);
973 if (err)
974 goto err_out;
975
976 err = qlcnic_load_firmware(adapter);
977 if (err)
978 goto err_out;
979
980 qlcnic_release_firmware(adapter);
981 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
982
983check_fw_status:
984 err = qlcnic_check_fw_status(adapter);
985 if (err)
986 goto err_out;
987
988 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
989 qlcnic_idc_debug_info(adapter, 1);
990
991 err = qlcnic_check_eswitch_mode(adapter);
992 if (err) {
993 dev_err(&adapter->pdev->dev,
994 "Memory allocation failed for eswitch\n");
995 goto err_out;
996 }
997 err = qlcnic_set_mgmt_operations(adapter);
998 if (err)
999 goto err_out;
1000
1001 qlcnic_check_options(adapter);
1002 adapter->need_fw_reset = 0;
1003
1004 qlcnic_release_firmware(adapter);
1005 return 0;
1006
1007err_out:
1008 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1009 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1010
1011 qlcnic_release_firmware(adapter);
1012 return err;
1013}
1014
1015static int
1016qlcnic_request_irq(struct qlcnic_adapter *adapter)
1017{
1018 irq_handler_t handler;
1019 struct qlcnic_host_sds_ring *sds_ring;
1020 int err, ring;
1021
1022 unsigned long flags = 0;
1023 struct net_device *netdev = adapter->netdev;
1024 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1025
1026 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1027 handler = qlcnic_tmp_intr;
1028 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1029 flags |= IRQF_SHARED;
1030
1031 } else {
1032 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1033 handler = qlcnic_msix_intr;
1034 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1035 handler = qlcnic_msi_intr;
1036 else {
1037 flags |= IRQF_SHARED;
1038 handler = qlcnic_intr;
1039 }
1040 }
1041 adapter->irq = netdev->irq;
1042
1043 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1044 sds_ring = &recv_ctx->sds_rings[ring];
1045 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1046 err = request_irq(sds_ring->irq, handler,
1047 flags, sds_ring->name, sds_ring);
1048 if (err)
1049 return err;
1050 }
1051
1052 return 0;
1053}
1054
1055static void
1056qlcnic_free_irq(struct qlcnic_adapter *adapter)
1057{
1058 int ring;
1059 struct qlcnic_host_sds_ring *sds_ring;
1060
1061 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1062
1063 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1064 sds_ring = &recv_ctx->sds_rings[ring];
1065 free_irq(sds_ring->irq, sds_ring);
1066 }
1067}
1068
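/* Bring up the firmware context, receive rings, NAPI and link handling; requires the adapter to be attached. */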
1069static int
1070__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1071{
1072 int ring;
1073 u32 capab2;
1074
1075 struct qlcnic_host_rds_ring *rds_ring;
1076
1077 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1078 return -EIO;
1079
1080 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1081 return 0;
1082 if (qlcnic_set_eswitch_port_config(adapter))
1083 return -EIO;
1084
1085 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
1086 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
1087 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1088 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
1089 }
1090
1091 if (qlcnic_fw_create_ctx(adapter))
1092 return -EIO;
1093
1094 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1095 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1096 qlcnic_post_rx_buffers(adapter, rds_ring);
1097 }
1098
1099 qlcnic_set_multi(netdev);
1100 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1101
1102 adapter->ahw->linkup = 0;
1103
1104 if (adapter->max_sds_rings > 1)
1105 qlcnic_config_rss(adapter, 1);
1106
1107 qlcnic_config_intr_coalesce(adapter);
1108
1109 if (netdev->features & NETIF_F_LRO)
1110 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1111
1112 qlcnic_napi_enable(adapter);
1113
1114 qlcnic_linkevent_request(adapter, 1);
1115
1116 adapter->ahw->reset_context = 0;
1117 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1118 return 0;
1119}
1120
 1121/* Used during resume and by the firmware recovery module. */
1122
1123static int
1124qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1125{
1126 int err = 0;
1127
1128 rtnl_lock();
1129 if (netif_running(netdev))
1130 err = __qlcnic_up(adapter, netdev);
1131 rtnl_unlock();
1132
1133 return err;
1134}
1135
1136static void
1137__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1138{
1139 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1140 return;
1141
1142 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1143 return;
1144
1145 smp_mb();
1146 spin_lock(&adapter->tx_clean_lock);
1147 netif_carrier_off(netdev);
1148 netif_tx_disable(netdev);
1149
1150 qlcnic_free_mac_list(adapter);
1151
1152 if (adapter->fhash.fnum)
1153 qlcnic_delete_lb_filters(adapter);
1154
1155 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1156
1157 qlcnic_napi_disable(adapter);
1158
1159 qlcnic_fw_destroy_ctx(adapter);
1160 adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
1161
1162 qlcnic_reset_rx_buffers_list(adapter);
1163 qlcnic_release_tx_buffers(adapter);
1164 spin_unlock(&adapter->tx_clean_lock);
1165}
1166
 1167/* Used during suspend and by the firmware recovery module. */
1168
1169static void
1170qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1171{
1172 rtnl_lock();
1173 if (netif_running(netdev))
1174 __qlcnic_down(adapter, netdev);
1175 rtnl_unlock();
1176
1177}
1178
1179static int
1180qlcnic_attach(struct qlcnic_adapter *adapter)
1181{
1182 struct net_device *netdev = adapter->netdev;
1183 struct pci_dev *pdev = adapter->pdev;
1184 int err;
1185
1186 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1187 return 0;
1188
1189 err = qlcnic_napi_add(adapter, netdev);
1190 if (err)
1191 return err;
1192
1193 err = qlcnic_alloc_sw_resources(adapter);
1194 if (err) {
1195 dev_err(&pdev->dev, "Error in setting sw resources\n");
1196 goto err_out_napi_del;
1197 }
1198
1199 err = qlcnic_alloc_hw_resources(adapter);
1200 if (err) {
1201 dev_err(&pdev->dev, "Error in setting hw resources\n");
1202 goto err_out_free_sw;
1203 }
1204
1205 err = qlcnic_request_irq(adapter);
1206 if (err) {
1207 dev_err(&pdev->dev, "failed to setup interrupt\n");
1208 goto err_out_free_hw;
1209 }
1210
1211 qlcnic_create_sysfs_entries(adapter);
1212
1213 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1214 return 0;
1215
1216err_out_free_hw:
1217 qlcnic_free_hw_resources(adapter);
1218err_out_free_sw:
1219 qlcnic_free_sw_resources(adapter);
1220err_out_napi_del:
1221 qlcnic_napi_del(adapter);
1222 return err;
1223}
1224
1225static void
1226qlcnic_detach(struct qlcnic_adapter *adapter)
1227{
1228 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1229 return;
1230
1231 qlcnic_remove_sysfs_entries(adapter);
1232
1233 qlcnic_free_hw_resources(adapter);
1234 qlcnic_release_rx_buffers(adapter);
1235 qlcnic_free_irq(adapter);
1236 qlcnic_napi_del(adapter);
1237 qlcnic_free_sw_resources(adapter);
1238
1239 adapter->is_up = 0;
1240}
1241
1242void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1243{
1244 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1245 struct qlcnic_host_sds_ring *sds_ring;
1246 int ring;
1247
1248 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1249 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1250 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1251 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1252 qlcnic_disable_int(sds_ring);
1253 }
1254 }
1255
1256 qlcnic_fw_destroy_ctx(adapter);
1257
1258 qlcnic_detach(adapter);
1259
1260 adapter->ahw->diag_test = 0;
1261 adapter->max_sds_rings = max_sds_rings;
1262
1263 if (qlcnic_attach(adapter))
1264 goto out;
1265
1266 if (netif_running(netdev))
1267 __qlcnic_up(adapter, netdev);
1268out:
1269 netif_device_attach(netdev);
1270}
1271
1272static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1273{
1274 int err = 0;
1275 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1276 GFP_KERNEL);
1277 if (!adapter->ahw) {
1278 dev_err(&adapter->pdev->dev,
1279 "Failed to allocate recv ctx resources for adapter\n");
1280 err = -ENOMEM;
1281 goto err_out;
1282 }
1283 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1284 GFP_KERNEL);
1285 if (!adapter->recv_ctx) {
1286 dev_err(&adapter->pdev->dev,
1287 "Failed to allocate recv ctx resources for adapter\n");
1288 kfree(adapter->ahw);
1289 adapter->ahw = NULL;
1290 err = -ENOMEM;
1291 goto err_out;
1292 }
1293 /* Initialize interrupt coalesce parameters */
1294 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1295 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1296 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1297err_out:
1298 return err;
1299}
1300
1301static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1302{
1303 kfree(adapter->recv_ctx);
1304 adapter->recv_ctx = NULL;
1305
1306 if (adapter->ahw->fw_dump.tmpl_hdr) {
1307 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1308 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1309 }
1310 kfree(adapter->ahw);
1311 adapter->ahw = NULL;
1312}
1313
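/* Tear down the regular data path and re-attach with a single status ring for diagnostic (interrupt or loopback) tests. */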
1314int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1315{
1316 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1317 struct qlcnic_host_sds_ring *sds_ring;
1318 struct qlcnic_host_rds_ring *rds_ring;
1319 int ring;
1320 int ret;
1321
1322 netif_device_detach(netdev);
1323
1324 if (netif_running(netdev))
1325 __qlcnic_down(adapter, netdev);
1326
1327 qlcnic_detach(adapter);
1328
1329 adapter->max_sds_rings = 1;
1330 adapter->ahw->diag_test = test;
1331
1332 ret = qlcnic_attach(adapter);
1333 if (ret) {
1334 netif_device_attach(netdev);
1335 return ret;
1336 }
1337
1338 ret = qlcnic_fw_create_ctx(adapter);
1339 if (ret) {
1340 qlcnic_detach(adapter);
1341 netif_device_attach(netdev);
1342 return ret;
1343 }
1344
1345 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1346 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1347 qlcnic_post_rx_buffers(adapter, rds_ring);
1348 }
1349
1350 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1351 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1352 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1353 qlcnic_enable_int(sds_ring);
1354 }
1355 }
1356
1357 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1358 adapter->ahw->loopback_state = 0;
1359 qlcnic_linkevent_request(adapter, 1);
1360 }
1361
1362 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1363
1364 return 0;
1365}
1366
1367/* Reset context in hardware only */
1368static int
1369qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1370{
1371 struct net_device *netdev = adapter->netdev;
1372
1373 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1374 return -EBUSY;
1375
1376 netif_device_detach(netdev);
1377
1378 qlcnic_down(adapter, netdev);
1379
1380 qlcnic_up(adapter, netdev);
1381
1382 netif_device_attach(netdev);
1383
1384 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1385 return 0;
1386}
1387
1388int
1389qlcnic_reset_context(struct qlcnic_adapter *adapter)
1390{
1391 int err = 0;
1392 struct net_device *netdev = adapter->netdev;
1393
1394 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1395 return -EBUSY;
1396
1397 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1398
1399 netif_device_detach(netdev);
1400
1401 if (netif_running(netdev))
1402 __qlcnic_down(adapter, netdev);
1403
1404 qlcnic_detach(adapter);
1405
1406 if (netif_running(netdev)) {
1407 err = qlcnic_attach(adapter);
1408 if (!err) {
1409 __qlcnic_up(adapter, netdev);
1410 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1411 }
1412 }
1413
1414 netif_device_attach(netdev);
1415 }
1416
1417 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1418 return err;
1419}
1420
1421static int
1422qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1423 int pci_using_dac)
1424{
1425 int err;
1426 struct pci_dev *pdev = adapter->pdev;
1427
1428 adapter->ahw->mc_enabled = 0;
1429 adapter->ahw->max_mc_count = 38;
1430
1431 netdev->netdev_ops = &qlcnic_netdev_ops;
1432 netdev->watchdog_timeo = 5*HZ;
1433
1434 qlcnic_change_mtu(netdev, netdev->mtu);
1435
1436 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1437
1438 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1439 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1440
1441 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1442 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1443 if (pci_using_dac == 1)
1444 netdev->hw_features |= NETIF_F_HIGHDMA;
1445
1446 netdev->vlan_features = netdev->hw_features;
1447
1448 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1449 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1450 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1451 netdev->hw_features |= NETIF_F_LRO;
1452
1453 netdev->features |= netdev->hw_features |
1454 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1455
1456 netdev->irq = adapter->msix_entries[0].vector;
1457
1458 err = register_netdev(netdev);
1459 if (err) {
1460 dev_err(&pdev->dev, "failed to register net device\n");
1461 return err;
1462 }
1463
1464 return 0;
1465}
1466
1467static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
1468{
1469 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1470 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1471 *pci_using_dac = 1;
1472 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1473 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1474 *pci_using_dac = 0;
1475 else {
1476 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1477 return -EIO;
1478 }
1479
1480 return 0;
1481}
1482
1483static int
1484qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1485{
1486 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1487 GFP_KERNEL);
1488
1489 if (adapter->msix_entries)
1490 return 0;
1491
1492 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1493 return -ENOMEM;
1494}
1495
1496static int
1497qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1498{
1499 struct net_device *netdev = NULL;
1500 struct qlcnic_adapter *adapter = NULL;
1501 int err, pci_using_dac = -1;
1502 uint8_t revision_id;
1503 char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
1504
1505 err = pci_enable_device(pdev);
1506 if (err)
1507 return err;
1508
1509 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1510 err = -ENODEV;
1511 goto err_out_disable_pdev;
1512 }
1513
1514 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1515 if (err)
1516 goto err_out_disable_pdev;
1517
1518 err = pci_request_regions(pdev, qlcnic_driver_name);
1519 if (err)
1520 goto err_out_disable_pdev;
1521
1522 pci_set_master(pdev);
1523 pci_enable_pcie_error_reporting(pdev);
1524
1525 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1526 if (!netdev) {
1527 err = -ENOMEM;
1528 goto err_out_free_res;
1529 }
1530
1531 SET_NETDEV_DEV(netdev, &pdev->dev);
1532
1533 adapter = netdev_priv(netdev);
1534 adapter->netdev = netdev;
1535 adapter->pdev = pdev;
1536
1537 err = qlcnic_alloc_adapter_resources(adapter);
1538 if (err)
1539 goto err_out_free_netdev;
1540
1541 adapter->dev_rst_time = jiffies;
1542 revision_id = pdev->revision;
1543 adapter->ahw->revision_id = revision_id;
1544 adapter->mac_learn = qlcnic_mac_learn;
1545
1546 rwlock_init(&adapter->ahw->crb_lock);
1547 mutex_init(&adapter->ahw->mem_lock);
1548
1549 spin_lock_init(&adapter->tx_clean_lock);
1550 INIT_LIST_HEAD(&adapter->mac_list);
1551
1552 err = qlcnic_setup_pci_map(pdev, adapter->ahw);
1553 if (err)
1554 goto err_out_free_hw;
1555 qlcnic_check_vf(adapter);
1556
1557 /* This will be reset for mezz cards */
1558 adapter->portnum = adapter->ahw->pci_func;
1559
1560 err = qlcnic_get_board_info(adapter);
1561 if (err) {
1562 dev_err(&pdev->dev, "Error getting board config info.\n");
1563 goto err_out_iounmap;
1564 }
1565
1566 err = qlcnic_setup_idc_param(adapter);
1567 if (err)
1568 goto err_out_iounmap;
1569
1570 adapter->flags |= QLCNIC_NEED_FLR;
1571
1572 err = adapter->nic_ops->start_firmware(adapter);
1573 if (err) {
1574 dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
1575 "\t\tIf reboot doesn't help, try flashing the card\n");
1576 goto err_out_maintenance_mode;
1577 }
1578
1579 if (qlcnic_read_mac_addr(adapter))
1580 dev_warn(&pdev->dev, "failed to read mac addr\n");
1581
1582 if (adapter->portnum == 0) {
1583 qlcnic_get_board_name(adapter, board_name);
1584 pr_info("%s: %s Board Chip rev 0x%x\n",
1585 module_name(THIS_MODULE),
1586 board_name, adapter->ahw->revision_id);
1587 }
1588
1589 qlcnic_clear_stats(adapter);
1590
1591 err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
1592 if (err)
1593 goto err_out_decr_ref;
1594
1595 qlcnic_setup_intr(adapter);
1596
1597 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1598 if (err)
1599 goto err_out_disable_msi;
1600
1601 pci_set_drvdata(pdev, adapter);
1602
1603 if (qlcnic_82xx_check(adapter))
1604 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
1605 FW_POLL_DELAY);
1606
1607 switch (adapter->ahw->port_type) {
1608 case QLCNIC_GBE:
1609 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1610 adapter->netdev->name);
1611 break;
1612 case QLCNIC_XGBE:
1613 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1614 adapter->netdev->name);
1615 break;
1616 }
1617
1618 if (adapter->mac_learn)
1619 qlcnic_alloc_lb_filters_mem(adapter);
1620
1621 qlcnic_create_diag_entries(adapter);
1622
1623 return 0;
1624
1625err_out_disable_msi:
1626 qlcnic_teardown_intr(adapter);
1627 kfree(adapter->msix_entries);
1628
1629err_out_decr_ref:
1630 qlcnic_clr_all_drv_state(adapter, 0);
1631
1632err_out_iounmap:
1633 qlcnic_cleanup_pci_map(adapter);
1634
1635err_out_free_hw:
1636 qlcnic_free_adapter_resources(adapter);
1637
1638err_out_free_netdev:
1639 free_netdev(netdev);
1640
1641err_out_free_res:
1642 pci_release_regions(pdev);
1643
1644err_out_disable_pdev:
1645 pci_set_drvdata(pdev, NULL);
1646 pci_disable_device(pdev);
1647 return err;
1648
1649err_out_maintenance_mode:
1650 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
1651 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
1652 err = register_netdev(netdev);
1653 if (err) {
1654 dev_err(&pdev->dev, "failed to register net device\n");
1655 goto err_out_decr_ref;
1656 }
1657 pci_set_drvdata(pdev, adapter);
1658 qlcnic_create_diag_entries(adapter);
1659 return 0;
1660}
1661
1662static void qlcnic_remove(struct pci_dev *pdev)
1663{
1664 struct qlcnic_adapter *adapter;
1665 struct net_device *netdev;
1666
1667 adapter = pci_get_drvdata(pdev);
1668 if (adapter == NULL)
1669 return;
1670
1671 netdev = adapter->netdev;
1672
1673 qlcnic_cancel_fw_work(adapter);
1674
1675 unregister_netdev(netdev);
1676
1677 qlcnic_detach(adapter);
1678
1679 if (adapter->npars != NULL)
1680 kfree(adapter->npars);
1681 if (adapter->eswitch != NULL)
1682 kfree(adapter->eswitch);
1683
1684 if (qlcnic_82xx_check(adapter))
1685 qlcnic_clr_all_drv_state(adapter, 0);
1686
1687 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1688
1689 qlcnic_free_lb_filters_mem(adapter);
1690
1691 qlcnic_teardown_intr(adapter);
1692 kfree(adapter->msix_entries);
1693
1694 qlcnic_remove_diag_entries(adapter);
1695
1696 qlcnic_cleanup_pci_map(adapter);
1697
1698 qlcnic_release_firmware(adapter);
1699
1700 pci_disable_pcie_error_reporting(pdev);
1701 pci_release_regions(pdev);
1702 pci_disable_device(pdev);
1703 pci_set_drvdata(pdev, NULL);
1704
1705 qlcnic_free_adapter_resources(adapter);
1706 free_netdev(netdev);
1707}
1708static int __qlcnic_shutdown(struct pci_dev *pdev)
1709{
1710 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1711 struct net_device *netdev = adapter->netdev;
1712 int retval;
1713
1714 netif_device_detach(netdev);
1715
1716 qlcnic_cancel_fw_work(adapter);
1717
1718 if (netif_running(netdev))
1719 qlcnic_down(adapter, netdev);
1720
1721 if (qlcnic_82xx_check(adapter))
1722 qlcnic_clr_all_drv_state(adapter, 0);
1723
1724 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1725
1726 retval = pci_save_state(pdev);
1727 if (retval)
1728 return retval;
1729
1730 if (qlcnic_82xx_check(adapter)) {
1731 if (qlcnic_wol_supported(adapter)) {
1732 pci_enable_wake(pdev, PCI_D3cold, 1);
1733 pci_enable_wake(pdev, PCI_D3hot, 1);
1734 }
1735 }
1736
1737 return 0;
1738}
1739
1740static void qlcnic_shutdown(struct pci_dev *pdev)
1741{
1742 if (__qlcnic_shutdown(pdev))
1743 return;
1744
1745 pci_disable_device(pdev);
1746}
1747
1748#ifdef CONFIG_PM
1749static int
1750qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1751{
1752 int retval;
1753
1754 retval = __qlcnic_shutdown(pdev);
1755 if (retval)
1756 return retval;
1757
1758 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1759 return 0;
1760}
1761
1762static int
1763qlcnic_resume(struct pci_dev *pdev)
1764{
1765 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1766 struct net_device *netdev = adapter->netdev;
1767 int err;
1768
1769 err = pci_enable_device(pdev);
1770 if (err)
1771 return err;
1772
1773 pci_set_power_state(pdev, PCI_D0);
1774 pci_set_master(pdev);
1775 pci_restore_state(pdev);
1776
1777 err = adapter->nic_ops->start_firmware(adapter);
1778 if (err) {
1779 dev_err(&pdev->dev, "failed to start firmware\n");
1780 return err;
1781 }
1782
1783 if (netif_running(netdev)) {
1784 err = qlcnic_up(adapter, netdev);
1785 if (err)
1786 goto done;
1787
1788 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1789 }
1790done:
1791 netif_device_attach(netdev);
1792 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1793 return 0;
1794}
1795#endif
1796
1797static int qlcnic_open(struct net_device *netdev)
1798{
1799 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1800 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1801 int err;
1802
1803 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
1804 netdev_err(netdev, "Device in FAILED state\n");
1805 return -EIO;
1806 }
1807
1808 netif_carrier_off(netdev);
1809
1810 err = qlcnic_attach(adapter);
1811 if (err)
1812 return err;
1813
1814 err = __qlcnic_up(adapter, netdev);
1815 if (err)
1816 goto err_out;
1817
1818 netif_start_queue(netdev);
1819
1820 return 0;
1821
1822err_out:
1823 qlcnic_detach(adapter);
1824 return err;
1825}
1826
1827/*
1828 * qlcnic_close - Disables a network interface entry point
1829 */
1830static int qlcnic_close(struct net_device *netdev)
1831{
1832 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1833
1834 __qlcnic_down(adapter, netdev);
1835 return 0;
1836}
1837
1838void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1839{
1840 void *head;
1841 int i;
1842
1843 if (adapter->fhash.fmax && adapter->fhash.fhead)
1844 return;
1845
1846 spin_lock_init(&adapter->mac_learn_lock);
1847
1848 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1849 GFP_KERNEL);
1850 if (!head)
1851 return;
1852
1853 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1854 adapter->fhash.fhead = head;
1855
1856 for (i = 0; i < adapter->fhash.fmax; i++)
1857 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1858}
1859
1860static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1861{
1862 if (adapter->fhash.fmax && adapter->fhash.fhead)
1863 kfree(adapter->fhash.fhead);
1864
1865 adapter->fhash.fhead = NULL;
1866 adapter->fhash.fmax = 0;
1867}
1868
1869static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1870{
1871 struct net_device *netdev = adapter->netdev;
1872 u32 temp_state, temp_val, temp = 0;
1873 int rv = 0;
1874
1875 if (qlcnic_82xx_check(adapter))
1876 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1877
1878 temp_state = qlcnic_get_temp_state(temp);
1879 temp_val = qlcnic_get_temp_val(temp);
1880
1881 if (temp_state == QLCNIC_TEMP_PANIC) {
1882 dev_err(&netdev->dev,
1883 "Device temperature %d degrees C exceeds"
1884 " maximum allowed. Hardware has been shut down.\n",
1885 temp_val);
1886 rv = 1;
1887 } else if (temp_state == QLCNIC_TEMP_WARN) {
1888 if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
1889 dev_err(&netdev->dev,
1890 "Device temperature %d degrees C "
1891 "exceeds operating range."
1892 " Immediate action needed.\n",
1893 temp_val);
1894 }
1895 } else {
1896 if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
1897 dev_info(&netdev->dev,
1898 "Device temperature is now %d degrees C"
1899 " in normal range.\n", temp_val);
1900 }
1901 }
1902 adapter->ahw->temp = temp_state;
1903 return rv;
1904}
1905
1906static void qlcnic_tx_timeout(struct net_device *netdev)
1907{
1908 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1909
1910 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1911 return;
1912
1913 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1914
1915 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1916 adapter->need_fw_reset = 1;
1917 else
1918 adapter->ahw->reset_context = 1;
1919}
1920
1921static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1922{
1923 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1924 struct net_device_stats *stats = &netdev->stats;
1925
1926 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1927 stats->tx_packets = adapter->stats.xmitfinished;
1928 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1929 stats->tx_bytes = adapter->stats.txbytes;
1930 stats->rx_dropped = adapter->stats.rxdropped;
1931 stats->tx_dropped = adapter->stats.txdropped;
1932
1933 return stats;
1934}
1935
1936static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1937{
1938 u32 status;
1939
1940 status = readl(adapter->isr_int_vec);
1941
1942 if (!(status & adapter->ahw->int_vec_bit))
1943 return IRQ_NONE;
1944
1945 /* check interrupt state machine, to be sure */
1946 status = readl(adapter->crb_int_state_reg);
1947 if (!ISR_LEGACY_INT_TRIGGERED(status))
1948 return IRQ_NONE;
1949
1950 writel(0xffffffff, adapter->tgt_status_reg);
1951 /* read twice to ensure write is flushed */
1952 readl(adapter->isr_int_vec);
1953 readl(adapter->isr_int_vec);
1954
1955 return IRQ_HANDLED;
1956}
1957
1958static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1959{
1960 struct qlcnic_host_sds_ring *sds_ring = data;
1961 struct qlcnic_adapter *adapter = sds_ring->adapter;
1962
1963 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1964 goto done;
1965 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1966 writel(0xffffffff, adapter->tgt_status_reg);
1967 goto done;
1968 }
1969
1970 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1971 return IRQ_NONE;
1972
1973done:
1974 adapter->ahw->diag_cnt++;
1975 qlcnic_enable_int(sds_ring);
1976 return IRQ_HANDLED;
1977}
1978
1979static irqreturn_t qlcnic_intr(int irq, void *data)
1980{
1981 struct qlcnic_host_sds_ring *sds_ring = data;
1982 struct qlcnic_adapter *adapter = sds_ring->adapter;
1983
1984 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1985 return IRQ_NONE;
1986
1987 napi_schedule(&sds_ring->napi);
1988
1989 return IRQ_HANDLED;
1990}
1991
1992static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1993{
1994 struct qlcnic_host_sds_ring *sds_ring = data;
1995 struct qlcnic_adapter *adapter = sds_ring->adapter;
1996
1997 /* clear interrupt */
1998 writel(0xffffffff, adapter->tgt_status_reg);
1999
2000 napi_schedule(&sds_ring->napi);
2001 return IRQ_HANDLED;
2002}
2003
2004static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2005{
2006 struct qlcnic_host_sds_ring *sds_ring = data;
2007
2008 napi_schedule(&sds_ring->napi);
2009 return IRQ_HANDLED;
2010}
2011
2012#ifdef CONFIG_NET_POLL_CONTROLLER
2013static void qlcnic_poll_controller(struct net_device *netdev)
2014{
2015 int ring;
2016 struct qlcnic_host_sds_ring *sds_ring;
2017 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2018 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2019
2020 disable_irq(adapter->irq);
2021 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2022 sds_ring = &recv_ctx->sds_rings[ring];
2023 qlcnic_intr(adapter->irq, sds_ring);
2024 }
2025 enable_irq(adapter->irq);
2026}
2027#endif
2028
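/* Record the port number, an event encoding and the time since the last reset in the IDC scratch register for debugging. */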
2029static void
2030qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2031{
2032 u32 val;
2033
2034 val = adapter->portnum & 0xf;
2035 val |= encoding << 7;
2036 val |= (jiffies - adapter->dev_rst_time) << 8;
2037
2038 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2039 adapter->dev_rst_time = jiffies;
2040}
2041
2042static int
2043qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2044{
2045 u32 val;
2046
2047 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2048 state != QLCNIC_DEV_NEED_QUISCENT);
2049
2050 if (qlcnic_api_lock(adapter))
2051 return -EIO;
2052
2053 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2054
2055 if (state == QLCNIC_DEV_NEED_RESET)
2056 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2057 else if (state == QLCNIC_DEV_NEED_QUISCENT)
2058 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2059
2060 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2061
2062 qlcnic_api_unlock(adapter);
2063
2064 return 0;
2065}
2066
2067static int
2068qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2069{
2070 u32 val;
2071
2072 if (qlcnic_api_lock(adapter))
2073 return -EBUSY;
2074
2075 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2076 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2077 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2078
2079 qlcnic_api_unlock(adapter);
2080
2081 return 0;
2082}
2083
2084static void
2085qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2086{
2087 u32 val;
2088
2089 if (qlcnic_api_lock(adapter))
2090 goto err;
2091
2092 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2093 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2094 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2095
2096 if (failed) {
2097 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2098 dev_info(&adapter->pdev->dev,
2099 "Device state set to Failed. Please Reboot\n");
2100 } else if (!(val & 0x11111111))
2101 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2102
2103 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2104 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2105 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2106
2107 qlcnic_api_unlock(adapter);
2108err:
2109 adapter->fw_fail_cnt = 0;
2110 adapter->flags &= ~QLCNIC_FW_HANG;
2111 clear_bit(__QLCNIC_START_FW, &adapter->state);
2112 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2113}
2114
2115/* Grab api lock, before checking state */
2116static int
2117qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2118{
2119 int act, state, active_mask;
2120
2121 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2122 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2123
2124 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2125 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2126 act = act & active_mask;
2127 }
2128
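	/*
	 * DRV_ACTIVE and DRV_STATE carry one 4-bit field per PCI function
	 * (hence the portnum * 4 shifts elsewhere); 0x11111111 masks bit 0
	 * of every field. The check below passes once every active function
	 * has posted its reset ack (bit 0) or, via the >> 1, apparently its
	 * quiescent ack, in DRV_STATE.
	 */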
2129 if (((state & 0x11111111) == (act & 0x11111111)) ||
2130 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2131 return 0;
2132 else
2133 return 1;
2134}
2135
2136static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2137{
2138 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2139
2140 if (val != QLCNIC_DRV_IDC_VER) {
2141 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2142 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2143 }
2144
2145 return 0;
2146}
2147
2148static int
2149qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2150{
2151 u32 val, prev_state;
2152 u8 dev_init_timeo = adapter->dev_init_timeo;
2153 u8 portnum = adapter->portnum;
2154 u8 ret;
2155
2156 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2157 return 1;
2158
2159 if (qlcnic_api_lock(adapter))
2160 return -1;
2161
2162 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2163 if (!(val & (1 << (portnum * 4)))) {
2164 QLC_DEV_SET_REF_CNT(val, portnum);
2165 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2166 }
2167
2168 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2169 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2170
2171 switch (prev_state) {
2172 case QLCNIC_DEV_COLD:
2173 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2174 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2175 qlcnic_idc_debug_info(adapter, 0);
2176 qlcnic_api_unlock(adapter);
2177 return 1;
2178
2179 case QLCNIC_DEV_READY:
2180 ret = qlcnic_check_idc_ver(adapter);
2181 qlcnic_api_unlock(adapter);
2182 return ret;
2183
2184 case QLCNIC_DEV_NEED_RESET:
2185 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2186 QLC_DEV_SET_RST_RDY(val, portnum);
2187 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2188 break;
2189
2190 case QLCNIC_DEV_NEED_QUISCENT:
2191 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2192 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2193 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2194 break;
2195
2196 case QLCNIC_DEV_FAILED:
2197 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2198 qlcnic_api_unlock(adapter);
2199 return -1;
2200
2201 case QLCNIC_DEV_INITIALIZING:
2202 case QLCNIC_DEV_QUISCENT:
2203 break;
2204 }
2205
2206 qlcnic_api_unlock(adapter);
2207
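	/*
	 * Another function owns the firmware init; poll the device state
	 * once a second until it reaches READY or dev_init_timeo expires.
	 */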
2208 do {
2209 msleep(1000);
2210 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2211
2212 if (prev_state == QLCNIC_DEV_QUISCENT)
2213 continue;
2214 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2215
2216 if (!dev_init_timeo) {
2217 dev_err(&adapter->pdev->dev,
2218 			"Timed out waiting for the device to initialize\n");
2219 return -1;
2220 }
2221
2222 if (qlcnic_api_lock(adapter))
2223 return -1;
2224
2225 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2226 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2227 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2228
2229 ret = qlcnic_check_idc_ver(adapter);
2230 qlcnic_api_unlock(adapter);
2231
2232 return ret;
2233}
2234
2235static void
2236qlcnic_fwinit_work(struct work_struct *work)
2237{
2238 struct qlcnic_adapter *adapter = container_of(work,
2239 struct qlcnic_adapter, fw_work.work);
2240 u32 dev_state = 0xf;
2241 u32 val;
2242
2243 if (qlcnic_api_lock(adapter))
2244 goto err_ret;
2245
2246 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2247 if (dev_state == QLCNIC_DEV_QUISCENT ||
2248 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2249 qlcnic_api_unlock(adapter);
2250 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2251 FW_POLL_DELAY * 2);
2252 return;
2253 }
2254
2255 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
2256 qlcnic_api_unlock(adapter);
2257 goto wait_npar;
2258 }
2259
2260 if (dev_state == QLCNIC_DEV_INITIALIZING ||
2261 dev_state == QLCNIC_DEV_READY) {
2262 dev_info(&adapter->pdev->dev, "Detected state change from "
2263 "DEV_NEED_RESET, skipping ack check\n");
2264 goto skip_ack_check;
2265 }
2266
2267 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2268 		dev_info(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2269 adapter->reset_ack_timeo);
2270 goto skip_ack_check;
2271 }
2272
2273 if (!qlcnic_check_drv_state(adapter)) {
2274skip_ack_check:
2275 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2276
2277 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2278 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2279 QLCNIC_DEV_INITIALIZING);
2280 set_bit(__QLCNIC_START_FW, &adapter->state);
2281 QLCDB(adapter, DRV, "Restarting fw\n");
2282 qlcnic_idc_debug_info(adapter, 0);
2283 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2284 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2285 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2286 }
2287
2288 qlcnic_api_unlock(adapter);
2289
2290 rtnl_lock();
2291 if (adapter->ahw->fw_dump.enable &&
2292 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2293 QLCDB(adapter, DRV, "Take FW dump\n");
2294 qlcnic_dump_fw(adapter);
2295 adapter->flags |= QLCNIC_FW_HANG;
2296 }
2297 rtnl_unlock();
2298
2299 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
2300 if (!adapter->nic_ops->start_firmware(adapter)) {
2301 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2302 adapter->fw_wait_cnt = 0;
2303 return;
2304 }
2305 goto err_ret;
2306 }
2307
2308 qlcnic_api_unlock(adapter);
2309
2310wait_npar:
2311 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2312 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2313
2314 switch (dev_state) {
2315 case QLCNIC_DEV_READY:
2316 if (!adapter->nic_ops->start_firmware(adapter)) {
2317 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2318 adapter->fw_wait_cnt = 0;
2319 return;
2320 }
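		/* fall through - a failed firmware start is treated like DEV_FAILED */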
2321 case QLCNIC_DEV_FAILED:
2322 break;
2323 default:
2324 qlcnic_schedule_work(adapter,
2325 qlcnic_fwinit_work, FW_POLL_DELAY);
2326 return;
2327 }
2328
2329err_ret:
2330 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2331 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2332 netif_device_attach(adapter->netdev);
2333 qlcnic_clr_all_drv_state(adapter, 0);
2334}
2335
2336static void
2337qlcnic_detach_work(struct work_struct *work)
2338{
2339 struct qlcnic_adapter *adapter = container_of(work,
2340 struct qlcnic_adapter, fw_work.work);
2341 struct net_device *netdev = adapter->netdev;
2342 u32 status;
2343
2344 netif_device_detach(netdev);
2345
2346 	/* Don't grab the rtnl lock while in quiescent mode */
2347 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2348 if (netif_running(netdev))
2349 __qlcnic_down(adapter, netdev);
2350 } else
2351 qlcnic_down(adapter, netdev);
2352
2353 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2354
2355 if (status & QLCNIC_RCODE_FATAL_ERROR) {
2356 dev_err(&adapter->pdev->dev,
2357 "Detaching the device: peg halt status1=0x%x\n",
2358 status);
2359
2360 if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
2361 dev_err(&adapter->pdev->dev,
2362 				"On-board active cooling fan failed. "
2363 "Device has been halted.\n");
2364 dev_err(&adapter->pdev->dev,
2365 "Replace the adapter.\n");
2366 }
2367
2368 goto err_ret;
2369 }
2370
2371 if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
2372 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
2373 adapter->ahw->temp);
2374 goto err_ret;
2375 }
2376
2377 	/* Don't ack if this instance is the reset owner */
2378 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2379 if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
2380 dev_err(&adapter->pdev->dev,
2381 				"Failed to set driver state, "
2382 				"detaching the device.\n");
2383 goto err_ret;
2384 }
2385 }
2386
2387 adapter->fw_wait_cnt = 0;
2388
2389 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2390
2391 return;
2392
2393err_ret:
2394 netif_device_attach(netdev);
2395 qlcnic_clr_all_drv_state(adapter, 1);
2396}
2397
2398/* Transition NPAR state to non-operational */
2399static void
2400qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2401{
2402 u32 state;
2403
2404 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2405 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2406 return;
2407
2408 if (qlcnic_api_lock(adapter))
2409 return;
2410 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2411 qlcnic_api_unlock(adapter);
2412}
2413
2414/* Transition to RESET state from READY state only */
2415void
2416qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2417{
2418 u32 state, xg_val = 0, gb_val = 0;
2419
2420 qlcnic_xg_set_xg0_mask(xg_val);
2421 qlcnic_xg_set_xg1_mask(xg_val);
2422 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
2423 qlcnic_gb_set_gb0_mask(gb_val);
2424 qlcnic_gb_set_gb1_mask(gb_val);
2425 qlcnic_gb_set_gb2_mask(gb_val);
2426 qlcnic_gb_set_gb3_mask(gb_val);
2427 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
2428 dev_info(&adapter->pdev->dev, "Pause control frames disabled"
2429 " on all ports\n");
2430 adapter->need_fw_reset = 1;
2431 if (qlcnic_api_lock(adapter))
2432 return;
2433
2434 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2435 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
2436 netdev_err(adapter->netdev,
2437 			   "Device is in FAILED state. Please reboot.\n");
2438 qlcnic_api_unlock(adapter);
2439 return;
2440 }
2441
2442 if (state == QLCNIC_DEV_READY) {
2443 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2444 adapter->flags |= QLCNIC_FW_RESET_OWNER;
2445 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2446 qlcnic_idc_debug_info(adapter, 0);
2447 }
2448
2449 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2450 qlcnic_api_unlock(adapter);
2451}
2452
2453/* Transition to NPAR READY state from NPAR NOT READY state */
2454static void
2455qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2456{
2457 if (qlcnic_api_lock(adapter))
2458 return;
2459
2460 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2461 QLCDB(adapter, DRV, "NPAR operational state set\n");
2462
2463 qlcnic_api_unlock(adapter);
2464}
2465
2466static void
2467qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2468 work_func_t func, int delay)
2469{
2470 if (test_bit(__QLCNIC_AER, &adapter->state))
2471 return;
2472
2473 INIT_DELAYED_WORK(&adapter->fw_work, func);
2474 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2475 round_jiffies_relative(delay));
2476}
2477
2478static void
2479qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2480{
2481 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2482 msleep(10);
2483
2484 if (!adapter->fw_work.work.func)
2485 return;
2486
2487 cancel_delayed_work_sync(&adapter->fw_work);
2488}
2489
2490static void
2491qlcnic_attach_work(struct work_struct *work)
2492{
2493 struct qlcnic_adapter *adapter = container_of(work,
2494 struct qlcnic_adapter, fw_work.work);
2495 struct net_device *netdev = adapter->netdev;
2496 u32 npar_state;
2497
2498 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
2499 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2500 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2501 qlcnic_clr_all_drv_state(adapter, 0);
2502 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2503 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2504 FW_POLL_DELAY);
2505 else
2506 goto attach;
2507 		QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2508 return;
2509 }
2510attach:
2511 if (netif_running(netdev)) {
2512 if (qlcnic_up(adapter, netdev))
2513 goto done;
2514
2515 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2516 }
2517
2518done:
2519 netif_device_attach(netdev);
2520 adapter->fw_fail_cnt = 0;
2521 adapter->flags &= ~QLCNIC_FW_HANG;
2522 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2523
2524 if (!qlcnic_clr_drv_state(adapter))
2525 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2526 FW_POLL_DELAY);
2527}
2528
2529static int
2530qlcnic_check_health(struct qlcnic_adapter *adapter)
2531{
2532 u32 state = 0, heartbeat;
2533 u32 peg_status;
2534
2535 if (qlcnic_check_temp(adapter))
2536 goto detach;
2537
2538 if (adapter->need_fw_reset)
2539 qlcnic_dev_request_reset(adapter);
2540
2541 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2542 if (state == QLCNIC_DEV_NEED_RESET) {
2543 qlcnic_set_npar_non_operational(adapter);
2544 adapter->need_fw_reset = 1;
2545 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
2546 goto detach;
2547
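	/*
	 * The PEG alive counter keeps changing while firmware is healthy;
	 * if it stops changing, count failed polls toward FW_FAIL_THRESH
	 * before declaring a firmware hang.
	 */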
2548 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2549 if (heartbeat != adapter->heartbeat) {
2550 adapter->heartbeat = heartbeat;
2551 adapter->fw_fail_cnt = 0;
2552 if (adapter->need_fw_reset)
2553 goto detach;
2554
2555 if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
2556 qlcnic_reset_hw_context(adapter);
2557 adapter->netdev->trans_start = jiffies;
2558 }
2559
2560 return 0;
2561 }
2562
2563 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2564 return 0;
2565
2566 adapter->flags |= QLCNIC_FW_HANG;
2567
2568 qlcnic_dev_request_reset(adapter);
2569
2570 if (qlcnic_auto_fw_reset)
2571 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2572
2573 dev_err(&adapter->pdev->dev, "firmware hang detected\n");
2574 dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
2575 "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
2576 "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
2577 "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
2578 "PEG_NET_4_PC: 0x%x\n",
2579 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
2580 QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
2581 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
2582 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
2583 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
2584 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
2585 QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
2586 peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2587 if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
2588 dev_err(&adapter->pdev->dev,
2589 "Firmware aborted with error code 0x00006700. "
2590 "Device is being reset.\n");
2591detach:
2592 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2593 QLCNIC_DEV_NEED_RESET;
2594
2595 if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
2596 &adapter->state)) {
2597
2598 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2599 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2600 }
2601
2602 return 1;
2603}
2604
2605static void
2606qlcnic_fw_poll_work(struct work_struct *work)
2607{
2608 struct qlcnic_adapter *adapter = container_of(work,
2609 struct qlcnic_adapter, fw_work.work);
2610
2611 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2612 goto reschedule;
2613
2614
2615 if (qlcnic_check_health(adapter))
2616 return;
2617
2618 if (adapter->fhash.fnum)
2619 qlcnic_prune_lb_filters(adapter);
2620
2621reschedule:
2622 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2623}
2624
2625static int qlcnic_is_first_func(struct pci_dev *pdev)
2626{
2627 struct pci_dev *oth_pdev;
2628 int val = pdev->devfn;
2629
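	/* Walk the lower-numbered functions in this slot; if any of them is
	 * still powered up, this function is not the first one coming back.
	 */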
2630 while (val-- > 0) {
2631 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2632 (pdev->bus), pdev->bus->number,
2633 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2634 if (!oth_pdev)
2635 continue;
2636
2637 if (oth_pdev->current_state != PCI_D3cold) {
2638 pci_dev_put(oth_pdev);
2639 return 0;
2640 }
2641 pci_dev_put(oth_pdev);
2642 }
2643 return 1;
2644}
2645
2646static int qlcnic_attach_func(struct pci_dev *pdev)
2647{
2648 int err, first_func;
2649 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2650 struct net_device *netdev = adapter->netdev;
2651
2652 pdev->error_state = pci_channel_io_normal;
2653
2654 err = pci_enable_device(pdev);
2655 if (err)
2656 return err;
2657
2658 pci_set_power_state(pdev, PCI_D0);
2659 pci_set_master(pdev);
2660 pci_restore_state(pdev);
2661
2662 first_func = qlcnic_is_first_func(pdev);
2663
2664 if (qlcnic_api_lock(adapter))
2665 return -EINVAL;
2666
2667 if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2668 adapter->need_fw_reset = 1;
2669 set_bit(__QLCNIC_START_FW, &adapter->state);
2670 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2671 QLCDB(adapter, DRV, "Restarting fw\n");
2672 }
2673 qlcnic_api_unlock(adapter);
2674
2675 err = adapter->nic_ops->start_firmware(adapter);
2676 if (err)
2677 return err;
2678
2679 qlcnic_clr_drv_state(adapter);
2680 qlcnic_setup_intr(adapter);
2681
2682 if (netif_running(netdev)) {
2683 err = qlcnic_attach(adapter);
2684 if (err) {
2685 qlcnic_clr_all_drv_state(adapter, 1);
2686 clear_bit(__QLCNIC_AER, &adapter->state);
2687 netif_device_attach(netdev);
2688 return err;
2689 }
2690
2691 err = qlcnic_up(adapter, netdev);
2692 if (err)
2693 goto done;
2694
2695 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2696 }
2697 done:
2698 netif_device_attach(netdev);
2699 return err;
2700}
2701
2702static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2703 pci_channel_state_t state)
2704{
2705 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2706 struct net_device *netdev = adapter->netdev;
2707
2708 if (state == pci_channel_io_perm_failure)
2709 return PCI_ERS_RESULT_DISCONNECT;
2710
2711 if (state == pci_channel_io_normal)
2712 return PCI_ERS_RESULT_RECOVERED;
2713
2714 set_bit(__QLCNIC_AER, &adapter->state);
2715 netif_device_detach(netdev);
2716
2717 cancel_delayed_work_sync(&adapter->fw_work);
2718
2719 if (netif_running(netdev))
2720 qlcnic_down(adapter, netdev);
2721
2722 qlcnic_detach(adapter);
2723 qlcnic_teardown_intr(adapter);
2724
2725 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2726
2727 pci_save_state(pdev);
2728 pci_disable_device(pdev);
2729
2730 return PCI_ERS_RESULT_NEED_RESET;
2731}
2732
2733static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2734{
2735 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2736 PCI_ERS_RESULT_RECOVERED;
2737}
2738
2739static void qlcnic_io_resume(struct pci_dev *pdev)
2740{
2741 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2742
2743 pci_cleanup_aer_uncorrect_error_status(pdev);
2744
2745 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2746 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2747 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2748 FW_POLL_DELAY);
2749}
2750
2751static int
2752qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2753{
2754 int err;
2755
2756 err = qlcnic_can_start_firmware(adapter);
2757 if (err)
2758 return err;
2759
2760 err = qlcnic_check_npar_opertional(adapter);
2761 if (err)
2762 return err;
2763
2764 err = qlcnic_initialize_nic(adapter);
2765 if (err)
2766 return err;
2767
2768 qlcnic_check_options(adapter);
2769
2770 err = qlcnic_set_eswitch_port_config(adapter);
2771 if (err)
2772 return err;
2773
2774 adapter->need_fw_reset = 0;
2775
2776 return err;
2777}
2778
2779int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
2780{
2781 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
2782 		netdev_info(netdev, "no MSI-X or MSI support, hence no RSS\n");
2783 return -EINVAL;
2784 }
2785
2786 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
2787 		netdev_info(netdev, "rss_ring valid range [2 - %x] in "
2788 			"powers of 2\n", max_hw);
2789 return -EINVAL;
2790 }
2791 return 0;
2792
2793}
2794
2795int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
2796{
2797 struct net_device *netdev = adapter->netdev;
2798 int err = 0;
2799
2800 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2801 return -EBUSY;
2802
2803 netif_device_detach(netdev);
2804 if (netif_running(netdev))
2805 __qlcnic_down(adapter, netdev);
2806 qlcnic_detach(adapter);
2807 qlcnic_teardown_intr(adapter);
2808
2809 if (qlcnic_enable_msix(adapter, data)) {
2810 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
2811 qlcnic_enable_msi_legacy(adapter);
2812 }
2813
2814 if (netif_running(netdev)) {
2815 err = qlcnic_attach(adapter);
2816 if (err)
2817 goto done;
2818 err = __qlcnic_up(adapter, netdev);
2819 if (err)
2820 goto done;
2821 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2822 }
2823 done:
2824 netif_device_attach(netdev);
2825 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2826 return err;
2827}
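/*
 * Illustrative sketch only (not part of the original driver): a caller that
 * reconfigures RSS rings is expected to validate the request before applying
 * it. The identifiers max_hw_rings and requested_rings are hypothetical.
 *
 *	err = qlcnic_validate_max_rss(netdev, max_hw_rings, requested_rings);
 *	if (!err)
 *		err = qlcnic_set_max_rss(adapter, requested_rings);
 */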
2828
2829#ifdef CONFIG_INET
2830
2831#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2832
2833static void
2834qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
2835 struct net_device *dev, unsigned long event)
2836{
2837 struct in_device *indev;
2838
2839 indev = in_dev_get(dev);
2840 if (!indev)
2841 return;
2842
2843 for_ifa(indev) {
2844 switch (event) {
2845 case NETDEV_UP:
2846 qlcnic_config_ipaddr(adapter,
2847 ifa->ifa_address, QLCNIC_IP_UP);
2848 break;
2849 case NETDEV_DOWN:
2850 qlcnic_config_ipaddr(adapter,
2851 ifa->ifa_address, QLCNIC_IP_DOWN);
2852 break;
2853 default:
2854 break;
2855 }
2856 } endfor_ifa(indev);
2857
2858 in_dev_put(indev);
2859}
2860
2861static void
2862qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
2863{
2864 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2865 struct net_device *dev;
2866 u16 vid;
2867
2868 qlcnic_config_indev_addr(adapter, netdev, event);
2869
2870 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
2871 dev = __vlan_find_dev_deep(netdev, vid);
2872 if (!dev)
2873 continue;
2874 qlcnic_config_indev_addr(adapter, dev, event);
2875 }
2876}
2877
2878static int qlcnic_netdev_event(struct notifier_block *this,
2879 unsigned long event, void *ptr)
2880{
2881 struct qlcnic_adapter *adapter;
2882 struct net_device *dev = (struct net_device *)ptr;
2883
2884recheck:
2885 if (dev == NULL)
2886 goto done;
2887
2888 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2889 dev = vlan_dev_real_dev(dev);
2890 goto recheck;
2891 }
2892
2893 if (!is_qlcnic_netdev(dev))
2894 goto done;
2895
2896 adapter = netdev_priv(dev);
2897
2898 if (!adapter)
2899 goto done;
2900
2901 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2902 goto done;
2903
2904 qlcnic_config_indev_addr(adapter, dev, event);
2905done:
2906 return NOTIFY_DONE;
2907}
2908
2909static int
2910qlcnic_inetaddr_event(struct notifier_block *this,
2911 unsigned long event, void *ptr)
2912{
2913 struct qlcnic_adapter *adapter;
2914 struct net_device *dev;
2915
2916 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2917
2918 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2919
2920recheck:
2921 if (dev == NULL)
2922 goto done;
2923
2924 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2925 dev = vlan_dev_real_dev(dev);
2926 goto recheck;
2927 }
2928
2929 if (!is_qlcnic_netdev(dev))
2930 goto done;
2931
2932 adapter = netdev_priv(dev);
2933
2934 if (!adapter)
2935 goto done;
2936
2937 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2938 goto done;
2939
2940 switch (event) {
2941 case NETDEV_UP:
2942 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2943 break;
2944 case NETDEV_DOWN:
2945 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2946 break;
2947 default:
2948 break;
2949 }
2950
2951done:
2952 return NOTIFY_DONE;
2953}
2954
2955static struct notifier_block qlcnic_netdev_cb = {
2956 .notifier_call = qlcnic_netdev_event,
2957};
2958
2959static struct notifier_block qlcnic_inetaddr_cb = {
2960 .notifier_call = qlcnic_inetaddr_event,
2961};
2962#else
2963static void
2964qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
2965{ }
2966#endif
2967static struct pci_error_handlers qlcnic_err_handler = {
2968 .error_detected = qlcnic_io_error_detected,
2969 .slot_reset = qlcnic_io_slot_reset,
2970 .resume = qlcnic_io_resume,
2971};
2972
2973static struct pci_driver qlcnic_driver = {
2974 .name = qlcnic_driver_name,
2975 .id_table = qlcnic_pci_tbl,
2976 .probe = qlcnic_probe,
2977 .remove = qlcnic_remove,
2978#ifdef CONFIG_PM
2979 .suspend = qlcnic_suspend,
2980 .resume = qlcnic_resume,
2981#endif
2982 .shutdown = qlcnic_shutdown,
2983 .err_handler = &qlcnic_err_handler
2984
2985};
2986
2987static int __init qlcnic_init_module(void)
2988{
2989 int ret;
2990
2991 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2992
2993 qlcnic_wq = create_singlethread_workqueue("qlcnic");
2994 if (qlcnic_wq == NULL) {
2995 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
2996 return -ENOMEM;
2997 }
2998
2999#ifdef CONFIG_INET
3000 register_netdevice_notifier(&qlcnic_netdev_cb);
3001 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3002#endif
3003
3004 ret = pci_register_driver(&qlcnic_driver);
3005 if (ret) {
3006#ifdef CONFIG_INET
3007 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3008 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3009#endif
3010 destroy_workqueue(qlcnic_wq);
3011 }
3012
3013 return ret;
3014}
3015
3016module_init(qlcnic_init_module);
3017
3018static void __exit qlcnic_exit_module(void)
3019{
3020
3021 pci_unregister_driver(&qlcnic_driver);
3022
3023#ifdef CONFIG_INET
3024 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3025 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3026#endif
3027 destroy_workqueue(qlcnic_wq);
3028}
3029
3030module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
deleted file mode 100644
index 0b8d8625834..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ /dev/null
@@ -1,628 +0,0 @@
1#include "qlcnic.h"
2#include "qlcnic_hdr.h"
3
4#include <net/ip.h>
5
6#define QLCNIC_DUMP_WCRB BIT_0
7#define QLCNIC_DUMP_RWCRB BIT_1
8#define QLCNIC_DUMP_ANDCRB BIT_2
9#define QLCNIC_DUMP_ORCRB BIT_3
10#define QLCNIC_DUMP_POLLCRB BIT_4
11#define QLCNIC_DUMP_RD_SAVE BIT_5
12#define QLCNIC_DUMP_WRT_SAVED BIT_6
13#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
14#define QLCNIC_DUMP_SKIP BIT_7
15
16#define QLCNIC_DUMP_MASK_MAX 0xff
17
18struct qlcnic_common_entry_hdr {
19 u32 type;
20 u32 offset;
21 u32 cap_size;
22 u8 mask;
23 u8 rsvd[2];
24 u8 flags;
25} __packed;
26
27struct __crb {
28 u32 addr;
29 u8 stride;
30 u8 rsvd1[3];
31 u32 data_size;
32 u32 no_ops;
33 u32 rsvd2[4];
34} __packed;
35
36struct __ctrl {
37 u32 addr;
38 u8 stride;
39 u8 index_a;
40 u16 timeout;
41 u32 data_size;
42 u32 no_ops;
43 u8 opcode;
44 u8 index_v;
45 u8 shl_val;
46 u8 shr_val;
47 u32 val1;
48 u32 val2;
49 u32 val3;
50} __packed;
51
52struct __cache {
53 u32 addr;
54 u16 stride;
55 u16 init_tag_val;
56 u32 size;
57 u32 no_ops;
58 u32 ctrl_addr;
59 u32 ctrl_val;
60 u32 read_addr;
61 u8 read_addr_stride;
62 u8 read_addr_num;
63 u8 rsvd1[2];
64} __packed;
65
66struct __ocm {
67 u8 rsvd[8];
68 u32 size;
69 u32 no_ops;
70 u8 rsvd1[8];
71 u32 read_addr;
72 u32 read_addr_stride;
73} __packed;
74
75struct __mem {
76 u8 rsvd[24];
77 u32 addr;
78 u32 size;
79} __packed;
80
81struct __mux {
82 u32 addr;
83 u8 rsvd[4];
84 u32 size;
85 u32 no_ops;
86 u32 val;
87 u32 val_stride;
88 u32 read_addr;
89 u8 rsvd2[4];
90} __packed;
91
92struct __queue {
93 u32 sel_addr;
94 u16 stride;
95 u8 rsvd[2];
96 u32 size;
97 u32 no_ops;
98 u8 rsvd2[8];
99 u32 read_addr;
100 u8 read_addr_stride;
101 u8 read_addr_cnt;
102 u8 rsvd3[2];
103} __packed;
104
105struct qlcnic_dump_entry {
106 struct qlcnic_common_entry_hdr hdr;
107 union {
108 struct __crb crb;
109 struct __cache cache;
110 struct __ocm ocm;
111 struct __mem mem;
112 struct __mux mux;
113 struct __queue que;
114 struct __ctrl ctrl;
115 } region;
116} __packed;
117
118enum qlcnic_minidump_opcode {
119 QLCNIC_DUMP_NOP = 0,
120 QLCNIC_DUMP_READ_CRB = 1,
121 QLCNIC_DUMP_READ_MUX = 2,
122 QLCNIC_DUMP_QUEUE = 3,
123 QLCNIC_DUMP_BRD_CONFIG = 4,
124 QLCNIC_DUMP_READ_OCM = 6,
125 QLCNIC_DUMP_PEG_REG = 7,
126 QLCNIC_DUMP_L1_DTAG = 8,
127 QLCNIC_DUMP_L1_ITAG = 9,
128 QLCNIC_DUMP_L1_DATA = 11,
129 QLCNIC_DUMP_L1_INST = 12,
130 QLCNIC_DUMP_L2_DTAG = 21,
131 QLCNIC_DUMP_L2_ITAG = 22,
132 QLCNIC_DUMP_L2_DATA = 23,
133 QLCNIC_DUMP_L2_INST = 24,
134 QLCNIC_DUMP_READ_ROM = 71,
135 QLCNIC_DUMP_READ_MEM = 72,
136 QLCNIC_DUMP_READ_CTRL = 98,
137 QLCNIC_DUMP_TLHDR = 99,
138 QLCNIC_DUMP_RDEND = 255
139};
140
141struct qlcnic_dump_operations {
142 enum qlcnic_minidump_opcode opcode;
143 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
144 __le32 *);
145};
146
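/*
 * Indirect CRB access used while dumping: select a 64K window by writing the
 * upper 16 bits of the address to the window register, then read or write the
 * target through the data window at the low 16 bits of the address.
 */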
147static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
148{
149 u32 dest;
150 void __iomem *window_reg;
151
152 dest = addr & 0xFFFF0000;
153 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
154 writel(dest, window_reg);
155 readl(window_reg);
156 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
157 *data = readl(window_reg);
158}
159
160static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
161{
162 u32 dest;
163 void __iomem *window_reg;
164
165 dest = addr & 0xFFFF0000;
166 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
167 writel(dest, window_reg);
168 readl(window_reg);
169 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
170 writel(data, window_reg);
171 readl(window_reg);
172}
173
174/* FW dump related functions */
175static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
176 struct qlcnic_dump_entry *entry, __le32 *buffer)
177{
178 int i;
179 u32 addr, data;
180 struct __crb *crb = &entry->region.crb;
181 void __iomem *base = adapter->ahw->pci_base0;
182
183 addr = crb->addr;
184
185 for (i = 0; i < crb->no_ops; i++) {
186 qlcnic_read_dump_reg(addr, base, &data);
187 *buffer++ = cpu_to_le32(addr);
188 *buffer++ = cpu_to_le32(data);
189 addr += crb->stride;
190 }
191 return crb->no_ops * 2 * sizeof(u32);
192}
193
194static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
195 struct qlcnic_dump_entry *entry, __le32 *buffer)
196{
197 int i, k, timeout = 0;
198 void __iomem *base = adapter->ahw->pci_base0;
199 u32 addr, data;
200 u8 no_ops;
201 struct __ctrl *ctr = &entry->region.ctrl;
202 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
203
204 addr = ctr->addr;
205 no_ops = ctr->no_ops;
206
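	/*
	 * ctr->opcode is a bitmask of QLCNIC_DUMP_* control operations; each
	 * set bit is applied in turn to the current address before stepping
	 * the address by the stride.
	 */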
207 for (i = 0; i < no_ops; i++) {
208 k = 0;
209 for (k = 0; k < 8; k++) {
210 if (!(ctr->opcode & (1 << k)))
211 continue;
212 switch (1 << k) {
213 case QLCNIC_DUMP_WCRB:
214 qlcnic_write_dump_reg(addr, base, ctr->val1);
215 break;
216 case QLCNIC_DUMP_RWCRB:
217 qlcnic_read_dump_reg(addr, base, &data);
218 qlcnic_write_dump_reg(addr, base, data);
219 break;
220 case QLCNIC_DUMP_ANDCRB:
221 qlcnic_read_dump_reg(addr, base, &data);
222 qlcnic_write_dump_reg(addr, base,
223 data & ctr->val2);
224 break;
225 case QLCNIC_DUMP_ORCRB:
226 qlcnic_read_dump_reg(addr, base, &data);
227 qlcnic_write_dump_reg(addr, base,
228 data | ctr->val3);
229 break;
230 case QLCNIC_DUMP_POLLCRB:
231 while (timeout <= ctr->timeout) {
232 qlcnic_read_dump_reg(addr, base, &data);
233 if ((data & ctr->val2) == ctr->val1)
234 break;
235 msleep(1);
236 timeout++;
237 }
238 if (timeout > ctr->timeout) {
239 dev_info(&adapter->pdev->dev,
240 "Timed out, aborting poll CRB\n");
241 return -EINVAL;
242 }
243 break;
244 case QLCNIC_DUMP_RD_SAVE:
245 if (ctr->index_a)
246 addr = t_hdr->saved_state[ctr->index_a];
247 qlcnic_read_dump_reg(addr, base, &data);
248 t_hdr->saved_state[ctr->index_v] = data;
249 break;
250 case QLCNIC_DUMP_WRT_SAVED:
251 if (ctr->index_v)
252 data = t_hdr->saved_state[ctr->index_v];
253 else
254 data = ctr->val1;
255 if (ctr->index_a)
256 addr = t_hdr->saved_state[ctr->index_a];
257 qlcnic_write_dump_reg(addr, base, data);
258 break;
259 case QLCNIC_DUMP_MOD_SAVE_ST:
260 data = t_hdr->saved_state[ctr->index_v];
261 data <<= ctr->shl_val;
262 data >>= ctr->shr_val;
263 if (ctr->val2)
264 data &= ctr->val2;
265 data |= ctr->val3;
266 data += ctr->val1;
267 t_hdr->saved_state[ctr->index_v] = data;
268 break;
269 default:
270 dev_info(&adapter->pdev->dev,
271 "Unknown opcode\n");
272 break;
273 }
274 }
275 addr += ctr->stride;
276 }
277 return 0;
278}
279
280static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
281 struct qlcnic_dump_entry *entry, __le32 *buffer)
282{
283 int loop;
284 u32 val, data = 0;
285 struct __mux *mux = &entry->region.mux;
286 void __iomem *base = adapter->ahw->pci_base0;
287
288 val = mux->val;
289 for (loop = 0; loop < mux->no_ops; loop++) {
290 qlcnic_write_dump_reg(mux->addr, base, val);
291 qlcnic_read_dump_reg(mux->read_addr, base, &data);
292 *buffer++ = cpu_to_le32(val);
293 *buffer++ = cpu_to_le32(data);
294 val += mux->val_stride;
295 }
296 return 2 * mux->no_ops * sizeof(u32);
297}
298
299static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
300 struct qlcnic_dump_entry *entry, __le32 *buffer)
301{
302 int i, loop;
303 u32 cnt, addr, data, que_id = 0;
304 void __iomem *base = adapter->ahw->pci_base0;
305 struct __queue *que = &entry->region.que;
306
307 addr = que->read_addr;
308 cnt = que->read_addr_cnt;
309
310 for (loop = 0; loop < que->no_ops; loop++) {
311 qlcnic_write_dump_reg(que->sel_addr, base, que_id);
312 addr = que->read_addr;
313 for (i = 0; i < cnt; i++) {
314 qlcnic_read_dump_reg(addr, base, &data);
315 *buffer++ = cpu_to_le32(data);
316 addr += que->read_addr_stride;
317 }
318 que_id += que->stride;
319 }
320 return que->no_ops * cnt * sizeof(u32);
321}
322
323static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
324 struct qlcnic_dump_entry *entry, __le32 *buffer)
325{
326 int i;
327 u32 data;
328 void __iomem *addr;
329 struct __ocm *ocm = &entry->region.ocm;
330
331 addr = adapter->ahw->pci_base0 + ocm->read_addr;
332 for (i = 0; i < ocm->no_ops; i++) {
333 data = readl(addr);
334 *buffer++ = cpu_to_le32(data);
335 addr += ocm->read_addr_stride;
336 }
337 return ocm->no_ops * sizeof(u32);
338}
339
340static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
341 struct qlcnic_dump_entry *entry, __le32 *buffer)
342{
343 int i, count = 0;
344 u32 fl_addr, size, val, lck_val, addr;
345 struct __mem *rom = &entry->region.mem;
346 void __iomem *base = adapter->ahw->pci_base0;
347
348 fl_addr = rom->addr;
349 size = rom->size/4;
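	/* Acquire flash semaphore 2 before the windowed ROM reads; the read
	 * of QLCNIC_FLASH_SEM2_ULK at the end of this function releases it.
	 */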
350lock_try:
351 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
352 if (!lck_val && count < MAX_CTL_CHECK) {
353 msleep(10);
354 count++;
355 goto lock_try;
356 }
357 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
358 for (i = 0; i < size; i++) {
359 addr = fl_addr & 0xFFFF0000;
360 qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
361 addr = LSW(fl_addr) + FLASH_ROM_DATA;
362 qlcnic_read_dump_reg(addr, base, &val);
363 fl_addr += 4;
364 *buffer++ = cpu_to_le32(val);
365 }
366 readl(base + QLCNIC_FLASH_SEM2_ULK);
367 return rom->size;
368}
369
370static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
371 struct qlcnic_dump_entry *entry, __le32 *buffer)
372{
373 int i;
374 u32 cnt, val, data, addr;
375 void __iomem *base = adapter->ahw->pci_base0;
376 struct __cache *l1 = &entry->region.cache;
377
378 val = l1->init_tag_val;
379
380 for (i = 0; i < l1->no_ops; i++) {
381 qlcnic_write_dump_reg(l1->addr, base, val);
382 qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
383 addr = l1->read_addr;
384 cnt = l1->read_addr_num;
385 while (cnt) {
386 qlcnic_read_dump_reg(addr, base, &data);
387 *buffer++ = cpu_to_le32(data);
388 addr += l1->read_addr_stride;
389 cnt--;
390 }
391 val += l1->stride;
392 }
393 return l1->no_ops * l1->read_addr_num * sizeof(u32);
394}
395
396static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
397 struct qlcnic_dump_entry *entry, __le32 *buffer)
398{
399 int i;
400 u32 cnt, val, data, addr;
401 u8 poll_mask, poll_to, time_out = 0;
402 void __iomem *base = adapter->ahw->pci_base0;
403 struct __cache *l2 = &entry->region.cache;
404
405 val = l2->init_tag_val;
406 poll_mask = LSB(MSW(l2->ctrl_val));
407 poll_to = MSB(MSW(l2->ctrl_val));
408
409 for (i = 0; i < l2->no_ops; i++) {
410 qlcnic_write_dump_reg(l2->addr, base, val);
411 if (LSW(l2->ctrl_val))
412 qlcnic_write_dump_reg(l2->ctrl_addr, base,
413 LSW(l2->ctrl_val));
414 if (!poll_mask)
415 goto skip_poll;
416 do {
417 qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
418 if (!(data & poll_mask))
419 break;
420 msleep(1);
421 time_out++;
422 } while (time_out <= poll_to);
423
424 if (time_out > poll_to) {
425 dev_err(&adapter->pdev->dev,
426 "Timeout exceeded in %s, aborting dump\n",
427 __func__);
428 return -EINVAL;
429 }
430skip_poll:
431 addr = l2->read_addr;
432 cnt = l2->read_addr_num;
433 while (cnt) {
434 qlcnic_read_dump_reg(addr, base, &data);
435 *buffer++ = cpu_to_le32(data);
436 addr += l2->read_addr_stride;
437 cnt--;
438 }
439 val += l2->stride;
440 }
441 return l2->no_ops * l2->read_addr_num * sizeof(u32);
442}
443
444static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
445 struct qlcnic_dump_entry *entry, __le32 *buffer)
446{
447 u32 addr, data, test, ret = 0;
448 int i, reg_read;
449 struct __mem *mem = &entry->region.mem;
450 void __iomem *base = adapter->ahw->pci_base0;
451
452 reg_read = mem->size;
453 addr = mem->addr;
454 	/* size must be a multiple of 16 and the address 16-byte aligned */
455 if ((addr & 0xf) || (reg_read%16)) {
456 dev_info(&adapter->pdev->dev,
457 "Unaligned memory addr:0x%x size:0x%x\n",
458 addr, reg_read);
459 return -EINVAL;
460 }
461
462 mutex_lock(&adapter->ahw->mem_lock);
463
464 while (reg_read != 0) {
465 qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
466 qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
467 qlcnic_write_dump_reg(MIU_TEST_CTR, base,
468 TA_CTL_ENABLE | TA_CTL_START);
469
470 for (i = 0; i < MAX_CTL_CHECK; i++) {
471 qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
472 if (!(test & TA_CTL_BUSY))
473 break;
474 }
475 		if (i == MAX_CTL_CHECK) {
476 			/* abort the dump even when the message is ratelimited */
477 			if (printk_ratelimit())
478 				dev_err(&adapter->pdev->dev,
479 					"failed to read through agent\n");
480 			ret = -EINVAL;
481 			goto out;
482 		}
483 for (i = 0; i < 4; i++) {
484 qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
485 &data);
486 *buffer++ = cpu_to_le32(data);
487 }
488 addr += 16;
489 reg_read -= 16;
490 ret += 16;
491 }
492out:
493 mutex_unlock(&adapter->ahw->mem_lock);
494 return mem->size;
495}
496
497static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
498 struct qlcnic_dump_entry *entry, __le32 *buffer)
499{
500 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
501 return 0;
502}
503
504static const struct qlcnic_dump_operations fw_dump_ops[] = {
505 { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
506 { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
507 { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
508 { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
509 { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
510 { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
511 { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
512 { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
513 { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
514 { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
515 { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
516 { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
517 { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
518 { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
519 { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
520 { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
521 { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
522 { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
523 { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
524 { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
525};
526
527/* Walk the template and collect dump for each entry in the dump template */
528static int
529qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
530 u32 size)
531{
532 int ret = 1;
533 if (size != entry->hdr.cap_size) {
534 dev_info(dev,
535 			"Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
536 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
537 dev_info(dev, "Aborting further dump capture\n");
538 ret = 0;
539 }
540 return ret;
541}
542
543int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
544{
545 __le32 *buffer;
546 char mesg[64];
547 char *msg[] = {mesg, NULL};
548 int i, k, ops_cnt, ops_index, dump_size = 0;
549 u32 entry_offset, dump, no_entries, buf_offset = 0;
550 struct qlcnic_dump_entry *entry;
551 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
552 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
553
554 if (fw_dump->clr) {
555 dev_info(&adapter->pdev->dev,
556 "Previous dump not cleared, not capturing dump\n");
557 return -EIO;
558 }
559 /* Calculate the size for dump data area only */
560 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
561 if (i & tmpl_hdr->drv_cap_mask)
562 dump_size += tmpl_hdr->cap_sizes[k];
563 if (!dump_size)
564 return -EIO;
565
566 fw_dump->data = vzalloc(dump_size);
567 if (!fw_dump->data) {
568 dev_info(&adapter->pdev->dev,
569 "Unable to allocate (%d KB) for fw dump\n",
570 dump_size / 1024);
571 return -ENOMEM;
572 }
573 buffer = fw_dump->data;
574 fw_dump->size = dump_size;
575 no_entries = tmpl_hdr->num_entries;
576 ops_cnt = ARRAY_SIZE(fw_dump_ops);
577 entry_offset = tmpl_hdr->offset;
578 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
579 tmpl_hdr->sys_info[1] = adapter->fw_version;
580
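	/*
	 * Walk the template: each entry header gives the opcode (which picks
	 * a handler from fw_dump_ops), a capture mask checked against
	 * drv_cap_mask, the expected capture size, and the offset of the
	 * next entry.
	 */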
581 for (i = 0; i < no_entries; i++) {
582 entry = (void *)tmpl_hdr + entry_offset;
583 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
584 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
585 entry_offset += entry->hdr.offset;
586 continue;
587 }
588 /* Find the handler for this entry */
589 ops_index = 0;
590 while (ops_index < ops_cnt) {
591 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
592 break;
593 ops_index++;
594 }
595 if (ops_index == ops_cnt) {
596 dev_info(&adapter->pdev->dev,
597 "Invalid entry type %d, exiting dump\n",
598 entry->hdr.type);
599 goto error;
600 }
601 /* Collect dump for this entry */
602 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
603 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
604 dump))
605 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
606 buf_offset += entry->hdr.cap_size;
607 entry_offset += entry->hdr.offset;
608 buffer = fw_dump->data + buf_offset;
609 }
610 if (dump_size != buf_offset) {
611 dev_info(&adapter->pdev->dev,
612 			"Captured size (%d) and expected size (%d) do not match\n",
613 buf_offset, dump_size);
614 goto error;
615 } else {
616 fw_dump->clr = 1;
617 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
618 adapter->netdev->name);
619 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
620 fw_dump->size);
621 /* Send a udev event to notify availability of FW dump */
622 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
623 return 0;
624 }
625error:
626 vfree(fw_dump->data);
627 return -EINVAL;
628}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
deleted file mode 100644
index 341d37c867f..00000000000
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ /dev/null
@@ -1,960 +0,0 @@
1#include <linux/slab.h>
2#include <linux/vmalloc.h>
3#include <linux/interrupt.h>
4
5#include "qlcnic.h"
6
7#include <linux/swab.h>
8#include <linux/dma-mapping.h>
9#include <net/ip.h>
10#include <linux/ipv6.h>
11#include <linux/inetdevice.h>
12#include <linux/sysfs.h>
13#include <linux/aer.h>
14#include <linux/log2.h>
15
16int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
17{
18 return -EOPNOTSUPP;
19}
20
21int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
22{
23 return -EOPNOTSUPP;
24}
25
26static ssize_t qlcnic_store_bridged_mode(struct device *dev,
27 struct device_attribute *attr,
28 const char *buf, size_t len)
29{
30 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
31 unsigned long new;
32 int ret = -EINVAL;
33
34 if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
35 goto err_out;
36
37 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
38 goto err_out;
39
40 if (strict_strtoul(buf, 2, &new))
41 goto err_out;
42
43 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
44 ret = len;
45
46err_out:
47 return ret;
48}
49
50static ssize_t qlcnic_show_bridged_mode(struct device *dev,
51 struct device_attribute *attr,
52 char *buf)
53{
54 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
55 int bridged_mode = 0;
56
57 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
58 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
59
60 return sprintf(buf, "%d\n", bridged_mode);
61}
62
63static ssize_t qlcnic_store_diag_mode(struct device *dev,
64 struct device_attribute *attr,
65 const char *buf, size_t len)
66{
67 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
68 unsigned long new;
69
70 if (strict_strtoul(buf, 2, &new))
71 return -EINVAL;
72
73 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
74 adapter->flags ^= QLCNIC_DIAG_ENABLED;
75
76 return len;
77}
78
79static ssize_t qlcnic_show_diag_mode(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
83
84 return sprintf(buf, "%d\n",
85 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
86}
87
88static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
89 u8 *state, u8 *rate)
90{
91 *rate = LSB(beacon);
92 *state = MSB(beacon);
93
94 QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
95
96 if (!*state) {
97 *rate = __QLCNIC_MAX_LED_RATE;
98 return 0;
99 } else if (*state > __QLCNIC_MAX_LED_STATE) {
100 return -EINVAL;
101 }
102
103 if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
104 return -EINVAL;
105
106 return 0;
107}
108
109static ssize_t qlcnic_store_beacon(struct device *dev,
110 struct device_attribute *attr,
111 const char *buf, size_t len)
112{
113 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
114 int max_sds_rings = adapter->max_sds_rings;
115 u16 beacon;
116 u8 b_state, b_rate;
117 int err;
118
119 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
120 dev_warn(dev,
121 			 "LED test not supported in non-privileged mode\n");
122 return -EOPNOTSUPP;
123 }
124
125 if (len != sizeof(u16))
126 return QL_STATUS_INVALID_PARAM;
127
128 memcpy(&beacon, buf, sizeof(u16));
129 err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
130 if (err)
131 return err;
132
133 if (adapter->ahw->beacon_state == b_state)
134 return len;
135
136 rtnl_lock();
137
138 if (!adapter->ahw->beacon_state)
139 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
140 rtnl_unlock();
141 return -EBUSY;
142 }
143
144 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
145 err = -EIO;
146 goto out;
147 }
148
149 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
150 err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
151 if (err)
152 goto out;
153 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
154 }
155
156 err = qlcnic_config_led(adapter, b_state, b_rate);
157
158 if (!err) {
159 err = len;
160 adapter->ahw->beacon_state = b_state;
161 }
162
163 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
164 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
165
166 out:
167 if (!adapter->ahw->beacon_state)
168 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
169 rtnl_unlock();
170
171 return err;
172}
173
174static ssize_t qlcnic_show_beacon(struct device *dev,
175 struct device_attribute *attr, char *buf)
176{
177 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
178
179 return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
180}
181
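/*
 * CRB accesses through this sysfs attribute are 4 bytes wide; offsets below
 * PCI CRB space are only valid in the CAMQM range and are 8 bytes wide. The
 * access size must match and the offset must be size-aligned.
 */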
182static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
183 loff_t offset, size_t size)
184{
185 size_t crb_size = 4;
186
187 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
188 return -EIO;
189
190 if (offset < QLCNIC_PCI_CRBSPACE) {
191 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
192 QLCNIC_PCI_CAMQM_END))
193 crb_size = 8;
194 else
195 return -EINVAL;
196 }
197
198 if ((size != crb_size) || (offset & (crb_size-1)))
199 return -EINVAL;
200
201 return 0;
202}
203
204static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
205 struct bin_attribute *attr, char *buf,
206 loff_t offset, size_t size)
207{
208 struct device *dev = container_of(kobj, struct device, kobj);
209 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
210 u32 data;
211 u64 qmdata;
212 int ret;
213
214 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
215 if (ret != 0)
216 return ret;
217
218 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
219 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
220 memcpy(buf, &qmdata, size);
221 } else {
222 data = QLCRD32(adapter, offset);
223 memcpy(buf, &data, size);
224 }
225 return size;
226}
227
228static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
229 struct bin_attribute *attr, char *buf,
230 loff_t offset, size_t size)
231{
232 struct device *dev = container_of(kobj, struct device, kobj);
233 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
234 u32 data;
235 u64 qmdata;
236 int ret;
237
238 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
239 if (ret != 0)
240 return ret;
241
242 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
243 memcpy(&qmdata, buf, size);
244 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
245 } else {
246 memcpy(&data, buf, size);
247 QLCWR32(adapter, offset, data);
248 }
249 return size;
250}
251
252static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
253 loff_t offset, size_t size)
254{
255 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
256 return -EIO;
257
258 if ((size != 8) || (offset & 0x7))
259 return -EIO;
260
261 return 0;
262}
263
264static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
265 struct bin_attribute *attr, char *buf,
266 loff_t offset, size_t size)
267{
268 struct device *dev = container_of(kobj, struct device, kobj);
269 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
270 u64 data;
271 int ret;
272
273 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
274 if (ret != 0)
275 return ret;
276
277 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
278 return -EIO;
279
280 memcpy(buf, &data, size);
281
282 return size;
283}
284
285static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
286 struct bin_attribute *attr, char *buf,
287 loff_t offset, size_t size)
288{
289 struct device *dev = container_of(kobj, struct device, kobj);
290 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
291 u64 data;
292 int ret;
293
294 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
295 if (ret != 0)
296 return ret;
297
298 memcpy(&data, buf, size);
299
300 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
301 return -EIO;
302
303 return size;
304}
305
306static int validate_pm_config(struct qlcnic_adapter *adapter,
307 struct qlcnic_pm_func_cfg *pm_cfg, int count)
308{
309 u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
310 int i;
311
312 for (i = 0; i < count; i++) {
313 src_pci_func = pm_cfg[i].pci_func;
314 dest_pci_func = pm_cfg[i].dest_npar;
315 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
316 dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
317 return QL_STATUS_INVALID_PARAM;
318
319 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
320 return QL_STATUS_INVALID_PARAM;
321
322 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
323 return QL_STATUS_INVALID_PARAM;
324
325 s_esw_id = adapter->npars[src_pci_func].phy_port;
326 d_esw_id = adapter->npars[dest_pci_func].phy_port;
327
328 if (s_esw_id != d_esw_id)
329 return QL_STATUS_INVALID_PARAM;
330 }
331 return 0;
332
333}
334
335static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
336 struct kobject *kobj,
337 struct bin_attribute *attr,
338 char *buf, loff_t offset,
339 size_t size)
340{
341 struct device *dev = container_of(kobj, struct device, kobj);
342 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
343 struct qlcnic_pm_func_cfg *pm_cfg;
344 u32 id, action, pci_func;
345 int count, rem, i, ret;
346
347 count = size / sizeof(struct qlcnic_pm_func_cfg);
348 rem = size % sizeof(struct qlcnic_pm_func_cfg);
349 if (rem)
350 return QL_STATUS_INVALID_PARAM;
351
352 pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
353
354 ret = validate_pm_config(adapter, pm_cfg, count);
355 if (ret)
356 return ret;
357 for (i = 0; i < count; i++) {
358 pci_func = pm_cfg[i].pci_func;
359 action = !!pm_cfg[i].action;
360 id = adapter->npars[pci_func].phy_port;
361 ret = qlcnic_config_port_mirroring(adapter, id, action,
362 pci_func);
363 if (ret)
364 return ret;
365 }
366
367 for (i = 0; i < count; i++) {
368 pci_func = pm_cfg[i].pci_func;
369 id = adapter->npars[pci_func].phy_port;
370 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
371 adapter->npars[pci_func].dest_npar = id;
372 }
373 return size;
374}
375
376static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
377 struct kobject *kobj,
378 struct bin_attribute *attr,
379 char *buf, loff_t offset,
380 size_t size)
381{
382 struct device *dev = container_of(kobj, struct device, kobj);
383 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
384 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
385 int i;
386
387 if (size != sizeof(pm_cfg))
388 return QL_STATUS_INVALID_PARAM;
389
390 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
391 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
392 continue;
393 pm_cfg[i].action = adapter->npars[i].enable_pm;
394 pm_cfg[i].dest_npar = 0;
395 pm_cfg[i].pci_func = i;
396 }
397 memcpy(buf, &pm_cfg, size);
398
399 return size;
400}
401
402static int validate_esw_config(struct qlcnic_adapter *adapter,
403 struct qlcnic_esw_func_cfg *esw_cfg, int count)
404{
405 u32 op_mode;
406 u8 pci_func;
407 int i;
408
409 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
410
411 for (i = 0; i < count; i++) {
412 pci_func = esw_cfg[i].pci_func;
413 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
414 return QL_STATUS_INVALID_PARAM;
415
416 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
417 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
418 return QL_STATUS_INVALID_PARAM;
419 }
420
421 switch (esw_cfg[i].op_mode) {
422 case QLCNIC_PORT_DEFAULTS:
423 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
424 QLCNIC_NON_PRIV_FUNC) {
425 if (esw_cfg[i].mac_anti_spoof != 0)
426 return QL_STATUS_INVALID_PARAM;
427 if (esw_cfg[i].mac_override != 1)
428 return QL_STATUS_INVALID_PARAM;
429 if (esw_cfg[i].promisc_mode != 1)
430 return QL_STATUS_INVALID_PARAM;
431 }
432 break;
433 case QLCNIC_ADD_VLAN:
434 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
435 return QL_STATUS_INVALID_PARAM;
436 if (!esw_cfg[i].op_type)
437 return QL_STATUS_INVALID_PARAM;
438 break;
439 case QLCNIC_DEL_VLAN:
440 if (!esw_cfg[i].op_type)
441 return QL_STATUS_INVALID_PARAM;
442 break;
443 default:
444 return QL_STATUS_INVALID_PARAM;
445 }
446 }
447 return 0;
448}
449
450static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
451 struct kobject *kobj,
452 struct bin_attribute *attr,
453 char *buf, loff_t offset,
454 size_t size)
455{
456 struct device *dev = container_of(kobj, struct device, kobj);
457 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
458 struct qlcnic_esw_func_cfg *esw_cfg;
459 struct qlcnic_npar_info *npar;
460 int count, rem, i, ret;
461 u8 pci_func, op_mode = 0;
462
463 count = size / sizeof(struct qlcnic_esw_func_cfg);
464 rem = size % sizeof(struct qlcnic_esw_func_cfg);
465 if (rem)
466 return QL_STATUS_INVALID_PARAM;
467
468 esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
469 ret = validate_esw_config(adapter, esw_cfg, count);
470 if (ret)
471 return ret;
472
473 for (i = 0; i < count; i++) {
474 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
475 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
476 return QL_STATUS_INVALID_PARAM;
477 }
478
479 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
480 continue;
481
482 op_mode = esw_cfg[i].op_mode;
483 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
484 esw_cfg[i].op_mode = op_mode;
485 esw_cfg[i].pci_func = adapter->ahw->pci_func;
486
487 switch (esw_cfg[i].op_mode) {
488 case QLCNIC_PORT_DEFAULTS:
489 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
490 break;
491 case QLCNIC_ADD_VLAN:
492 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
493 break;
494 case QLCNIC_DEL_VLAN:
495 esw_cfg[i].vlan_id = 0;
496 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
497 break;
498 }
499 }
500
501 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
502 goto out;
503
504 for (i = 0; i < count; i++) {
505 pci_func = esw_cfg[i].pci_func;
506 npar = &adapter->npars[pci_func];
507 switch (esw_cfg[i].op_mode) {
508 case QLCNIC_PORT_DEFAULTS:
509 npar->promisc_mode = esw_cfg[i].promisc_mode;
510 npar->mac_override = esw_cfg[i].mac_override;
511 npar->offload_flags = esw_cfg[i].offload_flags;
512 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
513 npar->discard_tagged = esw_cfg[i].discard_tagged;
514 break;
515 case QLCNIC_ADD_VLAN:
516 npar->pvid = esw_cfg[i].vlan_id;
517 break;
518 case QLCNIC_DEL_VLAN:
519 npar->pvid = 0;
520 break;
521 }
522 }
523out:
524 return size;
525}
526
527static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
528 struct kobject *kobj,
529 struct bin_attribute *attr,
530 char *buf, loff_t offset,
531 size_t size)
532{
533 struct device *dev = container_of(kobj, struct device, kobj);
534 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
535 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
536 u8 i;
537
538 if (size != sizeof(esw_cfg))
539 return QL_STATUS_INVALID_PARAM;
540
541 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
542 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
543 continue;
544 esw_cfg[i].pci_func = i;
545 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
546 return QL_STATUS_INVALID_PARAM;
547 }
548 memcpy(buf, &esw_cfg, size);
549
550 return size;
551}
552
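/* Sanity-check a user-supplied array of NPAR bandwidth settings before they
 * are applied: each entry must target a NIC-type PCI function and carry
 * minimum/maximum bandwidth values within the allowed range.
 */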
553static int validate_npar_config(struct qlcnic_adapter *adapter,
554 struct qlcnic_npar_func_cfg *np_cfg,
555 int count)
556{
557 u8 pci_func, i;
558
559 for (i = 0; i < count; i++) {
560 pci_func = np_cfg[i].pci_func;
561 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
562 return QL_STATUS_INVALID_PARAM;
563
564 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
565 return QL_STATUS_INVALID_PARAM;
566
567 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
568 !IS_VALID_BW(np_cfg[i].max_bw))
569 return QL_STATUS_INVALID_PARAM;
570 }
571 return 0;
572}
573
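/* Write handler for "npar_config": programs per-function minimum and maximum
 * transmit bandwidth through the firmware NIC-info interface and caches the
 * accepted values in the NPAR table (indexed by PCI function).
 */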
574static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
575 struct kobject *kobj,
576 struct bin_attribute *attr,
577 char *buf, loff_t offset,
578 size_t size)
579{
580 struct device *dev = container_of(kobj, struct device, kobj);
581 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
582 struct qlcnic_info nic_info;
583 struct qlcnic_npar_func_cfg *np_cfg;
584 int i, count, rem, ret;
585 u8 pci_func;
586
587 count = size / sizeof(struct qlcnic_npar_func_cfg);
588 rem = size % sizeof(struct qlcnic_npar_func_cfg);
589 if (rem)
590 return QL_STATUS_INVALID_PARAM;
591
592 np_cfg = (struct qlcnic_npar_func_cfg *)buf;
593 ret = validate_npar_config(adapter, np_cfg, count);
594 if (ret)
595 return ret;
596
597 for (i = 0; i < count ; i++) {
598 pci_func = np_cfg[i].pci_func;
599 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
600 if (ret)
601 return ret;
602 nic_info.pci_func = pci_func;
603 nic_info.min_tx_bw = np_cfg[i].min_bw;
604 nic_info.max_tx_bw = np_cfg[i].max_bw;
605 ret = qlcnic_set_nic_info(adapter, &nic_info);
606 if (ret)
607 return ret;
608		adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
609		adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
610 }
611
612 return size;
613
614}
615
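/* Read handler for "npar_config": reports the firmware's view of each
 * NIC-type function (operating mode, physical port, capabilities, bandwidth
 * limits and queue counts).
 */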
616static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
617 struct kobject *kobj,
618 struct bin_attribute *attr,
619 char *buf, loff_t offset,
620 size_t size)
621{
622 struct device *dev = container_of(kobj, struct device, kobj);
623 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
624 struct qlcnic_info nic_info;
625 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
626 int i, ret;
627
628 if (size != sizeof(np_cfg))
629 return QL_STATUS_INVALID_PARAM;
630	memset(&np_cfg, 0, size); /* skipped functions must not leak stack data */
631 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
632 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
633 continue;
634 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
635 if (ret)
636 return ret;
637
638 np_cfg[i].pci_func = i;
639 np_cfg[i].op_mode = (u8)nic_info.op_mode;
640 np_cfg[i].port_num = nic_info.phys_port;
641 np_cfg[i].fw_capab = nic_info.capabilities;
642 np_cfg[i].min_bw = nic_info.min_tx_bw;
643 np_cfg[i].max_bw = nic_info.max_tx_bw;
644 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
645 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
646 }
647 memcpy(buf, &np_cfg, size);
648 return size;
649}
650
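/* Read handler for "port_stats": the file offset selects the PCI function
 * whose receive and transmit counters are returned as a single
 * struct qlcnic_esw_statistics record.
 */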
651static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
652 struct kobject *kobj,
653 struct bin_attribute *attr,
654 char *buf, loff_t offset,
655 size_t size)
656{
657 struct device *dev = container_of(kobj, struct device, kobj);
658 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
659 struct qlcnic_esw_statistics port_stats;
660 int ret;
661
662 if (size != sizeof(struct qlcnic_esw_statistics))
663 return QL_STATUS_INVALID_PARAM;
664
665 if (offset >= QLCNIC_MAX_PCI_FUNC)
666 return QL_STATUS_INVALID_PARAM;
667
668 memset(&port_stats, 0, size);
669 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
670 &port_stats.rx);
671 if (ret)
672 return ret;
673
674 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
675 &port_stats.tx);
676 if (ret)
677 return ret;
678
679 memcpy(buf, &port_stats, size);
680 return size;
681}
682
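/* Read handler for "esw_stats": the file offset selects the eSwitch
 * (physical port) whose aggregate receive and transmit counters are
 * returned.
 */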
683static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
684 struct kobject *kobj,
685 struct bin_attribute *attr,
686 char *buf, loff_t offset,
687 size_t size)
688{
689 struct device *dev = container_of(kobj, struct device, kobj);
690 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
691 struct qlcnic_esw_statistics esw_stats;
692 int ret;
693
694 if (size != sizeof(struct qlcnic_esw_statistics))
695 return QL_STATUS_INVALID_PARAM;
696
697 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
698 return QL_STATUS_INVALID_PARAM;
699
700 memset(&esw_stats, 0, size);
701 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
702 &esw_stats.rx);
703 if (ret)
704 return ret;
705
706 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
707 &esw_stats.tx);
708 if (ret)
709 return ret;
710
711 memcpy(buf, &esw_stats, size);
712 return size;
713}
714
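/* Write handler for "esw_stats": any write clears the receive and transmit
 * counters of the eSwitch selected by the file offset.
 */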
715static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
716 struct kobject *kobj,
717 struct bin_attribute *attr,
718 char *buf, loff_t offset,
719 size_t size)
720{
721 struct device *dev = container_of(kobj, struct device, kobj);
722 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
723 int ret;
724
725 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
726 return QL_STATUS_INVALID_PARAM;
727
728 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
729 QLCNIC_QUERY_RX_COUNTER);
730 if (ret)
731 return ret;
732
733 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
734 QLCNIC_QUERY_TX_COUNTER);
735 if (ret)
736 return ret;
737
738 return size;
739}
740
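/* Write handler for "port_stats": any write clears the receive and transmit
 * counters of the PCI function selected by the file offset.
 */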
741static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
742 struct kobject *kobj,
743 struct bin_attribute *attr,
744 char *buf, loff_t offset,
745 size_t size)
746{
747 struct device *dev = container_of(kobj, struct device, kobj);
748 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
749 int ret;
750
751 if (offset >= QLCNIC_MAX_PCI_FUNC)
752 return QL_STATUS_INVALID_PARAM;
753
754 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
755 QLCNIC_QUERY_RX_COUNTER);
756 if (ret)
757 return ret;
758
759 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
760 QLCNIC_QUERY_TX_COUNTER);
761 if (ret)
762 return ret;
763
764 return size;
765}
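/* Illustrative userspace sketch (not part of the driver): reading one
 * function's counters through the "port_stats" binary attribute.  The sysfs
 * path and QLC_PORT_STATS_SIZE below are assumptions -- the buffer size must
 * equal sizeof(struct qlcnic_esw_statistics) in the running kernel, otherwise
 * the handler rejects the read as an invalid parameter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define QLC_PORT_STATS_SIZE 144	/* placeholder; must match the kernel struct */

int main(void)
{
	unsigned char stats[QLC_PORT_STATS_SIZE];
	/* hypothetical device path; substitute the adapter's PCI address */
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats", O_RDONLY);

	if (fd < 0)
		return 1;
	/* the driver interprets the file offset as the PCI function number */
	if (pread(fd, stats, sizeof(stats), 0) != (ssize_t)sizeof(stats)) {
		close(fd);
		return 1;
	}
	close(fd);
	printf("read %zu bytes of port statistics for function 0\n",
	       sizeof(stats));
	return 0;
}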
766
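/* Read handler for "pci_config": dumps the adapter's PCI-function layout
 * (function type, default port, bandwidth limits and default MAC address)
 * as reported by the firmware.
 */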
767static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
768 struct kobject *kobj,
769 struct bin_attribute *attr,
770 char *buf, loff_t offset,
771 size_t size)
772{
773 struct device *dev = container_of(kobj, struct device, kobj);
774 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
775 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
776 struct qlcnic_pci_info *pci_info;
777 int i, ret;
778
779 if (size != sizeof(pci_cfg))
780 return QL_STATUS_INVALID_PARAM;
781
782 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
783 if (!pci_info)
784 return -ENOMEM;
785
786 ret = qlcnic_get_pci_info(adapter, pci_info);
787 if (ret) {
788 kfree(pci_info);
789 return ret;
790 }
791
792 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
793 pci_cfg[i].pci_func = pci_info[i].id;
794 pci_cfg[i].func_type = pci_info[i].type;
795 pci_cfg[i].port_num = pci_info[i].default_port;
796 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
797 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
798 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
799 }
800 memcpy(buf, &pci_cfg, size);
801 kfree(pci_info);
802 return size;
803}
804
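/* sysfs attribute descriptors; the binary attributes declare a size of 0,
 * so the handlers above perform their own offset and length validation.
 */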
805static struct device_attribute dev_attr_bridged_mode = {
806 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
807 .show = qlcnic_show_bridged_mode,
808 .store = qlcnic_store_bridged_mode,
809};
810
811static struct device_attribute dev_attr_diag_mode = {
812 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
813 .show = qlcnic_show_diag_mode,
814 .store = qlcnic_store_diag_mode,
815};
816
817static struct device_attribute dev_attr_beacon = {
818 .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
819 .show = qlcnic_show_beacon,
820 .store = qlcnic_store_beacon,
821};
822
823static struct bin_attribute bin_attr_crb = {
824 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
825 .size = 0,
826 .read = qlcnic_sysfs_read_crb,
827 .write = qlcnic_sysfs_write_crb,
828};
829
830static struct bin_attribute bin_attr_mem = {
831 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
832 .size = 0,
833 .read = qlcnic_sysfs_read_mem,
834 .write = qlcnic_sysfs_write_mem,
835};
836
837static struct bin_attribute bin_attr_npar_config = {
838 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
839 .size = 0,
840 .read = qlcnic_sysfs_read_npar_config,
841 .write = qlcnic_sysfs_write_npar_config,
842};
843
844static struct bin_attribute bin_attr_pci_config = {
845 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
846 .size = 0,
847 .read = qlcnic_sysfs_read_pci_config,
848 .write = NULL,
849};
850
851static struct bin_attribute bin_attr_port_stats = {
852 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
853 .size = 0,
854 .read = qlcnic_sysfs_get_port_stats,
855 .write = qlcnic_sysfs_clear_port_stats,
856};
857
858static struct bin_attribute bin_attr_esw_stats = {
859 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
860 .size = 0,
861 .read = qlcnic_sysfs_get_esw_stats,
862 .write = qlcnic_sysfs_clear_esw_stats,
863};
864
865static struct bin_attribute bin_attr_esw_config = {
866 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
867 .size = 0,
868 .read = qlcnic_sysfs_read_esw_config,
869 .write = qlcnic_sysfs_write_esw_config,
870};
871
872static struct bin_attribute bin_attr_pm_config = {
873 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
874 .size = 0,
875 .read = qlcnic_sysfs_read_pm_config,
876 .write = qlcnic_sysfs_write_pm_config,
877};
878
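/* "bridged_mode" is only exposed when the firmware advertises bridging
 * support; it is created at probe time and removed on teardown.
 */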
879void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
880{
881 struct device *dev = &adapter->pdev->dev;
882
883 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
884 if (device_create_file(dev, &dev_attr_bridged_mode))
885 dev_warn(dev,
886 "failed to create bridged_mode sysfs entry\n");
887}
888
889void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
890{
891 struct device *dev = &adapter->pdev->dev;
892
893 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
894 device_remove_file(dev, &dev_attr_bridged_mode);
895}
896
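/* The diagnostic and NPAR management attributes are created in privilege
 * order: port statistics for every function, diagnostic hooks for privileged
 * functions, and eSwitch/NPAR configuration only for the management function
 * when the eSwitch is enabled.
 */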
897void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
898{
899 struct device *dev = &adapter->pdev->dev;
900 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
901
902 if (device_create_bin_file(dev, &bin_attr_port_stats))
903		dev_info(dev, "failed to create port stats sysfs entry\n");
904
905 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
906 return;
907 if (device_create_file(dev, &dev_attr_diag_mode))
908 dev_info(dev, "failed to create diag_mode sysfs entry\n");
909 if (device_create_bin_file(dev, &bin_attr_crb))
910 dev_info(dev, "failed to create crb sysfs entry\n");
911 if (device_create_bin_file(dev, &bin_attr_mem))
912 dev_info(dev, "failed to create mem sysfs entry\n");
913
914 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
915 return;
916
917	if (device_create_bin_file(dev, &bin_attr_pci_config))
918		dev_info(dev, "failed to create pci config sysfs entry\n");
919	if (device_create_file(dev, &dev_attr_beacon))
920		dev_info(dev, "failed to create beacon sysfs entry\n");
921
922 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
923 return;
924	if (device_create_bin_file(dev, &bin_attr_esw_config))
925		dev_info(dev, "failed to create esw config sysfs entry\n");
926 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
927 return;
928	if (device_create_bin_file(dev, &bin_attr_npar_config))
929		dev_info(dev, "failed to create npar config sysfs entry\n");
930	if (device_create_bin_file(dev, &bin_attr_pm_config))
931		dev_info(dev, "failed to create pm config sysfs entry\n");
932	if (device_create_bin_file(dev, &bin_attr_esw_stats))
933		dev_info(dev, "failed to create eswitch stats sysfs entry\n");
934}
935
936void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
937{
938 struct device *dev = &adapter->pdev->dev;
939 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
940
941 device_remove_bin_file(dev, &bin_attr_port_stats);
942
943 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
944 return;
945 device_remove_file(dev, &dev_attr_diag_mode);
946 device_remove_bin_file(dev, &bin_attr_crb);
947 device_remove_bin_file(dev, &bin_attr_mem);
948 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
949 return;
950 device_remove_bin_file(dev, &bin_attr_pci_config);
951 device_remove_file(dev, &dev_attr_beacon);
952 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
953 return;
954 device_remove_bin_file(dev, &bin_attr_esw_config);
955 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
956 return;
957 device_remove_bin_file(dev, &bin_attr_npar_config);
958 device_remove_bin_file(dev, &bin_attr_pm_config);
959 device_remove_bin_file(dev, &bin_attr_esw_stats);
960}