Diffstat (limited to 'drivers/net/qlcnic')
-rw-r--r--  drivers/net/qlcnic/Makefile          |    8
-rw-r--r--  drivers/net/qlcnic/qlcnic.h          | 1553
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c      | 1117
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c  | 1222
-rw-r--r--  drivers/net/qlcnic/qlcnic_hdr.h      | 1023
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c       | 1787
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c     | 1897
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c     | 4366
8 files changed, 12973 insertions, 0 deletions
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 00000000000..ddba83ef3f4
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 00000000000..baf646d98fa
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1553 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef _QLCNIC_H_
9#define _QLCNIC_H_
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/ioport.h>
15#include <linux/pci.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/ip.h>
19#include <linux/in.h>
20#include <linux/tcp.h>
21#include <linux/skbuff.h>
22#include <linux/firmware.h>
23
24#include <linux/ethtool.h>
25#include <linux/mii.h>
26#include <linux/timer.h>
27
28#include <linux/vmalloc.h>
29
30#include <linux/io.h>
31#include <asm/byteorder.h>
32#include <linux/bitops.h>
33#include <linux/if_vlan.h>
34
35#include "qlcnic_hdr.h"
36
37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 21
40#define QLCNIC_LINUX_VERSIONID "5.0.21"
41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
44
45#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
46#define _major(v) (((v) >> 24) & 0xff)
47#define _minor(v) (((v) >> 16) & 0xff)
48#define _build(v) ((v) & 0xffff)
49
50/* version in image has weird encoding:
51 * 7:0 - major
52 * 15:8 - minor
53 * 31:16 - build (little endian)
54 */
55#define QLCNIC_DECODE_VERSION(v) \
56 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
57
58#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
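[Editor's note: an illustrative standalone sketch (plain user-space C, not part of the driver) of the image-version decoding above. Per the layout comment, a flash image carrying major 5, minor 0, build 21 stores the raw word 0x00150005, and QLCNIC_DECODE_VERSION() maps it to the same value as QLCNIC_VERSION_CODE(5, 0, 21).]

	#include <stdint.h>
	#include <stdio.h>

	/* Local copies of the macros above, for illustration only. */
	#define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
	#define QLCNIC_DECODE_VERSION(v) \
		QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

	int main(void)
	{
		uint32_t raw = 0x00150005;	/* build 21, minor 0, major 5 */
		uint32_t code = QLCNIC_DECODE_VERSION(raw);

		/* prints "5.0.21" */
		printf("%u.%u.%u\n", (code >> 24) & 0xff,
		       (code >> 16) & 0xff, code & 0xffff);
		return 0;
	}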
59#define QLCNIC_NUM_FLASH_SECTORS (64)
60#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
61#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
62 * QLCNIC_FLASH_SECTOR_SIZE)
63
64#define RCV_DESC_RINGSIZE(rds_ring) \
65 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
66#define RCV_BUFF_RINGSIZE(rds_ring) \
67	(sizeof(struct qlcnic_rx_buffer) * (rds_ring)->num_desc)
68#define STATUS_DESC_RINGSIZE(sds_ring) \
69 (sizeof(struct status_desc) * (sds_ring)->num_desc)
70#define TX_BUFF_RINGSIZE(tx_ring) \
71	(sizeof(struct qlcnic_cmd_buffer) * (tx_ring)->num_desc)
72#define TX_DESC_RINGSIZE(tx_ring) \
73	(sizeof(struct cmd_desc_type0) * (tx_ring)->num_desc)
74
75#define QLCNIC_P3P_A0 0x50
76
77#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
78
79#define FIRST_PAGE_GROUP_START 0
80#define FIRST_PAGE_GROUP_END 0x100000
81
82#define P3P_MAX_MTU (9600)
83#define P3P_MIN_MTU (68)
84#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
85
86#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
87#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
88#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
89#define QLCNIC_LRO_BUFFER_EXTRA 2048
90
91/* Opcodes to be used with the commands */
92#define TX_ETHER_PKT 0x01
93#define TX_TCP_PKT 0x02
94#define TX_UDP_PKT 0x03
95#define TX_IP_PKT 0x04
96#define TX_TCP_LSO 0x05
97#define TX_TCP_LSO6 0x06
98#define TX_TCPV6_PKT 0x0b
99#define TX_UDPV6_PKT 0x0c
100
101/* Tx defines */
102#define QLCNIC_MAX_FRAGS_PER_TX 14
103#define MAX_TSO_HEADER_DESC 2
104#define MGMT_CMD_DESC_RESV 4
105#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
106 + MGMT_CMD_DESC_RESV)
107#define QLCNIC_MAX_TX_TIMEOUTS 2
108
109/*
110 * Following are the states of the Phantom (firmware). The firmware sets them
111 * and the host reads them back to check that initialization completed.
112 */
113#define PHAN_INITIALIZE_FAILED 0xffff
114#define PHAN_INITIALIZE_COMPLETE 0xff01
115
116/* Host writes the following to notify that it has done the init-handshake */
117#define PHAN_INITIALIZE_ACK 0xf00f
118#define PHAN_PEG_RCV_INITIALIZED 0xff01
119
120#define NUM_RCV_DESC_RINGS 3
121
122#define RCV_RING_NORMAL 0
123#define RCV_RING_JUMBO 1
124
125#define MIN_CMD_DESCRIPTORS 64
126#define MIN_RCV_DESCRIPTORS 64
127#define MIN_JUMBO_DESCRIPTORS 32
128
129#define MAX_CMD_DESCRIPTORS 1024
130#define MAX_RCV_DESCRIPTORS_1G 4096
131#define MAX_RCV_DESCRIPTORS_10G 8192
132#define MAX_RCV_DESCRIPTORS_VF 2048
133#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
134#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
135
136#define DEFAULT_RCV_DESCRIPTORS_1G 2048
137#define DEFAULT_RCV_DESCRIPTORS_10G 4096
138#define DEFAULT_RCV_DESCRIPTORS_VF 1024
139#define MAX_RDS_RINGS 2
140
141#define get_next_index(index, length) \
142 (((index) + 1) & ((length) - 1))
143
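[Editor's note: the mask-based get_next_index() above works because the descriptor counts defined in this file are all powers of two; a minimal standalone sketch of how a ring index wraps under that assumption.]

	/* Standalone sketch; assumes length is a power of two, as above. */
	static unsigned int advance_index(unsigned int index, unsigned int length)
	{
		/* e.g. with length == 1024, index 1023 wraps back to 0 */
		return (index + 1) & (length - 1);
	}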
144/*
145 * Following data structures describe the descriptors that will be used.
146 * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
147 * them only when doing LSO (packets larger than the standard 1500-byte size).
148 */
149
150#define FLAGS_VLAN_TAGGED 0x10
151#define FLAGS_VLAN_OOB 0x40
152
153#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
154 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
155#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
156 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
157#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
158 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
159
160#define qlcnic_set_tx_port(_desc, _port) \
161 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
162
163#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
164 ((_desc)->flags_opcode |= \
165 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
166
167#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
168 ((_desc)->nfrags__length = \
169 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
170
171struct cmd_desc_type0 {
172 u8 tcp_hdr_offset; /* For LSO only */
173 u8 ip_hdr_offset; /* For LSO only */
174 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
175 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
176
177 __le64 addr_buffer2;
178
179 __le16 reference_handle;
180 __le16 mss;
181 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
182 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
183	__le16 conn_id;		/* IPSec offload only */
184
185 __le64 addr_buffer3;
186 __le64 addr_buffer1;
187
188 __le16 buffer_length[4];
189
190 __le64 addr_buffer4;
191
192 u8 eth_addr[ETH_ALEN];
193 __le16 vlan_TCI;
194
195} __attribute__ ((aligned(64)));
196
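[Editor's note: a standalone sketch (host byte order, illustration only; the real driver converts with cpu_to_le16/32) of how the packing macros above fill the flags_opcode and nfrags__length words of cmd_desc_type0.]

	#include <stdint.h>

	static uint16_t pack_flags_opcode(uint16_t flags, uint16_t opcode)
	{
		/* 6:0 flags, 12:7 opcode, matching the flags_opcode field comment */
		return (flags & 0x7f) | ((opcode & 0x3f) << 7);
	}

	static uint32_t pack_nfrags_length(uint32_t nr_frags, uint32_t len)
	{
		/* 7:0 fragment count, 31:8 total length */
		return (nr_frags & 0xff) | ((len & 0xffffff) << 8);
	}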
197/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
198struct rcv_desc {
199 __le16 reference_handle;
200 __le16 reserved;
201 __le32 buffer_length; /* allocated buffer length (usually 2K) */
202 __le64 addr_buffer;
203} __packed;
204
205/* opcode field in status_desc */
206#define QLCNIC_SYN_OFFLOAD 0x03
207#define QLCNIC_RXPKT_DESC 0x04
208#define QLCNIC_OLD_RXPKT_DESC 0x3f
209#define QLCNIC_RESPONSE_DESC 0x05
210#define QLCNIC_LRO_DESC 0x12
211
212/* for status field in status_desc */
213#define STATUS_CKSUM_LOOP 0
214#define STATUS_CKSUM_OK 2
215
216/* owner bits of status_desc */
217#define STATUS_OWNER_HOST (0x1ULL << 56)
218#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
219
220/* Status descriptor:
221 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
222 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
223 53-55 desc_cnt, 56-57 owner, 58-63 opcode
224 */
225#define qlcnic_get_sts_port(sts_data) \
226 ((sts_data) & 0x0F)
227#define qlcnic_get_sts_status(sts_data) \
228 (((sts_data) >> 4) & 0x0F)
229#define qlcnic_get_sts_type(sts_data) \
230 (((sts_data) >> 8) & 0x0F)
231#define qlcnic_get_sts_totallength(sts_data) \
232 (((sts_data) >> 12) & 0xFFFF)
233#define qlcnic_get_sts_refhandle(sts_data) \
234 (((sts_data) >> 28) & 0xFFFF)
235#define qlcnic_get_sts_prot(sts_data) \
236 (((sts_data) >> 44) & 0x0F)
237#define qlcnic_get_sts_pkt_offset(sts_data) \
238 (((sts_data) >> 48) & 0x1F)
239#define qlcnic_get_sts_desc_cnt(sts_data) \
240 (((sts_data) >> 53) & 0x7)
241#define qlcnic_get_sts_opcode(sts_data) \
242 (((sts_data) >> 58) & 0x03F)
243
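[Editor's note: a standalone sketch of pulling a few fields out of the first 64-bit status word, following the bit layout documented above; byte-order conversion is omitted for clarity.]

	#include <stdint.h>

	static void decode_status_word(uint64_t sts_data)
	{
		unsigned int port   = sts_data & 0x0f;		/* bits 0-3   */
		unsigned int status = (sts_data >> 4) & 0x0f;	/* bits 4-7   */
		unsigned int length = (sts_data >> 12) & 0xffff; /* bits 12-27 */
		unsigned int opcode = (sts_data >> 58) & 0x3f;	/* bits 58-63 */

		(void)port; (void)status; (void)length; (void)opcode;
	}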
244#define qlcnic_get_lro_sts_refhandle(sts_data) \
245 ((sts_data) & 0x0FFFF)
246#define qlcnic_get_lro_sts_length(sts_data) \
247 (((sts_data) >> 16) & 0x0FFFF)
248#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
249 (((sts_data) >> 32) & 0x0FF)
250#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
251 (((sts_data) >> 40) & 0x0FF)
252#define qlcnic_get_lro_sts_timestamp(sts_data) \
253 (((sts_data) >> 48) & 0x1)
254#define qlcnic_get_lro_sts_type(sts_data) \
255 (((sts_data) >> 49) & 0x7)
256#define qlcnic_get_lro_sts_push_flag(sts_data) \
257 (((sts_data) >> 52) & 0x1)
258#define qlcnic_get_lro_sts_seq_number(sts_data) \
259 ((sts_data) & 0x0FFFFFFFF)
260
261
262struct status_desc {
263 __le64 status_desc_data[2];
264} __attribute__ ((aligned(16)));
265
266/* UNIFIED ROMIMAGE */
267#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
268#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
269#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
270#define QLCNIC_UNI_DIR_SECT_FW 0x7
271
272/* Offsets */
273#define QLCNIC_UNI_CHIP_REV_OFF 10
274#define QLCNIC_UNI_FLAGS_OFF 11
275#define QLCNIC_UNI_BIOS_VERSION_OFF 12
276#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
277#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
278
279struct uni_table_desc{
280 u32 findex;
281 u32 num_entries;
282 u32 entry_size;
283 u32 reserved[5];
284};
285
286struct uni_data_desc{
287 u32 findex;
288 u32 size;
289 u32 reserved[5];
290};
291
292/* Flash Defines and Structures */
293#define QLCNIC_FLT_LOCATION 0x3F1000
294#define QLCNIC_FW_IMAGE_REGION 0x74
295#define QLCNIC_BOOTLD_REGION 0x72
296struct qlcnic_flt_header {
297 u16 version;
298 u16 len;
299 u16 checksum;
300 u16 reserved;
301};
302
303struct qlcnic_flt_entry {
304 u8 region;
305 u8 reserved0;
306 u8 attrib;
307 u8 reserved1;
308 u32 size;
309 u32 start_addr;
310 u32 end_addr;
311};
312
313/* Magic number to let user know flash is programmed */
314#define QLCNIC_BDINFO_MAGIC 0x12345678
315
316#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
317#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
318#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
319#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
320#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
321#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
322#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
323#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
324#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
325#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
326#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
327#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
328#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
329#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
330
331#define QLCNIC_MSIX_TABLE_OFFSET 0x44
332
333/* Flash memory map */
334#define QLCNIC_BRDCFG_START 0x4000 /* board config */
335#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
336#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
337#define QLCNIC_USER_START	0x3E8000 /* Firmware info */
338
339#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
340#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
341#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
342#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
343
344#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
345#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
346
347#define QLCNIC_FW_MIN_SIZE (0x3fffff)
348#define QLCNIC_UNIFIED_ROMIMAGE 0
349#define QLCNIC_FLASH_ROMIMAGE 1
350#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
351
352#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
353#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
354
355extern char qlcnic_driver_name[];
356
357/* Number of status descriptors to handle per interrupt */
358#define MAX_STATUS_HANDLE (64)
359
360/*
361 * qlcnic_skb_frag{} holds the DMA mapping info for one SG-list entry. The
362 * mapping has to be freed when DMA is complete. It is part of qlcnic_cmd_buffer{}.
363 */
364struct qlcnic_skb_frag {
365 u64 dma;
366 u64 length;
367};
368
369/* Following defines are for the state of the buffers */
370#define QLCNIC_BUFFER_FREE 0
371#define QLCNIC_BUFFER_BUSY 1
372
373/*
374 * There will be one qlcnic_cmd_buffer per skb packet. These are
375 * used to save the DMA info needed for pci_unmap_page().
376 */
377struct qlcnic_cmd_buffer {
378 struct sk_buff *skb;
379 struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
380 u32 frag_count;
381};
382
383/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
384struct qlcnic_rx_buffer {
385 u16 ref_handle;
386 struct sk_buff *skb;
387 struct list_head list;
388 u64 dma;
389};
390
391/* Board types */
392#define QLCNIC_GBE 0x01
393#define QLCNIC_XGBE 0x02
394
395/*
396 * Interrupt coalescing defaults. The defaults are for 1500 MTU and are
397 * adjusted based on the configured MTU.
398 */
399#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
400#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
401
402#define QLCNIC_INTR_DEFAULT 0x04
403#define QLCNIC_CONFIG_INTR_COALESCE 3
404
405struct qlcnic_nic_intr_coalesce {
406 u8 type;
407 u8 sts_ring_mask;
408 u16 rx_packets;
409 u16 rx_time_us;
410 u16 flag;
411 u32 timer_out;
412};
413
414struct qlcnic_dump_template_hdr {
415 __le32 type;
416 __le32 offset;
417 __le32 size;
418 __le32 cap_mask;
419 __le32 num_entries;
420 __le32 version;
421 __le32 timestamp;
422 __le32 checksum;
423 __le32 drv_cap_mask;
424 __le32 sys_info[3];
425 __le32 saved_state[16];
426 __le32 cap_sizes[8];
427 __le32 rsvd[0];
428};
429
430struct qlcnic_fw_dump {
431 u8 clr; /* flag to indicate if dump is cleared */
432 u8 enable; /* enable/disable dump */
433 u32 size; /* total size of the dump */
434 void *data; /* dump data area */
435 struct qlcnic_dump_template_hdr *tmpl_hdr;
436};
437
438/*
439 * One hardware_context{} per adapter;
440 * contains interrupt info as well as shared hardware info.
441 */
442struct qlcnic_hardware_context {
443 void __iomem *pci_base0;
444 void __iomem *ocm_win_crb;
445
446 unsigned long pci_len0;
447
448 rwlock_t crb_lock;
449 struct mutex mem_lock;
450
451 u8 revision_id;
452 u8 pci_func;
453 u8 linkup;
454 u8 loopback_state;
455 u16 port_type;
456 u16 board_type;
457
458 struct qlcnic_nic_intr_coalesce coal;
459 struct qlcnic_fw_dump fw_dump;
460};
461
462struct qlcnic_adapter_stats {
463 u64 xmitcalled;
464 u64 xmitfinished;
465 u64 rxdropped;
466 u64 txdropped;
467 u64 csummed;
468 u64 rx_pkts;
469 u64 lro_pkts;
470 u64 rxbytes;
471 u64 txbytes;
472 u64 lrobytes;
473 u64 lso_frames;
474 u64 xmit_on;
475 u64 xmit_off;
476 u64 skb_alloc_failure;
477 u64 null_rxbuf;
478 u64 rx_dma_map_error;
479 u64 tx_dma_map_error;
480};
481
482/*
483 * Rcv Descriptor Context. One such context per Rcv Descriptor ring. There may
484 * be one ring for normal packets, one for jumbo frames, and possibly others.
485 */
486struct qlcnic_host_rds_ring {
487 void __iomem *crb_rcv_producer;
488 struct rcv_desc *desc_head;
489 struct qlcnic_rx_buffer *rx_buf_arr;
490 u32 num_desc;
491 u32 producer;
492 u32 dma_size;
493 u32 skb_size;
494 u32 flags;
495 struct list_head free_list;
496 spinlock_t lock;
497 dma_addr_t phys_addr;
498} ____cacheline_internodealigned_in_smp;
499
500struct qlcnic_host_sds_ring {
501 u32 consumer;
502 u32 num_desc;
503 void __iomem *crb_sts_consumer;
504
505 struct status_desc *desc_head;
506 struct qlcnic_adapter *adapter;
507 struct napi_struct napi;
508 struct list_head free_list[NUM_RCV_DESC_RINGS];
509
510 void __iomem *crb_intr_mask;
511 int irq;
512
513 dma_addr_t phys_addr;
514 char name[IFNAMSIZ+4];
515} ____cacheline_internodealigned_in_smp;
516
517struct qlcnic_host_tx_ring {
518 u32 producer;
519 u32 sw_consumer;
520 u32 num_desc;
521 void __iomem *crb_cmd_producer;
522 struct cmd_desc_type0 *desc_head;
523 struct qlcnic_cmd_buffer *cmd_buf_arr;
524 __le32 *hw_consumer;
525
526 dma_addr_t phys_addr;
527 dma_addr_t hw_cons_phys_addr;
528 struct netdev_queue *txq;
529} ____cacheline_internodealigned_in_smp;
530
531/*
532 * Receive context. There is one such structure per instance of the
533 * receive processing. Any state information that is relevant to
534 * receive processing must be in this structure. Global data may be
535 * kept elsewhere.
536 */
537struct qlcnic_recv_context {
538 struct qlcnic_host_rds_ring *rds_rings;
539 struct qlcnic_host_sds_ring *sds_rings;
540 u32 state;
541 u16 context_id;
542 u16 virt_port;
543
544};
545
546/* HW context creation */
547
548#define QLCNIC_OS_CRB_RETRY_COUNT 4000
549#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
550 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
551
552#define QLCNIC_CDRP_CMD_BIT 0x80000000
553
554/*
555 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
556 * in the crb QLCNIC_CDRP_CRB_OFFSET.
557 */
558#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
559#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
560
561#define QLCNIC_CDRP_RSP_OK 0x00000001
562#define QLCNIC_CDRP_RSP_FAIL 0x00000002
563#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
564
565/*
566 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
567 * the crb QLCNIC_CDRP_CRB_OFFSET.
568 */
569#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
570#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
571
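[Editor's note: the convention behind the two macros above, sketched standalone: the host posts a command word with bit 31 set, and the firmware's response in the same CRB register has bit 31 clear, which is what qlcnic_poll_rsp() in qlcnic_ctx.c spins on.]

	#include <stdint.h>
	#include <stdbool.h>

	#define CDRP_CMD_BIT	0x80000000u	/* local copy of QLCNIC_CDRP_CMD_BIT */

	static uint32_t cdrp_form_cmd(uint32_t cmd)
	{
		return CDRP_CMD_BIT | cmd;		/* host -> firmware: bit 31 set */
	}

	static bool cdrp_is_response(uint32_t word)
	{
		return (word & CDRP_CMD_BIT) == 0;	/* firmware -> host: bit 31 clear */
	}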
572#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
573#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
574#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
575#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
576#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
577#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
578#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
579#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
580#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
581#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
582#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
583#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
584#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
585#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
586#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
587#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
588#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
589#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
590#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
591
592#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
593#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
594#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
595#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
596#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
597#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
598#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
599#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
600#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
601#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
602#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
603#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
604#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
605
606#define QLCNIC_RCODE_SUCCESS 0
607#define QLCNIC_RCODE_NOT_SUPPORTED 9
608#define QLCNIC_RCODE_TIMEOUT 17
609#define QLCNIC_DESTROY_CTX_RESET 0
610
611/*
612 * Capabilities Announced
613 */
614#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
615#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
616#define QLCNIC_CAP0_LSO (1 << 6)
617#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
618#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
619#define QLCNIC_CAP0_VALIDOFF (1 << 11)
620
621/*
622 * Context state
623 */
624#define QLCNIC_HOST_CTX_STATE_FREED 0
625#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
626
627/*
628 * Rx context
629 */
630
631struct qlcnic_hostrq_sds_ring {
632 __le64 host_phys_addr; /* Ring base addr */
633 __le32 ring_size; /* Ring entries */
634 __le16 msi_index;
635 __le16 rsvd; /* Padding */
636} __packed;
637
638struct qlcnic_hostrq_rds_ring {
639 __le64 host_phys_addr; /* Ring base addr */
640 __le64 buff_size; /* Packet buffer size */
641 __le32 ring_size; /* Ring entries */
642 __le32 ring_kind; /* Class of ring */
643} __packed;
644
645struct qlcnic_hostrq_rx_ctx {
646 __le64 host_rsp_dma_addr; /* Response dma'd here */
647 __le32 capabilities[4]; /* Flag bit vector */
648 __le32 host_int_crb_mode; /* Interrupt crb usage */
649 __le32 host_rds_crb_mode; /* RDS crb usage */
650 /* These ring offsets are relative to data[0] below */
651 __le32 rds_ring_offset; /* Offset to RDS config */
652 __le32 sds_ring_offset; /* Offset to SDS config */
653 __le16 num_rds_rings; /* Count of RDS rings */
654 __le16 num_sds_rings; /* Count of SDS rings */
655 __le16 valid_field_offset;
656 u8 txrx_sds_binding;
657 u8 msix_handler;
658 u8 reserved[128]; /* reserve space for future expansion*/
659 /* MUST BE 64-bit aligned.
660 The following is packed:
661 - N hostrq_rds_rings
662 - N hostrq_sds_rings */
663 char data[0];
664} __packed;
665
666struct qlcnic_cardrsp_rds_ring{
667 __le32 host_producer_crb; /* Crb to use */
668 __le32 rsvd1; /* Padding */
669} __packed;
670
671struct qlcnic_cardrsp_sds_ring {
672 __le32 host_consumer_crb; /* Crb to use */
673 __le32 interrupt_crb; /* Crb to use */
674} __packed;
675
676struct qlcnic_cardrsp_rx_ctx {
677 /* These ring offsets are relative to data[0] below */
678 __le32 rds_ring_offset; /* Offset to RDS config */
679 __le32 sds_ring_offset; /* Offset to SDS config */
680 __le32 host_ctx_state; /* Starting State */
681 __le32 num_fn_per_port; /* How many PCI fn share the port */
682 __le16 num_rds_rings; /* Count of RDS rings */
683 __le16 num_sds_rings; /* Count of SDS rings */
684 __le16 context_id; /* Handle for context */
685 u8 phys_port; /* Physical id of port */
686 u8 virt_port; /* Virtual/Logical id of port */
687 u8 reserved[128]; /* save space for future expansion */
688 /* MUST BE 64-bit aligned.
689 The following is packed:
690 - N cardrsp_rds_rings
691	   - N cardrsp_sds_rings */
692 char data[0];
693} __packed;
694
695#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
696 (sizeof(HOSTRQ_RX) + \
697 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
698 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
699
700#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
701 (sizeof(CARDRSP_RX) + \
702 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
703 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
704
705/*
706 * Tx context
707 */
708
709struct qlcnic_hostrq_cds_ring {
710 __le64 host_phys_addr; /* Ring base addr */
711 __le32 ring_size; /* Ring entries */
712 __le32 rsvd; /* Padding */
713} __packed;
714
715struct qlcnic_hostrq_tx_ctx {
716 __le64 host_rsp_dma_addr; /* Response dma'd here */
717 __le64 cmd_cons_dma_addr; /* */
718 __le64 dummy_dma_addr; /* */
719 __le32 capabilities[4]; /* Flag bit vector */
720 __le32 host_int_crb_mode; /* Interrupt crb usage */
721 __le32 rsvd1; /* Padding */
722 __le16 rsvd2; /* Padding */
723 __le16 interrupt_ctl;
724 __le16 msi_index;
725 __le16 rsvd3; /* Padding */
726 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
727 u8 reserved[128]; /* future expansion */
728} __packed;
729
730struct qlcnic_cardrsp_cds_ring {
731 __le32 host_producer_crb; /* Crb to use */
732 __le32 interrupt_crb; /* Crb to use */
733} __packed;
734
735struct qlcnic_cardrsp_tx_ctx {
736 __le32 host_ctx_state; /* Starting state */
737 __le16 context_id; /* Handle for context */
738 u8 phys_port; /* Physical id of port */
739 u8 virt_port; /* Virtual/Logical id of port */
740 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
741 u8 reserved[128]; /* future expansion */
742} __packed;
743
744#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
745#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
746
747/* CRB */
748
749#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
750#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
751#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
752#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
753
754#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
755#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
756#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
757#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
758#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
759
760
761/* MAC */
762
763#define MC_COUNT_P3P 38
764
765#define QLCNIC_MAC_NOOP 0
766#define QLCNIC_MAC_ADD 1
767#define QLCNIC_MAC_DEL 2
768#define QLCNIC_MAC_VLAN_ADD 3
769#define QLCNIC_MAC_VLAN_DEL 4
770
771struct qlcnic_mac_list_s {
772 struct list_head list;
773 uint8_t mac_addr[ETH_ALEN+2];
774};
775
776#define QLCNIC_HOST_REQUEST 0x13
777#define QLCNIC_REQUEST 0x14
778
779#define QLCNIC_MAC_EVENT 0x1
780
781#define QLCNIC_IP_UP 2
782#define QLCNIC_IP_DOWN 3
783
784#define QLCNIC_ILB_MODE 0x1
785#define QLCNIC_ELB_MODE 0x2
786
787#define QLCNIC_LINKEVENT 0x1
788#define QLCNIC_LB_RESPONSE 0x2
789#define QLCNIC_IS_LB_CONFIGURED(VAL) \
790 (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
791
792/*
793 * Driver --> Firmware
794 */
795#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
796#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
797#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
798#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
799#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
800#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
801
802#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
803#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
804#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
805#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
806
807/*
808 * Firmware --> Driver
809 */
810
811#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
812#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
813
814#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
815#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
816#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
817
818#define QLCNIC_LRO_REQUEST_CLEANUP 4
819
820/* Capabilities received */
821#define QLCNIC_FW_CAPABILITY_TSO BIT_1
822#define QLCNIC_FW_CAPABILITY_BDG BIT_8
823#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
824#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
825#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
826
827/* module types */
828#define LINKEVENT_MODULE_NOT_PRESENT 1
829#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
830#define LINKEVENT_MODULE_OPTICAL_SRLR 3
831#define LINKEVENT_MODULE_OPTICAL_LRM 4
832#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
833#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
834#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
835#define LINKEVENT_MODULE_TWINAX 8
836
837#define LINKSPEED_10GBPS 10000
838#define LINKSPEED_1GBPS 1000
839#define LINKSPEED_100MBPS 100
840#define LINKSPEED_10MBPS 10
841
842#define LINKSPEED_ENCODED_10MBPS 0
843#define LINKSPEED_ENCODED_100MBPS 1
844#define LINKSPEED_ENCODED_1GBPS 2
845
846#define LINKEVENT_AUTONEG_DISABLED 0
847#define LINKEVENT_AUTONEG_ENABLED 1
848
849#define LINKEVENT_HALF_DUPLEX 0
850#define LINKEVENT_FULL_DUPLEX 1
851
852#define LINKEVENT_LINKSPEED_MBPS 0
853#define LINKEVENT_LINKSPEED_ENCODED 1
854
855/* firmware response header:
856 * 63:58 - message type
857 * 57:56 - owner
858 * 55:53 - desc count
859 * 52:48 - reserved
860 * 47:40 - completion id
861 * 39:32 - opcode
862 * 31:16 - error code
863 * 15:00 - reserved
864 */
865#define qlcnic_get_nic_msg_opcode(msg_hdr) \
866 ((msg_hdr >> 32) & 0xFF)
867
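[Editor's note: a standalone sketch that decodes a few more fields of the response header laid out above, alongside the opcode macro; byte-order handling is omitted.]

	#include <stdint.h>

	static void decode_fw_msg_hdr(uint64_t hdr)
	{
		unsigned int opcode   = (hdr >> 32) & 0xff;	/* bits 39:32 */
		unsigned int compl_id = (hdr >> 40) & 0xff;	/* bits 47:40 */
		unsigned int error    = (hdr >> 16) & 0xffff;	/* bits 31:16 */
		unsigned int desc_cnt = (hdr >> 53) & 0x7;	/* bits 55:53 */

		(void)opcode; (void)compl_id; (void)error; (void)desc_cnt;
	}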
868struct qlcnic_fw_msg {
869 union {
870 struct {
871 u64 hdr;
872 u64 body[7];
873 };
874 u64 words[8];
875 };
876};
877
878struct qlcnic_nic_req {
879 __le64 qhdr;
880 __le64 req_hdr;
881 __le64 words[6];
882} __packed;
883
884struct qlcnic_mac_req {
885 u8 op;
886 u8 tag;
887 u8 mac_addr[6];
888};
889
890struct qlcnic_vlan_req {
891 __le16 vlan_id;
892 __le16 rsvd[3];
893} __packed;
894
895struct qlcnic_ipaddr {
896 __be32 ipv4;
897 __be32 ipv6[4];
898};
899
900#define QLCNIC_MSI_ENABLED 0x02
901#define QLCNIC_MSIX_ENABLED 0x04
902#define QLCNIC_LRO_ENABLED 0x08
903#define QLCNIC_LRO_DISABLED 0x00
904#define QLCNIC_BRIDGE_ENABLED 0X10
905#define QLCNIC_DIAG_ENABLED 0x20
906#define QLCNIC_ESWITCH_ENABLED 0x40
907#define QLCNIC_ADAPTER_INITIALIZED 0x80
908#define QLCNIC_TAGGING_ENABLED 0x100
909#define QLCNIC_MACSPOOF 0x200
910#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
911#define QLCNIC_PROMISC_DISABLED 0x800
912#define QLCNIC_NEED_FLR 0x1000
913#define QLCNIC_FW_RESET_OWNER 0x2000
914#define QLCNIC_IS_MSI_FAMILY(adapter) \
915 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
916
917#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
918#define QLCNIC_MSIX_TBL_SPACE 8192
919#define QLCNIC_PCI_REG_MSIX_TBL 0x44
920#define QLCNIC_MSIX_TBL_PGSIZE 4096
921
922#define QLCNIC_NETDEV_WEIGHT 128
923#define QLCNIC_ADAPTER_UP_MAGIC 777
924
925#define __QLCNIC_FW_ATTACHED 0
926#define __QLCNIC_DEV_UP 1
927#define __QLCNIC_RESETTING 2
928#define __QLCNIC_START_FW 4
929#define __QLCNIC_AER 5
930#define __QLCNIC_DIAG_RES_ALLOC 6
931
932#define QLCNIC_INTERRUPT_TEST 1
933#define QLCNIC_LOOPBACK_TEST 2
934#define QLCNIC_LED_TEST 3
935
936#define QLCNIC_FILTER_AGE 80
937#define QLCNIC_READD_AGE 20
938#define QLCNIC_LB_MAX_FILTERS 64
939
940/* QLCNIC Driver Error Code */
941#define QLCNIC_FW_NOT_RESPOND 51
942#define QLCNIC_TEST_IN_PROGRESS 52
943#define QLCNIC_UNDEFINED_ERROR 53
944#define QLCNIC_LB_CABLE_NOT_CONN 54
945
946struct qlcnic_filter {
947 struct hlist_node fnode;
948 u8 faddr[ETH_ALEN];
949 __le16 vlan_id;
950 unsigned long ftime;
951};
952
953struct qlcnic_filter_hash {
954 struct hlist_head *fhead;
955 u8 fnum;
956 u8 fmax;
957};
958
959struct qlcnic_adapter {
960 struct qlcnic_hardware_context *ahw;
961 struct qlcnic_recv_context *recv_ctx;
962 struct qlcnic_host_tx_ring *tx_ring;
963 struct net_device *netdev;
964 struct pci_dev *pdev;
965
966 unsigned long state;
967 u32 flags;
968
969 u16 num_txd;
970 u16 num_rxd;
971 u16 num_jumbo_rxd;
972 u16 max_rxd;
973 u16 max_jumbo_rxd;
974
975 u8 max_rds_rings;
976 u8 max_sds_rings;
977 u8 msix_supported;
978 u8 portnum;
979 u8 physical_port;
980 u8 reset_context;
981
982 u8 mc_enabled;
983 u8 max_mc_count;
984 u8 fw_wait_cnt;
985 u8 fw_fail_cnt;
986 u8 tx_timeo_cnt;
987 u8 need_fw_reset;
988
989 u8 has_link_events;
990 u8 fw_type;
991 u16 tx_context_id;
992 u16 is_up;
993
994 u16 link_speed;
995 u16 link_duplex;
996 u16 link_autoneg;
997 u16 module_type;
998
999 u16 op_mode;
1000 u16 switch_mode;
1001 u16 max_tx_ques;
1002 u16 max_rx_ques;
1003 u16 max_mtu;
1004 u16 pvid;
1005
1006 u32 fw_hal_version;
1007 u32 capabilities;
1008 u32 irq;
1009 u32 temp;
1010
1011 u32 int_vec_bit;
1012 u32 heartbeat;
1013
1014 u8 max_mac_filters;
1015 u8 dev_state;
1016 u8 diag_test;
1017 char diag_cnt;
1018 u8 reset_ack_timeo;
1019 u8 dev_init_timeo;
1020 u16 msg_enable;
1021
1022 u8 mac_addr[ETH_ALEN];
1023
1024 u64 dev_rst_time;
1025 u8 mac_learn;
1026 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
1027
1028 struct qlcnic_npar_info *npars;
1029 struct qlcnic_eswitch *eswitch;
1030 struct qlcnic_nic_template *nic_ops;
1031
1032 struct qlcnic_adapter_stats stats;
1033 struct list_head mac_list;
1034
1035 void __iomem *tgt_mask_reg;
1036 void __iomem *tgt_status_reg;
1037 void __iomem *crb_int_state_reg;
1038 void __iomem *isr_int_vec;
1039
1040 struct msix_entry *msix_entries;
1041
1042 struct delayed_work fw_work;
1043
1044
1045 struct qlcnic_filter_hash fhash;
1046
1047 spinlock_t tx_clean_lock;
1048 spinlock_t mac_learn_lock;
1049 __le32 file_prd_off; /*File fw product offset*/
1050 u32 fw_version;
1051 const struct firmware *fw;
1052};
1053
1054struct qlcnic_info {
1055 __le16 pci_func;
1056 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
1057 __le16 phys_port;
1058 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
1059
1060 __le32 capabilities;
1061 u8 max_mac_filters;
1062 u8 reserved1;
1063 __le16 max_mtu;
1064
1065 __le16 max_tx_ques;
1066 __le16 max_rx_ques;
1067 __le16 min_tx_bw;
1068 __le16 max_tx_bw;
1069 u8 reserved2[104];
1070} __packed;
1071
1072struct qlcnic_pci_info {
1073 __le16 id; /* pci function id */
1074 __le16 active; /* 1 = Enabled */
1075 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
1076 __le16 default_port; /* default port number */
1077
1078	__le16 tx_min_bw;	/* Multiple of 100 Mbps */
1079 __le16 tx_max_bw;
1080 __le16 reserved1[2];
1081
1082 u8 mac[ETH_ALEN];
1083 u8 reserved2[106];
1084} __packed;
1085
1086struct qlcnic_npar_info {
1087 u16 pvid;
1088 u16 min_bw;
1089 u16 max_bw;
1090 u8 phy_port;
1091 u8 type;
1092 u8 active;
1093 u8 enable_pm;
1094 u8 dest_npar;
1095 u8 discard_tagged;
1096 u8 mac_override;
1097 u8 mac_anti_spoof;
1098 u8 promisc_mode;
1099 u8 offload_flags;
1100};
1101
1102struct qlcnic_eswitch {
1103 u8 port;
1104 u8 active_vports;
1105 u8 active_vlans;
1106 u8 active_ucast_filters;
1107 u8 max_ucast_filters;
1108 u8 max_active_vlans;
1109
1110 u32 flags;
1111#define QLCNIC_SWITCH_ENABLE BIT_1
1112#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
1113#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
1114#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
1115};
1116
1117
1118/* Return codes for Error handling */
1119#define QL_STATUS_INVALID_PARAM -1
1120
1121#define MAX_BW 100 /* % of link speed */
1122#define MAX_VLAN_ID 4095
1123#define MIN_VLAN_ID 2
1124#define DEFAULT_MAC_LEARN 1
1125
1126#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1127#define IS_VALID_BW(bw) (bw <= MAX_BW)
1128
1129struct qlcnic_pci_func_cfg {
1130 u16 func_type;
1131 u16 min_bw;
1132 u16 max_bw;
1133 u16 port_num;
1134 u8 pci_func;
1135 u8 func_state;
1136 u8 def_mac_addr[6];
1137};
1138
1139struct qlcnic_npar_func_cfg {
1140 u32 fw_capab;
1141 u16 port_num;
1142 u16 min_bw;
1143 u16 max_bw;
1144 u16 max_tx_queues;
1145 u16 max_rx_queues;
1146 u8 pci_func;
1147 u8 op_mode;
1148};
1149
1150struct qlcnic_pm_func_cfg {
1151 u8 pci_func;
1152 u8 action;
1153 u8 dest_npar;
1154 u8 reserved[5];
1155};
1156
1157struct qlcnic_esw_func_cfg {
1158 u16 vlan_id;
1159 u8 op_mode;
1160 u8 op_type;
1161 u8 pci_func;
1162 u8 host_vlan_tag;
1163 u8 promisc_mode;
1164 u8 discard_tagged;
1165 u8 mac_override;
1166 u8 mac_anti_spoof;
1167 u8 offload_flags;
1168 u8 reserved[5];
1169};
1170
1171#define QLCNIC_STATS_VERSION 1
1172#define QLCNIC_STATS_PORT 1
1173#define QLCNIC_STATS_ESWITCH 2
1174#define QLCNIC_QUERY_RX_COUNTER 0
1175#define QLCNIC_QUERY_TX_COUNTER 1
1176#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL
1177
1178#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
1179do { \
1180 if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \
1181 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
1182 (VAL1) = (VAL2); \
1183 else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \
1184 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
1185 (VAL1) += (VAL2); \
1186} while (0)
1187
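[Editor's note: the sentinel above is why the macro cannot simply add the two values: 0xffffffffffffffff marks a counter the firmware could not report, and adding it would corrupt a valid total. A standalone functional sketch of the same accumulation rule.]

	#include <stdint.h>

	#define ESW_STATS_NOT_AVAIL	0xffffffffffffffffULL	/* local copy */

	static uint64_t esw_stat_add(uint64_t acc, uint64_t sample)
	{
		if (sample == ESW_STATS_NOT_AVAIL)	/* nothing to add */
			return acc;
		if (acc == ESW_STATS_NOT_AVAIL)		/* first valid sample */
			return sample;
		return acc + sample;			/* both valid: accumulate */
	}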
1188struct __qlcnic_esw_statistics {
1189 __le16 context_id;
1190 __le16 version;
1191 __le16 size;
1192 __le16 unused;
1193 __le64 unicast_frames;
1194 __le64 multicast_frames;
1195 __le64 broadcast_frames;
1196 __le64 dropped_frames;
1197 __le64 errors;
1198 __le64 local_frames;
1199 __le64 numbytes;
1200 __le64 rsvd[3];
1201} __packed;
1202
1203struct qlcnic_esw_statistics {
1204 struct __qlcnic_esw_statistics rx;
1205 struct __qlcnic_esw_statistics tx;
1206};
1207
1208struct qlcnic_common_entry_hdr {
1209 __le32 type;
1210 __le32 offset;
1211 __le32 cap_size;
1212 u8 mask;
1213 u8 rsvd[2];
1214 u8 flags;
1215} __packed;
1216
1217struct __crb {
1218 __le32 addr;
1219 u8 stride;
1220 u8 rsvd1[3];
1221 __le32 data_size;
1222 __le32 no_ops;
1223 __le32 rsvd2[4];
1224} __packed;
1225
1226struct __ctrl {
1227 __le32 addr;
1228 u8 stride;
1229 u8 index_a;
1230 __le16 timeout;
1231 __le32 data_size;
1232 __le32 no_ops;
1233 u8 opcode;
1234 u8 index_v;
1235 u8 shl_val;
1236 u8 shr_val;
1237 __le32 val1;
1238 __le32 val2;
1239 __le32 val3;
1240} __packed;
1241
1242struct __cache {
1243 __le32 addr;
1244 __le16 stride;
1245 __le16 init_tag_val;
1246 __le32 size;
1247 __le32 no_ops;
1248 __le32 ctrl_addr;
1249 __le32 ctrl_val;
1250 __le32 read_addr;
1251 u8 read_addr_stride;
1252 u8 read_addr_num;
1253 u8 rsvd1[2];
1254} __packed;
1255
1256struct __ocm {
1257 u8 rsvd[8];
1258 __le32 size;
1259 __le32 no_ops;
1260 u8 rsvd1[8];
1261 __le32 read_addr;
1262 __le32 read_addr_stride;
1263} __packed;
1264
1265struct __mem {
1266 u8 rsvd[24];
1267 __le32 addr;
1268 __le32 size;
1269} __packed;
1270
1271struct __mux {
1272 __le32 addr;
1273 u8 rsvd[4];
1274 __le32 size;
1275 __le32 no_ops;
1276 __le32 val;
1277 __le32 val_stride;
1278 __le32 read_addr;
1279 u8 rsvd2[4];
1280} __packed;
1281
1282struct __queue {
1283 __le32 sel_addr;
1284 __le16 stride;
1285 u8 rsvd[2];
1286 __le32 size;
1287 __le32 no_ops;
1288 u8 rsvd2[8];
1289 __le32 read_addr;
1290 u8 read_addr_stride;
1291 u8 read_addr_cnt;
1292 u8 rsvd3[2];
1293} __packed;
1294
1295struct qlcnic_dump_entry {
1296 struct qlcnic_common_entry_hdr hdr;
1297 union {
1298 struct __crb crb;
1299 struct __cache cache;
1300 struct __ocm ocm;
1301 struct __mem mem;
1302 struct __mux mux;
1303 struct __queue que;
1304 struct __ctrl ctrl;
1305 } region;
1306} __packed;
1307
1308enum op_codes {
1309 QLCNIC_DUMP_NOP = 0,
1310 QLCNIC_DUMP_READ_CRB = 1,
1311 QLCNIC_DUMP_READ_MUX = 2,
1312 QLCNIC_DUMP_QUEUE = 3,
1313 QLCNIC_DUMP_BRD_CONFIG = 4,
1314 QLCNIC_DUMP_READ_OCM = 6,
1315 QLCNIC_DUMP_PEG_REG = 7,
1316 QLCNIC_DUMP_L1_DTAG = 8,
1317 QLCNIC_DUMP_L1_ITAG = 9,
1318 QLCNIC_DUMP_L1_DATA = 11,
1319 QLCNIC_DUMP_L1_INST = 12,
1320 QLCNIC_DUMP_L2_DTAG = 21,
1321 QLCNIC_DUMP_L2_ITAG = 22,
1322 QLCNIC_DUMP_L2_DATA = 23,
1323 QLCNIC_DUMP_L2_INST = 24,
1324 QLCNIC_DUMP_READ_ROM = 71,
1325 QLCNIC_DUMP_READ_MEM = 72,
1326 QLCNIC_DUMP_READ_CTRL = 98,
1327 QLCNIC_DUMP_TLHDR = 99,
1328 QLCNIC_DUMP_RDEND = 255
1329};
1330
1331#define QLCNIC_DUMP_WCRB BIT_0
1332#define QLCNIC_DUMP_RWCRB BIT_1
1333#define QLCNIC_DUMP_ANDCRB BIT_2
1334#define QLCNIC_DUMP_ORCRB BIT_3
1335#define QLCNIC_DUMP_POLLCRB BIT_4
1336#define QLCNIC_DUMP_RD_SAVE BIT_5
1337#define QLCNIC_DUMP_WRT_SAVED BIT_6
1338#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
1339#define QLCNIC_DUMP_SKIP BIT_7
1340
1341#define QLCNIC_DUMP_MASK_MIN 3
1342#define QLCNIC_DUMP_MASK_DEF 0x1f
1343#define QLCNIC_DUMP_MASK_MAX 0xff
1344#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1345#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1346#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
1347
1348struct qlcnic_dump_operations {
1349 enum op_codes opcode;
1350 u32 (*handler)(struct qlcnic_adapter *,
1351 struct qlcnic_dump_entry *, u32 *);
1352};
1353
1354int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
1355int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
1356
1357u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
1358int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
1359int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
1360int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
1361void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
1362void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
1363
1364#define ADDR_IN_RANGE(addr, low, high) \
1365 (((addr) < (high)) && ((addr) >= (low)))
1366
1367#define QLCRD32(adapter, off) \
1368 (qlcnic_hw_read_wx_2M(adapter, off))
1369#define QLCWR32(adapter, off, val) \
1370 (qlcnic_hw_write_wx_2M(adapter, off, val))
1371
1372int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
1373void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1374
1375#define qlcnic_rom_lock(a) \
1376 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
1377#define qlcnic_rom_unlock(a) \
1378 qlcnic_pcie_sem_unlock((a), 2)
1379#define qlcnic_phy_lock(a) \
1380 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1381#define qlcnic_phy_unlock(a) \
1382 qlcnic_pcie_sem_unlock((a), 3)
1383#define qlcnic_api_lock(a) \
1384 qlcnic_pcie_sem_lock((a), 5, 0)
1385#define qlcnic_api_unlock(a) \
1386 qlcnic_pcie_sem_unlock((a), 5)
1387#define qlcnic_sw_lock(a) \
1388 qlcnic_pcie_sem_lock((a), 6, 0)
1389#define qlcnic_sw_unlock(a) \
1390 qlcnic_pcie_sem_unlock((a), 6)
1391#define crb_win_lock(a) \
1392 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1393#define crb_win_unlock(a) \
1394 qlcnic_pcie_sem_unlock((a), 7)
1395
1396int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1397int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1398int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1399void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1400void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1401int qlcnic_dump_fw(struct qlcnic_adapter *);
1402
1403/* Functions from qlcnic_init.c */
1404int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1405int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1406void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1407void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1408int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1409int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1410int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
1411
1412int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
1413int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1414 u8 *bytes, size_t size);
1415int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1416void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1417
1418void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1419
1420int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1421void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1422
1423int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
1424void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
1425
1426void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1427void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1428void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1429
1430int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1431void qlcnic_watchdog_task(struct work_struct *work);
1432void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1433 struct qlcnic_host_rds_ring *rds_ring);
1434int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1435void qlcnic_set_multi(struct net_device *netdev);
1436void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1437int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1438int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1439int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1440int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
1441int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1442void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1443
1444int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1445int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1446u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
1447int qlcnic_set_features(struct net_device *netdev, u32 features);
1448int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1449int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1450int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1451void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1452 struct qlcnic_host_tx_ring *tx_ring);
1453void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1454void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1455void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
1456int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
1457
1458/* Functions from qlcnic_ethtool.c */
1459int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
1460
1461/* Functions from qlcnic_main.c */
1462int qlcnic_reset_context(struct qlcnic_adapter *);
1463u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1464 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1465void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1466int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1467netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1468int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
1469int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
1470void qlcnic_dev_request_reset(struct qlcnic_adapter *);
1471void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1472
1473/* Management functions */
1474int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
1475int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
1476int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
1477int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
1478
1479/* eSwitch management functions */
1480int qlcnic_config_switch_port(struct qlcnic_adapter *,
1481 struct qlcnic_esw_func_cfg *);
1482int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
1483 struct qlcnic_esw_func_cfg *);
1484int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1485int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1486 struct __qlcnic_esw_statistics *);
1487int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1488 struct __qlcnic_esw_statistics *);
1489int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1490extern int qlcnic_config_tso;
1491
1492/*
1493 * QLOGIC Board information
1494 */
1495
1496#define QLCNIC_MAX_BOARD_NAME_LEN 100
1497struct qlcnic_brdinfo {
1498 unsigned short vendor;
1499 unsigned short device;
1500 unsigned short sub_vendor;
1501 unsigned short sub_device;
1502 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1503};
1504
1505static const struct qlcnic_brdinfo qlcnic_boards[] = {
1506 {0x1077, 0x8020, 0x1077, 0x203,
1507 "8200 Series Single Port 10GbE Converged Network Adapter "
1508 "(TCP/IP Networking)"},
1509 {0x1077, 0x8020, 0x1077, 0x207,
1510 "8200 Series Dual Port 10GbE Converged Network Adapter "
1511 "(TCP/IP Networking)"},
1512 {0x1077, 0x8020, 0x1077, 0x20b,
1513 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1514 {0x1077, 0x8020, 0x1077, 0x20c,
1515 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1516 {0x1077, 0x8020, 0x1077, 0x20f,
1517 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1518 {0x1077, 0x8020, 0x103c, 0x3733,
1519 "NC523SFP 10Gb 2-port Server Adapter"},
1520 {0x1077, 0x8020, 0x103c, 0x3346,
1521 "CN1000Q Dual Port Converged Network Adapter"},
1522 {0x1077, 0x8020, 0x1077, 0x210,
1523 "QME8242-k 10GbE Dual Port Mezzanine Card"},
1524 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1525};
1526
1527#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1528
1529static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1530{
1531 if (likely(tx_ring->producer < tx_ring->sw_consumer))
1532 return tx_ring->sw_consumer - tx_ring->producer;
1533 else
1534 return tx_ring->sw_consumer + tx_ring->num_desc -
1535 tx_ring->producer;
1536}
1537
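[Editor's note: a hedged usage sketch, not a complete function. The real check presumably lives in the transmit path in qlcnic_main.c (not shown in this hunk): the free-descriptor count returned by qlcnic_tx_avail() is compared against TX_STOP_THRESH so the queue is stopped before the ring can overflow.]

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH))
		netif_stop_queue(netdev);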
1538extern const struct ethtool_ops qlcnic_ethtool_ops;
1539
1540struct qlcnic_nic_template {
1541 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1542 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1543 int (*start_firmware) (struct qlcnic_adapter *);
1544};
1545
1546#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1547 if (NETIF_MSG_##lvl & adapter->msg_enable) \
1548 printk(KERN_INFO "%s: %s: " _fmt, \
1549 dev_name(&adapter->pdev->dev), \
1550 __func__, ##_args); \
1551 } while (0)
1552
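[Editor's note: a hypothetical usage sketch of the QLCDB() macro; any NETIF_MSG_* class from netdevice.h can be used. With NETIF_MSG_DRV set in adapter->msg_enable this expands to a printk() prefixed with the PCI device name and the calling function.]

	QLCDB(adapter, DRV, "diag test %d started\n", adapter->diag_test);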
1553#endif				/* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 00000000000..b0d32ddd2cc
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,1117 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic.h"
9
10static u32
11qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
12{
13 u32 rsp;
14 int timeout = 0;
15
16 do {
17		/* give at least 1 ms for the firmware to respond */
18 msleep(1);
19
20 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
21 return QLCNIC_CDRP_RSP_TIMEOUT;
22
23 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
24 } while (!QLCNIC_CDRP_IS_RSP(rsp));
25
26 return rsp;
27}
28
29u32
30qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
31 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
32{
33 u32 rsp;
34 u32 signature;
35 u32 rcode = QLCNIC_RCODE_SUCCESS;
36 struct pci_dev *pdev = adapter->pdev;
37
38 signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
39
40 /* Acquire semaphore before accessing CRB */
41 if (qlcnic_api_lock(adapter))
42 return QLCNIC_RCODE_TIMEOUT;
43
44 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
45 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
46 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
47 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
48 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
49
50 rsp = qlcnic_poll_rsp(adapter);
51
52 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
53 dev_err(&pdev->dev, "card response timeout.\n");
54 rcode = QLCNIC_RCODE_TIMEOUT;
55 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
56 rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
57		dev_err(&pdev->dev, "failed card response code: 0x%x\n",
58 rcode);
59 }
60
61 /* Release semaphore */
62 qlcnic_api_unlock(adapter);
63
64 return rcode;
65}
66
67static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
68{
69 uint64_t sum = 0;
70 int count = temp_size / sizeof(uint32_t);
71 while (count-- > 0)
72 sum += *temp_buffer++;
73 while (sum >> 32)
74 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
75 return ~sum;
76}
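[Editor's note: qlcnic_temp_checksum() folds a 64-bit running sum back into 32 bits and returns its complement, so the validation in qlcnic_fw_cmd_get_minidump_temp() below passes only when the folded sum over the whole template header, including the stored checksum word, comes to 0xffffffff. A standalone sketch of how a generator would have to pick that checksum word under this assumption; the real template layout is firmware-defined.]

	#include <stdint.h>
	#include <stddef.h>

	/* buf/nwords cover every header word except the checksum field itself. */
	static uint32_t pick_checksum_word(const uint32_t *buf, size_t nwords)
	{
		uint64_t sum = 0;
		size_t i;

		for (i = 0; i < nwords; i++)
			sum += buf[i];
		while (sum >> 32)
			sum = (sum & 0xffffffff) + (sum >> 32);

		/* make the folded total all-ones so ~sum in the validator is zero */
		return 0xffffffff - (uint32_t)sum;
	}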
77
78int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
79{
80 int err, i;
81 u16 temp_size;
82 void *tmp_addr;
83 u32 version, csum, *template, *tmp_buf;
84 struct qlcnic_hardware_context *ahw;
85 struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
86 dma_addr_t tmp_addr_t = 0;
87
88 ahw = adapter->ahw;
89 err = qlcnic_issue_cmd(adapter,
90 adapter->ahw->pci_func,
91 adapter->fw_hal_version,
92 0,
93 0,
94 0,
95 QLCNIC_CDRP_CMD_TEMP_SIZE);
96 if (err != QLCNIC_RCODE_SUCCESS) {
97 err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
98 dev_info(&adapter->pdev->dev,
99 "Can't get template size %d\n", err);
100 err = -EIO;
101 return err;
102 }
103 version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
104 temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
105 if (!temp_size)
106 return -EIO;
107
108 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
109 &tmp_addr_t, GFP_KERNEL);
110 if (!tmp_addr) {
111 dev_err(&adapter->pdev->dev,
112 "Can't get memory for FW dump template\n");
113 return -ENOMEM;
114 }
115 err = qlcnic_issue_cmd(adapter,
116 adapter->ahw->pci_func,
117 adapter->fw_hal_version,
118 LSD(tmp_addr_t),
119 MSD(tmp_addr_t),
120 temp_size,
121 QLCNIC_CDRP_CMD_GET_TEMP_HDR);
122
123 if (err != QLCNIC_RCODE_SUCCESS) {
124 dev_err(&adapter->pdev->dev,
125 "Failed to get mini dump template header %d\n", err);
126 err = -EIO;
127 goto error;
128 }
129 tmp_tmpl = tmp_addr;
130 csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
131 if (csum) {
132 dev_err(&adapter->pdev->dev,
133 "Template header checksum validation failed\n");
134 err = -EIO;
135 goto error;
136 }
137 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
138 if (!ahw->fw_dump.tmpl_hdr) {
139 err = -EIO;
140 goto error;
141 }
142 tmp_buf = tmp_addr;
143 template = (u32 *) ahw->fw_dump.tmpl_hdr;
144 for (i = 0; i < temp_size/sizeof(u32); i++)
145 *template++ = __le32_to_cpu(*tmp_buf++);
146
147 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
148 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
149 ahw->fw_dump.enable = 1;
150error:
151 dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
152 return err;
153}
154
155int
156qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
157{
158 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
159
160 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
161 if (qlcnic_issue_cmd(adapter,
162 adapter->ahw->pci_func,
163 adapter->fw_hal_version,
164 recv_ctx->context_id,
165 mtu,
166 0,
167 QLCNIC_CDRP_CMD_SET_MTU)) {
168
169 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
170 return -EIO;
171 }
172 }
173
174 return 0;
175}
176
177static int
178qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
179{
180 void *addr;
181 struct qlcnic_hostrq_rx_ctx *prq;
182 struct qlcnic_cardrsp_rx_ctx *prsp;
183 struct qlcnic_hostrq_rds_ring *prq_rds;
184 struct qlcnic_hostrq_sds_ring *prq_sds;
185 struct qlcnic_cardrsp_rds_ring *prsp_rds;
186 struct qlcnic_cardrsp_sds_ring *prsp_sds;
187 struct qlcnic_host_rds_ring *rds_ring;
188 struct qlcnic_host_sds_ring *sds_ring;
189
190 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
191 u64 phys_addr;
192
193 u8 i, nrds_rings, nsds_rings;
194 size_t rq_size, rsp_size;
195 u32 cap, reg, val, reg2;
196 int err;
197
198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
199
200 nrds_rings = adapter->max_rds_rings;
201 nsds_rings = adapter->max_sds_rings;
202
203 rq_size =
204 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
205 nsds_rings);
206 rsp_size =
207 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
208 nsds_rings);
209
210 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
211 &hostrq_phys_addr, GFP_KERNEL);
212 if (addr == NULL)
213 return -ENOMEM;
214 prq = addr;
215
216 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
217 &cardrsp_phys_addr, GFP_KERNEL);
218 if (addr == NULL) {
219 err = -ENOMEM;
220 goto out_free_rq;
221 }
222 prsp = addr;
223
224 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
225
226 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
227 | QLCNIC_CAP0_VALIDOFF);
228 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
229
230 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
231 msix_handler);
232 prq->txrx_sds_binding = nsds_rings - 1;
233
234 prq->capabilities[0] = cpu_to_le32(cap);
235 prq->host_int_crb_mode =
236 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
237 prq->host_rds_crb_mode =
238 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
239
240 prq->num_rds_rings = cpu_to_le16(nrds_rings);
241 prq->num_sds_rings = cpu_to_le16(nsds_rings);
242 prq->rds_ring_offset = 0;
243
244 val = le32_to_cpu(prq->rds_ring_offset) +
245 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
246 prq->sds_ring_offset = cpu_to_le32(val);
247
248 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
249 le32_to_cpu(prq->rds_ring_offset));
250
251 for (i = 0; i < nrds_rings; i++) {
252
253 rds_ring = &recv_ctx->rds_rings[i];
254 rds_ring->producer = 0;
255
256 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
257 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
258 prq_rds[i].ring_kind = cpu_to_le32(i);
259 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
260 }
261
262 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
263 le32_to_cpu(prq->sds_ring_offset));
264
265 for (i = 0; i < nsds_rings; i++) {
266
267 sds_ring = &recv_ctx->sds_rings[i];
268 sds_ring->consumer = 0;
269 memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
270
271 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
272 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
273 prq_sds[i].msi_index = cpu_to_le16(i);
274 }
275
276 phys_addr = hostrq_phys_addr;
277 err = qlcnic_issue_cmd(adapter,
278 adapter->ahw->pci_func,
279 adapter->fw_hal_version,
280 (u32)(phys_addr >> 32),
281 (u32)(phys_addr & 0xffffffff),
282 rq_size,
283 QLCNIC_CDRP_CMD_CREATE_RX_CTX);
284 if (err) {
285 dev_err(&adapter->pdev->dev,
286			"Failed to create rx ctx in firmware: %d\n", err);
287 goto out_free_rsp;
288 }
289
290
291 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
292 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
293
294 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
295 rds_ring = &recv_ctx->rds_rings[i];
296
297 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
298 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
299 }
300
301 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
302 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
303
304 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
305 sds_ring = &recv_ctx->sds_rings[i];
306
307 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
308 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
309
310 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
311 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
312 }
313
314 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
315 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
316 recv_ctx->virt_port = prsp->virt_port;
317
318out_free_rsp:
319 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
320 cardrsp_phys_addr);
321out_free_rq:
322 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
323 return err;
324}
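/*
 * Summary of the RX context handshake above: the host builds a request
 * (hostrq) in DMA-coherent memory describing every RDS/SDS ring
 * (physical address, ring size, buffer size, MSI index), hands its
 * physical address to firmware via QLCNIC_CDRP_CMD_CREATE_RX_CTX, and
 * firmware fills the card response (cardrsp) whose CRB offsets are
 * added to pci_base0 to obtain the per-ring producer, consumer and
 * interrupt-mask register addresses.  Both DMA buffers are released on
 * every exit path once they have been allocated.
 */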
325
326static void
327qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
328{
329 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
330
331 if (qlcnic_issue_cmd(adapter,
332 adapter->ahw->pci_func,
333 adapter->fw_hal_version,
334 recv_ctx->context_id,
335 QLCNIC_DESTROY_CTX_RESET,
336 0,
337 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
338
339 dev_err(&adapter->pdev->dev,
340 "Failed to destroy rx ctx in firmware\n");
341 }
342
343 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
344}
345
346static int
347qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
348{
349 struct qlcnic_hostrq_tx_ctx *prq;
350 struct qlcnic_hostrq_cds_ring *prq_cds;
351 struct qlcnic_cardrsp_tx_ctx *prsp;
352 void *rq_addr, *rsp_addr;
353 size_t rq_size, rsp_size;
354 u32 temp;
355 int err;
356 u64 phys_addr;
357 dma_addr_t rq_phys_addr, rsp_phys_addr;
358 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
359
360 /* reset host resources */
361 tx_ring->producer = 0;
362 tx_ring->sw_consumer = 0;
363 *(tx_ring->hw_consumer) = 0;
364
365 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
366 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
367 &rq_phys_addr, GFP_KERNEL);
368 if (!rq_addr)
369 return -ENOMEM;
370
371 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
372 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
373 &rsp_phys_addr, GFP_KERNEL);
374 if (!rsp_addr) {
375 err = -ENOMEM;
376 goto out_free_rq;
377 }
378
379 memset(rq_addr, 0, rq_size);
380 prq = rq_addr;
381
382 memset(rsp_addr, 0, rsp_size);
383 prsp = rsp_addr;
384
385 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
386
387 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
388 QLCNIC_CAP0_LSO);
389 prq->capabilities[0] = cpu_to_le32(temp);
390
391 prq->host_int_crb_mode =
392 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
393
394 prq->interrupt_ctl = 0;
395 prq->msi_index = 0;
396 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
397
398 prq_cds = &prq->cds_ring;
399
400 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
401 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
402
403 phys_addr = rq_phys_addr;
404 err = qlcnic_issue_cmd(adapter,
405 adapter->ahw->pci_func,
406 adapter->fw_hal_version,
407 (u32)(phys_addr >> 32),
408 ((u32)phys_addr & 0xffffffff),
409 rq_size,
410 QLCNIC_CDRP_CMD_CREATE_TX_CTX);
411
412 if (err == QLCNIC_RCODE_SUCCESS) {
413 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
414 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
415
416 adapter->tx_context_id =
417 le16_to_cpu(prsp->context_id);
418 } else {
419 dev_err(&adapter->pdev->dev,
420 			"Failed to create tx ctx in firmware %d\n", err);
421 err = -EIO;
422 }
423
424 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
425 rsp_phys_addr);
426
427out_free_rq:
428 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
429
430 return err;
431}
432
433static void
434qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
435{
436 if (qlcnic_issue_cmd(adapter,
437 adapter->ahw->pci_func,
438 adapter->fw_hal_version,
439 adapter->tx_context_id,
440 QLCNIC_DESTROY_CTX_RESET,
441 0,
442 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
443
444 dev_err(&adapter->pdev->dev,
445 "Failed to destroy tx ctx in firmware\n");
446 }
447}
448
449int
450qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
451{
452 return qlcnic_issue_cmd(adapter,
453 adapter->ahw->pci_func,
454 adapter->fw_hal_version,
455 config,
456 0,
457 0,
458 QLCNIC_CDRP_CMD_CONFIG_PORT);
459}
460
461int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
462{
463 void *addr;
464 int err;
465 int ring;
466 struct qlcnic_recv_context *recv_ctx;
467 struct qlcnic_host_rds_ring *rds_ring;
468 struct qlcnic_host_sds_ring *sds_ring;
469 struct qlcnic_host_tx_ring *tx_ring;
470
471 struct pci_dev *pdev = adapter->pdev;
472
473 recv_ctx = adapter->recv_ctx;
474 tx_ring = adapter->tx_ring;
475
476 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
477 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
478 if (tx_ring->hw_consumer == NULL) {
479 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
480 return -ENOMEM;
481 }
482
483 /* cmd desc ring */
484 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
485 &tx_ring->phys_addr, GFP_KERNEL);
486
487 if (addr == NULL) {
488 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
489 err = -ENOMEM;
490 goto err_out_free;
491 }
492
493 tx_ring->desc_head = addr;
494
495 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
496 rds_ring = &recv_ctx->rds_rings[ring];
497 addr = dma_alloc_coherent(&adapter->pdev->dev,
498 RCV_DESC_RINGSIZE(rds_ring),
499 &rds_ring->phys_addr, GFP_KERNEL);
500 if (addr == NULL) {
501 dev_err(&pdev->dev,
502 "failed to allocate rds ring [%d]\n", ring);
503 err = -ENOMEM;
504 goto err_out_free;
505 }
506 rds_ring->desc_head = addr;
507
508 }
509
510 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
511 sds_ring = &recv_ctx->sds_rings[ring];
512
513 addr = dma_alloc_coherent(&adapter->pdev->dev,
514 STATUS_DESC_RINGSIZE(sds_ring),
515 &sds_ring->phys_addr, GFP_KERNEL);
516 if (addr == NULL) {
517 dev_err(&pdev->dev,
518 "failed to allocate sds ring [%d]\n", ring);
519 err = -ENOMEM;
520 goto err_out_free;
521 }
522 sds_ring->desc_head = addr;
523 }
524
525 return 0;
526
527err_out_free:
528 qlcnic_free_hw_resources(adapter);
529 return err;
530}
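/*
 * Host-side descriptor memory allocated above, in order: the 4-byte
 * hw_consumer word (its DMA address is later passed to firmware as
 * cmd_cons_dma_addr so the tx consumer index can be posted back), the
 * tx descriptor ring, one descriptor ring per RDS ring and one per SDS
 * ring.  A failure at any point unwinds through
 * qlcnic_free_hw_resources(), which NULL-checks each pointer, so a
 * partially built set is torn down safely.
 */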
531
532
533int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
534{
535 int err;
536
537 if (adapter->flags & QLCNIC_NEED_FLR) {
538 pci_reset_function(adapter->pdev);
539 adapter->flags &= ~QLCNIC_NEED_FLR;
540 }
541
542 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
543 if (err)
544 return err;
545
546 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
547 if (err) {
548 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
549 return err;
550 }
551
552 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
553 return 0;
554}
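/*
 * Context bring-up order: an optional PCI function level reset when
 * QLCNIC_NEED_FLR is pending, then the RX context, then the TX
 * context.  If TX creation fails the freshly created RX context is
 * destroyed again so firmware state stays consistent, and
 * __QLCNIC_FW_ATTACHED is only set once both contexts exist.
 */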
555
556void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
557{
558 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
559 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
560 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
561
562 /* Allow dma queues to drain after context reset */
563 msleep(20);
564 }
565}
566
567void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
568{
569 struct qlcnic_recv_context *recv_ctx;
570 struct qlcnic_host_rds_ring *rds_ring;
571 struct qlcnic_host_sds_ring *sds_ring;
572 struct qlcnic_host_tx_ring *tx_ring;
573 int ring;
574
575 recv_ctx = adapter->recv_ctx;
576
577 tx_ring = adapter->tx_ring;
578 if (tx_ring->hw_consumer != NULL) {
579 dma_free_coherent(&adapter->pdev->dev,
580 sizeof(u32),
581 tx_ring->hw_consumer,
582 tx_ring->hw_cons_phys_addr);
583 tx_ring->hw_consumer = NULL;
584 }
585
586 if (tx_ring->desc_head != NULL) {
587 dma_free_coherent(&adapter->pdev->dev,
588 TX_DESC_RINGSIZE(tx_ring),
589 tx_ring->desc_head, tx_ring->phys_addr);
590 tx_ring->desc_head = NULL;
591 }
592
593 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
594 rds_ring = &recv_ctx->rds_rings[ring];
595
596 if (rds_ring->desc_head != NULL) {
597 dma_free_coherent(&adapter->pdev->dev,
598 RCV_DESC_RINGSIZE(rds_ring),
599 rds_ring->desc_head,
600 rds_ring->phys_addr);
601 rds_ring->desc_head = NULL;
602 }
603 }
604
605 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
606 sds_ring = &recv_ctx->sds_rings[ring];
607
608 if (sds_ring->desc_head != NULL) {
609 dma_free_coherent(&adapter->pdev->dev,
610 STATUS_DESC_RINGSIZE(sds_ring),
611 sds_ring->desc_head,
612 sds_ring->phys_addr);
613 sds_ring->desc_head = NULL;
614 }
615 }
616}
617
618
619/* Get MAC address of a NIC partition */
620int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
621{
622 int err;
623 u32 arg1;
624
625 arg1 = adapter->ahw->pci_func | BIT_8;
626 err = qlcnic_issue_cmd(adapter,
627 adapter->ahw->pci_func,
628 adapter->fw_hal_version,
629 arg1,
630 0,
631 0,
632 QLCNIC_CDRP_CMD_MAC_ADDRESS);
633
634 if (err == QLCNIC_RCODE_SUCCESS)
635 qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
636 QLCNIC_ARG2_CRB_OFFSET, 0, mac);
637 else {
638 dev_err(&adapter->pdev->dev,
639 			"Failed to get mac address %d\n", err);
640 err = -EIO;
641 }
642
643 return err;
644}
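/*
 * The MAC address query returns its result through the ARG1/ARG2 CRB
 * scratch registers rather than a DMA buffer; qlcnic_fetch_mac()
 * reassembles the two register values into the caller's mac[] array.
 */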
645
646/* Get info of a NIC partition */
647int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
648 struct qlcnic_info *npar_info, u8 func_id)
649{
650 int err;
651 dma_addr_t nic_dma_t;
652 struct qlcnic_info *nic_info;
653 void *nic_info_addr;
654 size_t nic_size = sizeof(struct qlcnic_info);
655
656 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
657 &nic_dma_t, GFP_KERNEL);
658 if (!nic_info_addr)
659 return -ENOMEM;
660 memset(nic_info_addr, 0, nic_size);
661
662 nic_info = nic_info_addr;
663 err = qlcnic_issue_cmd(adapter,
664 adapter->ahw->pci_func,
665 adapter->fw_hal_version,
666 MSD(nic_dma_t),
667 LSD(nic_dma_t),
668 (func_id << 16 | nic_size),
669 QLCNIC_CDRP_CMD_GET_NIC_INFO);
670
671 if (err == QLCNIC_RCODE_SUCCESS) {
672 npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
673 npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
674 npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
675 npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
676 npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
677 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
678 npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
679 npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
680 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
681 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
682
683 dev_info(&adapter->pdev->dev,
684 "phy port: %d switch_mode: %d,\n"
685 "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
686 "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
687 npar_info->phys_port, npar_info->switch_mode,
688 npar_info->max_tx_ques, npar_info->max_rx_ques,
689 npar_info->min_tx_bw, npar_info->max_tx_bw,
690 npar_info->max_mtu, npar_info->capabilities);
691 } else {
692 dev_err(&adapter->pdev->dev,
693 			"Failed to get nic info %d\n", err);
694 err = -EIO;
695 }
696
697 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
698 nic_dma_t);
699 return err;
700}
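/*
 * The partition attributes are returned by firmware in a single
 * DMA-coherent qlcnic_info buffer; the third argument of the command
 * packs the queried function id in the upper 16 bits and the buffer
 * size in the lower 16.  All fields are little-endian on the wire and
 * are byte-swapped into the caller's structure above.
 */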
701
702/* Configure a NIC partition */
703int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
704{
705 int err = -EIO;
706 dma_addr_t nic_dma_t;
707 void *nic_info_addr;
708 struct qlcnic_info *nic_info;
709 size_t nic_size = sizeof(struct qlcnic_info);
710
711 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
712 return err;
713
714 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
715 &nic_dma_t, GFP_KERNEL);
716 if (!nic_info_addr)
717 return -ENOMEM;
718
719 memset(nic_info_addr, 0, nic_size);
720 nic_info = nic_info_addr;
721
722 nic_info->pci_func = cpu_to_le16(nic->pci_func);
723 nic_info->op_mode = cpu_to_le16(nic->op_mode);
724 nic_info->phys_port = cpu_to_le16(nic->phys_port);
725 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
726 nic_info->capabilities = cpu_to_le32(nic->capabilities);
727 nic_info->max_mac_filters = nic->max_mac_filters;
728 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
729 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
730 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
731 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
732
733 err = qlcnic_issue_cmd(adapter,
734 adapter->ahw->pci_func,
735 adapter->fw_hal_version,
736 MSD(nic_dma_t),
737 LSD(nic_dma_t),
738 ((nic->pci_func << 16) | nic_size),
739 QLCNIC_CDRP_CMD_SET_NIC_INFO);
740
741 if (err != QLCNIC_RCODE_SUCCESS) {
742 dev_err(&adapter->pdev->dev,
743 			"Failed to set nic info %d\n", err);
744 err = -EIO;
745 }
746
747 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
748 nic_dma_t);
749 return err;
750}
751
752/* Get PCI Info of a partition */
753int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
754 struct qlcnic_pci_info *pci_info)
755{
756 int err = 0, i;
757 dma_addr_t pci_info_dma_t;
758 struct qlcnic_pci_info *npar;
759 void *pci_info_addr;
760 size_t npar_size = sizeof(struct qlcnic_pci_info);
761 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
762
763 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
764 &pci_info_dma_t, GFP_KERNEL);
765 if (!pci_info_addr)
766 return -ENOMEM;
767 memset(pci_info_addr, 0, pci_size);
768
769 npar = pci_info_addr;
770 err = qlcnic_issue_cmd(adapter,
771 adapter->ahw->pci_func,
772 adapter->fw_hal_version,
773 MSD(pci_info_dma_t),
774 LSD(pci_info_dma_t),
775 pci_size,
776 QLCNIC_CDRP_CMD_GET_PCI_INFO);
777
778 if (err == QLCNIC_RCODE_SUCCESS) {
779 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
780 pci_info->id = le16_to_cpu(npar->id);
781 pci_info->active = le16_to_cpu(npar->active);
782 pci_info->type = le16_to_cpu(npar->type);
783 pci_info->default_port =
784 le16_to_cpu(npar->default_port);
785 pci_info->tx_min_bw =
786 le16_to_cpu(npar->tx_min_bw);
787 pci_info->tx_max_bw =
788 le16_to_cpu(npar->tx_max_bw);
789 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
790 }
791 } else {
792 dev_err(&adapter->pdev->dev,
793 			"Failed to get PCI Info %d\n", err);
794 err = -EIO;
795 }
796
797 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
798 pci_info_dma_t);
799 return err;
800}
801
802/* Configure eSwitch for port mirroring */
803int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
804 u8 enable_mirroring, u8 pci_func)
805{
806 int err = -EIO;
807 u32 arg1;
808
809 if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
810 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
811 return err;
812
813 arg1 = id | (enable_mirroring ? BIT_4 : 0);
814 arg1 |= pci_func << 8;
815
816 err = qlcnic_issue_cmd(adapter,
817 adapter->ahw->pci_func,
818 adapter->fw_hal_version,
819 arg1,
820 0,
821 0,
822 QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
823
824 if (err != QLCNIC_RCODE_SUCCESS) {
825 dev_err(&adapter->pdev->dev,
826 			"Failed to configure port mirroring %d on eswitch %d\n",
827 pci_func, id);
828 } else {
829 dev_info(&adapter->pdev->dev,
830 "Configured eSwitch %d for port mirroring:%d\n",
831 id, pci_func);
832 }
833
834 return err;
835}
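/*
 * arg1 layout for SET_PORTMIRRORING, as built above: the eswitch id
 * occupies the low bits, bit 4 selects enable/disable, and the PCI
 * function whose traffic is mirrored is shifted to bits 15:8.  Only
 * the management function may issue the command, and only for an
 * eswitch that has QLCNIC_SWITCH_ENABLE set.
 */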
836
837int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
838	const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
839{
840 size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
841 struct __qlcnic_esw_statistics *stats;
842 dma_addr_t stats_dma_t;
843 void *stats_addr;
844 u32 arg1;
845 int err;
846
847 if (esw_stats == NULL)
848 return -ENOMEM;
849
850 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
851 func != adapter->ahw->pci_func) {
852 dev_err(&adapter->pdev->dev,
853 			"Not privileged to query stats for func=%d\n", func);
854 return -EIO;
855 }
856
857 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
858 &stats_dma_t, GFP_KERNEL);
859 if (!stats_addr) {
860 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
861 return -ENOMEM;
862 }
863 memset(stats_addr, 0, stats_size);
864
865 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
866 arg1 |= rx_tx << 15 | stats_size << 16;
867
868 err = qlcnic_issue_cmd(adapter,
869 adapter->ahw->pci_func,
870 adapter->fw_hal_version,
871 arg1,
872 MSD(stats_dma_t),
873 LSD(stats_dma_t),
874 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
875
876 if (!err) {
877 stats = stats_addr;
878 esw_stats->context_id = le16_to_cpu(stats->context_id);
879 esw_stats->version = le16_to_cpu(stats->version);
880 esw_stats->size = le16_to_cpu(stats->size);
881 esw_stats->multicast_frames =
882 le64_to_cpu(stats->multicast_frames);
883 esw_stats->broadcast_frames =
884 le64_to_cpu(stats->broadcast_frames);
885 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
886 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
887 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
888 esw_stats->errors = le64_to_cpu(stats->errors);
889 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
890 }
891
892 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
893 stats_dma_t);
894 return err;
895}
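/*
 * arg1 layout for GET_ESWITCH_STATS, as built above: the PCI function
 * in the low byte, QLCNIC_STATS_VERSION shifted to bit 8, the query
 * type (QLCNIC_STATS_PORT) at bit 12, the rx/tx selector at bit 15 and
 * the size of the DMA statistics buffer from bit 16 up.  Firmware
 * writes the counters into the buffer in little-endian form and they
 * are swapped into *esw_stats above.
 */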
896
897int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
898	const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
899{
900 struct __qlcnic_esw_statistics port_stats;
901 u8 i;
902 int ret = -EIO;
903
904 if (esw_stats == NULL)
905 return -ENOMEM;
906 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
907 return -EIO;
908 if (adapter->npars == NULL)
909 return -EIO;
910
911 memset(esw_stats, 0, sizeof(u64));
912 esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
913 esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
914 esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
915 esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
916 esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
917 esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
918 esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
919 esw_stats->context_id = eswitch;
920
921 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
922 if (adapter->npars[i].phy_port != eswitch)
923 continue;
924
925 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
926 if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
927 continue;
928
929 esw_stats->size = port_stats.size;
930 esw_stats->version = port_stats.version;
931 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
932 port_stats.unicast_frames);
933 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
934 port_stats.multicast_frames);
935 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
936 port_stats.broadcast_frames);
937 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
938 port_stats.dropped_frames);
939 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
940 port_stats.errors);
941 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
942 port_stats.local_frames);
943 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
944 port_stats.numbytes);
945 ret = 0;
946 }
947 return ret;
948}
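/*
 * eSwitch-wide statistics are built in software: every PCI function
 * whose npar phy_port matches the requested eswitch is queried with
 * qlcnic_get_port_stats() and the per-function counters are summed via
 * QLCNIC_ADD_ESW_STATS (fields start out as
 * QLCNIC_ESW_STATS_NOT_AVAIL, which the macro presumably treats as "no
 * data yet").  The call succeeds if at least one function contributed.
 */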
949
950int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
951 const u8 port, const u8 rx_tx)
952{
953
954 u32 arg1;
955
956 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
957 return -EIO;
958
959 if (func_esw == QLCNIC_STATS_PORT) {
960 if (port >= QLCNIC_MAX_PCI_FUNC)
961 goto err_ret;
962 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
963 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
964 goto err_ret;
965 } else {
966 goto err_ret;
967 }
968
969 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
970 goto err_ret;
971
972 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
973 arg1 |= BIT_14 | rx_tx << 15;
974
975 return qlcnic_issue_cmd(adapter,
976 adapter->ahw->pci_func,
977 adapter->fw_hal_version,
978 arg1,
979 0,
980 0,
981 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
982
983err_ret:
984 	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d "
985 		"rx_tx=%d\n", func_esw, port, rx_tx);
986 return -EIO;
987}
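/*
 * Clearing reuses the GET_ESWITCH_STATS command; BIT_14 in arg1
 * appears to select the "clear" operation rather than a read, with the
 * same port/eswitch and rx/tx encoding as the query path above.
 */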
988
989static int
990__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
991 u32 *arg1, u32 *arg2)
992{
993 int err = -EIO;
994 u8 pci_func;
995 pci_func = (*arg1 >> 8);
996 err = qlcnic_issue_cmd(adapter,
997 adapter->ahw->pci_func,
998 adapter->fw_hal_version,
999 *arg1,
1000 0,
1001 0,
1002 QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
1003
1004 if (err == QLCNIC_RCODE_SUCCESS) {
1005 *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
1006 *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
1007 dev_info(&adapter->pdev->dev,
1008 "eSwitch port config for pci func %d\n", pci_func);
1009 } else {
1010 dev_err(&adapter->pdev->dev,
1011 "Failed to get eswitch port config for pci func %d\n",
1012 pci_func);
1013 }
1014 return err;
1015}
1016/* Configure eSwitch port:
1017 * op_mode = 0 for setting default port behavior
1018 * op_mode = 1 for setting vlan id
1019 * op_mode = 2 for deleting vlan id
1020 * op_type = 0 for vlan_id
1021 * op_type = 1 for port vlan_id
1022 */
1023int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1024 struct qlcnic_esw_func_cfg *esw_cfg)
1025{
1026 int err = -EIO;
1027 u32 arg1, arg2 = 0;
1028 u8 pci_func;
1029
1030 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
1031 return err;
1032 pci_func = esw_cfg->pci_func;
1033 arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
1034 arg1 |= (pci_func << 8);
1035
1036 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1037 return err;
1038 arg1 &= ~(0x0ff << 8);
1039 arg1 |= (pci_func << 8);
1040 arg1 &= ~(BIT_2 | BIT_3);
1041 switch (esw_cfg->op_mode) {
1042 case QLCNIC_PORT_DEFAULTS:
1043 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1044 arg2 |= (BIT_0 | BIT_1);
1045 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1046 arg2 |= (BIT_2 | BIT_3);
1047 if (!(esw_cfg->discard_tagged))
1048 arg1 &= ~BIT_4;
1049 if (!(esw_cfg->promisc_mode))
1050 arg1 &= ~BIT_6;
1051 if (!(esw_cfg->mac_override))
1052 arg1 &= ~BIT_7;
1053 if (!(esw_cfg->mac_anti_spoof))
1054 arg2 &= ~BIT_0;
1055 if (!(esw_cfg->offload_flags & BIT_0))
1056 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1057 if (!(esw_cfg->offload_flags & BIT_1))
1058 arg2 &= ~BIT_2;
1059 if (!(esw_cfg->offload_flags & BIT_2))
1060 arg2 &= ~BIT_3;
1061 break;
1062 case QLCNIC_ADD_VLAN:
1063 arg1 |= (BIT_2 | BIT_5);
1064 arg1 |= (esw_cfg->vlan_id << 16);
1065 break;
1066 case QLCNIC_DEL_VLAN:
1067 arg1 |= (BIT_3 | BIT_5);
1068 arg1 &= ~(0x0ffff << 16);
1069 break;
1070 default:
1071 return err;
1072 }
1073
1074 err = qlcnic_issue_cmd(adapter,
1075 adapter->ahw->pci_func,
1076 adapter->fw_hal_version,
1077 arg1,
1078 arg2,
1079 0,
1080 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
1081
1082 if (err != QLCNIC_RCODE_SUCCESS) {
1083 dev_err(&adapter->pdev->dev,
1084 "Failed to configure eswitch pci func %d\n", pci_func);
1085 } else {
1086 dev_info(&adapter->pdev->dev,
1087 "Configured eSwitch for pci func %d\n", pci_func);
1088 }
1089
1090 return err;
1091}
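/*
 * Illustrative usage sketch (values are placeholders): a management
 * function could tag a partition's traffic with VLAN 100 roughly as
 *
 *	struct qlcnic_esw_func_cfg cfg = {
 *		.pci_func = func,
 *		.op_mode = QLCNIC_ADD_VLAN,
 *		.vlan_id = 100,
 *	};
 *	err = qlcnic_config_switch_port(adapter, &cfg);
 *
 * The current port configuration is always read back first so that
 * unrelated bits in arg1/arg2 are preserved.
 */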
1092
1093int
1094qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1095 struct qlcnic_esw_func_cfg *esw_cfg)
1096{
1097 u32 arg1, arg2;
1098 u8 phy_port;
1099 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
1100 phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
1101 else
1102 phy_port = adapter->physical_port;
1103 arg1 = phy_port;
1104 arg1 |= (esw_cfg->pci_func << 8);
1105 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1106 return -EIO;
1107
1108 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1109 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1110 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1111 esw_cfg->mac_override = !!(arg1 & BIT_7);
1112 esw_cfg->vlan_id = LSW(arg1 >> 16);
1113 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1114 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
1115
1116 return 0;
1117}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 00000000000..72a723d5c98
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1222 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/types.h>
9#include <linux/delay.h>
10#include <linux/pci.h>
11#include <linux/io.h>
12#include <linux/netdevice.h>
13#include <linux/ethtool.h>
14
15#include "qlcnic.h"
16
17struct qlcnic_stats {
18 char stat_string[ETH_GSTRING_LEN];
19 int sizeof_stat;
20 int stat_offset;
21};
22
23#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
24#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
25
26static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
27 {"xmit_called",
28 QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
29 {"xmit_finished",
30 QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
31 {"rx_dropped",
32 QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
33 {"tx_dropped",
34 QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
35 {"csummed",
36 QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
37 {"rx_pkts",
38 QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
39 {"lro_pkts",
40 QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
41 {"rx_bytes",
42 QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
43 {"tx_bytes",
44 QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
45 {"lrobytes",
46 QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
47 {"lso_frames",
48 QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
49 {"xmit_on",
50 QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
51 {"xmit_off",
52 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
53 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
54 QLC_OFF(stats.skb_alloc_failure)},
55 {"null rxbuf",
56 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
57 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
58 QLC_OFF(stats.rx_dma_map_error)},
59 {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
60 QLC_OFF(stats.tx_dma_map_error)},
61
62};
63
64static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
65 "rx unicast frames",
66 "rx multicast frames",
67 "rx broadcast frames",
68 "rx dropped frames",
69 "rx errors",
70 "rx local frames",
71 "rx numbytes",
72 "tx unicast frames",
73 "tx multicast frames",
74 "tx broadcast frames",
75 "tx dropped frames",
76 "tx errors",
77 "tx local frames",
78 "tx numbytes",
79};
80
81#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
82#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
83
84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
85 "Register_Test_on_offline",
86 "Link_Test_on_offline",
87 "Interrupt_Test_offline",
88 "Internal_Loopback_offline",
89 "External_Loopback_offline"
90};
91
92#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
93
94#define QLCNIC_RING_REGS_COUNT 20
95#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
96#define QLCNIC_MAX_EEPROM_LEN 1024
97
98static const u32 diag_registers[] = {
99 CRB_CMDPEG_STATE,
100 CRB_RCVPEG_STATE,
101 CRB_XG_STATE_P3P,
102 CRB_FW_CAPABILITIES_1,
103 ISR_INT_STATE_REG,
104 QLCNIC_CRB_DRV_ACTIVE,
105 QLCNIC_CRB_DEV_STATE,
106 QLCNIC_CRB_DRV_STATE,
107 QLCNIC_CRB_DRV_SCRATCH,
108 QLCNIC_CRB_DEV_PARTITION_INFO,
109 QLCNIC_CRB_DRV_IDC_VER,
110 QLCNIC_PEG_ALIVE_COUNTER,
111 QLCNIC_PEG_HALT_STATUS1,
112 QLCNIC_PEG_HALT_STATUS2,
113 QLCNIC_CRB_PEG_NET_0+0x3c,
114 QLCNIC_CRB_PEG_NET_1+0x3c,
115 QLCNIC_CRB_PEG_NET_2+0x3c,
116 QLCNIC_CRB_PEG_NET_4+0x3c,
117 -1
118};
119
120#define QLCNIC_MGMT_API_VERSION 2
121#define QLCNIC_DEV_INFO_SIZE 1
122#define QLCNIC_ETHTOOL_REGS_VER 2
123static int qlcnic_get_regs_len(struct net_device *dev)
124{
125 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
126 QLCNIC_DEV_INFO_SIZE + 1;
127}
128
129static int qlcnic_get_eeprom_len(struct net_device *dev)
130{
131 return QLCNIC_FLASH_TOTAL_SIZE;
132}
133
134static void
135qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
136{
137 struct qlcnic_adapter *adapter = netdev_priv(dev);
138 u32 fw_major, fw_minor, fw_build;
139
140 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
141 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
142 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
143 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
144
145 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
146 strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
147 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
148}
149
150static int
151qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
152{
153 struct qlcnic_adapter *adapter = netdev_priv(dev);
154 int check_sfp_module = 0;
155 u16 pcifn = adapter->ahw->pci_func;
156
157 /* read which mode */
158 if (adapter->ahw->port_type == QLCNIC_GBE) {
159 ecmd->supported = (SUPPORTED_10baseT_Half |
160 SUPPORTED_10baseT_Full |
161 SUPPORTED_100baseT_Half |
162 SUPPORTED_100baseT_Full |
163 SUPPORTED_1000baseT_Half |
164 SUPPORTED_1000baseT_Full);
165
166 ecmd->advertising = (ADVERTISED_100baseT_Half |
167 ADVERTISED_100baseT_Full |
168 ADVERTISED_1000baseT_Half |
169 ADVERTISED_1000baseT_Full);
170
171 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
172 ecmd->duplex = adapter->link_duplex;
173 ecmd->autoneg = adapter->link_autoneg;
174
175 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
176 u32 val;
177
178 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
179 if (val == QLCNIC_PORT_MODE_802_3_AP) {
180 ecmd->supported = SUPPORTED_1000baseT_Full;
181 ecmd->advertising = ADVERTISED_1000baseT_Full;
182 } else {
183 ecmd->supported = SUPPORTED_10000baseT_Full;
184 ecmd->advertising = ADVERTISED_10000baseT_Full;
185 }
186
187 if (netif_running(dev) && adapter->has_link_events) {
188 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
189 ecmd->autoneg = adapter->link_autoneg;
190 ecmd->duplex = adapter->link_duplex;
191 goto skip;
192 }
193
194 val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
195 ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ *
196 P3P_LINK_SPEED_VAL(pcifn, val));
197 ecmd->duplex = DUPLEX_FULL;
198 ecmd->autoneg = AUTONEG_DISABLE;
199 } else
200 return -EIO;
201
202skip:
203 ecmd->phy_address = adapter->physical_port;
204 ecmd->transceiver = XCVR_EXTERNAL;
205
206 switch (adapter->ahw->board_type) {
207 case QLCNIC_BRDTYPE_P3P_REF_QG:
208 case QLCNIC_BRDTYPE_P3P_4_GB:
209 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
210
211 ecmd->supported |= SUPPORTED_Autoneg;
212 ecmd->advertising |= ADVERTISED_Autoneg;
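		/* fall through */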
213 case QLCNIC_BRDTYPE_P3P_10G_CX4:
214 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
215 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
216 ecmd->supported |= SUPPORTED_TP;
217 ecmd->advertising |= ADVERTISED_TP;
218 ecmd->port = PORT_TP;
219 ecmd->autoneg = adapter->link_autoneg;
220 break;
221 case QLCNIC_BRDTYPE_P3P_IMEZ:
222 case QLCNIC_BRDTYPE_P3P_XG_LOM:
223 case QLCNIC_BRDTYPE_P3P_HMEZ:
224 ecmd->supported |= SUPPORTED_MII;
225 ecmd->advertising |= ADVERTISED_MII;
226 ecmd->port = PORT_MII;
227 ecmd->autoneg = AUTONEG_DISABLE;
228 break;
229 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
230 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
231 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
232 ecmd->advertising |= ADVERTISED_TP;
233 ecmd->supported |= SUPPORTED_TP;
234 check_sfp_module = netif_running(dev) &&
235 adapter->has_link_events;
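		/* fall through */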
236 case QLCNIC_BRDTYPE_P3P_10G_XFP:
237 ecmd->supported |= SUPPORTED_FIBRE;
238 ecmd->advertising |= ADVERTISED_FIBRE;
239 ecmd->port = PORT_FIBRE;
240 ecmd->autoneg = AUTONEG_DISABLE;
241 break;
242 case QLCNIC_BRDTYPE_P3P_10G_TP:
243 if (adapter->ahw->port_type == QLCNIC_XGBE) {
244 ecmd->autoneg = AUTONEG_DISABLE;
245 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
246 ecmd->advertising |=
247 (ADVERTISED_FIBRE | ADVERTISED_TP);
248 ecmd->port = PORT_FIBRE;
249 check_sfp_module = netif_running(dev) &&
250 adapter->has_link_events;
251 } else {
252 ecmd->autoneg = AUTONEG_ENABLE;
253 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
254 ecmd->advertising |=
255 (ADVERTISED_TP | ADVERTISED_Autoneg);
256 ecmd->port = PORT_TP;
257 }
258 break;
259 default:
260 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
261 adapter->ahw->board_type);
262 return -EIO;
263 }
264
265 if (check_sfp_module) {
266 switch (adapter->module_type) {
267 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
268 case LINKEVENT_MODULE_OPTICAL_SRLR:
269 case LINKEVENT_MODULE_OPTICAL_LRM:
270 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
271 ecmd->port = PORT_FIBRE;
272 break;
273 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
274 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
275 case LINKEVENT_MODULE_TWINAX:
276 ecmd->port = PORT_TP;
277 break;
278 default:
279 ecmd->port = PORT_OTHER;
280 }
281 }
282
283 return 0;
284}
285
286static int
287qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
288{
289 u32 config = 0;
290 u32 ret = 0;
291 struct qlcnic_adapter *adapter = netdev_priv(dev);
292
293 if (adapter->ahw->port_type != QLCNIC_GBE)
294 return -EOPNOTSUPP;
295
296 /* read which mode */
297 if (ecmd->duplex)
298 config |= 0x1;
299
300 if (ecmd->autoneg)
301 config |= 0x2;
302
303 switch (ethtool_cmd_speed(ecmd)) {
304 case SPEED_10:
305 config |= (0 << 8);
306 break;
307 case SPEED_100:
308 config |= (1 << 8);
309 break;
310 case SPEED_1000:
311 config |= (10 << 8);
312 break;
313 default:
314 return -EIO;
315 }
316
317 ret = qlcnic_fw_cmd_set_port(adapter, config);
318
319 if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
320 return -EOPNOTSUPP;
321 else if (ret)
322 return -EIO;
323
324 adapter->link_speed = ethtool_cmd_speed(ecmd);
325 adapter->link_duplex = ecmd->duplex;
326 adapter->link_autoneg = ecmd->autoneg;
327
328 if (!netif_running(dev))
329 return 0;
330
331 dev->netdev_ops->ndo_stop(dev);
332 return dev->netdev_ops->ndo_open(dev);
333}
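/*
 * Port config word sent to firmware by the GbE path above: bit 0 is
 * set for full duplex, bit 1 for autonegotiation, and the speed code
 * lives at bits 15:8 (0 = 10 Mb/s, 1 = 100 Mb/s, 10 = 1 Gb/s).  When
 * the interface is running it is closed and reopened so the new link
 * parameters take effect.
 */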
334
335static void
336qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
337{
338 struct qlcnic_adapter *adapter = netdev_priv(dev);
339 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
340 struct qlcnic_host_sds_ring *sds_ring;
341 u32 *regs_buff = p;
342 int ring, i = 0, j = 0;
343
344 memset(p, 0, qlcnic_get_regs_len(dev));
345 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
346 (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
347
348 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
349 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
350
351 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
352 regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
353
354 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
355 return;
356
357 	regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
358
359 regs_buff[i++] = 1; /* No. of tx ring */
360 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
361 regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
362
363 regs_buff[i++] = 2; /* No. of rx ring */
364 regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
365 regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
366
367 regs_buff[i++] = adapter->max_sds_rings;
368
369 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
370 sds_ring = &(recv_ctx->sds_rings[ring]);
371 regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
372 }
373}
374
375static u32 qlcnic_test_link(struct net_device *dev)
376{
377 struct qlcnic_adapter *adapter = netdev_priv(dev);
378 u32 val;
379
380 val = QLCRD32(adapter, CRB_XG_STATE_P3P);
381 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
382 return (val == XG_LINK_UP_P3P) ? 0 : 1;
383}
384
385static int
386qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
387 u8 *bytes)
388{
389 struct qlcnic_adapter *adapter = netdev_priv(dev);
390 int offset;
391 int ret;
392
393 if (eeprom->len == 0)
394 return -EINVAL;
395
396 eeprom->magic = (adapter->pdev)->vendor |
397 ((adapter->pdev)->device << 16);
398 offset = eeprom->offset;
399
400 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
401 eeprom->len);
402 if (ret < 0)
403 return ret;
404
405 return 0;
406}
407
408static void
409qlcnic_get_ringparam(struct net_device *dev,
410 struct ethtool_ringparam *ring)
411{
412 struct qlcnic_adapter *adapter = netdev_priv(dev);
413
414 ring->rx_pending = adapter->num_rxd;
415 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
416 ring->tx_pending = adapter->num_txd;
417
418 ring->rx_max_pending = adapter->max_rxd;
419 ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
420 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
421
422 ring->rx_mini_max_pending = 0;
423 ring->rx_mini_pending = 0;
424}
425
426static u32
427qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
428{
429 u32 num_desc;
430 num_desc = max(val, min);
431 num_desc = min(num_desc, max);
432 num_desc = roundup_pow_of_two(num_desc);
433
434 if (val != num_desc) {
435 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
436 qlcnic_driver_name, r_name, num_desc, val);
437 }
438
439 return num_desc;
440}
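/*
 * The requested count is clamped to [min, max] and then rounded up to
 * the next power of two.  For example (assuming min <= 3000 and a max
 * of 8192), a request for 3000 rx descriptors becomes 4096 and the
 * adjustment is logged.
 */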
441
442static int
443qlcnic_set_ringparam(struct net_device *dev,
444 struct ethtool_ringparam *ring)
445{
446 struct qlcnic_adapter *adapter = netdev_priv(dev);
447 u16 num_rxd, num_jumbo_rxd, num_txd;
448
449 if (ring->rx_mini_pending)
450 return -EOPNOTSUPP;
451
452 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
453 MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
454
455 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
456 MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
457 "rx jumbo");
458
459 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
460 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
461
462 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
463 num_jumbo_rxd == adapter->num_jumbo_rxd)
464 return 0;
465
466 adapter->num_rxd = num_rxd;
467 adapter->num_jumbo_rxd = num_jumbo_rxd;
468 adapter->num_txd = num_txd;
469
470 return qlcnic_reset_context(adapter);
471}
472
473static void qlcnic_get_channels(struct net_device *dev,
474 struct ethtool_channels *channel)
475{
476 struct qlcnic_adapter *adapter = netdev_priv(dev);
477
478 channel->max_rx = rounddown_pow_of_two(min_t(int,
479 adapter->max_rx_ques, num_online_cpus()));
480 channel->max_tx = adapter->max_tx_ques;
481
482 channel->rx_count = adapter->max_sds_rings;
483 channel->tx_count = adapter->max_tx_ques;
484}
485
486static int qlcnic_set_channels(struct net_device *dev,
487 struct ethtool_channels *channel)
488{
489 struct qlcnic_adapter *adapter = netdev_priv(dev);
490 int err;
491
492 if (channel->other_count || channel->combined_count ||
493 channel->tx_count != channel->max_tx)
494 return -EINVAL;
495
496 err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
497 if (err)
498 return err;
499
500 err = qlcnic_set_max_rss(adapter, channel->rx_count);
501 netdev_info(dev, "allocated 0x%x sds rings\n",
502 adapter->max_sds_rings);
503 return err;
504}
505
506static void
507qlcnic_get_pauseparam(struct net_device *netdev,
508 struct ethtool_pauseparam *pause)
509{
510 struct qlcnic_adapter *adapter = netdev_priv(netdev);
511 int port = adapter->physical_port;
512 __u32 val;
513
514 if (adapter->ahw->port_type == QLCNIC_GBE) {
515 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
516 return;
517 /* get flow control settings */
518 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
519 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
520 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
521 switch (port) {
522 case 0:
523 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
524 break;
525 case 1:
526 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
527 break;
528 case 2:
529 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
530 break;
531 case 3:
532 default:
533 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
534 break;
535 }
536 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
537 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
538 return;
539 pause->rx_pause = 1;
540 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
541 if (port == 0)
542 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
543 else
544 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
545 } else {
546 dev_err(&netdev->dev, "Unknown board type: %x\n",
547 adapter->ahw->port_type);
548 }
549}
550
551static int
552qlcnic_set_pauseparam(struct net_device *netdev,
553 struct ethtool_pauseparam *pause)
554{
555 struct qlcnic_adapter *adapter = netdev_priv(netdev);
556 int port = adapter->physical_port;
557 __u32 val;
558
559 /* read mode */
560 if (adapter->ahw->port_type == QLCNIC_GBE) {
561 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
562 return -EIO;
563 /* set flow control */
564 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
565
566 if (pause->rx_pause)
567 qlcnic_gb_rx_flowctl(val);
568 else
569 qlcnic_gb_unset_rx_flowctl(val);
570
571 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
572 val);
573 /* set autoneg */
574 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
575 switch (port) {
576 case 0:
577 if (pause->tx_pause)
578 qlcnic_gb_unset_gb0_mask(val);
579 else
580 qlcnic_gb_set_gb0_mask(val);
581 break;
582 case 1:
583 if (pause->tx_pause)
584 qlcnic_gb_unset_gb1_mask(val);
585 else
586 qlcnic_gb_set_gb1_mask(val);
587 break;
588 case 2:
589 if (pause->tx_pause)
590 qlcnic_gb_unset_gb2_mask(val);
591 else
592 qlcnic_gb_set_gb2_mask(val);
593 break;
594 case 3:
595 default:
596 if (pause->tx_pause)
597 qlcnic_gb_unset_gb3_mask(val);
598 else
599 qlcnic_gb_set_gb3_mask(val);
600 break;
601 }
602 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
603 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
604 if (!pause->rx_pause || pause->autoneg)
605 return -EOPNOTSUPP;
606
607 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
608 return -EIO;
609
610 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
611 if (port == 0) {
612 if (pause->tx_pause)
613 qlcnic_xg_unset_xg0_mask(val);
614 else
615 qlcnic_xg_set_xg0_mask(val);
616 } else {
617 if (pause->tx_pause)
618 qlcnic_xg_unset_xg1_mask(val);
619 else
620 qlcnic_xg_set_xg1_mask(val);
621 }
622 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
623 } else {
624 dev_err(&netdev->dev, "Unknown board type: %x\n",
625 adapter->ahw->port_type);
626 }
627 return 0;
628}
629
630static int qlcnic_reg_test(struct net_device *dev)
631{
632 struct qlcnic_adapter *adapter = netdev_priv(dev);
633 u32 data_read;
634
635 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
636 if ((data_read & 0xffff) != adapter->pdev->vendor)
637 return 1;
638
639 return 0;
640}
641
642static int qlcnic_get_sset_count(struct net_device *dev, int sset)
643{
644 struct qlcnic_adapter *adapter = netdev_priv(dev);
645 switch (sset) {
646 case ETH_SS_TEST:
647 return QLCNIC_TEST_LEN;
648 case ETH_SS_STATS:
649 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
650 return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
651 return QLCNIC_STATS_LEN;
652 default:
653 return -EOPNOTSUPP;
654 }
655}
656
657static int qlcnic_irq_test(struct net_device *netdev)
658{
659 struct qlcnic_adapter *adapter = netdev_priv(netdev);
660 int max_sds_rings = adapter->max_sds_rings;
661 int ret;
662
663 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
664 return -EIO;
665
666 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
667 if (ret)
668 goto clear_it;
669
670 adapter->diag_cnt = 0;
671 ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
672 adapter->fw_hal_version, adapter->ahw->pci_func,
673 0, 0, 0x00000011);
674 if (ret)
675 goto done;
676
677 msleep(10);
678
679 ret = !adapter->diag_cnt;
680
681done:
682 qlcnic_diag_free_res(netdev, max_sds_rings);
683
684clear_it:
685 adapter->max_sds_rings = max_sds_rings;
686 clear_bit(__QLCNIC_RESETTING, &adapter->state);
687 return ret;
688}
689
690#define QLCNIC_ILB_PKT_SIZE 64
691#define QLCNIC_NUM_ILB_PKT 16
692#define QLCNIC_ILB_MAX_RCV_LOOP 10
693
694static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
695{
696 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
697
698 memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
699
700 memcpy(data, mac, ETH_ALEN);
701 memcpy(data + ETH_ALEN, mac, ETH_ALEN);
702
703 memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
704}
705
706int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
707{
708 unsigned char buff[QLCNIC_ILB_PKT_SIZE];
709 qlcnic_create_loopback_buff(buff, mac);
710 return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
711}
712
713static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter)
714{
715 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
716 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
717 struct sk_buff *skb;
718 int i, loop, cnt = 0;
719
720 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
721 skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
722 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
723 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
724
725 adapter->diag_cnt = 0;
726 qlcnic_xmit_frame(skb, adapter->netdev);
727
728 loop = 0;
729 do {
730 msleep(1);
731 qlcnic_process_rcv_ring_diag(sds_ring);
732 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
733 break;
734 } while (!adapter->diag_cnt);
735
736 dev_kfree_skb_any(skb);
737
738 if (!adapter->diag_cnt)
739 			dev_warn(&adapter->pdev->dev,
740 				"LB Test: packet %d not received\n", i + 1);
741 else
742 cnt++;
743 }
744 if (cnt != i) {
745 dev_warn(&adapter->pdev->dev, "LB Test failed\n");
746 return -1;
747 }
748 return 0;
749}
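/*
 * The loopback check above sends QLCNIC_NUM_ILB_PKT (16) frames of
 * QLCNIC_ILB_PKT_SIZE (64) bytes, each using the adapter's own MAC as
 * both source and destination with a fixed fill pattern and a
 * four-byte signature.  After every transmit the first SDS ring is
 * polled in diag mode until the frame is seen or
 * QLCNIC_ILB_MAX_RCV_LOOP iterations elapse; the test fails if any
 * frame goes missing.
 */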
750
751static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
752{
753 struct qlcnic_adapter *adapter = netdev_priv(netdev);
754 int max_sds_rings = adapter->max_sds_rings;
755 struct qlcnic_host_sds_ring *sds_ring;
756 int loop = 0;
757 int ret;
758
759 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
760 netdev_info(netdev, "Firmware is not loopback test capable\n");
761 return -EOPNOTSUPP;
762 }
763
764 netdev_info(netdev, "%s loopback test in progress\n",
765 mode == QLCNIC_ILB_MODE ? "internal" : "external");
766 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
767 		netdev_warn(netdev, "Loopback test not supported for "
768 				"non-privileged function\n");
769 return 0;
770 }
771
772 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
773 return -EBUSY;
774
775 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
776 if (ret)
777 goto clear_it;
778
779 sds_ring = &adapter->recv_ctx->sds_rings[0];
780
781 ret = qlcnic_set_lb_mode(adapter, mode);
782 if (ret)
783 goto free_res;
784
785 adapter->diag_cnt = 0;
786 do {
787 msleep(500);
788 qlcnic_process_rcv_ring_diag(sds_ring);
789 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
790 			netdev_info(netdev, "firmware didn't respond to loopback"
791 				" configure request\n");
792 ret = -QLCNIC_FW_NOT_RESPOND;
793 goto free_res;
794 } else if (adapter->diag_cnt) {
795 ret = adapter->diag_cnt;
796 goto free_res;
797 }
798 } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
799
800 ret = qlcnic_do_lb_test(adapter);
801
802 qlcnic_clear_lb_mode(adapter);
803
804 free_res:
805 qlcnic_diag_free_res(netdev, max_sds_rings);
806
807 clear_it:
808 adapter->max_sds_rings = max_sds_rings;
809 clear_bit(__QLCNIC_RESETTING, &adapter->state);
810 return ret;
811}
812
813static void
814qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
815 u64 *data)
816{
817 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
818
819 data[0] = qlcnic_reg_test(dev);
820 if (data[0])
821 eth_test->flags |= ETH_TEST_FL_FAILED;
822
823 data[1] = (u64) qlcnic_test_link(dev);
824 if (data[1])
825 eth_test->flags |= ETH_TEST_FL_FAILED;
826
827 if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
828 data[2] = qlcnic_irq_test(dev);
829 if (data[2])
830 eth_test->flags |= ETH_TEST_FL_FAILED;
831
832 data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
833 if (data[3])
834 eth_test->flags |= ETH_TEST_FL_FAILED;
835
836 if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
837 data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
838 if (data[4])
839 eth_test->flags |= ETH_TEST_FL_FAILED;
840 eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
841 }
842 }
843}
844
845static void
846qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
847{
848 struct qlcnic_adapter *adapter = netdev_priv(dev);
849 int index, i;
850
851 switch (stringset) {
852 case ETH_SS_TEST:
853 memcpy(data, *qlcnic_gstrings_test,
854 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
855 break;
856 case ETH_SS_STATS:
857 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
858 memcpy(data + index * ETH_GSTRING_LEN,
859 qlcnic_gstrings_stats[index].stat_string,
860 ETH_GSTRING_LEN);
861 }
862 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
863 return;
864 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
865 memcpy(data + index * ETH_GSTRING_LEN,
866 qlcnic_device_gstrings_stats[i],
867 ETH_GSTRING_LEN);
868 }
869 }
870}
871
872#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
873 (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
874
875static void
876qlcnic_fill_device_stats(int *index, u64 *data,
877 struct __qlcnic_esw_statistics *stats)
878{
879 int ind = *index;
880
881 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames);
882 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames);
883 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames);
884 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames);
885 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors);
886 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames);
887 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes);
888
889 *index = ind;
890}
891
892static void
893qlcnic_get_ethtool_stats(struct net_device *dev,
894 struct ethtool_stats *stats, u64 * data)
895{
896 struct qlcnic_adapter *adapter = netdev_priv(dev);
897 struct qlcnic_esw_statistics port_stats;
898 int index, ret;
899
900 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
901 char *p =
902 (char *)adapter +
903 qlcnic_gstrings_stats[index].stat_offset;
904 data[index] =
905 (qlcnic_gstrings_stats[index].sizeof_stat ==
906 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
907 }
908
909 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
910 return;
911
912 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
913 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
914 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
915 if (ret)
916 return;
917
918 qlcnic_fill_device_stats(&index, data, &port_stats.rx);
919
920 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
921 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
922 if (ret)
923 return;
924
925 qlcnic_fill_device_stats(&index, data, &port_stats.tx);
926}
927
928static int qlcnic_set_led(struct net_device *dev,
929 enum ethtool_phys_id_state state)
930{
931 struct qlcnic_adapter *adapter = netdev_priv(dev);
932 int max_sds_rings = adapter->max_sds_rings;
933
934 switch (state) {
935 case ETHTOOL_ID_ACTIVE:
936 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
937 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
938 return -EIO;
939
940 if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
941 clear_bit(__QLCNIC_RESETTING, &adapter->state);
942 return -EIO;
943 }
944 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
945 }
946
947 if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
948 return 0;
949
950 dev_err(&adapter->pdev->dev,
951 "Failed to set LED blink state.\n");
952 break;
953
954 case ETHTOOL_ID_INACTIVE:
955 if (adapter->nic_ops->config_led(adapter, 0, 0xf))
956 dev_err(&adapter->pdev->dev,
957 "Failed to reset LED blink state.\n");
958
959 break;
960
961 default:
962 return -EINVAL;
963 }
964
965 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) {
966 qlcnic_diag_free_res(dev, max_sds_rings);
967 clear_bit(__QLCNIC_RESETTING, &adapter->state);
968 }
969
970 return -EIO;
971}
972
973static void
974qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
975{
976 struct qlcnic_adapter *adapter = netdev_priv(dev);
977 u32 wol_cfg;
978
979 wol->supported = 0;
980 wol->wolopts = 0;
981
982 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
983 if (wol_cfg & (1UL << adapter->portnum))
984 wol->supported |= WAKE_MAGIC;
985
986 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
987 if (wol_cfg & (1UL << adapter->portnum))
988 wol->wolopts |= WAKE_MAGIC;
989}
990
991static int
992qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
993{
994 struct qlcnic_adapter *adapter = netdev_priv(dev);
995 u32 wol_cfg;
996
997 if (wol->wolopts & ~WAKE_MAGIC)
998 return -EOPNOTSUPP;
999
1000 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1001 if (!(wol_cfg & (1 << adapter->portnum)))
1002 return -EOPNOTSUPP;
1003
1004 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1005 if (wol->wolopts & WAKE_MAGIC)
1006 wol_cfg |= 1UL << adapter->portnum;
1007 else
1008 wol_cfg &= ~(1UL << adapter->portnum);
1009
1010 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
1011
1012 return 0;
1013}
1014
1015/*
1016 * Set the coalescing parameters. Currently only normal is supported.
1017 * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
1018 * firmware coalescing to default.
1019 */
1020static int qlcnic_set_intr_coalesce(struct net_device *netdev,
1021 struct ethtool_coalesce *ethcoal)
1022{
1023 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1024
1025 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
1026 return -EINVAL;
1027
1028 /*
1029 * Return Error if unsupported values or
1030 * unsupported parameters are set.
1031 */
1032 if (ethcoal->rx_coalesce_usecs > 0xffff ||
1033 ethcoal->rx_max_coalesced_frames > 0xffff ||
1034 ethcoal->tx_coalesce_usecs ||
1035 ethcoal->tx_max_coalesced_frames ||
1036 ethcoal->rx_coalesce_usecs_irq ||
1037 ethcoal->rx_max_coalesced_frames_irq ||
1038 ethcoal->tx_coalesce_usecs_irq ||
1039 ethcoal->tx_max_coalesced_frames_irq ||
1040 ethcoal->stats_block_coalesce_usecs ||
1041 ethcoal->use_adaptive_rx_coalesce ||
1042 ethcoal->use_adaptive_tx_coalesce ||
1043 ethcoal->pkt_rate_low ||
1044 ethcoal->rx_coalesce_usecs_low ||
1045 ethcoal->rx_max_coalesced_frames_low ||
1046 ethcoal->tx_coalesce_usecs_low ||
1047 ethcoal->tx_max_coalesced_frames_low ||
1048 ethcoal->pkt_rate_high ||
1049 ethcoal->rx_coalesce_usecs_high ||
1050 ethcoal->rx_max_coalesced_frames_high ||
1051 ethcoal->tx_coalesce_usecs_high ||
1052 ethcoal->tx_max_coalesced_frames_high)
1053 return -EINVAL;
1054
1055 if (!ethcoal->rx_coalesce_usecs ||
1056 !ethcoal->rx_max_coalesced_frames) {
1057 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1058 adapter->ahw->coal.rx_time_us =
1059 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1060 adapter->ahw->coal.rx_packets =
1061 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1062 } else {
1063 adapter->ahw->coal.flag = 0;
1064 adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
1065 adapter->ahw->coal.rx_packets =
1066 ethcoal->rx_max_coalesced_frames;
1067 }
1068
1069 qlcnic_config_intr_coalesce(adapter);
1070
1071 return 0;
1072}
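/*
 * Example (illustrative, "ethX" is a placeholder):
 * "ethtool -C ethX rx-usecs 3 rx-frames 64" stores 3 and 64 in
 * ahw->coal and pushes them to firmware via
 * qlcnic_config_intr_coalesce(); setting either rx-usecs or rx-frames
 * to 0 reverts to the QLCNIC_DEFAULT_INTR_COALESCE_* values.
 */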
1073
1074static int qlcnic_get_intr_coalesce(struct net_device *netdev,
1075 struct ethtool_coalesce *ethcoal)
1076{
1077 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1078
1079 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1080 return -EINVAL;
1081
1082 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
1083 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
1084
1085 return 0;
1086}
1087
1088static u32 qlcnic_get_msglevel(struct net_device *netdev)
1089{
1090 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1091
1092 return adapter->msg_enable;
1093}
1094
1095static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1096{
1097 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1098
1099 adapter->msg_enable = msglvl;
1100}
1101
1102static int
1103qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1104{
1105 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1106 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1107
1108 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
1109 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1110 dump->version = adapter->fw_version;
1111 return 0;
1112}
1113
1114static int
1115qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1116 void *buffer)
1117{
1118 int i, copy_sz;
1119 u32 *hdr_ptr, *data;
1120 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1121 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1122
1123 if (!fw_dump->clr) {
1124 netdev_info(netdev, "Dump not available\n");
1125 qlcnic_api_unlock(adapter);
1126 return -EINVAL;
1127 }
1128 /* Copy template header first */
1129 copy_sz = fw_dump->tmpl_hdr->size;
1130 hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
1131 data = buffer;
1132 for (i = 0; i < copy_sz/sizeof(u32); i++)
1133 *data++ = cpu_to_le32(*hdr_ptr++);
1134
1135 /* Copy captured dump data */
1136 memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
1137 dump->len = copy_sz + fw_dump->size;
1138 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1139
1140 /* Free dump area once data has been captured */
1141 vfree(fw_dump->data);
1142 fw_dump->data = NULL;
1143 fw_dump->clr = 0;
1144
1145 return 0;
1146}
1147
1148static int
1149qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1150{
1151 int ret = 0;
1152 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1153 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1154
1155 if (val->flag == QLCNIC_FORCE_FW_DUMP_KEY) {
1156 if (!fw_dump->enable) {
1157 netdev_info(netdev, "FW dump not enabled\n");
1158 return ret;
1159 }
1160 if (fw_dump->clr) {
1161 dev_info(&adapter->pdev->dev,
1162 "Previous dump not cleared, not forcing dump\n");
1163 return ret;
1164 }
1165 netdev_info(netdev, "Forcing a FW dump\n");
1166 qlcnic_dev_request_reset(adapter);
1167 } else if (val->flag == QLCNIC_DISABLE_FW_DUMP) {
1168 if (fw_dump->enable) {
1169 netdev_info(netdev, "Disabling FW dump\n");
1170 fw_dump->enable = 0;
1171 }
1172 } else if (val->flag == QLCNIC_ENABLE_FW_DUMP) {
1173 if (!fw_dump->enable && fw_dump->tmpl_hdr) {
1174 netdev_info(netdev, "Enabling FW dump\n");
1175 fw_dump->enable = 1;
1176 }
1177 } else {
1178 if (val->flag > QLCNIC_DUMP_MASK_MAX ||
1179 val->flag < QLCNIC_DUMP_MASK_MIN) {
1180 netdev_info(netdev,
1181 "Invalid dump level: 0x%x\n", val->flag);
1182 ret = -EINVAL;
1183 goto out;
1184 }
1185 fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
1186 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1187 fw_dump->tmpl_hdr->drv_cap_mask);
1188 }
1189out:
1190 return ret;
1191}
1192
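Editor's note: the three handlers above back the standard ethtool firmware-dump interface (ETHTOOL_SET_DUMP, ETHTOOL_GET_DUMP_FLAG and ETHTOOL_GET_DUMP_DATA, i.e. ethtool -W / -w in userspace). A hedged illustration of the set_dump side, reusing the ioctl plumbing sketched after qlcnic_set_intr_coalesce():

	struct ethtool_dump dump;

	memset(&dump, 0, sizeof(dump));
	dump.cmd = ETHTOOL_SET_DUMP;
	/* Either one of the driver-internal control keys checked above or a
	 * capture mask in [QLCNIC_DUMP_MASK_MIN, QLCNIC_DUMP_MASK_MAX]; the
	 * numeric values live in qlcnic.h, not in the userspace ABI. */
	dump.flag = 0xff;	/* illustrative mask only */
	/* pass &dump as ifr.ifr_data to the SIOCETHTOOL ioctl as before */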
1193const struct ethtool_ops qlcnic_ethtool_ops = {
1194 .get_settings = qlcnic_get_settings,
1195 .set_settings = qlcnic_set_settings,
1196 .get_drvinfo = qlcnic_get_drvinfo,
1197 .get_regs_len = qlcnic_get_regs_len,
1198 .get_regs = qlcnic_get_regs,
1199 .get_link = ethtool_op_get_link,
1200 .get_eeprom_len = qlcnic_get_eeprom_len,
1201 .get_eeprom = qlcnic_get_eeprom,
1202 .get_ringparam = qlcnic_get_ringparam,
1203 .set_ringparam = qlcnic_set_ringparam,
1204 .get_channels = qlcnic_get_channels,
1205 .set_channels = qlcnic_set_channels,
1206 .get_pauseparam = qlcnic_get_pauseparam,
1207 .set_pauseparam = qlcnic_set_pauseparam,
1208 .get_wol = qlcnic_get_wol,
1209 .set_wol = qlcnic_set_wol,
1210 .self_test = qlcnic_diag_test,
1211 .get_strings = qlcnic_get_strings,
1212 .get_ethtool_stats = qlcnic_get_ethtool_stats,
1213 .get_sset_count = qlcnic_get_sset_count,
1214 .get_coalesce = qlcnic_get_intr_coalesce,
1215 .set_coalesce = qlcnic_set_intr_coalesce,
1216 .set_phys_id = qlcnic_set_led,
1217 .set_msglevel = qlcnic_set_msglevel,
1218 .get_msglevel = qlcnic_get_msglevel,
1219 .get_dump_flag = qlcnic_get_dump_flag,
1220 .get_dump_data = qlcnic_get_dump_data,
1221 .set_dump = qlcnic_set_dump,
1222};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 00000000000..d14506f764e
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,1023 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef __QLCNIC_HDR_H_
9#define __QLCNIC_HDR_H_
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13
14/*
15 * The basic unit of access when reading/writing control registers.
16 */
17
18enum {
19 QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
20 QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
21 QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
22 QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
23 QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
24 QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
25 QLCNIC_HW_H6_CH_HUB_ADR = 0x08
26};
27
28/* Hub 0 */
29enum {
30 QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
31 QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
32};
33
34/* Hub 1 */
35enum {
36 QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
37 QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
38 QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
39 QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
40 QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
41 QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
42 QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
43 QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
44 QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
45 QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
46 QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
47 QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
48 QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
49 QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
50 QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
51 QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
52};
53
54/* Hub 2 */
55enum {
56 QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
57 QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
58 QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
59
60 QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
61 QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
62 QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
63 QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
64 QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
65 QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
66 QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
67 QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
68 QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
69 QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
70 QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
71 QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
72 QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
73};
74
75/* Hub 3 */
76enum {
77 QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
78 QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
79 QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
80 QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
81};
82
83/* Hub 4 */
84enum {
85 QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
86 QLCNIC_HW_PEGN1_CRB_AGT_ADR,
87 QLCNIC_HW_PEGN2_CRB_AGT_ADR,
88 QLCNIC_HW_PEGN3_CRB_AGT_ADR,
89 QLCNIC_HW_PEGNI_CRB_AGT_ADR,
90 QLCNIC_HW_PEGND_CRB_AGT_ADR,
91 QLCNIC_HW_PEGNC_CRB_AGT_ADR,
92 QLCNIC_HW_PEGR0_CRB_AGT_ADR,
93 QLCNIC_HW_PEGR1_CRB_AGT_ADR,
94 QLCNIC_HW_PEGR2_CRB_AGT_ADR,
95 QLCNIC_HW_PEGR3_CRB_AGT_ADR,
96 QLCNIC_HW_PEGN4_CRB_AGT_ADR
97};
98
99/* Hub 5 */
100enum {
101 QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
102 QLCNIC_HW_PEGS1_CRB_AGT_ADR,
103 QLCNIC_HW_PEGS2_CRB_AGT_ADR,
104 QLCNIC_HW_PEGS3_CRB_AGT_ADR,
105 QLCNIC_HW_PEGSI_CRB_AGT_ADR,
106 QLCNIC_HW_PEGSD_CRB_AGT_ADR,
107 QLCNIC_HW_PEGSC_CRB_AGT_ADR
108};
109
110/* Hub 6 */
111enum {
112 QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
113 QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
114 QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
115 QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
116 QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
117 QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
118 QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
119 QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
120 QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
121};
122
123/* Floaters - non-existent modules */
124#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
125
126/* This field defines PCI/X adr [25:20] of agents on the CRB */
127enum {
128 QLCNIC_HW_PX_MAP_CRB_PH = 0,
129 QLCNIC_HW_PX_MAP_CRB_PS,
130 QLCNIC_HW_PX_MAP_CRB_MN,
131 QLCNIC_HW_PX_MAP_CRB_MS,
132 QLCNIC_HW_PX_MAP_CRB_PGR1,
133 QLCNIC_HW_PX_MAP_CRB_SRE,
134 QLCNIC_HW_PX_MAP_CRB_NIU,
135 QLCNIC_HW_PX_MAP_CRB_QMN,
136 QLCNIC_HW_PX_MAP_CRB_SQN0,
137 QLCNIC_HW_PX_MAP_CRB_SQN1,
138 QLCNIC_HW_PX_MAP_CRB_SQN2,
139 QLCNIC_HW_PX_MAP_CRB_SQN3,
140 QLCNIC_HW_PX_MAP_CRB_QMS,
141 QLCNIC_HW_PX_MAP_CRB_SQS0,
142 QLCNIC_HW_PX_MAP_CRB_SQS1,
143 QLCNIC_HW_PX_MAP_CRB_SQS2,
144 QLCNIC_HW_PX_MAP_CRB_SQS3,
145 QLCNIC_HW_PX_MAP_CRB_PGN0,
146 QLCNIC_HW_PX_MAP_CRB_PGN1,
147 QLCNIC_HW_PX_MAP_CRB_PGN2,
148 QLCNIC_HW_PX_MAP_CRB_PGN3,
149 QLCNIC_HW_PX_MAP_CRB_PGND,
150 QLCNIC_HW_PX_MAP_CRB_PGNI,
151 QLCNIC_HW_PX_MAP_CRB_PGS0,
152 QLCNIC_HW_PX_MAP_CRB_PGS1,
153 QLCNIC_HW_PX_MAP_CRB_PGS2,
154 QLCNIC_HW_PX_MAP_CRB_PGS3,
155 QLCNIC_HW_PX_MAP_CRB_PGSD,
156 QLCNIC_HW_PX_MAP_CRB_PGSI,
157 QLCNIC_HW_PX_MAP_CRB_SN,
158 QLCNIC_HW_PX_MAP_CRB_PGR2,
159 QLCNIC_HW_PX_MAP_CRB_EG,
160 QLCNIC_HW_PX_MAP_CRB_PH2,
161 QLCNIC_HW_PX_MAP_CRB_PS2,
162 QLCNIC_HW_PX_MAP_CRB_CAM,
163 QLCNIC_HW_PX_MAP_CRB_CAS0,
164 QLCNIC_HW_PX_MAP_CRB_CAS1,
165 QLCNIC_HW_PX_MAP_CRB_CAS2,
166 QLCNIC_HW_PX_MAP_CRB_C2C0,
167 QLCNIC_HW_PX_MAP_CRB_C2C1,
168 QLCNIC_HW_PX_MAP_CRB_TIMR,
169 QLCNIC_HW_PX_MAP_CRB_PGR3,
170 QLCNIC_HW_PX_MAP_CRB_RPMX1,
171 QLCNIC_HW_PX_MAP_CRB_RPMX2,
172 QLCNIC_HW_PX_MAP_CRB_RPMX3,
173 QLCNIC_HW_PX_MAP_CRB_RPMX4,
174 QLCNIC_HW_PX_MAP_CRB_RPMX5,
175 QLCNIC_HW_PX_MAP_CRB_RPMX6,
176 QLCNIC_HW_PX_MAP_CRB_RPMX7,
177 QLCNIC_HW_PX_MAP_CRB_XDMA,
178 QLCNIC_HW_PX_MAP_CRB_I2Q,
179 QLCNIC_HW_PX_MAP_CRB_ROMUSB,
180 QLCNIC_HW_PX_MAP_CRB_CAS3,
181 QLCNIC_HW_PX_MAP_CRB_RPMX0,
182 QLCNIC_HW_PX_MAP_CRB_RPMX8,
183 QLCNIC_HW_PX_MAP_CRB_RPMX9,
184 QLCNIC_HW_PX_MAP_CRB_OCM0,
185 QLCNIC_HW_PX_MAP_CRB_OCM1,
186 QLCNIC_HW_PX_MAP_CRB_SMB,
187 QLCNIC_HW_PX_MAP_CRB_I2C0,
188 QLCNIC_HW_PX_MAP_CRB_I2C1,
189 QLCNIC_HW_PX_MAP_CRB_LPC,
190 QLCNIC_HW_PX_MAP_CRB_PGNC,
191 QLCNIC_HW_PX_MAP_CRB_PGR0
192};
193
194#define BIT_0 0x1
195#define BIT_1 0x2
196#define BIT_2 0x4
197#define BIT_3 0x8
198#define BIT_4 0x10
199#define BIT_5 0x20
200#define BIT_6 0x40
201#define BIT_7 0x80
202#define BIT_8 0x100
203#define BIT_9 0x200
204#define BIT_10 0x400
205#define BIT_11 0x800
206#define BIT_12 0x1000
207#define BIT_13 0x2000
208#define BIT_14 0x4000
209#define BIT_15 0x8000
210#define BIT_16 0x10000
211#define BIT_17 0x20000
212#define BIT_18 0x40000
213#define BIT_19 0x80000
214#define BIT_20 0x100000
215#define BIT_21 0x200000
216#define BIT_22 0x400000
217#define BIT_23 0x800000
218#define BIT_24 0x1000000
219#define BIT_25 0x2000000
220#define BIT_26 0x4000000
221#define BIT_27 0x8000000
222#define BIT_28 0x10000000
223#define BIT_29 0x20000000
224#define BIT_30 0x40000000
225#define BIT_31 0x80000000
226
227/* This field defines CRB adr [31:20] of the agents */
228
229#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
230 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
231#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
232 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
233#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
234 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
235
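Editor's note: as a worked example of this composition (hub and agent codes from the enums at the top of this header), the MN agent combines as shown below; CRB_HI() in qlcnic_hw.c later shifts the result into bits [31:20] of the internal CRB address.

	/* QLCNIC_HW_CRB_HUB_AGT_ADR_MN == (0x05 << 7) | 0x15 == 0x295 */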
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
250#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
251 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
252#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
253 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
254#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
255 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
256#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
257 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
259 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
261 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
263 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
265 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
266
267#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
268 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
269#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
270 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
271#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
272 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
273
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
290#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
291 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
292#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
293 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
294#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
295 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
296#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
297 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
298#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
299 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
300#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
301 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
302#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
303 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
304#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
305 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
306
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
315#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
316 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
317#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
318 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
319#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
320 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
321#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
322 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
323#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
324 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
325#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
326 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
327#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
328 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
329#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
330 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
331
332#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
333 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
334#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
335 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
336#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
337 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
338#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
339 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
340#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
341 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
342#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
343 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
344#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
345 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
346
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
349#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
350 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
351#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
352 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
353#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
354 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
355#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
356 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
357#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
358 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
359#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
360 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
361#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
362 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
363#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
364 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
365
366#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
367
368#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
369
370#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
371#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
372
373#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
374#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
375#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
376#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
377#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
378#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
379#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
380
381#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
382
383#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
384#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
385#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
386#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
387#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
388#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
389
390/* Lock IDs for ROM lock */
391#define ROM_LOCK_DRIVER 0x0d417340
392
393/******************************************************************************
394*
395* Definitions specific to M25P flash
396*
397*******************************************************************************
398*/
399
400/* all are 1MB windows */
401
402#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
403#define QLCNIC_PCI_CRB_WINDOW(A) \
404 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
405
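Editor's note: each mapped agent therefore occupies a fixed 1 MB slice of the CRB aperture. For instance, with QLCNIC_HW_PX_MAP_CRB_NIU == 6 from the enum above and QLCNIC_PCI_CRBSPACE (0x06000000, defined later in this header):

	/* QLCNIC_CRB_NIU == 0x06000000 + 6 * 0x00100000 == 0x06600000 */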
406#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
407#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
408#define QLCNIC_CRB_ROMUSB \
409 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
410#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
411#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
412#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
413#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
414
415#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
416#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
417#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
418#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
419#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
420#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
421#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
422#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
423#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
424#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
425#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
426
427#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
428#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
429
430#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
431#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
432#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
433#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
434#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
435#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
436#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
437#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
438#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
439#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
440#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
441#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
442#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
443#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
444#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
445#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
446#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
447#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
448#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
449
450#define QLCNIC_PCI_MN_2M (0)
451#define QLCNIC_PCI_MS_2M (0x80000)
452#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
453#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
454#define QLCNIC_PCI_CAMQM (0x04800000UL)
455#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
456#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
457#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
458
459#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
460
461#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
462#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
463#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
464#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
465#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
466#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
467#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
468#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
469
470/*
471 * Register offsets for MN
472 */
473#define QLCNIC_MIU_CONTROL (0x000)
474#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
475
476/* 200ms delay in each loop */
477#define QLCNIC_NIU_PHY_WAITLEN 200000
478/* 10 seconds before we give up */
479#define QLCNIC_NIU_PHY_WAITMAX 50
480#define QLCNIC_NIU_MAX_GBE_PORTS 4
481#define QLCNIC_NIU_MAX_XG_PORTS 2
482
483#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
484#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
485#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
486
487#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
488 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
489#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
490 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
491
492
493#define TEST_AGT_CTRL (0x00)
494
495#define TA_CTL_START BIT_0
496#define TA_CTL_ENABLE BIT_1
497#define TA_CTL_WRITE BIT_2
498#define TA_CTL_BUSY BIT_3
499
500/*
501 * Register offsets for MN
502 */
503#define MIU_TEST_AGT_BASE (0x90)
504
505#define MIU_TEST_AGT_ADDR_LO (0x04)
506#define MIU_TEST_AGT_ADDR_HI (0x08)
507#define MIU_TEST_AGT_WRDATA_LO (0x10)
508#define MIU_TEST_AGT_WRDATA_HI (0x14)
509#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
510#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
511#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
512#define MIU_TEST_AGT_RDDATA_LO (0x18)
513#define MIU_TEST_AGT_RDDATA_HI (0x1c)
514#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
515#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
516#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
517
518#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
519#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
520
521/*
522 * Register offsets for MS
523 */
524#define SIU_TEST_AGT_BASE (0x60)
525
526#define SIU_TEST_AGT_ADDR_LO (0x04)
527#define SIU_TEST_AGT_ADDR_HI (0x18)
528#define SIU_TEST_AGT_WRDATA_LO (0x08)
529#define SIU_TEST_AGT_WRDATA_HI (0x0c)
530#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
531#define SIU_TEST_AGT_RDDATA_LO (0x10)
532#define SIU_TEST_AGT_RDDATA_HI (0x14)
533#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
534
535#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
536#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
537
538/* XG Link status */
539#define XG_LINK_UP 0x10
540#define XG_LINK_DOWN 0x20
541
542#define XG_LINK_UP_P3P 0x01
543#define XG_LINK_DOWN_P3P 0x02
544#define XG_LINK_STATE_P3P_MASK 0xf
545#define XG_LINK_STATE_P3P(pcifn, val) \
546 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
547
548#define P3P_LINK_SPEED_MHZ 100
549#define P3P_LINK_SPEED_MASK 0xff
550#define P3P_LINK_SPEED_REG(pcifn) \
551 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
552#define P3P_LINK_SPEED_VAL(pcifn, reg) \
553 (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
554
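Editor's note: a short sketch of how these per-function helpers combine when decoding link state and speed (register reads via QLCRD32() as elsewhere in this driver; CRB_XG_STATE_P3P and CRB_PF_LINK_SPEED_1 are defined further down; the function index pcifn is illustrative):

	u32 state = QLCRD32(adapter, CRB_XG_STATE_P3P);
	u32 speed_reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
	u8 link = XG_LINK_STATE_P3P(pcifn, state);	/* XG_LINK_UP_P3P or XG_LINK_DOWN_P3P */
	u32 speed = P3P_LINK_SPEED_VAL(pcifn, speed_reg) * P3P_LINK_SPEED_MHZ; /* scaled by the define above */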
555#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
556#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
557#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
558#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
559#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
560#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
561#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
562#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
563
564#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
565#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
566#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
567#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
568
569#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
570#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
571#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
572#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
573#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
574
575#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
576#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
577
578#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
579#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
580#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
581
582#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
583
584#define CRB_V2P_0 (QLCNIC_REG(0x290))
585#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
586#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
587
588#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
589#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
590
591/*
592 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
593 * which can be read by the Phantom host to get producer/consumer indexes from
594 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
595 * registers will be used for the addresses of the ring's shared memory
596 * on the Phantom.
597 */
598
599#define qlcnic_get_temp_val(x) ((x) >> 16)
600#define qlcnic_get_temp_state(x) ((x) & 0xffff)
601#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
602
603/*
604 * Temperature control.
605 */
606enum {
607 QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
608 QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
609 QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
610};
611
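Editor's note: the word read from CRB_TEMP_STATE packs the temperature value in its upper half and one of the states above in its lower half; a minimal decode sketch using the helpers just defined:

	u32 temp = QLCRD32(adapter, CRB_TEMP_STATE);
	u32 temp_state = qlcnic_get_temp_state(temp);	/* QLCNIC_TEMP_NORMAL/WARN/PANIC */
	u32 temp_val = qlcnic_get_temp_val(temp);	/* value from the upper 16 bits */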
612/* Lock IDs for PHY lock */
613#define PHY_LOCK_DRIVER 0x44524956
614
615/* Used for PS PCI Memory access */
616#define PCIX_PS_OP_ADDR_LO (0x10000)
617/* via CRB (PS side only) */
618#define PCIX_PS_OP_ADDR_HI (0x10004)
619
620#define PCIX_INT_VECTOR (0x10100)
621#define PCIX_INT_MASK (0x10104)
622
623#define PCIX_OCM_WINDOW (0x10800)
624#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
625
626#define PCIX_TARGET_STATUS (0x10118)
627#define PCIX_TARGET_STATUS_F1 (0x10160)
628#define PCIX_TARGET_STATUS_F2 (0x10164)
629#define PCIX_TARGET_STATUS_F3 (0x10168)
630#define PCIX_TARGET_STATUS_F4 (0x10360)
631#define PCIX_TARGET_STATUS_F5 (0x10364)
632#define PCIX_TARGET_STATUS_F6 (0x10368)
633#define PCIX_TARGET_STATUS_F7 (0x1036c)
634
635#define PCIX_TARGET_MASK (0x10128)
636#define PCIX_TARGET_MASK_F1 (0x10170)
637#define PCIX_TARGET_MASK_F2 (0x10174)
638#define PCIX_TARGET_MASK_F3 (0x10178)
639#define PCIX_TARGET_MASK_F4 (0x10370)
640#define PCIX_TARGET_MASK_F5 (0x10374)
641#define PCIX_TARGET_MASK_F6 (0x10378)
642#define PCIX_TARGET_MASK_F7 (0x1037c)
643
644#define PCIX_MSI_F(i) (0x13000+((i)*4))
645
646#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
647#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
648#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
649
650#define PCIE_SEM0_LOCK (0x1c000)
651#define PCIE_SEM0_UNLOCK (0x1c004)
652#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
653#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
654
655#define PCIE_SETUP_FUNCTION (0x12040)
656#define PCIE_SETUP_FUNCTION2 (0x12048)
657#define PCIE_MISCCFG_RC (0x1206c)
658#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
659#define PCIE_CHICKEN3 (0x120c8)
660
661#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
662#define PCIE_MAX_MASTER_SPLIT (0x14048)
663
664#define QLCNIC_PORT_MODE_NONE 0
665#define QLCNIC_PORT_MODE_XG 1
666#define QLCNIC_PORT_MODE_GB 2
667#define QLCNIC_PORT_MODE_802_3_AP 3
668#define QLCNIC_PORT_MODE_AUTO_NEG 4
669#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
670#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
671#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
672#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
673
674#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
675#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
676
677#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
678#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
679
680#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
681#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
682#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
683#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
684#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
685#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
686
687#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
688#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
689#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
690#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
691#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
692#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
693#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
694
695/* Device State */
696#define QLCNIC_DEV_COLD 0x1
697#define QLCNIC_DEV_INITIALIZING 0x2
698#define QLCNIC_DEV_READY 0x3
699#define QLCNIC_DEV_NEED_RESET 0x4
700#define QLCNIC_DEV_NEED_QUISCENT 0x5
701#define QLCNIC_DEV_FAILED 0x6
702#define QLCNIC_DEV_QUISCENT 0x7
703
704#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
705#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
706#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
707
708#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
709#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
710#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
711#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
712#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
713#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
714
715#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
716#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
717
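Editor's note: each PCI function owns a 4-bit nibble in these shared device-state words. A hedged sketch of the usual read-modify-write on the driver-active reference count (register access via QLCRD32()/QLCWR32(); the actual call sites live in qlcnic_main.c):

	u32 ref = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);

	if (!QLC_DEV_CHECK_ACTIVE(ref, adapter->portnum))	/* bit 0 of our nibble */
		QLC_DEV_SET_REF_CNT(ref, adapter->portnum);	/* modifies ref in place */
	QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, ref);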
718#define QLCNIC_TYPE_NIC 1
719#define QLCNIC_TYPE_FCOE 2
720#define QLCNIC_TYPE_ISCSI 3
721
722#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
723#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
724#define QLCNIC_RCODE_FATAL_ERROR BIT_31
725#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
726#define QLCNIC_FWERROR_CODE(code) (((code) >> 8) & 0xfffff)
727
728#define FW_POLL_DELAY (1 * HZ)
729#define FW_FAIL_THRESH 2
730
731#define QLCNIC_RESET_TIMEOUT_SECS 10
732#define QLCNIC_INIT_TIMEOUT_SECS 30
733#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
734#define QLCNIC_RCVPEG_CHECK_DELAY 10
735#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
736#define QLCNIC_CMDPEG_CHECK_DELAY 500
737#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
738#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
739
740#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
741#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
742
743/*
744 * PCI Interrupt Vector Values.
745 */
746#define PCIX_INT_VECTOR_BIT_F0 0x0080
747#define PCIX_INT_VECTOR_BIT_F1 0x0100
748#define PCIX_INT_VECTOR_BIT_F2 0x0200
749#define PCIX_INT_VECTOR_BIT_F3 0x0400
750#define PCIX_INT_VECTOR_BIT_F4 0x0800
751#define PCIX_INT_VECTOR_BIT_F5 0x1000
752#define PCIX_INT_VECTOR_BIT_F6 0x2000
753#define PCIX_INT_VECTOR_BIT_F7 0x4000
754
755struct qlcnic_legacy_intr_set {
756 u32 int_vec_bit;
757 u32 tgt_status_reg;
758 u32 tgt_mask_reg;
759 u32 pci_int_reg;
760};
761
762#define QLCNIC_FW_API 0x1b216c
763#define QLCNIC_DRV_OP_MODE 0x1b2170
764#define QLCNIC_MSIX_BASE 0x132110
765#define QLCNIC_MAX_PCI_FUNC 8
766#define QLCNIC_MAX_VLAN_FILTERS 64
767
768/* FW dump defines */
769#define MIU_TEST_CTR 0x41000090
770#define MIU_TEST_ADDR_LO 0x41000094
771#define MIU_TEST_ADDR_HI 0x41000098
772#define FLASH_ROM_WINDOW 0x42110030
773#define FLASH_ROM_DATA 0x42150000
774
775static const u32 MIU_TEST_READ_DATA[] = {
776 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
777
778#define QLCNIC_FW_DUMP_REG1 0x00130060
779#define QLCNIC_FW_DUMP_REG2 0x001e0000
780#define QLCNIC_FLASH_SEM2_LK 0x0013C010
781#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
782#define QLCNIC_FLASH_LOCK_ID 0x001B2100
783
784#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
785 writel((addr & 0xFFFF0000), (void *) (bar0 + \
786 QLCNIC_FW_DUMP_REG1)); \
787 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
788 *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
789 LSW(addr))); \
790} while (0)
791
792#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
793 writel((addr & 0xFFFF0000), (void *) (bar0 + \
794 QLCNIC_FW_DUMP_REG1)); \
795 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
796 writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
797 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
798} while (0)
799
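Editor's note: both helpers use the same windowed-access pattern: the upper 16 bits of the target address select a window through QLCNIC_FW_DUMP_REG1 (the extra readl() flushes the posted write), and the low 16 bits index into the QLCNIC_FW_DUMP_REG2 aperture. A usage sketch, assuming bar0 is the ioremapped BAR 0 base the macros expect (the field it comes from is illustrative):

	void __iomem *bar0 = adapter->ahw->pci_base0;	/* illustrative; actual field name may differ */
	u32 val;

	QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, bar0, &val);		/* read the control register */
	QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, bar0, val | BIT_0);	/* write a modified value back */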
800/* PCI function operational mode */
801enum {
802 QLCNIC_MGMT_FUNC = 0,
803 QLCNIC_PRIV_FUNC = 1,
804 QLCNIC_NON_PRIV_FUNC = 2
805};
806
807enum {
808 QLCNIC_PORT_DEFAULTS = 0,
809 QLCNIC_ADD_VLAN = 1,
810 QLCNIC_DEL_VLAN = 2
811};
812
813#define QLC_DEV_DRV_DEFAULT 0x11111111
814
815#define LSB(x) ((uint8_t)(x))
816#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
817
818#define LSW(x) ((uint16_t)((uint32_t)(x)))
819#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
820
821#define LSD(x) ((uint32_t)((uint64_t)(x)))
822#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
823
824#define QLCNIC_LEGACY_INTR_CONFIG \
825{ \
826 { \
827 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
828 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
829 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
830 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
831 \
832 { \
833 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
834 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
835 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
836 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
837 \
838 { \
839 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
840 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
841 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
842 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
843 \
844 { \
845 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
846 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
847 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
848 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
849 \
850 { \
851 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
852 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
853 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
854 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
855 \
856 { \
857 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
858 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
859 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
860 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
861 \
862 { \
863 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
864 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
865 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
866 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
867 \
868 { \
869 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
870 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
871 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
872 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
873}
874
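Editor's note: the initializer above is consumed as a per-PCI-function table; the driver instantiates it once and picks the entry matching its function number, roughly as sketched here (how the chosen entry is stored on the adapter is illustrative):

	static const struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

	/* during probe: keep legacy_intr[pci_func] around for later INTx handling */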
875/* NIU REGS */
876
877#define _qlcnic_crb_get_bit(var, bit) (((var) >> (bit)) & 0x1)
878
879/*
880 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
881 *
882 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
883 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
884 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
885 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
886 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
887 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
888 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
889 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
890 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
891 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
892 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
893 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
894 */
895#define qlcnic_gb_rx_flowctl(config_word) \
896 ((config_word) |= 1 << 5)
897#define qlcnic_gb_get_rx_flowctl(config_word) \
898 _qlcnic_crb_get_bit((config_word), 5)
899#define qlcnic_gb_unset_rx_flowctl(config_word) \
900 ((config_word) &= ~(1 << 5))
901
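Editor's note: these bit helpers act on a config word that is read from and written back to QLCNIC_NIU_GB_MAC_CONFIG_0(port); a minimal sketch of toggling RX flow control for one GbE port (port index and enable flag illustrative, register access via QLCRD32()/QLCWR32()):

	u32 mac_cfg = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));

	if (enable)
		qlcnic_gb_rx_flowctl(mac_cfg);		/* set bit 5: honor received pause frames */
	else
		qlcnic_gb_unset_rx_flowctl(mac_cfg);	/* clear bit 5 */
	QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), mac_cfg);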
902/*
903 * NIU GB Pause Ctl Register
904 */
905
906#define qlcnic_gb_set_gb0_mask(config_word) \
907 ((config_word) |= 1 << 0)
908#define qlcnic_gb_set_gb1_mask(config_word) \
909 ((config_word) |= 1 << 2)
910#define qlcnic_gb_set_gb2_mask(config_word) \
911 ((config_word) |= 1 << 4)
912#define qlcnic_gb_set_gb3_mask(config_word) \
913 ((config_word) |= 1 << 6)
914
915#define qlcnic_gb_get_gb0_mask(config_word) \
916 _qlcnic_crb_get_bit((config_word), 0)
917#define qlcnic_gb_get_gb1_mask(config_word) \
918 _qlcnic_crb_get_bit((config_word), 2)
919#define qlcnic_gb_get_gb2_mask(config_word) \
920 _qlcnic_crb_get_bit((config_word), 4)
921#define qlcnic_gb_get_gb3_mask(config_word) \
922 _qlcnic_crb_get_bit((config_word), 6)
923
924#define qlcnic_gb_unset_gb0_mask(config_word) \
925 ((config_word) &= ~(1 << 0))
926#define qlcnic_gb_unset_gb1_mask(config_word) \
927 ((config_word) &= ~(1 << 2))
928#define qlcnic_gb_unset_gb2_mask(config_word) \
929 ((config_word) &= ~(1 << 4))
930#define qlcnic_gb_unset_gb3_mask(config_word) \
931 ((config_word) &= ~(1 << 6))
932
933/*
934 * NIU XG Pause Ctl Register
935 *
936 * Bit 0 : xg0_mask => 1:disable tx pause frames
937 * Bit 1 : xg0_request => 1:request single pause frame
938 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
939 * Bit 3 : xg1_mask => 1:disable tx pause frames
940 * Bit 4 : xg1_request => 1:request single pause frame
941 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
942 */
943
944#define qlcnic_xg_set_xg0_mask(config_word) \
945 ((config_word) |= 1 << 0)
946#define qlcnic_xg_set_xg1_mask(config_word) \
947 ((config_word) |= 1 << 3)
948
949#define qlcnic_xg_get_xg0_mask(config_word) \
950 _qlcnic_crb_get_bit((config_word), 0)
951#define qlcnic_xg_get_xg1_mask(config_word) \
952 _qlcnic_crb_get_bit((config_word), 3)
953
954#define qlcnic_xg_unset_xg0_mask(config_word) \
955 ((config_word) &= ~(1 << 0))
956#define qlcnic_xg_unset_xg1_mask(config_word) \
957 ((config_word) &= ~(1 << 3))
958
970/*
971 * PHY-Specific MII control/status registers.
972 */
973#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
974#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
975
976/*
977 * PHY-Specific Status Register (reg 17).
978 *
979 * Bit 0 : jabber => 1:jabber detected, 0:not
980 * Bit 1 : polarity => 1:polarity reversed, 0:normal
981 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
982 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
983 * Bit 4 : energydetect => 1:sleep, 0:active
984 * Bit 5 : downshift => 1:downshift, 0:no downshift
985 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
986 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
987 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
988 * Bit 10 : link => 1:link up, 0:link down
989 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
990 * Bit 12 : pagercvd => 1:page received, 0:page not received
991 * Bit 13 : duplex => 1:full duplex, 0:half duplex
992 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
993 */
994
995#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
996
997#define qlcnic_set_phy_speed(config_word, val) \
998 ((config_word) |= ((val & 0x03) << 14))
999#define qlcnic_set_phy_duplex(config_word) \
1000 ((config_word) |= 1 << 13)
1001#define qlcnic_clear_phy_duplex(config_word) \
1002 ((config_word) &= ~(1 << 13))
1003
1004#define qlcnic_get_phy_link(config_word) \
1005 _qlcnic_crb_get_bit(config_word, 10)
1006#define qlcnic_get_phy_duplex(config_word) \
1007 _qlcnic_crb_get_bit(config_word, 13)
1008
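Editor's note: putting the helpers above together, decoding a value read from PHY register 17 looks like this (how the register is read is outside this header; the variable names are illustrative):

	int link_up = qlcnic_get_phy_link(phy_status);		/* bit 10 */
	int full_duplex = qlcnic_get_phy_duplex(phy_status);	/* bit 13 */
	int speed = qlcnic_get_phy_speed(phy_status);		/* 0:10, 1:100, 2:1000 Mb/s */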
1009#define QLCNIC_NIU_NON_PROMISC_MODE 0
1010#define QLCNIC_NIU_PROMISC_MODE 1
1011#define QLCNIC_NIU_ALLMULTI_MODE 2
1012
1013struct crb_128M_2M_sub_block_map {
1014 unsigned valid;
1015 unsigned start_128M;
1016 unsigned end_128M;
1017 unsigned start_2M;
1018};
1019
1020struct crb_128M_2M_block_map {
1021 struct crb_128M_2M_sub_block_map sub_block[16];
1022};
1023#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 00000000000..4055c218ef2
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1787 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic.h"
9
10#include <linux/slab.h>
11#include <net/ip.h>
12#include <linux/bitops.h>
13
14#define MASK(n) ((1ULL<<(n))-1)
15#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
16
17#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
18
19#define CRB_BLK(off) ((off >> 20) & 0x3f)
20#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
21#define CRB_WINDOW_2M (0x130060)
22#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
23#define CRB_INDIRECT_2M (0x1e0000UL)
24
25
26#ifndef readq
27static inline u64 readq(void __iomem *addr)
28{
29 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
30}
31#endif
32
33#ifndef writeq
34static inline void writeq(u64 val, void __iomem *addr)
35{
36 writel(((u32) (val)), (addr));
37 writel(((u32) (val >> 32)), (addr + 4));
38}
39#endif
40
41static const struct crb_128M_2M_block_map
42crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
43 {{{0, 0, 0, 0} } }, /* 0: PCI */
44 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
45 {1, 0x0110000, 0x0120000, 0x130000},
46 {1, 0x0120000, 0x0122000, 0x124000},
47 {1, 0x0130000, 0x0132000, 0x126000},
48 {1, 0x0140000, 0x0142000, 0x128000},
49 {1, 0x0150000, 0x0152000, 0x12a000},
50 {1, 0x0160000, 0x0170000, 0x110000},
51 {1, 0x0170000, 0x0172000, 0x12e000},
52 {0, 0x0000000, 0x0000000, 0x000000},
53 {0, 0x0000000, 0x0000000, 0x000000},
54 {0, 0x0000000, 0x0000000, 0x000000},
55 {0, 0x0000000, 0x0000000, 0x000000},
56 {0, 0x0000000, 0x0000000, 0x000000},
57 {0, 0x0000000, 0x0000000, 0x000000},
58 {1, 0x01e0000, 0x01e0800, 0x122000},
59 {0, 0x0000000, 0x0000000, 0x000000} } },
60 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
61 {{{0, 0, 0, 0} } }, /* 3: */
62 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
63 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
64 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
65 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
66 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
67 {0, 0x0000000, 0x0000000, 0x000000},
68 {0, 0x0000000, 0x0000000, 0x000000},
69 {0, 0x0000000, 0x0000000, 0x000000},
70 {0, 0x0000000, 0x0000000, 0x000000},
71 {0, 0x0000000, 0x0000000, 0x000000},
72 {0, 0x0000000, 0x0000000, 0x000000},
73 {0, 0x0000000, 0x0000000, 0x000000},
74 {0, 0x0000000, 0x0000000, 0x000000},
75 {0, 0x0000000, 0x0000000, 0x000000},
76 {0, 0x0000000, 0x0000000, 0x000000},
77 {0, 0x0000000, 0x0000000, 0x000000},
78 {0, 0x0000000, 0x0000000, 0x000000},
79 {0, 0x0000000, 0x0000000, 0x000000},
80 {0, 0x0000000, 0x0000000, 0x000000},
81 {1, 0x08f0000, 0x08f2000, 0x172000} } },
82 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {0, 0x0000000, 0x0000000, 0x000000},
89 {0, 0x0000000, 0x0000000, 0x000000},
90 {0, 0x0000000, 0x0000000, 0x000000},
91 {0, 0x0000000, 0x0000000, 0x000000},
92 {0, 0x0000000, 0x0000000, 0x000000},
93 {0, 0x0000000, 0x0000000, 0x000000},
94 {0, 0x0000000, 0x0000000, 0x000000},
95 {0, 0x0000000, 0x0000000, 0x000000},
96 {0, 0x0000000, 0x0000000, 0x000000},
97 {1, 0x09f0000, 0x09f2000, 0x176000} } },
98 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {0, 0x0000000, 0x0000000, 0x000000},
108 {0, 0x0000000, 0x0000000, 0x000000},
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {0, 0x0000000, 0x0000000, 0x000000},
112 {0, 0x0000000, 0x0000000, 0x000000},
113 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
114 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {0, 0x0000000, 0x0000000, 0x000000},
124 {0, 0x0000000, 0x0000000, 0x000000},
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {0, 0x0000000, 0x0000000, 0x000000},
128 {0, 0x0000000, 0x0000000, 0x000000},
129 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
130 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
131 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
132 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
133 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
134 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
135 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
136 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
137 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
138 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
139 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
140 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
141 {{{0, 0, 0, 0} } }, /* 23: */
142 {{{0, 0, 0, 0} } }, /* 24: */
143 {{{0, 0, 0, 0} } }, /* 25: */
144 {{{0, 0, 0, 0} } }, /* 26: */
145 {{{0, 0, 0, 0} } }, /* 27: */
146 {{{0, 0, 0, 0} } }, /* 28: */
147 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
148 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
149 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
150 {{{0} } }, /* 32: PCI */
151 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
152 {1, 0x2110000, 0x2120000, 0x130000},
153 {1, 0x2120000, 0x2122000, 0x124000},
154 {1, 0x2130000, 0x2132000, 0x126000},
155 {1, 0x2140000, 0x2142000, 0x128000},
156 {1, 0x2150000, 0x2152000, 0x12a000},
157 {1, 0x2160000, 0x2170000, 0x110000},
158 {1, 0x2170000, 0x2172000, 0x12e000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {0, 0x0000000, 0x0000000, 0x000000},
163 {0, 0x0000000, 0x0000000, 0x000000},
164 {0, 0x0000000, 0x0000000, 0x000000},
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000} } },
167 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
168 {{{0} } }, /* 35: */
169 {{{0} } }, /* 36: */
170 {{{0} } }, /* 37: */
171 {{{0} } }, /* 38: */
172 {{{0} } }, /* 39: */
173 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
174 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
175 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
176 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
177 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
178 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
179 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
180 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
181 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
182 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
183 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
184 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
185 {{{0} } }, /* 52: */
186 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
187 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
188 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
189 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
190 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
191 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
192 {{{0} } }, /* 59: I2C0 */
193 {{{0} } }, /* 60: I2C1 */
194 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
195 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
196 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
197};
198
199/*
200 * top 12 bits of crb internal address (hub, agent)
201 */
202static const unsigned crb_hub_agt[64] = {
203 0,
204 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
205 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
206 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
207 0,
208 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
209 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
210 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
211 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
212 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
213 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
214 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
215 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
216 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
217 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
218 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
219 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
220 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
221 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
222 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
223 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
224 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
225 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
226 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
227 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
228 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
229 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
230 0,
231 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
232 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
233 0,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
235 0,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
237 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
238 0,
239 0,
240 0,
241 0,
242 0,
243 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
244 0,
245 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
248 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
249 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
250 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
251 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
252 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
254 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
255 0,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
260 0,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
263 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
264 0,
265 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
266 0,
267};
268
269/* PCI Windowing for DDR regions. */
270
271#define QLCNIC_PCIE_SEM_TIMEOUT 10000
272
273int
274qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
275{
276 int done = 0, timeout = 0;
277
278 while (!done) {
279 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
280 if (done == 1)
281 break;
282 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
283 dev_err(&adapter->pdev->dev,
284 "Failed to acquire sem=%d lock; holdby=%d\n",
285 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
286 return -EIO;
287 }
288 msleep(1);
289 }
290
291 if (id_reg)
292 QLCWR32(adapter, id_reg, adapter->portnum);
293
294 return 0;
295}
296
297void
298qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
299{
300 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
301}
302
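Editor's note: the PCIe hardware semaphores serialize access to shared resources across PCI functions; callers bracket their critical section with the pair above. A hedged sketch of the flash-ROM pattern (semaphore 2 together with the QLCNIC_ROM_LOCK_ID register from qlcnic_hdr.h, mirroring how the driver brackets its flash accesses):

	if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
		return -EIO;			/* another function holds the ROM */
	/* ... access the serial ROM through the ROMUSB registers ... */
	qlcnic_pcie_sem_unlock(adapter, 2);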
303static int
304qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
305 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
306{
307 u32 i, producer, consumer;
308 struct qlcnic_cmd_buffer *pbuf;
309 struct cmd_desc_type0 *cmd_desc;
310 struct qlcnic_host_tx_ring *tx_ring;
311
312 i = 0;
313
314 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
315 return -EIO;
316
317 tx_ring = adapter->tx_ring;
318 __netif_tx_lock_bh(tx_ring->txq);
319
320 producer = tx_ring->producer;
321 consumer = tx_ring->sw_consumer;
322
323 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
324 netif_tx_stop_queue(tx_ring->txq);
325 smp_mb();
326 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
327 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
328 netif_tx_wake_queue(tx_ring->txq);
329 } else {
330 adapter->stats.xmit_off++;
331 __netif_tx_unlock_bh(tx_ring->txq);
332 return -EBUSY;
333 }
334 }
335
336 do {
337 cmd_desc = &cmd_desc_arr[i];
338
339 pbuf = &tx_ring->cmd_buf_arr[producer];
340 pbuf->skb = NULL;
341 pbuf->frag_count = 0;
342
343 memcpy(&tx_ring->desc_head[producer],
344 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
345
346 producer = get_next_index(producer, tx_ring->num_desc);
347 i++;
348
349 } while (i != nr_desc);
350
351 tx_ring->producer = producer;
352
353 qlcnic_update_cmd_producer(adapter, tx_ring);
354
355 __netif_tx_unlock_bh(tx_ring->txq);
356
357 return 0;
358}
359
360static int
361qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
362 __le16 vlan_id, unsigned op)
363{
364 struct qlcnic_nic_req req;
365 struct qlcnic_mac_req *mac_req;
366 struct qlcnic_vlan_req *vlan_req;
367 u64 word;
368
369 memset(&req, 0, sizeof(struct qlcnic_nic_req));
370 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
371
372 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
373 req.req_hdr = cpu_to_le64(word);
374
375 mac_req = (struct qlcnic_mac_req *)&req.words[0];
376 mac_req->op = op;
377 memcpy(mac_req->mac_addr, addr, 6);
378
379 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
380 vlan_req->vlan_id = vlan_id;
381
382 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
383}
384
385static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
386{
387 struct list_head *head;
388 struct qlcnic_mac_list_s *cur;
389
390 /* look up if already exists */
391 list_for_each(head, &adapter->mac_list) {
392 cur = list_entry(head, struct qlcnic_mac_list_s, list);
393 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
394 return 0;
395 }
396
397 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
398 if (cur == NULL) {
399 dev_err(&adapter->netdev->dev,
400 "failed to add mac address filter\n");
401 return -ENOMEM;
402 }
403 memcpy(cur->mac_addr, addr, ETH_ALEN);
404
405 if (qlcnic_sre_macaddr_change(adapter,
406 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
407 kfree(cur);
408 return -EIO;
409 }
410
411 list_add_tail(&cur->list, &adapter->mac_list);
412 return 0;
413}
414
415void qlcnic_set_multi(struct net_device *netdev)
416{
417 struct qlcnic_adapter *adapter = netdev_priv(netdev);
418 struct netdev_hw_addr *ha;
419 static const u8 bcast_addr[ETH_ALEN] = {
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
421 };
422 u32 mode = VPORT_MISS_MODE_DROP;
423
424 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
425 return;
426
427 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
428 qlcnic_nic_add_mac(adapter, bcast_addr);
429
430 if (netdev->flags & IFF_PROMISC) {
431 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
432 mode = VPORT_MISS_MODE_ACCEPT_ALL;
433 goto send_fw_cmd;
434 }
435
436 if ((netdev->flags & IFF_ALLMULTI) ||
437 (netdev_mc_count(netdev) > adapter->max_mc_count)) {
438 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
439 goto send_fw_cmd;
440 }
441
442 if (!netdev_mc_empty(netdev)) {
443 netdev_for_each_mc_addr(ha, netdev) {
444 qlcnic_nic_add_mac(adapter, ha->addr);
445 }
446 }
447
448send_fw_cmd:
449 if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
450 qlcnic_alloc_lb_filters_mem(adapter);
451 adapter->mac_learn = 1;
452 } else {
453 adapter->mac_learn = 0;
454 }
455
456 qlcnic_nic_set_promisc(adapter, mode);
457}
458
459int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
460{
461 struct qlcnic_nic_req req;
462 u64 word;
463
464 memset(&req, 0, sizeof(struct qlcnic_nic_req));
465
466 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
467
468 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
469 ((u64)adapter->portnum << 16);
470 req.req_hdr = cpu_to_le64(word);
471
472 req.words[0] = cpu_to_le64(mode);
473
474 return qlcnic_send_cmd_descs(adapter,
475 (struct cmd_desc_type0 *)&req, 1);
476}
477
478void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
479{
480 struct qlcnic_mac_list_s *cur;
481 struct list_head *head = &adapter->mac_list;
482
483 while (!list_empty(head)) {
484 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
485 qlcnic_sre_macaddr_change(adapter,
486 cur->mac_addr, 0, QLCNIC_MAC_DEL);
487 list_del(&cur->list);
488 kfree(cur);
489 }
490}
491
492void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
493{
494 struct qlcnic_filter *tmp_fil;
495 struct hlist_node *tmp_hnode, *n;
496 struct hlist_head *head;
497 int i;
498
499 for (i = 0; i < adapter->fhash.fmax; i++) {
500 head = &(adapter->fhash.fhead[i]);
501
502 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
503 {
504 if (jiffies >
505 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
506 qlcnic_sre_macaddr_change(adapter,
507 tmp_fil->faddr, tmp_fil->vlan_id,
508 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
509 QLCNIC_MAC_DEL);
510 spin_lock_bh(&adapter->mac_learn_lock);
511 adapter->fhash.fnum--;
512 hlist_del(&tmp_fil->fnode);
513 spin_unlock_bh(&adapter->mac_learn_lock);
514 kfree(tmp_fil);
515 }
516 }
517 }
518}
519
520void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
521{
522 struct qlcnic_filter *tmp_fil;
523 struct hlist_node *tmp_hnode, *n;
524 struct hlist_head *head;
525 int i;
526
527 for (i = 0; i < adapter->fhash.fmax; i++) {
528 head = &(adapter->fhash.fhead[i]);
529
530 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
531 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
532 tmp_fil->vlan_id, tmp_fil->vlan_id ?
533 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
534 spin_lock_bh(&adapter->mac_learn_lock);
535 adapter->fhash.fnum--;
536 hlist_del(&tmp_fil->fnode);
537 spin_unlock_bh(&adapter->mac_learn_lock);
538 kfree(tmp_fil);
539 }
540 }
541}
542
543int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
544{
545 struct qlcnic_nic_req req;
546 int rv;
547
548 memset(&req, 0, sizeof(struct qlcnic_nic_req));
549
550 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551 req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
552 ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
553
554 req.words[0] = cpu_to_le64(flag);
555
556 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
557 if (rv != 0)
558 dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
559 flag ? "Set" : "Reset");
560 return rv;
561}
562
563int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
564{
565 if (qlcnic_set_fw_loopback(adapter, mode))
566 return -EIO;
567
568 if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
569		qlcnic_set_fw_loopback(adapter, 0);
570 return -EIO;
571 }
572
573 msleep(1000);
574 return 0;
575}
576
577void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
578{
579 int mode = VPORT_MISS_MODE_DROP;
580 struct net_device *netdev = adapter->netdev;
581
582 qlcnic_set_fw_loopback(adapter, 0);
583
584 if (netdev->flags & IFF_PROMISC)
585 mode = VPORT_MISS_MODE_ACCEPT_ALL;
586 else if (netdev->flags & IFF_ALLMULTI)
587 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
588
589 qlcnic_nic_set_promisc(adapter, mode);
590 msleep(1000);
591}
592
593/*
594 * Send the interrupt coalescing parameter set by ethtool to the card.
595 */
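/*
 * Request layout (as built below): words[0] carries ahw->coal.flag in
 * its upper 32 bits, words[2] packs coal.rx_packets with coal.rx_time_us
 * shifted up by 16 bits, and words[5] packs coal.timer_out with
 * coal.type (bits 39:32) and coal.sts_ring_mask (bits 47:40).
 */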
596int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
597{
598 struct qlcnic_nic_req req;
599 int rv;
600
601 memset(&req, 0, sizeof(struct qlcnic_nic_req));
602
603 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
604
605 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
606 ((u64) adapter->portnum << 16));
607
608 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
609 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
610 ((u64) adapter->ahw->coal.rx_time_us) << 16);
611 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
612 ((u64) adapter->ahw->coal.type) << 32 |
613 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
614 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
615 if (rv != 0)
616 dev_err(&adapter->netdev->dev,
617 "Could not send interrupt coalescing parameters\n");
618 return rv;
619}
620
621int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
622{
623 struct qlcnic_nic_req req;
624 u64 word;
625 int rv;
626
627 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
628 return 0;
629
630 memset(&req, 0, sizeof(struct qlcnic_nic_req));
631
632 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
633
634 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
635 req.req_hdr = cpu_to_le64(word);
636
637 req.words[0] = cpu_to_le64(enable);
638
639 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
640 if (rv != 0)
641 dev_err(&adapter->netdev->dev,
642 "Could not send configure hw lro request\n");
643
644 return rv;
645}
646
647int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
648{
649 struct qlcnic_nic_req req;
650 u64 word;
651 int rv;
652
653 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
654 return 0;
655
656 memset(&req, 0, sizeof(struct qlcnic_nic_req));
657
658 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
659
660 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
661 ((u64)adapter->portnum << 16);
662 req.req_hdr = cpu_to_le64(word);
663
664 req.words[0] = cpu_to_le64(enable);
665
666 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
667 if (rv != 0)
668 dev_err(&adapter->netdev->dev,
669 "Could not send configure bridge mode request\n");
670
671 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
672
673 return rv;
674}
675
676
677#define RSS_HASHTYPE_IP_TCP 0x3
678
679int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
680{
681 struct qlcnic_nic_req req;
682 u64 word;
683 int i, rv;
684
685 static const u64 key[] = {
686 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
687 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
688 0x255b0ec26d5a56daULL
689 };
690
691 memset(&req, 0, sizeof(struct qlcnic_nic_req));
692 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
693
694 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
695 req.req_hdr = cpu_to_le64(word);
696
697 /*
698 * RSS request:
699 * bits 3-0: hash_method
700 * 5-4: hash_type_ipv4
701 * 7-6: hash_type_ipv6
702 * 8: enable
703 * 9: use indirection table
704 * 47-10: reserved
705 * 63-48: indirection table mask
706 */
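	/*
	 * Worked example: with enable = 1 the word computed below is
	 * (0x3 << 4) | (0x3 << 6) | (1 << 8) | (0x7ULL << 48)
	 * = 0x00070000000001f0, i.e. TCP hashing for both IPv4 and IPv6,
	 * RSS enabled, and 0x7 in the indirection table mask field.
	 */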
707 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
708 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
709 ((u64)(enable & 0x1) << 8) |
710 ((0x7ULL) << 48);
711 req.words[0] = cpu_to_le64(word);
712 for (i = 0; i < 5; i++)
713 req.words[i+1] = cpu_to_le64(key[i]);
714
715 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
716 if (rv != 0)
717 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
718
719 return rv;
720}
721
722int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
723{
724 struct qlcnic_nic_req req;
725 struct qlcnic_ipaddr *ipa;
726 u64 word;
727 int rv;
728
729 memset(&req, 0, sizeof(struct qlcnic_nic_req));
730 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
731
732 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
733 req.req_hdr = cpu_to_le64(word);
734
735 req.words[0] = cpu_to_le64(cmd);
736 ipa = (struct qlcnic_ipaddr *)&req.words[1];
737 ipa->ipv4 = ip;
738
739 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
740 if (rv != 0)
741 dev_err(&adapter->netdev->dev,
742			"could not notify %s IP 0x%x request\n",
743 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
744
745 return rv;
746}
747
748int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
749{
750 struct qlcnic_nic_req req;
751 u64 word;
752 int rv;
753
754 memset(&req, 0, sizeof(struct qlcnic_nic_req));
755 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
756
757 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
758 req.req_hdr = cpu_to_le64(word);
759 req.words[0] = cpu_to_le64(enable | (enable << 8));
760
761 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
762 if (rv != 0)
763 dev_err(&adapter->netdev->dev,
764 "could not configure link notification\n");
765
766 return rv;
767}
768
769int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
770{
771 struct qlcnic_nic_req req;
772 u64 word;
773 int rv;
774
775 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
776 return 0;
777
778 memset(&req, 0, sizeof(struct qlcnic_nic_req));
779 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
780
781 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
782 ((u64)adapter->portnum << 16) |
783 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
784
785 req.req_hdr = cpu_to_le64(word);
786
787 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
788 if (rv != 0)
789 dev_err(&adapter->netdev->dev,
790 "could not cleanup lro flows\n");
791
792 return rv;
793}
794
795/*
796 * qlcnic_change_mtu - Change the Maximum Transfer Unit
797 * @returns 0 on success, negative on failure
798 */
799
800int qlcnic_change_mtu(struct net_device *netdev, int mtu)
801{
802 struct qlcnic_adapter *adapter = netdev_priv(netdev);
803 int rc = 0;
804
805 if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
806		dev_err(&adapter->netdev->dev, "mtu not supported,"
807			" valid range is %d-%d bytes\n", P3P_MIN_MTU, P3P_MAX_MTU);
808 return -EINVAL;
809 }
810
811 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
812
813 if (!rc)
814 netdev->mtu = mtu;
815
816 return rc;
817}
818
819
820u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
821{
822 struct qlcnic_adapter *adapter = netdev_priv(netdev);
823
824 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
825 u32 changed = features ^ netdev->features;
826 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
827 }
828
829 if (!(features & NETIF_F_RXCSUM))
830 features &= ~NETIF_F_LRO;
831
832 return features;
833}
834
835
836int qlcnic_set_features(struct net_device *netdev, u32 features)
837{
838 struct qlcnic_adapter *adapter = netdev_priv(netdev);
839 u32 changed = netdev->features ^ features;
840 int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
841
842 if (!(changed & NETIF_F_LRO))
843 return 0;
844
845 netdev->features = features ^ NETIF_F_LRO;
846
847 if (qlcnic_config_hw_lro(adapter, hw_lro))
848 return -EIO;
849
850 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
851 return -EIO;
852
853 return 0;
854}
855
856/*
857 * Changes the CRB window to the specified window.
858 */
859 /* Returns < 0 if off is not valid,
860 * 1 if window access is needed. 'off' is set to offset from
861 * CRB space in 128M pci map
862 * 0 if no window access is needed. 'off' is set to 2M addr
863 * In: 'off' is offset from base in 128M pci map
864 */
865static int
866qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
867 ulong off, void __iomem **addr)
868{
869 const struct crb_128M_2M_sub_block_map *m;
870
871 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
872 return -EINVAL;
873
874 off -= QLCNIC_PCI_CRBSPACE;
875
876 /*
877 * Try direct map
878 */
879 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
880
881 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
882 *addr = adapter->ahw->pci_base0 + m->start_2M +
883 (off - m->start_128M);
884 return 0;
885 }
886
887 /*
888 * Not in direct map, use crb window
889 */
890 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
891 return 1;
892}
893
894/*
895 * In: 'off' is offset from CRB space in 128M pci map
896 * Out: 'off' is 2M pci map addr
897 * side effect: lock crb window
898 */
899static int
900qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
901{
902 u32 window;
903 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
904
905 off -= QLCNIC_PCI_CRBSPACE;
906
907 window = CRB_HI(off);
908 if (window == 0) {
909 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
910 return -EIO;
911 }
912
913 writel(window, addr);
914 if (readl(addr) != window) {
915 if (printk_ratelimit())
916 dev_warn(&adapter->pdev->dev,
917 "failed to set CRB window to %d off 0x%lx\n",
918 window, off);
919 return -EIO;
920 }
921 return 0;
922}
923
924int
925qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
926{
927 unsigned long flags;
928 int rv;
929 void __iomem *addr = NULL;
930
931 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
932
933 if (rv == 0) {
934 writel(data, addr);
935 return 0;
936 }
937
938 if (rv > 0) {
939 /* indirect access */
940 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
941 crb_win_lock(adapter);
942 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
943 if (!rv)
944 writel(data, addr);
945 crb_win_unlock(adapter);
946 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
947 return rv;
948 }
949
950 dev_err(&adapter->pdev->dev,
951 "%s: invalid offset: 0x%016lx\n", __func__, off);
952 dump_stack();
953 return -EIO;
954}
955
956u32
957qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
958{
959 unsigned long flags;
960 int rv;
961 u32 data = -1;
962 void __iomem *addr = NULL;
963
964 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
965
966 if (rv == 0)
967 return readl(addr);
968
969 if (rv > 0) {
970 /* indirect access */
971 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
972 crb_win_lock(adapter);
973 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
974 data = readl(addr);
975 crb_win_unlock(adapter);
976 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
977 return data;
978 }
979
980 dev_err(&adapter->pdev->dev,
981 "%s: invalid offset: 0x%016lx\n", __func__, off);
982 dump_stack();
983 return -1;
984}
985
986
987void __iomem *
988qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
989{
990 void __iomem *addr = NULL;
991
992 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
993
994 return addr;
995}
996
997
998static int
999qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
1000 u64 addr, u32 *start)
1001{
1002 u32 window;
1003
1004 window = OCM_WIN_P3P(addr);
1005
1006 writel(window, adapter->ahw->ocm_win_crb);
1007 /* read back to flush */
1008 readl(adapter->ahw->ocm_win_crb);
1009
1010 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1011 return 0;
1012}
1013
1014static int
1015qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1016 u64 *data, int op)
1017{
1018 void __iomem *addr;
1019 int ret;
1020 u32 start;
1021
1022 mutex_lock(&adapter->ahw->mem_lock);
1023
1024 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
1025 if (ret != 0)
1026 goto unlock;
1027
1028 addr = adapter->ahw->pci_base0 + start;
1029
1030 if (op == 0) /* read */
1031 *data = readq(addr);
1032 else /* write */
1033 writeq(*data, addr);
1034
1035unlock:
1036 mutex_unlock(&adapter->ahw->mem_lock);
1037
1038 return ret;
1039}
1040
1041void
1042qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1043{
1044 void __iomem *addr = adapter->ahw->pci_base0 +
1045 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1046
1047 mutex_lock(&adapter->ahw->mem_lock);
1048 *data = readq(addr);
1049 mutex_unlock(&adapter->ahw->mem_lock);
1050}
1051
1052void
1053qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1054{
1055 void __iomem *addr = adapter->ahw->pci_base0 +
1056 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1057
1058 mutex_lock(&adapter->ahw->mem_lock);
1059 writeq(data, addr);
1060 mutex_unlock(&adapter->ahw->mem_lock);
1061}
1062
1063#define MAX_CTL_CHECK 1000
1064
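/*
 * Write 64 bits into adapter memory through the MIU test agent.  The
 * agent operates on 16-byte lines, so the sequence below is effectively
 * a read-modify-write: the line is read back first, the untouched
 * 64-bit half is copied from MIU_TEST_AGT_RDDATA into the write data
 * registers, the new value is placed in the half selected by
 * (off & 0xf), and the write is triggered and polled via TEST_AGT_CTRL.
 */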
1065int
1066qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
1067 u64 off, u64 data)
1068{
1069 int i, j, ret;
1070 u32 temp, off8;
1071 void __iomem *mem_crb;
1072
1073 /* Only 64-bit aligned access */
1074 if (off & 7)
1075 return -EIO;
1076
1077 /* P3 onward, test agent base for MIU and SIU is same */
1078 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1079 QLCNIC_ADDR_QDR_NET_MAX)) {
1080 mem_crb = qlcnic_get_ioaddr(adapter,
1081 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1082 goto correct;
1083 }
1084
1085 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1086 mem_crb = qlcnic_get_ioaddr(adapter,
1087 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1088 goto correct;
1089 }
1090
1091 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1092 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
1093
1094 return -EIO;
1095
1096correct:
1097 off8 = off & ~0xf;
1098
1099 mutex_lock(&adapter->ahw->mem_lock);
1100
1101 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1102 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1103
1104 i = 0;
1105 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1106 writel((TA_CTL_START | TA_CTL_ENABLE),
1107 (mem_crb + TEST_AGT_CTRL));
1108
1109 for (j = 0; j < MAX_CTL_CHECK; j++) {
1110 temp = readl(mem_crb + TEST_AGT_CTRL);
1111 if ((temp & TA_CTL_BUSY) == 0)
1112 break;
1113 }
1114
1115 if (j >= MAX_CTL_CHECK) {
1116 ret = -EIO;
1117 goto done;
1118 }
1119
1120 i = (off & 0xf) ? 0 : 2;
1121 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1122 mem_crb + MIU_TEST_AGT_WRDATA(i));
1123 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1124 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1125 i = (off & 0xf) ? 2 : 0;
1126
1127 writel(data & 0xffffffff,
1128 mem_crb + MIU_TEST_AGT_WRDATA(i));
1129 writel((data >> 32) & 0xffffffff,
1130 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1131
1132 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1133 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1134 (mem_crb + TEST_AGT_CTRL));
1135
1136 for (j = 0; j < MAX_CTL_CHECK; j++) {
1137 temp = readl(mem_crb + TEST_AGT_CTRL);
1138 if ((temp & TA_CTL_BUSY) == 0)
1139 break;
1140 }
1141
1142 if (j >= MAX_CTL_CHECK) {
1143 if (printk_ratelimit())
1144 dev_err(&adapter->pdev->dev,
1145 "failed to write through agent\n");
1146 ret = -EIO;
1147 } else
1148 ret = 0;
1149
1150done:
1151 mutex_unlock(&adapter->ahw->mem_lock);
1152
1153 return ret;
1154}
1155
1156int
1157qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1158 u64 off, u64 *data)
1159{
1160 int j, ret;
1161 u32 temp, off8;
1162 u64 val;
1163 void __iomem *mem_crb;
1164
1165 /* Only 64-bit aligned access */
1166 if (off & 7)
1167 return -EIO;
1168
1169 /* P3 onward, test agent base for MIU and SIU is same */
1170 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1171 QLCNIC_ADDR_QDR_NET_MAX)) {
1172 mem_crb = qlcnic_get_ioaddr(adapter,
1173 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1174 goto correct;
1175 }
1176
1177 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1178 mem_crb = qlcnic_get_ioaddr(adapter,
1179 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1180 goto correct;
1181 }
1182
1183 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1184 return qlcnic_pci_mem_access_direct(adapter,
1185 off, data, 0);
1186 }
1187
1188 return -EIO;
1189
1190correct:
1191 off8 = off & ~0xf;
1192
1193 mutex_lock(&adapter->ahw->mem_lock);
1194
1195 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1196 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1197 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1198 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1199
1200 for (j = 0; j < MAX_CTL_CHECK; j++) {
1201 temp = readl(mem_crb + TEST_AGT_CTRL);
1202 if ((temp & TA_CTL_BUSY) == 0)
1203 break;
1204 }
1205
1206 if (j >= MAX_CTL_CHECK) {
1207 if (printk_ratelimit())
1208 dev_err(&adapter->pdev->dev,
1209 "failed to read through agent\n");
1210 ret = -EIO;
1211 } else {
1212 off8 = MIU_TEST_AGT_RDDATA_LO;
1213 if (off & 0xf)
1214 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1215
1216 temp = readl(mem_crb + off8 + 4);
1217 val = (u64)temp << 32;
1218 val |= readl(mem_crb + off8);
1219 *data = val;
1220 ret = 0;
1221 }
1222
1223 mutex_unlock(&adapter->ahw->mem_lock);
1224
1225 return ret;
1226}
1227
1228int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1229{
1230 int offset, board_type, magic;
1231 struct pci_dev *pdev = adapter->pdev;
1232
1233 offset = QLCNIC_FW_MAGIC_OFFSET;
1234 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1235 return -EIO;
1236
1237 if (magic != QLCNIC_BDINFO_MAGIC) {
1238 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1239 magic);
1240 return -EIO;
1241 }
1242
1243 offset = QLCNIC_BRDTYPE_OFFSET;
1244 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1245 return -EIO;
1246
1247 adapter->ahw->board_type = board_type;
1248
1249 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1250 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1251 if ((gpio & 0x8000) == 0)
1252 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1253 }
1254
1255 switch (board_type) {
1256 case QLCNIC_BRDTYPE_P3P_HMEZ:
1257 case QLCNIC_BRDTYPE_P3P_XG_LOM:
1258 case QLCNIC_BRDTYPE_P3P_10G_CX4:
1259 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1260 case QLCNIC_BRDTYPE_P3P_IMEZ:
1261 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1262 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1263 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1264 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1265 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1266 adapter->ahw->port_type = QLCNIC_XGBE;
1267 break;
1268 case QLCNIC_BRDTYPE_P3P_REF_QG:
1269 case QLCNIC_BRDTYPE_P3P_4_GB:
1270 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1271 adapter->ahw->port_type = QLCNIC_GBE;
1272 break;
1273 case QLCNIC_BRDTYPE_P3P_10G_TP:
1274 adapter->ahw->port_type = (adapter->portnum < 2) ?
1275 QLCNIC_XGBE : QLCNIC_GBE;
1276 break;
1277 default:
1278 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1279 adapter->ahw->port_type = QLCNIC_XGBE;
1280 break;
1281 }
1282
1283 return 0;
1284}
1285
1286int
1287qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1288{
1289 u32 wol_cfg;
1290
1291 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1292 if (wol_cfg & (1UL << adapter->portnum)) {
1293 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1294 if (wol_cfg & (1 << adapter->portnum))
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
1301int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1302{
1303 struct qlcnic_nic_req req;
1304 int rv;
1305 u64 word;
1306
1307 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1308 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1309
1310 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1311 req.req_hdr = cpu_to_le64(word);
1312
1313 req.words[0] = cpu_to_le64((u64)rate << 32);
1314 req.words[1] = cpu_to_le64(state);
1315
1316 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1317 if (rv)
1318 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1319
1320 return rv;
1321}
1322
1323/* FW dump related functions */
1324static u32
1325qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1326 u32 *buffer)
1327{
1328 int i;
1329 u32 addr, data;
1330 struct __crb *crb = &entry->region.crb;
1331 void __iomem *base = adapter->ahw->pci_base0;
1332
1333 addr = crb->addr;
1334
1335 for (i = 0; i < crb->no_ops; i++) {
1336 QLCNIC_RD_DUMP_REG(addr, base, &data);
1337 *buffer++ = cpu_to_le32(addr);
1338 *buffer++ = cpu_to_le32(data);
1339 addr += crb->stride;
1340 }
1341 return crb->no_ops * 2 * sizeof(u32);
1342}
1343
1344static u32
1345qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1346 struct qlcnic_dump_entry *entry, u32 *buffer)
1347{
1348 int i, k, timeout = 0;
1349 void __iomem *base = adapter->ahw->pci_base0;
1350 u32 addr, data;
1351 u8 opcode, no_ops;
1352 struct __ctrl *ctr = &entry->region.ctrl;
1353 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1354
1355 addr = ctr->addr;
1356 no_ops = ctr->no_ops;
1357
1358 for (i = 0; i < no_ops; i++) {
1359 k = 0;
1360 opcode = 0;
1361 for (k = 0; k < 8; k++) {
1362 if (!(ctr->opcode & (1 << k)))
1363 continue;
1364 switch (1 << k) {
1365 case QLCNIC_DUMP_WCRB:
1366 QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1367 break;
1368 case QLCNIC_DUMP_RWCRB:
1369 QLCNIC_RD_DUMP_REG(addr, base, &data);
1370 QLCNIC_WR_DUMP_REG(addr, base, data);
1371 break;
1372 case QLCNIC_DUMP_ANDCRB:
1373 QLCNIC_RD_DUMP_REG(addr, base, &data);
1374 QLCNIC_WR_DUMP_REG(addr, base,
1375 (data & ctr->val2));
1376 break;
1377 case QLCNIC_DUMP_ORCRB:
1378 QLCNIC_RD_DUMP_REG(addr, base, &data);
1379 QLCNIC_WR_DUMP_REG(addr, base,
1380 (data | ctr->val3));
1381 break;
1382 case QLCNIC_DUMP_POLLCRB:
1383 while (timeout <= ctr->timeout) {
1384 QLCNIC_RD_DUMP_REG(addr, base, &data);
1385 if ((data & ctr->val2) == ctr->val1)
1386 break;
1387 msleep(1);
1388 timeout++;
1389 }
1390 if (timeout > ctr->timeout) {
1391 dev_info(&adapter->pdev->dev,
1392 "Timed out, aborting poll CRB\n");
1393 return -EINVAL;
1394 }
1395 break;
1396 case QLCNIC_DUMP_RD_SAVE:
1397 if (ctr->index_a)
1398 addr = t_hdr->saved_state[ctr->index_a];
1399 QLCNIC_RD_DUMP_REG(addr, base, &data);
1400 t_hdr->saved_state[ctr->index_v] = data;
1401 break;
1402 case QLCNIC_DUMP_WRT_SAVED:
1403 if (ctr->index_v)
1404 data = t_hdr->saved_state[ctr->index_v];
1405 else
1406 data = ctr->val1;
1407 if (ctr->index_a)
1408 addr = t_hdr->saved_state[ctr->index_a];
1409 QLCNIC_WR_DUMP_REG(addr, base, data);
1410 break;
1411 case QLCNIC_DUMP_MOD_SAVE_ST:
1412 data = t_hdr->saved_state[ctr->index_v];
1413 data <<= ctr->shl_val;
1414 data >>= ctr->shr_val;
1415 if (ctr->val2)
1416 data &= ctr->val2;
1417 data |= ctr->val3;
1418 data += ctr->val1;
1419 t_hdr->saved_state[ctr->index_v] = data;
1420 break;
1421 default:
1422 dev_info(&adapter->pdev->dev,
1423 "Unknown opcode\n");
1424 break;
1425 }
1426 }
1427 addr += ctr->stride;
1428 }
1429 return 0;
1430}
1431
1432static u32
1433qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1434 u32 *buffer)
1435{
1436 int loop;
1437 u32 val, data = 0;
1438 struct __mux *mux = &entry->region.mux;
1439 void __iomem *base = adapter->ahw->pci_base0;
1440
1441 val = mux->val;
1442 for (loop = 0; loop < mux->no_ops; loop++) {
1443 QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1444 QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1445 *buffer++ = cpu_to_le32(val);
1446 *buffer++ = cpu_to_le32(data);
1447 val += mux->val_stride;
1448 }
1449 return 2 * mux->no_ops * sizeof(u32);
1450}
1451
1452static u32
1453qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1454 u32 *buffer)
1455{
1456 int i, loop;
1457 u32 cnt, addr, data, que_id = 0;
1458 void __iomem *base = adapter->ahw->pci_base0;
1459 struct __queue *que = &entry->region.que;
1460
1461 addr = que->read_addr;
1462 cnt = que->read_addr_cnt;
1463
1464 for (loop = 0; loop < que->no_ops; loop++) {
1465 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1466 addr = que->read_addr;
1467 for (i = 0; i < cnt; i++) {
1468 QLCNIC_RD_DUMP_REG(addr, base, &data);
1469 *buffer++ = cpu_to_le32(data);
1470 addr += que->read_addr_stride;
1471 }
1472 que_id += que->stride;
1473 }
1474 return que->no_ops * cnt * sizeof(u32);
1475}
1476
1477static u32
1478qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1479 u32 *buffer)
1480{
1481 int i;
1482 u32 data;
1483 void __iomem *addr;
1484 struct __ocm *ocm = &entry->region.ocm;
1485
1486 addr = adapter->ahw->pci_base0 + ocm->read_addr;
1487 for (i = 0; i < ocm->no_ops; i++) {
1488 data = readl(addr);
1489 *buffer++ = cpu_to_le32(data);
1490 addr += ocm->read_addr_stride;
1491 }
1492 return ocm->no_ops * sizeof(u32);
1493}
1494
1495static u32
1496qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1497 u32 *buffer)
1498{
1499 int i, count = 0;
1500 u32 fl_addr, size, val, lck_val, addr;
1501 struct __mem *rom = &entry->region.mem;
1502 void __iomem *base = adapter->ahw->pci_base0;
1503
1504 fl_addr = rom->addr;
1505 size = rom->size/4;
1506lock_try:
1507 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1508 if (!lck_val && count < MAX_CTL_CHECK) {
1509 msleep(10);
1510 count++;
1511 goto lock_try;
1512 }
1513 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1514 for (i = 0; i < size; i++) {
1515 addr = fl_addr & 0xFFFF0000;
1516 QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1517 addr = LSW(fl_addr) + FLASH_ROM_DATA;
1518 QLCNIC_RD_DUMP_REG(addr, base, &val);
1519 fl_addr += 4;
1520 *buffer++ = cpu_to_le32(val);
1521 }
1522 readl(base + QLCNIC_FLASH_SEM2_ULK);
1523 return rom->size;
1524}
1525
1526static u32
1527qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1528 struct qlcnic_dump_entry *entry, u32 *buffer)
1529{
1530 int i;
1531 u32 cnt, val, data, addr;
1532 void __iomem *base = adapter->ahw->pci_base0;
1533 struct __cache *l1 = &entry->region.cache;
1534
1535 val = l1->init_tag_val;
1536
1537 for (i = 0; i < l1->no_ops; i++) {
1538 QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1539 QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1540 addr = l1->read_addr;
1541 cnt = l1->read_addr_num;
1542 while (cnt) {
1543 QLCNIC_RD_DUMP_REG(addr, base, &data);
1544 *buffer++ = cpu_to_le32(data);
1545 addr += l1->read_addr_stride;
1546 cnt--;
1547 }
1548 val += l1->stride;
1549 }
1550 return l1->no_ops * l1->read_addr_num * sizeof(u32);
1551}
1552
1553static u32
1554qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1555 struct qlcnic_dump_entry *entry, u32 *buffer)
1556{
1557 int i;
1558 u32 cnt, val, data, addr;
1559 u8 poll_mask, poll_to, time_out = 0;
1560 void __iomem *base = adapter->ahw->pci_base0;
1561 struct __cache *l2 = &entry->region.cache;
1562
1563 val = l2->init_tag_val;
1564 poll_mask = LSB(MSW(l2->ctrl_val));
1565 poll_to = MSB(MSW(l2->ctrl_val));
1566
1567 for (i = 0; i < l2->no_ops; i++) {
1568 QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1569 if (LSW(l2->ctrl_val))
1570 QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1571 LSW(l2->ctrl_val));
1572 if (!poll_mask)
1573 goto skip_poll;
1574 do {
1575 QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1576 if (!(data & poll_mask))
1577 break;
1578 msleep(1);
1579 time_out++;
1580 } while (time_out <= poll_to);
1581
1582 if (time_out > poll_to) {
1583 dev_err(&adapter->pdev->dev,
1584 "Timeout exceeded in %s, aborting dump\n",
1585 __func__);
1586 return -EINVAL;
1587 }
1588skip_poll:
1589 addr = l2->read_addr;
1590 cnt = l2->read_addr_num;
1591 while (cnt) {
1592 QLCNIC_RD_DUMP_REG(addr, base, &data);
1593 *buffer++ = cpu_to_le32(data);
1594 addr += l2->read_addr_stride;
1595 cnt--;
1596 }
1597 val += l2->stride;
1598 }
1599 return l2->no_ops * l2->read_addr_num * sizeof(u32);
1600}
1601
1602static u32
1603qlcnic_read_memory(struct qlcnic_adapter *adapter,
1604 struct qlcnic_dump_entry *entry, u32 *buffer)
1605{
1606 u32 addr, data, test, ret = 0;
1607 int i, reg_read;
1608 struct __mem *mem = &entry->region.mem;
1609 void __iomem *base = adapter->ahw->pci_base0;
1610
1611 reg_read = mem->size;
1612 addr = mem->addr;
1613 /* check for data size of multiple of 16 and 16 byte alignment */
1614 if ((addr & 0xf) || (reg_read%16)) {
1615 dev_info(&adapter->pdev->dev,
1616 "Unaligned memory addr:0x%x size:0x%x\n",
1617 addr, reg_read);
1618 return -EINVAL;
1619 }
1620
1621 mutex_lock(&adapter->ahw->mem_lock);
1622
1623 while (reg_read != 0) {
1624 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1625 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1626 QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1627 TA_CTL_ENABLE | TA_CTL_START);
1628
1629 for (i = 0; i < MAX_CTL_CHECK; i++) {
1630 QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1631 if (!(test & TA_CTL_BUSY))
1632 break;
1633 }
1634		if (i == MAX_CTL_CHECK) {
1635			if (printk_ratelimit())
1636				dev_err(&adapter->pdev->dev,
1637					"failed to read through agent\n");
1638			ret = -EINVAL;
1639			goto out;
1640		}
1641
1642 for (i = 0; i < 4; i++) {
1643 QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1644 *buffer++ = cpu_to_le32(data);
1645 }
1646 addr += 16;
1647 reg_read -= 16;
1648 ret += 16;
1649 }
1650out:
1651 mutex_unlock(&adapter->ahw->mem_lock);
1652	return ret;
1653}
1654
1655static u32
1656qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1657 struct qlcnic_dump_entry *entry, u32 *buffer)
1658{
1659 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1660 return 0;
1661}
1662
1663struct qlcnic_dump_operations fw_dump_ops[] = {
1664 { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1665 { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1666 { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1667 { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1668 { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1669 { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1670 { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1671 { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1672 { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1673 { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1674 { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1675 { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1676 { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1677 { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1678 { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1679 { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1680 { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1681 { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1682 { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1683 { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1684};
1685
1686/* Walk the template and collect dump for each entry in the dump template */
1687static int
1688qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1689 u32 size)
1690{
1691 int ret = 1;
1692 if (size != entry->hdr.cap_size) {
1693 dev_info(dev,
1694			"Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
1695 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1696 dev_info(dev, "Aborting further dump capture\n");
1697 ret = 0;
1698 }
1699 return ret;
1700}
1701
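/*
 * Capture a firmware minidump.  The buffer is sized from the
 * per-capability sizes selected by drv_cap_mask, then each template
 * entry is dispatched to the fw_dump_ops handler matching its type;
 * entries whose mask is not enabled, or whose captured size does not
 * match cap_size, are flagged QLCNIC_DUMP_SKIP.
 */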
1702int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1703{
1704 u32 *buffer;
1705 char mesg[64];
1706 char *msg[] = {mesg, NULL};
1707 int i, k, ops_cnt, ops_index, dump_size = 0;
1708 u32 entry_offset, dump, no_entries, buf_offset = 0;
1709 struct qlcnic_dump_entry *entry;
1710 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1711 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1712
1713 if (fw_dump->clr) {
1714 dev_info(&adapter->pdev->dev,
1715 "Previous dump not cleared, not capturing dump\n");
1716 return -EIO;
1717 }
1718 /* Calculate the size for dump data area only */
1719 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1720 if (i & tmpl_hdr->drv_cap_mask)
1721 dump_size += tmpl_hdr->cap_sizes[k];
1722 if (!dump_size)
1723 return -EIO;
1724
1725 fw_dump->data = vzalloc(dump_size);
1726 if (!fw_dump->data) {
1727 dev_info(&adapter->pdev->dev,
1728 "Unable to allocate (%d KB) for fw dump\n",
1729 dump_size/1024);
1730 return -ENOMEM;
1731 }
1732 buffer = fw_dump->data;
1733 fw_dump->size = dump_size;
1734 no_entries = tmpl_hdr->num_entries;
1735 ops_cnt = ARRAY_SIZE(fw_dump_ops);
1736 entry_offset = tmpl_hdr->offset;
1737 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1738 tmpl_hdr->sys_info[1] = adapter->fw_version;
1739
1740 for (i = 0; i < no_entries; i++) {
1741 entry = (void *)tmpl_hdr + entry_offset;
1742 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1743 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1744 entry_offset += entry->hdr.offset;
1745 continue;
1746 }
1747 /* Find the handler for this entry */
1748 ops_index = 0;
1749 while (ops_index < ops_cnt) {
1750 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1751 break;
1752 ops_index++;
1753 }
1754 if (ops_index == ops_cnt) {
1755 dev_info(&adapter->pdev->dev,
1756 "Invalid entry type %d, exiting dump\n",
1757 entry->hdr.type);
1758 goto error;
1759 }
1760 /* Collect dump for this entry */
1761 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1762 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1763 dump))
1764 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1765 buf_offset += entry->hdr.cap_size;
1766 entry_offset += entry->hdr.offset;
1767 buffer = fw_dump->data + buf_offset;
1768 }
1769 if (dump_size != buf_offset) {
1770 dev_info(&adapter->pdev->dev,
1771 "Captured(%d) and expected size(%d) do not match\n",
1772 buf_offset, dump_size);
1773 goto error;
1774 } else {
1775 fw_dump->clr = 1;
1776 snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
1777 adapter->pdev->devfn);
1778 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1779 fw_dump->size);
1780 /* Send a udev event to notify availability of FW dump */
1781 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1782 return 0;
1783 }
1784error:
1785 vfree(fw_dump->data);
1786 return -EINVAL;
1787}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 00000000000..ee8a3982395
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1897 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/netdevice.h>
9#include <linux/delay.h>
10#include <linux/slab.h>
11#include <linux/if_vlan.h>
12#include "qlcnic.h"
13
14struct crb_addr_pair {
15 u32 addr;
16 u32 data;
17};
18
19#define QLCNIC_MAX_CRB_XFORM 60
20static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
21
22#define crb_addr_transform(name) \
23 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
24 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
25
26#define QLCNIC_ADDR_ERROR (0xffffffff)
27
28static void
29qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
30 struct qlcnic_host_rds_ring *rds_ring);
31
32static int
33qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
34
35static void crb_addr_transform_setup(void)
36{
37 crb_addr_transform(XDMA);
38 crb_addr_transform(TIMR);
39 crb_addr_transform(SRE);
40 crb_addr_transform(SQN3);
41 crb_addr_transform(SQN2);
42 crb_addr_transform(SQN1);
43 crb_addr_transform(SQN0);
44 crb_addr_transform(SQS3);
45 crb_addr_transform(SQS2);
46 crb_addr_transform(SQS1);
47 crb_addr_transform(SQS0);
48 crb_addr_transform(RPMX7);
49 crb_addr_transform(RPMX6);
50 crb_addr_transform(RPMX5);
51 crb_addr_transform(RPMX4);
52 crb_addr_transform(RPMX3);
53 crb_addr_transform(RPMX2);
54 crb_addr_transform(RPMX1);
55 crb_addr_transform(RPMX0);
56 crb_addr_transform(ROMUSB);
57 crb_addr_transform(SN);
58 crb_addr_transform(QMN);
59 crb_addr_transform(QMS);
60 crb_addr_transform(PGNI);
61 crb_addr_transform(PGND);
62 crb_addr_transform(PGN3);
63 crb_addr_transform(PGN2);
64 crb_addr_transform(PGN1);
65 crb_addr_transform(PGN0);
66 crb_addr_transform(PGSI);
67 crb_addr_transform(PGSD);
68 crb_addr_transform(PGS3);
69 crb_addr_transform(PGS2);
70 crb_addr_transform(PGS1);
71 crb_addr_transform(PGS0);
72 crb_addr_transform(PS);
73 crb_addr_transform(PH);
74 crb_addr_transform(NIU);
75 crb_addr_transform(I2Q);
76 crb_addr_transform(EG);
77 crb_addr_transform(MN);
78 crb_addr_transform(MS);
79 crb_addr_transform(CAS2);
80 crb_addr_transform(CAS1);
81 crb_addr_transform(CAS0);
82 crb_addr_transform(CAM);
83 crb_addr_transform(C2C1);
84 crb_addr_transform(C2C0);
85 crb_addr_transform(SMB);
86 crb_addr_transform(OCM0);
87 crb_addr_transform(I2C0);
88}
89
90void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
91{
92 struct qlcnic_recv_context *recv_ctx;
93 struct qlcnic_host_rds_ring *rds_ring;
94 struct qlcnic_rx_buffer *rx_buf;
95 int i, ring;
96
97 recv_ctx = adapter->recv_ctx;
98 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
99 rds_ring = &recv_ctx->rds_rings[ring];
100 for (i = 0; i < rds_ring->num_desc; ++i) {
101 rx_buf = &(rds_ring->rx_buf_arr[i]);
102 if (rx_buf->skb == NULL)
103 continue;
104
105 pci_unmap_single(adapter->pdev,
106 rx_buf->dma,
107 rds_ring->dma_size,
108 PCI_DMA_FROMDEVICE);
109
110 dev_kfree_skb_any(rx_buf->skb);
111 }
112 }
113}
114
115void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
116{
117 struct qlcnic_recv_context *recv_ctx;
118 struct qlcnic_host_rds_ring *rds_ring;
119 struct qlcnic_rx_buffer *rx_buf;
120 int i, ring;
121
122 recv_ctx = adapter->recv_ctx;
123 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
124 rds_ring = &recv_ctx->rds_rings[ring];
125
126 INIT_LIST_HEAD(&rds_ring->free_list);
127
128 rx_buf = rds_ring->rx_buf_arr;
129 for (i = 0; i < rds_ring->num_desc; i++) {
130 list_add_tail(&rx_buf->list,
131 &rds_ring->free_list);
132 rx_buf++;
133 }
134 }
135}
136
137void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
138{
139 struct qlcnic_cmd_buffer *cmd_buf;
140 struct qlcnic_skb_frag *buffrag;
141 int i, j;
142 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
143
144 cmd_buf = tx_ring->cmd_buf_arr;
145 for (i = 0; i < tx_ring->num_desc; i++) {
146 buffrag = cmd_buf->frag_array;
147 if (buffrag->dma) {
148 pci_unmap_single(adapter->pdev, buffrag->dma,
149 buffrag->length, PCI_DMA_TODEVICE);
150 buffrag->dma = 0ULL;
151 }
152 for (j = 0; j < cmd_buf->frag_count; j++) {
153 buffrag++;
154 if (buffrag->dma) {
155 pci_unmap_page(adapter->pdev, buffrag->dma,
156 buffrag->length,
157 PCI_DMA_TODEVICE);
158 buffrag->dma = 0ULL;
159 }
160 }
161 if (cmd_buf->skb) {
162 dev_kfree_skb_any(cmd_buf->skb);
163 cmd_buf->skb = NULL;
164 }
165 cmd_buf++;
166 }
167}
168
169void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
170{
171 struct qlcnic_recv_context *recv_ctx;
172 struct qlcnic_host_rds_ring *rds_ring;
173 struct qlcnic_host_tx_ring *tx_ring;
174 int ring;
175
176 recv_ctx = adapter->recv_ctx;
177
178 if (recv_ctx->rds_rings == NULL)
179 goto skip_rds;
180
181 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
182 rds_ring = &recv_ctx->rds_rings[ring];
183 vfree(rds_ring->rx_buf_arr);
184 rds_ring->rx_buf_arr = NULL;
185 }
186 kfree(recv_ctx->rds_rings);
187
188skip_rds:
189 if (adapter->tx_ring == NULL)
190 return;
191
192 tx_ring = adapter->tx_ring;
193 vfree(tx_ring->cmd_buf_arr);
194 tx_ring->cmd_buf_arr = NULL;
195 kfree(adapter->tx_ring);
196 adapter->tx_ring = NULL;
197}
198
199int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
200{
201 struct qlcnic_recv_context *recv_ctx;
202 struct qlcnic_host_rds_ring *rds_ring;
203 struct qlcnic_host_sds_ring *sds_ring;
204 struct qlcnic_host_tx_ring *tx_ring;
205 struct qlcnic_rx_buffer *rx_buf;
206 int ring, i, size;
207
208 struct qlcnic_cmd_buffer *cmd_buf_arr;
209 struct net_device *netdev = adapter->netdev;
210
211 size = sizeof(struct qlcnic_host_tx_ring);
212 tx_ring = kzalloc(size, GFP_KERNEL);
213 if (tx_ring == NULL) {
214 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
215 return -ENOMEM;
216 }
217 adapter->tx_ring = tx_ring;
218
219 tx_ring->num_desc = adapter->num_txd;
220 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
221
222 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
223 if (cmd_buf_arr == NULL) {
224 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
225 goto err_out;
226 }
227 tx_ring->cmd_buf_arr = cmd_buf_arr;
228
229 recv_ctx = adapter->recv_ctx;
230
231 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
232 rds_ring = kzalloc(size, GFP_KERNEL);
233 if (rds_ring == NULL) {
234 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
235 goto err_out;
236 }
237 recv_ctx->rds_rings = rds_ring;
238
239 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
240 rds_ring = &recv_ctx->rds_rings[ring];
241 switch (ring) {
242 case RCV_RING_NORMAL:
243 rds_ring->num_desc = adapter->num_rxd;
244 rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
245 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
246 break;
247
248 case RCV_RING_JUMBO:
249 rds_ring->num_desc = adapter->num_jumbo_rxd;
250 rds_ring->dma_size =
251 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
252
253 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
254 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
255
256 rds_ring->skb_size =
257 rds_ring->dma_size + NET_IP_ALIGN;
258 break;
259 }
260 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
261 if (rds_ring->rx_buf_arr == NULL) {
262 dev_err(&netdev->dev, "Failed to allocate "
263 "rx buffer ring %d\n", ring);
264 goto err_out;
265 }
266 INIT_LIST_HEAD(&rds_ring->free_list);
267 /*
268 * Now go through all of them, set reference handles
269 * and put them in the queues.
270 */
271 rx_buf = rds_ring->rx_buf_arr;
272 for (i = 0; i < rds_ring->num_desc; i++) {
273 list_add_tail(&rx_buf->list,
274 &rds_ring->free_list);
275 rx_buf->ref_handle = i;
276 rx_buf++;
277 }
278 spin_lock_init(&rds_ring->lock);
279 }
280
281 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
282 sds_ring = &recv_ctx->sds_rings[ring];
283 sds_ring->irq = adapter->msix_entries[ring].vector;
284 sds_ring->adapter = adapter;
285 sds_ring->num_desc = adapter->num_rxd;
286
287 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
288 INIT_LIST_HEAD(&sds_ring->free_list[i]);
289 }
290
291 return 0;
292
293err_out:
294 qlcnic_free_sw_resources(adapter);
295 return -ENOMEM;
296}
297
298/*
299 * Utility to translate from internal Phantom CRB address
300 * to external PCI CRB address.
301 */
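/*
 * crb_addr_xform[] is indexed by PCI CRB block and holds each block's
 * internal hub/agent base (top 12 bits).  The loop below searches the
 * table for the block whose agent base matches, and that block index
 * (shifted left by 20) plus the low 20 bits of the internal address
 * gives the PCI CRB offset.
 */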
302static u32 qlcnic_decode_crb_addr(u32 addr)
303{
304 int i;
305 u32 base_addr, offset, pci_base;
306
307 crb_addr_transform_setup();
308
309 pci_base = QLCNIC_ADDR_ERROR;
310 base_addr = addr & 0xfff00000;
311 offset = addr & 0x000fffff;
312
313 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
314 if (crb_addr_xform[i] == base_addr) {
315 pci_base = i << 20;
316 break;
317 }
318 }
319 if (pci_base == QLCNIC_ADDR_ERROR)
320 return pci_base;
321 else
322 return pci_base + offset;
323}
324
325#define QLCNIC_MAX_ROM_WAIT_USEC 100
326
327static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
328{
329 long timeout = 0;
330 long done = 0;
331
332 cond_resched();
333
334 while (done == 0) {
335 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
336 done &= 2;
337 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
338 dev_err(&adapter->pdev->dev,
339				"Timeout reached waiting for rom done\n");
340 return -EIO;
341 }
342 udelay(1);
343 }
344 return 0;
345}
346
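/*
 * Read one 32-bit word from serial flash through the ROMUSB block:
 * program the flash address and a 3-byte address count, issue
 * instruction opcode 0xb (presumably the SPI FAST_READ command), wait
 * for the done bit in GLB_STATUS, then fetch the result from ROM_RDATA.
 */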
347static int do_rom_fast_read(struct qlcnic_adapter *adapter,
348 u32 addr, u32 *valp)
349{
350 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
351 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
352 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
353 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
354 if (qlcnic_wait_rom_done(adapter)) {
355 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
356 return -EIO;
357 }
358 /* reset abyte_cnt and dummy_byte_cnt */
359 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
360 udelay(10);
361 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
362
363 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
364 return 0;
365}
366
367static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
368 u8 *bytes, size_t size)
369{
370 int addridx;
371 int ret = 0;
372
373 for (addridx = addr; addridx < (addr + size); addridx += 4) {
374 int v;
375 ret = do_rom_fast_read(adapter, addridx, &v);
376 if (ret != 0)
377 break;
378 *(__le32 *)bytes = cpu_to_le32(v);
379 bytes += 4;
380 }
381
382 return ret;
383}
384
385int
386qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
387 u8 *bytes, size_t size)
388{
389 int ret;
390
391 ret = qlcnic_rom_lock(adapter);
392 if (ret < 0)
393 return ret;
394
395 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
396
397 qlcnic_rom_unlock(adapter);
398 return ret;
399}
400
401int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
402{
403 int ret;
404
405 if (qlcnic_rom_lock(adapter) != 0)
406 return -EIO;
407
408 ret = do_rom_fast_read(adapter, addr, valp);
409 qlcnic_rom_unlock(adapter);
410 return ret;
411}
412
413int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
414{
415 int addr, val;
416 int i, n, init_delay;
417 struct crb_addr_pair *buf;
418 unsigned offset;
419 u32 off;
420 struct pci_dev *pdev = adapter->pdev;
421
422 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
423 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
424
425 qlcnic_rom_lock(adapter);
426 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
427 qlcnic_rom_unlock(adapter);
428
429 /* Init HW CRB block */
430 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
431 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
432 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
433 return -EIO;
434 }
435 offset = n & 0xffffU;
436 n = (n >> 16) & 0xffffU;
437
438 if (n >= 1024) {
439 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
440 return -EIO;
441 }
442
443 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
444 if (buf == NULL) {
445		dev_err(&pdev->dev, "Unable to allocate memory for rom read\n");
446 return -ENOMEM;
447 }
448
449 for (i = 0; i < n; i++) {
450 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
451 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
452 kfree(buf);
453 return -EIO;
454 }
455
456 buf[i].addr = addr;
457 buf[i].data = val;
458 }
459
460 for (i = 0; i < n; i++) {
461
462 off = qlcnic_decode_crb_addr(buf[i].addr);
463 if (off == QLCNIC_ADDR_ERROR) {
464 dev_err(&pdev->dev, "CRB init value out of range %x\n",
465 buf[i].addr);
466 continue;
467 }
468 off += QLCNIC_PCI_CRBSPACE;
469
470 if (off & 1)
471 continue;
472
473 /* skipping cold reboot MAGIC */
474 if (off == QLCNIC_CAM_RAM(0x1fc))
475 continue;
476 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
477 continue;
478 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
479 continue;
480 if (off == (ROMUSB_GLB + 0xa8))
481 continue;
482 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
483 continue;
484 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
485 continue;
486 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
487 continue;
488 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
489 continue;
490 /* skip the function enable register */
491 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
492 continue;
493 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
494 continue;
495 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
496 continue;
497
498 init_delay = 1;
499 /* After writing this register, HW needs time for CRB */
500 /* to quiet down (else crb_window returns 0xffffffff) */
501 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
502 init_delay = 1000;
503
504 QLCWR32(adapter, off, buf[i].data);
505
506 msleep(init_delay);
507 }
508 kfree(buf);
509
510 /* Initialize protocol process engine */
511 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
512 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
513 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
514 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
515 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
516 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
517 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
518 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
519 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
520 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
521 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
522 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
523 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
524 msleep(1);
525 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
526 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
527 return 0;
528}
529
530static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
531{
532 u32 val;
533 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
534
535 do {
536 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
537
538 switch (val) {
539 case PHAN_INITIALIZE_COMPLETE:
540 case PHAN_INITIALIZE_ACK:
541 return 0;
542 case PHAN_INITIALIZE_FAILED:
543 goto out_err;
544 default:
545 break;
546 }
547
548 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
549
550 } while (--retries);
551
552 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
553
554out_err:
555 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
556 "complete, state: 0x%x.\n", val);
557 return -EIO;
558}
559
560static int
561qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
562{
563 u32 val;
564 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
565
566 do {
567 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
568
569 if (val == PHAN_PEG_RCV_INITIALIZED)
570 return 0;
571
572 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
573
574 } while (--retries);
575
576 if (!retries) {
577 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
578 "complete, state: 0x%x.\n", val);
579 return -EIO;
580 }
581
582 return 0;
583}
584
585int
586qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
587{
588 int err;
589
590 err = qlcnic_cmd_peg_ready(adapter);
591 if (err)
592 return err;
593
594 err = qlcnic_receive_peg_ready(adapter);
595 if (err)
596 return err;
597
598 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
599
600 return err;
601}
602
603int
604qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
605
606 int timeo;
607 u32 val;
608
609 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
610 val = QLC_DEV_GET_DRV(val, adapter->portnum);
611 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
612 dev_err(&adapter->pdev->dev,
613 "Not an Ethernet NIC func=%u\n", val);
614 return -EIO;
615 }
616 adapter->physical_port = (val >> 2);
617 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
618 timeo = QLCNIC_INIT_TIMEOUT_SECS;
619
620 adapter->dev_init_timeo = timeo;
621
622 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
623 timeo = QLCNIC_RESET_TIMEOUT_SECS;
624
625 adapter->reset_ack_timeo = timeo;
626
627 return 0;
628}
629
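/*
 * Look up one region in the flash layout table (FLT).  The table at
 * QLCNIC_FLT_LOCATION starts with a qlcnic_flt_header whose len field
 * covers the header plus an array of fixed-size qlcnic_flt_entry
 * records; the entries are scanned for a matching region id and the
 * match is copied into *region_entry.
 */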
630static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
631 struct qlcnic_flt_entry *region_entry)
632{
633 struct qlcnic_flt_header flt_hdr;
634 struct qlcnic_flt_entry *flt_entry;
635 int i = 0, ret;
636 u32 entry_size;
637
638 memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
639 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
640 (u8 *)&flt_hdr,
641 sizeof(struct qlcnic_flt_header));
642 if (ret) {
643 dev_warn(&adapter->pdev->dev,
644 "error reading flash layout header\n");
645 return -EIO;
646 }
647
648 entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
649 flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
650 if (flt_entry == NULL) {
651 dev_warn(&adapter->pdev->dev, "error allocating memory\n");
652 return -EIO;
653 }
654
655 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
656 sizeof(struct qlcnic_flt_header),
657 (u8 *)flt_entry, entry_size);
658 if (ret) {
659 dev_warn(&adapter->pdev->dev,
660 "error reading flash layout entries\n");
661 goto err_out;
662 }
663
664 while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
665 if (flt_entry[i].region == region)
666 break;
667 i++;
668 }
669 if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
670 dev_warn(&adapter->pdev->dev,
671 "region=%x not found in %d regions\n", region, i);
672 ret = -EIO;
673 goto err_out;
674 }
675 memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
676
677err_out:
678 vfree(flt_entry);
679 return ret;
680}
681
682int
683qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
684{
685 struct qlcnic_flt_entry fw_entry;
686 u32 ver = -1, min_ver;
687 int ret;
688
689 ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
690 if (!ret)
691		/* bytes 0-3: signature, bytes 4-7: fw version */
692 qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
693 (int *)&ver);
694 else
695 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
696 (int *)&ver);
697
698 ver = QLCNIC_DECODE_VERSION(ver);
699 min_ver = QLCNIC_MIN_FW_VERSION;
700
701 if (ver < min_ver) {
702 dev_err(&adapter->pdev->dev,
703			"firmware version %d.%d.%d unsupported. "
704 "Min supported version %d.%d.%d\n",
705 _major(ver), _minor(ver), _build(ver),
706 _major(min_ver), _minor(min_ver), _build(min_ver));
707 return -EINVAL;
708 }
709
710 return 0;
711}
712
713static int
714qlcnic_has_mn(struct qlcnic_adapter *adapter)
715{
716 u32 capability;
717 capability = 0;
718
719 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
720 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
721 return 1;
722
723 return 0;
724}
725
726static
727struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
728{
729 u32 i;
730 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
731 __le32 entries = cpu_to_le32(directory->num_entries);
732
733 for (i = 0; i < entries; i++) {
734
735 __le32 offs = cpu_to_le32(directory->findex) +
736 (i * cpu_to_le32(directory->entry_size));
737 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
738
739 if (tab_type == section)
740 return (struct uni_table_desc *) &unirom[offs];
741 }
742
743 return NULL;
744}
745
746#define FILEHEADER_SIZE (14 * 4)
747
748static int
749qlcnic_validate_header(struct qlcnic_adapter *adapter)
750{
751 const u8 *unirom = adapter->fw->data;
752 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
753 __le32 fw_file_size = adapter->fw->size;
754 __le32 entries;
755 __le32 entry_size;
756 __le32 tab_size;
757
758 if (fw_file_size < FILEHEADER_SIZE)
759 return -EINVAL;
760
761 entries = cpu_to_le32(directory->num_entries);
762 entry_size = cpu_to_le32(directory->entry_size);
763 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
764
765 if (fw_file_size < tab_size)
766 return -EINVAL;
767
768 return 0;
769}
770
771static int
772qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
773{
774 struct uni_table_desc *tab_desc;
775 struct uni_data_desc *descr;
776 const u8 *unirom = adapter->fw->data;
777 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
778 QLCNIC_UNI_BOOTLD_IDX_OFF));
779 __le32 offs;
780 __le32 tab_size;
781 __le32 data_size;
782
783 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
784
785 if (!tab_desc)
786 return -EINVAL;
787
788 tab_size = cpu_to_le32(tab_desc->findex) +
789 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
790
791 if (adapter->fw->size < tab_size)
792 return -EINVAL;
793
794 offs = cpu_to_le32(tab_desc->findex) +
795 (cpu_to_le32(tab_desc->entry_size) * (idx));
796 descr = (struct uni_data_desc *)&unirom[offs];
797
798 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
799
800 if (adapter->fw->size < data_size)
801 return -EINVAL;
802
803 return 0;
804}
805
806static int
807qlcnic_validate_fw(struct qlcnic_adapter *adapter)
808{
809 struct uni_table_desc *tab_desc;
810 struct uni_data_desc *descr;
811 const u8 *unirom = adapter->fw->data;
812 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
813 QLCNIC_UNI_FIRMWARE_IDX_OFF));
814 __le32 offs;
815 __le32 tab_size;
816 __le32 data_size;
817
818 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
819
820 if (!tab_desc)
821 return -EINVAL;
822
823 tab_size = cpu_to_le32(tab_desc->findex) +
824 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
825
826 if (adapter->fw->size < tab_size)
827 return -EINVAL;
828
829 offs = cpu_to_le32(tab_desc->findex) +
830 (cpu_to_le32(tab_desc->entry_size) * (idx));
831 descr = (struct uni_data_desc *)&unirom[offs];
832 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
833
834 if (adapter->fw->size < data_size)
835 return -EINVAL;
836
837 return 0;
838}
839
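/*
 * Find a product-table entry matching this adapter's chip revision.
 * The flag bit tested depends on whether the MN capability is present;
 * if no MN entry matches, the table is rescanned for a non-MN entry.
 * The offset of the matching entry is saved in adapter->file_prd_off.
 */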
840static int
841qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
842{
843 struct uni_table_desc *ptab_descr;
844 const u8 *unirom = adapter->fw->data;
845 int mn_present = qlcnic_has_mn(adapter);
846 __le32 entries;
847 __le32 entry_size;
848 __le32 tab_size;
849 u32 i;
850
851 ptab_descr = qlcnic_get_table_desc(unirom,
852 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
853 if (!ptab_descr)
854 return -EINVAL;
855
856 entries = cpu_to_le32(ptab_descr->num_entries);
857 entry_size = cpu_to_le32(ptab_descr->entry_size);
858 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
859
860 if (adapter->fw->size < tab_size)
861 return -EINVAL;
862
863nomn:
864 for (i = 0; i < entries; i++) {
865
866 __le32 flags, file_chiprev, offs;
867 u8 chiprev = adapter->ahw->revision_id;
868 u32 flagbit;
869
870 offs = cpu_to_le32(ptab_descr->findex) +
871 (i * cpu_to_le32(ptab_descr->entry_size));
872 flags = cpu_to_le32(*((int *)&unirom[offs] +
873 QLCNIC_UNI_FLAGS_OFF));
874 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
875 QLCNIC_UNI_CHIP_REV_OFF));
876
877 flagbit = mn_present ? 1 : 2;
878
879 if ((chiprev == file_chiprev) &&
880 ((1ULL << flagbit) & flags)) {
881 adapter->file_prd_off = offs;
882 return 0;
883 }
884 }
885 if (mn_present) {
886 mn_present = 0;
887 goto nomn;
888 }
889 return -EINVAL;
890}
891
892static int
893qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
894{
895 if (qlcnic_validate_header(adapter)) {
896 dev_err(&adapter->pdev->dev,
897 "unified image: header validation failed\n");
898 return -EINVAL;
899 }
900
901 if (qlcnic_validate_product_offs(adapter)) {
902 dev_err(&adapter->pdev->dev,
903 "unified image: product validation failed\n");
904 return -EINVAL;
905 }
906
907 if (qlcnic_validate_bootld(adapter)) {
908 dev_err(&adapter->pdev->dev,
909 "unified image: bootld validation failed\n");
910 return -EINVAL;
911 }
912
913 if (qlcnic_validate_fw(adapter)) {
914 dev_err(&adapter->pdev->dev,
915 "unified image: firmware validation failed\n");
916 return -EINVAL;
917 }
918
919 return 0;
920}
921
922static
923struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
924 u32 section, u32 idx_offset)
925{
926 const u8 *unirom = adapter->fw->data;
927 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
928 idx_offset));
929 struct uni_table_desc *tab_desc;
930 __le32 offs;
931
932 tab_desc = qlcnic_get_table_desc(unirom, section);
933
934 if (tab_desc == NULL)
935 return NULL;
936
937 offs = cpu_to_le32(tab_desc->findex) +
938 (cpu_to_le32(tab_desc->entry_size) * idx);
939
940 return (struct uni_data_desc *)&unirom[offs];
941}
942
943static u8 *
944qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
945{
946 u32 offs = QLCNIC_BOOTLD_START;
947
948 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
949 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
950 QLCNIC_UNI_DIR_SECT_BOOTLD,
951 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
952
953 return (u8 *)&adapter->fw->data[offs];
954}
955
956static u8 *
957qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
958{
959 u32 offs = QLCNIC_IMAGE_START;
960
961 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
962 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
963 QLCNIC_UNI_DIR_SECT_FW,
964 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
965
966 return (u8 *)&adapter->fw->data[offs];
967}
968
969static __le32
970qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
971{
972 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
973 return cpu_to_le32((qlcnic_get_data_desc(adapter,
974 QLCNIC_UNI_DIR_SECT_FW,
975 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
976 else
977 return cpu_to_le32(
978 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
979}
980
981static __le32
982qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
983{
984 struct uni_data_desc *fw_data_desc;
985 const struct firmware *fw = adapter->fw;
986 __le32 major, minor, sub;
987 const u8 *ver_str;
988 int i, ret;
989
990 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
991 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
992
993 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
994 QLCNIC_UNI_FIRMWARE_IDX_OFF);
995 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
996 cpu_to_le32(fw_data_desc->size) - 17;
997
998 for (i = 0; i < 12; i++) {
999 if (!strncmp(&ver_str[i], "REV=", 4)) {
1000 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
1001 &major, &minor, &sub);
1002 if (ret != 3)
1003 return 0;
1004 else
1005 return major + (minor << 8) + (sub << 16);
1006 }
1007 }
1008
1009 return 0;
1010}
1011
1012static __le32
1013qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
1014{
1015 const struct firmware *fw = adapter->fw;
1016 __le32 bios_ver, prd_off = adapter->file_prd_off;
1017
1018 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
1019 return cpu_to_le32(
1020 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
1021
1022 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
1023 + QLCNIC_UNI_BIOS_VERSION_OFF));
1024
1025 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
1026}
1027
1028static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
1029{
1030 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
1031 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
1032
1033 qlcnic_pcie_sem_unlock(adapter, 2);
1034}
1035
1036static int
1037qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
1038{
1039 u32 heartbeat, ret = -EIO;
1040 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
1041
1042 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1043
1044 do {
1045 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
1046 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1047 if (heartbeat != adapter->heartbeat) {
1048 ret = QLCNIC_RCODE_SUCCESS;
1049 break;
1050 }
1051 } while (--retries);
1052
1053 return ret;
1054}
1055
1056int
1057qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1058{
1059 if (qlcnic_check_fw_heartbeat(adapter)) {
1060 qlcnic_rom_lock_recovery(adapter);
1061 return 1;
1062 }
1063
1064 if (adapter->need_fw_reset)
1065 return 1;
1066
1067 if (adapter->fw)
1068 return 1;
1069
1070 return 0;
1071}
1072
1073static const char *fw_name[] = {
1074 QLCNIC_UNIFIED_ROMIMAGE_NAME,
1075 QLCNIC_FLASH_ROMIMAGE_NAME,
1076};
1077
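/*
 * Download the bootloader and firmware into adapter memory eight bytes
 * at a time, either from the request_firmware() image or, when none is
 * held, word by word from flash, then kick off the firmware via the
 * peg and global software-reset registers.
 */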
1078int
1079qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1080{
1081 u64 *ptr64;
1082 u32 i, flashaddr, size;
1083 const struct firmware *fw = adapter->fw;
1084 struct pci_dev *pdev = adapter->pdev;
1085
1086 dev_info(&pdev->dev, "loading firmware from %s\n",
1087 fw_name[adapter->fw_type]);
1088
1089 if (fw) {
1090 __le64 data;
1091
1092 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1093
1094 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
1095 flashaddr = QLCNIC_BOOTLD_START;
1096
1097 for (i = 0; i < size; i++) {
1098 data = cpu_to_le64(ptr64[i]);
1099
1100 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
1101 return -EIO;
1102
1103 flashaddr += 8;
1104 }
1105
1106 size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
1107
1108 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
1109 flashaddr = QLCNIC_IMAGE_START;
1110
1111 for (i = 0; i < size; i++) {
1112 data = cpu_to_le64(ptr64[i]);
1113
1114 if (qlcnic_pci_mem_write_2M(adapter,
1115 flashaddr, data))
1116 return -EIO;
1117
1118 flashaddr += 8;
1119 }
1120
1121 size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
1122 if (size) {
1123 data = cpu_to_le64(ptr64[i]);
1124
1125 if (qlcnic_pci_mem_write_2M(adapter,
1126 flashaddr, data))
1127 return -EIO;
1128 }
1129
1130 } else {
1131 u64 data;
1132 u32 hi, lo;
1133 int ret;
1134 struct qlcnic_flt_entry bootld_entry;
1135
1136 ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
1137 &bootld_entry);
1138 if (!ret) {
1139 size = bootld_entry.size / 8;
1140 flashaddr = bootld_entry.start_addr;
1141 } else {
1142 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1143 flashaddr = QLCNIC_BOOTLD_START;
1144 dev_info(&pdev->dev,
1145 "using legacy method to get flash fw region");
1146 }
1147
1148 for (i = 0; i < size; i++) {
1149 if (qlcnic_rom_fast_read(adapter,
1150 flashaddr, (int *)&lo) != 0)
1151 return -EIO;
1152 if (qlcnic_rom_fast_read(adapter,
1153 flashaddr + 4, (int *)&hi) != 0)
1154 return -EIO;
1155
1156 data = (((u64)hi << 32) | lo);
1157
1158 if (qlcnic_pci_mem_write_2M(adapter,
1159 flashaddr, data))
1160 return -EIO;
1161
1162 flashaddr += 8;
1163 }
1164 }
1165 msleep(1);
1166
1167 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
1168 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
1169 return 0;
1170}
1171
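/*
 * Validate the loaded firmware image: check the magic value or the
 * unified-image layout and the minimum file size, reject versions
 * older than QLCNIC_MIN_FW_VERSION, and require the image's BIOS
 * version to match the one read from flash.
 */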
1172static int
1173qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1174{
1175 __le32 val;
1176 u32 ver, bios, min_size;
1177 struct pci_dev *pdev = adapter->pdev;
1178 const struct firmware *fw = adapter->fw;
1179 u8 fw_type = adapter->fw_type;
1180
1181 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
1182 if (qlcnic_validate_unified_romimage(adapter))
1183 return -EINVAL;
1184
1185 min_size = QLCNIC_UNI_FW_MIN_SIZE;
1186 } else {
1187 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
1188 if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
1189 return -EINVAL;
1190
1191 min_size = QLCNIC_FW_MIN_SIZE;
1192 }
1193
1194 if (fw->size < min_size)
1195 return -EINVAL;
1196
1197 val = qlcnic_get_fw_version(adapter);
1198 ver = QLCNIC_DECODE_VERSION(val);
1199
1200 if (ver < QLCNIC_MIN_FW_VERSION) {
1201 dev_err(&pdev->dev,
1202 "%s: firmware version %d.%d.%d unsupported\n",
1203 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
1204 return -EINVAL;
1205 }
1206
1207 val = qlcnic_get_bios_version(adapter);
1208 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
1209 if ((__force u32)val != bios) {
1210 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
1211 fw_name[fw_type]);
1212 return -EINVAL;
1213 }
1214
1215 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1216 return 0;
1217}
1218
1219static void
1220qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1221{
1222 u8 fw_type;
1223
1224 switch (adapter->fw_type) {
1225 case QLCNIC_UNKNOWN_ROMIMAGE:
1226 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
1227 break;
1228
1229 case QLCNIC_UNIFIED_ROMIMAGE:
1230 default:
1231 fw_type = QLCNIC_FLASH_ROMIMAGE;
1232 break;
1233 }
1234
1235 adapter->fw_type = fw_type;
1236}
1237
1238
1239
1240void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
1241{
1242 struct pci_dev *pdev = adapter->pdev;
1243 int rc;
1244
1245 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
1246
1247next:
1248 qlcnic_get_next_fwtype(adapter);
1249
1250 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
1251 adapter->fw = NULL;
1252 } else {
1253 rc = request_firmware(&adapter->fw,
1254 fw_name[adapter->fw_type], &pdev->dev);
1255 if (rc != 0)
1256 goto next;
1257
1258 rc = qlcnic_validate_firmware(adapter);
1259 if (rc != 0) {
1260 release_firmware(adapter->fw);
1261 msleep(1);
1262 goto next;
1263 }
1264 }
1265}
1266
1267
1268void
1269qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1270{
1271 if (adapter->fw)
1272 release_firmware(adapter->fw);
1273 adapter->fw = NULL;
1274}
1275
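/*
 * Decode an asynchronous link event from firmware: cable OUI/length,
 * speed, duplex, autoneg, module type and loopback status are unpacked
 * from the message body and reflected into the adapter state.
 */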
1276static void
1277qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1278 struct qlcnic_fw_msg *msg)
1279{
1280 u32 cable_OUI;
1281 u16 cable_len;
1282 u16 link_speed;
1283 u8 link_status, module, duplex, autoneg;
1284 u8 lb_status = 0;
1285 struct net_device *netdev = adapter->netdev;
1286
1287 adapter->has_link_events = 1;
1288
1289 cable_OUI = msg->body[1] & 0xffffffff;
1290 cable_len = (msg->body[1] >> 32) & 0xffff;
1291 link_speed = (msg->body[1] >> 48) & 0xffff;
1292
1293 link_status = msg->body[2] & 0xff;
1294 duplex = (msg->body[2] >> 16) & 0xff;
1295 autoneg = (msg->body[2] >> 24) & 0xff;
1296 lb_status = (msg->body[2] >> 32) & 0x3;
1297
1298 module = (msg->body[2] >> 8) & 0xff;
1299 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1300 dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
1301 "length %d\n", cable_OUI, cable_len);
1302 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1303 dev_info(&netdev->dev, "unsupported cable length %d\n",
1304 cable_len);
1305
1306 if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
1307 lb_status == QLCNIC_ELB_MODE))
1308 adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
1309
1310 qlcnic_advert_link_change(adapter, link_status);
1311
1312 if (duplex == LINKEVENT_FULL_DUPLEX)
1313 adapter->link_duplex = DUPLEX_FULL;
1314 else
1315 adapter->link_duplex = DUPLEX_HALF;
1316
1317 adapter->module_type = module;
1318 adapter->link_autoneg = autoneg;
1319 adapter->link_speed = link_speed;
1320}
1321
1322static void
1323qlcnic_handle_fw_message(int desc_cnt, int index,
1324 struct qlcnic_host_sds_ring *sds_ring)
1325{
1326 struct qlcnic_fw_msg msg;
1327 struct status_desc *desc;
1328 struct qlcnic_adapter *adapter;
1329 struct device *dev;
1330 int i = 0, opcode, ret;
1331
1332 while (desc_cnt > 0 && i < 8) {
1333 desc = &sds_ring->desc_head[index];
1334 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1335 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1336
1337 index = get_next_index(index, sds_ring->num_desc);
1338 desc_cnt--;
1339 }
1340
1341 adapter = sds_ring->adapter;
1342 dev = &adapter->pdev->dev;
1343 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1344
1345 switch (opcode) {
1346 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1347 qlcnic_handle_linkevent(adapter, &msg);
1348 break;
1349 case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
1350 ret = (u32)(msg.body[1]);
1351 switch (ret) {
1352 case 0:
1353 adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
1354 break;
1355 case 1:
1356 dev_info(dev, "loopback already in progress\n");
1357 adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
1358 break;
1359 case 2:
1360 dev_info(dev, "loopback cable is not connected\n");
1361 adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
1362 break;
1363 default:
1364 dev_info(dev, "loopback configure request failed,"
1365 " ret %x\n", ret);
1366 adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
1367 break;
1368 }
1369 break;
1370 default:
1371 break;
1372 }
1373}
1374
1375static int
1376qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1377 struct qlcnic_host_rds_ring *rds_ring,
1378 struct qlcnic_rx_buffer *buffer)
1379{
1380 struct sk_buff *skb;
1381 dma_addr_t dma;
1382 struct pci_dev *pdev = adapter->pdev;
1383
1384 skb = dev_alloc_skb(rds_ring->skb_size);
1385 if (!skb) {
1386 adapter->stats.skb_alloc_failure++;
1387 return -ENOMEM;
1388 }
1389
1390 skb_reserve(skb, NET_IP_ALIGN);
1391
1392 dma = pci_map_single(pdev, skb->data,
1393 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1394
1395 if (pci_dma_mapping_error(pdev, dma)) {
1396 adapter->stats.rx_dma_map_error++;
1397 dev_kfree_skb_any(skb);
1398 return -ENOMEM;
1399 }
1400
1401 buffer->skb = skb;
1402 buffer->dma = dma;
1403
1404 return 0;
1405}
1406
1407static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1408 struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
1409{
1410 struct qlcnic_rx_buffer *buffer;
1411 struct sk_buff *skb;
1412
1413 buffer = &rds_ring->rx_buf_arr[index];
1414
1415 if (unlikely(buffer->skb == NULL)) {
1416 WARN_ON(1);
1417 return NULL;
1418 }
1419
1420 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1421 PCI_DMA_FROMDEVICE);
1422
1423 skb = buffer->skb;
1424
1425 if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
1426 (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
1427 adapter->stats.csummed++;
1428 skb->ip_summed = CHECKSUM_UNNECESSARY;
1429 } else {
1430 skb_checksum_none_assert(skb);
1431 }
1432
1433 skb->dev = adapter->netdev;
1434
1435 buffer->skb = NULL;
1436
1437 return skb;
1438}
1439
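/*
 * Strip the VLAN header from a received frame if one is present and
 * decide how the tag is handled: with no PVID the tag is simply
 * reported; a tag equal to the PVID makes the frame take the untagged
 * path; any other tag is accepted only when tagging is enabled,
 * otherwise the frame is dropped.
 */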
1440static inline int
1441qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1442 u16 *vlan_tag)
1443{
1444 struct ethhdr *eth_hdr;
1445
1446 if (!__vlan_get_tag(skb, vlan_tag)) {
1447 eth_hdr = (struct ethhdr *) skb->data;
1448 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1449 skb_pull(skb, VLAN_HLEN);
1450 }
1451 if (!adapter->pvid)
1452 return 0;
1453
1454 if (*vlan_tag == adapter->pvid) {
1455 /* Outer vlan tag. Packet should follow non-vlan path */
1456 *vlan_tag = 0xffff;
1457 return 0;
1458 }
1459 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1460 return 0;
1461
1462 return -EINVAL;
1463}
1464
1465static struct qlcnic_rx_buffer *
1466qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1467 struct qlcnic_host_sds_ring *sds_ring,
1468 int ring, u64 sts_data0)
1469{
1470 struct net_device *netdev = adapter->netdev;
1471 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1472 struct qlcnic_rx_buffer *buffer;
1473 struct sk_buff *skb;
1474 struct qlcnic_host_rds_ring *rds_ring;
1475 int index, length, cksum, pkt_offset;
1476 u16 vid = 0xffff;
1477
1478 if (unlikely(ring >= adapter->max_rds_rings))
1479 return NULL;
1480
1481 rds_ring = &recv_ctx->rds_rings[ring];
1482
1483 index = qlcnic_get_sts_refhandle(sts_data0);
1484 if (unlikely(index >= rds_ring->num_desc))
1485 return NULL;
1486
1487 buffer = &rds_ring->rx_buf_arr[index];
1488
1489 length = qlcnic_get_sts_totallength(sts_data0);
1490 cksum = qlcnic_get_sts_status(sts_data0);
1491 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1492
1493 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1494 if (!skb)
1495 return buffer;
1496
1497 if (length > rds_ring->skb_size)
1498 skb_put(skb, rds_ring->skb_size);
1499 else
1500 skb_put(skb, length);
1501
1502 if (pkt_offset)
1503 skb_pull(skb, pkt_offset);
1504
1505 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1506 adapter->stats.rxdropped++;
1507 dev_kfree_skb(skb);
1508 return buffer;
1509 }
1510
1511 skb->protocol = eth_type_trans(skb, netdev);
1512
1513 if (vid != 0xffff)
1514 __vlan_hwaccel_put_tag(skb, vid);
1515
1516 napi_gro_receive(&sds_ring->napi, skb);
1517
1518 adapter->stats.rx_pkts++;
1519 adapter->stats.rxbytes += length;
1520
1521 return buffer;
1522}
1523
1524#define QLC_TCP_HDR_SIZE 20
1525#define QLC_TCP_TS_OPTION_SIZE 12
1526#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1527
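/*
 * Handle a hardware-aggregated (LRO) completion: rebuild the IP total
 * length and checksum and the TCP sequence/PSH fields of the merged
 * frame before passing it up the stack.
 */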
1528static struct qlcnic_rx_buffer *
1529qlcnic_process_lro(struct qlcnic_adapter *adapter,
1530 struct qlcnic_host_sds_ring *sds_ring,
1531 int ring, u64 sts_data0, u64 sts_data1)
1532{
1533 struct net_device *netdev = adapter->netdev;
1534 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1535 struct qlcnic_rx_buffer *buffer;
1536 struct sk_buff *skb;
1537 struct qlcnic_host_rds_ring *rds_ring;
1538 struct iphdr *iph;
1539 struct tcphdr *th;
1540 bool push, timestamp;
1541 int l2_hdr_offset, l4_hdr_offset;
1542 int index;
1543 u16 lro_length, length, data_offset;
1544 u32 seq_number;
1545 u16 vid = 0xffff;
1546
1547 if (unlikely(ring > adapter->max_rds_rings))
1548 return NULL;
1549
1550 rds_ring = &recv_ctx->rds_rings[ring];
1551
1552 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1553 if (unlikely(index > rds_ring->num_desc))
1554 return NULL;
1555
1556 buffer = &rds_ring->rx_buf_arr[index];
1557
1558 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1559 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1560 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1561 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1562 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1563 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1564
1565 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1566 if (!skb)
1567 return buffer;
1568
1569 if (timestamp)
1570 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1571 else
1572 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1573
1574 skb_put(skb, lro_length + data_offset);
1575
1576 skb_pull(skb, l2_hdr_offset);
1577
1578 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1579 adapter->stats.rxdropped++;
1580 dev_kfree_skb(skb);
1581 return buffer;
1582 }
1583
1584 skb->protocol = eth_type_trans(skb, netdev);
1585
1586 iph = (struct iphdr *)skb->data;
1587 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1588
1589 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1590 iph->tot_len = htons(length);
1591 iph->check = 0;
1592 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1593 th->psh = push;
1594 th->seq = htonl(seq_number);
1595
1596 length = skb->len;
1597
1598 if (vid != 0xffff)
1599 __vlan_hwaccel_put_tag(skb, vid);
1600 netif_receive_skb(skb);
1601
1602 adapter->stats.lro_pkts++;
1603 adapter->stats.lrobytes += length;
1604
1605 return buffer;
1606}
1607
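/*
 * Main NAPI receive path: consume up to @max host-owned status
 * descriptors, dispatch each by opcode (plain receive, LRO or firmware
 * message), hand the descriptors back to the adapter, replenish the
 * RDS rings from the per-SDS free lists and write back the consumer
 * index.
 */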
1608int
1609qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1610{
1611 struct qlcnic_adapter *adapter = sds_ring->adapter;
1612 struct list_head *cur;
1613 struct status_desc *desc;
1614 struct qlcnic_rx_buffer *rxbuf;
1615 u64 sts_data0, sts_data1;
1616
1617 int count = 0;
1618 int opcode, ring, desc_cnt;
1619 u32 consumer = sds_ring->consumer;
1620
1621 while (count < max) {
1622 desc = &sds_ring->desc_head[consumer];
1623 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1624
1625 if (!(sts_data0 & STATUS_OWNER_HOST))
1626 break;
1627
1628 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1629 opcode = qlcnic_get_sts_opcode(sts_data0);
1630
1631 switch (opcode) {
1632 case QLCNIC_RXPKT_DESC:
1633 case QLCNIC_OLD_RXPKT_DESC:
1634 case QLCNIC_SYN_OFFLOAD:
1635 ring = qlcnic_get_sts_type(sts_data0);
1636 rxbuf = qlcnic_process_rcv(adapter, sds_ring,
1637 ring, sts_data0);
1638 break;
1639 case QLCNIC_LRO_DESC:
1640 ring = qlcnic_get_lro_sts_type(sts_data0);
1641 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1642 rxbuf = qlcnic_process_lro(adapter, sds_ring,
1643 ring, sts_data0, sts_data1);
1644 break;
1645 case QLCNIC_RESPONSE_DESC:
1646 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1647 default:
1648 goto skip;
1649 }
1650
1651 WARN_ON(desc_cnt > 1);
1652
1653 if (likely(rxbuf))
1654 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1655 else
1656 adapter->stats.null_rxbuf++;
1657
1658skip:
1659 for (; desc_cnt > 0; desc_cnt--) {
1660 desc = &sds_ring->desc_head[consumer];
1661 desc->status_desc_data[0] =
1662 cpu_to_le64(STATUS_OWNER_PHANTOM);
1663 consumer = get_next_index(consumer, sds_ring->num_desc);
1664 }
1665 count++;
1666 }
1667
1668 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1669 struct qlcnic_host_rds_ring *rds_ring =
1670 &adapter->recv_ctx->rds_rings[ring];
1671
1672 if (!list_empty(&sds_ring->free_list[ring])) {
1673 list_for_each(cur, &sds_ring->free_list[ring]) {
1674 rxbuf = list_entry(cur,
1675 struct qlcnic_rx_buffer, list);
1676 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1677 }
1678 spin_lock(&rds_ring->lock);
1679 list_splice_tail_init(&sds_ring->free_list[ring],
1680 &rds_ring->free_list);
1681 spin_unlock(&rds_ring->lock);
1682 }
1683
1684 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1685 }
1686
1687 if (count) {
1688 sds_ring->consumer = consumer;
1689 writel(consumer, sds_ring->crb_sts_consumer);
1690 }
1691
1692 return count;
1693}
1694
1695void
1696qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1697 struct qlcnic_host_rds_ring *rds_ring)
1698{
1699 struct rcv_desc *pdesc;
1700 struct qlcnic_rx_buffer *buffer;
1701 int count = 0;
1702 u32 producer;
1703 struct list_head *head;
1704
1705 producer = rds_ring->producer;
1706
1707 head = &rds_ring->free_list;
1708 while (!list_empty(head)) {
1709
1710 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1711
1712 if (!buffer->skb) {
1713 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1714 break;
1715 }
1716
1717 count++;
1718 list_del(&buffer->list);
1719
1720 /* make a rcv descriptor */
1721 pdesc = &rds_ring->desc_head[producer];
1722 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1723 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1724 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1725
1726 producer = get_next_index(producer, rds_ring->num_desc);
1727 }
1728
1729 if (count) {
1730 rds_ring->producer = producer;
1731 writel((producer-1) & (rds_ring->num_desc-1),
1732 rds_ring->crb_rcv_producer);
1733 }
1734}
1735
1736static void
1737qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1738 struct qlcnic_host_rds_ring *rds_ring)
1739{
1740 struct rcv_desc *pdesc;
1741 struct qlcnic_rx_buffer *buffer;
1742 int count = 0;
1743 uint32_t producer;
1744 struct list_head *head;
1745
1746 if (!spin_trylock(&rds_ring->lock))
1747 return;
1748
1749 producer = rds_ring->producer;
1750
1751 head = &rds_ring->free_list;
1752 while (!list_empty(head)) {
1753
1754 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1755
1756 if (!buffer->skb) {
1757 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1758 break;
1759 }
1760
1761 count++;
1762 list_del(&buffer->list);
1763
1764 /* make a rcv descriptor */
1765 pdesc = &rds_ring->desc_head[producer];
1766 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1767 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1768 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1769
1770 producer = get_next_index(producer, rds_ring->num_desc);
1771 }
1772
1773 if (count) {
1774 rds_ring->producer = producer;
1775 writel((producer - 1) & (rds_ring->num_desc - 1),
1776 rds_ring->crb_rcv_producer);
1777 }
1778 spin_unlock(&rds_ring->lock);
1779}
1780
1781static void dump_skb(struct sk_buff *skb)
1782{
1783 int i;
1784 unsigned char *data = skb->data;
1785
1786 printk(KERN_INFO "\n");
1787 for (i = 0; i < skb->len; i++) {
1788 printk(KERN_INFO "%02x ", data[i]);
1789 if ((i & 0x0f) == 8)
1790 printk(KERN_INFO "\n");
1791 }
1792}
1793
1794void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1795 struct qlcnic_host_sds_ring *sds_ring,
1796 int ring, u64 sts_data0)
1797{
1798 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1799 struct sk_buff *skb;
1800 struct qlcnic_host_rds_ring *rds_ring;
1801 int index, length, cksum, pkt_offset;
1802
1803 if (unlikely(ring >= adapter->max_rds_rings))
1804 return;
1805
1806 rds_ring = &recv_ctx->rds_rings[ring];
1807
1808 index = qlcnic_get_sts_refhandle(sts_data0);
1809 length = qlcnic_get_sts_totallength(sts_data0);
1810 if (unlikely(index >= rds_ring->num_desc))
1811 return;
1812
1813 cksum = qlcnic_get_sts_status(sts_data0);
1814 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1815
1816 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1817 if (!skb)
1818 return;
1819
1820 if (length > rds_ring->skb_size)
1821 skb_put(skb, rds_ring->skb_size);
1822 else
1823 skb_put(skb, length);
1824
1825 if (pkt_offset)
1826 skb_pull(skb, pkt_offset);
1827
1828 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1829 adapter->diag_cnt++;
1830 else
1831 dump_skb(skb);
1832
1833 dev_kfree_skb_any(skb);
1834 adapter->stats.rx_pkts++;
1835 adapter->stats.rxbytes += length;
1836
1837 return;
1838}
1839
1840void
1841qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1842{
1843 struct qlcnic_adapter *adapter = sds_ring->adapter;
1844 struct status_desc *desc;
1845 u64 sts_data0;
1846 int ring, opcode, desc_cnt;
1847
1848 u32 consumer = sds_ring->consumer;
1849
1850 desc = &sds_ring->desc_head[consumer];
1851 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1852
1853 if (!(sts_data0 & STATUS_OWNER_HOST))
1854 return;
1855
1856 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1857 opcode = qlcnic_get_sts_opcode(sts_data0);
1858 switch (opcode) {
1859 case QLCNIC_RESPONSE_DESC:
1860 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1861 break;
1862 default:
1863 ring = qlcnic_get_sts_type(sts_data0);
1864 qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
1865 break;
1866 }
1867
1868 for (; desc_cnt > 0; desc_cnt--) {
1869 desc = &sds_ring->desc_head[consumer];
1870 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1871 consumer = get_next_index(consumer, sds_ring->num_desc);
1872 }
1873
1874 sds_ring->consumer = consumer;
1875 writel(consumer, sds_ring->crb_sts_consumer);
1876}
1877
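/*
 * Assemble the 6-byte MAC address from two 32-bit CRB registers; the
 * alternate-MAC layout re-shifts the halves by 16 bits before the
 * individual bytes are extracted.
 */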
1878void
1879qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1880 u8 alt_mac, u8 *mac)
1881{
1882 u32 mac_low, mac_high;
1883 int i;
1884
1885 mac_low = QLCRD32(adapter, off1);
1886 mac_high = QLCRD32(adapter, off2);
1887
1888 if (alt_mac) {
1889 mac_low |= (mac_low >> 16) | (mac_high << 16);
1890 mac_high >>= 16;
1891 }
1892
1893 for (i = 0; i < 2; i++)
1894 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1895 for (i = 2; i < 6; i++)
1896 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1897}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 00000000000..5ca1b562443
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,4366 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/slab.h>
9#include <linux/vmalloc.h>
10#include <linux/interrupt.h>
11
12#include "qlcnic.h"
13
14#include <linux/swab.h>
15#include <linux/dma-mapping.h>
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <linux/inetdevice.h>
19#include <linux/sysfs.h>
20#include <linux/aer.h>
21#include <linux/log2.h>
22
23MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
26MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
27
28char qlcnic_driver_name[] = "qlcnic";
29static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
30 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
31
32static struct workqueue_struct *qlcnic_wq;
33static int qlcnic_mac_learn;
34module_param(qlcnic_mac_learn, int, 0444);
35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36
37static int use_msi = 1;
38module_param(use_msi, int, 0444);
39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
40
41static int use_msi_x = 1;
42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44
45static int auto_fw_reset = 1;
46module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48
49static int load_fw_file;
50module_param(load_fw_file, int, 0444);
51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
52
53static int qlcnic_config_npars;
54module_param(qlcnic_config_npars, int, 0444);
55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56
57static int __devinit qlcnic_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent);
59static void __devexit qlcnic_remove(struct pci_dev *pdev);
60static int qlcnic_open(struct net_device *netdev);
61static int qlcnic_close(struct net_device *netdev);
62static void qlcnic_tx_timeout(struct net_device *netdev);
63static void qlcnic_attach_work(struct work_struct *work);
64static void qlcnic_fwinit_work(struct work_struct *work);
65static void qlcnic_fw_poll_work(struct work_struct *work);
66static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
67 work_func_t func, int delay);
68static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
69static int qlcnic_poll(struct napi_struct *napi, int budget);
70static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
71#ifdef CONFIG_NET_POLL_CONTROLLER
72static void qlcnic_poll_controller(struct net_device *netdev);
73#endif
74
75static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
76static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
77static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
78static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
79
80static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
81static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
82static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
83
84static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
85static irqreturn_t qlcnic_intr(int irq, void *data);
86static irqreturn_t qlcnic_msi_intr(int irq, void *data);
87static irqreturn_t qlcnic_msix_intr(int irq, void *data);
88
89static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
90static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
91static int qlcnic_start_firmware(struct qlcnic_adapter *);
92
93static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
94static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
95static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
96static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
97static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
98static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
99 struct qlcnic_esw_func_cfg *);
100static void qlcnic_vlan_rx_add(struct net_device *, u16);
101static void qlcnic_vlan_rx_del(struct net_device *, u16);
102
103/* PCI Device ID Table */
104#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
106 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
107
108#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
109
110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
112 {0,}
113};
114
115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116
117
118inline void
119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123}
124
125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
157 return recv_ctx->sds_rings == NULL;
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
188 }
189
190 return 0;
191}
192
193static void
194qlcnic_napi_del(struct qlcnic_adapter *adapter)
195{
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
205 qlcnic_free_sds_rings(adapter->recv_ctx);
206}
207
208static void
209qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
214
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223}
224
225static void
226qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227{
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
231
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241}
242
243static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244{
245 memset(&adapter->stats, 0, sizeof(adapter->stats));
246}
247
248static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
249{
250 u32 control;
251 int pos;
252
253 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
254 if (pos) {
255 pci_read_config_dword(pdev, pos, &control);
256 if (enable)
257 control |= PCI_MSIX_FLAGS_ENABLE;
258 else
259 control = 0;
260 pci_write_config_dword(pdev, pos, control);
261 }
262}
263
264static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
265{
266 int i;
267
268 for (i = 0; i < count; i++)
269 adapter->msix_entries[i].entry = i;
270}
271
272static int
273qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
274{
275 u8 mac_addr[ETH_ALEN];
276 struct net_device *netdev = adapter->netdev;
277 struct pci_dev *pdev = adapter->pdev;
278
279 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
280 return -EIO;
281
282 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
283 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
284 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
285
286 /* set station address */
287
288 if (!is_valid_ether_addr(netdev->perm_addr))
289 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
290 netdev->dev_addr);
291
292 return 0;
293}
294
295static int qlcnic_set_mac(struct net_device *netdev, void *p)
296{
297 struct qlcnic_adapter *adapter = netdev_priv(netdev);
298 struct sockaddr *addr = p;
299
300 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
301 return -EOPNOTSUPP;
302
303 if (!is_valid_ether_addr(addr->sa_data))
304 return -EINVAL;
305
306 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
307 netif_device_detach(netdev);
308 qlcnic_napi_disable(adapter);
309 }
310
311 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
312 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
313 qlcnic_set_multi(adapter->netdev);
314
315 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
316 netif_device_attach(netdev);
317 qlcnic_napi_enable(adapter);
318 }
319 return 0;
320}
321
322static const struct net_device_ops qlcnic_netdev_ops = {
323 .ndo_open = qlcnic_open,
324 .ndo_stop = qlcnic_close,
325 .ndo_start_xmit = qlcnic_xmit_frame,
326 .ndo_get_stats = qlcnic_get_stats,
327 .ndo_validate_addr = eth_validate_addr,
328 .ndo_set_multicast_list = qlcnic_set_multi,
329 .ndo_set_mac_address = qlcnic_set_mac,
330 .ndo_change_mtu = qlcnic_change_mtu,
331 .ndo_fix_features = qlcnic_fix_features,
332 .ndo_set_features = qlcnic_set_features,
333 .ndo_tx_timeout = qlcnic_tx_timeout,
334 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
335 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
336#ifdef CONFIG_NET_POLL_CONTROLLER
337 .ndo_poll_controller = qlcnic_poll_controller,
338#endif
339};
340
341static struct qlcnic_nic_template qlcnic_ops = {
342 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led,
344 .start_firmware = qlcnic_start_firmware
345};
346
347static struct qlcnic_nic_template qlcnic_vf_ops = {
348 .config_bridged_mode = qlcnicvf_config_bridged_mode,
349 .config_led = qlcnicvf_config_led,
350 .start_firmware = qlcnicvf_start_firmware
351};
352
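/*
 * Try to enable MSI-X with @num_msix vectors.  If the request is only
 * partially satisfied, retry with the largest power of two that fits;
 * on failure the caller falls back to MSI or legacy interrupts.
 */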
353static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
354{
355 struct pci_dev *pdev = adapter->pdev;
356 int err = -1;
357
358 adapter->max_sds_rings = 1;
359 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
360 qlcnic_set_msix_bit(pdev, 0);
361
362 if (adapter->msix_supported) {
363 enable_msix:
364 qlcnic_init_msix_entries(adapter, num_msix);
365 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
366 if (err == 0) {
367 adapter->flags |= QLCNIC_MSIX_ENABLED;
368 qlcnic_set_msix_bit(pdev, 1);
369
370 adapter->max_sds_rings = num_msix;
371
372 dev_info(&pdev->dev, "using msi-x interrupts\n");
373 return err;
374 }
375 if (err > 0) {
376 num_msix = rounddown_pow_of_two(err);
377 if (num_msix)
378 goto enable_msix;
379 }
380 }
381 return err;
382}
383
384
385static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
386{
387 const struct qlcnic_legacy_intr_set *legacy_intrp;
388 struct pci_dev *pdev = adapter->pdev;
389
390 if (use_msi && !pci_enable_msi(pdev)) {
391 adapter->flags |= QLCNIC_MSI_ENABLED;
392 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
393 msi_tgt_status[adapter->ahw->pci_func]);
394 dev_info(&pdev->dev, "using msi interrupts\n");
395 adapter->msix_entries[0].vector = pdev->irq;
396 return;
397 }
398
399 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
400
401 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
402 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
403 legacy_intrp->tgt_status_reg);
404 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
405 legacy_intrp->tgt_mask_reg);
406 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
407
408 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
409 ISR_INT_STATE_REG);
410 dev_info(&pdev->dev, "using legacy interrupts\n");
411 adapter->msix_entries[0].vector = pdev->irq;
412}
413
414static void
415qlcnic_setup_intr(struct qlcnic_adapter *adapter)
416{
417 int num_msix;
418
419 if (adapter->msix_supported) {
420 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
421 QLCNIC_DEF_NUM_STS_DESC_RINGS));
422 } else
423 num_msix = 1;
424
425 if (!qlcnic_enable_msix(adapter, num_msix))
426 return;
427
428 qlcnic_enable_msi_legacy(adapter);
429}
430
431static void
432qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
433{
434 if (adapter->flags & QLCNIC_MSIX_ENABLED)
435 pci_disable_msix(adapter->pdev);
436 if (adapter->flags & QLCNIC_MSI_ENABLED)
437 pci_disable_msi(adapter->pdev);
438}
439
440static void
441qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
442{
443 if (adapter->ahw->pci_base0 != NULL)
444 iounmap(adapter->ahw->pci_base0);
445}
446
447static int
448qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
449{
450 struct qlcnic_pci_info *pci_info;
451 int i, ret = 0;
452 u8 pfn;
453
454 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
455 if (!pci_info)
456 return -ENOMEM;
457
458 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
459 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
460 if (!adapter->npars) {
461 ret = -ENOMEM;
462 goto err_pci_info;
463 }
464
465 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
466 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
467 if (!adapter->eswitch) {
468 ret = -ENOMEM;
469 goto err_npars;
470 }
471
472 ret = qlcnic_get_pci_info(adapter, pci_info);
473 if (ret)
474 goto err_eswitch;
475
476 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
477 pfn = pci_info[i].id;
478 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
479 ret = QL_STATUS_INVALID_PARAM;
480 goto err_eswitch;
481 }
482 adapter->npars[pfn].active = (u8)pci_info[i].active;
483 adapter->npars[pfn].type = (u8)pci_info[i].type;
484 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
485 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
486 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
487 }
488
489 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
490 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
491
492 kfree(pci_info);
493 return 0;
494
495err_eswitch:
496 kfree(adapter->eswitch);
497 adapter->eswitch = NULL;
498err_npars:
499 kfree(adapter->npars);
500 adapter->npars = NULL;
501err_pci_info:
502 kfree(pci_info);
503
504 return ret;
505}
506
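/*
 * For the management function: under the API lock, program the driver
 * operating-mode register, either assigning the qlcnic_config_npars
 * privilege level to the other NIC functions or, by default, keeping
 * their settings and marking only this function as management.
 */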
507static int
508qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
509{
510 u8 id;
511 u32 ref_count;
512 int i, ret = 1;
513 u32 data = QLCNIC_MGMT_FUNC;
514 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
515
516 /* If other drivers are not in use set their privilege level */
517 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
518 ret = qlcnic_api_lock(adapter);
519 if (ret)
520 goto err_lock;
521
522 if (qlcnic_config_npars) {
523 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
524 id = i;
525 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
526 id == adapter->ahw->pci_func)
527 continue;
528 data |= (qlcnic_config_npars &
529 QLC_DEV_SET_DRV(0xf, id));
530 }
531 } else {
532 data = readl(priv_op);
533 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
534 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
535 adapter->ahw->pci_func));
536 }
537 writel(data, priv_op);
538 qlcnic_api_unlock(adapter);
539err_lock:
540 return ret;
541}
542
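/*
 * Read the firmware HAL version, derive this function's PCI function
 * number from its MSI-X table offset, and select the VF ops table when
 * the privilege level in the driver operating-mode register marks this
 * as a non-privileged function.
 */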
543static void
544qlcnic_check_vf(struct qlcnic_adapter *adapter)
545{
546 void __iomem *msix_base_addr;
547 void __iomem *priv_op;
548 u32 func;
549 u32 msix_base;
550 u32 op_mode, priv_level;
551
552 /* Determine FW API version */
553 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
554 QLCNIC_FW_API);
555
556 /* Find PCI function number */
557 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
558 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
559 msix_base = readl(msix_base_addr);
560 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
561 adapter->ahw->pci_func = func;
562
563 /* Determine function privilege level */
564 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
565 op_mode = readl(priv_op);
566 if (op_mode == QLC_DEV_DRV_DEFAULT)
567 priv_level = QLCNIC_MGMT_FUNC;
568 else
569 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
570
571 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
572 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
573 dev_info(&adapter->pdev->dev,
574 "HAL Version: %d, Non Privileged function\n",
575 adapter->fw_hal_version);
576 adapter->nic_ops = &qlcnic_vf_ops;
577 } else
578 adapter->nic_ops = &qlcnic_ops;
579}
580
581static int
582qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
583{
584 void __iomem *mem_ptr0 = NULL;
585 resource_size_t mem_base;
586 unsigned long mem_len, pci_len0 = 0;
587
588 struct pci_dev *pdev = adapter->pdev;
589
590 /* remap phys address */
591 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
592 mem_len = pci_resource_len(pdev, 0);
593
594 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
595
596 mem_ptr0 = pci_ioremap_bar(pdev, 0);
597 if (mem_ptr0 == NULL) {
598 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
599 return -EIO;
600 }
601 pci_len0 = mem_len;
602 } else {
603 return -EIO;
604 }
605
606 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
607
608 adapter->ahw->pci_base0 = mem_ptr0;
609 adapter->ahw->pci_len0 = pci_len0;
610
611 qlcnic_check_vf(adapter);
612
613 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
614 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
615 adapter->ahw->pci_func)));
616
617 return 0;
618}
619
620static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
621{
622 struct pci_dev *pdev = adapter->pdev;
623 int i, found = 0;
624
625 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
626 if (qlcnic_boards[i].vendor == pdev->vendor &&
627 qlcnic_boards[i].device == pdev->device &&
628 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
629 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
630 sprintf(name, "%pM: %s",
631 adapter->mac_addr,
632 qlcnic_boards[i].short_name);
633 found = 1;
634 break;
635 }
636
637 }
638
639 if (!found)
640 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
641}
642
643static void
644qlcnic_check_options(struct qlcnic_adapter *adapter)
645{
646 u32 fw_major, fw_minor, fw_build;
647 struct pci_dev *pdev = adapter->pdev;
648
649 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
650 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
651 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
652
653 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
654
655 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
656 fw_major, fw_minor, fw_build);
657 if (adapter->ahw->port_type == QLCNIC_XGBE) {
658 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
659 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
660 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
661 } else {
662 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
663 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
664 }
665
666 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
667 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
668
669 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
670 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
671 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
672 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
673 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
674 }
675
676 adapter->msix_supported = !!use_msi_x;
677
678 adapter->num_txd = MAX_CMD_DESCRIPTORS;
679
680 adapter->max_rds_rings = MAX_RDS_RINGS;
681}
682
683static int
684qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
685{
686 int err;
687 struct qlcnic_info nic_info;
688
689 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
690 if (err)
691 return err;
692
693 adapter->physical_port = (u8)nic_info.phys_port;
694 adapter->switch_mode = nic_info.switch_mode;
695 adapter->max_tx_ques = nic_info.max_tx_ques;
696 adapter->max_rx_ques = nic_info.max_rx_ques;
697 adapter->capabilities = nic_info.capabilities;
698 adapter->max_mac_filters = nic_info.max_mac_filters;
699 adapter->max_mtu = nic_info.max_mtu;
700
701 if (adapter->capabilities & BIT_6)
702 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
703 else
704 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
705
706 return err;
707}
708
709static void
710qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
711 struct qlcnic_esw_func_cfg *esw_cfg)
712{
713 if (esw_cfg->discard_tagged)
714 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
715 else
716 adapter->flags |= QLCNIC_TAGGING_ENABLED;
717
718 if (esw_cfg->vlan_id)
719 adapter->pvid = esw_cfg->vlan_id;
720 else
721 adapter->pvid = 0;
722}
723
724static void
725qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
726{
727 struct qlcnic_adapter *adapter = netdev_priv(netdev);
728 set_bit(vid, adapter->vlans);
729}
730
731static void
732qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
733{
734 struct qlcnic_adapter *adapter = netdev_priv(netdev);
735
736 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
737 clear_bit(vid, adapter->vlans);
738}
739
740static void
741qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
742 struct qlcnic_esw_func_cfg *esw_cfg)
743{
744 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
745 QLCNIC_PROMISC_DISABLED);
746
747 if (esw_cfg->mac_anti_spoof)
748 adapter->flags |= QLCNIC_MACSPOOF;
749
750 if (!esw_cfg->mac_override)
751 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
752
753 if (!esw_cfg->promisc_mode)
754 adapter->flags |= QLCNIC_PROMISC_DISABLED;
755
756 qlcnic_set_netdev_features(adapter, esw_cfg);
757}
758
759static int
760qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
761{
762 struct qlcnic_esw_func_cfg esw_cfg;
763
764 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
765 return 0;
766
767 esw_cfg.pci_func = adapter->ahw->pci_func;
768 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
769 return -EIO;
770 qlcnic_set_vlan_config(adapter, &esw_cfg);
771 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
772
773 return 0;
774}
775
776static void
777qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
778 struct qlcnic_esw_func_cfg *esw_cfg)
779{
780 struct net_device *netdev = adapter->netdev;
781 unsigned long features, vlan_features;
782
783 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
784 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
785 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
786 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
787
788 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
789 features |= (NETIF_F_TSO | NETIF_F_TSO6);
790 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
791 }
792
793 if (netdev->features & NETIF_F_LRO)
794 features |= NETIF_F_LRO;
795
796 if (esw_cfg->offload_flags & BIT_0) {
797 netdev->features |= features;
798 if (!(esw_cfg->offload_flags & BIT_1))
799 netdev->features &= ~NETIF_F_TSO;
800 if (!(esw_cfg->offload_flags & BIT_2))
801 netdev->features &= ~NETIF_F_TSO6;
802 } else {
803 netdev->features &= ~features;
804 }
805
806 netdev->vlan_features = (features & vlan_features);
807}
808
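/*
 * Read the NIC capabilities and, on the first pass, the privilege
 * level from the driver operating-mode register; a management function
 * additionally builds the PCI/NPAR info tables and programs the other
 * functions' operating modes.
 */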
809static int
810qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
811{
812 void __iomem *priv_op;
813 u32 op_mode, priv_level;
814 int err = 0;
815
816 err = qlcnic_initialize_nic(adapter);
817 if (err)
818 return err;
819
820 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
821 return 0;
822
823 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
824 op_mode = readl(priv_op);
825 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
826
827 if (op_mode == QLC_DEV_DRV_DEFAULT)
828 priv_level = QLCNIC_MGMT_FUNC;
829 else
830 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
831
832 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
833 if (priv_level == QLCNIC_MGMT_FUNC) {
834 adapter->op_mode = QLCNIC_MGMT_FUNC;
835 err = qlcnic_init_pci_info(adapter);
836 if (err)
837 return err;
838 /* Set privilege level for other functions */
839 qlcnic_set_function_modes(adapter);
840 dev_info(&adapter->pdev->dev,
841 "HAL Version: %d, Management function\n",
842 adapter->fw_hal_version);
843 } else if (priv_level == QLCNIC_PRIV_FUNC) {
844 adapter->op_mode = QLCNIC_PRIV_FUNC;
845 dev_info(&adapter->pdev->dev,
846 "HAL Version: %d, Privileged function\n",
847 adapter->fw_hal_version);
848 }
849 }
850
851 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
852
853 return err;
854}
855
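/*
 * Unless a firmware reset is pending, push default eswitch port
 * settings (offloads, MAC override, promiscuous mode) for every NIC
 * function and cache them in the NPAR table for later replay.
 */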
856static int
857qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
858{
859 struct qlcnic_esw_func_cfg esw_cfg;
860 struct qlcnic_npar_info *npar;
861 u8 i;
862
863 if (adapter->need_fw_reset)
864 return 0;
865
866 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
867 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
868 continue;
869 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
870 esw_cfg.pci_func = i;
871 esw_cfg.offload_flags = BIT_0;
872 esw_cfg.mac_override = BIT_0;
873 esw_cfg.promisc_mode = BIT_0;
874 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
875 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
876 if (qlcnic_config_switch_port(adapter, &esw_cfg))
877 return -EIO;
878 npar = &adapter->npars[i];
879 npar->pvid = esw_cfg.vlan_id;
880 npar->mac_override = esw_cfg.mac_override;
881 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
882 npar->discard_tagged = esw_cfg.discard_tagged;
883 npar->promisc_mode = esw_cfg.promisc_mode;
884 npar->offload_flags = esw_cfg.offload_flags;
885 }
886
887 return 0;
888}
889
890static int
891qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
892 struct qlcnic_npar_info *npar, int pci_func)
893{
894 struct qlcnic_esw_func_cfg esw_cfg;
895 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
896 esw_cfg.pci_func = pci_func;
897 esw_cfg.vlan_id = npar->pvid;
898 esw_cfg.mac_override = npar->mac_override;
899 esw_cfg.discard_tagged = npar->discard_tagged;
900 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
901 esw_cfg.offload_flags = npar->offload_flags;
902 esw_cfg.promisc_mode = npar->promisc_mode;
903 if (qlcnic_config_switch_port(adapter, &esw_cfg))
904 return -EIO;
905
906 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
907 if (qlcnic_config_switch_port(adapter, &esw_cfg))
908 return -EIO;
909
910 return 0;
911}
912
913static int
914qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
915{
916 int i, err;
917 struct qlcnic_npar_info *npar;
918 struct qlcnic_info nic_info;
919
920 if (!adapter->need_fw_reset)
921 return 0;
922
923 /* Set the NPAR config data after FW reset */
924 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
925 npar = &adapter->npars[i];
926 if (npar->type != QLCNIC_TYPE_NIC)
927 continue;
928 err = qlcnic_get_nic_info(adapter, &nic_info, i);
929 if (err)
930 return err;
931 nic_info.min_tx_bw = npar->min_bw;
932 nic_info.max_tx_bw = npar->max_bw;
933 err = qlcnic_set_nic_info(adapter, &nic_info);
934 if (err)
935 return err;
936
937 if (npar->enable_pm) {
938 err = qlcnic_config_port_mirroring(adapter,
939 npar->dest_npar, 1, i);
940 if (err)
941 return err;
942 }
943 err = qlcnic_reset_eswitch_config(adapter, npar, i);
944 if (err)
945 return err;
946 }
947 return 0;
948}
949
950static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
951{
952 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
953 u32 npar_state;
954
955 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
956 return 0;
957
958 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
959 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
960 msleep(1000);
961 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
962 }
963 if (!npar_opt_timeo) {
964 dev_err(&adapter->pdev->dev,
 965			"Timed out waiting for NPAR state to become operational\n");
966 return -EIO;
967 }
968 return 0;
969}
970
971static int
972qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
973{
974 int err;
975
976 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
977 adapter->op_mode != QLCNIC_MGMT_FUNC)
978 return 0;
979
980 err = qlcnic_set_default_offload_settings(adapter);
981 if (err)
982 return err;
983
984 err = qlcnic_reset_npar_config(adapter);
985 if (err)
986 return err;
987
988 qlcnic_dev_set_npar_ready(adapter);
989
990 return err;
991}
992
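/*
 * Bring the firmware up: decide whether this instance may (re)load the
 * firmware, load it from flash or from a firmware file depending on
 * load_fw_file, move the device to the READY state, and then apply the
 * eswitch and NPAR management settings.
 */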
993static int
994qlcnic_start_firmware(struct qlcnic_adapter *adapter)
995{
996 int err;
997
998 err = qlcnic_can_start_firmware(adapter);
999 if (err < 0)
1000 return err;
1001 else if (!err)
1002 goto check_fw_status;
1003
1004 if (load_fw_file)
1005 qlcnic_request_firmware(adapter);
1006 else {
1007 err = qlcnic_check_flash_fw_ver(adapter);
1008 if (err)
1009 goto err_out;
1010
1011 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
1012 }
1013
1014 err = qlcnic_need_fw_reset(adapter);
1015 if (err == 0)
1016 goto check_fw_status;
1017
1018 err = qlcnic_pinit_from_rom(adapter);
1019 if (err)
1020 goto err_out;
1021
1022 err = qlcnic_load_firmware(adapter);
1023 if (err)
1024 goto err_out;
1025
1026 qlcnic_release_firmware(adapter);
1027 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
1028
1029check_fw_status:
1030 err = qlcnic_check_fw_status(adapter);
1031 if (err)
1032 goto err_out;
1033
1034 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
1035 qlcnic_idc_debug_info(adapter, 1);
1036
1037 err = qlcnic_check_eswitch_mode(adapter);
1038 if (err) {
1039 dev_err(&adapter->pdev->dev,
1040 "Memory allocation failed for eswitch\n");
1041 goto err_out;
1042 }
1043 err = qlcnic_set_mgmt_operations(adapter);
1044 if (err)
1045 goto err_out;
1046
1047 qlcnic_check_options(adapter);
1048 adapter->need_fw_reset = 0;
1049
1050 qlcnic_release_firmware(adapter);
1051 return 0;
1052
1053err_out:
1054 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1055 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1056
1057 qlcnic_release_firmware(adapter);
1058 return err;
1059}
1060
1061static int
1062qlcnic_request_irq(struct qlcnic_adapter *adapter)
1063{
1064 irq_handler_t handler;
1065 struct qlcnic_host_sds_ring *sds_ring;
1066 int err, ring;
1067
1068 unsigned long flags = 0;
1069 struct net_device *netdev = adapter->netdev;
1070 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1071
1072 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1073 handler = qlcnic_tmp_intr;
1074 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1075 flags |= IRQF_SHARED;
1076
1077 } else {
1078 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1079 handler = qlcnic_msix_intr;
1080 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1081 handler = qlcnic_msi_intr;
1082 else {
1083 flags |= IRQF_SHARED;
1084 handler = qlcnic_intr;
1085 }
1086 }
1087 adapter->irq = netdev->irq;
1088
1089 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1090 sds_ring = &recv_ctx->sds_rings[ring];
1091 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1092 err = request_irq(sds_ring->irq, handler,
1093 flags, sds_ring->name, sds_ring);
1094 if (err)
1095 return err;
1096 }
1097
1098 return 0;
1099}
1100
1101static void
1102qlcnic_free_irq(struct qlcnic_adapter *adapter)
1103{
1104 int ring;
1105 struct qlcnic_host_sds_ring *sds_ring;
1106
1107 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1108
1109 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1110 sds_ring = &recv_ctx->sds_rings[ring];
1111 free_irq(sds_ring->irq, sds_ring);
1112 }
1113}
1114
1115static int
1116__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1117{
1118 int ring;
1119 struct qlcnic_host_rds_ring *rds_ring;
1120
1121 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1122 return -EIO;
1123
1124 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1125 return 0;
1126 if (qlcnic_set_eswitch_port_config(adapter))
1127 return -EIO;
1128
1129 if (qlcnic_fw_create_ctx(adapter))
1130 return -EIO;
1131
1132 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1133 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1134 qlcnic_post_rx_buffers(adapter, rds_ring);
1135 }
1136
1137 qlcnic_set_multi(netdev);
1138 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1139
1140 adapter->ahw->linkup = 0;
1141
1142 if (adapter->max_sds_rings > 1)
1143 qlcnic_config_rss(adapter, 1);
1144
1145 qlcnic_config_intr_coalesce(adapter);
1146
1147 if (netdev->features & NETIF_F_LRO)
1148 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1149
1150 qlcnic_napi_enable(adapter);
1151
1152 qlcnic_linkevent_request(adapter, 1);
1153
1154 adapter->reset_context = 0;
1155 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1156 return 0;
1157}
1158
1159/* Used during resume and firmware recovery. */
1160
1161static int
1162qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1163{
1164 int err = 0;
1165
1166 rtnl_lock();
1167 if (netif_running(netdev))
1168 err = __qlcnic_up(adapter, netdev);
1169 rtnl_unlock();
1170
1171 return err;
1172}
1173
1174static void
1175__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1176{
1177 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1178 return;
1179
1180 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1181 return;
1182
1183 smp_mb();
1184 spin_lock(&adapter->tx_clean_lock);
1185 netif_carrier_off(netdev);
1186 netif_tx_disable(netdev);
1187
1188 qlcnic_free_mac_list(adapter);
1189
1190 if (adapter->fhash.fnum)
1191 qlcnic_delete_lb_filters(adapter);
1192
1193 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1194
1195 qlcnic_napi_disable(adapter);
1196
1197 qlcnic_fw_destroy_ctx(adapter);
1198
1199 qlcnic_reset_rx_buffers_list(adapter);
1200 qlcnic_release_tx_buffers(adapter);
1201 spin_unlock(&adapter->tx_clean_lock);
1202}
1203
1204/* Used during suspend and firmware recovery. */
1205
1206static void
1207qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1208{
1209 rtnl_lock();
1210 if (netif_running(netdev))
1211 __qlcnic_down(adapter, netdev);
1212 rtnl_unlock();
1213
1214}
1215
1216static int
1217qlcnic_attach(struct qlcnic_adapter *adapter)
1218{
1219 struct net_device *netdev = adapter->netdev;
1220 struct pci_dev *pdev = adapter->pdev;
1221 int err;
1222
1223 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1224 return 0;
1225
1226 err = qlcnic_napi_add(adapter, netdev);
1227 if (err)
1228 return err;
1229
1230 err = qlcnic_alloc_sw_resources(adapter);
1231 if (err) {
1232		dev_err(&pdev->dev, "Failed to allocate sw resources\n");
1233 goto err_out_napi_del;
1234 }
1235
1236 err = qlcnic_alloc_hw_resources(adapter);
1237 if (err) {
1238		dev_err(&pdev->dev, "Failed to allocate hw resources\n");
1239 goto err_out_free_sw;
1240 }
1241
1242 err = qlcnic_request_irq(adapter);
1243 if (err) {
1244 dev_err(&pdev->dev, "failed to setup interrupt\n");
1245 goto err_out_free_hw;
1246 }
1247
1248 qlcnic_create_sysfs_entries(adapter);
1249
1250 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1251 return 0;
1252
1253err_out_free_hw:
1254 qlcnic_free_hw_resources(adapter);
1255err_out_free_sw:
1256 qlcnic_free_sw_resources(adapter);
1257err_out_napi_del:
1258 qlcnic_napi_del(adapter);
1259 return err;
1260}
1261
1262static void
1263qlcnic_detach(struct qlcnic_adapter *adapter)
1264{
1265 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1266 return;
1267
1268 qlcnic_remove_sysfs_entries(adapter);
1269
1270 qlcnic_free_hw_resources(adapter);
1271 qlcnic_release_rx_buffers(adapter);
1272 qlcnic_free_irq(adapter);
1273 qlcnic_napi_del(adapter);
1274 qlcnic_free_sw_resources(adapter);
1275
1276 adapter->is_up = 0;
1277}
1278
1279void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1280{
1281 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1282 struct qlcnic_host_sds_ring *sds_ring;
1283 int ring;
1284
1285 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1286 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1287 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1288 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1289 qlcnic_disable_int(sds_ring);
1290 }
1291 }
1292
1293 qlcnic_fw_destroy_ctx(adapter);
1294
1295 qlcnic_detach(adapter);
1296
1297 adapter->diag_test = 0;
1298 adapter->max_sds_rings = max_sds_rings;
1299
1300 if (qlcnic_attach(adapter))
1301 goto out;
1302
1303 if (netif_running(netdev))
1304 __qlcnic_up(adapter, netdev);
1305out:
1306 netif_device_attach(netdev);
1307}
1308
1309static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1310{
1311 int err = 0;
1312 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1313 GFP_KERNEL);
1314 if (!adapter->ahw) {
1315 dev_err(&adapter->pdev->dev,
1316			"Failed to allocate hardware context for adapter\n");
1317 err = -ENOMEM;
1318 goto err_out;
1319 }
1320 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1321 GFP_KERNEL);
1322 if (!adapter->recv_ctx) {
1323 dev_err(&adapter->pdev->dev,
1324 "Failed to allocate recv ctx resources for adapter\n");
1325 kfree(adapter->ahw);
1326 adapter->ahw = NULL;
1327 err = -ENOMEM;
1328 goto err_out;
1329 }
1330 /* Initialize interrupt coalesce parameters */
1331 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1332 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1333 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1334err_out:
1335 return err;
1336}
1337
1338static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1339{
1340 kfree(adapter->recv_ctx);
1341 adapter->recv_ctx = NULL;
1342
1343 if (adapter->ahw->fw_dump.tmpl_hdr) {
1344 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1345 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1346 }
1347 kfree(adapter->ahw);
1348 adapter->ahw = NULL;
1349}
1350
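/*
 * Used by the diagnostic (self-test) paths: tear down the normal Rx/Tx
 * setup and re-attach with a single SDS ring and diag_test set, so the
 * interrupt or loopback test can run against a minimal context.
 * qlcnic_diag_free_res() above restores the original configuration.
 */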
1351int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1352{
1353 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1354 struct qlcnic_host_sds_ring *sds_ring;
1355 struct qlcnic_host_rds_ring *rds_ring;
1356 int ring;
1357 int ret;
1358
1359 netif_device_detach(netdev);
1360
1361 if (netif_running(netdev))
1362 __qlcnic_down(adapter, netdev);
1363
1364 qlcnic_detach(adapter);
1365
1366 adapter->max_sds_rings = 1;
1367 adapter->diag_test = test;
1368
1369 ret = qlcnic_attach(adapter);
1370 if (ret) {
1371 netif_device_attach(netdev);
1372 return ret;
1373 }
1374
1375 ret = qlcnic_fw_create_ctx(adapter);
1376 if (ret) {
1377 qlcnic_detach(adapter);
1378 netif_device_attach(netdev);
1379 return ret;
1380 }
1381
1382 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1383 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1384 qlcnic_post_rx_buffers(adapter, rds_ring);
1385 }
1386
1387 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1388 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1389 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1390 qlcnic_enable_int(sds_ring);
1391 }
1392 }
1393
1394 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
1395 adapter->ahw->loopback_state = 0;
1396 qlcnic_linkevent_request(adapter, 1);
1397 }
1398
1399 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1400
1401 return 0;
1402}
1403
1404/* Reset context in hardware only */
1405static int
1406qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1407{
1408 struct net_device *netdev = adapter->netdev;
1409
1410 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1411 return -EBUSY;
1412
1413 netif_device_detach(netdev);
1414
1415 qlcnic_down(adapter, netdev);
1416
1417 qlcnic_up(adapter, netdev);
1418
1419 netif_device_attach(netdev);
1420
1421 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1422 return 0;
1423}
1424
1425int
1426qlcnic_reset_context(struct qlcnic_adapter *adapter)
1427{
1428 int err = 0;
1429 struct net_device *netdev = adapter->netdev;
1430
1431 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1432 return -EBUSY;
1433
1434 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1435
1436 netif_device_detach(netdev);
1437
1438 if (netif_running(netdev))
1439 __qlcnic_down(adapter, netdev);
1440
1441 qlcnic_detach(adapter);
1442
1443 if (netif_running(netdev)) {
1444 err = qlcnic_attach(adapter);
1445 if (!err)
1446 __qlcnic_up(adapter, netdev);
1447 }
1448
1449 netif_device_attach(netdev);
1450 }
1451
1452 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1453 return err;
1454}
1455
1456static int
1457qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1458 struct net_device *netdev, u8 pci_using_dac)
1459{
1460 int err;
1461 struct pci_dev *pdev = adapter->pdev;
1462
1463 adapter->mc_enabled = 0;
1464 adapter->max_mc_count = 38;
1465
1466 netdev->netdev_ops = &qlcnic_netdev_ops;
1467 netdev->watchdog_timeo = 5*HZ;
1468
1469 qlcnic_change_mtu(netdev, netdev->mtu);
1470
1471 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1472
1473 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1474 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1475
1476 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1477 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1478 if (pci_using_dac)
1479 netdev->hw_features |= NETIF_F_HIGHDMA;
1480
1481 netdev->vlan_features = netdev->hw_features;
1482
1483 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1484 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1485 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1486 netdev->hw_features |= NETIF_F_LRO;
1487
1488 netdev->features |= netdev->hw_features |
1489 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1490
1491 netdev->irq = adapter->msix_entries[0].vector;
1492
1493 err = register_netdev(netdev);
1494 if (err) {
1495 dev_err(&pdev->dev, "failed to register net device\n");
1496 return err;
1497 }
1498
1499 return 0;
1500}
1501
1502static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1503{
1504 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1505 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1506 *pci_using_dac = 1;
1507 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1508 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1509 *pci_using_dac = 0;
1510 else {
1511 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1512 return -EIO;
1513 }
1514
1515 return 0;
1516}
1517
1518static int
1519qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1520{
1521 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1522 GFP_KERNEL);
1523
1524 if (adapter->msix_entries)
1525 return 0;
1526
1527 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1528 return -ENOMEM;
1529}
1530
1531static int __devinit
1532qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1533{
1534 struct net_device *netdev = NULL;
1535 struct qlcnic_adapter *adapter = NULL;
1536 int err;
1537 uint8_t revision_id;
1538 uint8_t pci_using_dac;
1539 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1540
1541 err = pci_enable_device(pdev);
1542 if (err)
1543 return err;
1544
1545 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1546 err = -ENODEV;
1547 goto err_out_disable_pdev;
1548 }
1549
1550 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1551 if (err)
1552 goto err_out_disable_pdev;
1553
1554 err = pci_request_regions(pdev, qlcnic_driver_name);
1555 if (err)
1556 goto err_out_disable_pdev;
1557
1558 pci_set_master(pdev);
1559 pci_enable_pcie_error_reporting(pdev);
1560
1561 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1562 if (!netdev) {
1563 dev_err(&pdev->dev, "failed to allocate net_device\n");
1564 err = -ENOMEM;
1565 goto err_out_free_res;
1566 }
1567
1568 SET_NETDEV_DEV(netdev, &pdev->dev);
1569
1570 adapter = netdev_priv(netdev);
1571 adapter->netdev = netdev;
1572 adapter->pdev = pdev;
1573
1574	err = qlcnic_alloc_adapter_resources(adapter);
1575	if (err)
1576		goto err_out_free_netdev;
1577 adapter->dev_rst_time = jiffies;
1578 revision_id = pdev->revision;
1579 adapter->ahw->revision_id = revision_id;
1580 adapter->mac_learn = qlcnic_mac_learn;
1581
1582 rwlock_init(&adapter->ahw->crb_lock);
1583 mutex_init(&adapter->ahw->mem_lock);
1584
1585 spin_lock_init(&adapter->tx_clean_lock);
1586 INIT_LIST_HEAD(&adapter->mac_list);
1587
1588 err = qlcnic_setup_pci_map(adapter);
1589 if (err)
1590 goto err_out_free_hw;
1591
1592 /* This will be reset for mezz cards */
1593 adapter->portnum = adapter->ahw->pci_func;
1594
1595 err = qlcnic_get_board_info(adapter);
1596 if (err) {
1597 dev_err(&pdev->dev, "Error getting board config info.\n");
1598 goto err_out_iounmap;
1599 }
1600
1601 err = qlcnic_setup_idc_param(adapter);
1602 if (err)
1603 goto err_out_iounmap;
1604
1605 adapter->flags |= QLCNIC_NEED_FLR;
1606
1607 err = adapter->nic_ops->start_firmware(adapter);
1608 if (err) {
1609		dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1610 goto err_out_decr_ref;
1611 }
1612
1613 /* Get FW dump template and store it */
1614 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
1615 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1616 dev_info(&pdev->dev,
1617 "Supports FW dump capability\n");
1618
1619 if (qlcnic_read_mac_addr(adapter))
1620 dev_warn(&pdev->dev, "failed to read mac addr\n");
1621
1622 if (adapter->portnum == 0) {
1623 get_brd_name(adapter, brd_name);
1624
1625 pr_info("%s: %s Board Chip rev 0x%x\n",
1626 module_name(THIS_MODULE),
1627 brd_name, adapter->ahw->revision_id);
1628 }
1629
1630 qlcnic_clear_stats(adapter);
1631
1632 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1633 if (err)
1634 goto err_out_decr_ref;
1635
1636 qlcnic_setup_intr(adapter);
1637
1638 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1639 if (err)
1640 goto err_out_disable_msi;
1641
1642 pci_set_drvdata(pdev, adapter);
1643
1644 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1645
1646 switch (adapter->ahw->port_type) {
1647 case QLCNIC_GBE:
1648 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1649 adapter->netdev->name);
1650 break;
1651 case QLCNIC_XGBE:
1652 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1653 adapter->netdev->name);
1654 break;
1655 }
1656
1657 if (adapter->mac_learn)
1658 qlcnic_alloc_lb_filters_mem(adapter);
1659
1660 qlcnic_create_diag_entries(adapter);
1661
1662 return 0;
1663
1664err_out_disable_msi:
1665 qlcnic_teardown_intr(adapter);
1666 kfree(adapter->msix_entries);
1667
1668err_out_decr_ref:
1669 qlcnic_clr_all_drv_state(adapter, 0);
1670
1671err_out_iounmap:
1672 qlcnic_cleanup_pci_map(adapter);
1673
1674err_out_free_hw:
1675 qlcnic_free_adapter_resources(adapter);
1676
1677err_out_free_netdev:
1678 free_netdev(netdev);
1679
1680err_out_free_res:
1681 pci_release_regions(pdev);
1682
1683err_out_disable_pdev:
1684 pci_set_drvdata(pdev, NULL);
1685 pci_disable_device(pdev);
1686 return err;
1687}
1688
1689static void __devexit qlcnic_remove(struct pci_dev *pdev)
1690{
1691 struct qlcnic_adapter *adapter;
1692 struct net_device *netdev;
1693
1694 adapter = pci_get_drvdata(pdev);
1695 if (adapter == NULL)
1696 return;
1697
1698 netdev = adapter->netdev;
1699
1700 qlcnic_cancel_fw_work(adapter);
1701
1702 unregister_netdev(netdev);
1703
1704 qlcnic_detach(adapter);
1705
1706 if (adapter->npars != NULL)
1707 kfree(adapter->npars);
1708 if (adapter->eswitch != NULL)
1709 kfree(adapter->eswitch);
1710
1711 qlcnic_clr_all_drv_state(adapter, 0);
1712
1713 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1714
1715 qlcnic_free_lb_filters_mem(adapter);
1716
1717 qlcnic_teardown_intr(adapter);
1718 kfree(adapter->msix_entries);
1719
1720 qlcnic_remove_diag_entries(adapter);
1721
1722 qlcnic_cleanup_pci_map(adapter);
1723
1724 qlcnic_release_firmware(adapter);
1725
1726 pci_disable_pcie_error_reporting(pdev);
1727 pci_release_regions(pdev);
1728 pci_disable_device(pdev);
1729 pci_set_drvdata(pdev, NULL);
1730
1731 qlcnic_free_adapter_resources(adapter);
1732 free_netdev(netdev);
1733}
1734static int __qlcnic_shutdown(struct pci_dev *pdev)
1735{
1736 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1737 struct net_device *netdev = adapter->netdev;
1738 int retval;
1739
1740 netif_device_detach(netdev);
1741
1742 qlcnic_cancel_fw_work(adapter);
1743
1744 if (netif_running(netdev))
1745 qlcnic_down(adapter, netdev);
1746
1747 qlcnic_clr_all_drv_state(adapter, 0);
1748
1749 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1750
1751 retval = pci_save_state(pdev);
1752 if (retval)
1753 return retval;
1754
1755 if (qlcnic_wol_supported(adapter)) {
1756 pci_enable_wake(pdev, PCI_D3cold, 1);
1757 pci_enable_wake(pdev, PCI_D3hot, 1);
1758 }
1759
1760 return 0;
1761}
1762
1763static void qlcnic_shutdown(struct pci_dev *pdev)
1764{
1765 if (__qlcnic_shutdown(pdev))
1766 return;
1767
1768 pci_disable_device(pdev);
1769}
1770
1771#ifdef CONFIG_PM
1772static int
1773qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1774{
1775 int retval;
1776
1777 retval = __qlcnic_shutdown(pdev);
1778 if (retval)
1779 return retval;
1780
1781 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1782 return 0;
1783}
1784
1785static int
1786qlcnic_resume(struct pci_dev *pdev)
1787{
1788 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1789 struct net_device *netdev = adapter->netdev;
1790 int err;
1791
1792 err = pci_enable_device(pdev);
1793 if (err)
1794 return err;
1795
1796 pci_set_power_state(pdev, PCI_D0);
1797 pci_set_master(pdev);
1798 pci_restore_state(pdev);
1799
1800 err = adapter->nic_ops->start_firmware(adapter);
1801 if (err) {
1802 dev_err(&pdev->dev, "failed to start firmware\n");
1803 return err;
1804 }
1805
1806 if (netif_running(netdev)) {
1807 err = qlcnic_up(adapter, netdev);
1808 if (err)
1809 goto done;
1810
1811 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1812 }
1813done:
1814 netif_device_attach(netdev);
1815 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1816 return 0;
1817}
1818#endif
1819
1820static int qlcnic_open(struct net_device *netdev)
1821{
1822 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1823 int err;
1824
1825 netif_carrier_off(netdev);
1826
1827 err = qlcnic_attach(adapter);
1828 if (err)
1829 return err;
1830
1831 err = __qlcnic_up(adapter, netdev);
1832 if (err)
1833 goto err_out;
1834
1835 netif_start_queue(netdev);
1836
1837 return 0;
1838
1839err_out:
1840 qlcnic_detach(adapter);
1841 return err;
1842}
1843
1844/*
1845 * qlcnic_close - Disables a network interface entry point
1846 */
1847static int qlcnic_close(struct net_device *netdev)
1848{
1849 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1850
1851 __qlcnic_down(adapter, netdev);
1852 return 0;
1853}
1854
1855void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1856{
1857 void *head;
1858 int i;
1859
1860 if (adapter->fhash.fmax && adapter->fhash.fhead)
1861 return;
1862
1863 spin_lock_init(&adapter->mac_learn_lock);
1864
1865 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1866 GFP_KERNEL);
1867 if (!head)
1868 return;
1869
1870 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1871 adapter->fhash.fhead = head;
1872
1873 for (i = 0; i < adapter->fhash.fmax; i++)
1874 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1875}
1876
1877static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1878{
1879 if (adapter->fhash.fmax && adapter->fhash.fhead)
1880 kfree(adapter->fhash.fhead);
1881
1882 adapter->fhash.fhead = NULL;
1883 adapter->fhash.fmax = 0;
1884}
1885
1886static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1887 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1888{
1889 struct cmd_desc_type0 *hwdesc;
1890 struct qlcnic_nic_req *req;
1891 struct qlcnic_mac_req *mac_req;
1892 struct qlcnic_vlan_req *vlan_req;
1893 u32 producer;
1894 u64 word;
1895
1896 producer = tx_ring->producer;
1897 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1898
1899 req = (struct qlcnic_nic_req *)hwdesc;
1900 memset(req, 0, sizeof(struct qlcnic_nic_req));
1901 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1902
1903 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1904 req->req_hdr = cpu_to_le64(word);
1905
1906 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1907 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1908 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1909
1910 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1911 vlan_req->vlan_id = vlan_id;
1912
1913 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1914 smp_mb();
1915}
1916
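/*
 * Fold bits 16..18 and 40..42 of the 64-bit source MAC value into a 6-bit
 * value; callers mask the result with (QLCNIC_LB_MAX_FILTERS - 1) to pick
 * a bucket in the filter hash table.
 */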
1917#define QLCNIC_MAC_HASH(MAC)\
1918 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1919
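/*
 * MAC learning on transmit: remember the source address of an outgoing
 * frame.  New addresses are programmed into the firmware through a
 * QLCNIC_MAC_ADD/QLCNIC_MAC_VLAN_ADD request placed on the Tx ring (see
 * qlcnic_change_filter() above) and cached in the fhash table; entries not
 * refreshed for QLCNIC_READD_AGE seconds are re-programmed.
 */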
1920static void
1921qlcnic_send_filter(struct qlcnic_adapter *adapter,
1922 struct qlcnic_host_tx_ring *tx_ring,
1923 struct cmd_desc_type0 *first_desc,
1924 struct sk_buff *skb)
1925{
1926 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1927 struct qlcnic_filter *fil, *tmp_fil;
1928 struct hlist_node *tmp_hnode, *n;
1929 struct hlist_head *head;
1930 u64 src_addr = 0;
1931 __le16 vlan_id = 0;
1932 u8 hindex;
1933
1934 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1935 return;
1936
1937 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1938 return;
1939
1940	/* Only NPAR-capable devices support VLAN-based learning */
1941 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1942 vlan_id = first_desc->vlan_TCI;
1943 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1944 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1945 head = &(adapter->fhash.fhead[hindex]);
1946
1947 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1948 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1949 tmp_fil->vlan_id == vlan_id) {
1950
1951 if (jiffies >
1952 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1953 qlcnic_change_filter(adapter, src_addr, vlan_id,
1954 tx_ring);
1955 tmp_fil->ftime = jiffies;
1956 return;
1957 }
1958 }
1959
1960 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1961 if (!fil)
1962 return;
1963
1964 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
1965
1966 fil->ftime = jiffies;
1967 fil->vlan_id = vlan_id;
1968 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1969 spin_lock(&adapter->mac_learn_lock);
1970 hlist_add_head(&(fil->fnode), head);
1971 adapter->fhash.fnum++;
1972 spin_unlock(&adapter->mac_learn_lock);
1973}
1974
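/*
 * Fill in the protocol-specific parts of the first Tx descriptor: VLAN
 * tagging (including the PVID case), the checksum-offload opcode, and, for
 * TSO frames, a copy of the MAC/IP/TCP headers into extra descriptors so
 * the firmware has a header template to work from.
 */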
1975static int
1976qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
1977 struct cmd_desc_type0 *first_desc,
1978 struct sk_buff *skb)
1979{
1980 u8 opcode = 0, hdr_len = 0;
1981 u16 flags = 0, vlan_tci = 0;
1982 int copied, offset, copy_len;
1983 struct cmd_desc_type0 *hwdesc;
1984 struct vlan_ethhdr *vh;
1985 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1986 u16 protocol = ntohs(skb->protocol);
1987 u32 producer = tx_ring->producer;
1988
1989 if (protocol == ETH_P_8021Q) {
1990 vh = (struct vlan_ethhdr *)skb->data;
1991 flags = FLAGS_VLAN_TAGGED;
1992 vlan_tci = vh->h_vlan_TCI;
1993 } else if (vlan_tx_tag_present(skb)) {
1994 flags = FLAGS_VLAN_OOB;
1995 vlan_tci = vlan_tx_tag_get(skb);
1996 }
1997 if (unlikely(adapter->pvid)) {
1998 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1999 return -EIO;
2000 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2001 goto set_flags;
2002
2003 flags = FLAGS_VLAN_OOB;
2004 vlan_tci = adapter->pvid;
2005 }
2006set_flags:
2007 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2008 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2009
2010 if (*(skb->data) & BIT_0) {
2011 flags |= BIT_0;
2012 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2013 }
2014 opcode = TX_ETHER_PKT;
2015 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
2016 skb_shinfo(skb)->gso_size > 0) {
2017
2018 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2019
2020 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2021 first_desc->total_hdr_length = hdr_len;
2022
2023 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2024
2025 /* For LSO, we need to copy the MAC/IP/TCP headers into
2026 * the descriptor ring */
2027 copied = 0;
2028 offset = 2;
2029
2030 if (flags & FLAGS_VLAN_OOB) {
2031 first_desc->total_hdr_length += VLAN_HLEN;
2032 first_desc->tcp_hdr_offset = VLAN_HLEN;
2033 first_desc->ip_hdr_offset = VLAN_HLEN;
2034 /* Only in case of TSO on vlan device */
2035 flags |= FLAGS_VLAN_TAGGED;
2036
2037 /* Create a TSO vlan header template for firmware */
2038
2039 hwdesc = &tx_ring->desc_head[producer];
2040 tx_ring->cmd_buf_arr[producer].skb = NULL;
2041
2042 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2043 offset, hdr_len + VLAN_HLEN);
2044
2045 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2046 skb_copy_from_linear_data(skb, vh, 12);
2047 vh->h_vlan_proto = htons(ETH_P_8021Q);
2048 vh->h_vlan_TCI = htons(vlan_tci);
2049
2050 skb_copy_from_linear_data_offset(skb, 12,
2051 (char *)vh + 16, copy_len - 16);
2052
2053 copied = copy_len - VLAN_HLEN;
2054 offset = 0;
2055
2056 producer = get_next_index(producer, tx_ring->num_desc);
2057 }
2058
2059 while (copied < hdr_len) {
2060
2061 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2062 offset, (hdr_len - copied));
2063
2064 hwdesc = &tx_ring->desc_head[producer];
2065 tx_ring->cmd_buf_arr[producer].skb = NULL;
2066
2067 skb_copy_from_linear_data_offset(skb, copied,
2068 (char *) hwdesc + offset, copy_len);
2069
2070 copied += copy_len;
2071 offset = 0;
2072
2073 producer = get_next_index(producer, tx_ring->num_desc);
2074 }
2075
2076 tx_ring->producer = producer;
2077 smp_mb();
2078 adapter->stats.lso_frames++;
2079
2080 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2081 u8 l4proto;
2082
2083 if (protocol == ETH_P_IP) {
2084 l4proto = ip_hdr(skb)->protocol;
2085
2086 if (l4proto == IPPROTO_TCP)
2087 opcode = TX_TCP_PKT;
2088 else if (l4proto == IPPROTO_UDP)
2089 opcode = TX_UDP_PKT;
2090 } else if (protocol == ETH_P_IPV6) {
2091 l4proto = ipv6_hdr(skb)->nexthdr;
2092
2093 if (l4proto == IPPROTO_TCP)
2094 opcode = TX_TCPV6_PKT;
2095 else if (l4proto == IPPROTO_UDP)
2096 opcode = TX_UDPV6_PKT;
2097 }
2098 }
2099 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2100 first_desc->ip_hdr_offset += skb_network_offset(skb);
2101 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2102
2103 return 0;
2104}
2105
2106static int
2107qlcnic_map_tx_skb(struct pci_dev *pdev,
2108 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2109{
2110 struct qlcnic_skb_frag *nf;
2111 struct skb_frag_struct *frag;
2112 int i, nr_frags;
2113 dma_addr_t map;
2114
2115 nr_frags = skb_shinfo(skb)->nr_frags;
2116 nf = &pbuf->frag_array[0];
2117
2118 map = pci_map_single(pdev, skb->data,
2119 skb_headlen(skb), PCI_DMA_TODEVICE);
2120 if (pci_dma_mapping_error(pdev, map))
2121 goto out_err;
2122
2123 nf->dma = map;
2124 nf->length = skb_headlen(skb);
2125
2126 for (i = 0; i < nr_frags; i++) {
2127 frag = &skb_shinfo(skb)->frags[i];
2128 nf = &pbuf->frag_array[i+1];
2129
2130 map = pci_map_page(pdev, frag->page, frag->page_offset,
2131 frag->size, PCI_DMA_TODEVICE);
2132 if (pci_dma_mapping_error(pdev, map))
2133 goto unwind;
2134
2135 nf->dma = map;
2136 nf->length = frag->size;
2137 }
2138
2139 return 0;
2140
2141unwind:
2142 while (--i >= 0) {
2143 nf = &pbuf->frag_array[i+1];
2144 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2145 }
2146
2147 nf = &pbuf->frag_array[0];
2148 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2149
2150out_err:
2151 return -ENOMEM;
2152}
2153
2154static void
2155qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2156 struct qlcnic_cmd_buffer *pbuf)
2157{
2158 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2159 int nr_frags = skb_shinfo(skb)->nr_frags;
2160 int i;
2161
2162 for (i = 0; i < nr_frags; i++) {
2163 nf = &pbuf->frag_array[i+1];
2164 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2165 }
2166
2167 nf = &pbuf->frag_array[0];
2168 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2169 pbuf->skb = NULL;
2170}
2171
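/*
 * Only words 0, 2 and 7 of the Tx descriptor are cleared here; these appear
 * to be the control/metadata words (flags, opcode, header offsets, MSS,
 * VLAN tag), while the buffer address and length words are rewritten for
 * every fragment in qlcnic_xmit_frame() anyway.
 */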
2172static inline void
2173qlcnic_clear_cmddesc(u64 *desc)
2174{
2175 desc[0] = 0ULL;
2176 desc[2] = 0ULL;
2177 desc[7] = 0ULL;
2178}
2179
2180netdev_tx_t
2181qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2182{
2183 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2184 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2185 struct qlcnic_cmd_buffer *pbuf;
2186 struct qlcnic_skb_frag *buffrag;
2187 struct cmd_desc_type0 *hwdesc, *first_desc;
2188 struct pci_dev *pdev;
2189 struct ethhdr *phdr;
2190 int delta = 0;
2191 int i, k;
2192
2193 u32 producer;
2194 int frag_count;
2195 u32 num_txd = tx_ring->num_desc;
2196
2197 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2198 netif_stop_queue(netdev);
2199 return NETDEV_TX_BUSY;
2200 }
2201
2202 if (adapter->flags & QLCNIC_MACSPOOF) {
2203 phdr = (struct ethhdr *)skb->data;
2204 if (compare_ether_addr(phdr->h_source,
2205 adapter->mac_addr))
2206 goto drop_packet;
2207 }
2208
2209 frag_count = skb_shinfo(skb)->nr_frags + 1;
2210 /* 14 frags supported for normal packet and
2211 * 32 frags supported for TSO packet
2212 */
2213 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2214
2215 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2216 delta += skb_shinfo(skb)->frags[i].size;
2217
2218 if (!__pskb_pull_tail(skb, delta))
2219 goto drop_packet;
2220
2221 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2222 }
2223
2224 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2225 netif_stop_queue(netdev);
2226 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2227 netif_start_queue(netdev);
2228 else {
2229 adapter->stats.xmit_off++;
2230 return NETDEV_TX_BUSY;
2231 }
2232 }
2233
2234 producer = tx_ring->producer;
2235 pbuf = &tx_ring->cmd_buf_arr[producer];
2236
2237 pdev = adapter->pdev;
2238
2239 first_desc = hwdesc = &tx_ring->desc_head[producer];
2240 qlcnic_clear_cmddesc((u64 *)hwdesc);
2241
2242 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2243 adapter->stats.tx_dma_map_error++;
2244 goto drop_packet;
2245 }
2246
2247 pbuf->skb = skb;
2248 pbuf->frag_count = frag_count;
2249
2250 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2251 qlcnic_set_tx_port(first_desc, adapter->portnum);
2252
2253 for (i = 0; i < frag_count; i++) {
2254
2255 k = i % 4;
2256
2257 if ((k == 0) && (i > 0)) {
2258			/* move to next desc. */
2259 producer = get_next_index(producer, num_txd);
2260 hwdesc = &tx_ring->desc_head[producer];
2261 qlcnic_clear_cmddesc((u64 *)hwdesc);
2262 tx_ring->cmd_buf_arr[producer].skb = NULL;
2263 }
2264
2265 buffrag = &pbuf->frag_array[i];
2266
2267 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2268 switch (k) {
2269 case 0:
2270 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2271 break;
2272 case 1:
2273 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2274 break;
2275 case 2:
2276 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2277 break;
2278 case 3:
2279 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2280 break;
2281 }
2282 }
2283
2284 tx_ring->producer = get_next_index(producer, num_txd);
2285 smp_mb();
2286
2287 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2288 goto unwind_buff;
2289
2290 if (adapter->mac_learn)
2291 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2292
2293 adapter->stats.txbytes += skb->len;
2294 adapter->stats.xmitcalled++;
2295
2296 qlcnic_update_cmd_producer(adapter, tx_ring);
2297
2298 return NETDEV_TX_OK;
2299
2300unwind_buff:
2301 qlcnic_unmap_buffers(pdev, skb, pbuf);
2302drop_packet:
2303 adapter->stats.txdropped++;
2304 dev_kfree_skb_any(skb);
2305 return NETDEV_TX_OK;
2306}
2307
2308static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2309{
2310 struct net_device *netdev = adapter->netdev;
2311 u32 temp, temp_state, temp_val;
2312 int rv = 0;
2313
2314 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2315
2316 temp_state = qlcnic_get_temp_state(temp);
2317 temp_val = qlcnic_get_temp_val(temp);
2318
2319 if (temp_state == QLCNIC_TEMP_PANIC) {
2320 dev_err(&netdev->dev,
2321 "Device temperature %d degrees C exceeds"
2322 " maximum allowed. Hardware has been shut down.\n",
2323 temp_val);
2324 rv = 1;
2325 } else if (temp_state == QLCNIC_TEMP_WARN) {
2326 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2327 dev_err(&netdev->dev,
2328 "Device temperature %d degrees C "
2329 "exceeds operating range."
2330 " Immediate action needed.\n",
2331 temp_val);
2332 }
2333 } else {
2334 if (adapter->temp == QLCNIC_TEMP_WARN) {
2335 dev_info(&netdev->dev,
2336 "Device temperature is now %d degrees C"
2337 " in normal range.\n", temp_val);
2338 }
2339 }
2340 adapter->temp = temp_state;
2341 return rv;
2342}
2343
2344void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2345{
2346 struct net_device *netdev = adapter->netdev;
2347
2348 if (adapter->ahw->linkup && !linkup) {
2349 netdev_info(netdev, "NIC Link is down\n");
2350 adapter->ahw->linkup = 0;
2351 if (netif_running(netdev)) {
2352 netif_carrier_off(netdev);
2353 netif_stop_queue(netdev);
2354 }
2355 } else if (!adapter->ahw->linkup && linkup) {
2356 netdev_info(netdev, "NIC Link is up\n");
2357 adapter->ahw->linkup = 1;
2358 if (netif_running(netdev)) {
2359 netif_carrier_on(netdev);
2360 netif_wake_queue(netdev);
2361 }
2362 }
2363}
2364
2365static void qlcnic_tx_timeout(struct net_device *netdev)
2366{
2367 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2368
2369 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2370 return;
2371
2372 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2373
2374 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2375 adapter->need_fw_reset = 1;
2376 else
2377 adapter->reset_context = 1;
2378}
2379
2380static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2381{
2382 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2383 struct net_device_stats *stats = &netdev->stats;
2384
2385 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2386 stats->tx_packets = adapter->stats.xmitfinished;
2387 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
2388 stats->tx_bytes = adapter->stats.txbytes;
2389 stats->rx_dropped = adapter->stats.rxdropped;
2390 stats->tx_dropped = adapter->stats.txdropped;
2391
2392 return stats;
2393}
2394
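/*
 * For legacy INTx interrupts: check whether this adapter actually raised
 * the interrupt (both the source vector and the interrupt state machine
 * are consulted), acknowledge it by writing the target status register,
 * and read the vector back to flush the write.  Returning IRQ_NONE lets a
 * shared IRQ line be passed on to other handlers.
 */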
2395static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
2396{
2397 u32 status;
2398
2399 status = readl(adapter->isr_int_vec);
2400
2401 if (!(status & adapter->int_vec_bit))
2402 return IRQ_NONE;
2403
2404 /* check interrupt state machine, to be sure */
2405 status = readl(adapter->crb_int_state_reg);
2406 if (!ISR_LEGACY_INT_TRIGGERED(status))
2407 return IRQ_NONE;
2408
2409 writel(0xffffffff, adapter->tgt_status_reg);
2410 /* read twice to ensure write is flushed */
2411 readl(adapter->isr_int_vec);
2412 readl(adapter->isr_int_vec);
2413
2414 return IRQ_HANDLED;
2415}
2416
2417static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2418{
2419 struct qlcnic_host_sds_ring *sds_ring = data;
2420 struct qlcnic_adapter *adapter = sds_ring->adapter;
2421
2422 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2423 goto done;
2424 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2425 writel(0xffffffff, adapter->tgt_status_reg);
2426 goto done;
2427 }
2428
2429 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2430 return IRQ_NONE;
2431
2432done:
2433 adapter->diag_cnt++;
2434 qlcnic_enable_int(sds_ring);
2435 return IRQ_HANDLED;
2436}
2437
2438static irqreturn_t qlcnic_intr(int irq, void *data)
2439{
2440 struct qlcnic_host_sds_ring *sds_ring = data;
2441 struct qlcnic_adapter *adapter = sds_ring->adapter;
2442
2443 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2444 return IRQ_NONE;
2445
2446 napi_schedule(&sds_ring->napi);
2447
2448 return IRQ_HANDLED;
2449}
2450
2451static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2452{
2453 struct qlcnic_host_sds_ring *sds_ring = data;
2454 struct qlcnic_adapter *adapter = sds_ring->adapter;
2455
2456 /* clear interrupt */
2457 writel(0xffffffff, adapter->tgt_status_reg);
2458
2459 napi_schedule(&sds_ring->napi);
2460 return IRQ_HANDLED;
2461}
2462
2463static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2464{
2465 struct qlcnic_host_sds_ring *sds_ring = data;
2466
2467 napi_schedule(&sds_ring->napi);
2468 return IRQ_HANDLED;
2469}
2470
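/*
 * Reclaim completed Tx buffers up to the hardware consumer index, unmapping
 * their DMA buffers and freeing the skbs, and wake the queue once enough
 * descriptors are free again.  Returns 1 when the ring looks fully drained
 * (or when another CPU already holds tx_clean_lock), 0 otherwise.
 */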
2471static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2472{
2473 u32 sw_consumer, hw_consumer;
2474 int count = 0, i;
2475 struct qlcnic_cmd_buffer *buffer;
2476 struct pci_dev *pdev = adapter->pdev;
2477 struct net_device *netdev = adapter->netdev;
2478 struct qlcnic_skb_frag *frag;
2479 int done;
2480 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2481
2482 if (!spin_trylock(&adapter->tx_clean_lock))
2483 return 1;
2484
2485 sw_consumer = tx_ring->sw_consumer;
2486 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2487
2488 while (sw_consumer != hw_consumer) {
2489 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2490 if (buffer->skb) {
2491 frag = &buffer->frag_array[0];
2492 pci_unmap_single(pdev, frag->dma, frag->length,
2493 PCI_DMA_TODEVICE);
2494 frag->dma = 0ULL;
2495 for (i = 1; i < buffer->frag_count; i++) {
2496 frag++;
2497 pci_unmap_page(pdev, frag->dma, frag->length,
2498 PCI_DMA_TODEVICE);
2499 frag->dma = 0ULL;
2500 }
2501
2502 adapter->stats.xmitfinished++;
2503 dev_kfree_skb_any(buffer->skb);
2504 buffer->skb = NULL;
2505 }
2506
2507 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2508 if (++count >= MAX_STATUS_HANDLE)
2509 break;
2510 }
2511
2512 if (count && netif_running(netdev)) {
2513 tx_ring->sw_consumer = sw_consumer;
2514
2515 smp_mb();
2516
2517 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2518 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2519 netif_wake_queue(netdev);
2520 adapter->stats.xmit_on++;
2521 }
2522 }
2523 adapter->tx_timeo_cnt = 0;
2524 }
2525 /*
2526	 * If everything is freed up to the consumer, check whether the ring is
2527	 * full. If it is, check whether more needs to be freed and schedule the
2528	 * callback again.
2529 *
2530 * This happens when there are 2 CPUs. One could be freeing and the
2531 * other filling it. If the ring is full when we get out of here and
2532 * the card has already interrupted the host then the host can miss the
2533 * interrupt.
2534 *
2535 * There is still a possible race condition and the host could miss an
2536 * interrupt. The card has to take care of this.
2537 */
2538 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2539 done = (sw_consumer == hw_consumer);
2540 spin_unlock(&adapter->tx_clean_lock);
2541
2542 return done;
2543}
2544
2545static int qlcnic_poll(struct napi_struct *napi, int budget)
2546{
2547 struct qlcnic_host_sds_ring *sds_ring =
2548 container_of(napi, struct qlcnic_host_sds_ring, napi);
2549
2550 struct qlcnic_adapter *adapter = sds_ring->adapter;
2551
2552 int tx_complete;
2553 int work_done;
2554
2555 tx_complete = qlcnic_process_cmd_ring(adapter);
2556
2557 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2558
2559 if ((work_done < budget) && tx_complete) {
2560 napi_complete(&sds_ring->napi);
2561 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2562 qlcnic_enable_int(sds_ring);
2563 }
2564
2565 return work_done;
2566}
2567
2568static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2569{
2570 struct qlcnic_host_sds_ring *sds_ring =
2571 container_of(napi, struct qlcnic_host_sds_ring, napi);
2572
2573 struct qlcnic_adapter *adapter = sds_ring->adapter;
2574 int work_done;
2575
2576 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2577
2578 if (work_done < budget) {
2579 napi_complete(&sds_ring->napi);
2580 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2581 qlcnic_enable_int(sds_ring);
2582 }
2583
2584 return work_done;
2585}
2586
2587#ifdef CONFIG_NET_POLL_CONTROLLER
2588static void qlcnic_poll_controller(struct net_device *netdev)
2589{
2590 int ring;
2591 struct qlcnic_host_sds_ring *sds_ring;
2592 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2593 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2594
2595 disable_irq(adapter->irq);
2596 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2597 sds_ring = &recv_ctx->sds_rings[ring];
2598 qlcnic_intr(adapter->irq, sds_ring);
2599 }
2600 enable_irq(adapter->irq);
2601}
2602#endif
2603
2604static void
2605qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2606{
2607 u32 val;
2608
2609 val = adapter->portnum & 0xf;
2610 val |= encoding << 7;
2611 val |= (jiffies - adapter->dev_rst_time) << 8;
2612
2613 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2614 adapter->dev_rst_time = jiffies;
2615}
2616
2617static int
2618qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2619{
2620 u32 val;
2621
2622 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2623 state != QLCNIC_DEV_NEED_QUISCENT);
2624
2625 if (qlcnic_api_lock(adapter))
2626 return -EIO;
2627
2628 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2629
2630 if (state == QLCNIC_DEV_NEED_RESET)
2631 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2632 else if (state == QLCNIC_DEV_NEED_QUISCENT)
2633 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2634
2635 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2636
2637 qlcnic_api_unlock(adapter);
2638
2639 return 0;
2640}
2641
2642static int
2643qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2644{
2645 u32 val;
2646
2647 if (qlcnic_api_lock(adapter))
2648 return -EBUSY;
2649
2650 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2651 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2652 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2653
2654 qlcnic_api_unlock(adapter);
2655
2656 return 0;
2657}
2658
2659static void
2660qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2661{
2662 u32 val;
2663
2664 if (qlcnic_api_lock(adapter))
2665 goto err;
2666
2667 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2668 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2669 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2670
2671 if (failed) {
2672 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2673 dev_info(&adapter->pdev->dev,
2674			"Device state set to failed. Please reboot\n");
2675 } else if (!(val & 0x11111111))
2676 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2677
2678 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2679 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2680 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2681
2682 qlcnic_api_unlock(adapter);
2683err:
2684 adapter->fw_fail_cnt = 0;
2685 clear_bit(__QLCNIC_START_FW, &adapter->state);
2686 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2687}
2688
2689/* Grab api lock, before checking state */
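/*
 * Each PCI function appears to own a 4-bit nibble in the DRV_STATE and
 * DRV_ACTIVE CRB registers (note the (portnum * 4) shifts used elsewhere in
 * this file), so the 0x11111111 masks below select one per-function ack bit.
 */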
2690static int
2691qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2692{
2693 int act, state, active_mask;
2694
2695 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2696 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2697
2698 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2699 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2700 act = act & active_mask;
2701 }
2702
2703 if (((state & 0x11111111) == (act & 0x11111111)) ||
2704 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2705 return 0;
2706 else
2707 return 1;
2708}
2709
2710static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2711{
2712 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2713
2714 if (val != QLCNIC_DRV_IDC_VER) {
2715 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2716 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2717 }
2718
2719 return 0;
2720}
2721
2722static int
2723qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2724{
2725 u32 val, prev_state;
2726 u8 dev_init_timeo = adapter->dev_init_timeo;
2727 u8 portnum = adapter->portnum;
2728 u8 ret;
2729
2730 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2731 return 1;
2732
2733 if (qlcnic_api_lock(adapter))
2734 return -1;
2735
2736 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2737 if (!(val & (1 << (portnum * 4)))) {
2738 QLC_DEV_SET_REF_CNT(val, portnum);
2739 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2740 }
2741
2742 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2743 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2744
2745 switch (prev_state) {
2746 case QLCNIC_DEV_COLD:
2747 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2748 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2749 qlcnic_idc_debug_info(adapter, 0);
2750 qlcnic_api_unlock(adapter);
2751 return 1;
2752
2753 case QLCNIC_DEV_READY:
2754 ret = qlcnic_check_idc_ver(adapter);
2755 qlcnic_api_unlock(adapter);
2756 return ret;
2757
2758 case QLCNIC_DEV_NEED_RESET:
2759 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2760 QLC_DEV_SET_RST_RDY(val, portnum);
2761 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2762 break;
2763
2764 case QLCNIC_DEV_NEED_QUISCENT:
2765 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2766 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2767 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2768 break;
2769
2770 case QLCNIC_DEV_FAILED:
2771 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2772 qlcnic_api_unlock(adapter);
2773 return -1;
2774
2775 case QLCNIC_DEV_INITIALIZING:
2776 case QLCNIC_DEV_QUISCENT:
2777 break;
2778 }
2779
2780 qlcnic_api_unlock(adapter);
2781
2782 do {
2783 msleep(1000);
2784 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2785
2786 if (prev_state == QLCNIC_DEV_QUISCENT)
2787 continue;
2788 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2789
2790 if (!dev_init_timeo) {
2791 dev_err(&adapter->pdev->dev,
2792			"Timed out waiting for the device to initialize\n");
2793 return -1;
2794 }
2795
2796 if (qlcnic_api_lock(adapter))
2797 return -1;
2798
2799 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2800 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2801 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2802
2803 ret = qlcnic_check_idc_ver(adapter);
2804 qlcnic_api_unlock(adapter);
2805
2806 return ret;
2807}
2808
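/*
 * Delayed work that drives firmware re-initialization after a reset
 * request: wait for the other driver instances to ack (or for
 * reset_ack_timeo to expire), let the reset owner capture a firmware dump
 * and restart the firmware, then schedule qlcnic_attach_work();
 * non-privileged functions instead wait for the device to reach the READY
 * state.
 */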
2809static void
2810qlcnic_fwinit_work(struct work_struct *work)
2811{
2812 struct qlcnic_adapter *adapter = container_of(work,
2813 struct qlcnic_adapter, fw_work.work);
2814 u32 dev_state = 0xf;
2815 u32 val;
2816
2817 if (qlcnic_api_lock(adapter))
2818 goto err_ret;
2819
2820 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2821 if (dev_state == QLCNIC_DEV_QUISCENT ||
2822 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2823 qlcnic_api_unlock(adapter);
2824 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2825 FW_POLL_DELAY * 2);
2826 return;
2827 }
2828
2829 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2830 qlcnic_api_unlock(adapter);
2831 goto wait_npar;
2832 }
2833
2834 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2835		dev_err(&adapter->pdev->dev, "Reset: did not get ack in %d sec\n",
2836 adapter->reset_ack_timeo);
2837 goto skip_ack_check;
2838 }
2839
2840 if (!qlcnic_check_drv_state(adapter)) {
2841skip_ack_check:
2842 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2843
2844 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2845 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2846 QLCNIC_DEV_INITIALIZING);
2847 set_bit(__QLCNIC_START_FW, &adapter->state);
2848 QLCDB(adapter, DRV, "Restarting fw\n");
2849 qlcnic_idc_debug_info(adapter, 0);
2850 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2851 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2852 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2853 }
2854
2855 qlcnic_api_unlock(adapter);
2856
2857 rtnl_lock();
2858 if (adapter->ahw->fw_dump.enable &&
2859 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2860 QLCDB(adapter, DRV, "Take FW dump\n");
2861 qlcnic_dump_fw(adapter);
2862 }
2863 rtnl_unlock();
2864
2865 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
2866 if (!adapter->nic_ops->start_firmware(adapter)) {
2867 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2868 adapter->fw_wait_cnt = 0;
2869 return;
2870 }
2871 goto err_ret;
2872 }
2873
2874 qlcnic_api_unlock(adapter);
2875
2876wait_npar:
2877 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2878 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2879
2880 switch (dev_state) {
2881 case QLCNIC_DEV_READY:
2882 if (!adapter->nic_ops->start_firmware(adapter)) {
2883 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2884 adapter->fw_wait_cnt = 0;
2885 return;
2886		}	/* else fall through */
2887 case QLCNIC_DEV_FAILED:
2888 break;
2889 default:
2890 qlcnic_schedule_work(adapter,
2891 qlcnic_fwinit_work, FW_POLL_DELAY);
2892 return;
2893 }
2894
2895err_ret:
2896 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2897 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2898 netif_device_attach(adapter->netdev);
2899 qlcnic_clr_all_drv_state(adapter, 0);
2900}
2901
2902static void
2903qlcnic_detach_work(struct work_struct *work)
2904{
2905 struct qlcnic_adapter *adapter = container_of(work,
2906 struct qlcnic_adapter, fw_work.work);
2907 struct net_device *netdev = adapter->netdev;
2908 u32 status;
2909
2910 netif_device_detach(netdev);
2911
2912	/* Don't grab the rtnl lock during quiescent mode */
2913 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2914 if (netif_running(netdev))
2915 __qlcnic_down(adapter, netdev);
2916 } else
2917 qlcnic_down(adapter, netdev);
2918
2919 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2920
2921 if (status & QLCNIC_RCODE_FATAL_ERROR)
2922 goto err_ret;
2923
2924 if (adapter->temp == QLCNIC_TEMP_PANIC)
2925 goto err_ret;
2926	/* Don't ack if this instance is the reset owner */
2927 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2928 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2929 goto err_ret;
2930 }
2931
2932 adapter->fw_wait_cnt = 0;
2933
2934 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2935
2936 return;
2937
2938err_ret:
2939 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2940 status, adapter->temp);
2941 netif_device_attach(netdev);
2942 qlcnic_clr_all_drv_state(adapter, 1);
2943}
2944
2945/* Transition the NPAR state to non-operational */
2946static void
2947qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2948{
2949 u32 state;
2950
2951 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2952 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2953 return;
2954
2955 if (qlcnic_api_lock(adapter))
2956 return;
2957 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2958 qlcnic_api_unlock(adapter);
2959}
2960
2961/* Transition to RESET state from READY state only */
2962void
2963qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2964{
2965 u32 state;
2966
2967 adapter->need_fw_reset = 1;
2968 if (qlcnic_api_lock(adapter))
2969 return;
2970
2971 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2972
2973 if (state == QLCNIC_DEV_READY) {
2974 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2975 adapter->flags |= QLCNIC_FW_RESET_OWNER;
2976 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2977 qlcnic_idc_debug_info(adapter, 0);
2978 }
2979
2980 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2981 qlcnic_api_unlock(adapter);
2982}
2983
2984/* Transition to NPAR READY state from NPAR NOT READY state */
2985static void
2986qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2987{
2988 if (qlcnic_api_lock(adapter))
2989 return;
2990
2991 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2992 QLCDB(adapter, DRV, "NPAR operational state set\n");
2993
2994 qlcnic_api_unlock(adapter);
2995}
2996
2997static void
2998qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2999 work_func_t func, int delay)
3000{
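	/*
	 * fw_work is shared by the fwinit, attach, detach and poll handlers;
	 * don't queue anything while AER error recovery owns the device.
	 */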
3001 if (test_bit(__QLCNIC_AER, &adapter->state))
3002 return;
3003
3004 INIT_DELAYED_WORK(&adapter->fw_work, func);
3005 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3006 round_jiffies_relative(delay));
3007}
3008
3009static void
3010qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3011{
3012 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3013 msleep(10);
3014
3015 cancel_delayed_work_sync(&adapter->fw_work);
3016}
3017
3018static void
3019qlcnic_attach_work(struct work_struct *work)
3020{
3021 struct qlcnic_adapter *adapter = container_of(work,
3022 struct qlcnic_adapter, fw_work.work);
3023 struct net_device *netdev = adapter->netdev;
3024 u32 npar_state;
3025
3026 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3027 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3028 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3029 qlcnic_clr_all_drv_state(adapter, 0);
3030 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3031 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3032 FW_POLL_DELAY);
3033 else
3034 goto attach;
3035		QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
3036 return;
3037 }
3038attach:
3039 if (netif_running(netdev)) {
3040 if (qlcnic_up(adapter, netdev))
3041 goto done;
3042
3043 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3044 }
3045
3046done:
3047 netif_device_attach(netdev);
3048 adapter->fw_fail_cnt = 0;
3049 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3050
3051 if (!qlcnic_clr_drv_state(adapter))
3052 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3053 FW_POLL_DELAY);
3054}
3055
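/*
 * Poll the firmware heartbeat and device state.  Returns 1 (scheduling the
 * detach/recovery work when auto_fw_reset is set) if the firmware needs to
 * be reset, 0 if the adapter is healthy.
 */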
3056static int
3057qlcnic_check_health(struct qlcnic_adapter *adapter)
3058{
3059 u32 state = 0, heartbeat;
3060 struct net_device *netdev = adapter->netdev;
3061
3062 if (qlcnic_check_temp(adapter))
3063 goto detach;
3064
3065 if (adapter->need_fw_reset)
3066 qlcnic_dev_request_reset(adapter);
3067
3068 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3069 if (state == QLCNIC_DEV_NEED_RESET) {
3070 qlcnic_set_npar_non_operational(adapter);
3071 adapter->need_fw_reset = 1;
3072 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3073 goto detach;
3074
3075 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3076 if (heartbeat != adapter->heartbeat) {
3077 adapter->heartbeat = heartbeat;
3078 adapter->fw_fail_cnt = 0;
3079 if (adapter->need_fw_reset)
3080 goto detach;
3081
3082 if (adapter->reset_context && auto_fw_reset) {
3083 qlcnic_reset_hw_context(adapter);
3084 adapter->netdev->trans_start = jiffies;
3085 }
3086
3087 return 0;
3088 }
3089
3090 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3091 return 0;
3092
3093 qlcnic_dev_request_reset(adapter);
3094
3095 if (auto_fw_reset)
3096 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
3097
3098 dev_info(&netdev->dev, "firmware hang detected\n");
3099
3100detach:
3101 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3102 QLCNIC_DEV_NEED_RESET;
3103
3104 if (auto_fw_reset &&
3105 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3106
3107 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
3108 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3109 }
3110
3111 return 1;
3112}
3113
3114static void
3115qlcnic_fw_poll_work(struct work_struct *work)
3116{
3117 struct qlcnic_adapter *adapter = container_of(work,
3118 struct qlcnic_adapter, fw_work.work);
3119
3120 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3121 goto reschedule;
3122
3124 if (qlcnic_check_health(adapter))
3125 return;
3126
3127 if (adapter->fhash.fnum)
3128 qlcnic_prune_lb_filters(adapter);
3129
3130reschedule:
3131 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3132}
3133
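/*
 * Return 1 if every lower-numbered PCI function in this slot is in D3cold,
 * i.e. this is the first function of the adapter to be brought back up.
 */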
3134static int qlcnic_is_first_func(struct pci_dev *pdev)
3135{
3136 struct pci_dev *oth_pdev;
3137 int val = pdev->devfn;
3138
3139 while (val-- > 0) {
3140 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3141 (pdev->bus), pdev->bus->number,
3142 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
3143 if (!oth_pdev)
3144 continue;
3145
3146 if (oth_pdev->current_state != PCI_D3cold) {
3147 pci_dev_put(oth_pdev);
3148 return 0;
3149 }
3150 pci_dev_put(oth_pdev);
3151 }
3152 return 1;
3153}
3154
3155static int qlcnic_attach_func(struct pci_dev *pdev)
3156{
3157 int err, first_func;
3158 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3159 struct net_device *netdev = adapter->netdev;
3160
3161 pdev->error_state = pci_channel_io_normal;
3162
3163 err = pci_enable_device(pdev);
3164 if (err)
3165 return err;
3166
3167 pci_set_power_state(pdev, PCI_D0);
3168 pci_set_master(pdev);
3169 pci_restore_state(pdev);
3170
3171 first_func = qlcnic_is_first_func(pdev);
3172
3173 if (qlcnic_api_lock(adapter))
3174 return -EINVAL;
3175
3176 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
3177 adapter->need_fw_reset = 1;
3178 set_bit(__QLCNIC_START_FW, &adapter->state);
3179 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3180 QLCDB(adapter, DRV, "Restarting fw\n");
3181 }
3182 qlcnic_api_unlock(adapter);
3183
3184 err = adapter->nic_ops->start_firmware(adapter);
3185 if (err)
3186 return err;
3187
3188 qlcnic_clr_drv_state(adapter);
3189 qlcnic_setup_intr(adapter);
3190
3191 if (netif_running(netdev)) {
3192 err = qlcnic_attach(adapter);
3193 if (err) {
3194 qlcnic_clr_all_drv_state(adapter, 1);
3195 clear_bit(__QLCNIC_AER, &adapter->state);
3196 netif_device_attach(netdev);
3197 return err;
3198 }
3199
3200 err = qlcnic_up(adapter, netdev);
3201 if (err)
3202 goto done;
3203
3204 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3205 }
3206 done:
3207 netif_device_attach(netdev);
3208 return err;
3209}
3210
3211static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3212 pci_channel_state_t state)
3213{
3214 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3215 struct net_device *netdev = adapter->netdev;
3216
3217 if (state == pci_channel_io_perm_failure)
3218 return PCI_ERS_RESULT_DISCONNECT;
3219
3220 if (state == pci_channel_io_normal)
3221 return PCI_ERS_RESULT_RECOVERED;
3222
3223 set_bit(__QLCNIC_AER, &adapter->state);
3224 netif_device_detach(netdev);
3225
3226 cancel_delayed_work_sync(&adapter->fw_work);
3227
3228 if (netif_running(netdev))
3229 qlcnic_down(adapter, netdev);
3230
3231 qlcnic_detach(adapter);
3232 qlcnic_teardown_intr(adapter);
3233
3234 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3235
3236 pci_save_state(pdev);
3237 pci_disable_device(pdev);
3238
3239 return PCI_ERS_RESULT_NEED_RESET;
3240}
3241
3242static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3243{
3244 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3245 PCI_ERS_RESULT_RECOVERED;
3246}
3247
3248static void qlcnic_io_resume(struct pci_dev *pdev)
3249{
3250 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3251
3252 pci_cleanup_aer_uncorrect_error_status(pdev);
3253
3254 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3255 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3256 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3257 FW_POLL_DELAY);
3258}
3259
3260static int
3261qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3262{
3263 int err;
3264
3265 err = qlcnic_can_start_firmware(adapter);
3266 if (err)
3267 return err;
3268
3269 err = qlcnic_check_npar_opertional(adapter);
3270 if (err)
3271 return err;
3272
3273 err = qlcnic_initialize_nic(adapter);
3274 if (err)
3275 return err;
3276
3277 qlcnic_check_options(adapter);
3278
3279 err = qlcnic_set_eswitch_port_config(adapter);
3280 if (err)
3281 return err;
3282
3283 adapter->need_fw_reset = 0;
3284
3285 return err;
3286}
3287
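/*
 * Stubs for the non-privileged (virtual NIC) function: bridged mode and LED
 * control are owned by the management function, so report -EOPNOTSUPP here.
 */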
3288static int
3289qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3290{
3291 return -EOPNOTSUPP;
3292}
3293
3294static int
3295qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3296{
3297 return -EOPNOTSUPP;
3298}
3299
3300static ssize_t
3301qlcnic_store_bridged_mode(struct device *dev,
3302 struct device_attribute *attr, const char *buf, size_t len)
3303{
3304 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3305 unsigned long new;
3306 int ret = -EINVAL;
3307
3308 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3309 goto err_out;
3310
3311 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3312 goto err_out;
3313
3314 if (strict_strtoul(buf, 2, &new))
3315 goto err_out;
3316
3317 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
3318 ret = len;
3319
3320err_out:
3321 return ret;
3322}
3323
3324static ssize_t
3325qlcnic_show_bridged_mode(struct device *dev,
3326 struct device_attribute *attr, char *buf)
3327{
3328 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3329 int bridged_mode = 0;
3330
3331 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3332 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3333
3334 return sprintf(buf, "%d\n", bridged_mode);
3335}
3336
3337static struct device_attribute dev_attr_bridged_mode = {
3338 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3339 .show = qlcnic_show_bridged_mode,
3340 .store = qlcnic_store_bridged_mode,
3341};
3342
3343static ssize_t
3344qlcnic_store_diag_mode(struct device *dev,
3345 struct device_attribute *attr, const char *buf, size_t len)
3346{
3347 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3348 unsigned long new;
3349
3350 if (strict_strtoul(buf, 2, &new))
3351 return -EINVAL;
3352
3353 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3354 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3355
3356 return len;
3357}
3358
3359static ssize_t
3360qlcnic_show_diag_mode(struct device *dev,
3361 struct device_attribute *attr, char *buf)
3362{
3363 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3364
3365 return sprintf(buf, "%d\n",
3366 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3367}
3368
3369static struct device_attribute dev_attr_diag_mode = {
3370 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3371 .show = qlcnic_show_diag_mode,
3372 .store = qlcnic_store_diag_mode,
3373};
3374
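/*
 * The requested RSS ring count must be a power of two in [2, max_hw], and
 * MSI-X (or at least MSI) must be enabled since each ring needs its own
 * interrupt vector.
 */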
3375int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3376{
3377 if (!use_msi_x && !use_msi) {
3378 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3379 return -EINVAL;
3380 }
3381
3382 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3383		netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3384			"powers of 2\n", max_hw);
3385 return -EINVAL;
3386 }
3387 return 0;
3388
3389}
3390
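/*
 * Reconfigure interrupts for a new RSS ring count: quiesce and detach the
 * interface, re-negotiate MSI-X vectors (falling back to MSI/legacy on
 * failure), then re-attach and bring the device back up.
 */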
3391int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3392{
3393 struct net_device *netdev = adapter->netdev;
3394 int err = 0;
3395
3396 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3397 return -EBUSY;
3398
3399 netif_device_detach(netdev);
3400 if (netif_running(netdev))
3401 __qlcnic_down(adapter, netdev);
3402 qlcnic_detach(adapter);
3403 qlcnic_teardown_intr(adapter);
3404
3405 if (qlcnic_enable_msix(adapter, data)) {
3406 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3407 qlcnic_enable_msi_legacy(adapter);
3408 }
3409
3410 if (netif_running(netdev)) {
3411 err = qlcnic_attach(adapter);
3412 if (err)
3413 goto done;
3414 err = __qlcnic_up(adapter, netdev);
3415 if (err)
3416 goto done;
3417 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3418 }
3419 done:
3420 netif_device_attach(netdev);
3421 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3422 return err;
3423}
3424
3425static int
3426qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3427 loff_t offset, size_t size)
3428{
3429 size_t crb_size = 4;
3430
3431 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3432 return -EIO;
3433
3434 if (offset < QLCNIC_PCI_CRBSPACE) {
3435 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3436 QLCNIC_PCI_CAMQM_END))
3437 crb_size = 8;
3438 else
3439 return -EINVAL;
3440 }
3441
3442 if ((size != crb_size) || (offset & (crb_size-1)))
3443 return -EINVAL;
3444
3445 return 0;
3446}
3447
3448static ssize_t
3449qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3450 struct bin_attribute *attr,
3451 char *buf, loff_t offset, size_t size)
3452{
3453 struct device *dev = container_of(kobj, struct device, kobj);
3454 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3455 u32 data;
3456 u64 qmdata;
3457 int ret;
3458
3459 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3460 if (ret != 0)
3461 return ret;
3462
3463 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3464 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3465 memcpy(buf, &qmdata, size);
3466 } else {
3467 data = QLCRD32(adapter, offset);
3468 memcpy(buf, &data, size);
3469 }
3470 return size;
3471}
3472
3473static ssize_t
3474qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3475 struct bin_attribute *attr,
3476 char *buf, loff_t offset, size_t size)
3477{
3478 struct device *dev = container_of(kobj, struct device, kobj);
3479 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3480 u32 data;
3481 u64 qmdata;
3482 int ret;
3483
3484 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3485 if (ret != 0)
3486 return ret;
3487
3488 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3489 memcpy(&qmdata, buf, size);
3490 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3491 } else {
3492 memcpy(&data, buf, size);
3493 QLCWR32(adapter, offset, data);
3494 }
3495 return size;
3496}
3497
3498static int
3499qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3500 loff_t offset, size_t size)
3501{
3502 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3503 return -EIO;
3504
3505 if ((size != 8) || (offset & 0x7))
3506 return -EIO;
3507
3508 return 0;
3509}
3510
3511static ssize_t
3512qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3513 struct bin_attribute *attr,
3514 char *buf, loff_t offset, size_t size)
3515{
3516 struct device *dev = container_of(kobj, struct device, kobj);
3517 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3518 u64 data;
3519 int ret;
3520
3521 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3522 if (ret != 0)
3523 return ret;
3524
3525 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3526 return -EIO;
3527
3528 memcpy(buf, &data, size);
3529
3530 return size;
3531}
3532
3533static ssize_t
3534qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3535 struct bin_attribute *attr,
3536 char *buf, loff_t offset, size_t size)
3537{
3538 struct device *dev = container_of(kobj, struct device, kobj);
3539 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3540 u64 data;
3541 int ret;
3542
3543 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3544 if (ret != 0)
3545 return ret;
3546
3547 memcpy(&data, buf, size);
3548
3549 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3550 return -EIO;
3551
3552 return size;
3553}
3554
3555static struct bin_attribute bin_attr_crb = {
3556 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3557 .size = 0,
3558 .read = qlcnic_sysfs_read_crb,
3559 .write = qlcnic_sysfs_write_crb,
3560};
3561
3562static struct bin_attribute bin_attr_mem = {
3563 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3564 .size = 0,
3565 .read = qlcnic_sysfs_read_mem,
3566 .write = qlcnic_sysfs_write_mem,
3567};
3568
3569static int
3570validate_pm_config(struct qlcnic_adapter *adapter,
3571 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3572{
3573
3574 u8 src_pci_func, s_esw_id, d_esw_id;
3575 u8 dest_pci_func;
3576 int i;
3577
3578 for (i = 0; i < count; i++) {
3579 src_pci_func = pm_cfg[i].pci_func;
3580 dest_pci_func = pm_cfg[i].dest_npar;
3581 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3582 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3583 return QL_STATUS_INVALID_PARAM;
3584
3585 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3586 return QL_STATUS_INVALID_PARAM;
3587
3588 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3589 return QL_STATUS_INVALID_PARAM;
3590
3591 s_esw_id = adapter->npars[src_pci_func].phy_port;
3592 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3593
3594 if (s_esw_id != d_esw_id)
3595 return QL_STATUS_INVALID_PARAM;
3596
3597 }
3598 return 0;
3599
3600}
3601
3602static ssize_t
3603qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3604 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3605{
3606 struct device *dev = container_of(kobj, struct device, kobj);
3607 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3608 struct qlcnic_pm_func_cfg *pm_cfg;
3609 u32 id, action, pci_func;
3610 int count, rem, i, ret;
3611
3612 count = size / sizeof(struct qlcnic_pm_func_cfg);
3613 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3614 if (rem)
3615 return QL_STATUS_INVALID_PARAM;
3616
3617 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3618
3619 ret = validate_pm_config(adapter, pm_cfg, count);
3620 if (ret)
3621 return ret;
3622 for (i = 0; i < count; i++) {
3623 pci_func = pm_cfg[i].pci_func;
3624 action = !!pm_cfg[i].action;
3625 id = adapter->npars[pci_func].phy_port;
3626 ret = qlcnic_config_port_mirroring(adapter, id,
3627 action, pci_func);
3628 if (ret)
3629 return ret;
3630 }
3631
3632 for (i = 0; i < count; i++) {
3633 pci_func = pm_cfg[i].pci_func;
3634 id = adapter->npars[pci_func].phy_port;
3635 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3636 adapter->npars[pci_func].dest_npar = id;
3637 }
3638 return size;
3639}
3640
3641static ssize_t
3642qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3643 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3644{
3645 struct device *dev = container_of(kobj, struct device, kobj);
3646 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3647 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3648 int i;
3649
3650 if (size != sizeof(pm_cfg))
3651 return QL_STATUS_INVALID_PARAM;
3652	memset(&pm_cfg, 0, size);
3653	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3654 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3655 continue;
3656 pm_cfg[i].action = adapter->npars[i].enable_pm;
3657 pm_cfg[i].dest_npar = 0;
3658 pm_cfg[i].pci_func = i;
3659 }
3660 memcpy(buf, &pm_cfg, size);
3661
3662 return size;
3663}
3664
3665static int
3666validate_esw_config(struct qlcnic_adapter *adapter,
3667 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3668{
3669 u32 op_mode;
3670 u8 pci_func;
3671 int i;
3672
3673 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
3674
3675 for (i = 0; i < count; i++) {
3676 pci_func = esw_cfg[i].pci_func;
3677 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3678 return QL_STATUS_INVALID_PARAM;
3679
3680 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3681 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3682 return QL_STATUS_INVALID_PARAM;
3683
3684 switch (esw_cfg[i].op_mode) {
3685 case QLCNIC_PORT_DEFAULTS:
3686 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3687 QLCNIC_NON_PRIV_FUNC) {
3688 if (esw_cfg[i].mac_anti_spoof != 0)
3689 return QL_STATUS_INVALID_PARAM;
3690 if (esw_cfg[i].mac_override != 1)
3691 return QL_STATUS_INVALID_PARAM;
3692 if (esw_cfg[i].promisc_mode != 1)
3693 return QL_STATUS_INVALID_PARAM;
3694 }
3695 break;
3696 case QLCNIC_ADD_VLAN:
3697 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3698 return QL_STATUS_INVALID_PARAM;
3699 if (!esw_cfg[i].op_type)
3700 return QL_STATUS_INVALID_PARAM;
3701 break;
3702 case QLCNIC_DEL_VLAN:
3703 if (!esw_cfg[i].op_type)
3704 return QL_STATUS_INVALID_PARAM;
3705 break;
3706 default:
3707 return QL_STATUS_INVALID_PARAM;
3708 }
3709 }
3710 return 0;
3711}
3712
3713static ssize_t
3714qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3715 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3716{
3717 struct device *dev = container_of(kobj, struct device, kobj);
3718 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3719 struct qlcnic_esw_func_cfg *esw_cfg;
3720 struct qlcnic_npar_info *npar;
3721 int count, rem, i, ret;
3722 u8 pci_func, op_mode = 0;
3723
3724 count = size / sizeof(struct qlcnic_esw_func_cfg);
3725 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3726 if (rem)
3727 return QL_STATUS_INVALID_PARAM;
3728
3729 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3730 ret = validate_esw_config(adapter, esw_cfg, count);
3731 if (ret)
3732 return ret;
3733
3734 for (i = 0; i < count; i++) {
3735 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3736 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3737 return QL_STATUS_INVALID_PARAM;
3738
3739 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3740 continue;
3741
3742 op_mode = esw_cfg[i].op_mode;
3743 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3744 esw_cfg[i].op_mode = op_mode;
3745 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3746
3747 switch (esw_cfg[i].op_mode) {
3748 case QLCNIC_PORT_DEFAULTS:
3749 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3750 break;
3751 case QLCNIC_ADD_VLAN:
3752 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3753 break;
3754 case QLCNIC_DEL_VLAN:
3755 esw_cfg[i].vlan_id = 0;
3756 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3757 break;
3758 }
3759 }
3760
3761 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3762 goto out;
3763
3764 for (i = 0; i < count; i++) {
3765 pci_func = esw_cfg[i].pci_func;
3766 npar = &adapter->npars[pci_func];
3767 switch (esw_cfg[i].op_mode) {
3768 case QLCNIC_PORT_DEFAULTS:
3769 npar->promisc_mode = esw_cfg[i].promisc_mode;
3770 npar->mac_override = esw_cfg[i].mac_override;
3771 npar->offload_flags = esw_cfg[i].offload_flags;
3772 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3773 npar->discard_tagged = esw_cfg[i].discard_tagged;
3774 break;
3775 case QLCNIC_ADD_VLAN:
3776 npar->pvid = esw_cfg[i].vlan_id;
3777 break;
3778 case QLCNIC_DEL_VLAN:
3779 npar->pvid = 0;
3780 break;
3781 }
3782 }
3783out:
3784 return size;
3785}
3786
3787static ssize_t
3788qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3789 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3790{
3791 struct device *dev = container_of(kobj, struct device, kobj);
3792 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3793 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3794 u8 i;
3795
3796 if (size != sizeof(esw_cfg))
3797 return QL_STATUS_INVALID_PARAM;
3798	memset(&esw_cfg, 0, size);
3799	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3800 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3801 continue;
3802 esw_cfg[i].pci_func = i;
3803 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3804 return QL_STATUS_INVALID_PARAM;
3805 }
3806 memcpy(buf, &esw_cfg, size);
3807
3808 return size;
3809}
3810
3811static int
3812validate_npar_config(struct qlcnic_adapter *adapter,
3813 struct qlcnic_npar_func_cfg *np_cfg, int count)
3814{
3815 u8 pci_func, i;
3816
3817 for (i = 0; i < count; i++) {
3818 pci_func = np_cfg[i].pci_func;
3819 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3820 return QL_STATUS_INVALID_PARAM;
3821
3822 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3823 return QL_STATUS_INVALID_PARAM;
3824
3825 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3826 !IS_VALID_BW(np_cfg[i].max_bw))
3827 return QL_STATUS_INVALID_PARAM;
3828 }
3829 return 0;
3830}
3831
3832static ssize_t
3833qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3834 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3835{
3836 struct device *dev = container_of(kobj, struct device, kobj);
3837 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3838 struct qlcnic_info nic_info;
3839 struct qlcnic_npar_func_cfg *np_cfg;
3840 int i, count, rem, ret;
3841 u8 pci_func;
3842
3843 count = size / sizeof(struct qlcnic_npar_func_cfg);
3844 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3845 if (rem)
3846 return QL_STATUS_INVALID_PARAM;
3847
3848 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3849 ret = validate_npar_config(adapter, np_cfg, count);
3850 if (ret)
3851 return ret;
3852
3853 for (i = 0; i < count ; i++) {
3854 pci_func = np_cfg[i].pci_func;
3855 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3856 if (ret)
3857 return ret;
3858 nic_info.pci_func = pci_func;
3859 nic_info.min_tx_bw = np_cfg[i].min_bw;
3860 nic_info.max_tx_bw = np_cfg[i].max_bw;
3861 ret = qlcnic_set_nic_info(adapter, &nic_info);
3862 if (ret)
3863 return ret;
3864		adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3865		adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
3866 }
3867
3868 return size;
3869}
3870
3871static ssize_t
3872qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3873 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3874{
3875 struct device *dev = container_of(kobj, struct device, kobj);
3876 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3877 struct qlcnic_info nic_info;
3878 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3879 int i, ret;
3880
3881 if (size != sizeof(np_cfg))
3882 return QL_STATUS_INVALID_PARAM;
3883	memset(&np_cfg, 0, size);
3884	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3885 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3886 continue;
3887 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3888 if (ret)
3889 return ret;
3890
3891 np_cfg[i].pci_func = i;
3892 np_cfg[i].op_mode = (u8)nic_info.op_mode;
3893 np_cfg[i].port_num = nic_info.phys_port;
3894 np_cfg[i].fw_capab = nic_info.capabilities;
3895 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3896 np_cfg[i].max_bw = nic_info.max_tx_bw;
3897 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3898 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3899 }
3900 memcpy(buf, &np_cfg, size);
3901 return size;
3902}
3903
3904static ssize_t
3905qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3906 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3907{
3908 struct device *dev = container_of(kobj, struct device, kobj);
3909 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3910 struct qlcnic_esw_statistics port_stats;
3911 int ret;
3912
3913 if (size != sizeof(struct qlcnic_esw_statistics))
3914 return QL_STATUS_INVALID_PARAM;
3915
3916 if (offset >= QLCNIC_MAX_PCI_FUNC)
3917 return QL_STATUS_INVALID_PARAM;
3918
3919 memset(&port_stats, 0, size);
3920 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3921 &port_stats.rx);
3922 if (ret)
3923 return ret;
3924
3925 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3926 &port_stats.tx);
3927 if (ret)
3928 return ret;
3929
3930 memcpy(buf, &port_stats, size);
3931 return size;
3932}
3933
3934static ssize_t
3935qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3936 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3937{
3938 struct device *dev = container_of(kobj, struct device, kobj);
3939 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3940 struct qlcnic_esw_statistics esw_stats;
3941 int ret;
3942
3943 if (size != sizeof(struct qlcnic_esw_statistics))
3944 return QL_STATUS_INVALID_PARAM;
3945
3946 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3947 return QL_STATUS_INVALID_PARAM;
3948
3949 memset(&esw_stats, 0, size);
3950 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3951 &esw_stats.rx);
3952 if (ret)
3953 return ret;
3954
3955 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3956 &esw_stats.tx);
3957 if (ret)
3958 return ret;
3959
3960 memcpy(buf, &esw_stats, size);
3961 return size;
3962}
3963
3964static ssize_t
3965qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3966 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3967{
3968 struct device *dev = container_of(kobj, struct device, kobj);
3969 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3970 int ret;
3971
3972 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3973 return QL_STATUS_INVALID_PARAM;
3974
3975 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3976 QLCNIC_QUERY_RX_COUNTER);
3977 if (ret)
3978 return ret;
3979
3980 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3981 QLCNIC_QUERY_TX_COUNTER);
3982 if (ret)
3983 return ret;
3984
3985 return size;
3986}
3987
3988static ssize_t
3989qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3990 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3991{
3992
3993 struct device *dev = container_of(kobj, struct device, kobj);
3994 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3995 int ret;
3996
3997 if (offset >= QLCNIC_MAX_PCI_FUNC)
3998 return QL_STATUS_INVALID_PARAM;
3999
4000 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4001 QLCNIC_QUERY_RX_COUNTER);
4002 if (ret)
4003 return ret;
4004
4005 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4006 QLCNIC_QUERY_TX_COUNTER);
4007 if (ret)
4008 return ret;
4009
4010 return size;
4011}
4012
4013static ssize_t
4014qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4015 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4016{
4017 struct device *dev = container_of(kobj, struct device, kobj);
4018 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4019 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
4020 struct qlcnic_pci_info *pci_info;
4021 int i, ret;
4022
4023 if (size != sizeof(pci_cfg))
4024 return QL_STATUS_INVALID_PARAM;
4025
4026 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4027 if (!pci_info)
4028 return -ENOMEM;
4029
4030 ret = qlcnic_get_pci_info(adapter, pci_info);
4031 if (ret) {
4032 kfree(pci_info);
4033 return ret;
4034 }
4035
4036 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
4037 pci_cfg[i].pci_func = pci_info[i].id;
4038 pci_cfg[i].func_type = pci_info[i].type;
4039 pci_cfg[i].port_num = pci_info[i].default_port;
4040 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4041 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4042 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4043 }
4044 memcpy(buf, &pci_cfg, size);
4045 kfree(pci_info);
4046 return size;
4047}
4048static struct bin_attribute bin_attr_npar_config = {
4049 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4050 .size = 0,
4051 .read = qlcnic_sysfs_read_npar_config,
4052 .write = qlcnic_sysfs_write_npar_config,
4053};
4054
4055static struct bin_attribute bin_attr_pci_config = {
4056 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4057 .size = 0,
4058 .read = qlcnic_sysfs_read_pci_config,
4059 .write = NULL,
4060};
4061
4062static struct bin_attribute bin_attr_port_stats = {
4063 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4064 .size = 0,
4065 .read = qlcnic_sysfs_get_port_stats,
4066 .write = qlcnic_sysfs_clear_port_stats,
4067};
4068
4069static struct bin_attribute bin_attr_esw_stats = {
4070 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4071 .size = 0,
4072 .read = qlcnic_sysfs_get_esw_stats,
4073 .write = qlcnic_sysfs_clear_esw_stats,
4074};
4075
4076static struct bin_attribute bin_attr_esw_config = {
4077 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4078 .size = 0,
4079 .read = qlcnic_sysfs_read_esw_config,
4080 .write = qlcnic_sysfs_write_esw_config,
4081};
4082
4083static struct bin_attribute bin_attr_pm_config = {
4084 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4085 .size = 0,
4086 .read = qlcnic_sysfs_read_pm_config,
4087 .write = qlcnic_sysfs_write_pm_config,
4088};
4089
4090static void
4091qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4092{
4093 struct device *dev = &adapter->pdev->dev;
4094
4095 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4096 if (device_create_file(dev, &dev_attr_bridged_mode))
4097 dev_warn(dev,
4098 "failed to create bridged_mode sysfs entry\n");
4099}
4100
4101static void
4102qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4103{
4104 struct device *dev = &adapter->pdev->dev;
4105
4106 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4107 device_remove_file(dev, &dev_attr_bridged_mode);
4108}
4109
4110static void
4111qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4112{
4113 struct device *dev = &adapter->pdev->dev;
4114
4115 if (device_create_bin_file(dev, &bin_attr_port_stats))
4116		dev_info(dev, "failed to create port stats sysfs entry\n");
4117
4118 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4119 return;
4120 if (device_create_file(dev, &dev_attr_diag_mode))
4121 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4122 if (device_create_bin_file(dev, &bin_attr_crb))
4123 dev_info(dev, "failed to create crb sysfs entry\n");
4124 if (device_create_bin_file(dev, &bin_attr_mem))
4125 dev_info(dev, "failed to create mem sysfs entry\n");
4126 if (device_create_bin_file(dev, &bin_attr_pci_config))
4127		dev_info(dev, "failed to create pci config sysfs entry\n");
4128 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4129 return;
4130 if (device_create_bin_file(dev, &bin_attr_esw_config))
4131		dev_info(dev, "failed to create esw config sysfs entry\n");
4132 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4133 return;
4134 if (device_create_bin_file(dev, &bin_attr_npar_config))
4135		dev_info(dev, "failed to create npar config sysfs entry\n");
4136 if (device_create_bin_file(dev, &bin_attr_pm_config))
4137		dev_info(dev, "failed to create pm config sysfs entry\n");
4138 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4139		dev_info(dev, "failed to create eswitch stats sysfs entry\n");
4140}
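/*
 * Usage sketch (illustrative only, not part of the driver): the binary
 * attributes registered above appear under the adapter's PCI sysfs
 * directory.  Assuming a hypothetical device at 0000:03:00.0, a userspace
 * tool could fetch the per-port counters for PCI function 'func' roughly as:
 *
 *	struct qlcnic_esw_statistics stats;
 *	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats", O_RDONLY);
 *
 *	pread(fd, &stats, sizeof(stats), func);
 *	close(fd);
 *
 * The read size must equal sizeof(struct qlcnic_esw_statistics) and the file
 * offset selects the PCI function, exactly as enforced by
 * qlcnic_sysfs_get_port_stats() above.
 */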
4141
4142static void
4143qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4144{
4145 struct device *dev = &adapter->pdev->dev;
4146
4147 device_remove_bin_file(dev, &bin_attr_port_stats);
4148
4149 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4150 return;
4151 device_remove_file(dev, &dev_attr_diag_mode);
4152 device_remove_bin_file(dev, &bin_attr_crb);
4153 device_remove_bin_file(dev, &bin_attr_mem);
4154 device_remove_bin_file(dev, &bin_attr_pci_config);
4155 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4156 return;
4157 device_remove_bin_file(dev, &bin_attr_esw_config);
4158 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4159 return;
4160 device_remove_bin_file(dev, &bin_attr_npar_config);
4161 device_remove_bin_file(dev, &bin_attr_pm_config);
4162 device_remove_bin_file(dev, &bin_attr_esw_stats);
4163}
4164
4165#ifdef CONFIG_INET
4166
4167#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4168
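/*
 * Inet notifier support: keep the firmware's list of local IPv4 addresses in
 * sync with the stack (including addresses on VLAN devices stacked on this
 * netdev) so firmware features that match on local addresses, such as LRO,
 * keep working across address changes.
 */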
4169static void
4170qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4171 struct net_device *dev, unsigned long event)
4172{
4173 struct in_device *indev;
4174
4175 indev = in_dev_get(dev);
4176 if (!indev)
4177 return;
4178
4179 for_ifa(indev) {
4180 switch (event) {
4181 case NETDEV_UP:
4182 qlcnic_config_ipaddr(adapter,
4183 ifa->ifa_address, QLCNIC_IP_UP);
4184 break;
4185 case NETDEV_DOWN:
4186 qlcnic_config_ipaddr(adapter,
4187 ifa->ifa_address, QLCNIC_IP_DOWN);
4188 break;
4189 default:
4190 break;
4191 }
4192 } endfor_ifa(indev);
4193
4194 in_dev_put(indev);
4195}
4196
4197static void
4198qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4199{
4200 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4201 struct net_device *dev;
4202 u16 vid;
4203
4204 qlcnic_config_indev_addr(adapter, netdev, event);
4205
4206 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4207 dev = __vlan_find_dev_deep(netdev, vid);
4208 if (!dev)
4209 continue;
4210 qlcnic_config_indev_addr(adapter, dev, event);
4211 }
4212}
4213
4214static int qlcnic_netdev_event(struct notifier_block *this,
4215 unsigned long event, void *ptr)
4216{
4217 struct qlcnic_adapter *adapter;
4218 struct net_device *dev = (struct net_device *)ptr;
4219
4220recheck:
4221 if (dev == NULL)
4222 goto done;
4223
4224 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4225 dev = vlan_dev_real_dev(dev);
4226 goto recheck;
4227 }
4228
4229 if (!is_qlcnic_netdev(dev))
4230 goto done;
4231
4232 adapter = netdev_priv(dev);
4233
4234 if (!adapter)
4235 goto done;
4236
4237 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4238 goto done;
4239
4240 qlcnic_config_indev_addr(adapter, dev, event);
4241done:
4242 return NOTIFY_DONE;
4243}
4244
4245static int
4246qlcnic_inetaddr_event(struct notifier_block *this,
4247 unsigned long event, void *ptr)
4248{
4249 struct qlcnic_adapter *adapter;
4250 struct net_device *dev;
4251
4252 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4253
4254 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4255
4256recheck:
4257 if (dev == NULL)
4258 goto done;
4259
4260 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4261 dev = vlan_dev_real_dev(dev);
4262 goto recheck;
4263 }
4264
4265 if (!is_qlcnic_netdev(dev))
4266 goto done;
4267
4268 adapter = netdev_priv(dev);
4269
4270 if (!adapter)
4271 goto done;
4272
4273 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4274 goto done;
4275
4276 switch (event) {
4277 case NETDEV_UP:
4278 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4279 break;
4280 case NETDEV_DOWN:
4281 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4282 break;
4283 default:
4284 break;
4285 }
4286
4287done:
4288 return NOTIFY_DONE;
4289}
4290
4291static struct notifier_block qlcnic_netdev_cb = {
4292 .notifier_call = qlcnic_netdev_event,
4293};
4294
4295static struct notifier_block qlcnic_inetaddr_cb = {
4296 .notifier_call = qlcnic_inetaddr_event,
4297};
4298#else
4299static void
4300qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4301{ }
4302#endif
4303static struct pci_error_handlers qlcnic_err_handler = {
4304 .error_detected = qlcnic_io_error_detected,
4305 .slot_reset = qlcnic_io_slot_reset,
4306 .resume = qlcnic_io_resume,
4307};
4308
4309static struct pci_driver qlcnic_driver = {
4310 .name = qlcnic_driver_name,
4311 .id_table = qlcnic_pci_tbl,
4312 .probe = qlcnic_probe,
4313 .remove = __devexit_p(qlcnic_remove),
4314#ifdef CONFIG_PM
4315 .suspend = qlcnic_suspend,
4316 .resume = qlcnic_resume,
4317#endif
4318 .shutdown = qlcnic_shutdown,
4319 .err_handler = &qlcnic_err_handler
4321};
4322
4323static int __init qlcnic_init_module(void)
4324{
4325 int ret;
4326
4327 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4328
4329 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4330 if (qlcnic_wq == NULL) {
4331 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4332 return -ENOMEM;
4333 }
4334
4335#ifdef CONFIG_INET
4336 register_netdevice_notifier(&qlcnic_netdev_cb);
4337 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4338#endif
4339
4340 ret = pci_register_driver(&qlcnic_driver);
4341 if (ret) {
4342#ifdef CONFIG_INET
4343 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4344 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4345#endif
4346 destroy_workqueue(qlcnic_wq);
4347 }
4348
4349 return ret;
4350}
4351
4352module_init(qlcnic_init_module);
4353
4354static void __exit qlcnic_exit_module(void)
4355{
4356
4357 pci_unregister_driver(&qlcnic_driver);
4358
4359#ifdef CONFIG_INET
4360 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4361 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4362#endif
4363 destroy_workqueue(qlcnic_wq);
4364}
4365
4366module_exit(qlcnic_exit_module);