path: root/drivers/net/qlcnic
author		Amit Kumar Salecha <amit.salecha@qlogic.com>	2010-01-12 19:37:25 -0500
committer	David S. Miller <davem@davemloft.net>	2010-01-16 04:17:15 -0500
commit		af19b49152bdb68fda894183e88096d6d1aa5c3d (patch)
tree		811e6ecedf58c3356ac721a18d9a5270e9b2c40c /drivers/net/qlcnic
parent		6eb3a8553345ba2b4efd5390709e158289b9ece4 (diff)
qlcnic: QLogic Ethernet driver for CNA devices

o 1G/10G Ethernet driver for QLogic QLE8240 and QLE8242 CNA devices.

Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/qlcnic')
-rw-r--r--	drivers/net/qlcnic/Makefile	   8
-rw-r--r--	drivers/net/qlcnic/qlcnic.h	1106
-rw-r--r--	drivers/net/qlcnic/qlcnic_ctx.c	 536
-rw-r--r--	drivers/net/qlcnic/qlcnic_ethtool.c	 870
-rw-r--r--	drivers/net/qlcnic/qlcnic_hdr.h	 937
-rw-r--r--	drivers/net/qlcnic/qlcnic_hw.c	1201
-rw-r--r--	drivers/net/qlcnic/qlcnic_init.c	1466
-rw-r--r--	drivers/net/qlcnic/qlcnic_main.c	2604
8 files changed, 8728 insertions(+), 0 deletions(-)
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for QLogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..abec46846539
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1106 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef _QLCNIC_H_
26#define _QLCNIC_H_
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/ioport.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/tcp.h>
38#include <linux/skbuff.h>
39#include <linux/firmware.h>
40
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/timer.h>
44
45#include <linux/vmalloc.h>
46
47#include <linux/io.h>
48#include <asm/byteorder.h>
49
50#include "qlcnic_hdr.h"
51
52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0
55#define QLCNIC_LINUX_VERSIONID "5.0.0"
56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff)
59#define _minor(v) (((v) >> 16) & 0xff)
60#define _build(v) ((v) & 0xffff)
61
62/* version in image has weird encoding:
63 * 7:0 - major
64 * 15:8 - minor
65 * 31:16 - build (little endian)
66 */
67#define QLCNIC_DECODE_VERSION(v) \
68 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
69
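For illustration only (not part of this patch), a minimal sketch of how the image encoding maps onto the driver encoding via the macros above; the raw value is hypothetical:

static inline u32 qlcnic_version_decode_example(void)
{
	u32 raw = 0x00020004;	/* hypothetical image word: major 4, minor 0, build 2 */

	return QLCNIC_DECODE_VERSION(raw);	/* yields QLCNIC_VERSION_CODE(4, 0, 2) */
}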
70#define QLCNIC_NUM_FLASH_SECTORS (64)
71#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
72#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
73 * QLCNIC_FLASH_SECTOR_SIZE)
74
75#define RCV_DESC_RINGSIZE(rds_ring) \
76 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
77#define RCV_BUFF_RINGSIZE(rds_ring) \
78 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
79#define STATUS_DESC_RINGSIZE(sds_ring) \
80 (sizeof(struct status_desc) * (sds_ring)->num_desc)
81#define TX_BUFF_RINGSIZE(tx_ring) \
82 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
83#define TX_DESC_RINGSIZE(tx_ring) \
84 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
85
86#define QLCNIC_P3P_A0 0x50
87
88#define QLCNIC_IS_REVISION_P3P(REVISION) ((REVISION) >= QLCNIC_P3P_A0)
89
90#define FIRST_PAGE_GROUP_START 0
91#define FIRST_PAGE_GROUP_END 0x100000
92
93#define P3_MAX_MTU (9600)
94#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
95
96#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
97#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
98#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
99#define QLCNIC_LRO_BUFFER_EXTRA 2048
100
101#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
102
103/* Opcodes to be used with the commands */
104#define TX_ETHER_PKT 0x01
105#define TX_TCP_PKT 0x02
106#define TX_UDP_PKT 0x03
107#define TX_IP_PKT 0x04
108#define TX_TCP_LSO 0x05
109#define TX_TCP_LSO6 0x06
110#define TX_IPSEC 0x07
111#define TX_IPSEC_CMD 0x0a
112#define TX_TCPV6_PKT 0x0b
113#define TX_UDPV6_PKT 0x0c
114
115/* Tx defines */
116#define MAX_BUFFERS_PER_CMD 32
117#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
118#define QLCNIC_MAX_TX_TIMEOUTS 2
119
120/*
121 * Following are the states of the Phantom. The Phantom (firmware) sets them
122 * and the host reads them back to check that initialization completed.
123 */
124#define PHAN_INITIALIZE_FAILED 0xffff
125#define PHAN_INITIALIZE_COMPLETE 0xff01
126
127/* Host writes the following to notify that it has done the init-handshake */
128#define PHAN_INITIALIZE_ACK 0xf00f
129#define PHAN_PEG_RCV_INITIALIZED 0xff01
130
131#define NUM_RCV_DESC_RINGS 3
132#define NUM_STS_DESC_RINGS 4
133
134#define RCV_RING_NORMAL 0
135#define RCV_RING_JUMBO 1
136#define RCV_RING_LRO 2
137
138#define MIN_CMD_DESCRIPTORS 64
139#define MIN_RCV_DESCRIPTORS 64
140#define MIN_JUMBO_DESCRIPTORS 32
141
142#define MAX_CMD_DESCRIPTORS 1024
143#define MAX_RCV_DESCRIPTORS_1G 4096
144#define MAX_RCV_DESCRIPTORS_10G 8192
145#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
146#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
147#define MAX_LRO_RCV_DESCRIPTORS 8
148
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096
151
152#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1))
154
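The descriptor counts above are all powers of two, which is what makes this mask-based wrap valid. For illustration only (not part of this patch), a hypothetical value:

static inline u32 qlcnic_next_index_example(void)
{
	return get_next_index(1023, 1024);	/* wraps from 1023 back to 0 */
}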
155#define MPORT_MULTI_FUNCTION_MODE 0x2222
156
157/*
158 * Following data structures describe the descriptors that will be used.
159 * Added fields tcpHdrSize and ipHdrSize; the driver needs to fill them only
160 * when doing LSO (i.e. for packets larger than the standard 1500-byte frame).
161 */
162
163#define FLAGS_VLAN_TAGGED 0x10
164#define FLAGS_VLAN_OOB 0x40
165
166#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
167 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
168#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
169 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
170#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
171 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
172
173#define qlcnic_set_tx_port(_desc, _port) \
174 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
175
176#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
177 ((_desc)->flags_opcode = \
178 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
179
180#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
181 ((_desc)->nfrags__length = \
182 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
183
184struct cmd_desc_type0 {
185 u8 tcp_hdr_offset; /* For LSO only */
186 u8 ip_hdr_offset; /* For LSO only */
187 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
188 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
189
190 __le64 addr_buffer2;
191
192 __le16 reference_handle;
193 __le16 mss;
194 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
195 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
196 __le16 conn_id; /* IPSec offload only */
197
198 __le64 addr_buffer3;
199 __le64 addr_buffer1;
200
201 __le16 buffer_length[4];
202
203 __le64 addr_buffer4;
204
205 __le32 reserved2;
206 __le16 reserved;
207 __le16 vlan_TCI;
208
209} __attribute__ ((aligned(64)));
210
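For illustration only (not part of this patch), a sketch of how a transmit path could fill one descriptor with the helper macros above; the function name, port number and lengths are hypothetical:

static inline void qlcnic_tx_desc_fill_example(struct cmd_desc_type0 *desc,
					       u32 nr_frags, u32 pkt_len)
{
	qlcnic_set_tx_flags_opcode(desc, 0, TX_ETHER_PKT);	/* plain L2 frame */
	qlcnic_set_tx_frags_len(desc, nr_frags, pkt_len);	/* 7:0 frags, 31:8 length */
	qlcnic_set_tx_port(desc, 0);				/* hypothetical port 0 */
}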
211/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
212struct rcv_desc {
213 __le16 reference_handle;
214 __le16 reserved;
215 __le32 buffer_length; /* allocated buffer length (usually 2K) */
216 __le64 addr_buffer;
217};
218
219/* opcode field in status_desc */
220#define QLCNIC_SYN_OFFLOAD 0x03
221#define QLCNIC_RXPKT_DESC 0x04
222#define QLCNIC_OLD_RXPKT_DESC 0x3f
223#define QLCNIC_RESPONSE_DESC 0x05
224#define QLCNIC_LRO_DESC 0x12
225
226/* for status field in status_desc */
227#define STATUS_CKSUM_OK (2)
228
229/* owner bits of status_desc */
230#define STATUS_OWNER_HOST (0x1ULL << 56)
231#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
232
233/* Status descriptor:
234 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
235 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
236 53-55 desc_cnt, 56-57 owner, 58-63 opcode
237 */
238#define qlcnic_get_sts_port(sts_data) \
239 ((sts_data) & 0x0F)
240#define qlcnic_get_sts_status(sts_data) \
241 (((sts_data) >> 4) & 0x0F)
242#define qlcnic_get_sts_type(sts_data) \
243 (((sts_data) >> 8) & 0x0F)
244#define qlcnic_get_sts_totallength(sts_data) \
245 (((sts_data) >> 12) & 0xFFFF)
246#define qlcnic_get_sts_refhandle(sts_data) \
247 (((sts_data) >> 28) & 0xFFFF)
248#define qlcnic_get_sts_prot(sts_data) \
249 (((sts_data) >> 44) & 0x0F)
250#define qlcnic_get_sts_pkt_offset(sts_data) \
251 (((sts_data) >> 48) & 0x1F)
252#define qlcnic_get_sts_desc_cnt(sts_data) \
253 (((sts_data) >> 53) & 0x7)
254#define qlcnic_get_sts_opcode(sts_data) \
255 (((sts_data) >> 58) & 0x03F)
256
257#define qlcnic_get_lro_sts_refhandle(sts_data) \
258 ((sts_data) & 0x0FFFF)
259#define qlcnic_get_lro_sts_length(sts_data) \
260 (((sts_data) >> 16) & 0x0FFFF)
261#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
262 (((sts_data) >> 32) & 0x0FF)
263#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
264 (((sts_data) >> 40) & 0x0FF)
265#define qlcnic_get_lro_sts_timestamp(sts_data) \
266 (((sts_data) >> 48) & 0x1)
267#define qlcnic_get_lro_sts_type(sts_data) \
268 (((sts_data) >> 49) & 0x7)
269#define qlcnic_get_lro_sts_push_flag(sts_data) \
270 (((sts_data) >> 52) & 0x1)
271#define qlcnic_get_lro_sts_seq_number(sts_data) \
272 ((sts_data) & 0x0FFFFFFFF)
273
274
275struct status_desc {
276 __le64 status_desc_data[2];
277} __attribute__ ((aligned(16)));
278
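For illustration only (not part of this patch), a sketch of how the receive path could pull fields out of the first status word using the extraction macros above:

static inline void qlcnic_sts_decode_example(struct status_desc *desc)
{
	u64 sts_data = le64_to_cpu(desc->status_desc_data[0]);

	u8  opcode = qlcnic_get_sts_opcode(sts_data);	   /* e.g. QLCNIC_RXPKT_DESC */
	u16 length = qlcnic_get_sts_totallength(sts_data); /* bits 27:12 */
	u16 handle = qlcnic_get_sts_refhandle(sts_data);   /* bits 43:28 */
	u8  status = qlcnic_get_sts_status(sts_data);	   /* STATUS_CKSUM_OK when valid */

	(void)opcode; (void)length; (void)handle; (void)status;
}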
279/* UNIFIED ROMIMAGE */
280#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
281#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
282#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
283#define QLCNIC_UNI_DIR_SECT_FW 0x7
284
285/*Offsets */
286#define QLCNIC_UNI_CHIP_REV_OFF 10
287#define QLCNIC_UNI_FLAGS_OFF 11
288#define QLCNIC_UNI_BIOS_VERSION_OFF 12
289#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
290#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
291
292struct uni_table_desc{
293 u32 findex;
294 u32 num_entries;
295 u32 entry_size;
296 u32 reserved[5];
297};
298
299struct uni_data_desc{
300 u32 findex;
301 u32 size;
302 u32 reserved[5];
303};
304
305/* Magic number to let user know flash is programmed */
306#define QLCNIC_BDINFO_MAGIC 0x12345678
307
308#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
309#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
310#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
311#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
312#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
313#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
314#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
315#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
316#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
317#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
318#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
319#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
320#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
321#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
322
323/* Flash memory map */
324#define QLCNIC_BRDCFG_START 0x4000 /* board config */
325#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
326#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
327#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
328
329#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
330#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
331#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
332#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
333
334#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
335#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
336
337#define QLCNIC_FW_MIN_SIZE (0x3fffff)
338#define QLCNIC_UNIFIED_ROMIMAGE 0
339#define QLCNIC_FLASH_ROMIMAGE 1
340#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
341
342#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
343#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
344
345extern char qlcnic_driver_name[];
346
347/* Number of status descriptors to handle per interrupt */
348#define MAX_STATUS_HANDLE (64)
349
350/*
351 * qlcnic_skb_frag{} holds the DMA mapping info for one SG-list entry. The
352 * mapping has to be released when DMA is complete. This is part of qlcnic_cmd_buffer{}.
353 */
354struct qlcnic_skb_frag {
355 u64 dma;
356 u64 length;
357};
358
359struct qlcnic_recv_crb {
360 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
361 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
362 u32 sw_int_mask[NUM_STS_DESC_RINGS];
363};
364
365/* Following defines are for the state of the buffers */
366#define QLCNIC_BUFFER_FREE 0
367#define QLCNIC_BUFFER_BUSY 1
368
369/*
370 * There will be one qlcnic_cmd_buffer per skb packet. These are
371 * used to save the DMA info for pci_unmap_page()
372 */
373struct qlcnic_cmd_buffer {
374 struct sk_buff *skb;
375 struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
376 u32 frag_count;
377};
378
379/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
380struct qlcnic_rx_buffer {
381 struct list_head list;
382 struct sk_buff *skb;
383 u64 dma;
384 u16 ref_handle;
385 u16 state;
386};
387
388/* Board types */
389#define QLCNIC_GBE 0x01
390#define QLCNIC_XGBE 0x02
391
392/*
393 * One hardware_context{} per adapter
394 * contains interrupt info as well as shared hardware info.
395 */
396struct qlcnic_hardware_context {
397 void __iomem *pci_base0;
398 void __iomem *ocm_win_crb;
399
400 unsigned long pci_len0;
401
402 u32 ocm_win;
403 u32 crb_win;
404
405 rwlock_t crb_lock;
406 struct mutex mem_lock;
407
408 u8 cut_through;
409 u8 revision_id;
410 u8 pci_func;
411 u8 linkup;
412 u16 port_type;
413 u16 board_type;
414};
415
416struct qlcnic_adapter_stats {
417 u64 xmitcalled;
418 u64 xmitfinished;
419 u64 rxdropped;
420 u64 txdropped;
421 u64 csummed;
422 u64 rx_pkts;
423 u64 lro_pkts;
424 u64 rxbytes;
425 u64 txbytes;
426};
427
428/*
429 * Rcv Descriptor Context. One such per Rcv Descriptor ring. There may
430 * be one Rcv Descriptor ring for normal packets, one for jumbo frames, and possibly others.
431 */
432struct qlcnic_host_rds_ring {
433 u32 producer;
434 u32 num_desc;
435 u32 dma_size;
436 u32 skb_size;
437 u32 flags;
438 void __iomem *crb_rcv_producer;
439 struct rcv_desc *desc_head;
440 struct qlcnic_rx_buffer *rx_buf_arr;
441 struct list_head free_list;
442 spinlock_t lock;
443 dma_addr_t phys_addr;
444};
445
446struct qlcnic_host_sds_ring {
447 u32 consumer;
448 u32 num_desc;
449 void __iomem *crb_sts_consumer;
450 void __iomem *crb_intr_mask;
451
452 struct status_desc *desc_head;
453 struct qlcnic_adapter *adapter;
454 struct napi_struct napi;
455 struct list_head free_list[NUM_RCV_DESC_RINGS];
456
457 int irq;
458
459 dma_addr_t phys_addr;
460 char name[IFNAMSIZ+4];
461};
462
463struct qlcnic_host_tx_ring {
464 u32 producer;
465 __le32 *hw_consumer;
466 u32 sw_consumer;
467 void __iomem *crb_cmd_producer;
468 u32 num_desc;
469
470 struct netdev_queue *txq;
471
472 struct qlcnic_cmd_buffer *cmd_buf_arr;
473 struct cmd_desc_type0 *desc_head;
474 dma_addr_t phys_addr;
475 dma_addr_t hw_cons_phys_addr;
476};
477
478/*
479 * Receive context. There is one such structure per instance of the
480 * receive processing. Any state information that is relevant to
481 * the receive path must be kept in this structure. Global data may be
482 * present elsewhere.
483 */
484struct qlcnic_recv_context {
485 u32 state;
486 u16 context_id;
487 u16 virt_port;
488
489 struct qlcnic_host_rds_ring *rds_rings;
490 struct qlcnic_host_sds_ring *sds_rings;
491};
492
493/* HW context creation */
494
495#define QLCNIC_OS_CRB_RETRY_COUNT 4000
496#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
497 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
498
499#define QLCNIC_CDRP_CMD_BIT 0x80000000
500
501/*
502 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
503 * in the crb QLCNIC_CDRP_CRB_OFFSET.
504 */
505#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
506#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
507
508#define QLCNIC_CDRP_RSP_OK 0x00000001
509#define QLCNIC_CDRP_RSP_FAIL 0x00000002
510#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
511
512/*
513 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
514 * the crb QLCNIC_CDRP_CRB_OFFSET.
515 */
516#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
517#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
518
519#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
520#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
521#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
522#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
523#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
524#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
525#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
526#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
527#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
528#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
529#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
530#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
531#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
532#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
533#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
534#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
535#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
536#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
537#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
538#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
539#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
540#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
541#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f
546
547#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17
549#define QLCNIC_DESTROY_CTX_RESET 0
550
551/*
552 * Capabilities Announced
553 */
554#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
555#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
556#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
559
560/*
561 * Context state
562 */
563#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
564
565/*
566 * Rx context
567 */
568
569struct qlcnic_hostrq_sds_ring {
570 __le64 host_phys_addr; /* Ring base addr */
571 __le32 ring_size; /* Ring entries */
572 __le16 msi_index;
573 __le16 rsvd; /* Padding */
574};
575
576struct qlcnic_hostrq_rds_ring {
577 __le64 host_phys_addr; /* Ring base addr */
578 __le64 buff_size; /* Packet buffer size */
579 __le32 ring_size; /* Ring entries */
580 __le32 ring_kind; /* Class of ring */
581};
582
583struct qlcnic_hostrq_rx_ctx {
584 __le64 host_rsp_dma_addr; /* Response dma'd here */
585 __le32 capabilities[4]; /* Flag bit vector */
586 __le32 host_int_crb_mode; /* Interrupt crb usage */
587 __le32 host_rds_crb_mode; /* RDS crb usage */
588 /* These ring offsets are relative to data[0] below */
589 __le32 rds_ring_offset; /* Offset to RDS config */
590 __le32 sds_ring_offset; /* Offset to SDS config */
591 __le16 num_rds_rings; /* Count of RDS rings */
592 __le16 num_sds_rings; /* Count of SDS rings */
593 __le16 rsvd1; /* Padding */
594 __le16 rsvd2; /* Padding */
595 u8 reserved[128]; /* reserve space for future expansion*/
596 /* MUST BE 64-bit aligned.
597 The following is packed:
598 - N hostrq_rds_rings
599 - N hostrq_sds_rings */
600 char data[0];
601};
602
603struct qlcnic_cardrsp_rds_ring{
604 __le32 host_producer_crb; /* Crb to use */
605 __le32 rsvd1; /* Padding */
606};
607
608struct qlcnic_cardrsp_sds_ring {
609 __le32 host_consumer_crb; /* Crb to use */
610 __le32 interrupt_crb; /* Crb to use */
611};
612
613struct qlcnic_cardrsp_rx_ctx {
614 /* These ring offsets are relative to data[0] below */
615 __le32 rds_ring_offset; /* Offset to RDS config */
616 __le32 sds_ring_offset; /* Offset to SDS config */
617 __le32 host_ctx_state; /* Starting State */
618 __le32 num_fn_per_port; /* How many PCI fn share the port */
619 __le16 num_rds_rings; /* Count of RDS rings */
620 __le16 num_sds_rings; /* Count of SDS rings */
621 __le16 context_id; /* Handle for context */
622 u8 phys_port; /* Physical id of port */
623 u8 virt_port; /* Virtual/Logical id of port */
624 u8 reserved[128]; /* save space for future expansion */
625 /* MUST BE 64-bit aligned.
626 The following is packed:
627 - N cardrsp_rds_rings
628 - N cardrs_sds_rings */
629 char data[0];
630};
631
632#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
633 (sizeof(HOSTRQ_RX) + \
634 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
635 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
636
637#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
638 (sizeof(CARDRSP_RX) + \
639 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
640 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
641
642/*
643 * Tx context
644 */
645
646struct qlcnic_hostrq_cds_ring {
647 __le64 host_phys_addr; /* Ring base addr */
648 __le32 ring_size; /* Ring entries */
649 __le32 rsvd; /* Padding */
650};
651
652struct qlcnic_hostrq_tx_ctx {
653 __le64 host_rsp_dma_addr; /* Response dma'd here */
654 __le64 cmd_cons_dma_addr; /* */
655 __le64 dummy_dma_addr; /* */
656 __le32 capabilities[4]; /* Flag bit vector */
657 __le32 host_int_crb_mode; /* Interrupt crb usage */
658 __le32 rsvd1; /* Padding */
659 __le16 rsvd2; /* Padding */
660 __le16 interrupt_ctl;
661 __le16 msi_index;
662 __le16 rsvd3; /* Padding */
663 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
664 u8 reserved[128]; /* future expansion */
665};
666
667struct qlcnic_cardrsp_cds_ring {
668 __le32 host_producer_crb; /* Crb to use */
669 __le32 interrupt_crb; /* Crb to use */
670};
671
672struct qlcnic_cardrsp_tx_ctx {
673 __le32 host_ctx_state; /* Starting state */
674 __le16 context_id; /* Handle for context */
675 u8 phys_port; /* Physical id of port */
676 u8 virt_port; /* Virtual/Logical id of port */
677 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
678 u8 reserved[128]; /* future expansion */
679};
680
681#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
682#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
683
684/* CRB */
685
686#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
687#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
688#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
689#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
690
691#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
692#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
693#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
694#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
695#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
696
697
698/* MAC */
699
700#define MC_COUNT_P3 38
701
702#define QLCNIC_MAC_NOOP 0
703#define QLCNIC_MAC_ADD 1
704#define QLCNIC_MAC_DEL 2
705
706struct qlcnic_mac_list_s {
707 struct list_head list;
708 uint8_t mac_addr[ETH_ALEN+2];
709};
710
711/*
712 * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and are
713 * adjusted based on the configured MTU.
714 */
715#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
716#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
717#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
718#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
719
720#define QLCNIC_INTR_DEFAULT 0x04
721
722union qlcnic_nic_intr_coalesce_data {
723 struct {
724 u16 rx_packets;
725 u16 rx_time_us;
726 u16 tx_packets;
727 u16 tx_time_us;
728 } data;
729 u64 word;
730};
731
732struct qlcnic_nic_intr_coalesce {
733 u16 stats_time_us;
734 u16 rate_sample_time;
735 u16 flags;
736 u16 rsvd_1;
737 u32 low_threshold;
738 u32 high_threshold;
739 union qlcnic_nic_intr_coalesce_data normal;
740 union qlcnic_nic_intr_coalesce_data low;
741 union qlcnic_nic_intr_coalesce_data high;
742 union qlcnic_nic_intr_coalesce_data irq;
743};
744
745#define QLCNIC_HOST_REQUEST 0x13
746#define QLCNIC_REQUEST 0x14
747
748#define QLCNIC_MAC_EVENT 0x1
749
750#define QLCNIC_IP_UP 2
751#define QLCNIC_IP_DOWN 3
752
753/*
754 * Driver --> Firmware
755 */
756#define QLCNIC_H2C_OPCODE_START 0
757#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
758#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
759#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
760#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
761#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
762#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
763#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
764#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
765#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
766#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
767#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
768#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
769#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
770#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
771#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
772#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
773#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
774#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
775#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
776#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
777#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
778#define QLCNIC_C2C_OPCODE 22
779#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
780#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
781#define QLCNIC_H2C_OPCODE_LAST 25
782/*
783 * Firmware --> Driver
784 */
785
786#define QLCNIC_C2H_OPCODE_START 128
787#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
788#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
789#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
790#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
791#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
792#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
793#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
794#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
795#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
796#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
797#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
798#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
799#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
800#define QLCNIC_C2H_OPCODE_LAST 142
801
802#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
803#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
804#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
805
806#define QLCNIC_LRO_REQUEST_CLEANUP 4
807
808/* Capabilities received */
809#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
810#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
811#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
812
813/* module types */
814#define LINKEVENT_MODULE_NOT_PRESENT 1
815#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
816#define LINKEVENT_MODULE_OPTICAL_SRLR 3
817#define LINKEVENT_MODULE_OPTICAL_LRM 4
818#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
819#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
820#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
821#define LINKEVENT_MODULE_TWINAX 8
822
823#define LINKSPEED_10GBPS 10000
824#define LINKSPEED_1GBPS 1000
825#define LINKSPEED_100MBPS 100
826#define LINKSPEED_10MBPS 10
827
828#define LINKSPEED_ENCODED_10MBPS 0
829#define LINKSPEED_ENCODED_100MBPS 1
830#define LINKSPEED_ENCODED_1GBPS 2
831
832#define LINKEVENT_AUTONEG_DISABLED 0
833#define LINKEVENT_AUTONEG_ENABLED 1
834
835#define LINKEVENT_HALF_DUPLEX 0
836#define LINKEVENT_FULL_DUPLEX 1
837
838#define LINKEVENT_LINKSPEED_MBPS 0
839#define LINKEVENT_LINKSPEED_ENCODED 1
840
841#define AUTO_FW_RESET_ENABLED 0x01
842/* firmware response header:
843 * 63:58 - message type
844 * 57:56 - owner
845 * 55:53 - desc count
846 * 52:48 - reserved
847 * 47:40 - completion id
848 * 39:32 - opcode
849 * 31:16 - error code
850 * 15:00 - reserved
851 */
852#define qlcnic_get_nic_msg_opcode(msg_hdr) \
853 ((msg_hdr >> 32) & 0xFF)
854
855struct qlcnic_fw_msg {
856 union {
857 struct {
858 u64 hdr;
859 u64 body[7];
860 };
861 u64 words[8];
862 };
863};
864
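For illustration only (not part of this patch), a sketch of how a handler could read the opcode out of a received firmware message header using the macro above:

static inline u8 qlcnic_msg_opcode_example(struct qlcnic_fw_msg *msg)
{
	return qlcnic_get_nic_msg_opcode(msg->hdr);	/* bits 39:32 of the header */
}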
865struct qlcnic_nic_req {
866 __le64 qhdr;
867 __le64 req_hdr;
868 __le64 words[6];
869};
870
871struct qlcnic_mac_req {
872 u8 op;
873 u8 tag;
874 u8 mac_addr[6];
875};
876
877#define QLCNIC_MSI_ENABLED 0x02
878#define QLCNIC_MSIX_ENABLED 0x04
879#define QLCNIC_LRO_ENABLED 0x08
880#define QLCNIC_BRIDGE_ENABLED 0X10
881#define QLCNIC_DIAG_ENABLED 0x20
882#define QLCNIC_IS_MSI_FAMILY(adapter) \
883 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
884
885#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
886#define QLCNIC_MSIX_TBL_SPACE 8192
887#define QLCNIC_PCI_REG_MSIX_TBL 0x44
888
889#define QLCNIC_NETDEV_WEIGHT 128
890#define QLCNIC_ADAPTER_UP_MAGIC 777
891
892#define __QLCNIC_FW_ATTACHED 0
893#define __QLCNIC_DEV_UP 1
894#define __QLCNIC_RESETTING 2
895#define __QLCNIC_START_FW 4
896
897struct qlcnic_adapter {
898 struct qlcnic_hardware_context ahw;
899
900 struct net_device *netdev;
901 struct pci_dev *pdev;
902 struct list_head mac_list;
903
904 spinlock_t tx_clean_lock;
905
906 u16 num_txd;
907 u16 num_rxd;
908 u16 num_jumbo_rxd;
909 u16 num_lro_rxd;
910
911 u8 max_rds_rings;
912 u8 max_sds_rings;
913 u8 driver_mismatch;
914 u8 msix_supported;
915 u8 rx_csum;
916 u8 pci_using_dac;
917 u8 portnum;
918 u8 physical_port;
919
920 u8 mc_enabled;
921 u8 max_mc_count;
922 u8 rss_supported;
923 u8 rsrvd1;
924 u8 fw_wait_cnt;
925 u8 fw_fail_cnt;
926 u8 tx_timeo_cnt;
927 u8 need_fw_reset;
928
929 u8 has_link_events;
930 u8 fw_type;
931 u16 tx_context_id;
932 u16 mtu;
933 u16 is_up;
934
935 u16 link_speed;
936 u16 link_duplex;
937 u16 link_autoneg;
938 u16 module_type;
939
940 u32 capabilities;
941 u32 flags;
942 u32 irq;
943 u32 temp;
944
945 u32 int_vec_bit;
946 u32 heartbit;
947
948 u8 dev_state;
949 u8 rsrd1;
950 u32 rsrd2;
951
952
953 u8 mac_addr[ETH_ALEN];
954
955 struct qlcnic_adapter_stats stats;
956
957 struct qlcnic_recv_context recv_ctx;
958 struct qlcnic_host_tx_ring *tx_ring;
959
960 void __iomem *tgt_mask_reg;
961 void __iomem *tgt_status_reg;
962 void __iomem *crb_int_state_reg;
963 void __iomem *isr_int_vec;
964
965 struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
966
967 struct delayed_work fw_work;
968
969 struct work_struct tx_timeout_task;
970
971 struct qlcnic_nic_intr_coalesce coal;
972
973 unsigned long state;
974 __le32 file_prd_off; /*File fw product offset*/
975 u32 fw_version;
976 const struct firmware *fw;
977};
978
979int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
980int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
981
982u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
983int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
984int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
985int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
986
987#define QLCRD32(adapter, off) \
988 (qlcnic_hw_read_wx_2M(adapter, off))
989#define QLCWR32(adapter, off, val) \
990 (qlcnic_hw_write_wx_2M(adapter, off, val))
991
992int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
993void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
994
995#define qlcnic_rom_lock(a) \
996 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
997#define qlcnic_rom_unlock(a) \
998 qlcnic_pcie_sem_unlock((a), 2)
999#define qlcnic_phy_lock(a) \
1000 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1001#define qlcnic_phy_unlock(a) \
1002 qlcnic_pcie_sem_unlock((a), 3)
1003#define qlcnic_api_lock(a) \
1004 qlcnic_pcie_sem_lock((a), 5, 0)
1005#define qlcnic_api_unlock(a) \
1006 qlcnic_pcie_sem_unlock((a), 5)
1007#define qlcnic_sw_lock(a) \
1008 qlcnic_pcie_sem_lock((a), 6, 0)
1009#define qlcnic_sw_unlock(a) \
1010 qlcnic_pcie_sem_unlock((a), 6)
1011#define crb_win_lock(a) \
1012 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1013#define crb_win_unlock(a) \
1014 qlcnic_pcie_sem_unlock((a), 7)
1015
1016int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1017int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1018
1019/* Functions from qlcnic_init.c */
1020int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
1021int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1022int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1023void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1024void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1025int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1026
1027int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1028int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1029 u8 *bytes, size_t size);
1030int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1031void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1032
1033void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1034
1035int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1036void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1037
1038void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1039void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1040
1041int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
1042void qlcnic_watchdog_task(struct work_struct *work);
1043void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1044 struct qlcnic_host_rds_ring *rds_ring);
1045int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1046void qlcnic_set_multi(struct net_device *netdev);
1047void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1048int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1049int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1050int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1051int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
1052int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1053void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1054
1055int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1056int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1057int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1058int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
1059int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1060void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1061 struct qlcnic_host_tx_ring *tx_ring);
1062int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
1063
1064/* Functions from qlcnic_main.c */
1065int qlcnic_reset_context(struct qlcnic_adapter *);
1066
1067/*
1068 * QLOGIC Board information
1069 */
1070
1071#define QLCNIC_MAX_BOARD_NAME_LEN 64
1072struct qlcnic_brdinfo {
1073 unsigned short vendor;
1074 unsigned short device;
1075 unsigned short sub_vendor;
1076 unsigned short sub_device;
1077 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1078};
1079
1080static const struct qlcnic_brdinfo qlcnic_boards[] = {
1081 {0x1077, 0x8020, 0x1077, 0x203, "8200 Series Single Port 10GbE CNA"},
1082 {0x1077, 0x8020, 0x1077, 0x207, "8200 Series Dual Port 10GbE CNA"},
1083 {0x1077, 0x8020, 0x1077, 0x20b,
1084 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1085 {0x1077, 0x8020, 0x1077, 0x20c,
1086 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1087 {0x1077, 0x8020, 0x1077, 0x20f,
1088 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1089 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1090};
1091
1092#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1093
1094static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1095{
1096 smp_mb();
1097 if (tx_ring->producer < tx_ring->sw_consumer)
1098 return tx_ring->sw_consumer - tx_ring->producer;
1099 else
1100 return tx_ring->sw_consumer + tx_ring->num_desc -
1101 tx_ring->producer;
1102}
1103
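For illustration only (not part of this patch), a sketch of the usual pattern in a transmit handler: stop queuing when the free-descriptor count drops to the threshold defined earlier:

static inline bool qlcnic_tx_should_stop_example(struct qlcnic_host_tx_ring *tx_ring)
{
	return qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH;
}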
1104extern const struct ethtool_ops qlcnic_ethtool_ops;
1105
1106#endif /* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..71c16a183458
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,536 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#define QLCHAL_VERSION 1
28
29static u32
30qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
31{
32 u32 rsp;
33 int timeout = 0;
34
35 do {
36 /* give at least 1 ms for the firmware to respond */
37 msleep(1);
38
39 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
40 return QLCNIC_CDRP_RSP_TIMEOUT;
41
42 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
43 } while (!QLCNIC_CDRP_IS_RSP(rsp));
44
45 return rsp;
46}
47
48static u32
49qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
50 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
51{
52 u32 rsp;
53 u32 signature;
54 u32 rcode = QLCNIC_RCODE_SUCCESS;
55 struct pci_dev *pdev = adapter->pdev;
56
57 signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
58
59 /* Acquire semaphore before accessing CRB */
60 if (qlcnic_api_lock(adapter))
61 return QLCNIC_RCODE_TIMEOUT;
62
63 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
64 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
65 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
66 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
67 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
68
69 rsp = qlcnic_poll_rsp(adapter);
70
71 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
72 dev_err(&pdev->dev, "card response timeout.\n");
73 rcode = QLCNIC_RCODE_TIMEOUT;
74 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
75 rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
76 dev_err(&pdev->dev, "failed card response code:0x%x\n",
77 rcode);
78 }
79
80 /* Release semaphore */
81 qlcnic_api_unlock(adapter);
82
83 return rcode;
84}
85
86int
87qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
88{
89 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
90
91 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
92 if (qlcnic_issue_cmd(adapter,
93 adapter->ahw.pci_func,
94 QLCHAL_VERSION,
95 recv_ctx->context_id,
96 mtu,
97 0,
98 QLCNIC_CDRP_CMD_SET_MTU)) {
99
100 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
101 return -EIO;
102 }
103 }
104
105 return 0;
106}
107
108static int
109qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
110{
111 void *addr;
112 struct qlcnic_hostrq_rx_ctx *prq;
113 struct qlcnic_cardrsp_rx_ctx *prsp;
114 struct qlcnic_hostrq_rds_ring *prq_rds;
115 struct qlcnic_hostrq_sds_ring *prq_sds;
116 struct qlcnic_cardrsp_rds_ring *prsp_rds;
117 struct qlcnic_cardrsp_sds_ring *prsp_sds;
118 struct qlcnic_host_rds_ring *rds_ring;
119 struct qlcnic_host_sds_ring *sds_ring;
120
121 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
122 u64 phys_addr;
123
124 int i, nrds_rings, nsds_rings;
125 size_t rq_size, rsp_size;
126 u32 cap, reg, val;
127 int err;
128
129 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
130
131 nrds_rings = adapter->max_rds_rings;
132 nsds_rings = adapter->max_sds_rings;
133
134 rq_size =
135 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
136 nsds_rings);
137 rsp_size =
138 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
139 nsds_rings);
140
141 addr = pci_alloc_consistent(adapter->pdev,
142 rq_size, &hostrq_phys_addr);
143 if (addr == NULL)
144 return -ENOMEM;
145 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
146
147 addr = pci_alloc_consistent(adapter->pdev,
148 rsp_size, &cardrsp_phys_addr);
149 if (addr == NULL) {
150 err = -ENOMEM;
151 goto out_free_rq;
152 }
153 prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
154
155 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
156
157 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
158 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
159
160 prq->capabilities[0] = cpu_to_le32(cap);
161 prq->host_int_crb_mode =
162 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
163 prq->host_rds_crb_mode =
164 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
165
166 prq->num_rds_rings = cpu_to_le16(nrds_rings);
167 prq->num_sds_rings = cpu_to_le16(nsds_rings);
168 prq->rds_ring_offset = cpu_to_le32(0);
169
170 val = le32_to_cpu(prq->rds_ring_offset) +
171 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
172 prq->sds_ring_offset = cpu_to_le32(val);
173
174 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
175 le32_to_cpu(prq->rds_ring_offset));
176
177 for (i = 0; i < nrds_rings; i++) {
178
179 rds_ring = &recv_ctx->rds_rings[i];
180
181 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
182 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
183 prq_rds[i].ring_kind = cpu_to_le32(i);
184 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
185 }
186
187 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
188 le32_to_cpu(prq->sds_ring_offset));
189
190 for (i = 0; i < nsds_rings; i++) {
191
192 sds_ring = &recv_ctx->sds_rings[i];
193
194 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
195 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
196 prq_sds[i].msi_index = cpu_to_le16(i);
197 }
198
199 phys_addr = hostrq_phys_addr;
200 err = qlcnic_issue_cmd(adapter,
201 adapter->ahw.pci_func,
202 QLCHAL_VERSION,
203 (u32)(phys_addr >> 32),
204 (u32)(phys_addr & 0xffffffff),
205 rq_size,
206 QLCNIC_CDRP_CMD_CREATE_RX_CTX);
207 if (err) {
208 dev_err(&adapter->pdev->dev,
209 "Failed to create rx ctx in firmware%d\n", err);
210 goto out_free_rsp;
211 }
212
213
214 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
215 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
216
217 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
218 rds_ring = &recv_ctx->rds_rings[i];
219
220 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
221 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
222 QLCNIC_REG(reg - 0x200));
223 }
224
225 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
226 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
227
228 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
229 sds_ring = &recv_ctx->sds_rings[i];
230
231 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
232 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
233 QLCNIC_REG(reg - 0x200));
234
235 reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
236 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
237 QLCNIC_REG(reg - 0x200));
238 }
239
240 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
241 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
242 recv_ctx->virt_port = prsp->virt_port;
243
244out_free_rsp:
245 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
246out_free_rq:
247 pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
248 return err;
249}
250
251static void
252qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
253{
254 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
255
256 if (qlcnic_issue_cmd(adapter,
257 adapter->ahw.pci_func,
258 QLCHAL_VERSION,
259 recv_ctx->context_id,
260 QLCNIC_DESTROY_CTX_RESET,
261 0,
262 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
263
264 dev_err(&adapter->pdev->dev,
265 "Failed to destroy rx ctx in firmware\n");
266 }
267}
268
269static int
270qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
271{
272 struct qlcnic_hostrq_tx_ctx *prq;
273 struct qlcnic_hostrq_cds_ring *prq_cds;
274 struct qlcnic_cardrsp_tx_ctx *prsp;
275 void *rq_addr, *rsp_addr;
276 size_t rq_size, rsp_size;
277 u32 temp;
278 int err;
279 u64 phys_addr;
280 dma_addr_t rq_phys_addr, rsp_phys_addr;
281 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
282
283 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
284 rq_addr = pci_alloc_consistent(adapter->pdev,
285 rq_size, &rq_phys_addr);
286 if (!rq_addr)
287 return -ENOMEM;
288
289 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
290 rsp_addr = pci_alloc_consistent(adapter->pdev,
291 rsp_size, &rsp_phys_addr);
292 if (!rsp_addr) {
293 err = -ENOMEM;
294 goto out_free_rq;
295 }
296
297 memset(rq_addr, 0, rq_size);
298 prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
299
300 memset(rsp_addr, 0, rsp_size);
301 prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
302
303 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
304
305 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
306 QLCNIC_CAP0_LSO);
307 prq->capabilities[0] = cpu_to_le32(temp);
308
309 prq->host_int_crb_mode =
310 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
311
312 prq->interrupt_ctl = 0;
313 prq->msi_index = 0;
314 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
315
316 prq_cds = &prq->cds_ring;
317
318 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
319 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
320
321 phys_addr = rq_phys_addr;
322 err = qlcnic_issue_cmd(adapter,
323 adapter->ahw.pci_func,
324 QLCHAL_VERSION,
325 (u32)(phys_addr >> 32),
326 ((u32)phys_addr & 0xffffffff),
327 rq_size,
328 QLCNIC_CDRP_CMD_CREATE_TX_CTX);
329
330 if (err == QLCNIC_RCODE_SUCCESS) {
331 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
332 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
333 QLCNIC_REG(temp - 0x200));
334
335 adapter->tx_context_id =
336 le16_to_cpu(prsp->context_id);
337 } else {
338 dev_err(&adapter->pdev->dev,
339 "Failed to create tx ctx in firmware%d\n", err);
340 err = -EIO;
341 }
342
343 pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
344
345out_free_rq:
346 pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
347
348 return err;
349}
350
351static void
352qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
353{
354 if (qlcnic_issue_cmd(adapter,
355 adapter->ahw.pci_func,
356 QLCHAL_VERSION,
357 adapter->tx_context_id,
358 QLCNIC_DESTROY_CTX_RESET,
359 0,
360 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
361
362 dev_err(&adapter->pdev->dev,
363 "Failed to destroy tx ctx in firmware\n");
364 }
365}
366
367int
368qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
369{
370
371 if (qlcnic_issue_cmd(adapter,
372 adapter->ahw.pci_func,
373 QLCHAL_VERSION,
374 reg,
375 0,
376 0,
377 QLCNIC_CDRP_CMD_READ_PHY)) {
378
379 return -EIO;
380 }
381
382 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
383}
384
385int
386qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
387{
388 return qlcnic_issue_cmd(adapter,
389 adapter->ahw.pci_func,
390 QLCHAL_VERSION,
391 reg,
392 val,
393 0,
394 QLCNIC_CDRP_CMD_WRITE_PHY);
395}
396
397int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
398{
399 void *addr;
400 int err;
401 int ring;
402 struct qlcnic_recv_context *recv_ctx;
403 struct qlcnic_host_rds_ring *rds_ring;
404 struct qlcnic_host_sds_ring *sds_ring;
405 struct qlcnic_host_tx_ring *tx_ring;
406
407 struct pci_dev *pdev = adapter->pdev;
408
409 recv_ctx = &adapter->recv_ctx;
410 tx_ring = adapter->tx_ring;
411
412 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
413 &tx_ring->hw_cons_phys_addr);
414 if (tx_ring->hw_consumer == NULL) {
415 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
416 return -ENOMEM;
417 }
418 *(tx_ring->hw_consumer) = 0;
419
420 /* cmd desc ring */
421 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
422 &tx_ring->phys_addr);
423
424 if (addr == NULL) {
425 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
426 return -ENOMEM;
427 }
428
429 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
430
431 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
432 rds_ring = &recv_ctx->rds_rings[ring];
433 addr = pci_alloc_consistent(adapter->pdev,
434 RCV_DESC_RINGSIZE(rds_ring),
435 &rds_ring->phys_addr);
436 if (addr == NULL) {
437 dev_err(&pdev->dev,
438 "failed to allocate rds ring [%d]\n", ring);
439 err = -ENOMEM;
440 goto err_out_free;
441 }
442 rds_ring->desc_head = (struct rcv_desc *)addr;
443
444 }
445
446 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
447 sds_ring = &recv_ctx->sds_rings[ring];
448
449 addr = pci_alloc_consistent(adapter->pdev,
450 STATUS_DESC_RINGSIZE(sds_ring),
451 &sds_ring->phys_addr);
452 if (addr == NULL) {
453 dev_err(&pdev->dev,
454 "failed to allocate sds ring [%d]\n", ring);
455 err = -ENOMEM;
456 goto err_out_free;
457 }
458 sds_ring->desc_head = (struct status_desc *)addr;
459 }
460
461
462 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
463 if (err)
464 goto err_out_free;
465 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
466 if (err)
467 goto err_out_free;
468
469 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
470 return 0;
471
472err_out_free:
473 qlcnic_free_hw_resources(adapter);
474 return err;
475}
476
477void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
478{
479 struct qlcnic_recv_context *recv_ctx;
480 struct qlcnic_host_rds_ring *rds_ring;
481 struct qlcnic_host_sds_ring *sds_ring;
482 struct qlcnic_host_tx_ring *tx_ring;
483 int ring;
484
485
486 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
487 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
488 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
489
490 /* Allow dma queues to drain after context reset */
491 msleep(20);
492 }
493
494 recv_ctx = &adapter->recv_ctx;
495
496 tx_ring = adapter->tx_ring;
497 if (tx_ring->hw_consumer != NULL) {
498 pci_free_consistent(adapter->pdev,
499 sizeof(u32),
500 tx_ring->hw_consumer,
501 tx_ring->hw_cons_phys_addr);
502 tx_ring->hw_consumer = NULL;
503 }
504
505 if (tx_ring->desc_head != NULL) {
506 pci_free_consistent(adapter->pdev,
507 TX_DESC_RINGSIZE(tx_ring),
508 tx_ring->desc_head, tx_ring->phys_addr);
509 tx_ring->desc_head = NULL;
510 }
511
512 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
513 rds_ring = &recv_ctx->rds_rings[ring];
514
515 if (rds_ring->desc_head != NULL) {
516 pci_free_consistent(adapter->pdev,
517 RCV_DESC_RINGSIZE(rds_ring),
518 rds_ring->desc_head,
519 rds_ring->phys_addr);
520 rds_ring->desc_head = NULL;
521 }
522 }
523
524 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
525 sds_ring = &recv_ctx->sds_rings[ring];
526
527 if (sds_ring->desc_head != NULL) {
528 pci_free_consistent(adapter->pdev,
529 STATUS_DESC_RINGSIZE(sds_ring),
530 sds_ring->desc_head,
531 sds_ring->phys_addr);
532 sds_ring->desc_head = NULL;
533 }
534 }
535}
536
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..65e9620e28f1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,870 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/delay.h>
27#include <linux/pci.h>
28#include <linux/io.h>
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31
32#include "qlcnic.h"
33
34struct qlcnic_stats {
35 char stat_string[ETH_GSTRING_LEN];
36 int sizeof_stat;
37 int stat_offset;
38};
39
40#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
41#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
42
43static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
44 {"xmit_called",
45 QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
46 {"xmit_finished",
47 QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
48 {"rx_dropped",
49 QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
50 {"tx_dropped",
51 QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
52 {"csummed",
53 QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
54 {"rx_pkts",
55 QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
56 {"lro_pkts",
57 QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
58 {"rx_bytes",
59 QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
60 {"tx_bytes",
61 QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
62};
63
64#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
65
66static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
67 "Register_Test_on_offline",
68 "Link_Test_on_offline"
69};
70
71#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
72
73#define QLCNIC_RING_REGS_COUNT 20
74#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
75#define QLCNIC_MAX_EEPROM_LEN 1024
76
77static const u32 diag_registers[] = {
78 CRB_CMDPEG_STATE,
79 CRB_RCVPEG_STATE,
80 CRB_XG_STATE_P3,
81 CRB_FW_CAPABILITIES_1,
82 ISR_INT_STATE_REG,
83 QLCNIC_CRB_DEV_REF_COUNT,
84 QLCNIC_CRB_DEV_STATE,
85 QLCNIC_CRB_DRV_STATE,
86 QLCNIC_CRB_DRV_SCRATCH,
87 QLCNIC_CRB_DEV_PARTITION_INFO,
88 QLCNIC_CRB_DRV_IDC_VER,
89 QLCNIC_PEG_ALIVE_COUNTER,
90 QLCNIC_PEG_HALT_STATUS1,
91 QLCNIC_PEG_HALT_STATUS2,
92 QLCNIC_CRB_PEG_NET_0+0x3c,
93 QLCNIC_CRB_PEG_NET_1+0x3c,
94 QLCNIC_CRB_PEG_NET_2+0x3c,
95 QLCNIC_CRB_PEG_NET_4+0x3c,
96 -1
97};
98
99static int qlcnic_get_regs_len(struct net_device *dev)
100{
101 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
102}
103
104static int qlcnic_get_eeprom_len(struct net_device *dev)
105{
106 return QLCNIC_FLASH_TOTAL_SIZE;
107}
108
109static void
110qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
111{
112 struct qlcnic_adapter *adapter = netdev_priv(dev);
113 u32 fw_major, fw_minor, fw_build;
114
115 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
116 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
117 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
118 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
119
120 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
121 strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
122 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
123}
124
125static int
126qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
127{
128 struct qlcnic_adapter *adapter = netdev_priv(dev);
129 int check_sfp_module = 0;
130 u16 pcifn = adapter->ahw.pci_func;
131
132 /* read which mode */
133 if (adapter->ahw.port_type == QLCNIC_GBE) {
134 ecmd->supported = (SUPPORTED_10baseT_Half |
135 SUPPORTED_10baseT_Full |
136 SUPPORTED_100baseT_Half |
137 SUPPORTED_100baseT_Full |
138 SUPPORTED_1000baseT_Half |
139 SUPPORTED_1000baseT_Full);
140
141 ecmd->advertising = (ADVERTISED_100baseT_Half |
142 ADVERTISED_100baseT_Full |
143 ADVERTISED_1000baseT_Half |
144 ADVERTISED_1000baseT_Full);
145
146 ecmd->speed = adapter->link_speed;
147 ecmd->duplex = adapter->link_duplex;
148 ecmd->autoneg = adapter->link_autoneg;
149
150 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
151 u32 val;
152
153 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
154 if (val == QLCNIC_PORT_MODE_802_3_AP) {
155 ecmd->supported = SUPPORTED_1000baseT_Full;
156 ecmd->advertising = ADVERTISED_1000baseT_Full;
157 } else {
158 ecmd->supported = SUPPORTED_10000baseT_Full;
159 ecmd->advertising = ADVERTISED_10000baseT_Full;
160 }
161
162 if (netif_running(dev) && adapter->has_link_events) {
163 ecmd->speed = adapter->link_speed;
164 ecmd->autoneg = adapter->link_autoneg;
165 ecmd->duplex = adapter->link_duplex;
166 goto skip;
167 }
168
169 val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
170 ecmd->speed = P3_LINK_SPEED_MHZ *
171 P3_LINK_SPEED_VAL(pcifn, val);
172 ecmd->duplex = DUPLEX_FULL;
173 ecmd->autoneg = AUTONEG_DISABLE;
174 } else
175 return -EIO;
176
177skip:
178 ecmd->phy_address = adapter->physical_port;
179 ecmd->transceiver = XCVR_EXTERNAL;
180
181 switch (adapter->ahw.board_type) {
182 case QLCNIC_BRDTYPE_P3_REF_QG:
183 case QLCNIC_BRDTYPE_P3_4_GB:
184 case QLCNIC_BRDTYPE_P3_4_GB_MM:
185
186 ecmd->supported |= SUPPORTED_Autoneg;
187 ecmd->advertising |= ADVERTISED_Autoneg;
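		/* fall through */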
188 case QLCNIC_BRDTYPE_P3_10G_CX4:
189 case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
190 case QLCNIC_BRDTYPE_P3_10000_BASE_T:
191 ecmd->supported |= SUPPORTED_TP;
192 ecmd->advertising |= ADVERTISED_TP;
193 ecmd->port = PORT_TP;
194 ecmd->autoneg = adapter->link_autoneg;
195 break;
196 case QLCNIC_BRDTYPE_P3_IMEZ:
197 case QLCNIC_BRDTYPE_P3_XG_LOM:
198 case QLCNIC_BRDTYPE_P3_HMEZ:
199 ecmd->supported |= SUPPORTED_MII;
200 ecmd->advertising |= ADVERTISED_MII;
201 ecmd->port = PORT_MII;
202 ecmd->autoneg = AUTONEG_DISABLE;
203 break;
204 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
205 case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
206 case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
207 ecmd->advertising |= ADVERTISED_TP;
208 ecmd->supported |= SUPPORTED_TP;
209 check_sfp_module = netif_running(dev) &&
210 adapter->has_link_events;
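		/* fall through */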
211 case QLCNIC_BRDTYPE_P3_10G_XFP:
212 ecmd->supported |= SUPPORTED_FIBRE;
213 ecmd->advertising |= ADVERTISED_FIBRE;
214 ecmd->port = PORT_FIBRE;
215 ecmd->autoneg = AUTONEG_DISABLE;
216 break;
217 case QLCNIC_BRDTYPE_P3_10G_TP:
218 if (adapter->ahw.port_type == QLCNIC_XGBE) {
219 ecmd->autoneg = AUTONEG_DISABLE;
220 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
221 ecmd->advertising |=
222 (ADVERTISED_FIBRE | ADVERTISED_TP);
223 ecmd->port = PORT_FIBRE;
224 check_sfp_module = netif_running(dev) &&
225 adapter->has_link_events;
226 } else {
227 ecmd->autoneg = AUTONEG_ENABLE;
228 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
229 ecmd->advertising |=
230 (ADVERTISED_TP | ADVERTISED_Autoneg);
231 ecmd->port = PORT_TP;
232 }
233 break;
234 default:
235 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
236 adapter->ahw.board_type);
237 return -EIO;
238 }
239
240 if (check_sfp_module) {
241 switch (adapter->module_type) {
242 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
243 case LINKEVENT_MODULE_OPTICAL_SRLR:
244 case LINKEVENT_MODULE_OPTICAL_LRM:
245 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
246 ecmd->port = PORT_FIBRE;
247 break;
248 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
249 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
250 case LINKEVENT_MODULE_TWINAX:
251 ecmd->port = PORT_TP;
252 break;
253 default:
254 ecmd->port = PORT_OTHER;
255 }
256 }
257
258 return 0;
259}
260
261static int
262qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
263{
264 struct qlcnic_adapter *adapter = netdev_priv(dev);
265 __u32 status;
266
267 /* read which mode */
268 if (adapter->ahw.port_type == QLCNIC_GBE) {
269 /* autonegotiation */
270 if (qlcnic_fw_cmd_set_phy(adapter,
271 QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
272 ecmd->autoneg) != 0)
273 return -EIO;
274 else
275 adapter->link_autoneg = ecmd->autoneg;
276
277 if (qlcnic_fw_cmd_query_phy(adapter,
278 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
279 &status) != 0)
280 return -EIO;
281
282 switch (ecmd->speed) {
283 case SPEED_10:
284 qlcnic_set_phy_speed(status, 0);
285 break;
286 case SPEED_100:
287 qlcnic_set_phy_speed(status, 1);
288 break;
289 case SPEED_1000:
290 qlcnic_set_phy_speed(status, 2);
291 break;
292 }
293
294 if (ecmd->duplex == DUPLEX_HALF)
295 qlcnic_clear_phy_duplex(status);
296 if (ecmd->duplex == DUPLEX_FULL)
297 qlcnic_set_phy_duplex(status);
298 if (qlcnic_fw_cmd_set_phy(adapter,
299 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
300 *((int *)&status)) != 0)
301 return -EIO;
302 else {
303 adapter->link_speed = ecmd->speed;
304 adapter->link_duplex = ecmd->duplex;
305 }
306 } else
307 return -EOPNOTSUPP;
308
309 if (!netif_running(dev))
310 return 0;
311
312 dev->netdev_ops->ndo_stop(dev);
313 return dev->netdev_ops->ndo_open(dev);
314}
315
316static void
317qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
318{
319 struct qlcnic_adapter *adapter = netdev_priv(dev);
320 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
321 struct qlcnic_host_sds_ring *sds_ring;
322 u32 *regs_buff = p;
323 int ring, i = 0;
324
325 memset(p, 0, qlcnic_get_regs_len(dev));
326 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
327 (adapter->pdev)->device;
328
329 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
330 return;
331
332 for (i = 0; diag_registers[i] != -1; i++)
333 regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
334
335	regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
336
337 regs_buff[i++] = 1; /* No. of tx ring */
338 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
339 regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
340
341 regs_buff[i++] = 2; /* No. of rx ring */
342 regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
343 regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
344
345 regs_buff[i++] = adapter->max_sds_rings;
346
347 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
348 sds_ring = &(recv_ctx->sds_rings[ring]);
349 regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
350 }
351}
352
353static u32 qlcnic_test_link(struct net_device *dev)
354{
355 struct qlcnic_adapter *adapter = netdev_priv(dev);
356 u32 val;
357
358 val = QLCRD32(adapter, CRB_XG_STATE_P3);
359 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
360 return (val == XG_LINK_UP_P3) ? 0 : 1;
361}
362
363static int
364qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
365 u8 *bytes)
366{
367 struct qlcnic_adapter *adapter = netdev_priv(dev);
368 int offset;
369 int ret;
370
371 if (eeprom->len == 0)
372 return -EINVAL;
373
374 eeprom->magic = (adapter->pdev)->vendor |
375 ((adapter->pdev)->device << 16);
376 offset = eeprom->offset;
377
378 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
379 eeprom->len);
380 if (ret < 0)
381 return ret;
382
383 return 0;
384}
385
386static void
387qlcnic_get_ringparam(struct net_device *dev,
388 struct ethtool_ringparam *ring)
389{
390 struct qlcnic_adapter *adapter = netdev_priv(dev);
391
392 ring->rx_pending = adapter->num_rxd;
393 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
394 ring->rx_jumbo_pending += adapter->num_lro_rxd;
395 ring->tx_pending = adapter->num_txd;
396
397 if (adapter->ahw.port_type == QLCNIC_GBE) {
398 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
399 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
400 } else {
401 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
402 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
403 }
404
405 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
406
407 ring->rx_mini_max_pending = 0;
408 ring->rx_mini_pending = 0;
409}
410
411static u32
412qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
413{
414 u32 num_desc;
415 num_desc = max(val, min);
416 num_desc = min(num_desc, max);
417 num_desc = roundup_pow_of_two(num_desc);
418
419 if (val != num_desc) {
420 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
421 qlcnic_driver_name, r_name, num_desc, val);
422 }
423
424 return num_desc;
425}
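/*
 * For example, a request of 1000 descriptors is first clamped to
 * [min, max] and then rounded up to the next power of two, so 1024
 * is used and the adjustment is logged.
 */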
426
427static int
428qlcnic_set_ringparam(struct net_device *dev,
429 struct ethtool_ringparam *ring)
430{
431 struct qlcnic_adapter *adapter = netdev_priv(dev);
432 u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
433 u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
434 u16 num_rxd, num_jumbo_rxd, num_txd;
435
436
437 if (ring->rx_mini_pending)
438 return -EOPNOTSUPP;
439
440 if (adapter->ahw.port_type == QLCNIC_GBE) {
441 max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
442		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
443 }
444
445 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
446 MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
447
448 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
449 MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
450
451 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
452 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
453
454 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
455 num_jumbo_rxd == adapter->num_jumbo_rxd)
456 return 0;
457
458 adapter->num_rxd = num_rxd;
459 adapter->num_jumbo_rxd = num_jumbo_rxd;
460 adapter->num_txd = num_txd;
461
462 return qlcnic_reset_context(adapter);
463}
464
465static void
466qlcnic_get_pauseparam(struct net_device *netdev,
467 struct ethtool_pauseparam *pause)
468{
469 struct qlcnic_adapter *adapter = netdev_priv(netdev);
470 int port = adapter->physical_port;
471 __u32 val;
472
473 if (adapter->ahw.port_type == QLCNIC_GBE) {
474 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
475 return;
476 /* get flow control settings */
477 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
478 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
479 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
480 switch (port) {
481 case 0:
482 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
483 break;
484 case 1:
485 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
486 break;
487 case 2:
488 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
489 break;
490 case 3:
491 default:
492 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
493 break;
494 }
495 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
496 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
497 return;
498 pause->rx_pause = 1;
499 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
500 if (port == 0)
501 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
502 else
503 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
504 } else {
505 dev_err(&netdev->dev, "Unknown board type: %x\n",
506 adapter->ahw.port_type);
507 }
508}
509
510static int
511qlcnic_set_pauseparam(struct net_device *netdev,
512 struct ethtool_pauseparam *pause)
513{
514 struct qlcnic_adapter *adapter = netdev_priv(netdev);
515 int port = adapter->physical_port;
516 __u32 val;
517
518 /* read mode */
519 if (adapter->ahw.port_type == QLCNIC_GBE) {
520 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
521 return -EIO;
522 /* set flow control */
523 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
524
525 if (pause->rx_pause)
526 qlcnic_gb_rx_flowctl(val);
527 else
528 qlcnic_gb_unset_rx_flowctl(val);
529
530 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
531 val);
532 /* set autoneg */
533 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
534 switch (port) {
535 case 0:
536 if (pause->tx_pause)
537 qlcnic_gb_unset_gb0_mask(val);
538 else
539 qlcnic_gb_set_gb0_mask(val);
540 break;
541 case 1:
542 if (pause->tx_pause)
543 qlcnic_gb_unset_gb1_mask(val);
544 else
545 qlcnic_gb_set_gb1_mask(val);
546 break;
547 case 2:
548 if (pause->tx_pause)
549 qlcnic_gb_unset_gb2_mask(val);
550 else
551 qlcnic_gb_set_gb2_mask(val);
552 break;
553 case 3:
554 default:
555 if (pause->tx_pause)
556 qlcnic_gb_unset_gb3_mask(val);
557 else
558 qlcnic_gb_set_gb3_mask(val);
559 break;
560 }
561 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
562 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
563 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
564 return -EIO;
565 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
566 if (port == 0) {
567 if (pause->tx_pause)
568 qlcnic_xg_unset_xg0_mask(val);
569 else
570 qlcnic_xg_set_xg0_mask(val);
571 } else {
572 if (pause->tx_pause)
573 qlcnic_xg_unset_xg1_mask(val);
574 else
575 qlcnic_xg_set_xg1_mask(val);
576 }
577 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
578 } else {
579 dev_err(&netdev->dev, "Unknown board type: %x\n",
580 adapter->ahw.port_type);
581 }
582 return 0;
583}
584
585static int qlcnic_reg_test(struct net_device *dev)
586{
587 struct qlcnic_adapter *adapter = netdev_priv(dev);
588 u32 data_read, data_written;
589
590 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
591 if ((data_read & 0xffff) != adapter->pdev->vendor)
592 return 1;
593
594 data_written = (u32)0xa5a5a5a5;
595
596 QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
597 data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
598 if (data_written != data_read)
599 return 1;
600
601 return 0;
602}
603
604static int qlcnic_get_sset_count(struct net_device *dev, int sset)
605{
606 switch (sset) {
607 case ETH_SS_TEST:
608 return QLCNIC_TEST_LEN;
609 case ETH_SS_STATS:
610 return QLCNIC_STATS_LEN;
611 default:
612 return -EOPNOTSUPP;
613 }
614}
615
616static void
617qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
618 u64 *data)
619{
620 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
621 data[0] = qlcnic_reg_test(dev);
622 if (data[0])
623 eth_test->flags |= ETH_TEST_FL_FAILED;
624
625 /* link test */
626 data[1] = (u64) qlcnic_test_link(dev);
627 if (data[1])
628 eth_test->flags |= ETH_TEST_FL_FAILED;
629}
630
631static void
632qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
633{
634 int index;
635
636 switch (stringset) {
637 case ETH_SS_TEST:
638 memcpy(data, *qlcnic_gstrings_test,
639 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
640 break;
641 case ETH_SS_STATS:
642 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
643 memcpy(data + index * ETH_GSTRING_LEN,
644 qlcnic_gstrings_stats[index].stat_string,
645 ETH_GSTRING_LEN);
646 }
647 break;
648 }
649}
650
651static void
652qlcnic_get_ethtool_stats(struct net_device *dev,
653 struct ethtool_stats *stats, u64 * data)
654{
655 struct qlcnic_adapter *adapter = netdev_priv(dev);
656 int index;
657
658 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
659 char *p =
660 (char *)adapter +
661 qlcnic_gstrings_stats[index].stat_offset;
662 data[index] =
663 (qlcnic_gstrings_stats[index].sizeof_stat ==
664 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
665 }
666}
667
668static u32 qlcnic_get_rx_csum(struct net_device *dev)
669{
670 struct qlcnic_adapter *adapter = netdev_priv(dev);
671 return adapter->rx_csum;
672}
673
674static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
675{
676 struct qlcnic_adapter *adapter = netdev_priv(dev);
677 adapter->rx_csum = !!data;
678 return 0;
679}
680
681static u32 qlcnic_get_tso(struct net_device *dev)
682{
683 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
684}
685
686static int qlcnic_set_tso(struct net_device *dev, u32 data)
687{
688 if (data)
689 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
690 else
691 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
692
693 return 0;
694}
695
696static void
697qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
698{
699 struct qlcnic_adapter *adapter = netdev_priv(dev);
700 u32 wol_cfg;
701
702 wol->supported = 0;
703 wol->wolopts = 0;
704
705 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
706 if (wol_cfg & (1UL << adapter->portnum))
707 wol->supported |= WAKE_MAGIC;
708
709 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
710 if (wol_cfg & (1UL << adapter->portnum))
711 wol->wolopts |= WAKE_MAGIC;
712}
713
714static int
715qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
716{
717 struct qlcnic_adapter *adapter = netdev_priv(dev);
718 u32 wol_cfg;
719
720 if (wol->wolopts & ~WAKE_MAGIC)
721 return -EOPNOTSUPP;
722
723 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
724 if (!(wol_cfg & (1 << adapter->portnum)))
725 return -EOPNOTSUPP;
726
727 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
728 if (wol->wolopts & WAKE_MAGIC)
729 wol_cfg |= 1UL << adapter->portnum;
730 else
731 wol_cfg &= ~(1UL << adapter->portnum);
732
733 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
734
735 return 0;
736}
737
738/*
739 * Set the coalescing parameters. Currently only normal is supported.
740 * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
741 * firmware coalescing to default.
742 */
743static int qlcnic_set_intr_coalesce(struct net_device *netdev,
744 struct ethtool_coalesce *ethcoal)
745{
746 struct qlcnic_adapter *adapter = netdev_priv(netdev);
747
748 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
749 return -EINVAL;
750
751 /*
752	 * Return an error if unsupported values or
753	 * parameters are set.
754 */
755 if (ethcoal->rx_coalesce_usecs > 0xffff ||
756 ethcoal->rx_max_coalesced_frames > 0xffff ||
757 ethcoal->tx_coalesce_usecs > 0xffff ||
758 ethcoal->tx_max_coalesced_frames > 0xffff ||
759 ethcoal->rx_coalesce_usecs_irq ||
760 ethcoal->rx_max_coalesced_frames_irq ||
761 ethcoal->tx_coalesce_usecs_irq ||
762 ethcoal->tx_max_coalesced_frames_irq ||
763 ethcoal->stats_block_coalesce_usecs ||
764 ethcoal->use_adaptive_rx_coalesce ||
765 ethcoal->use_adaptive_tx_coalesce ||
766 ethcoal->pkt_rate_low ||
767 ethcoal->rx_coalesce_usecs_low ||
768 ethcoal->rx_max_coalesced_frames_low ||
769 ethcoal->tx_coalesce_usecs_low ||
770 ethcoal->tx_max_coalesced_frames_low ||
771 ethcoal->pkt_rate_high ||
772 ethcoal->rx_coalesce_usecs_high ||
773 ethcoal->rx_max_coalesced_frames_high ||
774 ethcoal->tx_coalesce_usecs_high ||
775 ethcoal->tx_max_coalesced_frames_high)
776 return -EINVAL;
777
778 if (!ethcoal->rx_coalesce_usecs ||
779 !ethcoal->rx_max_coalesced_frames) {
780 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
781 adapter->coal.normal.data.rx_time_us =
782 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
783 adapter->coal.normal.data.rx_packets =
784 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
785 } else {
786 adapter->coal.flags = 0;
787 adapter->coal.normal.data.rx_time_us =
788 ethcoal->rx_coalesce_usecs;
789 adapter->coal.normal.data.rx_packets =
790 ethcoal->rx_max_coalesced_frames;
791 }
792 adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
793 adapter->coal.normal.data.tx_packets =
794 ethcoal->tx_max_coalesced_frames;
795
796 qlcnic_config_intr_coalesce(adapter);
797
798 return 0;
799}
800
801static int qlcnic_get_intr_coalesce(struct net_device *netdev,
802 struct ethtool_coalesce *ethcoal)
803{
804 struct qlcnic_adapter *adapter = netdev_priv(netdev);
805
806 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
807 return -EINVAL;
808
809 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
810 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
811 ethcoal->rx_max_coalesced_frames =
812 adapter->coal.normal.data.rx_packets;
813 ethcoal->tx_max_coalesced_frames =
814 adapter->coal.normal.data.tx_packets;
815
816 return 0;
817}
818
819static int qlcnic_set_flags(struct net_device *netdev, u32 data)
820{
821 struct qlcnic_adapter *adapter = netdev_priv(netdev);
822 int hw_lro;
823
824 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
825 return -EINVAL;
826
827 ethtool_op_set_flags(netdev, data);
828
829 hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
830
831 if (qlcnic_config_hw_lro(adapter, hw_lro))
832 return -EIO;
833
834 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
835 return -EIO;
836
837
838 return 0;
839}
840
841const struct ethtool_ops qlcnic_ethtool_ops = {
842 .get_settings = qlcnic_get_settings,
843 .set_settings = qlcnic_set_settings,
844 .get_drvinfo = qlcnic_get_drvinfo,
845 .get_regs_len = qlcnic_get_regs_len,
846 .get_regs = qlcnic_get_regs,
847 .get_link = ethtool_op_get_link,
848 .get_eeprom_len = qlcnic_get_eeprom_len,
849 .get_eeprom = qlcnic_get_eeprom,
850 .get_ringparam = qlcnic_get_ringparam,
851 .set_ringparam = qlcnic_set_ringparam,
852 .get_pauseparam = qlcnic_get_pauseparam,
853 .set_pauseparam = qlcnic_set_pauseparam,
854 .set_tx_csum = ethtool_op_set_tx_csum,
855 .set_sg = ethtool_op_set_sg,
856 .get_tso = qlcnic_get_tso,
857 .set_tso = qlcnic_set_tso,
858 .get_wol = qlcnic_get_wol,
859 .set_wol = qlcnic_set_wol,
860 .self_test = qlcnic_diag_test,
861 .get_strings = qlcnic_get_strings,
862 .get_ethtool_stats = qlcnic_get_ethtool_stats,
863 .get_sset_count = qlcnic_get_sset_count,
864 .get_rx_csum = qlcnic_get_rx_csum,
865 .set_rx_csum = qlcnic_set_rx_csum,
866 .get_coalesce = qlcnic_get_intr_coalesce,
867 .set_coalesce = qlcnic_set_intr_coalesce,
868 .get_flags = ethtool_op_get_flags,
869 .set_flags = qlcnic_set_flags,
870};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..0469f84360a4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef __QLCNIC_HDR_H_
26#define __QLCNIC_HDR_H_
27
28#include <linux/kernel.h>
29#include <linux/types.h>
30
31/*
32 * The basic unit of access when reading/writing control registers.
33 */
34
35enum {
36 QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
37 QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
38 QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
39 QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
40 QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
41 QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
42 QLCNIC_HW_H6_CH_HUB_ADR = 0x08
43};
44
45/* Hub 0 */
46enum {
47 QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
48 QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
49};
50
51/* Hub 1 */
52enum {
53 QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
54 QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
55 QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
56 QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
57 QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
58 QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
59 QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
60 QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
61 QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
62 QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
63 QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
64 QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
65 QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
66 QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
67 QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
68 QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
69};
70
71/* Hub 2 */
72enum {
73 QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
74 QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
75 QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
76
77 QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
78 QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
79 QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
80 QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
81 QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
82 QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
83 QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
84 QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
85 QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
86 QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
87 QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
88 QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
89 QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
90};
91
92/* Hub 3 */
93enum {
94 QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
95 QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
96 QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
97 QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
98};
99
100/* Hub 4 */
101enum {
102 QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
103 QLCNIC_HW_PEGN1_CRB_AGT_ADR,
104 QLCNIC_HW_PEGN2_CRB_AGT_ADR,
105 QLCNIC_HW_PEGN3_CRB_AGT_ADR,
106 QLCNIC_HW_PEGNI_CRB_AGT_ADR,
107 QLCNIC_HW_PEGND_CRB_AGT_ADR,
108 QLCNIC_HW_PEGNC_CRB_AGT_ADR,
109 QLCNIC_HW_PEGR0_CRB_AGT_ADR,
110 QLCNIC_HW_PEGR1_CRB_AGT_ADR,
111 QLCNIC_HW_PEGR2_CRB_AGT_ADR,
112 QLCNIC_HW_PEGR3_CRB_AGT_ADR,
113 QLCNIC_HW_PEGN4_CRB_AGT_ADR
114};
115
116/* Hub 5 */
117enum {
118 QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
119 QLCNIC_HW_PEGS1_CRB_AGT_ADR,
120 QLCNIC_HW_PEGS2_CRB_AGT_ADR,
121 QLCNIC_HW_PEGS3_CRB_AGT_ADR,
122 QLCNIC_HW_PEGSI_CRB_AGT_ADR,
123 QLCNIC_HW_PEGSD_CRB_AGT_ADR,
124 QLCNIC_HW_PEGSC_CRB_AGT_ADR
125};
126
127/* Hub 6 */
128enum {
129 QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
130 QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
131 QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
132 QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
133 QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
134 QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
135 QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
136 QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
137 QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
138};
139
140/* Floaters - non existent modules */
141#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
142
143/* This field defines PCI/X adr [25:20] of agents on the CRB */
144enum {
145 QLCNIC_HW_PX_MAP_CRB_PH = 0,
146 QLCNIC_HW_PX_MAP_CRB_PS,
147 QLCNIC_HW_PX_MAP_CRB_MN,
148 QLCNIC_HW_PX_MAP_CRB_MS,
149 QLCNIC_HW_PX_MAP_CRB_PGR1,
150 QLCNIC_HW_PX_MAP_CRB_SRE,
151 QLCNIC_HW_PX_MAP_CRB_NIU,
152 QLCNIC_HW_PX_MAP_CRB_QMN,
153 QLCNIC_HW_PX_MAP_CRB_SQN0,
154 QLCNIC_HW_PX_MAP_CRB_SQN1,
155 QLCNIC_HW_PX_MAP_CRB_SQN2,
156 QLCNIC_HW_PX_MAP_CRB_SQN3,
157 QLCNIC_HW_PX_MAP_CRB_QMS,
158 QLCNIC_HW_PX_MAP_CRB_SQS0,
159 QLCNIC_HW_PX_MAP_CRB_SQS1,
160 QLCNIC_HW_PX_MAP_CRB_SQS2,
161 QLCNIC_HW_PX_MAP_CRB_SQS3,
162 QLCNIC_HW_PX_MAP_CRB_PGN0,
163 QLCNIC_HW_PX_MAP_CRB_PGN1,
164 QLCNIC_HW_PX_MAP_CRB_PGN2,
165 QLCNIC_HW_PX_MAP_CRB_PGN3,
166 QLCNIC_HW_PX_MAP_CRB_PGND,
167 QLCNIC_HW_PX_MAP_CRB_PGNI,
168 QLCNIC_HW_PX_MAP_CRB_PGS0,
169 QLCNIC_HW_PX_MAP_CRB_PGS1,
170 QLCNIC_HW_PX_MAP_CRB_PGS2,
171 QLCNIC_HW_PX_MAP_CRB_PGS3,
172 QLCNIC_HW_PX_MAP_CRB_PGSD,
173 QLCNIC_HW_PX_MAP_CRB_PGSI,
174 QLCNIC_HW_PX_MAP_CRB_SN,
175 QLCNIC_HW_PX_MAP_CRB_PGR2,
176 QLCNIC_HW_PX_MAP_CRB_EG,
177 QLCNIC_HW_PX_MAP_CRB_PH2,
178 QLCNIC_HW_PX_MAP_CRB_PS2,
179 QLCNIC_HW_PX_MAP_CRB_CAM,
180 QLCNIC_HW_PX_MAP_CRB_CAS0,
181 QLCNIC_HW_PX_MAP_CRB_CAS1,
182 QLCNIC_HW_PX_MAP_CRB_CAS2,
183 QLCNIC_HW_PX_MAP_CRB_C2C0,
184 QLCNIC_HW_PX_MAP_CRB_C2C1,
185 QLCNIC_HW_PX_MAP_CRB_TIMR,
186 QLCNIC_HW_PX_MAP_CRB_PGR3,
187 QLCNIC_HW_PX_MAP_CRB_RPMX1,
188 QLCNIC_HW_PX_MAP_CRB_RPMX2,
189 QLCNIC_HW_PX_MAP_CRB_RPMX3,
190 QLCNIC_HW_PX_MAP_CRB_RPMX4,
191 QLCNIC_HW_PX_MAP_CRB_RPMX5,
192 QLCNIC_HW_PX_MAP_CRB_RPMX6,
193 QLCNIC_HW_PX_MAP_CRB_RPMX7,
194 QLCNIC_HW_PX_MAP_CRB_XDMA,
195 QLCNIC_HW_PX_MAP_CRB_I2Q,
196 QLCNIC_HW_PX_MAP_CRB_ROMUSB,
197 QLCNIC_HW_PX_MAP_CRB_CAS3,
198 QLCNIC_HW_PX_MAP_CRB_RPMX0,
199 QLCNIC_HW_PX_MAP_CRB_RPMX8,
200 QLCNIC_HW_PX_MAP_CRB_RPMX9,
201 QLCNIC_HW_PX_MAP_CRB_OCM0,
202 QLCNIC_HW_PX_MAP_CRB_OCM1,
203 QLCNIC_HW_PX_MAP_CRB_SMB,
204 QLCNIC_HW_PX_MAP_CRB_I2C0,
205 QLCNIC_HW_PX_MAP_CRB_I2C1,
206 QLCNIC_HW_PX_MAP_CRB_LPC,
207 QLCNIC_HW_PX_MAP_CRB_PGNC,
208 QLCNIC_HW_PX_MAP_CRB_PGR0
209};
210
211/* This field defines CRB adr [31:20] of the agents */
212
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
214 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
215#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
216 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
217#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
218 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
219
220#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
221 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
222#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
223 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
224#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
225 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
226#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
227 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
228#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
229 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
230#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
231 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
232#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
233 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
234#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
235 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
250
251#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
252 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
253#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
254 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
255#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
256 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
257
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
259 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
261 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
263 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
265 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
266#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
267 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
268#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
269 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
270#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
271 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
272#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
273 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
290
291#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
292 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
293#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
294 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
295#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
296 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
297#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
298 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
299#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
300 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
301#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
302 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
303#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
304 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
305#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
306 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
315
316#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
317 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
318#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
319 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
320#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
321 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
322#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
323 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
324#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
325 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
326#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
327 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
328#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
329 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
330
331#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
332 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
333#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
334 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
335#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
336 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
337#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
338 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
339#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
340 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
341#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
342 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
343#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
344 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
345#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
346 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
349
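/*
 * Illustrative expansion of the composition above: a CRB hub/agent
 * address is simply (hub << 7) | agent, e.g. the NIU agent on hub 2:
 *
 *	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU
 *		= (QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR
 *		= (0x03 << 7) | 0x31
 *		= 0x1b1
 */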
350#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
351
352#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
353
354#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
355#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
356
357#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
358#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
359#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
360#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
361#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
362#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
363#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
364
365#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
366
367#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
368#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
369#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
370#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
371#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
372#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
373
374/* Lock IDs for ROM lock */
375#define ROM_LOCK_DRIVER 0x0d417340
376
377/******************************************************************************
378*
379* Definitions specific to M25P flash
380*
381*******************************************************************************
382*/
383
384/* all are 1MB windows */
385
386#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
387#define QLCNIC_PCI_CRB_WINDOW(A) \
388 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
389
390#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
391#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
392#define QLCNIC_CRB_ROMUSB \
393 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
394#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
395#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
396#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
397#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
398
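/*
 * Worked example of the window macro above: each window is the 1MB
 * slice of CRB space selected by the PX map index, so with
 * QLCNIC_HW_PX_MAP_CRB_NIU == 6 and QLCNIC_PCI_CRBSPACE == 0x06000000:
 *
 *	QLCNIC_CRB_NIU = 0x06000000 + 6 * 0x00100000 = 0x06600000
 */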
399#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
400#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
401#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
402#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
403#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
404#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
405#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
406#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
407#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
408#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
409#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
410
411#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
412#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
413
414#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
415#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
416#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
417#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
418#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
419#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
420#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
421#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
422#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
423#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
424#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
425#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
426#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
427#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
428#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
429#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
430#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
431#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
432#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
433
434#define QLCNIC_PCI_MN_2M (0)
435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443
444#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
445#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
446#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
447#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
452
453/*
454 * Register offsets for MN
455 */
456#define QLCNIC_MIU_CONTROL (0x000)
457#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
458
459/* 200ms delay in each loop */
460#define QLCNIC_NIU_PHY_WAITLEN 200000
461/* 10 seconds before we give up */
462#define QLCNIC_NIU_PHY_WAITMAX 50
463#define QLCNIC_NIU_MAX_GBE_PORTS 4
464#define QLCNIC_NIU_MAX_XG_PORTS 2
465
466#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
467#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
468#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
469
470#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
471 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
472#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
473 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
474
475
476#define TEST_AGT_CTRL (0x00)
477
478#define TA_CTL_START 1
479#define TA_CTL_ENABLE 2
480#define TA_CTL_WRITE 4
481#define TA_CTL_BUSY 8
482
483/*
484 * Register offsets for MN
485 */
486#define MIU_TEST_AGT_BASE (0x90)
487
488#define MIU_TEST_AGT_ADDR_LO (0x04)
489#define MIU_TEST_AGT_ADDR_HI (0x08)
490#define MIU_TEST_AGT_WRDATA_LO (0x10)
491#define MIU_TEST_AGT_WRDATA_HI (0x14)
492#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
493#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
494#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
495#define MIU_TEST_AGT_RDDATA_LO (0x18)
496#define MIU_TEST_AGT_RDDATA_HI (0x1c)
497#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
498#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
499#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
500
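/*
 * For illustration, the indexed forms expand to the named offsets
 * above: MIU_TEST_AGT_WRDATA(0..3) gives 0x10, 0x14, 0x20, 0x24 and
 * MIU_TEST_AGT_RDDATA(0..3) gives 0x18, 0x1c, 0x28, 0x2c.
 */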
501#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
502#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
503
504/*
505 * Register offsets for MS
506 */
507#define SIU_TEST_AGT_BASE (0x60)
508
509#define SIU_TEST_AGT_ADDR_LO (0x04)
510#define SIU_TEST_AGT_ADDR_HI (0x18)
511#define SIU_TEST_AGT_WRDATA_LO (0x08)
512#define SIU_TEST_AGT_WRDATA_HI (0x0c)
513#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
514#define SIU_TEST_AGT_RDDATA_LO (0x10)
515#define SIU_TEST_AGT_RDDATA_HI (0x14)
516#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
517
518#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
519#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
520
521/* XG Link status */
522#define XG_LINK_UP 0x10
523#define XG_LINK_DOWN 0x20
524
525#define XG_LINK_UP_P3 0x01
526#define XG_LINK_DOWN_P3 0x02
527#define XG_LINK_STATE_P3_MASK 0xf
528#define XG_LINK_STATE_P3(pcifn, val) \
529 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
530
531#define P3_LINK_SPEED_MHZ 100
532#define P3_LINK_SPEED_MASK 0xff
533#define P3_LINK_SPEED_REG(pcifn) \
534 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
535#define P3_LINK_SPEED_VAL(pcifn, reg) \
536 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
537
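/*
 * Decoding sketch for the two registers above (variable names and
 * values are only illustrative): each PCI function owns a 4-bit state
 * nibble in CRB_XG_STATE_P3 and an 8-bit speed byte, in units of
 * P3_LINK_SPEED_MHZ, in the CRB_PF_LINK_SPEED_* registers.  For
 * function 2 with a speed byte of 100:
 *
 *	state = XG_LINK_STATE_P3(2, xg_state);     - (xg_state >> 8) & 0xf
 *	speed = P3_LINK_SPEED_MHZ *
 *		P3_LINK_SPEED_VAL(2, speed_reg);   - 100 * 100 = 10000 Mb/s
 */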
538#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
539#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
540#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
541#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
542#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
543#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
544#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
545#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
546
547#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
548#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
549#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
550#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
551
552#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
553#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
554#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
555#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
556#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
557
558#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
559#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
560
561#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
562#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
563#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
564
565#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
566#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
567
568#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
569
570#define CRB_V2P_0 (QLCNIC_REG(0x290))
571#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
572#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
573
574#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
575#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
576#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
577#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
578
579#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
580#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
581
582/*
583 * capabilities register, can be used to selectively enable/disable features
584 * for backward compatibility
585 */
586#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
587#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
588#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
589#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
590
591#define INTR_SCHEME_PERPORT 0x1
592#define MSI_MODE_MULTIFUNC 0x1
593
594/* used for ethtool tests */
595#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
596
597/*
598 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
599 * which can be read by the Phantom host to get producer/consumer indexes from
600 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
601 * registers will be used for the addresses of the ring's shared memory
602 * on the Phantom.
603 */
604
605#define qlcnic_get_temp_val(x) ((x) >> 16)
606#define qlcnic_get_temp_state(x) ((x) & 0xffff)
607#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
608
609/*
610 * Temperature control.
611 */
612enum {
613 QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
614 QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
615 QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
616};
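/*
 * Decoding sketch (variable names are only illustrative): the word in
 * CRB_TEMP_STATE packs the raw temperature reading in the upper
 * half-word and one of the QLCNIC_TEMP_* codes above in the lower
 * half-word:
 *
 *	temp  = QLCRD32(adapter, CRB_TEMP_STATE);
 *	val   = qlcnic_get_temp_val(temp);    - reading
 *	state = qlcnic_get_temp_state(temp);  - QLCNIC_TEMP_NORMAL/WARN/PANIC
 */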
617
618/* Lock IDs for PHY lock */
619#define PHY_LOCK_DRIVER 0x44524956
620
621/* Used for PS PCI Memory access */
622#define PCIX_PS_OP_ADDR_LO (0x10000)
623/* via CRB (PS side only) */
624#define PCIX_PS_OP_ADDR_HI (0x10004)
625
626#define PCIX_INT_VECTOR (0x10100)
627#define PCIX_INT_MASK (0x10104)
628
629#define PCIX_OCM_WINDOW (0x10800)
630#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
631
632#define PCIX_TARGET_STATUS (0x10118)
633#define PCIX_TARGET_STATUS_F1 (0x10160)
634#define PCIX_TARGET_STATUS_F2 (0x10164)
635#define PCIX_TARGET_STATUS_F3 (0x10168)
636#define PCIX_TARGET_STATUS_F4 (0x10360)
637#define PCIX_TARGET_STATUS_F5 (0x10364)
638#define PCIX_TARGET_STATUS_F6 (0x10368)
639#define PCIX_TARGET_STATUS_F7 (0x1036c)
640
641#define PCIX_TARGET_MASK (0x10128)
642#define PCIX_TARGET_MASK_F1 (0x10170)
643#define PCIX_TARGET_MASK_F2 (0x10174)
644#define PCIX_TARGET_MASK_F3 (0x10178)
645#define PCIX_TARGET_MASK_F4 (0x10370)
646#define PCIX_TARGET_MASK_F5 (0x10374)
647#define PCIX_TARGET_MASK_F6 (0x10378)
648#define PCIX_TARGET_MASK_F7 (0x1037c)
649
650#define PCIX_MSI_F(i) (0x13000+((i)*4))
651
652#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
653#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
654#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
655
656#define PCIE_SEM0_LOCK (0x1c000)
657#define PCIE_SEM0_UNLOCK (0x1c004)
658#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
659#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
660
661#define PCIE_SETUP_FUNCTION (0x12040)
662#define PCIE_SETUP_FUNCTION2 (0x12048)
663#define PCIE_MISCCFG_RC (0x1206c)
664#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
665#define PCIE_CHICKEN3 (0x120c8)
666
667#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
668#define PCIE_MAX_MASTER_SPLIT (0x14048)
669
670#define QLCNIC_PORT_MODE_NONE 0
671#define QLCNIC_PORT_MODE_XG 1
672#define QLCNIC_PORT_MODE_GB 2
673#define QLCNIC_PORT_MODE_802_3_AP 3
674#define QLCNIC_PORT_MODE_AUTO_NEG 4
675#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
676#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
677#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
678#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
679
680#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
681#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
682
683#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
684#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
685
686#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
687#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
688#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
689#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
690#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
691#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
692
693#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
697
698 /* Device State */
699#define QLCNIC_DEV_COLD 1
700#define QLCNIC_DEV_INITALIZING 2
701#define QLCNIC_DEV_READY 3
702#define QLCNIC_DEV_NEED_RESET 4
703#define QLCNIC_DEV_NEED_QUISCENT 5
704#define QLCNIC_DEV_FAILED 6
705
706#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
707#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
708#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
709#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
710#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
711
712#define FW_POLL_DELAY (2 * HZ)
713#define FW_FAIL_THRESH 3
714#define FW_POLL_THRESH 10
715
716#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
717#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
718
719/*
720 * PCI Interrupt Vector Values.
721 */
722#define PCIX_INT_VECTOR_BIT_F0 0x0080
723#define PCIX_INT_VECTOR_BIT_F1 0x0100
724#define PCIX_INT_VECTOR_BIT_F2 0x0200
725#define PCIX_INT_VECTOR_BIT_F3 0x0400
726#define PCIX_INT_VECTOR_BIT_F4 0x0800
727#define PCIX_INT_VECTOR_BIT_F5 0x1000
728#define PCIX_INT_VECTOR_BIT_F6 0x2000
729#define PCIX_INT_VECTOR_BIT_F7 0x4000
730
731struct qlcnic_legacy_intr_set {
732 u32 int_vec_bit;
733 u32 tgt_status_reg;
734 u32 tgt_mask_reg;
735 u32 pci_int_reg;
736};
737
738#define QLCNIC_LEGACY_INTR_CONFIG \
739{ \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
769 \
770 { \
771 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
772 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
773 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
775 \
776 { \
777 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
778 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
779 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
780 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
781 \
782 { \
783 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
784 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
785 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
786 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
787}
788
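/*
 * Minimal usage sketch (the array name is only illustrative): the
 * initializer above provides one qlcnic_legacy_intr_set per PCI
 * function, indexed by function number:
 *
 *	static const struct qlcnic_legacy_intr_set legacy_intr[] =
 *		QLCNIC_LEGACY_INTR_CONFIG;
 *
 * legacy_intr[adapter->ahw.pci_func] then supplies the status, mask
 * and trigger registers for that function.
 */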
789/* NIU REGS */
790
791#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
792
793/*
794 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
795 *
796 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
797 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
798 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
799 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
800 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
801 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
802 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
803 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
804 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
805 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
806 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
807 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
808 */
809#define qlcnic_gb_rx_flowctl(config_word) \
810 ((config_word) |= 1 << 5)
811#define qlcnic_gb_get_rx_flowctl(config_word) \
812 _qlcnic_crb_get_bit((config_word), 5)
813#define qlcnic_gb_unset_rx_flowctl(config_word) \
814 ((config_word) &= ~(1 << 5))
815
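/*
 * Usage sketch: these helpers only modify a local copy of the config
 * word; the caller performs the read-modify-write, as in
 * qlcnic_set_pauseparam():
 *
 *	val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
 *	if (pause->rx_pause)
 *		qlcnic_gb_rx_flowctl(val);
 *	else
 *		qlcnic_gb_unset_rx_flowctl(val);
 *	QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
 */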
816/*
817 * NIU GB Pause Ctl Register
818 */
819
820#define qlcnic_gb_set_gb0_mask(config_word) \
821 ((config_word) |= 1 << 0)
822#define qlcnic_gb_set_gb1_mask(config_word) \
823 ((config_word) |= 1 << 2)
824#define qlcnic_gb_set_gb2_mask(config_word) \
825 ((config_word) |= 1 << 4)
826#define qlcnic_gb_set_gb3_mask(config_word) \
827 ((config_word) |= 1 << 6)
828
829#define qlcnic_gb_get_gb0_mask(config_word) \
830 _qlcnic_crb_get_bit((config_word), 0)
831#define qlcnic_gb_get_gb1_mask(config_word) \
832 _qlcnic_crb_get_bit((config_word), 2)
833#define qlcnic_gb_get_gb2_mask(config_word) \
834 _qlcnic_crb_get_bit((config_word), 4)
835#define qlcnic_gb_get_gb3_mask(config_word) \
836 _qlcnic_crb_get_bit((config_word), 6)
837
838#define qlcnic_gb_unset_gb0_mask(config_word) \
839 ((config_word) &= ~(1 << 0))
840#define qlcnic_gb_unset_gb1_mask(config_word) \
841 ((config_word) &= ~(1 << 2))
842#define qlcnic_gb_unset_gb2_mask(config_word) \
843 ((config_word) &= ~(1 << 4))
844#define qlcnic_gb_unset_gb3_mask(config_word) \
845 ((config_word) &= ~(1 << 6))
846
847/*
848 * NIU XG Pause Ctl Register
849 *
850 * Bit 0 : xg0_mask => 1:disable tx pause frames
851 * Bit 1 : xg0_request => 1:request single pause frame
852 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
853 * Bit 3 : xg1_mask => 1:disable tx pause frames
854 * Bit 4 : xg1_request => 1:request single pause frame
855 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
856 */
857
858#define qlcnic_xg_set_xg0_mask(config_word) \
859 ((config_word) |= 1 << 0)
860#define qlcnic_xg_set_xg1_mask(config_word) \
861 ((config_word) |= 1 << 3)
862
863#define qlcnic_xg_get_xg0_mask(config_word) \
864 _qlcnic_crb_get_bit((config_word), 0)
865#define qlcnic_xg_get_xg1_mask(config_word) \
866 _qlcnic_crb_get_bit((config_word), 3)
867
868#define qlcnic_xg_unset_xg0_mask(config_word) \
869 ((config_word) &= ~(1 << 0))
870#define qlcnic_xg_unset_xg1_mask(config_word) \
871 ((config_word) &= ~(1 << 3))
872
884/*
885 * PHY-Specific MII control/status registers.
886 */
887#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
888#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
889
890/*
891 * PHY-Specific Status Register (reg 17).
892 *
893 * Bit 0 : jabber => 1:jabber detected, 0:not
894 * Bit 1 : polarity => 1:polarity reversed, 0:normal
895 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
896 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
897 * Bit 4 : energydetect => 1:sleep, 0:active
898 * Bit 5 : downshift => 1:downshift, 0:no downshift
899 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
900 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
901 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
902 * Bit 10 : link => 1:link up, 0:link down
903 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
904 * Bit 12 : pagercvd => 1:page received, 0:page not received
905 * Bit 13 : duplex => 1:full duplex, 0:half duplex
906 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
907 */
908
909#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
910
911#define qlcnic_set_phy_speed(config_word, val) \
912 ((config_word) |= ((val & 0x03) << 14))
913#define qlcnic_set_phy_duplex(config_word) \
914 ((config_word) |= 1 << 13)
915#define qlcnic_clear_phy_duplex(config_word) \
916 ((config_word) &= ~(1 << 13))
917
918#define qlcnic_get_phy_link(config_word) \
919 _qlcnic_crb_get_bit(config_word, 10)
920#define qlcnic_get_phy_duplex(config_word) \
921 _qlcnic_crb_get_bit(config_word, 13)
922
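/*
 * Decoding sketch (variable names are only illustrative): the status
 * word returned by qlcnic_fw_cmd_query_phy() for register 17 is
 * decoded with the helpers above:
 *
 *	link   = qlcnic_get_phy_link(status);    - 1:up, 0:down
 *	duplex = qlcnic_get_phy_duplex(status);  - 1:full, 0:half
 *	speed  = qlcnic_get_phy_speed(status);   - 0:10, 1:100, 2:1000 Mb/s
 */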
923#define QLCNIC_NIU_NON_PROMISC_MODE 0
924#define QLCNIC_NIU_PROMISC_MODE 1
925#define QLCNIC_NIU_ALLMULTI_MODE 2
926
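/*
 * Mapping from the legacy 128MB-BAR CRB layout to the 2MB BAR: each
 * CRB block is described by up to 16 sub-blocks, where a valid entry
 * gives a [start_128M, end_128M) source range and the start_2M offset
 * it maps to (see crb_128M_2M_map in qlcnic_hw.c).
 */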
927struct crb_128M_2M_sub_block_map {
928 unsigned valid;
929 unsigned start_128M;
930 unsigned end_128M;
931 unsigned start_2M;
932};
933
934struct crb_128M_2M_block_map{
935 struct crb_128M_2M_sub_block_map sub_block[16];
936};
937#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..91234e7b39e4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1201 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#include <net/ip.h>
28
29#define MASK(n) ((1ULL<<(n))-1)
30#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
31
32#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
33
34#define CRB_BLK(off) ((off >> 20) & 0x3f)
35#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
36#define CRB_WINDOW_2M (0x130060)
37#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
38#define CRB_INDIRECT_2M (0x1e0000UL)
39
40
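/*
 * Editor's illustrative note, not part of the original patch: for a CRB
 * offset of 0x110000 (after QLCNIC_PCI_CRBSPACE has been subtracted),
 * CRB_BLK() yields 1 and CRB_SUBBLK() yields 1, selecting
 * crb_128M_2M_map[1].sub_block[1] = {1, 0x0110000, 0x0120000, 0x130000}
 * below, i.e. a direct mapping into the 2M BAR at offset 0x130000.
 */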
41#ifndef readq
42static inline u64 readq(void __iomem *addr)
43{
44 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
45}
46#endif
47
48#ifndef writeq
49static inline void writeq(u64 val, void __iomem *addr)
50{
51 writel(((u32) (val)), (addr));
52 writel(((u32) (val >> 32)), (addr + 4));
53}
54#endif
55
56#define ADDR_IN_RANGE(addr, low, high) \
57 (((addr) < (high)) && ((addr) >= (low)))
58
59#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
60 ((adapter)->ahw.pci_base0 + (off))
61
62static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
63 unsigned long off)
64{
65 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
66 return PCI_OFFSET_FIRST_RANGE(adapter, off);
67
68 return NULL;
69}
70
71static const struct crb_128M_2M_block_map
72crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
73 {{{0, 0, 0, 0} } }, /* 0: PCI */
74 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
75 {1, 0x0110000, 0x0120000, 0x130000},
76 {1, 0x0120000, 0x0122000, 0x124000},
77 {1, 0x0130000, 0x0132000, 0x126000},
78 {1, 0x0140000, 0x0142000, 0x128000},
79 {1, 0x0150000, 0x0152000, 0x12a000},
80 {1, 0x0160000, 0x0170000, 0x110000},
81 {1, 0x0170000, 0x0172000, 0x12e000},
82 {0, 0x0000000, 0x0000000, 0x000000},
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {1, 0x01e0000, 0x01e0800, 0x122000},
89 {0, 0x0000000, 0x0000000, 0x000000} } },
90 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
91 {{{0, 0, 0, 0} } }, /* 3: */
92 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
93 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
94 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
95 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
96 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
97 {0, 0x0000000, 0x0000000, 0x000000},
98 {0, 0x0000000, 0x0000000, 0x000000},
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {0, 0x0000000, 0x0000000, 0x000000},
108 {0, 0x0000000, 0x0000000, 0x000000},
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {1, 0x08f0000, 0x08f2000, 0x172000} } },
112 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
113 {0, 0x0000000, 0x0000000, 0x000000},
114 {0, 0x0000000, 0x0000000, 0x000000},
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {0, 0x0000000, 0x0000000, 0x000000},
124 {0, 0x0000000, 0x0000000, 0x000000},
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {1, 0x09f0000, 0x09f2000, 0x176000} } },
128 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
129 {0, 0x0000000, 0x0000000, 0x000000},
130 {0, 0x0000000, 0x0000000, 0x000000},
131 {0, 0x0000000, 0x0000000, 0x000000},
132 {0, 0x0000000, 0x0000000, 0x000000},
133 {0, 0x0000000, 0x0000000, 0x000000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {0, 0x0000000, 0x0000000, 0x000000},
140 {0, 0x0000000, 0x0000000, 0x000000},
141 {0, 0x0000000, 0x0000000, 0x000000},
142 {0, 0x0000000, 0x0000000, 0x000000},
143 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
144 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
145 {0, 0x0000000, 0x0000000, 0x000000},
146 {0, 0x0000000, 0x0000000, 0x000000},
147 {0, 0x0000000, 0x0000000, 0x000000},
148 {0, 0x0000000, 0x0000000, 0x000000},
149 {0, 0x0000000, 0x0000000, 0x000000},
150 {0, 0x0000000, 0x0000000, 0x000000},
151 {0, 0x0000000, 0x0000000, 0x000000},
152 {0, 0x0000000, 0x0000000, 0x000000},
153 {0, 0x0000000, 0x0000000, 0x000000},
154 {0, 0x0000000, 0x0000000, 0x000000},
155 {0, 0x0000000, 0x0000000, 0x000000},
156 {0, 0x0000000, 0x0000000, 0x000000},
157 {0, 0x0000000, 0x0000000, 0x000000},
158 {0, 0x0000000, 0x0000000, 0x000000},
159 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
160 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
161 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
162 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
163 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
164 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
165 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
166 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
167 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
168 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
169 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
170 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
171 {{{0, 0, 0, 0} } }, /* 23: */
172 {{{0, 0, 0, 0} } }, /* 24: */
173 {{{0, 0, 0, 0} } }, /* 25: */
174 {{{0, 0, 0, 0} } }, /* 26: */
175 {{{0, 0, 0, 0} } }, /* 27: */
176 {{{0, 0, 0, 0} } }, /* 28: */
177 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
178 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
179 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
180 {{{0} } }, /* 32: PCI */
181 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
182 {1, 0x2110000, 0x2120000, 0x130000},
183 {1, 0x2120000, 0x2122000, 0x124000},
184 {1, 0x2130000, 0x2132000, 0x126000},
185 {1, 0x2140000, 0x2142000, 0x128000},
186 {1, 0x2150000, 0x2152000, 0x12a000},
187 {1, 0x2160000, 0x2170000, 0x110000},
188 {1, 0x2170000, 0x2172000, 0x12e000},
189 {0, 0x0000000, 0x0000000, 0x000000},
190 {0, 0x0000000, 0x0000000, 0x000000},
191 {0, 0x0000000, 0x0000000, 0x000000},
192 {0, 0x0000000, 0x0000000, 0x000000},
193 {0, 0x0000000, 0x0000000, 0x000000},
194 {0, 0x0000000, 0x0000000, 0x000000},
195 {0, 0x0000000, 0x0000000, 0x000000},
196 {0, 0x0000000, 0x0000000, 0x000000} } },
197 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
198 {{{0} } }, /* 35: */
199 {{{0} } }, /* 36: */
200 {{{0} } }, /* 37: */
201 {{{0} } }, /* 38: */
202 {{{0} } }, /* 39: */
203 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
204 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
205 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
206 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
207 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
208 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
209 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
210 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
211 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
212 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
213 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
214 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
215 {{{0} } }, /* 52: */
216 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
217 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
218 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
219 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
220 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
221 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
222 {{{0} } }, /* 59: I2C0 */
223 {{{0} } }, /* 60: I2C1 */
224 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
225 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
226 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
227};
228
229/*
230 * top 12 bits of crb internal address (hub, agent)
231 */
232static const unsigned crb_hub_agt[64] = {
233 0,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
235 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
237 0,
238 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
239 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
240 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
241 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
242 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
243 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
244 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
245 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
248 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
249 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
250 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
251 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
252 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
254 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
255 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
260 0,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
263 0,
264 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
265 0,
266 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
267 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
268 0,
269 0,
270 0,
271 0,
272 0,
273 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
274 0,
275 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
276 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
277 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
278 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
279 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
280 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
281 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
282 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
283 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
284 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
285 0,
286 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
287 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
288 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
289 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
290 0,
291 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
292 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
293 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
294 0,
295 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
296 0,
297};
298
299/* PCI Windowing for DDR regions. */
300
301#define QLCNIC_PCIE_SEM_TIMEOUT 10000
302
303int
304qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
305{
306 int done = 0, timeout = 0;
307
308 while (!done) {
309 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
310 if (done == 1)
311 break;
312 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
313 return -EIO;
314 msleep(1);
315 }
316
317 if (id_reg)
318 QLCWR32(adapter, id_reg, adapter->portnum);
319
320 return 0;
321}
322
323void
324qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
325{
326 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
327}
328
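/*
 * Editor's illustrative sketch, not part of the original patch: typical
 * pairing of the two semaphore helpers above around access to a shared
 * hardware resource. The function name is hypothetical; passing 0 as
 * id_reg skips recording an owner id (a real caller would usually pass
 * the CRB register that tracks the lock owner).
 */
#if 0	/* example only */
static int qlcnic_example_sem_protected_op(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_pcie_sem_lock(adapter, 2, 0);	/* sem index chosen for illustration */
	if (err)
		return err;

	/* ... touch the protected resource here ... */

	qlcnic_pcie_sem_unlock(adapter, 2);
	return 0;
}
#endif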
329static int
330qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
331 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
332{
333 u32 i, producer, consumer;
334 struct qlcnic_cmd_buffer *pbuf;
335 struct cmd_desc_type0 *cmd_desc;
336 struct qlcnic_host_tx_ring *tx_ring;
337
338 i = 0;
339
340 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
341 return -EIO;
342
343 tx_ring = adapter->tx_ring;
344 __netif_tx_lock_bh(tx_ring->txq);
345
346 producer = tx_ring->producer;
347 consumer = tx_ring->sw_consumer;
348
349 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
350 netif_tx_stop_queue(tx_ring->txq);
351 __netif_tx_unlock_bh(tx_ring->txq);
352 return -EBUSY;
353 }
354
355 do {
356 cmd_desc = &cmd_desc_arr[i];
357
358 pbuf = &tx_ring->cmd_buf_arr[producer];
359 pbuf->skb = NULL;
360 pbuf->frag_count = 0;
361
362 memcpy(&tx_ring->desc_head[producer],
363 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
364
365 producer = get_next_index(producer, tx_ring->num_desc);
366 i++;
367
368 } while (i != nr_desc);
369
370 tx_ring->producer = producer;
371
372 qlcnic_update_cmd_producer(adapter, tx_ring);
373
374 __netif_tx_unlock_bh(tx_ring->txq);
375
376 return 0;
377}
378
379static int
380qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
381 unsigned op)
382{
383 struct qlcnic_nic_req req;
384 struct qlcnic_mac_req *mac_req;
385 u64 word;
386
387 memset(&req, 0, sizeof(struct qlcnic_nic_req));
388 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
389
390 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
391 req.req_hdr = cpu_to_le64(word);
392
393 mac_req = (struct qlcnic_mac_req *)&req.words[0];
394 mac_req->op = op;
395 memcpy(mac_req->mac_addr, addr, 6);
396
397 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
398}
399
400static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
401 u8 *addr, struct list_head *del_list)
402{
403 struct list_head *head;
404 struct qlcnic_mac_list_s *cur;
405
406 /* look up if already exists */
407 list_for_each(head, del_list) {
408 cur = list_entry(head, struct qlcnic_mac_list_s, list);
409
410 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
411 list_move_tail(head, &adapter->mac_list);
412 return 0;
413 }
414 }
415
416 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
417 if (cur == NULL) {
418 dev_err(&adapter->netdev->dev,
419 "failed to add mac address filter\n");
420 return -ENOMEM;
421 }
422 memcpy(cur->mac_addr, addr, ETH_ALEN);
423 list_add_tail(&cur->list, &adapter->mac_list);
424
425 return qlcnic_sre_macaddr_change(adapter,
426 cur->mac_addr, QLCNIC_MAC_ADD);
427}
428
429void qlcnic_set_multi(struct net_device *netdev)
430{
431 struct qlcnic_adapter *adapter = netdev_priv(netdev);
432 struct dev_mc_list *mc_ptr;
433 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
434 u32 mode = VPORT_MISS_MODE_DROP;
435 LIST_HEAD(del_list);
436 struct list_head *head;
437 struct qlcnic_mac_list_s *cur;
438
439 list_splice_tail_init(&adapter->mac_list, &del_list);
440
441 qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
442 qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
443
444 if (netdev->flags & IFF_PROMISC) {
445 mode = VPORT_MISS_MODE_ACCEPT_ALL;
446 goto send_fw_cmd;
447 }
448
449 if ((netdev->flags & IFF_ALLMULTI) ||
450 (netdev->mc_count > adapter->max_mc_count)) {
451 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
452 goto send_fw_cmd;
453 }
454
455 if (netdev->mc_count > 0) {
456 for (mc_ptr = netdev->mc_list; mc_ptr;
457 mc_ptr = mc_ptr->next) {
458 qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
459 &del_list);
460 }
461 }
462
463send_fw_cmd:
464 qlcnic_nic_set_promisc(adapter, mode);
465 head = &del_list;
466 while (!list_empty(head)) {
467 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
468
469 qlcnic_sre_macaddr_change(adapter,
470 cur->mac_addr, QLCNIC_MAC_DEL);
471 list_del(&cur->list);
472 kfree(cur);
473 }
474}
475
476int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
477{
478 struct qlcnic_nic_req req;
479 u64 word;
480
481 memset(&req, 0, sizeof(struct qlcnic_nic_req));
482
483 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
484
485 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
486 ((u64)adapter->portnum << 16);
487 req.req_hdr = cpu_to_le64(word);
488
489 req.words[0] = cpu_to_le64(mode);
490
491 return qlcnic_send_cmd_descs(adapter,
492 (struct cmd_desc_type0 *)&req, 1);
493}
494
495void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
496{
497 struct qlcnic_mac_list_s *cur;
498 struct list_head *head = &adapter->mac_list;
499
500 while (!list_empty(head)) {
501 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
502 qlcnic_sre_macaddr_change(adapter,
503 cur->mac_addr, QLCNIC_MAC_DEL);
504 list_del(&cur->list);
505 kfree(cur);
506 }
507}
508
509#define QLCNIC_CONFIG_INTR_COALESCE 3
510
511/*
512 * Send the interrupt coalescing parameter set by ethtool to the card.
513 */
514int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
515{
516 struct qlcnic_nic_req req;
517 u64 word[6];
518 int rv, i;
519
520 memset(&req, 0, sizeof(struct qlcnic_nic_req));
521
522 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
523
524 word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
525 req.req_hdr = cpu_to_le64(word[0]);
526
527 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
528 for (i = 0; i < 6; i++)
529 req.words[i] = cpu_to_le64(word[i]);
530
531 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
532 if (rv != 0)
533 dev_err(&adapter->netdev->dev,
534 "Could not send interrupt coalescing parameters\n");
535
536 return rv;
537}
538
539int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
540{
541 struct qlcnic_nic_req req;
542 u64 word;
543 int rv;
544
545 if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
546 return 0;
547
548 memset(&req, 0, sizeof(struct qlcnic_nic_req));
549
550 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551
552 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
553 req.req_hdr = cpu_to_le64(word);
554
555 req.words[0] = cpu_to_le64(enable);
556
557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
558 if (rv != 0)
559 dev_err(&adapter->netdev->dev,
560 "Could not send configure hw lro request\n");
561
562 adapter->flags ^= QLCNIC_LRO_ENABLED;
563
564 return rv;
565}
566
567int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
568{
569 struct qlcnic_nic_req req;
570 u64 word;
571 int rv;
572
573 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
574 return 0;
575
576 memset(&req, 0, sizeof(struct qlcnic_nic_req));
577
578 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
579
580 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
581 ((u64)adapter->portnum << 16);
582 req.req_hdr = cpu_to_le64(word);
583
584 req.words[0] = cpu_to_le64(enable);
585
586 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
587 if (rv != 0)
588 dev_err(&adapter->netdev->dev,
589 "Could not send configure bridge mode request\n");
590
591 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
592
593 return rv;
594}
595
596
597#define RSS_HASHTYPE_IP_TCP 0x3
598
599int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
600{
601 struct qlcnic_nic_req req;
602 u64 word;
603 int i, rv;
604
605 const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
606 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
607 0x255b0ec26d5a56daULL };
608
609
610 memset(&req, 0, sizeof(struct qlcnic_nic_req));
611 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
612
613 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
614 req.req_hdr = cpu_to_le64(word);
615
616 /*
617 * RSS request:
618 * bits 3-0: hash_method
619 * 5-4: hash_type_ipv4
620 * 7-6: hash_type_ipv6
621 * 8: enable
622 * 9: use indirection table
623 * 47-10: reserved
624 * 63-48: indirection table mask
625 */
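	/*
	 * Editor's illustrative note, not part of the original patch: with
	 * enable = 1 the word built below evaluates to 0x00070000000001f0,
	 * i.e. TCP/IP hashing for both IPv4 and IPv6, RSS enabled, and an
	 * indirection table mask of 0x7.
	 */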
626 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
627 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
628 ((u64)(enable & 0x1) << 8) |
629 ((0x7ULL) << 48);
630 req.words[0] = cpu_to_le64(word);
631 for (i = 0; i < 5; i++)
632 req.words[i+1] = cpu_to_le64(key[i]);
633
634 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
635 if (rv != 0)
636 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
637
638 return rv;
639}
640
641int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
642{
643 struct qlcnic_nic_req req;
644 u64 word;
645 int rv;
646
647 memset(&req, 0, sizeof(struct qlcnic_nic_req));
648 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
649
650 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
651 req.req_hdr = cpu_to_le64(word);
652
653 req.words[0] = cpu_to_le64(cmd);
654 req.words[1] = cpu_to_le64(ip);
655
656 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
657 if (rv != 0)
658 dev_err(&adapter->netdev->dev,
659			"could not notify %s IP 0x%x request\n",
660 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
661
662 return rv;
663}
664
665int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
666{
667 struct qlcnic_nic_req req;
668 u64 word;
669 int rv;
670
671 memset(&req, 0, sizeof(struct qlcnic_nic_req));
672 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
673
674 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
675 req.req_hdr = cpu_to_le64(word);
676 req.words[0] = cpu_to_le64(enable | (enable << 8));
677
678 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
679 if (rv != 0)
680 dev_err(&adapter->netdev->dev,
681 "could not configure link notification\n");
682
683 return rv;
684}
685
686int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
687{
688 struct qlcnic_nic_req req;
689 u64 word;
690 int rv;
691
692 memset(&req, 0, sizeof(struct qlcnic_nic_req));
693 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
694
695 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
696 ((u64)adapter->portnum << 16) |
697			((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
698
699 req.req_hdr = cpu_to_le64(word);
700
701 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
702 if (rv != 0)
703 dev_err(&adapter->netdev->dev,
704				"could not clean up lro flows\n");
705
706 return rv;
707}
708
709/*
710 * qlcnic_change_mtu - Change the Maximum Transmission Unit
711 * @returns 0 on success, negative on failure
712 */
713
714int qlcnic_change_mtu(struct net_device *netdev, int mtu)
715{
716 struct qlcnic_adapter *adapter = netdev_priv(netdev);
717 int rc = 0;
718
719 if (mtu > P3_MAX_MTU) {
720 dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
721 P3_MAX_MTU);
722 return -EINVAL;
723 }
724
725 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
726
727 if (!rc)
728 netdev->mtu = mtu;
729
730 return rc;
731}
732
733int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
734{
735 u32 crbaddr, mac_hi, mac_lo;
736 int pci_func = adapter->ahw.pci_func;
737
738 crbaddr = CRB_MAC_BLOCK_START +
739 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
740
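	/*
	 * Editor's illustrative note, not part of the original patch: each
	 * pair of PCI functions shares a 3-dword block, so pci_func 0/1
	 * read from offsets 0/4 of CRB_MAC_BLOCK_START and pci_func 2/3
	 * from offsets 12/16; for odd functions the MAC straddles a dword
	 * boundary, hence the extra 16-bit shifts below.
	 */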
741 mac_lo = QLCRD32(adapter, crbaddr);
742 mac_hi = QLCRD32(adapter, crbaddr+4);
743
744 if (pci_func & 1)
745 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
746 else
747 *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
748
749 return 0;
750}
751
752/*
753 * Changes the CRB window to the specified window.
754 */
755 /* Returns < 0 if off is not valid,
756 * 1 if window access is needed. 'off' is set to offset from
757 * CRB space in 128M pci map
758 * 0 if no window access is needed. 'off' is set to 2M addr
759 * In: 'off' is offset from base in 128M pci map
760 */
761static int
762qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
763 ulong off, void __iomem **addr)
764{
765 const struct crb_128M_2M_sub_block_map *m;
766
767 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
768 return -EINVAL;
769
770 off -= QLCNIC_PCI_CRBSPACE;
771
772 /*
773 * Try direct map
774 */
775 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
776
777 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
778 *addr = adapter->ahw.pci_base0 + m->start_2M +
779 (off - m->start_128M);
780 return 0;
781 }
782
783 /*
784 * Not in direct map, use crb window
785 */
786 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
787 return 1;
788}
789
790/*
791 * In: 'off' is offset from CRB space in 128M pci map
792 * Out: 'off' is 2M pci map addr
793 * side effect: lock crb window
794 */
795static void
796qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
797{
798 u32 window;
799 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
800
801 off -= QLCNIC_PCI_CRBSPACE;
802
803 window = CRB_HI(off);
804
805 if (adapter->ahw.crb_win == window)
806 return;
807
808 writel(window, addr);
809 if (readl(addr) != window) {
810 if (printk_ratelimit())
811 dev_warn(&adapter->pdev->dev,
812 "failed to set CRB window to %d off 0x%lx\n",
813 window, off);
814 }
815 adapter->ahw.crb_win = window;
816}
817
818int
819qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
820{
821 unsigned long flags;
822 int rv;
823 void __iomem *addr = NULL;
824
825 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
826
827 if (rv == 0) {
828 writel(data, addr);
829 return 0;
830 }
831
832 if (rv > 0) {
833 /* indirect access */
834 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
835 crb_win_lock(adapter);
836 qlcnic_pci_set_crbwindow_2M(adapter, off);
837 writel(data, addr);
838 crb_win_unlock(adapter);
839 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
840 return 0;
841 }
842
843 dev_err(&adapter->pdev->dev,
844 "%s: invalid offset: 0x%016lx\n", __func__, off);
845 dump_stack();
846 return -EIO;
847}
848
849u32
850qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
851{
852 unsigned long flags;
853 int rv;
854 u32 data;
855 void __iomem *addr = NULL;
856
857 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
858
859 if (rv == 0)
860 return readl(addr);
861
862 if (rv > 0) {
863 /* indirect access */
864 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
865 crb_win_lock(adapter);
866 qlcnic_pci_set_crbwindow_2M(adapter, off);
867 data = readl(addr);
868 crb_win_unlock(adapter);
869 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
870 return data;
871 }
872
873 dev_err(&adapter->pdev->dev,
874 "%s: invalid offset: 0x%016lx\n", __func__, off);
875 dump_stack();
876 return -1;
877}
878
879
880void __iomem *
881qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
882{
883 void __iomem *addr = NULL;
884
885 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
886
887 return addr;
888}
889
890
891static int
892qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
893 u64 addr, u32 *start)
894{
895 u32 window;
896 struct pci_dev *pdev = adapter->pdev;
897
898 if ((addr & 0x00ff800) == 0xff800) {
899 if (printk_ratelimit())
900 dev_warn(&pdev->dev, "QM access not handled\n");
901 return -EIO;
902 }
903
904 window = OCM_WIN_P3P(addr);
905
906 writel(window, adapter->ahw.ocm_win_crb);
907 /* read back to flush */
908 readl(adapter->ahw.ocm_win_crb);
909
910 adapter->ahw.ocm_win = window;
911 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
912 return 0;
913}
914
915static int
916qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
917 u64 *data, int op)
918{
919 void __iomem *addr, *mem_ptr = NULL;
920 resource_size_t mem_base;
921 int ret;
922 u32 start;
923
924 mutex_lock(&adapter->ahw.mem_lock);
925
926 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
927 if (ret != 0)
928 goto unlock;
929
930 addr = pci_base_offset(adapter, start);
931 if (addr)
932 goto noremap;
933
934 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
935
936 mem_ptr = ioremap(mem_base, PAGE_SIZE);
937 if (mem_ptr == NULL) {
938 ret = -EIO;
939 goto unlock;
940 }
941
942 addr = mem_ptr + (start & (PAGE_SIZE - 1));
943
944noremap:
945 if (op == 0) /* read */
946 *data = readq(addr);
947 else /* write */
948 writeq(*data, addr);
949
950unlock:
951 mutex_unlock(&adapter->ahw.mem_lock);
952
953 if (mem_ptr)
954 iounmap(mem_ptr);
955 return ret;
956}
957
958#define MAX_CTL_CHECK 1000
959
960int
961qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
962 u64 off, u64 data)
963{
964 int i, j, ret;
965 u32 temp, off8;
966 u64 stride;
967 void __iomem *mem_crb;
968
969 /* Only 64-bit aligned access */
970 if (off & 7)
971 return -EIO;
972
973	/* P3 onward, test agent base for MIU and SIU is the same */
974 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
975 QLCNIC_ADDR_QDR_NET_MAX_P3)) {
976 mem_crb = qlcnic_get_ioaddr(adapter,
977 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
978 goto correct;
979 }
980
981 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
982 mem_crb = qlcnic_get_ioaddr(adapter,
983 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
984 goto correct;
985 }
986
987 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
988 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
989
990 return -EIO;
991
992correct:
993 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
994
995 off8 = off & ~(stride-1);
996
997 mutex_lock(&adapter->ahw.mem_lock);
998
999 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1000 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1001
1002 i = 0;
1003 if (stride == 16) {
1004 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1005 writel((TA_CTL_START | TA_CTL_ENABLE),
1006 (mem_crb + TEST_AGT_CTRL));
1007
1008 for (j = 0; j < MAX_CTL_CHECK; j++) {
1009 temp = readl(mem_crb + TEST_AGT_CTRL);
1010 if ((temp & TA_CTL_BUSY) == 0)
1011 break;
1012 }
1013
1014 if (j >= MAX_CTL_CHECK) {
1015 ret = -EIO;
1016 goto done;
1017 }
1018
1019 i = (off & 0xf) ? 0 : 2;
1020 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1021 mem_crb + MIU_TEST_AGT_WRDATA(i));
1022 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1023 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1024 i = (off & 0xf) ? 2 : 0;
1025 }
1026
1027 writel(data & 0xffffffff,
1028 mem_crb + MIU_TEST_AGT_WRDATA(i));
1029 writel((data >> 32) & 0xffffffff,
1030 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1031
1032 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1033 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1034 (mem_crb + TEST_AGT_CTRL));
1035
1036 for (j = 0; j < MAX_CTL_CHECK; j++) {
1037 temp = readl(mem_crb + TEST_AGT_CTRL);
1038 if ((temp & TA_CTL_BUSY) == 0)
1039 break;
1040 }
1041
1042 if (j >= MAX_CTL_CHECK) {
1043 if (printk_ratelimit())
1044 dev_err(&adapter->pdev->dev,
1045 "failed to write through agent\n");
1046 ret = -EIO;
1047 } else
1048 ret = 0;
1049
1050done:
1051 mutex_unlock(&adapter->ahw.mem_lock);
1052
1053 return ret;
1054}
1055
1056int
1057qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1058 u64 off, u64 *data)
1059{
1060 int j, ret;
1061 u32 temp, off8;
1062 u64 val, stride;
1063 void __iomem *mem_crb;
1064
1065 /* Only 64-bit aligned access */
1066 if (off & 7)
1067 return -EIO;
1068
1069 /* P3 onward, test agent base for MIU and SIU is same */
1070 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1071 QLCNIC_ADDR_QDR_NET_MAX_P3)) {
1072 mem_crb = qlcnic_get_ioaddr(adapter,
1073 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1074 goto correct;
1075 }
1076
1077 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1078 mem_crb = qlcnic_get_ioaddr(adapter,
1079 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1080 goto correct;
1081 }
1082
1083 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1084 return qlcnic_pci_mem_access_direct(adapter,
1085 off, data, 0);
1086 }
1087
1088 return -EIO;
1089
1090correct:
1091 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1092
1093 off8 = off & ~(stride-1);
1094
1095 mutex_lock(&adapter->ahw.mem_lock);
1096
1097 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1098 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1099 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1100 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1101
1102 for (j = 0; j < MAX_CTL_CHECK; j++) {
1103 temp = readl(mem_crb + TEST_AGT_CTRL);
1104 if ((temp & TA_CTL_BUSY) == 0)
1105 break;
1106 }
1107
1108 if (j >= MAX_CTL_CHECK) {
1109 if (printk_ratelimit())
1110 dev_err(&adapter->pdev->dev,
1111 "failed to read through agent\n");
1112 ret = -EIO;
1113 } else {
1114 off8 = MIU_TEST_AGT_RDDATA_LO;
1115 if ((stride == 16) && (off & 0xf))
1116 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1117
1118 temp = readl(mem_crb + off8 + 4);
1119 val = (u64)temp << 32;
1120 val |= readl(mem_crb + off8);
1121 *data = val;
1122 ret = 0;
1123 }
1124
1125 mutex_unlock(&adapter->ahw.mem_lock);
1126
1127 return ret;
1128}
1129
1130int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1131{
1132 int offset, board_type, magic;
1133 struct pci_dev *pdev = adapter->pdev;
1134
1135 offset = QLCNIC_FW_MAGIC_OFFSET;
1136 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1137 return -EIO;
1138
1139 if (magic != QLCNIC_BDINFO_MAGIC) {
1140 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1141 magic);
1142 return -EIO;
1143 }
1144
1145 offset = QLCNIC_BRDTYPE_OFFSET;
1146 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1147 return -EIO;
1148
1149 adapter->ahw.board_type = board_type;
1150
1151 if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
1152 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1153 if ((gpio & 0x8000) == 0)
1154 board_type = QLCNIC_BRDTYPE_P3_10G_TP;
1155 }
1156
1157 switch (board_type) {
1158 case QLCNIC_BRDTYPE_P3_HMEZ:
1159 case QLCNIC_BRDTYPE_P3_XG_LOM:
1160 case QLCNIC_BRDTYPE_P3_10G_CX4:
1161 case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
1162 case QLCNIC_BRDTYPE_P3_IMEZ:
1163 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
1164 case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
1165 case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
1166 case QLCNIC_BRDTYPE_P3_10G_XFP:
1167 case QLCNIC_BRDTYPE_P3_10000_BASE_T:
1168 adapter->ahw.port_type = QLCNIC_XGBE;
1169 break;
1170 case QLCNIC_BRDTYPE_P3_REF_QG:
1171 case QLCNIC_BRDTYPE_P3_4_GB:
1172 case QLCNIC_BRDTYPE_P3_4_GB_MM:
1173 adapter->ahw.port_type = QLCNIC_GBE;
1174 break;
1175 case QLCNIC_BRDTYPE_P3_10G_TP:
1176 adapter->ahw.port_type = (adapter->portnum < 2) ?
1177 QLCNIC_XGBE : QLCNIC_GBE;
1178 break;
1179 default:
1180 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1181 adapter->ahw.port_type = QLCNIC_XGBE;
1182 break;
1183 }
1184
1185 return 0;
1186}
1187
1188int
1189qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1190{
1191 u32 wol_cfg;
1192
1193 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1194 if (wol_cfg & (1UL << adapter->portnum)) {
1195 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1196 if (wol_cfg & (1 << adapter->portnum))
1197 return 1;
1198 }
1199
1200 return 0;
1201}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..7ae8bcc1e439
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1466 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
27#include "qlcnic.h"
28
29struct crb_addr_pair {
30 u32 addr;
31 u32 data;
32};
33
34#define QLCNIC_MAX_CRB_XFORM 60
35static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
36
37#define crb_addr_transform(name) \
38 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
39 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
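/*
 * Editor's illustrative note, not part of the original patch: the macro
 * above expands crb_addr_transform(NIU), for example, into
 *	crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_NIU] =
 *		QLCNIC_HW_CRB_HUB_AGT_ADR_NIU << 20;
 * so each table slot records the hub/agent id in the top 12 bits.
 */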
40
41#define QLCNIC_ADDR_ERROR (0xffffffff)
42
43static void
44qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
45 struct qlcnic_host_rds_ring *rds_ring);
46
47static void crb_addr_transform_setup(void)
48{
49 crb_addr_transform(XDMA);
50 crb_addr_transform(TIMR);
51 crb_addr_transform(SRE);
52 crb_addr_transform(SQN3);
53 crb_addr_transform(SQN2);
54 crb_addr_transform(SQN1);
55 crb_addr_transform(SQN0);
56 crb_addr_transform(SQS3);
57 crb_addr_transform(SQS2);
58 crb_addr_transform(SQS1);
59 crb_addr_transform(SQS0);
60 crb_addr_transform(RPMX7);
61 crb_addr_transform(RPMX6);
62 crb_addr_transform(RPMX5);
63 crb_addr_transform(RPMX4);
64 crb_addr_transform(RPMX3);
65 crb_addr_transform(RPMX2);
66 crb_addr_transform(RPMX1);
67 crb_addr_transform(RPMX0);
68 crb_addr_transform(ROMUSB);
69 crb_addr_transform(SN);
70 crb_addr_transform(QMN);
71 crb_addr_transform(QMS);
72 crb_addr_transform(PGNI);
73 crb_addr_transform(PGND);
74 crb_addr_transform(PGN3);
75 crb_addr_transform(PGN2);
76 crb_addr_transform(PGN1);
77 crb_addr_transform(PGN0);
78 crb_addr_transform(PGSI);
79 crb_addr_transform(PGSD);
80 crb_addr_transform(PGS3);
81 crb_addr_transform(PGS2);
82 crb_addr_transform(PGS1);
83 crb_addr_transform(PGS0);
84 crb_addr_transform(PS);
85 crb_addr_transform(PH);
86 crb_addr_transform(NIU);
87 crb_addr_transform(I2Q);
88 crb_addr_transform(EG);
89 crb_addr_transform(MN);
90 crb_addr_transform(MS);
91 crb_addr_transform(CAS2);
92 crb_addr_transform(CAS1);
93 crb_addr_transform(CAS0);
94 crb_addr_transform(CAM);
95 crb_addr_transform(C2C1);
96 crb_addr_transform(C2C0);
97 crb_addr_transform(SMB);
98 crb_addr_transform(OCM0);
99 crb_addr_transform(I2C0);
100}
101
102void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
103{
104 struct qlcnic_recv_context *recv_ctx;
105 struct qlcnic_host_rds_ring *rds_ring;
106 struct qlcnic_rx_buffer *rx_buf;
107 int i, ring;
108
109 recv_ctx = &adapter->recv_ctx;
110 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
111 rds_ring = &recv_ctx->rds_rings[ring];
112 for (i = 0; i < rds_ring->num_desc; ++i) {
113 rx_buf = &(rds_ring->rx_buf_arr[i]);
114 if (rx_buf->state == QLCNIC_BUFFER_FREE)
115 continue;
116 pci_unmap_single(adapter->pdev,
117 rx_buf->dma,
118 rds_ring->dma_size,
119 PCI_DMA_FROMDEVICE);
120 if (rx_buf->skb != NULL)
121 dev_kfree_skb_any(rx_buf->skb);
122 }
123 }
124}
125
126void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
127{
128 struct qlcnic_cmd_buffer *cmd_buf;
129 struct qlcnic_skb_frag *buffrag;
130 int i, j;
131 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
132
133 cmd_buf = tx_ring->cmd_buf_arr;
134 for (i = 0; i < tx_ring->num_desc; i++) {
135 buffrag = cmd_buf->frag_array;
136 if (buffrag->dma) {
137 pci_unmap_single(adapter->pdev, buffrag->dma,
138 buffrag->length, PCI_DMA_TODEVICE);
139 buffrag->dma = 0ULL;
140 }
141 for (j = 0; j < cmd_buf->frag_count; j++) {
142 buffrag++;
143 if (buffrag->dma) {
144 pci_unmap_page(adapter->pdev, buffrag->dma,
145 buffrag->length,
146 PCI_DMA_TODEVICE);
147 buffrag->dma = 0ULL;
148 }
149 }
150 if (cmd_buf->skb) {
151 dev_kfree_skb_any(cmd_buf->skb);
152 cmd_buf->skb = NULL;
153 }
154 cmd_buf++;
155 }
156}
157
158void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
159{
160 struct qlcnic_recv_context *recv_ctx;
161 struct qlcnic_host_rds_ring *rds_ring;
162 struct qlcnic_host_tx_ring *tx_ring;
163 int ring;
164
165 recv_ctx = &adapter->recv_ctx;
166
167 if (recv_ctx->rds_rings == NULL)
168 goto skip_rds;
169
170 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
171 rds_ring = &recv_ctx->rds_rings[ring];
172 vfree(rds_ring->rx_buf_arr);
173 rds_ring->rx_buf_arr = NULL;
174 }
175 kfree(recv_ctx->rds_rings);
176
177skip_rds:
178 if (adapter->tx_ring == NULL)
179 return;
180
181 tx_ring = adapter->tx_ring;
182 vfree(tx_ring->cmd_buf_arr);
183 kfree(adapter->tx_ring);
184}
185
186int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_sds_ring *sds_ring;
191 struct qlcnic_host_tx_ring *tx_ring;
192 struct qlcnic_rx_buffer *rx_buf;
193 int ring, i, size;
194
195 struct qlcnic_cmd_buffer *cmd_buf_arr;
196 struct net_device *netdev = adapter->netdev;
197
198 size = sizeof(struct qlcnic_host_tx_ring);
199 tx_ring = kzalloc(size, GFP_KERNEL);
200 if (tx_ring == NULL) {
201 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
202 return -ENOMEM;
203 }
204 adapter->tx_ring = tx_ring;
205
206 tx_ring->num_desc = adapter->num_txd;
207 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
208
209 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
210 if (cmd_buf_arr == NULL) {
211 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
212 return -ENOMEM;
213 }
214 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
215 tx_ring->cmd_buf_arr = cmd_buf_arr;
216
217 recv_ctx = &adapter->recv_ctx;
218
219 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
220 rds_ring = kzalloc(size, GFP_KERNEL);
221 if (rds_ring == NULL) {
222 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
223 return -ENOMEM;
224 }
225 recv_ctx->rds_rings = rds_ring;
226
227 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
228 rds_ring = &recv_ctx->rds_rings[ring];
229 switch (ring) {
230 case RCV_RING_NORMAL:
231 rds_ring->num_desc = adapter->num_rxd;
232 if (adapter->ahw.cut_through) {
233 rds_ring->dma_size =
234 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
235 rds_ring->skb_size =
236 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
237 } else {
238 rds_ring->dma_size =
239 QLCNIC_P3_RX_BUF_MAX_LEN;
240 rds_ring->skb_size =
241 rds_ring->dma_size + NET_IP_ALIGN;
242 }
243 break;
244
245 case RCV_RING_JUMBO:
246 rds_ring->num_desc = adapter->num_jumbo_rxd;
247 rds_ring->dma_size =
248 QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
249
250 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
252
253 rds_ring->skb_size =
254 rds_ring->dma_size + NET_IP_ALIGN;
255 break;
256
257 case RCV_RING_LRO:
258 rds_ring->num_desc = adapter->num_lro_rxd;
259 rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
260 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
261 break;
262
263 }
264 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
265 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
266 if (rds_ring->rx_buf_arr == NULL) {
267 dev_err(&netdev->dev, "Failed to allocate "
268 "rx buffer ring %d\n", ring);
269 goto err_out;
270 }
271 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
272 INIT_LIST_HEAD(&rds_ring->free_list);
273 /*
274 * Now go through all of them, set reference handles
275 * and put them in the queues.
276 */
277 rx_buf = rds_ring->rx_buf_arr;
278 for (i = 0; i < rds_ring->num_desc; i++) {
279 list_add_tail(&rx_buf->list,
280 &rds_ring->free_list);
281 rx_buf->ref_handle = i;
282 rx_buf->state = QLCNIC_BUFFER_FREE;
283 rx_buf++;
284 }
285 spin_lock_init(&rds_ring->lock);
286 }
287
288 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
289 sds_ring = &recv_ctx->sds_rings[ring];
290 sds_ring->irq = adapter->msix_entries[ring].vector;
291 sds_ring->adapter = adapter;
292 sds_ring->num_desc = adapter->num_rxd;
293
294 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
295 INIT_LIST_HEAD(&sds_ring->free_list[i]);
296 }
297
298 return 0;
299
300err_out:
301 qlcnic_free_sw_resources(adapter);
302 return -ENOMEM;
303}
304
305/*
306 * Utility to translate from internal Phantom CRB address
307 * to external PCI CRB address.
308 */
309static u32 qlcnic_decode_crb_addr(u32 addr)
310{
311 int i;
312 u32 base_addr, offset, pci_base;
313
314 crb_addr_transform_setup();
315
316 pci_base = QLCNIC_ADDR_ERROR;
317 base_addr = addr & 0xfff00000;
318 offset = addr & 0x000fffff;
319
320 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
321 if (crb_addr_xform[i] == base_addr) {
322 pci_base = i << 20;
323 break;
324 }
325 }
326 if (pci_base == QLCNIC_ADDR_ERROR)
327 return pci_base;
328 else
329 return pci_base + offset;
330}
331
332#define QLCNIC_MAX_ROM_WAIT_USEC 100
333
334static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
335{
336 long timeout = 0;
337 long done = 0;
338
339 cond_resched();
340
341 while (done == 0) {
342 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
343 done &= 2;
344 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
345 dev_err(&adapter->pdev->dev,
346				"Timeout reached waiting for rom done\n");
347 return -EIO;
348 }
349 udelay(1);
350 }
351 return 0;
352}
353
354static int do_rom_fast_read(struct qlcnic_adapter *adapter,
355 int addr, int *valp)
356{
357 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
358 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
359 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
360 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
361 if (qlcnic_wait_rom_done(adapter)) {
362 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
363 return -EIO;
364 }
365 /* reset abyte_cnt and dummy_byte_cnt */
366 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
367 udelay(10);
368 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
369
370 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
371 return 0;
372}
373
374static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
375 u8 *bytes, size_t size)
376{
377 int addridx;
378 int ret = 0;
379
380 for (addridx = addr; addridx < (addr + size); addridx += 4) {
381 int v;
382 ret = do_rom_fast_read(adapter, addridx, &v);
383 if (ret != 0)
384 break;
385 *(__le32 *)bytes = cpu_to_le32(v);
386 bytes += 4;
387 }
388
389 return ret;
390}
391
392int
393qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
394 u8 *bytes, size_t size)
395{
396 int ret;
397
398 ret = qlcnic_rom_lock(adapter);
399 if (ret < 0)
400 return ret;
401
402 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
403
404 qlcnic_rom_unlock(adapter);
405 return ret;
406}
407
408int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
409{
410 int ret;
411
412 if (qlcnic_rom_lock(adapter) != 0)
413 return -EIO;
414
415 ret = do_rom_fast_read(adapter, addr, valp);
416 qlcnic_rom_unlock(adapter);
417 return ret;
418}
419
420int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
421{
422 int addr, val;
423 int i, n, init_delay;
424 struct crb_addr_pair *buf;
425 unsigned offset;
426 u32 off;
427 struct pci_dev *pdev = adapter->pdev;
428
429 /* resetall */
430 qlcnic_rom_lock(adapter);
431 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
432 qlcnic_rom_unlock(adapter);
433
434 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
435 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
436		dev_err(&pdev->dev, "Error reading crb_init area: val:%x\n", n);
437 return -EIO;
438 }
439 offset = n & 0xffffU;
440 n = (n >> 16) & 0xffffU;
441
442 if (n >= 1024) {
443 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
444 return -EIO;
445 }
446
447 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
448 if (buf == NULL) {
449		dev_err(&pdev->dev, "Unable to allocate memory for rom read.\n");
450 return -ENOMEM;
451 }
452
453 for (i = 0; i < n; i++) {
454 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
455 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
456 kfree(buf);
457 return -EIO;
458 }
459
460 buf[i].addr = addr;
461 buf[i].data = val;
462 }
463
464 for (i = 0; i < n; i++) {
465
466 off = qlcnic_decode_crb_addr(buf[i].addr);
467 if (off == QLCNIC_ADDR_ERROR) {
468 dev_err(&pdev->dev, "CRB init value out of range %x\n",
469 buf[i].addr);
470 continue;
471 }
472 off += QLCNIC_PCI_CRBSPACE;
473
474 if (off & 1)
475 continue;
476
477 /* skipping cold reboot MAGIC */
478 if (off == QLCNIC_CAM_RAM(0x1fc))
479 continue;
480 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
481 continue;
482 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
483 continue;
484 if (off == (ROMUSB_GLB + 0xa8))
485 continue;
486 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
487 continue;
488 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
489 continue;
490 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
491 continue;
492 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
493 continue;
494 /* skip the function enable register */
495 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
496 continue;
497 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
498 continue;
499 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
500 continue;
501
502 init_delay = 1;
503 /* After writing this register, HW needs time for CRB */
504 /* to quiet down (else crb_window returns 0xffffffff) */
505 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
506 init_delay = 1000;
507
508 QLCWR32(adapter, off, buf[i].data);
509
510 msleep(init_delay);
511 }
512 kfree(buf);
513
514 /* p2dn replyCount */
515 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
516 /* disable_peg_cache 0 & 1*/
517 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
518 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
519
520 /* peg_clr_all */
521 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
522 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
523 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
524 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
525 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
526 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
527 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
528 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
529 return 0;
530}
531
532static int
533qlcnic_has_mn(struct qlcnic_adapter *adapter)
534{
535 u32 capability, flashed_ver;
536 capability = 0;
537
538 qlcnic_rom_fast_read(adapter,
539 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
540 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
541
542 if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
543
544 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
545 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
546 return 1;
547 }
548 return 0;
549}
550
551static
552struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
553{
554 u32 i;
555 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
556 __le32 entries = cpu_to_le32(directory->num_entries);
557
558 for (i = 0; i < entries; i++) {
559
560 __le32 offs = cpu_to_le32(directory->findex) +
561 (i * cpu_to_le32(directory->entry_size));
562 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
563
564 if (tab_type == section)
565 return (struct uni_table_desc *) &unirom[offs];
566 }
567
568 return NULL;
569}
570
571static int
572qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
573{
574 struct uni_table_desc *ptab_descr;
575 const u8 *unirom = adapter->fw->data;
576 u32 i;
577 __le32 entries;
578 int mn_present = qlcnic_has_mn(adapter);
579
580 ptab_descr = qlcnic_get_table_desc(unirom,
581 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
582 if (ptab_descr == NULL)
583 return -1;
584
585 entries = cpu_to_le32(ptab_descr->num_entries);
586nomn:
587 for (i = 0; i < entries; i++) {
588
589 __le32 flags, file_chiprev, offs;
590 u8 chiprev = adapter->ahw.revision_id;
591 u32 flagbit;
592
593 offs = cpu_to_le32(ptab_descr->findex) +
594 (i * cpu_to_le32(ptab_descr->entry_size));
595 flags = cpu_to_le32(*((int *)&unirom[offs] +
596 QLCNIC_UNI_FLAGS_OFF));
597 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
598 QLCNIC_UNI_CHIP_REV_OFF));
599
600 flagbit = mn_present ? 1 : 2;
601
602 if ((chiprev == file_chiprev) &&
603 ((1ULL << flagbit) & flags)) {
604 adapter->file_prd_off = offs;
605 return 0;
606 }
607 }
608 if (mn_present) {
609 mn_present = 0;
610 goto nomn;
611 }
612 return -1;
613}
614
615static
616struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
617 u32 section, u32 idx_offset)
618{
619 const u8 *unirom = adapter->fw->data;
620 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
621 idx_offset));
622 struct uni_table_desc *tab_desc;
623 __le32 offs;
624
625 tab_desc = qlcnic_get_table_desc(unirom, section);
626
627 if (tab_desc == NULL)
628 return NULL;
629
630 offs = cpu_to_le32(tab_desc->findex) +
631 (cpu_to_le32(tab_desc->entry_size) * idx);
632
633 return (struct uni_data_desc *)&unirom[offs];
634}
635
636static u8 *
637qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
638{
639 u32 offs = QLCNIC_BOOTLD_START;
640
641 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
642 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
643 QLCNIC_UNI_DIR_SECT_BOOTLD,
644 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
645
646 return (u8 *)&adapter->fw->data[offs];
647}
648
649static u8 *
650qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
651{
652 u32 offs = QLCNIC_IMAGE_START;
653
654 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
655 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
656 QLCNIC_UNI_DIR_SECT_FW,
657 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
658
659 return (u8 *)&adapter->fw->data[offs];
660}
661
662static __le32
663qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
664{
665 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
666 return cpu_to_le32((qlcnic_get_data_desc(adapter,
667 QLCNIC_UNI_DIR_SECT_FW,
668 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
669 else
670 return cpu_to_le32(
671 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
672}
673
674static __le32
675qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
676{
677 struct uni_data_desc *fw_data_desc;
678 const struct firmware *fw = adapter->fw;
679 __le32 major, minor, sub;
680 const u8 *ver_str;
681 int i, ret;
682
683 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
684 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
685
686 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
687 QLCNIC_UNI_FIRMWARE_IDX_OFF);
688 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
689 cpu_to_le32(fw_data_desc->size) - 17;
690
691 for (i = 0; i < 12; i++) {
692 if (!strncmp(&ver_str[i], "REV=", 4)) {
693 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
694 &major, &minor, &sub);
695 if (ret != 3)
696 return 0;
697 else
698 return major + (minor << 8) + (sub << 16);
699 }
700 }
701
702 return 0;
703}
704
705static __le32
706qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
707{
708 const struct firmware *fw = adapter->fw;
709 __le32 bios_ver, prd_off = adapter->file_prd_off;
710
711 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
712 return cpu_to_le32(
713 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
714
715 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
716 + QLCNIC_UNI_BIOS_VERSION_OFF));
717
718 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
719}
720
721int
722qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
723{
724 u32 count, old_count;
725 u32 val, version, major, minor, build;
726 int i, timeout;
727
728 if (adapter->need_fw_reset)
729 return 1;
730
731 /* last attempt had failed */
732 if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
733 return 1;
734
735 old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
736
737 for (i = 0; i < 10; i++) {
738
739 timeout = msleep_interruptible(200);
740 if (timeout) {
741 QLCWR32(adapter, CRB_CMDPEG_STATE,
742 PHAN_INITIALIZE_FAILED);
743 return -EINTR;
744 }
745
746 count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
747 if (count != old_count)
748 break;
749 }
750
751 /* firmware is dead */
752 if (count == old_count)
753 return 1;
754
755 /* check if we have got newer or different file firmware */
756 if (adapter->fw) {
757
758 val = qlcnic_get_fw_version(adapter);
759
760 version = QLCNIC_DECODE_VERSION(val);
761
762 major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
763 minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
764 build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
765
766 if (version > QLCNIC_VERSION_CODE(major, minor, build))
767 return 1;
768 }
769
770 return 0;
771}
772
773static const char *fw_name[] = {
774 QLCNIC_UNIFIED_ROMIMAGE_NAME,
775 QLCNIC_FLASH_ROMIMAGE_NAME,
776};
777
778int
779qlcnic_load_firmware(struct qlcnic_adapter *adapter)
780{
781 u64 *ptr64;
782 u32 i, flashaddr, size;
783 const struct firmware *fw = adapter->fw;
784 struct pci_dev *pdev = adapter->pdev;
785
786 dev_info(&pdev->dev, "loading firmware from %s\n",
787 fw_name[adapter->fw_type]);
788
789 if (fw) {
790 __le64 data;
791
792 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
793
794 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
795 flashaddr = QLCNIC_BOOTLD_START;
796
797 for (i = 0; i < size; i++) {
798 data = cpu_to_le64(ptr64[i]);
799
800 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
801 return -EIO;
802
803 flashaddr += 8;
804 }
805
806 size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
807
808 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
809 flashaddr = QLCNIC_IMAGE_START;
810
811 for (i = 0; i < size; i++) {
812 data = cpu_to_le64(ptr64[i]);
813
814 if (qlcnic_pci_mem_write_2M(adapter,
815 flashaddr, data))
816 return -EIO;
817
818 flashaddr += 8;
819 }
820 } else {
821 u64 data;
822 u32 hi, lo;
823
824 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
825 flashaddr = QLCNIC_BOOTLD_START;
826
827 for (i = 0; i < size; i++) {
828 if (qlcnic_rom_fast_read(adapter,
829 flashaddr, (int *)&lo) != 0)
830 return -EIO;
831 if (qlcnic_rom_fast_read(adapter,
832 flashaddr + 4, (int *)&hi) != 0)
833 return -EIO;
834
835 data = (((u64)hi << 32) | lo);
836
837 if (qlcnic_pci_mem_write_2M(adapter,
838 flashaddr, data))
839 return -EIO;
840
841 flashaddr += 8;
842 }
843 }
844 msleep(1);
845
846 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
847 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
848 return 0;
849}
850
851static int
852qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
853{
854 __le32 val;
855 u32 ver, min_ver, bios, min_size;
856 struct pci_dev *pdev = adapter->pdev;
857 const struct firmware *fw = adapter->fw;
858 u8 fw_type = adapter->fw_type;
859
860 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
861 if (qlcnic_set_product_offs(adapter))
862 return -EINVAL;
863
864 min_size = QLCNIC_UNI_FW_MIN_SIZE;
865 } else {
866 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
867 if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
868 return -EINVAL;
869
870 min_size = QLCNIC_FW_MIN_SIZE;
871 }
872
873 if (fw->size < min_size)
874 return -EINVAL;
875
876 val = qlcnic_get_fw_version(adapter);
877
878 min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
879
880 ver = QLCNIC_DECODE_VERSION(val);
881
882 if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
883 dev_err(&pdev->dev,
884 "%s: firmware version %d.%d.%d unsupported\n",
885 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
886 return -EINVAL;
887 }
888
889 val = qlcnic_get_bios_version(adapter);
890 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
891 if ((__force u32)val != bios) {
892 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
893 fw_name[fw_type]);
894 return -EINVAL;
895 }
896
897 /* check if flashed firmware is newer */
898 if (qlcnic_rom_fast_read(adapter,
899 QLCNIC_FW_VERSION_OFFSET, (int *)&val))
900 return -EIO;
901
902 val = QLCNIC_DECODE_VERSION(val);
903 if (val > ver) {
904 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
905 fw_name[fw_type]);
906 return -EINVAL;
907 }
908
909 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
910 return 0;
911}
912
913static void
914qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
915{
916 u8 fw_type;
917
918 switch (adapter->fw_type) {
919 case QLCNIC_UNKNOWN_ROMIMAGE:
920 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
921 break;
922
923 case QLCNIC_UNIFIED_ROMIMAGE:
924 default:
925 fw_type = QLCNIC_FLASH_ROMIMAGE;
926 break;
927 }
928
929 adapter->fw_type = fw_type;
930}
931
932
933
934void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
935{
936 struct pci_dev *pdev = adapter->pdev;
937 int rc;
938
939 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
940
941next:
942 qlcnic_get_next_fwtype(adapter);
943
944 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
945 adapter->fw = NULL;
946 } else {
947 rc = request_firmware(&adapter->fw,
948 fw_name[adapter->fw_type], &pdev->dev);
949 if (rc != 0)
950 goto next;
951
952 rc = qlcnic_validate_firmware(adapter);
953 if (rc != 0) {
954 release_firmware(adapter->fw);
955 msleep(1);
956 goto next;
957 }
958 }
959}
960
961
962void
963qlcnic_release_firmware(struct qlcnic_adapter *adapter)
964{
965 if (adapter->fw)
966 release_firmware(adapter->fw);
967 adapter->fw = NULL;
968}
969
970int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
971{
972 u32 val;
973 int retries = 60;
974
975 do {
976 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
977
978 switch (val) {
979 case PHAN_INITIALIZE_COMPLETE:
980 case PHAN_INITIALIZE_ACK:
981 return 0;
982 case PHAN_INITIALIZE_FAILED:
983 goto out_err;
984 default:
985 break;
986 }
987
988 msleep(500);
989
990 } while (--retries);
991
992 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
993
994out_err:
995 dev_err(&adapter->pdev->dev, "firmware init failed\n");
996 return -EIO;
997}
998
999static int
1000qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1001{
1002 u32 val;
1003 int retries = 2000;
1004
1005 do {
1006 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1007
1008 if (val == PHAN_PEG_RCV_INITIALIZED)
1009 return 0;
1010
1011 msleep(10);
1012
1013 } while (--retries);
1014
1015 if (!retries) {
1016 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1017 "complete, state: 0x%x.\n", val);
1018 return -EIO;
1019 }
1020
1021 return 0;
1022}
1023
1024int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1025{
1026 int err;
1027
1028 err = qlcnic_receive_peg_ready(adapter);
1029 if (err)
1030 return err;
1031
1032 QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
1033 QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1034 QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
1035 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1036
1037 return err;
1038}
1039
1040static void
1041qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1042 struct qlcnic_fw_msg *msg)
1043{
1044 u32 cable_OUI;
1045 u16 cable_len;
1046 u16 link_speed;
1047 u8 link_status, module, duplex, autoneg;
1048 struct net_device *netdev = adapter->netdev;
1049
1050 adapter->has_link_events = 1;
1051
1052 cable_OUI = msg->body[1] & 0xffffffff;
1053 cable_len = (msg->body[1] >> 32) & 0xffff;
1054 link_speed = (msg->body[1] >> 48) & 0xffff;
1055
1056 link_status = msg->body[2] & 0xff;
1057 duplex = (msg->body[2] >> 16) & 0xff;
1058 autoneg = (msg->body[2] >> 24) & 0xff;
1059
1060 module = (msg->body[2] >> 8) & 0xff;
1061 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1062 dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
1063 "length %d\n", cable_OUI, cable_len);
1064 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1065 dev_info(&netdev->dev, "unsupported cable length %d\n",
1066 cable_len);
1067
1068 qlcnic_advert_link_change(adapter, link_status);
1069
1070 if (duplex == LINKEVENT_FULL_DUPLEX)
1071 adapter->link_duplex = DUPLEX_FULL;
1072 else
1073 adapter->link_duplex = DUPLEX_HALF;
1074
1075 adapter->module_type = module;
1076 adapter->link_autoneg = autoneg;
1077 adapter->link_speed = link_speed;
1078}
1079
1080static void
1081qlcnic_handle_fw_message(int desc_cnt, int index,
1082 struct qlcnic_host_sds_ring *sds_ring)
1083{
1084 struct qlcnic_fw_msg msg;
1085 struct status_desc *desc;
1086 int i = 0, opcode;
1087
1088 while (desc_cnt > 0 && i < 8) {
1089 desc = &sds_ring->desc_head[index];
1090 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1091 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1092
1093 index = get_next_index(index, sds_ring->num_desc);
1094 desc_cnt--;
1095 }
1096
1097 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1098 switch (opcode) {
1099 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1100 qlcnic_handle_linkevent(sds_ring->adapter, &msg);
1101 break;
1102 default:
1103 break;
1104 }
1105}
1106
1107static int
1108qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1109 struct qlcnic_host_rds_ring *rds_ring,
1110 struct qlcnic_rx_buffer *buffer)
1111{
1112 struct sk_buff *skb;
1113 dma_addr_t dma;
1114 struct pci_dev *pdev = adapter->pdev;
1115
1116 buffer->skb = dev_alloc_skb(rds_ring->skb_size);
1117 if (!buffer->skb)
1118 return -ENOMEM;
1119
1120 skb = buffer->skb;
1121
1122 if (!adapter->ahw.cut_through)
1123		skb_reserve(skb, 2); /* align the IP header on a 4-byte boundary */
1124
1125 dma = pci_map_single(pdev, skb->data,
1126 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1127
1128 if (pci_dma_mapping_error(pdev, dma)) {
1129 dev_kfree_skb_any(skb);
1130 buffer->skb = NULL;
1131 return -ENOMEM;
1132 }
1133
1134 buffer->skb = skb;
1135 buffer->dma = dma;
1136 buffer->state = QLCNIC_BUFFER_BUSY;
1137
1138 return 0;
1139}
1140
1141static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1142 struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
1143{
1144 struct qlcnic_rx_buffer *buffer;
1145 struct sk_buff *skb;
1146
1147 buffer = &rds_ring->rx_buf_arr[index];
1148
1149 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1150 PCI_DMA_FROMDEVICE);
1151
1152 skb = buffer->skb;
1153 if (!skb)
1154 goto no_skb;
1155
1156 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1157 adapter->stats.csummed++;
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 } else {
1160 skb->ip_summed = CHECKSUM_NONE;
1161 }
1162
1163 skb->dev = adapter->netdev;
1164
1165 buffer->skb = NULL;
1166no_skb:
1167 buffer->state = QLCNIC_BUFFER_FREE;
1168 return skb;
1169}
1170
1171static struct qlcnic_rx_buffer *
1172qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1173 struct qlcnic_host_sds_ring *sds_ring,
1174 int ring, u64 sts_data0)
1175{
1176 struct net_device *netdev = adapter->netdev;
1177 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1178 struct qlcnic_rx_buffer *buffer;
1179 struct sk_buff *skb;
1180 struct qlcnic_host_rds_ring *rds_ring;
1181 int index, length, cksum, pkt_offset;
1182
1183 if (unlikely(ring >= adapter->max_rds_rings))
1184 return NULL;
1185
1186 rds_ring = &recv_ctx->rds_rings[ring];
1187
1188 index = qlcnic_get_sts_refhandle(sts_data0);
1189 if (unlikely(index >= rds_ring->num_desc))
1190 return NULL;
1191
1192 buffer = &rds_ring->rx_buf_arr[index];
1193
1194 length = qlcnic_get_sts_totallength(sts_data0);
1195 cksum = qlcnic_get_sts_status(sts_data0);
1196 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1197
1198 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1199 if (!skb)
1200 return buffer;
1201
1202 if (length > rds_ring->skb_size)
1203 skb_put(skb, rds_ring->skb_size);
1204 else
1205 skb_put(skb, length);
1206
1207 if (pkt_offset)
1208 skb_pull(skb, pkt_offset);
1209
1210 skb->truesize = skb->len + sizeof(struct sk_buff);
1211 skb->protocol = eth_type_trans(skb, netdev);
1212
1213 napi_gro_receive(&sds_ring->napi, skb);
1214
1215 adapter->stats.rx_pkts++;
1216 adapter->stats.rxbytes += length;
1217
1218 return buffer;
1219}
1220
1221#define QLC_TCP_HDR_SIZE 20
1222#define QLC_TCP_TS_OPTION_SIZE 12
1223#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1224
1225static struct qlcnic_rx_buffer *
1226qlcnic_process_lro(struct qlcnic_adapter *adapter,
1227 struct qlcnic_host_sds_ring *sds_ring,
1228 int ring, u64 sts_data0, u64 sts_data1)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1232 struct qlcnic_rx_buffer *buffer;
1233 struct sk_buff *skb;
1234 struct qlcnic_host_rds_ring *rds_ring;
1235 struct iphdr *iph;
1236 struct tcphdr *th;
1237 bool push, timestamp;
1238 int l2_hdr_offset, l4_hdr_offset;
1239 int index;
1240 u16 lro_length, length, data_offset;
1241 u32 seq_number;
1242
1243	if (unlikely(ring >= adapter->max_rds_rings))
1244 return NULL;
1245
1246 rds_ring = &recv_ctx->rds_rings[ring];
1247
1248 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1249	if (unlikely(index >= rds_ring->num_desc))
1250 return NULL;
1251
1252 buffer = &rds_ring->rx_buf_arr[index];
1253
1254 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1255 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1256 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1257 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1258 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1259 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1260
1261 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1262 if (!skb)
1263 return buffer;
1264
1265 if (timestamp)
1266 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1267 else
1268 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1269
1270 skb_put(skb, lro_length + data_offset);
1271
1272 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1273
1274 skb_pull(skb, l2_hdr_offset);
1275 skb->protocol = eth_type_trans(skb, netdev);
1276
1277 iph = (struct iphdr *)skb->data;
1278 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1279
1280 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1281 iph->tot_len = htons(length);
1282 iph->check = 0;
1283 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1284 th->psh = push;
1285 th->seq = htonl(seq_number);
1286
1287 length = skb->len;
1288
1289 netif_receive_skb(skb);
1290
1291 adapter->stats.lro_pkts++;
1292 adapter->stats.rxbytes += length;
1293
1294 return buffer;
1295}
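/*
 * In the LRO path above, data_offset is the offset from the start of the
 * buffer to the TCP payload: l4_hdr_offset plus 20 bytes for a plain TCP
 * header (QLC_TCP_HDR_SIZE), or plus 32 bytes when the timestamp option is
 * present (QLC_TCP_TS_HDR_SIZE = 20 + 12).  lro_length is the aggregated
 * payload size, so skb_put(skb, lro_length + data_offset) sets the full
 * frame length before the headers are fixed up.
 */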
1296
1297int
1298qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1299{
1300 struct qlcnic_adapter *adapter = sds_ring->adapter;
1301 struct list_head *cur;
1302 struct status_desc *desc;
1303 struct qlcnic_rx_buffer *rxbuf;
1304 u64 sts_data0, sts_data1;
1305
1306 int count = 0;
1307 int opcode, ring, desc_cnt;
1308 u32 consumer = sds_ring->consumer;
1309
1310 while (count < max) {
1311 desc = &sds_ring->desc_head[consumer];
1312 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1313
1314 if (!(sts_data0 & STATUS_OWNER_HOST))
1315 break;
1316
1317 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1318 opcode = qlcnic_get_sts_opcode(sts_data0);
1319
1320 switch (opcode) {
1321 case QLCNIC_RXPKT_DESC:
1322 case QLCNIC_OLD_RXPKT_DESC:
1323 case QLCNIC_SYN_OFFLOAD:
1324 ring = qlcnic_get_sts_type(sts_data0);
1325 rxbuf = qlcnic_process_rcv(adapter, sds_ring,
1326 ring, sts_data0);
1327 break;
1328 case QLCNIC_LRO_DESC:
1329 ring = qlcnic_get_lro_sts_type(sts_data0);
1330 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1331 rxbuf = qlcnic_process_lro(adapter, sds_ring,
1332 ring, sts_data0, sts_data1);
1333 break;
1334 case QLCNIC_RESPONSE_DESC:
1335			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); /* fall through */
1336 default:
1337 goto skip;
1338 }
1339
1340 WARN_ON(desc_cnt > 1);
1341
1342 if (rxbuf)
1343 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1344
1345skip:
1346 for (; desc_cnt > 0; desc_cnt--) {
1347 desc = &sds_ring->desc_head[consumer];
1348 desc->status_desc_data[0] =
1349 cpu_to_le64(STATUS_OWNER_PHANTOM);
1350 consumer = get_next_index(consumer, sds_ring->num_desc);
1351 }
1352 count++;
1353 }
1354
1355 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1356 struct qlcnic_host_rds_ring *rds_ring =
1357 &adapter->recv_ctx.rds_rings[ring];
1358
1359 if (!list_empty(&sds_ring->free_list[ring])) {
1360 list_for_each(cur, &sds_ring->free_list[ring]) {
1361 rxbuf = list_entry(cur,
1362 struct qlcnic_rx_buffer, list);
1363 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1364 }
1365 spin_lock(&rds_ring->lock);
1366 list_splice_tail_init(&sds_ring->free_list[ring],
1367 &rds_ring->free_list);
1368 spin_unlock(&rds_ring->lock);
1369 }
1370
1371 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1372 }
1373
1374 if (count) {
1375 sds_ring->consumer = consumer;
1376 writel(consumer, sds_ring->crb_sts_consumer);
1377 }
1378
1379 return count;
1380}
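/*
 * Ownership handshake, in brief: only descriptors flagged STATUS_OWNER_HOST
 * are consumed; once processed they are handed back to the firmware by
 * rewriting status_desc_data[0] with STATUS_OWNER_PHANTOM, and the updated
 * consumer index is written to crb_sts_consumer so the firmware can see how
 * far the host has advanced.
 */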
1381
1382void
1383qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1384 struct qlcnic_host_rds_ring *rds_ring)
1385{
1386 struct rcv_desc *pdesc;
1387 struct qlcnic_rx_buffer *buffer;
1388 int producer, count = 0;
1389 struct list_head *head;
1390
1391 producer = rds_ring->producer;
1392
1393 spin_lock(&rds_ring->lock);
1394 head = &rds_ring->free_list;
1395 while (!list_empty(head)) {
1396
1397 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1398
1399 if (!buffer->skb) {
1400 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1401 break;
1402 }
1403
1404 count++;
1405 list_del(&buffer->list);
1406
1407 /* make a rcv descriptor */
1408 pdesc = &rds_ring->desc_head[producer];
1409 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1410 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1411 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1412
1413 producer = get_next_index(producer, rds_ring->num_desc);
1414 }
1415 spin_unlock(&rds_ring->lock);
1416
1417 if (count) {
1418 rds_ring->producer = producer;
1419 writel((producer-1) & (rds_ring->num_desc-1),
1420 rds_ring->crb_rcv_producer);
1421 }
1422}
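/*
 * Note on the doorbell write above: (producer - 1) & (num_desc - 1) is the
 * index of the last descriptor actually posted, and the masking assumes
 * num_desc is a power of two.  For example, with num_desc = 1024 and
 * producer wrapped back to 0, the value written is 1023.
 */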
1423
1424static void
1425qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1426 struct qlcnic_host_rds_ring *rds_ring)
1427{
1428 struct rcv_desc *pdesc;
1429 struct qlcnic_rx_buffer *buffer;
1430 int producer, count = 0;
1431 struct list_head *head;
1432
1433 producer = rds_ring->producer;
1434 if (!spin_trylock(&rds_ring->lock))
1435 return;
1436
1437 head = &rds_ring->free_list;
1438 while (!list_empty(head)) {
1439
1440 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1441
1442 if (!buffer->skb) {
1443 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1444 break;
1445 }
1446
1447 count++;
1448 list_del(&buffer->list);
1449
1450 /* make a rcv descriptor */
1451 pdesc = &rds_ring->desc_head[producer];
1452 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1453 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1454 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1455
1456 producer = get_next_index(producer, rds_ring->num_desc);
1457 }
1458
1459 if (count) {
1460 rds_ring->producer = producer;
1461 writel((producer - 1) & (rds_ring->num_desc - 1),
1462 rds_ring->crb_rcv_producer);
1463 }
1464 spin_unlock(&rds_ring->lock);
1465}
1466
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..1698b6a68ed1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2604 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
62
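/*
 * Illustrative use of the parameters above (example values only):
 *
 *   modprobe qlcnic use_msi=1 use_msi_x=0 auto_fw_reset=1
 *
 * keeps plain MSI and automatic firmware reset while disabling MSI-X.
 */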
63static int __devinit qlcnic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65static void __devexit qlcnic_remove(struct pci_dev *pdev);
66static int qlcnic_open(struct net_device *netdev);
67static int qlcnic_close(struct net_device *netdev);
68static netdev_tx_t qlcnic_xmit_frame(struct sk_buff *,
69 struct net_device *);
70static void qlcnic_tx_timeout(struct net_device *netdev);
71static void qlcnic_tx_timeout_task(struct work_struct *work);
72static void qlcnic_attach_work(struct work_struct *work);
73static void qlcnic_fwinit_work(struct work_struct *work);
74static void qlcnic_fw_poll_work(struct work_struct *work);
75static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
76 work_func_t func, int delay);
77static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
78static int qlcnic_poll(struct napi_struct *napi, int budget);
79#ifdef CONFIG_NET_POLL_CONTROLLER
80static void qlcnic_poll_controller(struct net_device *netdev);
81#endif
82
83static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
86static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
87
88static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
89static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
90
91static irqreturn_t qlcnic_intr(int irq, void *data);
92static irqreturn_t qlcnic_msi_intr(int irq, void *data);
93static irqreturn_t qlcnic_msix_intr(int irq, void *data);
94
95static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
96static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
97
98/* PCI Device ID Table */
99#define ENTRY(device) \
100 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
102
103#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
104
105static const struct pci_device_id qlcnic_pci_tbl[] __devinitdata = {
106 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
107 {0,}
108};
109
110MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
111
112
113void
114qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
115 struct qlcnic_host_tx_ring *tx_ring)
116{
117 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
118
119 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
120 netif_stop_queue(adapter->netdev);
121 smp_mb();
122 }
123}
124
125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
157 return (recv_ctx->sds_rings == NULL);
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181 netif_napi_add(netdev, &sds_ring->napi,
182 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
183 }
184
185 return 0;
186}
187
188static void
189qlcnic_napi_del(struct qlcnic_adapter *adapter)
190{
191 int ring;
192 struct qlcnic_host_sds_ring *sds_ring;
193 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
194
195 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
196 sds_ring = &recv_ctx->sds_rings[ring];
197 netif_napi_del(&sds_ring->napi);
198 }
199
200 qlcnic_free_sds_rings(&adapter->recv_ctx);
201}
202
203static void
204qlcnic_napi_enable(struct qlcnic_adapter *adapter)
205{
206 int ring;
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
209
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 napi_enable(&sds_ring->napi);
213 qlcnic_enable_int(sds_ring);
214 }
215}
216
217static void
218qlcnic_napi_disable(struct qlcnic_adapter *adapter)
219{
220 int ring;
221 struct qlcnic_host_sds_ring *sds_ring;
222 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
223
224 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
225 sds_ring = &recv_ctx->sds_rings[ring];
226 qlcnic_disable_int(sds_ring);
227 napi_synchronize(&sds_ring->napi);
228 napi_disable(&sds_ring->napi);
229 }
230}
231
232static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
233{
234 memset(&adapter->stats, 0, sizeof(adapter->stats));
235 return;
236}
237
238static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
239{
240 struct pci_dev *pdev = adapter->pdev;
241 u64 mask, cmask;
242
243 adapter->pci_using_dac = 0;
244
245 mask = DMA_BIT_MASK(39);
246 cmask = mask;
247
248 if (pci_set_dma_mask(pdev, mask) == 0 &&
249 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
250 adapter->pci_using_dac = 1;
251 return 0;
252 }
253
254 return -EIO;
255}
256
257/* Update addressable range if firmware supports it */
258static int
259qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
260{
261 int change, shift, err;
262 u64 mask, old_mask, old_cmask;
263 struct pci_dev *pdev = adapter->pdev;
264
265 change = 0;
266
267 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
268 if (shift > 32)
269 return 0;
270
271 if (shift > 9)
272 change = 1;
273
274 if (change) {
275 old_mask = pdev->dma_mask;
276 old_cmask = pdev->dev.coherent_dma_mask;
277
278 mask = DMA_BIT_MASK(32+shift);
279
280 err = pci_set_dma_mask(pdev, mask);
281 if (err)
282 goto err_out;
283
284 err = pci_set_consistent_dma_mask(pdev, mask);
285 if (err)
286 goto err_out;
287 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
288 }
289
290 return 0;
291
292err_out:
293 pci_set_dma_mask(pdev, old_mask);
294 pci_set_consistent_dma_mask(pdev, old_cmask);
295 return err;
296}
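/*
 * Example of the widening above: the default mask set by
 * qlcnic_set_dma_mask() is 39 bits; if CRB_DMA_SHIFT reads back as, say,
 * 25, the mask is raised to DMA_BIT_MASK(32 + 25) = 57 bits.  Shift values
 * of 9 or less leave the mask unchanged, and values above 32 are ignored.
 */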
297
298static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
299{
300 u32 val, data;
301
302 val = adapter->ahw.board_type;
303 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
304 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
305 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
306 data = QLCNIC_PORT_MODE_802_3_AP;
307 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
308 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
309 data = QLCNIC_PORT_MODE_XG;
310 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
311 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
312 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
313 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
314 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
315 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
316 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
317 } else {
318 data = QLCNIC_PORT_MODE_AUTO_NEG;
319 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
320 }
321
322 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
323 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
324 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
325 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
326 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
327 }
328 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
329 }
330}
331
332static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
333{
334 u32 control;
335 int pos;
336
337 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
338 if (pos) {
339 pci_read_config_dword(pdev, pos, &control);
340 if (enable)
341 control |= PCI_MSIX_FLAGS_ENABLE;
342 else
343 control = 0;
344 pci_write_config_dword(pdev, pos, control);
345 }
346}
347
348static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
349{
350 int i;
351
352 for (i = 0; i < count; i++)
353 adapter->msix_entries[i].entry = i;
354}
355
356static int
357qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
358{
359 int i;
360 unsigned char *p;
361 u64 mac_addr;
362 struct net_device *netdev = adapter->netdev;
363 struct pci_dev *pdev = adapter->pdev;
364
365 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
366 return -EIO;
367
368 p = (unsigned char *)&mac_addr;
369 for (i = 0; i < 6; i++)
370 netdev->dev_addr[i] = *(p + 5 - i);
371
372 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
373 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
374
375 /* set station address */
376
377 if (!is_valid_ether_addr(netdev->perm_addr))
378 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
379 netdev->dev_addr);
380
381 return 0;
382}
383
384static int qlcnic_set_mac(struct net_device *netdev, void *p)
385{
386 struct qlcnic_adapter *adapter = netdev_priv(netdev);
387 struct sockaddr *addr = p;
388
389 if (!is_valid_ether_addr(addr->sa_data))
390 return -EINVAL;
391
392 if (netif_running(netdev)) {
393 netif_device_detach(netdev);
394 qlcnic_napi_disable(adapter);
395 }
396
397 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
398 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
399 qlcnic_set_multi(adapter->netdev);
400
401 if (netif_running(netdev)) {
402 netif_device_attach(netdev);
403 qlcnic_napi_enable(adapter);
404 }
405 return 0;
406}
407
408static const struct net_device_ops qlcnic_netdev_ops = {
409 .ndo_open = qlcnic_open,
410 .ndo_stop = qlcnic_close,
411 .ndo_start_xmit = qlcnic_xmit_frame,
412 .ndo_get_stats = qlcnic_get_stats,
413 .ndo_validate_addr = eth_validate_addr,
414 .ndo_set_multicast_list = qlcnic_set_multi,
415 .ndo_set_mac_address = qlcnic_set_mac,
416 .ndo_change_mtu = qlcnic_change_mtu,
417 .ndo_tx_timeout = qlcnic_tx_timeout,
418#ifdef CONFIG_NET_POLL_CONTROLLER
419 .ndo_poll_controller = qlcnic_poll_controller,
420#endif
421};
422
423static void
424qlcnic_setup_intr(struct qlcnic_adapter *adapter)
425{
426 const struct qlcnic_legacy_intr_set *legacy_intrp;
427 struct pci_dev *pdev = adapter->pdev;
428 int err, num_msix;
429
430 if (adapter->rss_supported) {
431 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
432 MSIX_ENTRIES_PER_ADAPTER : 2;
433 } else
434 num_msix = 1;
435
436 adapter->max_sds_rings = 1;
437
438 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
439
440 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
441
442 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
443 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
444 legacy_intrp->tgt_status_reg);
445 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
446 legacy_intrp->tgt_mask_reg);
447 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
448
449 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
450 ISR_INT_STATE_REG);
451
452 qlcnic_set_msix_bit(pdev, 0);
453
454 if (adapter->msix_supported) {
455
456 qlcnic_init_msix_entries(adapter, num_msix);
457 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
458 if (err == 0) {
459 adapter->flags |= QLCNIC_MSIX_ENABLED;
460 qlcnic_set_msix_bit(pdev, 1);
461
462 if (adapter->rss_supported)
463 adapter->max_sds_rings = num_msix;
464
465 dev_info(&pdev->dev, "using msi-x interrupts\n");
466 return;
467 }
468
469 if (err > 0)
470 pci_disable_msix(pdev);
471
472 /* fall through for msi */
473 }
474
475 if (use_msi && !pci_enable_msi(pdev)) {
476 adapter->flags |= QLCNIC_MSI_ENABLED;
477 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
478 msi_tgt_status[adapter->ahw.pci_func]);
479 dev_info(&pdev->dev, "using msi interrupts\n");
480 adapter->msix_entries[0].vector = pdev->irq;
481 return;
482 }
483
484 dev_info(&pdev->dev, "using legacy interrupts\n");
485 adapter->msix_entries[0].vector = pdev->irq;
486}
487
488static void
489qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
490{
491 if (adapter->flags & QLCNIC_MSIX_ENABLED)
492 pci_disable_msix(adapter->pdev);
493 if (adapter->flags & QLCNIC_MSI_ENABLED)
494 pci_disable_msi(adapter->pdev);
495}
496
497static void
498qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
499{
500 if (adapter->ahw.pci_base0 != NULL)
501 iounmap(adapter->ahw.pci_base0);
502}
503
504static int
505qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
506{
507 void __iomem *mem_ptr0 = NULL;
508 resource_size_t mem_base;
509 unsigned long mem_len, pci_len0 = 0;
510
511 struct pci_dev *pdev = adapter->pdev;
512 int pci_func = adapter->ahw.pci_func;
513
514 /*
515 * Set the CRB window to invalid. If any register in window 0 is
516 * accessed it should set the window to 0 and then reset it to 1.
517 */
518 adapter->ahw.crb_win = -1;
519 adapter->ahw.ocm_win = -1;
520
521 /* remap phys address */
522 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
523 mem_len = pci_resource_len(pdev, 0);
524
525 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
526
527 mem_ptr0 = pci_ioremap_bar(pdev, 0);
528 if (mem_ptr0 == NULL) {
529 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
530 return -EIO;
531 }
532 pci_len0 = mem_len;
533 } else {
534 return -EIO;
535 }
536
537 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
538
539 adapter->ahw.pci_base0 = mem_ptr0;
540 adapter->ahw.pci_len0 = pci_len0;
541
542 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
543 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
544
545 return 0;
546}
547
548static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
549{
550 struct pci_dev *pdev = adapter->pdev;
551 int i, found = 0;
552
553 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
554 if (qlcnic_boards[i].vendor == pdev->vendor &&
555 qlcnic_boards[i].device == pdev->device &&
556 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
557 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
558 strcpy(name, qlcnic_boards[i].short_name);
559 found = 1;
560 break;
561 }
562
563 }
564
565 if (!found)
566		strcpy(name, "Unknown");
567}
568
569static void
570qlcnic_check_options(struct qlcnic_adapter *adapter)
571{
572 u32 fw_major, fw_minor, fw_build;
573 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
574 char serial_num[32];
575 int i, offset, val;
576 int *ptr32;
577 struct pci_dev *pdev = adapter->pdev;
578
579 adapter->driver_mismatch = 0;
580
581 ptr32 = (int *)&serial_num;
582 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
583 for (i = 0; i < 8; i++) {
584 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
585 dev_err(&pdev->dev, "error reading board info\n");
586 adapter->driver_mismatch = 1;
587 return;
588 }
589 ptr32[i] = cpu_to_le32(val);
590 offset += sizeof(u32);
591 }
592
593 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
594 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
595 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
596
597 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
598
599 if (adapter->portnum == 0) {
600 get_brd_name(adapter, brd_name);
601
602 pr_info("%s: %s Board Chip rev 0x%x\n",
603 module_name(THIS_MODULE),
604 brd_name, adapter->ahw.revision_id);
605 }
606
607 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
608 adapter->driver_mismatch = 1;
609 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
610 fw_major, fw_minor, fw_build);
611 return;
612 }
613
614 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
615 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
616
617 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
618 fw_major, fw_minor, fw_build,
619 adapter->ahw.cut_through ? "cut-through" : "legacy");
620
621 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
622 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
623
624 adapter->flags &= ~QLCNIC_LRO_ENABLED;
625
626 if (adapter->ahw.port_type == QLCNIC_XGBE) {
627 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
628 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
629 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
630 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
631 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
632 }
633
634 adapter->msix_supported = !!use_msi_x;
635 adapter->rss_supported = !!use_msi_x;
636
637 adapter->num_txd = MAX_CMD_DESCRIPTORS;
638
639 adapter->num_lro_rxd = 0;
640 adapter->max_rds_rings = 2;
641}
642
643static int
644qlcnic_start_firmware(struct qlcnic_adapter *adapter)
645{
646 int val, err, first_boot;
647
648 err = qlcnic_set_dma_mask(adapter);
649 if (err)
650 return err;
651
652 if (!qlcnic_can_start_firmware(adapter))
653 goto wait_init;
654
655 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
656 if (first_boot == 0x55555555)
657 /* This is the first boot after power up */
658 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
659
660 qlcnic_request_firmware(adapter);
661
662 err = qlcnic_need_fw_reset(adapter);
663 if (err < 0)
664 goto err_out;
665 if (err == 0)
666 goto wait_init;
667
668 if (first_boot != 0x55555555) {
669 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
670 qlcnic_pinit_from_rom(adapter);
671 msleep(1);
672 }
673
674 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
675 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
676 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
677
678 qlcnic_set_port_mode(adapter);
679
680 err = qlcnic_load_firmware(adapter);
681 if (err)
682 goto err_out;
683
684 qlcnic_release_firmware(adapter);
685
686 val = (_QLCNIC_LINUX_MAJOR << 16)
687 | ((_QLCNIC_LINUX_MINOR << 8))
688 | (_QLCNIC_LINUX_SUBVERSION);
689 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
690
691wait_init:
692 /* Handshake with the card before we register the devices. */
693 err = qlcnic_phantom_init(adapter);
694 if (err)
695 goto err_out;
696
697 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
698
699 qlcnic_update_dma_mask(adapter);
700
701 qlcnic_check_options(adapter);
702
703 adapter->need_fw_reset = 0;
704
705 /* fall through and release firmware */
706
707err_out:
708 qlcnic_release_firmware(adapter);
709 return err;
710}
711
712static int
713qlcnic_request_irq(struct qlcnic_adapter *adapter)
714{
715 irq_handler_t handler;
716 struct qlcnic_host_sds_ring *sds_ring;
717 int err, ring;
718
719 unsigned long flags = 0;
720 struct net_device *netdev = adapter->netdev;
721 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
722
723 if (adapter->flags & QLCNIC_MSIX_ENABLED)
724 handler = qlcnic_msix_intr;
725 else if (adapter->flags & QLCNIC_MSI_ENABLED)
726 handler = qlcnic_msi_intr;
727 else {
728 flags |= IRQF_SHARED;
729 handler = qlcnic_intr;
730 }
731 adapter->irq = netdev->irq;
732
733 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
734 sds_ring = &recv_ctx->sds_rings[ring];
735 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
736 err = request_irq(sds_ring->irq, handler,
737 flags, sds_ring->name, sds_ring);
738 if (err)
739 return err;
740 }
741
742 return 0;
743}
744
745static void
746qlcnic_free_irq(struct qlcnic_adapter *adapter)
747{
748 int ring;
749 struct qlcnic_host_sds_ring *sds_ring;
750
751 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
752
753 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
754 sds_ring = &recv_ctx->sds_rings[ring];
755 free_irq(sds_ring->irq, sds_ring);
756 }
757}
758
759static void
760qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
761{
762 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
763 adapter->coal.normal.data.rx_time_us =
764 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
765 adapter->coal.normal.data.rx_packets =
766 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
767 adapter->coal.normal.data.tx_time_us =
768 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
769 adapter->coal.normal.data.tx_packets =
770 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
771}
772
773static int
774__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
775{
776 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
777 return -EIO;
778
779 qlcnic_set_multi(netdev);
780 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
781
782 adapter->ahw.linkup = 0;
783
784 if (adapter->max_sds_rings > 1)
785 qlcnic_config_rss(adapter, 1);
786
787 qlcnic_config_intr_coalesce(adapter);
788
789 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
790 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
791
792 qlcnic_napi_enable(adapter);
793
794 qlcnic_linkevent_request(adapter, 1);
795
796 set_bit(__QLCNIC_DEV_UP, &adapter->state);
797 return 0;
798}
799
800/* Usage: During resume and firmware recovery module.*/
801
802static int
803qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
804{
805 int err = 0;
806
807 rtnl_lock();
808 if (netif_running(netdev))
809 err = __qlcnic_up(adapter, netdev);
810 rtnl_unlock();
811
812 return err;
813}
814
815static void
816__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
817{
818 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
819 return;
820
821 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
822 return;
823
824 smp_mb();
825 spin_lock(&adapter->tx_clean_lock);
826 netif_carrier_off(netdev);
827 netif_tx_disable(netdev);
828
829 qlcnic_free_mac_list(adapter);
830
831 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
832
833 qlcnic_napi_disable(adapter);
834
835 qlcnic_release_tx_buffers(adapter);
836 spin_unlock(&adapter->tx_clean_lock);
837}
838
839/* Used during suspend and by the firmware recovery module. */
840
841static void
842qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
843{
844 rtnl_lock();
845 if (netif_running(netdev))
846 __qlcnic_down(adapter, netdev);
847 rtnl_unlock();
848
849}
850
851static int
852qlcnic_attach(struct qlcnic_adapter *adapter)
853{
854 struct net_device *netdev = adapter->netdev;
855 struct pci_dev *pdev = adapter->pdev;
856 int err, ring;
857 struct qlcnic_host_rds_ring *rds_ring;
858
859 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
860 return 0;
861
862 err = qlcnic_init_firmware(adapter);
863 if (err)
864 return err;
865
866 err = qlcnic_napi_add(adapter, netdev);
867 if (err)
868 return err;
869
870 err = qlcnic_alloc_sw_resources(adapter);
871 if (err) {
872 dev_err(&pdev->dev, "Error in setting sw resources\n");
873 return err;
874 }
875
876 err = qlcnic_alloc_hw_resources(adapter);
877 if (err) {
878 dev_err(&pdev->dev, "Error in setting hw resources\n");
879 goto err_out_free_sw;
880 }
881
882
883 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
884 rds_ring = &adapter->recv_ctx.rds_rings[ring];
885 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
886 }
887
888 err = qlcnic_request_irq(adapter);
889 if (err) {
890 dev_err(&pdev->dev, "failed to setup interrupt\n");
891 goto err_out_free_rxbuf;
892 }
893
894 qlcnic_init_coalesce_defaults(adapter);
895
896 qlcnic_create_sysfs_entries(adapter);
897
898 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
899 return 0;
900
901err_out_free_rxbuf:
902 qlcnic_release_rx_buffers(adapter);
903 qlcnic_free_hw_resources(adapter);
904err_out_free_sw:
905 qlcnic_free_sw_resources(adapter);
906 return err;
907}
908
909static void
910qlcnic_detach(struct qlcnic_adapter *adapter)
911{
912 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
913 return;
914
915 qlcnic_remove_sysfs_entries(adapter);
916
917 qlcnic_free_hw_resources(adapter);
918 qlcnic_release_rx_buffers(adapter);
919 qlcnic_free_irq(adapter);
920 qlcnic_napi_del(adapter);
921 qlcnic_free_sw_resources(adapter);
922
923 adapter->is_up = 0;
924}
925
926int
927qlcnic_reset_context(struct qlcnic_adapter *adapter)
928{
929 int err = 0;
930 struct net_device *netdev = adapter->netdev;
931
932 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
933 return -EBUSY;
934
935 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
936
937 netif_device_detach(netdev);
938
939 if (netif_running(netdev))
940 __qlcnic_down(adapter, netdev);
941
942 qlcnic_detach(adapter);
943
944 if (netif_running(netdev)) {
945 err = qlcnic_attach(adapter);
946 if (!err)
947 err = __qlcnic_up(adapter, netdev);
948
949 if (err)
950 goto done;
951 }
952
953 netif_device_attach(netdev);
954 }
955
956done:
957 clear_bit(__QLCNIC_RESETTING, &adapter->state);
958 return err;
959}
960
961static int
962qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
963 struct net_device *netdev)
964{
965 int err;
966 struct pci_dev *pdev = adapter->pdev;
967
968 adapter->rx_csum = 1;
969 adapter->mc_enabled = 0;
970 adapter->max_mc_count = 38;
971
972 netdev->netdev_ops = &qlcnic_netdev_ops;
973 netdev->watchdog_timeo = 2*HZ;
974
975 qlcnic_change_mtu(netdev, netdev->mtu);
976
977 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
978
979 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
980 netdev->features |= (NETIF_F_GRO);
981 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
982
983 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
984 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
985
986 if (adapter->pci_using_dac) {
987 netdev->features |= NETIF_F_HIGHDMA;
988 netdev->vlan_features |= NETIF_F_HIGHDMA;
989 }
990
991 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
992 netdev->features |= (NETIF_F_HW_VLAN_TX);
993
994 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
995 netdev->features |= NETIF_F_LRO;
996
997 netdev->irq = adapter->msix_entries[0].vector;
998
999 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1000
1001 if (qlcnic_read_mac_addr(adapter))
1002 dev_warn(&pdev->dev, "failed to read mac addr\n");
1003
1004 netif_carrier_off(netdev);
1005 netif_stop_queue(netdev);
1006
1007 err = register_netdev(netdev);
1008 if (err) {
1009 dev_err(&pdev->dev, "failed to register net device\n");
1010 return err;
1011 }
1012
1013 return 0;
1014}
1015
1016static int __devinit
1017qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1018{
1019 struct net_device *netdev = NULL;
1020 struct qlcnic_adapter *adapter = NULL;
1021 int err;
1022 int pci_func_id = PCI_FUNC(pdev->devfn);
1023 uint8_t revision_id;
1024
1025 err = pci_enable_device(pdev);
1026 if (err)
1027 return err;
1028
1029 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1030 err = -ENODEV;
1031 goto err_out_disable_pdev;
1032 }
1033
1034 err = pci_request_regions(pdev, qlcnic_driver_name);
1035 if (err)
1036 goto err_out_disable_pdev;
1037
1038 pci_set_master(pdev);
1039
1040 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1041 if (!netdev) {
1042 dev_err(&pdev->dev, "failed to allocate net_device\n");
1043 err = -ENOMEM;
1044 goto err_out_free_res;
1045 }
1046
1047 SET_NETDEV_DEV(netdev, &pdev->dev);
1048
1049 adapter = netdev_priv(netdev);
1050 adapter->netdev = netdev;
1051 adapter->pdev = pdev;
1052 adapter->ahw.pci_func = pci_func_id;
1053
1054 revision_id = pdev->revision;
1055 adapter->ahw.revision_id = revision_id;
1056
1057 rwlock_init(&adapter->ahw.crb_lock);
1058 mutex_init(&adapter->ahw.mem_lock);
1059
1060 spin_lock_init(&adapter->tx_clean_lock);
1061 INIT_LIST_HEAD(&adapter->mac_list);
1062
1063 err = qlcnic_setup_pci_map(adapter);
1064 if (err)
1065 goto err_out_free_netdev;
1066
1067 /* This will be reset for mezz cards */
1068 adapter->portnum = pci_func_id;
1069
1070 err = qlcnic_get_board_info(adapter);
1071 if (err) {
1072 dev_err(&pdev->dev, "Error getting board config info.\n");
1073 goto err_out_iounmap;
1074 }
1075
1076
1077 err = qlcnic_start_firmware(adapter);
1078 if (err)
1079 goto err_out_decr_ref;
1080
1081 /*
1082 * See if the firmware gave us a virtual-physical port mapping.
1083 */
1084 adapter->physical_port = adapter->portnum;
1085
1086 qlcnic_clear_stats(adapter);
1087
1088 qlcnic_setup_intr(adapter);
1089
1090 err = qlcnic_setup_netdev(adapter, netdev);
1091 if (err)
1092 goto err_out_disable_msi;
1093
1094 pci_set_drvdata(pdev, adapter);
1095
1096 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1097
1098 switch (adapter->ahw.port_type) {
1099 case QLCNIC_GBE:
1100 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1101 adapter->netdev->name);
1102 break;
1103 case QLCNIC_XGBE:
1104 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1105 adapter->netdev->name);
1106 break;
1107 }
1108
1109 qlcnic_create_diag_entries(adapter);
1110
1111 return 0;
1112
1113err_out_disable_msi:
1114 qlcnic_teardown_intr(adapter);
1115
1116err_out_decr_ref:
1117 qlcnic_clr_all_drv_state(adapter);
1118
1119err_out_iounmap:
1120 qlcnic_cleanup_pci_map(adapter);
1121
1122err_out_free_netdev:
1123 free_netdev(netdev);
1124
1125err_out_free_res:
1126 pci_release_regions(pdev);
1127
1128err_out_disable_pdev:
1129 pci_set_drvdata(pdev, NULL);
1130 pci_disable_device(pdev);
1131 return err;
1132}
1133
1134static void __devexit qlcnic_remove(struct pci_dev *pdev)
1135{
1136 struct qlcnic_adapter *adapter;
1137 struct net_device *netdev;
1138
1139 adapter = pci_get_drvdata(pdev);
1140 if (adapter == NULL)
1141 return;
1142
1143 netdev = adapter->netdev;
1144
1145 qlcnic_cancel_fw_work(adapter);
1146
1147 unregister_netdev(netdev);
1148
1149 cancel_work_sync(&adapter->tx_timeout_task);
1150
1151 qlcnic_detach(adapter);
1152
1153 qlcnic_clr_all_drv_state(adapter);
1154
1155 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1156
1157 qlcnic_teardown_intr(adapter);
1158
1159 qlcnic_remove_diag_entries(adapter);
1160
1161 qlcnic_cleanup_pci_map(adapter);
1162
1163 qlcnic_release_firmware(adapter);
1164
1165 pci_release_regions(pdev);
1166 pci_disable_device(pdev);
1167 pci_set_drvdata(pdev, NULL);
1168
1169 free_netdev(netdev);
1170}
1171static int __qlcnic_shutdown(struct pci_dev *pdev)
1172{
1173 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1174 struct net_device *netdev = adapter->netdev;
1175 int retval;
1176
1177 netif_device_detach(netdev);
1178
1179 qlcnic_cancel_fw_work(adapter);
1180
1181 if (netif_running(netdev))
1182 qlcnic_down(adapter, netdev);
1183
1184 cancel_work_sync(&adapter->tx_timeout_task);
1185
1186 qlcnic_detach(adapter);
1187
1188 qlcnic_clr_all_drv_state(adapter);
1189
1190 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1191
1192 retval = pci_save_state(pdev);
1193 if (retval)
1194 return retval;
1195
1196 if (qlcnic_wol_supported(adapter)) {
1197 pci_enable_wake(pdev, PCI_D3cold, 1);
1198 pci_enable_wake(pdev, PCI_D3hot, 1);
1199 }
1200
1201 return 0;
1202}
1203
1204static void qlcnic_shutdown(struct pci_dev *pdev)
1205{
1206 if (__qlcnic_shutdown(pdev))
1207 return;
1208
1209 pci_disable_device(pdev);
1210}
1211
1212#ifdef CONFIG_PM
1213static int
1214qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1215{
1216 int retval;
1217
1218 retval = __qlcnic_shutdown(pdev);
1219 if (retval)
1220 return retval;
1221
1222 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1223 return 0;
1224}
1225
1226static int
1227qlcnic_resume(struct pci_dev *pdev)
1228{
1229 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1230 struct net_device *netdev = adapter->netdev;
1231 int err;
1232
1233 err = pci_enable_device(pdev);
1234 if (err)
1235 return err;
1236
1237 pci_set_power_state(pdev, PCI_D0);
1238 pci_set_master(pdev);
1239 pci_restore_state(pdev);
1240
1241 adapter->ahw.crb_win = -1;
1242 adapter->ahw.ocm_win = -1;
1243
1244 err = qlcnic_start_firmware(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "failed to start firmware\n");
1247 return err;
1248 }
1249
1250 if (netif_running(netdev)) {
1251 err = qlcnic_attach(adapter);
1252 if (err)
1253 goto err_out;
1254
1255 err = qlcnic_up(adapter, netdev);
1256 if (err)
1257 goto err_out_detach;
1258
1259
1260 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1261 }
1262
1263 netif_device_attach(netdev);
1264 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1265 return 0;
1266
1267err_out_detach:
1268 qlcnic_detach(adapter);
1269err_out:
1270 qlcnic_clr_all_drv_state(adapter);
1271 return err;
1272}
1273#endif
1274
1275static int qlcnic_open(struct net_device *netdev)
1276{
1277 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1278 int err;
1279
1280 if (adapter->driver_mismatch)
1281 return -EIO;
1282
1283 err = qlcnic_attach(adapter);
1284 if (err)
1285 return err;
1286
1287 err = __qlcnic_up(adapter, netdev);
1288 if (err)
1289 goto err_out;
1290
1291 netif_start_queue(netdev);
1292
1293 return 0;
1294
1295err_out:
1296 qlcnic_detach(adapter);
1297 return err;
1298}
1299
1300/*
1301 * qlcnic_close - Disables a network interface entry point
1302 */
1303static int qlcnic_close(struct net_device *netdev)
1304{
1305 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1306
1307 __qlcnic_down(adapter, netdev);
1308 return 0;
1309}
1310
1311static void
1312qlcnic_tso_check(struct net_device *netdev,
1313 struct qlcnic_host_tx_ring *tx_ring,
1314 struct cmd_desc_type0 *first_desc,
1315 struct sk_buff *skb)
1316{
1317 u8 opcode = TX_ETHER_PKT;
1318 __be16 protocol = skb->protocol;
1319 u16 flags = 0, vid = 0;
1320 u32 producer;
1321 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1322 struct cmd_desc_type0 *hwdesc;
1323 struct vlan_ethhdr *vh;
1324
1325 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1326
1327 vh = (struct vlan_ethhdr *)skb->data;
1328 protocol = vh->h_vlan_encapsulated_proto;
1329 flags = FLAGS_VLAN_TAGGED;
1330
1331 } else if (vlan_tx_tag_present(skb)) {
1332
1333 flags = FLAGS_VLAN_OOB;
1334 vid = vlan_tx_tag_get(skb);
1335 qlcnic_set_tx_vlan_tci(first_desc, vid);
1336 vlan_oob = 1;
1337 }
1338
1339 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1340 skb_shinfo(skb)->gso_size > 0) {
1341
1342 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1343
1344 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1345 first_desc->total_hdr_length = hdr_len;
1346 if (vlan_oob) {
1347 first_desc->total_hdr_length += VLAN_HLEN;
1348 first_desc->tcp_hdr_offset = VLAN_HLEN;
1349 first_desc->ip_hdr_offset = VLAN_HLEN;
1350 /* Only in case of TSO on vlan device */
1351 flags |= FLAGS_VLAN_TAGGED;
1352 }
1353
1354 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1355 TX_TCP_LSO6 : TX_TCP_LSO;
1356 tso = 1;
1357
1358 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1359 u8 l4proto;
1360
1361 if (protocol == cpu_to_be16(ETH_P_IP)) {
1362 l4proto = ip_hdr(skb)->protocol;
1363
1364 if (l4proto == IPPROTO_TCP)
1365 opcode = TX_TCP_PKT;
1366 else if (l4proto == IPPROTO_UDP)
1367 opcode = TX_UDP_PKT;
1368 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1369 l4proto = ipv6_hdr(skb)->nexthdr;
1370
1371 if (l4proto == IPPROTO_TCP)
1372 opcode = TX_TCPV6_PKT;
1373 else if (l4proto == IPPROTO_UDP)
1374 opcode = TX_UDPV6_PKT;
1375 }
1376 }
1377
1378 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1379 first_desc->ip_hdr_offset += skb_network_offset(skb);
1380 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1381
1382 if (!tso)
1383 return;
1384
1385 /* For LSO, we need to copy the MAC/IP/TCP headers into
1386 * the descriptor ring
1387 */
1388 producer = tx_ring->producer;
1389 copied = 0;
1390 offset = 2;
1391
1392 if (vlan_oob) {
1393 /* Create a TSO vlan header template for firmware */
1394
1395 hwdesc = &tx_ring->desc_head[producer];
1396 tx_ring->cmd_buf_arr[producer].skb = NULL;
1397
1398 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1399 hdr_len + VLAN_HLEN);
1400
1401 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1402 skb_copy_from_linear_data(skb, vh, 12);
1403 vh->h_vlan_proto = htons(ETH_P_8021Q);
1404 vh->h_vlan_TCI = htons(vid);
1405 skb_copy_from_linear_data_offset(skb, 12,
1406 (char *)vh + 16, copy_len - 16);
1407
1408 copied = copy_len - VLAN_HLEN;
1409 offset = 0;
1410
1411 producer = get_next_index(producer, tx_ring->num_desc);
1412 }
1413
1414 while (copied < hdr_len) {
1415
1416 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1417 (hdr_len - copied));
1418
1419 hwdesc = &tx_ring->desc_head[producer];
1420 tx_ring->cmd_buf_arr[producer].skb = NULL;
1421
1422 skb_copy_from_linear_data_offset(skb, copied,
1423 (char *)hwdesc + offset, copy_len);
1424
1425 copied += copy_len;
1426 offset = 0;
1427
1428 producer = get_next_index(producer, tx_ring->num_desc);
1429 }
1430
1431 tx_ring->producer = producer;
1432 barrier();
1433}
1434
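/* DMA-map the skb head and every page fragment into pbuf->frag_array.
 * On a mapping error all mappings done so far are unwound and -ENOMEM
 * is returned.
 */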
1435static int
1436qlcnic_map_tx_skb(struct pci_dev *pdev,
1437 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1438{
1439 struct qlcnic_skb_frag *nf;
1440 struct skb_frag_struct *frag;
1441 int i, nr_frags;
1442 dma_addr_t map;
1443
1444 nr_frags = skb_shinfo(skb)->nr_frags;
1445 nf = &pbuf->frag_array[0];
1446
1447 map = pci_map_single(pdev, skb->data,
1448 skb_headlen(skb), PCI_DMA_TODEVICE);
1449 if (pci_dma_mapping_error(pdev, map))
1450 goto out_err;
1451
1452 nf->dma = map;
1453 nf->length = skb_headlen(skb);
1454
1455 for (i = 0; i < nr_frags; i++) {
1456 frag = &skb_shinfo(skb)->frags[i];
1457 nf = &pbuf->frag_array[i+1];
1458
1459 map = pci_map_page(pdev, frag->page, frag->page_offset,
1460 frag->size, PCI_DMA_TODEVICE);
1461 if (pci_dma_mapping_error(pdev, map))
1462 goto unwind;
1463
1464 nf->dma = map;
1465 nf->length = frag->size;
1466 }
1467
1468 return 0;
1469
1470unwind:
1471 while (--i >= 0) {
1472 nf = &pbuf->frag_array[i+1];
1473 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1474 }
1475
1476 nf = &pbuf->frag_array[0];
1477 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1478
1479out_err:
1480 return -ENOMEM;
1481}
1482
1483static inline void
1484qlcnic_clear_cmddesc(u64 *desc)
1485{
1486 desc[0] = 0ULL;
1487 desc[2] = 0ULL;
1488}
1489
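/* Main transmit path.  Each command descriptor holds up to four
 * buffer address/length pairs, so (frag_count + 3) / 4 descriptors are
 * needed per skb (plus descriptors for LSO header copies).  The queue
 * is stopped and NETDEV_TX_BUSY returned when fewer than that many
 * descriptors, plus a small reserve, are available.
 */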
1490static netdev_tx_t
1491qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1492{
1493 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1494 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1495 struct qlcnic_cmd_buffer *pbuf;
1496 struct qlcnic_skb_frag *buffrag;
1497 struct cmd_desc_type0 *hwdesc, *first_desc;
1498 struct pci_dev *pdev;
1499 int i, k;
1500
1501 u32 producer;
1502 int frag_count, no_of_desc;
1503 u32 num_txd = tx_ring->num_desc;
1504
1505 frag_count = skb_shinfo(skb)->nr_frags + 1;
1506
1507	/* 4 fragments per cmd desc */
1508 no_of_desc = (frag_count + 3) >> 2;
1509
1510 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1511 netif_stop_queue(netdev);
1512 return NETDEV_TX_BUSY;
1513 }
1514
1515 producer = tx_ring->producer;
1516 pbuf = &tx_ring->cmd_buf_arr[producer];
1517
1518 pdev = adapter->pdev;
1519
1520 if (qlcnic_map_tx_skb(pdev, skb, pbuf))
1521 goto drop_packet;
1522
1523 pbuf->skb = skb;
1524 pbuf->frag_count = frag_count;
1525
1526 first_desc = hwdesc = &tx_ring->desc_head[producer];
1527 qlcnic_clear_cmddesc((u64 *)hwdesc);
1528
1529 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1530 qlcnic_set_tx_port(first_desc, adapter->portnum);
1531
1532 for (i = 0; i < frag_count; i++) {
1533
1534 k = i % 4;
1535
1536 if ((k == 0) && (i > 0)) {
1537			/* move to next desc. */
1538 producer = get_next_index(producer, num_txd);
1539 hwdesc = &tx_ring->desc_head[producer];
1540 qlcnic_clear_cmddesc((u64 *)hwdesc);
1541 tx_ring->cmd_buf_arr[producer].skb = NULL;
1542 }
1543
1544 buffrag = &pbuf->frag_array[i];
1545
1546 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1547 switch (k) {
1548 case 0:
1549 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1550 break;
1551 case 1:
1552 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1553 break;
1554 case 2:
1555 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1556 break;
1557 case 3:
1558 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1559 break;
1560 }
1561 }
1562
1563 tx_ring->producer = get_next_index(producer, num_txd);
1564
1565 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1566
1567 qlcnic_update_cmd_producer(adapter, tx_ring);
1568
1569 adapter->stats.txbytes += skb->len;
1570 adapter->stats.xmitcalled++;
1571
1572 return NETDEV_TX_OK;
1573
1574drop_packet:
1575 adapter->stats.txdropped++;
1576 dev_kfree_skb_any(skb);
1577 return NETDEV_TX_OK;
1578}
1579
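/* Read the temperature state word from CRB_TEMP_STATE and warn when the
 * operating range is exceeded.  Returns 1 at the panic threshold, in
 * which case the caller detaches the device, and 0 otherwise.
 */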
1580static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1581{
1582 struct net_device *netdev = adapter->netdev;
1583 u32 temp, temp_state, temp_val;
1584 int rv = 0;
1585
1586 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1587
1588 temp_state = qlcnic_get_temp_state(temp);
1589 temp_val = qlcnic_get_temp_val(temp);
1590
1591 if (temp_state == QLCNIC_TEMP_PANIC) {
1592 dev_err(&netdev->dev,
1593 "Device temperature %d degrees C exceeds"
1594 " maximum allowed. Hardware has been shut down.\n",
1595 temp_val);
1596 rv = 1;
1597 } else if (temp_state == QLCNIC_TEMP_WARN) {
1598 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1599 dev_err(&netdev->dev,
1600 "Device temperature %d degrees C "
1601 "exceeds operating range."
1602 " Immediate action needed.\n",
1603 temp_val);
1604 }
1605 } else {
1606 if (adapter->temp == QLCNIC_TEMP_WARN) {
1607 dev_info(&netdev->dev,
1608 "Device temperature is now %d degrees C"
1609 " in normal range.\n", temp_val);
1610 }
1611 }
1612 adapter->temp = temp_state;
1613 return rv;
1614}
1615
1616void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1617{
1618 struct net_device *netdev = adapter->netdev;
1619
1620 if (adapter->ahw.linkup && !linkup) {
1621 dev_info(&netdev->dev, "NIC Link is down\n");
1622 adapter->ahw.linkup = 0;
1623 if (netif_running(netdev)) {
1624 netif_carrier_off(netdev);
1625 netif_stop_queue(netdev);
1626 }
1627 } else if (!adapter->ahw.linkup && linkup) {
1628 dev_info(&netdev->dev, "NIC Link is up\n");
1629 adapter->ahw.linkup = 1;
1630 if (netif_running(netdev)) {
1631 netif_carrier_on(netdev);
1632 netif_wake_queue(netdev);
1633 }
1634 }
1635}
1636
1637static void qlcnic_tx_timeout(struct net_device *netdev)
1638{
1639 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1640
1641 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1642 return;
1643
1644 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1645 schedule_work(&adapter->tx_timeout_task);
1646}
1647
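/* Tx timeout recovery: first attempt a lightweight context reset; if
 * that fails, or after QLCNIC_MAX_TX_TIMEOUTS consecutive timeouts,
 * request a full firmware reset instead.
 */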
1648static void qlcnic_tx_timeout_task(struct work_struct *work)
1649{
1650 struct qlcnic_adapter *adapter =
1651 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1652
1653 if (!netif_running(adapter->netdev))
1654 return;
1655
1656 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1657 return;
1658
1659 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1660 goto request_reset;
1661
1662 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1663	if (!qlcnic_reset_context(adapter)) {
1664		adapter->netdev->trans_start = jiffies;
1665		return;
1666	}
1667
1668	/* context reset failed, fall through for fw reset */
1669
1670request_reset:
1671 adapter->need_fw_reset = 1;
1672 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1673}
1674
1675static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1676{
1677 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1678 struct net_device_stats *stats = &netdev->stats;
1679
1680 memset(stats, 0, sizeof(*stats));
1681
1682 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1683 stats->tx_packets = adapter->stats.xmitfinished;
1684 stats->rx_bytes = adapter->stats.rxbytes;
1685 stats->tx_bytes = adapter->stats.txbytes;
1686 stats->rx_dropped = adapter->stats.rxdropped;
1687 stats->tx_dropped = adapter->stats.txdropped;
1688
1689 return stats;
1690}
1691
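/* Legacy INTx handler: confirm that the interrupt belongs to this
 * function and that the interrupt state machine has triggered, ack it
 * through the target status register and hand processing off to NAPI.
 * The MSI and MSI-X variants below can skip the shared-interrupt
 * checks.
 */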
1692static irqreturn_t qlcnic_intr(int irq, void *data)
1693{
1694 struct qlcnic_host_sds_ring *sds_ring = data;
1695 struct qlcnic_adapter *adapter = sds_ring->adapter;
1696 u32 status;
1697
1698 status = readl(adapter->isr_int_vec);
1699
1700 if (!(status & adapter->int_vec_bit))
1701 return IRQ_NONE;
1702
1703 /* check interrupt state machine, to be sure */
1704 status = readl(adapter->crb_int_state_reg);
1705 if (!ISR_LEGACY_INT_TRIGGERED(status))
1706 return IRQ_NONE;
1707
1708 writel(0xffffffff, adapter->tgt_status_reg);
1709 /* read twice to ensure write is flushed */
1710 readl(adapter->isr_int_vec);
1711 readl(adapter->isr_int_vec);
1712
1713 napi_schedule(&sds_ring->napi);
1714
1715 return IRQ_HANDLED;
1716}
1717
1718static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1719{
1720 struct qlcnic_host_sds_ring *sds_ring = data;
1721 struct qlcnic_adapter *adapter = sds_ring->adapter;
1722
1723 /* clear interrupt */
1724 writel(0xffffffff, adapter->tgt_status_reg);
1725
1726 napi_schedule(&sds_ring->napi);
1727 return IRQ_HANDLED;
1728}
1729
1730static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1731{
1732 struct qlcnic_host_sds_ring *sds_ring = data;
1733
1734 napi_schedule(&sds_ring->napi);
1735 return IRQ_HANDLED;
1736}
1737
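/* Reclaim Tx descriptors up to the hardware consumer index: unmap DMA
 * buffers, free the skbs and, if the queue was stopped, wake it once
 * enough descriptors are free again.  Returns 1 when the ring has been
 * fully drained; qlcnic_poll uses this to decide whether interrupts may
 * be re-enabled.
 */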
1738static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1739{
1740 u32 sw_consumer, hw_consumer;
1741 int count = 0, i;
1742 struct qlcnic_cmd_buffer *buffer;
1743 struct pci_dev *pdev = adapter->pdev;
1744 struct net_device *netdev = adapter->netdev;
1745 struct qlcnic_skb_frag *frag;
1746 int done;
1747 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1748
1749 if (!spin_trylock(&adapter->tx_clean_lock))
1750 return 1;
1751
1752 sw_consumer = tx_ring->sw_consumer;
1753 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1754
1755 while (sw_consumer != hw_consumer) {
1756 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1757 if (buffer->skb) {
1758 frag = &buffer->frag_array[0];
1759 pci_unmap_single(pdev, frag->dma, frag->length,
1760 PCI_DMA_TODEVICE);
1761 frag->dma = 0ULL;
1762 for (i = 1; i < buffer->frag_count; i++) {
1763 frag++;
1764 pci_unmap_page(pdev, frag->dma, frag->length,
1765 PCI_DMA_TODEVICE);
1766 frag->dma = 0ULL;
1767 }
1768
1769 adapter->stats.xmitfinished++;
1770 dev_kfree_skb_any(buffer->skb);
1771 buffer->skb = NULL;
1772 }
1773
1774 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1775 if (++count >= MAX_STATUS_HANDLE)
1776 break;
1777 }
1778
1779 if (count && netif_running(netdev)) {
1780 tx_ring->sw_consumer = sw_consumer;
1781
1782 smp_mb();
1783
1784 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1785 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1786 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1787 netif_wake_queue(netdev);
1788 adapter->tx_timeo_cnt = 0;
1789 }
1790 __netif_tx_unlock(tx_ring->txq);
1791 }
1792 }
1793	/*
1794	 * If everything is freed up to the consumer, check whether the ring
1795	 * is full.  If it is, check whether more needs to be freed and
1796	 * schedule the callback again.
1797	 *
1798	 * This matters when there are two CPUs: one could be freeing the ring
1799	 * while the other is filling it.  If the ring is full when we get out
1800	 * of here and the card has already interrupted the host, the host can
1801	 * miss the interrupt.
1802	 *
1803	 * There is still a possible race condition and the host could miss an
1804	 * interrupt.  The card has to take care of this.
1805	 */
1806 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1807 done = (sw_consumer == hw_consumer);
1808 spin_unlock(&adapter->tx_clean_lock);
1809
1810 return done;
1811}
1812
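/* NAPI poll: clean the Tx ring, process up to 'budget' receive
 * descriptors, and re-enable interrupts only when the Rx work fit
 * within the budget, the Tx ring was fully drained and the device is
 * still up.
 */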
1813static int qlcnic_poll(struct napi_struct *napi, int budget)
1814{
1815 struct qlcnic_host_sds_ring *sds_ring =
1816 container_of(napi, struct qlcnic_host_sds_ring, napi);
1817
1818 struct qlcnic_adapter *adapter = sds_ring->adapter;
1819
1820 int tx_complete;
1821 int work_done;
1822
1823 tx_complete = qlcnic_process_cmd_ring(adapter);
1824
1825 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1826
1827 if ((work_done < budget) && tx_complete) {
1828 napi_complete(&sds_ring->napi);
1829 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1830 qlcnic_enable_int(sds_ring);
1831 }
1832
1833 return work_done;
1834}
1835
1836#ifdef CONFIG_NET_POLL_CONTROLLER
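/* FIXME: qlcnic_intr() treats its data argument as a
 * struct qlcnic_host_sds_ring pointer, but the adapter pointer is
 * passed in here.
 */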
1837static void qlcnic_poll_controller(struct net_device *netdev)
1838{
1839 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1840 disable_irq(adapter->irq);
1841 qlcnic_intr(adapter->irq, adapter);
1842 enable_irq(adapter->irq);
1843}
1844#endif
1845
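/* QLCNIC_CRB_DRV_STATE keeps a 4-bit field per PCI function: bit
 * (portnum * 4) acknowledges a pending reset and bit (portnum * 4 + 1)
 * acknowledges a quiescent request.  QLCNIC_CRB_DEV_REF_COUNT uses bit
 * (portnum * 4) to mark the function as active.  The helpers below
 * update this function's bits under the hardware api lock.
 */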
1846static void
1847qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1848{
1849 u32 val;
1850
1851 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1852 state != QLCNIC_DEV_NEED_QUISCENT);
1853
1854 if (qlcnic_api_lock(adapter))
1855		return;
1856
1857 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1858
1859 if (state == QLCNIC_DEV_NEED_RESET)
1860 val |= ((u32)0x1 << (adapter->portnum * 4));
1861 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1862 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1863
1864 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1865
1866 qlcnic_api_unlock(adapter);
1867}
1868
1869static void
1870qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1871{
1872 u32 val;
1873
1874 if (qlcnic_api_lock(adapter))
1875 goto err;
1876
1877 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1878 val &= ~((u32)0x1 << (adapter->portnum * 4));
1879 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1880
1881 if (!(val & 0x11111111))
1882 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1883
1884 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1885 val &= ~((u32)0x3 << (adapter->portnum * 4));
1886 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1887
1888 qlcnic_api_unlock(adapter);
1889err:
1890 adapter->fw_fail_cnt = 0;
1891 clear_bit(__QLCNIC_START_FW, &adapter->state);
1892 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1893}
1894
1895static int
1896qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
1897{
1898 int act, state;
1899
1900 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1901 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1902
1903 if (((state & 0x11111111) == (act & 0x11111111)) ||
1904 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
1905 return 0;
1906 else
1907 return 1;
1908}
1909
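/* Decide whether this function should load the firmware.  The first
 * function to mark itself active in QLCNIC_CRB_DEV_REF_COUNT (or one
 * holding __QLCNIC_START_FW) moves the device to INITIALIZING and
 * returns 1.  If the device is already READY, 0 is returned.  On
 * NEED_RESET/NEED_QUISCENT the request is acked and the device state is
 * polled for up to ~20 seconds to become READY; -1 means failure.
 */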
1910static int
1911qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
1912{
1913 u32 val, prev_state;
1914 int cnt = 0;
1915 int portnum = adapter->portnum;
1916
1917 if (qlcnic_api_lock(adapter))
1918 return -1;
1919
1920 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1921 if (!(val & ((int)0x1 << (portnum * 4)))) {
1922 val |= ((u32)0x1 << (portnum * 4));
1923 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1924 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
1925 goto start_fw;
1926 }
1927
1928 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1929
1930 switch (prev_state) {
1931 case QLCNIC_DEV_COLD:
1932start_fw:
1933 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
1934 qlcnic_api_unlock(adapter);
1935 return 1;
1936
1937 case QLCNIC_DEV_READY:
1938 qlcnic_api_unlock(adapter);
1939 return 0;
1940
1941 case QLCNIC_DEV_NEED_RESET:
1942 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1943 val |= ((u32)0x1 << (portnum * 4));
1944 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1945 break;
1946
1947 case QLCNIC_DEV_NEED_QUISCENT:
1948 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1949 val |= ((u32)0x1 << ((portnum * 4) + 1));
1950 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1951 break;
1952
1953 case QLCNIC_DEV_FAILED:
1954 qlcnic_api_unlock(adapter);
1955 return -1;
1956 }
1957
1958 qlcnic_api_unlock(adapter);
1959 msleep(1000);
1960 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
1961 ++cnt < 20)
1962 msleep(1000);
1963
1964 if (cnt >= 20)
1965 return -1;
1966
1967 if (qlcnic_api_lock(adapter))
1968 return -1;
1969
1970 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1971 val &= ~((u32)0x3 << (portnum * 4));
1972 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1973
1974 qlcnic_api_unlock(adapter);
1975
1976 return 0;
1977}
1978
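/* Delayed work used while the device is being reinitialized.  The
 * function holding __QLCNIC_START_FW waits for the other active
 * functions to ack the reset and then restarts the firmware; the other
 * functions poll QLCNIC_CRB_DEV_STATE and, once it reads READY, bring
 * their own context up and schedule qlcnic_attach_work.  After
 * FW_POLL_THRESH attempts the recovery is abandoned.
 */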
1979static void
1980qlcnic_fwinit_work(struct work_struct *work)
1981{
1982 struct qlcnic_adapter *adapter = container_of(work,
1983 struct qlcnic_adapter, fw_work.work);
1984 int dev_state;
1985
1986 if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
1987 goto err_ret;
1988
1989 if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
1990
1991 if (qlcnic_check_drv_state(adapter)) {
1992 qlcnic_schedule_work(adapter,
1993 qlcnic_fwinit_work, FW_POLL_DELAY);
1994 return;
1995 }
1996
1997 if (!qlcnic_start_firmware(adapter)) {
1998 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
1999 return;
2000 }
2001
2002 goto err_ret;
2003 }
2004
2005 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2006 switch (dev_state) {
2007 case QLCNIC_DEV_READY:
2008 if (!qlcnic_start_firmware(adapter)) {
2009 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2010 return;
2011 }
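		/* firmware start failed: fall through to err_ret */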
2012 case QLCNIC_DEV_FAILED:
2013 break;
2014
2015 default:
2016 qlcnic_schedule_work(adapter,
2017 qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
2018 return;
2019 }
2020
2021err_ret:
2022 qlcnic_clr_all_drv_state(adapter);
2023}
2024
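/* Delayed work that tears the interface down before a firmware reset:
 * detach the netdev, free resources, ack the request in the driver
 * state register and schedule qlcnic_fwinit_work.  A fatal peg-halt
 * status or a temperature panic aborts the recovery instead.
 */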
2025static void
2026qlcnic_detach_work(struct work_struct *work)
2027{
2028 struct qlcnic_adapter *adapter = container_of(work,
2029 struct qlcnic_adapter, fw_work.work);
2030 struct net_device *netdev = adapter->netdev;
2031 u32 status;
2032
2033 netif_device_detach(netdev);
2034
2035 qlcnic_down(adapter, netdev);
2036
2037 qlcnic_detach(adapter);
2038
2039 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2040
2041 if (status & QLCNIC_RCODE_FATAL_ERROR)
2042 goto err_ret;
2043
2044 if (adapter->temp == QLCNIC_TEMP_PANIC)
2045 goto err_ret;
2046
2047 qlcnic_set_drv_state(adapter, adapter->dev_state);
2048
2049 adapter->fw_wait_cnt = 0;
2050
2051 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2052
2053 return;
2054
2055err_ret:
2056 qlcnic_clr_all_drv_state(adapter);
2057
2058}
2059
2060static void
2061qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2062{
2063 u32 state;
2064
2065 if (qlcnic_api_lock(adapter))
2066 return;
2067
2068 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2069
2070 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2071 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2072 set_bit(__QLCNIC_START_FW, &adapter->state);
2073 }
2074
2075 qlcnic_api_unlock(adapter);
2076}
2077
2078static void
2079qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2080 work_func_t func, int delay)
2081{
2082 INIT_DELAYED_WORK(&adapter->fw_work, func);
2083 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2084}
2085
2086static void
2087qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2088{
2089 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2090 msleep(10);
2091
2092 cancel_delayed_work_sync(&adapter->fw_work);
2093}
2094
2095static void
2096qlcnic_attach_work(struct work_struct *work)
2097{
2098 struct qlcnic_adapter *adapter = container_of(work,
2099 struct qlcnic_adapter, fw_work.work);
2100 struct net_device *netdev = adapter->netdev;
2101 int err;
2102
2103 if (netif_running(netdev)) {
2104 err = qlcnic_attach(adapter);
2105 if (err)
2106 goto done;
2107
2108 err = qlcnic_up(adapter, netdev);
2109 if (err) {
2110 qlcnic_detach(adapter);
2111 goto done;
2112 }
2113
2114 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2115 }
2116
2117 netif_device_attach(netdev);
2118
2119done:
2120 adapter->fw_fail_cnt = 0;
2121 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2122 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2123}
2124
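/* Periodic health check run from qlcnic_fw_poll_work: monitor the
 * temperature sensor, the device state register and the firmware
 * heartbeat counter.  If the heartbeat stops advancing for
 * FW_FAIL_THRESH polls, a firmware reset is requested and, when
 * auto_fw_reset is enabled, qlcnic_detach_work is scheduled.
 * Returns 1 once recovery has been initiated.
 */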
2125static int
2126qlcnic_check_health(struct qlcnic_adapter *adapter)
2127{
2128 u32 state = 0, heartbit;
2129 struct net_device *netdev = adapter->netdev;
2130
2131 if (qlcnic_check_temp(adapter))
2132 goto detach;
2133
2134 if (adapter->need_fw_reset) {
2135 qlcnic_dev_request_reset(adapter);
2136 goto detach;
2137 }
2138
2139 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2140 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2141 adapter->need_fw_reset = 1;
2142
2143 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2144 if (heartbit != adapter->heartbit) {
2145 adapter->heartbit = heartbit;
2146 adapter->fw_fail_cnt = 0;
2147 if (adapter->need_fw_reset)
2148 goto detach;
2149 return 0;
2150 }
2151
2152 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2153 return 0;
2154
2155 qlcnic_dev_request_reset(adapter);
2156
2157 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2158
2159 dev_info(&netdev->dev, "firmware hang detected\n");
2160
2161detach:
2162 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2163 QLCNIC_DEV_NEED_RESET;
2164
2165 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2166 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2167 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2168
2169 return 1;
2170}
2171
2172static void
2173qlcnic_fw_poll_work(struct work_struct *work)
2174{
2175 struct qlcnic_adapter *adapter = container_of(work,
2176 struct qlcnic_adapter, fw_work.work);
2177
2178 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2179 goto reschedule;
2180
2181
2182 if (qlcnic_check_health(adapter))
2183 return;
2184
2185reschedule:
2186 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2187}
2188
2189static ssize_t
2190qlcnic_store_bridged_mode(struct device *dev,
2191 struct device_attribute *attr, const char *buf, size_t len)
2192{
2193 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2194 unsigned long new;
2195 int ret = -EINVAL;
2196
2197 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2198 goto err_out;
2199
2200 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2201 goto err_out;
2202
2203 if (strict_strtoul(buf, 2, &new))
2204 goto err_out;
2205
2206 if (!qlcnic_config_bridged_mode(adapter, !!new))
2207 ret = len;
2208
2209err_out:
2210 return ret;
2211}
2212
2213static ssize_t
2214qlcnic_show_bridged_mode(struct device *dev,
2215 struct device_attribute *attr, char *buf)
2216{
2217 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2218 int bridged_mode = 0;
2219
2220 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2221 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2222
2223 return sprintf(buf, "%d\n", bridged_mode);
2224}
2225
2226static struct device_attribute dev_attr_bridged_mode = {
2227 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2228 .show = qlcnic_show_bridged_mode,
2229 .store = qlcnic_store_bridged_mode,
2230};
2231
2232static ssize_t
2233qlcnic_store_diag_mode(struct device *dev,
2234 struct device_attribute *attr, const char *buf, size_t len)
2235{
2236 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2237 unsigned long new;
2238
2239 if (strict_strtoul(buf, 2, &new))
2240 return -EINVAL;
2241
2242 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2243 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2244
2245 return len;
2246}
2247
2248static ssize_t
2249qlcnic_show_diag_mode(struct device *dev,
2250 struct device_attribute *attr, char *buf)
2251{
2252 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2253
2254 return sprintf(buf, "%d\n",
2255 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2256}
2257
2258static struct device_attribute dev_attr_diag_mode = {
2259 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2260 .show = qlcnic_show_diag_mode,
2261 .store = qlcnic_store_diag_mode,
2262};
2263
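/* Diagnostic sysfs interface: the "crb" and "mem" binary attributes
 * below allow 4-byte CRB register and 8-byte adapter memory accesses
 * from user space, but only while diag_mode has been enabled via its
 * sysfs file.
 */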
2264static int
2265qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2266 loff_t offset, size_t size)
2267{
2268 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2269 return -EIO;
2270
2271 if ((size != 4) || (offset & 0x3))
2272 return -EINVAL;
2273
2274 if (offset < QLCNIC_PCI_CRBSPACE)
2275 return -EINVAL;
2276
2277 return 0;
2278}
2279
2280static ssize_t
2281qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2282 char *buf, loff_t offset, size_t size)
2283{
2284 struct device *dev = container_of(kobj, struct device, kobj);
2285 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2286 u32 data;
2287 int ret;
2288
2289 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2290 if (ret != 0)
2291 return ret;
2292
2293 data = QLCRD32(adapter, offset);
2294 memcpy(buf, &data, size);
2295 return size;
2296}
2297
2298static ssize_t
2299qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2300 char *buf, loff_t offset, size_t size)
2301{
2302 struct device *dev = container_of(kobj, struct device, kobj);
2303 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2304 u32 data;
2305 int ret;
2306
2307 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2308 if (ret != 0)
2309 return ret;
2310
2311 memcpy(&data, buf, size);
2312 QLCWR32(adapter, offset, data);
2313 return size;
2314}
2315
2316static int
2317qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2318 loff_t offset, size_t size)
2319{
2320 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2321 return -EIO;
2322
2323 if ((size != 8) || (offset & 0x7))
2324 return -EIO;
2325
2326 return 0;
2327}
2328
2329static ssize_t
2330qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2331 char *buf, loff_t offset, size_t size)
2332{
2333 struct device *dev = container_of(kobj, struct device, kobj);
2334 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2335 u64 data;
2336 int ret;
2337
2338 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2339 if (ret != 0)
2340 return ret;
2341
2342 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2343 return -EIO;
2344
2345 memcpy(buf, &data, size);
2346
2347 return size;
2348}
2349
2350static ssize_t
2351qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2352 char *buf, loff_t offset, size_t size)
2353{
2354 struct device *dev = container_of(kobj, struct device, kobj);
2355 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2356 u64 data;
2357 int ret;
2358
2359 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2360 if (ret != 0)
2361 return ret;
2362
2363 memcpy(&data, buf, size);
2364
2365 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2366 return -EIO;
2367
2368 return size;
2369}
2370
2371
2372static struct bin_attribute bin_attr_crb = {
2373 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2374 .size = 0,
2375 .read = qlcnic_sysfs_read_crb,
2376 .write = qlcnic_sysfs_write_crb,
2377};
2378
2379static struct bin_attribute bin_attr_mem = {
2380 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2381 .size = 0,
2382 .read = qlcnic_sysfs_read_mem,
2383 .write = qlcnic_sysfs_write_mem,
2384};
2385
2386static void
2387qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2388{
2389 struct device *dev = &adapter->pdev->dev;
2390
2391 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2392 if (device_create_file(dev, &dev_attr_bridged_mode))
2393 dev_warn(dev,
2394 "failed to create bridged_mode sysfs entry\n");
2395}
2396
2397static void
2398qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2399{
2400 struct device *dev = &adapter->pdev->dev;
2401
2402 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2403 device_remove_file(dev, &dev_attr_bridged_mode);
2404}
2405
2406static void
2407qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2408{
2409 struct device *dev = &adapter->pdev->dev;
2410
2411 if (device_create_file(dev, &dev_attr_diag_mode))
2412 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2413 if (device_create_bin_file(dev, &bin_attr_crb))
2414 dev_info(dev, "failed to create crb sysfs entry\n");
2415 if (device_create_bin_file(dev, &bin_attr_mem))
2416 dev_info(dev, "failed to create mem sysfs entry\n");
2417}
2418
2419
2420static void
2421qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2422{
2423 struct device *dev = &adapter->pdev->dev;
2424
2425 device_remove_file(dev, &dev_attr_diag_mode);
2426 device_remove_bin_file(dev, &bin_attr_crb);
2427 device_remove_bin_file(dev, &bin_attr_mem);
2428}
2429
2430#ifdef CONFIG_INET
2431
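/* With CONFIG_INET, the driver registers netdevice and inetaddr
 * notifiers so that IPv4 addresses configured on a qlcnic interface
 * (or on a VLAN stacked on one) are pushed to the firmware through
 * qlcnic_config_ipaddr(), presumably for destination-IP based receive
 * handling.  Cut-through adapters do not support this.
 */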
2432#define is_qlcnic_netdev(dev) ((dev)->netdev_ops == &qlcnic_netdev_ops)
2433
2434static int
2435qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2436{
2437 if (adapter->ahw.cut_through)
2438 return 0;
2439
2440 return 1;
2441}
2442
2443static void
2444qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2445{
2446 struct in_device *indev;
2447 struct qlcnic_adapter *adapter = netdev_priv(dev);
2448
2449 if (!qlcnic_destip_supported(adapter))
2450 return;
2451
2452 indev = in_dev_get(dev);
2453 if (!indev)
2454 return;
2455
2456 for_ifa(indev) {
2457 switch (event) {
2458 case NETDEV_UP:
2459 qlcnic_config_ipaddr(adapter,
2460 ifa->ifa_address, QLCNIC_IP_UP);
2461 break;
2462 case NETDEV_DOWN:
2463 qlcnic_config_ipaddr(adapter,
2464 ifa->ifa_address, QLCNIC_IP_DOWN);
2465 break;
2466 default:
2467 break;
2468 }
2469 } endfor_ifa(indev);
2470
2471 in_dev_put(indev);
2472 return;
2473}
2474
2475static int qlcnic_netdev_event(struct notifier_block *this,
2476 unsigned long event, void *ptr)
2477{
2478 struct qlcnic_adapter *adapter;
2479 struct net_device *dev = (struct net_device *)ptr;
2480
2481recheck:
2482 if (dev == NULL)
2483 goto done;
2484
2485 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2486 dev = vlan_dev_real_dev(dev);
2487 goto recheck;
2488 }
2489
2490 if (!is_qlcnic_netdev(dev))
2491 goto done;
2492
2493 adapter = netdev_priv(dev);
2494
2495 if (!adapter)
2496 goto done;
2497
2498 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2499 goto done;
2500
2501 qlcnic_config_indev_addr(dev, event);
2502done:
2503 return NOTIFY_DONE;
2504}
2505
2506static int
2507qlcnic_inetaddr_event(struct notifier_block *this,
2508 unsigned long event, void *ptr)
2509{
2510 struct qlcnic_adapter *adapter;
2511 struct net_device *dev;
2512
2513 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2514
2515 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2516
2517recheck:
2518 if (dev == NULL || !netif_running(dev))
2519 goto done;
2520
2521 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2522 dev = vlan_dev_real_dev(dev);
2523 goto recheck;
2524 }
2525
2526 if (!is_qlcnic_netdev(dev))
2527 goto done;
2528
2529 adapter = netdev_priv(dev);
2530
2531 if (!adapter || !qlcnic_destip_supported(adapter))
2532 goto done;
2533
2534 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2535 goto done;
2536
2537 switch (event) {
2538 case NETDEV_UP:
2539 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2540 break;
2541 case NETDEV_DOWN:
2542 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2543 break;
2544 default:
2545 break;
2546 }
2547
2548done:
2549 return NOTIFY_DONE;
2550}
2551
2552static struct notifier_block qlcnic_netdev_cb = {
2553 .notifier_call = qlcnic_netdev_event,
2554};
2555
2556static struct notifier_block qlcnic_inetaddr_cb = {
2557 .notifier_call = qlcnic_inetaddr_event,
2558};
2559#else
2560static void
2561qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2562{ }
2563#endif
2564
2565static struct pci_driver qlcnic_driver = {
2566 .name = qlcnic_driver_name,
2567 .id_table = qlcnic_pci_tbl,
2568 .probe = qlcnic_probe,
2569 .remove = __devexit_p(qlcnic_remove),
2570#ifdef CONFIG_PM
2571 .suspend = qlcnic_suspend,
2572 .resume = qlcnic_resume,
2573#endif
2574 .shutdown = qlcnic_shutdown
2575};
2576
2577static int __init qlcnic_init_module(void)
2578{
2579
2580 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2581
2582#ifdef CONFIG_INET
2583 register_netdevice_notifier(&qlcnic_netdev_cb);
2584 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2585#endif
2586
2587
2588 return pci_register_driver(&qlcnic_driver);
2589}
2590
2591module_init(qlcnic_init_module);
2592
2593static void __exit qlcnic_exit_module(void)
2594{
2595
2596 pci_unregister_driver(&qlcnic_driver);
2597
2598#ifdef CONFIG_INET
2599 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
2600 unregister_netdevice_notifier(&qlcnic_netdev_cb);
2601#endif
2602}
2603
2604module_exit(qlcnic_exit_module);