author    Sasha Neftin <sasha.neftin@intel.com>	2018-10-11 03:17:19 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-10-17 16:20:43 -0400
commit    13b5b7fd6a4a96dffe604f25e7b64cfbd9520924 (patch)
tree      43a5df47a21d4ff121a50369c033c2ca10f6f583 /drivers/net/ethernet/intel/igc
parent    3df25e4c1e66a69097bde99990fb095b26125c82 (diff)
igc: Add support for Tx/Rx rings
This change adds the defines and structures necessary to support both Tx
and Rx descriptor rings.

Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igc')
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile        2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h         125
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c     83
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h     89
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h  43
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h        1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c    827
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h      3
8 files changed, 1172 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 06e0b9e23a8c..c32c45300692 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -7,4 +7,4 @@
 
 obj-$(CONFIG_IGC) += igc.o
 
-igc-objs := igc_main.o igc_mac.o
+igc-objs := igc_main.o igc_mac.o igc_base.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index e595d135ea7b..7bb19328b899 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -46,6 +46,45 @@ extern char igc_driver_version[];
 #define MAX_Q_VECTORS 8
 #define MAX_STD_JUMBO_FRAME_SIZE 9216
 
+/* Supported Rx Buffer Sizes */
+#define IGC_RXBUFFER_256 256
+#define IGC_RXBUFFER_2048 2048
+#define IGC_RXBUFFER_3072 3072
+
+#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+
+/* RX and TX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGC_RX_PTHRESH 8
+#define IGC_RX_HTHRESH 8
+#define IGC_TX_PTHRESH 8
+#define IGC_TX_HTHRESH 1
+#define IGC_RX_WTHRESH 4
+#define IGC_TX_WTHRESH 16
+
+#define IGC_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#define IGC_TS_HDR_LEN 16
+
+#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+
+#if (PAGE_SIZE < 8192)
+#define IGC_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
+#else
+#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
+#endif
+
 enum igc_state_t {
 	__IGC_TESTING,
 	__IGC_RESETTING,
@@ -53,6 +92,33 @@ enum igc_state_t {
 	__IGC_PTP_TX_IN_PROGRESS,
 };
 
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+	union igc_adv_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	u16 gso_segs;
+	__be16 protocol;
+
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
+};
+
+struct igc_rx_buffer {
+	dma_addr_t dma;
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
+};
+
 struct igc_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
@@ -214,4 +280,63 @@ struct igc_adapter {
 	struct igc_mac_addr *mac_table;
 };
 
+/* igc_desc_unused - calculate if we have unused descriptors */
+static inline u16 igc_desc_unused(const struct igc_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
+{
+	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
+enum igc_ring_flags_t {
+	IGC_RING_FLAG_RX_3K_BUFFER,
+	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
+	IGC_RING_FLAG_RX_SCTP_CSUM,
+	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
+	IGC_RING_FLAG_TX_CTX_IDX,
+	IGC_RING_FLAG_TX_DETECT_HANG
+};
+
+#define ring_uses_large_buffer(ring) \
+	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IGC_RXBUFFER_3072;
+
+	if (ring_uses_build_skb(ring))
+		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
+#endif
+	return IGC_RXBUFFER_2048;
+}
+
+static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
+
+#define IGC_RX_DESC(R, i) \
+	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
+#define IGC_TX_DESC(R, i) \
+	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
+#define IGC_TX_CTXTDESC(R, i) \
+	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
+
 #endif /* _IGC_H_ */
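
A quick way to see what igc_desc_unused() computes: the helper keeps one
slot permanently unused so that next_to_use == next_to_clean can only mean
"empty", never "full". The following standalone sketch (toy_ring and
toy_desc_unused are illustrative stand-ins for struct igc_ring and the
helper above, not driver code) exercises the same expression:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in carrying only the fields the helper touches */
struct toy_ring {
	uint16_t count;		/* number of descriptors in the ring */
	uint16_t next_to_use;	/* producer index */
	uint16_t next_to_clean;	/* consumer index */
};

/* same expression as igc_desc_unused() in igc.h */
static uint16_t toy_desc_unused(const struct toy_ring *ring)
{
	uint16_t ntc = ring->next_to_clean;
	uint16_t ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

int main(void)
{
	struct toy_ring r = { .count = 256 };

	assert(toy_desc_unused(&r) == 255);	/* empty ring: count - 1 */
	r.next_to_use = 10;
	r.next_to_clean = 5;
	assert(toy_desc_unused(&r) == 250);	/* producer ahead: wrapped term */
	r.next_to_use = 5;
	r.next_to_clean = 10;
	assert(toy_desc_unused(&r) == 4);	/* consumer ahead: no wrap term */
	printf("desc_unused invariants hold\n");
	return 0;
}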
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
new file mode 100644
index 000000000000..3425b7466017
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+#include "igc_i225.h"
+
+/**
+ * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ */
+void igc_rx_fifo_flush_base(struct igc_hw *hw)
+{
+	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+	int i, ms_wait;
+
+	/* disable IPv6 options as per hardware errata */
+	rfctl = rd32(IGC_RFCTL);
+	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
+	wr32(IGC_RFCTL, rfctl);
+
+	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
+		return;
+
+	/* Disable all Rx queues */
+	for (i = 0; i < 4; i++) {
+		rxdctl[i] = rd32(IGC_RXDCTL(i));
+		wr32(IGC_RXDCTL(i),
+		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
+	}
+	/* Poll all queues to verify they have shut down */
+	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+		usleep_range(1000, 2000);
+		rx_enabled = 0;
+		for (i = 0; i < 4; i++)
+			rx_enabled |= rd32(IGC_RXDCTL(i));
+		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
+			break;
+	}
+
+	if (ms_wait == 10)
+		pr_debug("Queue disable timed out after 10ms\n");
+
+	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+	 * incoming packets are rejected. Set enable and wait 2ms so that
+	 * any packet that was coming in as RCTL.EN was set is flushed
+	 */
+	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
+
+	rlpml = rd32(IGC_RLPML);
+	wr32(IGC_RLPML, 0);
+
+	rctl = rd32(IGC_RCTL);
+	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
+	temp_rctl |= IGC_RCTL_LPE;
+
+	wr32(IGC_RCTL, temp_rctl);
+	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
+	wrfl();
+	usleep_range(2000, 3000);
+
+	/* Enable Rx queues that were previously enabled and restore our
+	 * previous state
+	 */
+	for (i = 0; i < 4; i++)
+		wr32(IGC_RXDCTL(i), rxdctl[i]);
+	wr32(IGC_RCTL, rctl);
+	wrfl();
+
+	wr32(IGC_RLPML, rlpml);
+	wr32(IGC_RFCTL, rfctl);
+
+	/* Flush receive errors generated by workaround */
+	rd32(IGC_ROC);
+	rd32(IGC_RNBC);
+	rd32(IGC_MPC);
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
new file mode 100644
index 000000000000..4bdb4ecf3bc8
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_BASE_H
+#define _IGC_BASE_H
+
+/* forward declaration */
+void igc_rx_fifo_flush_base(struct igc_hw *hw);
+
+/* Transmit Descriptor - Advanced */
+union igc_adv_tx_desc {
+	struct {
+		__le64 buffer_addr;	/* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;		/* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+struct igc_adv_data_desc {
+	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u32 datalen:16;	/* Data buffer length */
+			u32 rsvd:4;
+			u32 dtyp:4;	/* Descriptor type */
+			u32 dcmd:8;	/* Descriptor command */
+		} config;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u32 status:4;	/* Descriptor status */
+			u32 idx:4;
+			u32 popts:6;	/* Packet Options */
+			u32 paylen:18;	/* Payload length */
+		} options;
+	} upper;
+};
+
+/* Receive Descriptor - Advanced */
+union igc_adv_rx_desc {
+	struct {
+		__le64 pkt_addr;	/* Packet buffer address */
+		__le64 hdr_addr;	/* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /*RSS type, Pkt type*/
+					/* Split Header, header buffer len */
+					__le16 hdr_info;
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss;		/* RSS Hash */
+				struct {
+					__le16 ip_id;	/* IP id */
+					__le16 csum;	/* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;	/* ext status/error */
+			__le16 length;		/* Packet length */
+			__le16 vlan;		/* VLAN tag */
+		} upper;
+	} wb; /* writeback */
+};
+
+/* Additional Transmit Descriptor Control definitions */
+#define IGC_TXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Tx Queue */
+
+/* Additional Receive Descriptor Control definitions */
+#define IGC_RXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define IGC_SRRCTL_BSIZEPKT_SHIFT	10 /* Shift _right_ */
+#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT	2  /* Shift _left_ */
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF	0x02000000
+
+#endif /* _IGC_BASE_H */
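
Both advanced descriptor formats above are 16-byte overlays, which is why
the ring-size math later in this patch is simply count * sizeof(descriptor)
rounded up to 4K. A standalone sketch checking that layout (the le64/le32
typedefs are userspace stand-ins for the kernel's __le64/__le32 and assume
a little-endian host; the union names are mirrors, not the driver's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t le64;
typedef uint32_t le32;

/* mirror of union igc_adv_tx_desc */
union adv_tx_desc {
	struct {
		le64 buffer_addr;
		le32 cmd_type_len;
		le32 olinfo_status;
	} read;
	struct {
		le64 rsvd;
		le32 nxtseq_seed;
		le32 status;
	} wb;
};

/* mirror of union igc_adv_rx_desc, read format only */
union adv_rx_desc {
	struct {
		le64 pkt_addr;	/* packet buffer address */
		le64 hdr_addr;	/* header buffer address */
	} read;
};

_Static_assert(sizeof(union adv_tx_desc) == 16, "Tx descriptor is 16 bytes");
_Static_assert(sizeof(union adv_rx_desc) == 16, "Rx descriptor is 16 bytes");

int main(void)
{
	union adv_tx_desc tx = { .read = { .buffer_addr = 0x1000 } };

	printf("tx desc: %zu bytes, dma 0x%llx\n",
	       sizeof(tx), (unsigned long long)tx.read.buffer_addr);
	return 0;
}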
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7262ad44dcf8..dbc30dead461 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -51,6 +51,10 @@
 #define IGC_ICR_RXO	BIT(6)	/* Rx overrun */
 #define IGC_ICR_RXT0	BIT(7)	/* Rx timer intr (ring 0) */
 #define IGC_ICR_DRSTA	BIT(30)	/* Device Reset Asserted */
+
+/* If this bit asserted, the driver should claim the interrupt */
+#define IGC_ICR_INT_ASSERTED	BIT(31)
+
 #define IGC_ICS_RXT0	IGC_ICR_RXT0 /* Rx timer intr */
 
 #define IMS_ENABLE_MASK ( \
@@ -80,6 +84,45 @@
 #define IGC_GPIE_EIAME	0x40000000
 #define IGC_GPIE_PBA	0x80000000
 
+/* Transmit Control */
+#define IGC_TCTL_EN	0x00000002 /* enable Tx */
+#define IGC_TCTL_PSP	0x00000008 /* pad short packets */
+#define IGC_TCTL_CT	0x00000ff0 /* collision threshold */
+#define IGC_TCTL_COLD	0x003ff000 /* collision distance */
+#define IGC_TCTL_RTLC	0x01000000 /* Re-transmit on late collision */
+#define IGC_TCTL_MULR	0x10000000 /* Multiple request support */
+
+#define IGC_CT_SHIFT			4
+#define IGC_COLLISION_THRESHOLD		15
+
+/* Management Control */
+#define IGC_MANC_RCV_TCO_EN	0x00020000 /* Receive TCO Packets Enabled */
+
+/* Receive Control */
+#define IGC_RCTL_RST		0x00000001 /* Software reset */
+#define IGC_RCTL_EN		0x00000002 /* enable */
+#define IGC_RCTL_SBP		0x00000004 /* store bad packet */
+#define IGC_RCTL_UPE		0x00000008 /* unicast promisc enable */
+#define IGC_RCTL_MPE		0x00000010 /* multicast promisc enable */
+#define IGC_RCTL_LPE		0x00000020 /* long packet enable */
+#define IGC_RCTL_LBM_MAC	0x00000040 /* MAC loopback mode */
+#define IGC_RCTL_LBM_TCVR	0x000000C0 /* tcvr loopback mode */
+
+#define IGC_RCTL_RDMTS_HALF	0x00000000 /* Rx desc min thresh size */
+#define IGC_RCTL_BAM		0x00008000 /* broadcast enable */
+
+/* Header split receive */
+#define IGC_RFCTL_IPV6_EX_DIS	0x00010000
+#define IGC_RFCTL_LEF		0x00040000
+
+#define IGC_RCTL_SZ_256		0x00030000 /* Rx buffer size 256 */
+
+#define IGC_RCTL_MO_SHIFT	12 /* multicast offset shift */
+#define IGC_RCTL_CFIEN		0x00080000 /* canonical form enable */
+#define IGC_RCTL_DPF		0x00400000 /* discard pause frames */
+#define IGC_RCTL_PMCF		0x00800000 /* pass MAC control frames */
+#define IGC_RCTL_SECRC		0x04000000 /* Strip Ethernet CRC */
+
 #define IGC_N0_QUEUE -1
 
 #endif /* _IGC_DEFINES_H_ */
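
The TCTL defines above are used with the usual read-modify-write pattern:
clear the collision-threshold field, then shift the new threshold into it.
A minimal sketch of that packing (userspace, with the defines copied from
this hunk; the 0xffffffff starting value is just a pretend register read):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IGC_TCTL_CT		0x00000ff0	/* collision threshold mask */
#define IGC_CT_SHIFT		4
#define IGC_COLLISION_THRESHOLD	15

int main(void)
{
	uint32_t tctl = 0xffffffff;	/* pretend register value */

	/* same pattern igc_setup_tctl() uses later in this patch */
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT;

	/* the shifted threshold must land entirely inside the CT field */
	assert(((IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT) & ~IGC_TCTL_CT) == 0);
	assert(((tctl & IGC_TCTL_CT) >> IGC_CT_SHIFT) == IGC_COLLISION_THRESHOLD);
	printf("TCTL.CT = %u\n", (unsigned)((tctl & IGC_TCTL_CT) >> IGC_CT_SHIFT));
	return 0;
}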
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 3905efb1bb1b..a032495a0479 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -10,6 +10,7 @@
 #include "igc_defines.h"
 #include "igc_mac.h"
 #include "igc_i225.h"
+#include "igc_base.h"
 
 #define IGC_DEV_ID_I225_LM	0x15F2
 #define IGC_DEV_ID_I225_V	0x15F3
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0fd66620cfa1..373ccea86fb0 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -37,10 +37,12 @@ static const struct pci_device_id igc_pci_tbl[] = {
 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
 
 /* forward declaration */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring);
 static int igc_sw_init(struct igc_adapter *);
 static void igc_configure(struct igc_adapter *adapter);
 static void igc_power_down_link(struct igc_adapter *adapter);
 static void igc_set_default_mac_filter(struct igc_adapter *adapter);
+static void igc_set_rx_mode(struct net_device *netdev);
 static void igc_write_itr(struct igc_q_vector *q_vector);
 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
@@ -119,6 +121,527 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
 }
 
 /**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+	igc_clean_tx_ring(tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * igc_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+{
+	u16 i = tx_ring->next_to_clean;
+	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+
+	while (i != tx_ring->next_to_use) {
+		union igc_adv_tx_desc *eop_desc, *tx_desc;
+
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IGC_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IGC_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
+	}
+
+	/* reset BQL for queue */
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * igc_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_tx_resources(struct igc_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int size = 0;
+
+	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
+	tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	dev_err(dev,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = igc_setup_tx_resources(adapter->tx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igc_free_tx_resources(adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+
+	if (rx_ring->skb)
+		dev_kfree_skb(rx_ring->skb);
+	rx_ring->skb = NULL;
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      buffer_info->dma,
+					      buffer_info->page_offset,
+					      igc_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     buffer_info->dma,
+				     igc_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IGC_RX_DMA_ATTR);
+		__page_frag_cache_drain(buffer_info->page,
+					buffer_info->pagecnt_bias);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ * igc_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ */
+static void igc_free_rx_resources(struct igc_ring *rx_ring)
+{
+	igc_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ */
+static void igc_free_all_rx_resources(struct igc_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igc_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * igc_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_setup_rx_resources(struct igc_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int size, desc_len;
+
+	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
+	rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+
+	desc_len = sizeof(union igc_adv_rx_desc);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+
+	if (!rx_ring->desc)
+		goto err;
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev,
+		"Unable to allocate memory for the receive descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_rx_resources - wrapper to allocate Rx resources
+ *                              (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = igc_setup_rx_resources(adapter->rx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igc_free_rx_resources(adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * igc_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx_ring(struct igc_adapter *adapter,
+				  struct igc_ring *ring)
+{
+	struct igc_hw *hw = &adapter->hw;
+	union igc_adv_rx_desc *rx_desc;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl = 0, rxdctl = 0;
+	u64 rdba = ring->dma;
+
+	/* disable the queue */
+	wr32(IGC_RXDCTL(reg_idx), 0);
+
+	/* Set DMA base address registers */
+	wr32(IGC_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
+	wr32(IGC_RDLEN(reg_idx),
+	     ring->count * sizeof(union igc_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
+	wr32(IGC_RDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	/* reset next-to-use/clean to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	/* set descriptor configuration */
+	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+	wr32(IGC_SRRCTL(reg_idx), srrctl);
+
+	rxdctl |= IGC_RX_PTHRESH;
+	rxdctl |= IGC_RX_HTHRESH << 8;
+	rxdctl |= IGC_RX_WTHRESH << 16;
+
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct igc_rx_buffer) * ring->count);
+
+	/* initialize Rx descriptor 0 */
+	rx_desc = IGC_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
+	/* enable receive descriptor fetching */
+	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
+
+	wr32(IGC_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ * igc_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx(struct igc_adapter *adapter)
+{
+	int i;
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * igc_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ */
+static void igc_configure_tx_ring(struct igc_adapter *adapter,
+				  struct igc_ring *ring)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int reg_idx = ring->reg_idx;
+	u64 tdba = ring->dma;
+	u32 txdctl = 0;
+
+	/* disable the queue */
+	wr32(IGC_TXDCTL(reg_idx), 0);
+	wrfl();
+	mdelay(10);
+
+	wr32(IGC_TDLEN(reg_idx),
+	     ring->count * sizeof(union igc_adv_tx_desc));
+	wr32(IGC_TDBAL(reg_idx),
+	     tdba & 0x00000000ffffffffULL);
+	wr32(IGC_TDBAH(reg_idx), tdba >> 32);
+
+	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
+	wr32(IGC_TDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	txdctl |= IGC_TX_PTHRESH;
+	txdctl |= IGC_TX_HTHRESH << 8;
+	txdctl |= IGC_TX_WTHRESH << 16;
+
+	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
+	wr32(IGC_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igc_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ */
+static void igc_configure_tx(struct igc_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igc_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_mrqc(struct igc_adapter *adapter)
+{
+}
573/**
574 * igc_setup_rctl - configure the receive control registers
575 * @adapter: Board private structure
576 */
577static void igc_setup_rctl(struct igc_adapter *adapter)
578{
579 struct igc_hw *hw = &adapter->hw;
580 u32 rctl;
581
582 rctl = rd32(IGC_RCTL);
583
584 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
585 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
586
587 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
588 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
589
590 /* enable stripping of CRC. Newer features require
591 * that the HW strips the CRC.
592 */
593 rctl |= IGC_RCTL_SECRC;
594
595 /* disable store bad packets and clear size bits. */
596 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
597
598 /* enable LPE to allow for reception of jumbo frames */
599 rctl |= IGC_RCTL_LPE;
600
601 /* disable queue 0 to prevent tail write w/o re-config */
602 wr32(IGC_RXDCTL(0), 0);
603
604 /* This is useful for sniffing bad packets. */
605 if (adapter->netdev->features & NETIF_F_RXALL) {
606 /* UPE and MPE will be handled by normal PROMISC logic
607 * in set_rx_mode
608 */
609 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
610 IGC_RCTL_BAM | /* RX All Bcast Pkts */
611 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
612
613 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
614 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
615 }
616
617 wr32(IGC_RCTL, rctl);
618}
619
620/**
621 * igc_setup_tctl - configure the transmit control registers
622 * @adapter: Board private structure
623 */
624static void igc_setup_tctl(struct igc_adapter *adapter)
625{
626 struct igc_hw *hw = &adapter->hw;
627 u32 tctl;
628
629 /* disable queue 0 which icould be enabled by default */
630 wr32(IGC_TXDCTL(0), 0);
631
632 /* Program the Transmit Control Register */
633 tctl = rd32(IGC_TCTL);
634 tctl &= ~IGC_TCTL_CT;
635 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
636 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
637
638 /* Enable transmits */
639 tctl |= IGC_TCTL_EN;
640
641 wr32(IGC_TCTL, tctl);
642}
643
644/**
122 * igc_set_mac - Change the Ethernet Address of the NIC 645 * igc_set_mac - Change the Ethernet Address of the NIC
123 * @netdev: network interface device structure 646 * @netdev: network interface device structure
124 * @p: pointer to an address structure 647 * @p: pointer to an address structure
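
igc_configure_rx_ring() and igc_configure_tx_ring() above pack the
prefetch/host/writeback thresholds into byte lanes 0, 8 and 16 of the
per-queue RXDCTL/TXDCTL value before setting the queue-enable bit. A
standalone sketch of the Rx packing (defines copied from igc.h and
igc_base.h in this patch; the expected constant is just the arithmetic
worked out by hand):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IGC_RX_PTHRESH		8
#define IGC_RX_HTHRESH		8
#define IGC_RX_WTHRESH		4
#define IGC_RXDCTL_QUEUE_ENABLE	0x02000000

int main(void)
{
	uint32_t rxdctl = 0;

	/* one threshold per byte lane, then the enable bit on top */
	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	assert(rxdctl == 0x02040808);
	printf("RXDCTL = 0x%08x\n", (unsigned)rxdctl);
	return 0;
}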
@@ -150,6 +673,121 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+				  struct igc_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	/* alloc new page for storage */
+	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 igc_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IGC_RX_DMA_ATTR);
+
+	/* if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_page(page);
+
+		rx_ring->rx_stats.alloc_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = igc_rx_offset(rx_ring);
+	bi->pagecnt_bias = 1;
+
+	return true;
+}
+
+/**
+ * igc_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: rx descriptor ring to allocate new receive buffers
+ * @cleaned_count: number of buffers to replace
+ */
+static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
+{
+	union igc_adv_rx_desc *rx_desc;
+	u16 i = rx_ring->next_to_use;
+	struct igc_rx_buffer *bi;
+	u16 bufsz;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = IGC_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+
+	bufsz = igc_rx_bufsz(rx_ring);
+
+	do {
+		if (!igc_alloc_mapped_page(rx_ring, bi))
+			break;
+
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = IGC_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i) {
+		/* record the next descriptor to use */
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
+}
+
 /**
  * igc_ioctl - I/O control method
  * @netdev: network interface device structure
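
The refill loop in igc_alloc_rx_buffers() biases the u16 index by -count
so the wrap test is a cheap check against zero, then adds count back at
the end. A standalone sketch of that index trick (tiny ring, values are
illustrative; the kernel loop also rewinds its rx_desc/bi pointers at the
wrap point, which is omitted here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 8;	/* tiny ring for illustration */
	const uint16_t next_to_use = 6;	/* refill starts here */
	uint16_t i = next_to_use;
	unsigned int refills = 4;	/* enough to cross the ring end */

	i -= count;	/* u16 arithmetic: 6 - 8 wraps to 65534 */

	while (refills--) {
		/* a buffer would be mapped and written here */
		i++;
		if (!i)			/* just walked off the ring end */
			i -= count;	/* re-apply the bias */
	}

	i += count;	/* undo the bias to recover the real index */
	assert(i == (next_to_use + 4) % count);
	printf("next_to_use after refill: %u\n", (unsigned)i);
	return 0;
}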
@@ -189,6 +827,11 @@ static void igc_up(struct igc_adapter *adapter)
 	/* Clear any pending interrupts. */
 	rd32(IGC_ICR);
 	igc_irq_enable(adapter);
+
+	netif_tx_start_all_queues(adapter->netdev);
+
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
 }
 
 /**
@@ -287,7 +930,30 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev)
  */
 static void igc_configure(struct igc_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+	int i = 0;
+
 	igc_get_hw_control(adapter);
+	igc_set_rx_mode(netdev);
+
+	igc_setup_tctl(adapter);
+	igc_setup_mrqc(adapter);
+	igc_setup_rctl(adapter);
+
+	igc_configure_tx(adapter);
+	igc_configure_rx(adapter);
+
+	igc_rx_fifo_flush_base(&adapter->hw);
+
+	/* call igc_desc_unused which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igc_ring *ring = adapter->rx_ring[i];
+
+		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+	}
 }
 
 /**
@@ -336,6 +1002,19 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)
 }
 
 /**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+}
+
+/**
  * igc_msix_other - msix other interrupt handler
  * @irq: interrupt number
  * @data: pointer to a q_vector
@@ -784,6 +1463,83 @@ static void igc_update_itr(struct igc_q_vector *q_vector,
 	ring_container->itr = itrval;
 }
 
+/**
+ * igc_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t igc_intr_msi(int irq, void *data)
+{
+	struct igc_adapter *adapter = data;
+	struct igc_q_vector *q_vector = adapter->q_vector[0];
+	struct igc_hw *hw = &adapter->hw;
+	/* read ICR disables interrupts using IAM */
+	u32 icr = rd32(IGC_ICR);
+
+	igc_write_itr(q_vector);
+
+	if (icr & IGC_ICR_DRSTA)
+		schedule_work(&adapter->reset_task);
+
+	if (icr & IGC_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+	}
+
+	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		if (!test_bit(__IGC_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * igc_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t igc_intr(int irq, void *data)
+{
+	struct igc_adapter *adapter = data;
+	struct igc_q_vector *q_vector = adapter->q_vector[0];
+	struct igc_hw *hw = &adapter->hw;
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+	 * need for the IMC write
+	 */
+	u32 icr = rd32(IGC_ICR);
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
+	if (!(icr & IGC_ICR_INT_ASSERTED))
+		return IRQ_NONE;
+
+	igc_write_itr(q_vector);
+
+	if (icr & IGC_ICR_DRSTA)
+		schedule_work(&adapter->reset_task);
+
+	if (icr & IGC_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+	}
+
+	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__IGC_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
 static void igc_set_itr(struct igc_q_vector *q_vector)
 {
 	struct igc_adapter *adapter = q_vector->adapter;
@@ -1147,6 +1903,29 @@ err_out:
 }
 
 /**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+	int i = 0, j = 0;
+
+	switch (adapter->hw.mac.type) {
+	case igc_i225:
+		/* Fall through */
+	default:
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->reg_idx = i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j]->reg_idx = j;
+		break;
+	}
+}
+
+/**
  * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
  * @adapter: Pointer to adapter structure
  *
@@ -1165,6 +1944,8 @@ static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
 		goto err_alloc_q_vectors;
 	}
 
+	igc_cache_ring_register(adapter);
+
 	return 0;
 
 err_alloc_q_vectors:
@@ -1252,6 +2033,8 @@ static void igc_irq_enable(struct igc_adapter *adapter)
  */
 static int igc_request_irq(struct igc_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 	int err = 0;
 
 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
@@ -1259,14 +2042,38 @@ static int igc_request_irq(struct igc_adapter *adapter)
 		if (!err)
 			goto request_done;
 		/* fall back to MSI */
+		igc_free_all_tx_resources(adapter);
+		igc_free_all_rx_resources(adapter);
 
 		igc_clear_interrupt_scheme(adapter);
 		err = igc_init_interrupt_scheme(adapter, false);
 		if (err)
 			goto request_done;
+		igc_setup_all_tx_resources(adapter);
+		igc_setup_all_rx_resources(adapter);
 		igc_configure(adapter);
 	}
 
+	igc_assign_vector(adapter->q_vector[0], 0);
+
+	if (adapter->flags & IGC_FLAG_HAS_MSI) {
+		err = request_irq(pdev->irq, &igc_intr_msi, 0,
+				  netdev->name, adapter);
+		if (!err)
+			goto request_done;
+
+		/* fall back to legacy interrupts */
+		igc_reset_interrupt_capability(adapter);
+		adapter->flags &= ~IGC_FLAG_HAS_MSI;
+	}
+
+	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
+			  netdev->name, adapter);
+
+	if (err)
+		dev_err(&pdev->dev, "Error %d getting interrupt\n",
+			err);
+
 request_done:
 	return err;
 }
@@ -1315,6 +2122,16 @@ static int __igc_open(struct net_device *netdev, bool resuming)
 
 	netif_carrier_off(netdev);
 
+	/* allocate transmit descriptors */
+	err = igc_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = igc_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
 	igc_power_up_link(adapter);
 
 	igc_configure(adapter);
@@ -1341,6 +2158,8 @@ static int __igc_open(struct net_device *netdev, bool resuming)
 	rd32(IGC_ICR);
 	igc_irq_enable(adapter);
 
+	netif_tx_start_all_queues(netdev);
+
 	/* start the watchdog. */
 	hw->mac.get_link_status = 1;
 
@@ -1351,6 +2170,11 @@ err_set_queues:
 err_req_irq:
 	igc_release_hw_control(adapter);
 	igc_power_down_link(adapter);
+	igc_free_all_rx_resources(adapter);
+err_setup_rx:
+	igc_free_all_tx_resources(adapter);
+err_setup_tx:
+	igc_reset(adapter);
 
 	return err;
 }
@@ -1383,6 +2207,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
 
 	igc_free_irq(adapter);
 
+	igc_free_all_tx_resources(adapter);
+	igc_free_all_rx_resources(adapter);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 2372d6d68dbc..e268986eeb9f 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -168,6 +168,9 @@
 #define IGC_SCVPC	0x04228 /* SerDes/SGMII Code Violation Pkt Count */
 #define IGC_HRMPC	0x0A018 /* Header Redirection Missed Packet Count */
 
+/* Management registers */
+#define IGC_MANC	0x05820 /* Management Control - RW */
+
 /* forward declaration */
 struct igc_hw;
 u32 igc_rd32(struct igc_hw *hw, u32 reg);