author		Jay Cliburn <jacliburn@bellsouth.net>	2007-07-15 12:03:26 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-07-16 18:29:16 -0400
commit		2b116145bbdbe1b13a2eb780988447eecd657a55 (patch)
tree		83475d1c967cb3df29749b51c8f9fd3060344531 /drivers/net/atl1
parent		70d9d4b2477f90fb99227026f4cb6e75920eb1ec (diff)
atl1: header file cleanup
Remove unused structure members, improve comments, break long comment lines,
rename a constant to be consistent with others in the file.

Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/atl1')
-rw-r--r--	drivers/net/atl1/atl1.h		| 155
-rw-r--r--	drivers/net/atl1/atl1_main.c	|  20
2 files changed, 90 insertions(+), 85 deletions(-)
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
index a769e7b8b80b..991c8b93d386 100644
--- a/drivers/net/atl1/atl1.h
+++ b/drivers/net/atl1/atl1.h
@@ -43,6 +43,7 @@ extern const struct ethtool_ops atl1_ethtool_ops;
 struct atl1_adapter;
 
 #define ATL1_MAX_INTR		3
+#define ATL1_MAX_TX_BUF_LEN	0x3000	/* 12288 bytes */
 
 #define ATL1_DEFAULT_TPD	256
 #define ATL1_MAX_TPD		1024
@@ -57,29 +58,45 @@ struct atl1_adapter;
 #define ATL1_RRD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_return_desc)
 
 /*
+ * This detached comment is preserved for documentation purposes only.
+ * It was originally attached to some code that got deleted, but seems
+ * important enough to keep around...
+ *
+ * <begin detached comment>
  * Some workarounds require millisecond delays and are run during interrupt
  * context. Most notably, when establishing link, the phy may need tweaking
  * but cannot process phy register reads/writes faster than millisecond
  * intervals...and we establish link due to a "link status change" interrupt.
+ * <end detached comment>
+ */
+
+/*
+ * atl1_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
  */
+struct atl1_ring_header {
+	void *desc;		/* virtual address */
+	dma_addr_t dma;		/* physical address*/
+	unsigned int size;	/* length in bytes */
+};
 
 /*
- * wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
+ * atl1_buffer is wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
  */
 struct atl1_buffer {
-	struct sk_buff *skb;
-	u16 length;
-	u16 alloced;
+	struct sk_buff *skb;		/* socket buffer */
+	u16 length;			/* rx buffer length */
+	u16 alloced;			/* 1 if skb allocated */
 	dma_addr_t dma;
 };
 
-#define MAX_TX_BUF_LEN	0x3000	/* 12KB */
-
+/* transmit packet descriptor (tpd) ring */
 struct atl1_tpd_ring {
-	void *desc;	/* pointer to the descriptor ring memory */
-	dma_addr_t dma;	/* physical adress of the descriptor ring */
-	u16 size;	/* length of descriptor ring in bytes */
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
 	u16 count;	/* number of descriptors in the ring */
 	u16 hw_idx;	/* hardware index */
 	atomic_t next_to_clean;
@@ -87,36 +104,34 @@ struct atl1_tpd_ring {
 	struct atl1_buffer *buffer_info;
 };
 
+/* receive free descriptor (rfd) ring */
 struct atl1_rfd_ring {
-	void *desc;
-	dma_addr_t dma;
-	u16 size;
-	u16 count;
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	u16 size;		/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
 	atomic_t next_to_use;
 	u16 next_to_clean;
 	struct atl1_buffer *buffer_info;
 };
 
+/* receive return descriptor (rrd) ring */
 struct atl1_rrd_ring {
-	void *desc;
-	dma_addr_t dma;
-	unsigned int size;
-	u16 count;
+	void *desc;		/* descriptor ring virtual address */
+	dma_addr_t dma;		/* descriptor ring physical address */
+	unsigned int size;	/* descriptor ring length in bytes */
+	u16 count;		/* number of descriptors in the ring */
 	u16 next_to_use;
 	atomic_t next_to_clean;
 };
 
-struct atl1_ring_header {
-	void *desc;	/* pointer to the descriptor ring memory */
-	dma_addr_t dma;	/* physical adress of the descriptor ring */
-	unsigned int size;	/* length of descriptor ring in bytes */
-};
-
+/* coalescing message block (cmb) */
 struct atl1_cmb {
 	struct coals_msg_block *cmb;
 	dma_addr_t dma;
 };
 
+/* statistics message block (smb) */
 struct atl1_smb {
 	struct stats_msg_block *smb;
 	dma_addr_t dma;
@@ -141,24 +156,26 @@ struct atl1_sft_stats {
 	u64 tx_aborted_errors;
 	u64 tx_window_errors;
 	u64 tx_carrier_errors;
-
-	u64 tx_pause;	/* num Pause packet transmitted. */
-	u64 excecol;	/* num tx packets aborted due to excessive collisions. */
-	u64 deffer;	/* num deferred tx packets */
-	u64 scc;	/* num packets subsequently transmitted successfully w/ single prior collision. */
-	u64 mcc;	/* num packets subsequently transmitted successfully w/ multiple prior collisions. */
+	u64 tx_pause;		/* num pause packets transmitted. */
+	u64 excecol;		/* num tx packets w/ excessive collisions. */
+	u64 deffer;		/* num tx packets deferred */
+	u64 scc;		/* num packets subsequently transmitted
+				 * successfully w/ single prior collision. */
+	u64 mcc;		/* num packets subsequently transmitted
+				 * successfully w/ multiple prior collisions. */
 	u64 latecol;	/* num tx packets w/ late collisions. */
-	u64 tx_underun;	/* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
-	u64 tx_trunc;	/* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
+	u64 tx_underun;		/* num tx packets aborted due to transmit
+				 * FIFO underrun, or TRD FIFO underrun */
+	u64 tx_trunc;		/* num tx packets truncated due to size
+				 * exceeding MTU, regardless whether truncated
+				 * by the chip or not. (The name doesn't really
+				 * reflect the meaning in this case.) */
 	u64 rx_pause;	/* num Pause packets received. */
 	u64 rx_rrd_ov;
 	u64 rx_trunc;
 };
 
-/* board specific private data structure */
-#define ATL1_REGS_LEN 8
-
-/* Structure containing variables used by the shared code */
+/* hardware structure */
 struct atl1_hw {
 	u8 __iomem *hw_addr;
 	struct atl1_adapter *back;
@@ -167,24 +184,35 @@ struct atl1_hw {
 	enum atl1_dma_req_block dmar_block;
 	enum atl1_dma_req_block dmaw_block;
 	u8 preamble_len;
-	u8 max_retry;	/* Retransmission maximum, after which the packet will be discarded */
-	u8 jam_ipg;	/* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
-	u8 ipgt;	/* Desired back to back inter-packet gap. The default is 96-bit time */
-	u8 min_ifg;	/* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */
+	u8 max_retry;		/* Retransmission maximum, after which the
+				 * packet will be discarded */
+	u8 jam_ipg;		/* IPG to start JAM for collision based flow
+				 * control in half-duplex mode. In units of
+				 * 8-bit time */
+	u8 ipgt;		/* Desired back to back inter-packet gap.
+				 * The default is 96-bit time */
+	u8 min_ifg;		/* Minimum number of IFG to enforce in between
+				 * receive frames. Frame gap below such IFP
+				 * is dropped */
 	u8 ipgr1;	/* 64bit Carrier-Sense window */
 	u8 ipgr2;	/* 96-bit IPG window */
-	u8 tpd_burst;	/* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
-	u8 rfd_burst;	/* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned
+				 * burst. Each TPD is 16 bytes long */
+	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned
+				 * burst. Each RFD is 12 bytes long */
 	u8 rfd_fetch_gap;
-	u8 rrd_burst;	/* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+	u8 rrd_burst;		/* Threshold number of RRDs that can be retired
+				 * in a burst. Each RRD is 16 bytes long */
 	u8 tpd_fetch_th;
 	u8 tpd_fetch_gap;
 	u16 tx_jumbo_task_th;
-	u16 txf_burst;	/* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
-			   8 bytes long */
-	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+	u16 txf_burst;		/* Number of data bytes to read in a cache-
+				 * aligned burst. Each SRAM entry is 8 bytes */
+	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN
+				 * packets should add 4 bytes */
 	u16 rx_jumbo_lkah;
-	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after
+				 * every 512ns passes. */
 	u16 lcol;	/* Collision Window */
 
 	u16 cmb_tpd;
@@ -194,20 +222,12 @@ struct atl1_hw {
 	u32 smb_timer;
 	u16 media_type;
 	u16 autoneg_advertised;
-	u16 pci_cmd_word;
 
 	u16 mii_autoneg_adv_reg;
 	u16 mii_1000t_ctrl_reg;
 
-	u32 mem_rang;
-	u32 txcw;
 	u32 max_frame_size;
 	u32 min_frame_size;
-	u32 mc_filter_type;
-	u32 num_mc_addrs;
-	u32 collision_delta;
-	u32 tx_packet_delta;
-	u16 phy_spd_default;
 
 	u16 dev_rev;
 	u8 revision_id;
@@ -215,21 +235,17 @@ struct atl1_hw {
 	/* spi flash */
 	u8 flash_vendor;
 
-	u8 dma_fairness;
 	u8 mac_addr[ETH_ALEN];
 	u8 perm_mac_addr[ETH_ALEN];
 
-	/* bool phy_preamble_sup; */
 	bool phy_configured;
 };
 
 struct atl1_adapter {
-	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;
 	struct atl1_sft_stats soft_stats;
-
 	struct vlan_group *vlgrp;
 	u32 rx_buffer_len;
 	u32 wol;
@@ -243,9 +259,7 @@ struct atl1_adapter {
 	struct timer_list phy_config_timer;
 	bool phy_timer_pending;
 
-	bool mac_disabled;
-
-	/* All descriptor rings' memory */
+	/* all descriptor rings' memory */
 	struct atl1_ring_header ring_header;
 
 	/* TX */
@@ -258,25 +272,16 @@ struct atl1_adapter {
 	u64 hw_csum_err;
 	u64 hw_csum_good;
 
-	u32 gorcl;
-	u64 gorcl_old;
-
-	/* Interrupt Moderator timer ( 2us resolution) */
-	u16 imt;
-	/* Interrupt Clear timer (2us resolution) */
-	u16 ict;
-
-	/* MII interface info */
-	struct mii_if_info mii;
+	u16 imt;		/* interrupt moderator timer (2us resolution */
+	u16 ict;		/* interrupt clear timer (2us resolution */
+	struct mii_if_info mii;	/* MII interface info */
 
 	/* structs defined in atl1_hw.h */
 	u32 bd_number;	/* board number */
 	bool pci_using_64;
 	struct atl1_hw hw;
 	struct atl1_smb smb;
 	struct atl1_cmb cmb;
-
-	u32 pci_state[16];
 };
 
 #endif /* _ATL1_H_ */
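
For readers following the new atl1_ring_header comment above, here is a minimal sketch of how one contiguous DMA allocation can back the three descriptor rings and the two message blocks. It is illustrative only, not the driver's actual setup code: the helper name is made up, struct tx_packet_desc and struct rx_free_desc are assumed type names not shown in this hunk, and the real driver also pads each region for alignment.

static int example_alloc_ring_block(struct pci_dev *pdev,
				    struct atl1_ring_header *rh,
				    struct atl1_tpd_ring *tpd_ring,
				    struct atl1_rfd_ring *rfd_ring,
				    struct atl1_rrd_ring *rrd_ring,
				    struct atl1_cmb *cmb,
				    struct atl1_smb *smb)
{
	u8 *base;
	unsigned int offset = 0;

	/* one allocation covers the tpd, rfd and rrd rings plus cmb and smb */
	rh->size = tpd_ring->count * sizeof(struct tx_packet_desc) +
		   rfd_ring->count * sizeof(struct rx_free_desc) +
		   rrd_ring->count * sizeof(struct rx_return_desc) +
		   sizeof(struct coals_msg_block) +
		   sizeof(struct stats_msg_block);

	rh->desc = pci_alloc_consistent(pdev, rh->size, &rh->dma);
	if (!rh->desc)
		return -ENOMEM;
	base = rh->desc;

	/* each ring's virtual/physical address is an offset into the block */
	tpd_ring->desc = base + offset;
	tpd_ring->dma = rh->dma + offset;
	offset += tpd_ring->count * sizeof(struct tx_packet_desc);

	rfd_ring->desc = base + offset;
	rfd_ring->dma = rh->dma + offset;
	offset += rfd_ring->count * sizeof(struct rx_free_desc);

	rrd_ring->desc = base + offset;
	rrd_ring->dma = rh->dma + offset;
	offset += rrd_ring->count * sizeof(struct rx_return_desc);

	cmb->cmb = (struct coals_msg_block *)(base + offset);
	cmb->dma = rh->dma + offset;
	offset += sizeof(struct coals_msg_block);

	smb->smb = (struct stats_msg_block *)(base + offset);
	smb->dma = rh->dma + offset;

	return 0;
}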
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index ef12dba85811..6c8cf986bee4 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -1344,21 +1344,21 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
 
 	if (first_buf_len > proto_hdr_len) {
 		len12 = first_buf_len - proto_hdr_len;
-		m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+		m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
 		for (i = 0; i < m; i++) {
 			buffer_info =
 			    &tpd_ring->buffer_info[tpd_next_to_use];
 			buffer_info->skb = NULL;
 			buffer_info->length =
-			    (MAX_TX_BUF_LEN >=
-			     len12) ? MAX_TX_BUF_LEN : len12;
+			    (ATL1_MAX_TX_BUF_LEN >=
+			     len12) ? ATL1_MAX_TX_BUF_LEN : len12;
 			len12 -= buffer_info->length;
 			page = virt_to_page(skb->data +
 					    (proto_hdr_len +
-					     i * MAX_TX_BUF_LEN));
+					     i * ATL1_MAX_TX_BUF_LEN));
 			offset = (unsigned long)(skb->data +
 						 (proto_hdr_len +
-						  i * MAX_TX_BUF_LEN)) &
+						  i * ATL1_MAX_TX_BUF_LEN)) &
 						 ~PAGE_MASK;
 			buffer_info->dma =
 			    pci_map_page(adapter->pdev, page, offset,
@@ -1387,18 +1387,18 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
 		frag = &skb_shinfo(skb)->frags[f];
 		lenf = frag->size;
 
-		m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+		m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
 		for (i = 0; i < m; i++) {
 			buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
 			if (unlikely(buffer_info->skb))
 				BUG();
 			buffer_info->skb = NULL;
 			buffer_info->length =
-			    (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
+			    (lenf > ATL1_MAX_TX_BUF_LEN) ? ATL1_MAX_TX_BUF_LEN : lenf;
 			lenf -= buffer_info->length;
 			buffer_info->dma =
 			    pci_map_page(adapter->pdev, frag->page,
-					 frag->page_offset + i * MAX_TX_BUF_LEN,
+					 frag->page_offset + i * ATL1_MAX_TX_BUF_LEN,
 					 buffer_info->length, PCI_DMA_TODEVICE);
 
 			if (++tpd_next_to_use == tpd_ring->count)
@@ -1516,7 +1516,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		frag_size = skb_shinfo(skb)->frags[f].size;
 		if (frag_size)
 			count +=
-			    (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+			    (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
 	}
 
 	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
@@ -1532,7 +1532,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* need additional TPD ? */
 		if (proto_hdr_len != len)
 			count += (len - proto_hdr_len +
-				  MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+				  ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
 		}
 	}
 
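
The atl1_main.c hunks above all use the same ceiling-division idiom to work out how many TPDs a buffer needs once it is capped at ATL1_MAX_TX_BUF_LEN bytes per descriptor. A small standalone sketch of that arithmetic (plain userspace C, with values chosen purely for illustration):

#include <stdio.h>

#define ATL1_MAX_TX_BUF_LEN	0x3000	/* 12288 bytes, as defined in atl1.h */

/* number of TPDs needed to carry 'len' bytes (ceiling division) */
static unsigned int tpds_needed(unsigned int len)
{
	return (len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
}

int main(void)
{
	/* a 20000-byte fragment needs two descriptors: 12288 + 7712 bytes */
	printf("%u\n", tpds_needed(20000));	/* prints 2 */
	/* a standard 1500-byte frame fits in a single descriptor */
	printf("%u\n", tpds_needed(1500));	/* prints 1 */
	return 0;
}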