author	Al Viro <viro@ftp.linux.org.uk>	2007-12-22 13:55:49 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:07:59 -0500
commit	3dd5f1d422d8a736178a760562c98d4169b33a23 (patch)
tree	fb484782158edf65d16c3a8aedeec9e88395364c /drivers/net
parent	904584018e9ba30a3e562d86ee7dfb6239105664 (diff)
eepro100 annotations
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
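The patch types the driver's little-endian on-wire descriptor and statistics fields as __le16/__le32 and converts the remaining raw constants with cpu_to_le16()/cpu_to_le32(). As a rough sketch of the pattern being applied (not taken from this driver; the struct and function names below are made up for illustration):

	#include <linux/types.h>	/* __le32, u32 */
	#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu() */

	/* Hypothetical descriptor, not from eepro100.c. */
	struct example_rxfd {
		volatile __le32 status;		/* device reads/writes little-endian */
		__le32 buf_addr;		/* bus address, little-endian */
	};

	static void example_fill(struct example_rxfd *rxf, u32 addr)
	{
		/* CPU-order values are converted before the device sees them. */
		rxf->buf_addr = cpu_to_le32(addr);
		rxf->status = cpu_to_le32(0x00000001);
	}

	static u32 example_status(const struct example_rxfd *rxf)
	{
		/* ...and converted back before the CPU interprets them. */
		return le32_to_cpu(rxf->status);
	}

With the fields typed __le32, sparse (make C=1) can flag any access that mixes CPU-order and little-endian values; it also lets clear_suspend() drop its per-endianness #ifdef below and mask with ~cpu_to_le16(1<<14) directly.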
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/eepro100.c	90
1 file changed, 42 insertions, 48 deletions
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 3003e9276976..e3e26c595fa3 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -304,13 +304,7 @@ enum commands {
 #if defined(__alpha__)
 # define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
 #else
-# if defined(__LITTLE_ENDIAN)
-#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
-# elif defined(__BIG_ENDIAN)
-#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
-# else
-#  error Unsupported byteorder
-# endif
+# define clear_suspend(cmd)  ((__le16 *)&(cmd)->cmd_status)[1] &= ~cpu_to_le16(1<<14)
 #endif
 
 enum SCBCmdBits {
@@ -331,17 +325,17 @@ enum SCBPort_cmds {
 
 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
 struct descriptor {			/* A generic descriptor. */
-	volatile s32 cmd_status;	/* All command and status fields. */
-	u32 link;			/* struct descriptor * */
+	volatile __le32 cmd_status;	/* All command and status fields. */
+	__le32 link;			/* struct descriptor * */
 	unsigned char params[0];
 };
 
 /* The Speedo3 Rx and Tx buffer descriptors. */
 struct RxFD {				/* Receive frame descriptor. */
-	volatile s32 status;
-	u32 link;			/* struct RxFD * */
-	u32 rx_buf_addr;		/* void * */
-	u32 count;
+	volatile __le32 status;
+	__le32 link;			/* struct RxFD * */
+	__le32 rx_buf_addr;		/* void * */
+	__le32 count;
 } RxFD_ALIGNMENT;
 
 /* Selected elements of the Tx/RxFD.status word. */
@@ -354,16 +348,16 @@ enum RxFD_bits {
 
 #define CONFIG_DATA_SIZE 22
 struct TxFD {				/* Transmit frame descriptor set. */
-	s32 status;
-	u32 link;			/* void * */
-	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
-	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
+	__le32 status;
+	__le32 link;			/* void * */
+	__le32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
+	__le32 count;			/* # of TBD (=1), Tx start thresh., etc. */
 	/* This constitutes two "TBD" entries -- we only use one. */
 #define TX_DESCR_BUF_OFFSET 16
-	u32 tx_buf_addr0;		/* void *, frame to be transmitted. */
-	s32 tx_buf_size0;		/* Length of Tx frame. */
-	u32 tx_buf_addr1;		/* void *, frame to be transmitted. */
-	s32 tx_buf_size1;		/* Length of Tx frame. */
+	__le32 tx_buf_addr0;		/* void *, frame to be transmitted. */
+	__le32 tx_buf_size0;		/* Length of Tx frame. */
+	__le32 tx_buf_addr1;		/* void *, frame to be transmitted. */
+	__le32 tx_buf_size1;		/* Length of Tx frame. */
 	/* the structure must have space for at least CONFIG_DATA_SIZE starting
 	 * from tx_desc_addr field */
 };
@@ -379,23 +373,23 @@ struct speedo_mc_block {
 
 /* Elements of the dump_statistics block. This block must be lword aligned. */
 struct speedo_stats {
-	u32 tx_good_frames;
-	u32 tx_coll16_errs;
-	u32 tx_late_colls;
-	u32 tx_underruns;
-	u32 tx_lost_carrier;
-	u32 tx_deferred;
-	u32 tx_one_colls;
-	u32 tx_multi_colls;
-	u32 tx_total_colls;
-	u32 rx_good_frames;
-	u32 rx_crc_errs;
-	u32 rx_align_errs;
-	u32 rx_resource_errs;
-	u32 rx_overrun_errs;
-	u32 rx_colls_errs;
-	u32 rx_runt_errs;
-	u32 done_marker;
+	__le32 tx_good_frames;
+	__le32 tx_coll16_errs;
+	__le32 tx_late_colls;
+	__le32 tx_underruns;
+	__le32 tx_lost_carrier;
+	__le32 tx_deferred;
+	__le32 tx_one_colls;
+	__le32 tx_multi_colls;
+	__le32 tx_total_colls;
+	__le32 rx_good_frames;
+	__le32 rx_crc_errs;
+	__le32 rx_align_errs;
+	__le32 rx_resource_errs;
+	__le32 rx_overrun_errs;
+	__le32 rx_colls_errs;
+	__le32 rx_runt_errs;
+	__le32 done_marker;
 };
 
 enum Rx_ring_state_bits {
@@ -1139,7 +1133,7 @@ speedo_rx_soft_reset(struct net_device *dev)
 
 	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
 
-	rfd->rx_buf_addr = 0xffffffff;
+	rfd->rx_buf_addr = cpu_to_le32(0xffffffff);
 
 	if (wait_for_cmd_done(dev, sp) != 0) {
 		printk("%s: RxAbort command stalled\n", dev->name);
@@ -1275,7 +1269,7 @@ speedo_init_rx_ring(struct net_device *dev)
 		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
 		rxf->link = 0;			/* None yet. */
 		/* This field unused by i82557. */
-		rxf->rx_buf_addr = 0xffffffff;
+		rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
 		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
 		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
 				sizeof(struct RxFD), PCI_DMA_TODEVICE);
@@ -1657,7 +1651,7 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
 				   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
 	skb->dev = dev;
 	skb_reserve(skb, sizeof(struct RxFD));
-	rxf->rx_buf_addr = 0xffffffff;
+	rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
 	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
 			sizeof(struct RxFD), PCI_DMA_TODEVICE);
 	return rxf;
@@ -1933,7 +1927,7 @@ speedo_get_stats(struct net_device *dev)
 	void __iomem *ioaddr = sp->regs;
 
 	/* Update only if the previous dump finished. */
-	if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
+	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
 		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
 		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
 		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
@@ -2142,7 +2136,7 @@ static void set_rx_mode(struct net_device *dev)
 		/* The simple case of 0-3 multicast list entries occurs often, and
 		   fits within one tx_ring[] entry. */
 		struct dev_mc_list *mclist;
-		u16 *setup_params, *eaddrs;
+		__le16 *setup_params, *eaddrs;
 
 		spin_lock_irqsave(&sp->lock, flags);
 		entry = sp->cur_tx++ % TX_RING_SIZE;
@@ -2154,12 +2148,12 @@ static void set_rx_mode(struct net_device *dev)
 		sp->tx_ring[entry].link =
 			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
 		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
-		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+		setup_params = (__le16 *)&sp->tx_ring[entry].tx_desc_addr;
 		*setup_params++ = cpu_to_le16(dev->mc_count*6);
 		/* Fill in the multicast addresses. */
 		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
 			 i++, mclist = mclist->next) {
-			eaddrs = (u16 *)mclist->dmi_addr;
+			eaddrs = (__le16 *)mclist->dmi_addr;
 			*setup_params++ = *eaddrs++;
 			*setup_params++ = *eaddrs++;
 			*setup_params++ = *eaddrs++;
@@ -2177,7 +2171,7 @@ static void set_rx_mode(struct net_device *dev)
 		spin_unlock_irqrestore(&sp->lock, flags);
 	} else if (new_rx_mode == 0) {
 		struct dev_mc_list *mclist;
-		u16 *setup_params, *eaddrs;
+		__le16 *setup_params, *eaddrs;
 		struct speedo_mc_block *mc_blk;
 		struct descriptor *mc_setup_frm;
 		int i;
@@ -2204,12 +2198,12 @@ static void set_rx_mode(struct net_device *dev)
2204 mc_setup_frm->cmd_status = 2198 mc_setup_frm->cmd_status =
2205 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList); 2199 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2206 /* Link set below. */ 2200 /* Link set below. */
2207 setup_params = (u16 *)&mc_setup_frm->params; 2201 setup_params = (__le16 *)&mc_setup_frm->params;
2208 *setup_params++ = cpu_to_le16(dev->mc_count*6); 2202 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2209 /* Fill in the multicast addresses. */ 2203 /* Fill in the multicast addresses. */
2210 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 2204 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2211 i++, mclist = mclist->next) { 2205 i++, mclist = mclist->next) {
2212 eaddrs = (u16 *)mclist->dmi_addr; 2206 eaddrs = (__le16 *)mclist->dmi_addr;
2213 *setup_params++ = *eaddrs++; 2207 *setup_params++ = *eaddrs++;
2214 *setup_params++ = *eaddrs++; 2208 *setup_params++ = *eaddrs++;
2215 *setup_params++ = *eaddrs++; 2209 *setup_params++ = *eaddrs++;