author	David S. Miller <davem@davemloft.net>	2008-04-17 17:13:13 -0400
committer	David S. Miller <davem@davemloft.net>	2008-04-17 17:13:13 -0400
commit	2e5a3eaca386ce026f240c7b21e5c4958fcea946 (patch)
tree	191cf2b340d008b711137ce8c40b27a3dadff8d5
parent	8c95b4773dd8d0415269ffad7301ef96d75be8ee (diff)
parent	36b30ea940bb88d88c90698e0e3d97a805ab5856 (diff)
Merge branch 'upstream-net26' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
70 files changed, 3375 insertions, 3929 deletions
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index ba93d8ae9b0c..d5770fdf7f09 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -138,7 +138,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
 
 	bus->name = "ep8248e-mdio-bitbang";
 	bus->dev = &ofdev->dev;
-	bus->id = res.start;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
 
 	return mdiobus_register(bus);
 }
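This hunk, and the related ones below, all make the same change: mii_bus::id is no longer an integer but a fixed-size string, so each driver now formats its identifier with snprintf(). A minimal sketch of the pattern, assuming only struct mii_bus and MII_BUS_ID_SIZE from <linux/phy.h>; the function and parameter names are illustrative, not taken from this merge:

#include <linux/kernel.h>
#include <linux/phy.h>

/* Hedged illustration: format a hardware identifier into the
 * fixed-size mii_bus::id string instead of assigning an integer. */
static void example_set_mii_bus_id(struct mii_bus *bus, unsigned int hw_id)
{
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", hw_id);
}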
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index b46542990cf8..ab6955412ba4 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -241,7 +241,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
 	new_bus->reset = &gpio_mdio_reset;
 
 	prop = of_get_property(np, "reg", NULL);
-	new_bus->id = *prop;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop);
 	new_bus->priv = priv;
 
 	new_bus->phy_mask = 0;
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 2c5388ce902a..3581416905ea 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -341,7 +341,7 @@ static int __init gfar_of_init(void)
 				goto unreg;
 			}
 
-			gfar_data.bus_id = 0;
+			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0");
 			gfar_data.phy_id = fixed_link[0];
 		} else {
 			phy = of_find_node_by_phandle(*ph);
@@ -362,7 +362,8 @@ static int __init gfar_of_init(void)
 			}
 
 			gfar_data.phy_id = *id;
-			gfar_data.bus_id = res.start;
+			snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%x",
+				 res.start);
 
 			of_node_put(phy);
 			of_node_put(mdio);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index da30a31e66f9..45c3a208d93f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1438,7 +1438,7 @@ config CS89x0
 config TC35815
 	tristate "TOSHIBA TC35815 Ethernet support"
 	depends on NET_PCI && PCI && MIPS
-	select MII
+	select PHYLIB
 
 config EEPRO100
 	tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 504b7ce2747d..3634b5fd7919 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -701,7 +701,7 @@ static struct net_device * au1000_probe(int port_num)
 	aup->mii_bus.write = mdiobus_write;
 	aup->mii_bus.reset = mdiobus_reset;
 	aup->mii_bus.name = "au1000_eth_mii";
-	aup->mii_bus.id = aup->mac_id;
+	snprintf(aup->mii_bus.id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
 	aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for(i = 0; i < PHY_MAX_ADDR; ++i)
 		aup->mii_bus.irq[i] = PHY_POLL;
@@ -709,11 +709,11 @@ static struct net_device * au1000_probe(int port_num)
 	/* if known, set corresponding PHY IRQs */
 #if defined(AU1XXX_PHY_STATIC_CONFIG)
 # if defined(AU1XXX_PHY0_IRQ)
-	if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY0_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
 # endif
 # if defined(AU1XXX_PHY1_IRQ)
-	if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
+	if (AU1XXX_PHY1_BUSID == aup->mac_id)
 		aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
 # endif
 #endif
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 26b2dd5016cd..717dcc1aa1e9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -969,7 +969,7 @@ static int __init bf537mac_probe(struct net_device *dev)
 	lp->mii_bus.write = mdiobus_write;
 	lp->mii_bus.reset = mdiobus_reset;
 	lp->mii_bus.name = "bfin_mac_mdio";
-	lp->mii_bus.id = 0;
+	snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
 	lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	for (i = 0; i < PHY_MAX_ADDR; ++i)
 		lp->mii_bus.irq[i] = PHY_POLL;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ecfaf1460b1a..6e91b4b7aabb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3065,8 +3065,6 @@ out:
 
 #ifdef CONFIG_PROC_FS
 
-#define SEQ_START_TOKEN ((void *)1)
-
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct bonding *bond = seq->private;
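The bonding hunk only drops a local definition: SEQ_START_TOKEN is already provided by <linux/seq_file.h>, so a ->start() handler can use it directly. A small sketch with an illustrative handler name:

#include <linux/seq_file.h>

/* Hedged sketch: return the seq_file header token for position 0,
 * NULL once the (single, illustrative) record has been consumed. */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}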
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index c85194f2cd2d..9da7ff437031 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -987,7 +987,7 @@ static int external_switch;
 static int __devinit cpmac_probe(struct platform_device *pdev)
 {
 	int rc, phy_id, i;
-	int mdio_bus_id = cpmac_mii.id;
+	char *mdio_bus_id = "0";
 	struct resource *mem;
 	struct cpmac_priv *priv;
 	struct net_device *dev;
@@ -1008,8 +1008,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	if (external_switch || dumb_switch) {
 		struct fixed_phy_status status = {};
 
-		mdio_bus_id = 0;
-
 		/*
 		 * FIXME: this should be in the platform code!
 		 * Since there is not platform code at all (that is,
@@ -1143,6 +1141,7 @@ int __devinit cpmac_init(void)
 	}
 
 	cpmac_mii.phy_mask = ~(mask | 0x80000000);
+	snprintf(cpmac_mii.id, MII_BUS_ID_SIZE, "0");
 
 	res = mdiobus_register(&cpmac_mii);
 	if (res)
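With cpmac_mii.id now the string "0", a PHY can be named by its bus id plus address. A hedged sketch of composing such a name, assuming PHY_ID_FMT (the "%s:%02x" format used by phylib) is available from <linux/phy.h>; buffer and parameter names are illustrative:

#include <linux/kernel.h>
#include <linux/phy.h>

/* Hedged illustration: build a "busid:addr" PHY name from the
 * string bus id; phy_name/len/phy_addr are placeholders. */
static void example_phy_name(char *phy_name, size_t len,
			     const char *mdio_bus_id, int phy_addr)
{
	snprintf(phy_name, len, PHY_ID_FMT, mdio_bus_id, phy_addr);
}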
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fd2e05bbb903..05e5f59e87fa 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1014,8 +1014,8 @@ static int offload_open(struct net_device *dev)
 		     adapter->port[0]->mtu : 0xffff);
 	init_smt(adapter);
 
-	/* Never mind if the next step fails */
-	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+		dev_dbg(&dev->dev, "cannot create sysfs group\n");
 
 	/* Call back all registered clients */
 	cxgb3_add_clients(tdev);
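The cxgb3 hunk stops discarding the __must_check return value of sysfs_create_group(). A minimal sketch of the same pattern, with illustrative names for the device and attribute group:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hedged sketch: log, rather than silently ignore, a failure to
 * create the sysfs group; example_dev/example_group are placeholders. */
static void example_add_attrs(struct device *example_dev,
			      const struct attribute_group *example_group)
{
	if (sysfs_create_group(&example_dev->kobj, example_group))
		dev_dbg(example_dev, "cannot create sysfs group\n");
}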
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a05aa51ecfa6..31feae1ea390 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -161,13 +161,13 @@ struct e1000_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	unsigned long time_stamp;
-	uint16_t length;
-	uint16_t next_to_watch;
+	u16 length;
+	u16 next_to_watch;
 };
 
 
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
 
 struct e1000_tx_ring {
 	/* pointer to the descriptor ring memory */
@@ -186,8 +186,8 @@ struct e1000_tx_ring {
 	struct e1000_buffer *buffer_info;
 
 	spinlock_t tx_lock;
-	uint16_t tdh;
-	uint16_t tdt;
+	u16 tdh;
+	u16 tdt;
 	bool last_tx_tso;
 };
 
@@ -213,8 +213,8 @@ struct e1000_rx_ring {
 	/* cpu for rx queue */
 	int cpu;
 
-	uint16_t rdh;
-	uint16_t rdt;
+	u16 rdh;
+	u16 rdt;
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -237,14 +237,14 @@ struct e1000_adapter {
 	struct timer_list watchdog_timer;
 	struct timer_list phy_info_timer;
 	struct vlan_group *vlgrp;
-	uint16_t mng_vlan_id;
-	uint32_t bd_number;
-	uint32_t rx_buffer_len;
-	uint32_t wol;
-	uint32_t smartspeed;
-	uint32_t en_mng_pt;
-	uint16_t link_speed;
-	uint16_t link_duplex;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
 	spinlock_t stats_lock;
 #ifdef CONFIG_E1000_NAPI
 	spinlock_t tx_queue_lock;
@@ -254,13 +254,13 @@ struct e1000_adapter {
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 	/* Interrupt Throttle Rate */
-	uint32_t itr;
-	uint32_t itr_setting;
-	uint16_t tx_itr;
-	uint16_t rx_itr;
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
 
 	struct work_struct reset_task;
-	uint8_t fc_autoneg;
+	u8 fc_autoneg;
 
 	struct timer_list blink_timer;
 	unsigned long led_status;
@@ -269,18 +269,18 @@ struct e1000_adapter {
 	struct e1000_tx_ring *tx_ring; /* One per active queue */
 	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	uint32_t txd_cmd;
-	uint32_t tx_int_delay;
-	uint32_t tx_abs_int_delay;
-	uint32_t gotcl;
-	uint64_t gotcl_old;
-	uint64_t tpt_old;
-	uint64_t colc_old;
-	uint32_t tx_timeout_count;
-	uint32_t tx_fifo_head;
-	uint32_t tx_head_addr;
-	uint32_t tx_fifo_size;
-	uint8_t tx_timeout_factor;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8 tx_timeout_factor;
 	atomic_t tx_fifo_stall;
 	bool pcix_82544;
 	bool detect_tx_hung;
@@ -305,17 +305,17 @@ struct e1000_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	uint64_t hw_csum_err;
-	uint64_t hw_csum_good;
-	uint64_t rx_hdr_split;
-	uint32_t alloc_rx_buff_failed;
-	uint32_t rx_int_delay;
-	uint32_t rx_abs_int_delay;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
 	bool rx_csum;
 	unsigned int rx_ps_pages;
-	uint32_t gorcl;
-	uint64_t gorcl_old;
-	uint16_t rx_ps_bsize0;
+	u32 gorcl;
+	u64 gorcl_old;
+	u16 rx_ps_bsize0;
 
 
 	/* OS defined structs */
@@ -329,7 +329,7 @@ struct e1000_adapter {
 	struct e1000_phy_info phy_info;
 	struct e1000_phy_stats phy_stats;
 
-	uint32_t test_icr;
+	u32 test_icr;
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
@@ -341,7 +341,7 @@ struct e1000_adapter {
 	bool smart_power_down; /* phy smart power down */
 	bool quad_port_a;
 	unsigned long flags;
-	uint32_t eeprom_wol;
+	u32 eeprom_wol;
 };
 
 enum e1000_state_t {
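The e1000 hunks here and below are a mechanical rename from the C99 uintN_t/intN_t spellings to the kernel's own fixed-width types from <linux/types.h>; the widths are unchanged. A hedged illustration of the mapping (the struct and fields are invented, not from the driver):

#include <linux/types.h>

struct example_widths {
	u8  byte_val;	/* was uint8_t  */
	u16 word_val;	/* was uint16_t */
	u32 dword_val;	/* was uint32_t */
	u64 qword_val;	/* was uint64_t */
	s32 status;	/* was int32_t  */
};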
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 05e1fb3cf49f..701531e72e7b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -36,7 +36,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
 extern void e1000_down(struct e1000_adapter *adapter);
 extern void e1000_reinit_locked(struct e1000_adapter *adapter);
 extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
 extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
 extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
@@ -289,7 +289,7 @@ e1000_set_pauseparam(struct net_device *netdev,
 	return retval;
 }
 
-static uint32_t
+static u32
 e1000_get_rx_csum(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -297,7 +297,7 @@ e1000_get_rx_csum(struct net_device *netdev)
 }
 
 static int
-e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
@@ -309,14 +309,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_tx_csum(struct net_device *netdev)
 {
 	return (netdev->features & NETIF_F_HW_CSUM) != 0;
 }
 
 static int
-e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
+e1000_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -335,7 +335,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 }
 
 static int
-e1000_set_tso(struct net_device *netdev, uint32_t data)
+e1000_set_tso(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	if ((adapter->hw.mac_type < e1000_82544) ||
@@ -357,7 +357,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	return 0;
 }
 
-static uint32_t
+static u32
 e1000_get_msglevel(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -365,7 +365,7 @@ e1000_get_msglevel(struct net_device *netdev)
 }
 
 static void
-e1000_set_msglevel(struct net_device *netdev, uint32_t data)
+e1000_set_msglevel(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->msg_enable = data;
@@ -375,7 +375,7 @@ static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
-	return E1000_REGS_LEN * sizeof(uint32_t);
+	return E1000_REGS_LEN * sizeof(u32);
 }
 
 static void
@@ -384,10 +384,10 @@ e1000_get_regs(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t *regs_buff = p;
-	uint16_t phy_data;
+	u32 *regs_buff = p;
+	u16 phy_data;
 
-	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
 	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
 
@@ -412,44 +412,44 @@ e1000_get_regs(struct net_device *netdev,
 				    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_B);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[14] = (uint32_t)phy_data; /* cable length */
+		regs_buff[14] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_C);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[15] = (uint32_t)phy_data; /* cable length */
+		regs_buff[15] = (u32)phy_data; /* cable length */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_D);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[16] = (uint32_t)phy_data; /* cable length */
+		regs_buff[16] = (u32)phy_data; /* cable length */
 		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_PCS_INIT_REG);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
 				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
-		regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
 		regs_buff[20] = 0; /* polarity correction enabled (always) */
 		regs_buff[22] = 0; /* phy receive errors (unavailable) */
 		regs_buff[23] = regs_buff[18]; /* mdix mode */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 	} else {
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-		regs_buff[13] = (uint32_t)phy_data; /* cable length */
+		regs_buff[13] = (u32)phy_data; /* cable length */
 		regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
 		regs_buff[18] = regs_buff[13]; /* cable polarity */
 		regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[20] = regs_buff[17]; /* polarity correction */
@@ -459,7 +459,7 @@ e1000_get_regs(struct net_device *netdev,
 	}
 	regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-	regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
+	regs_buff[24] = (u32)phy_data; /* phy local receiver status */
 	regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
 	if (hw->mac_type >= e1000_82540 &&
 	    hw->mac_type < e1000_82571 &&
@@ -477,14 +477,14 @@ e1000_get_eeprom_len(struct net_device *netdev)
 
 static int
 e1000_get_eeprom(struct net_device *netdev,
-		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		 struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	int first_word, last_word;
 	int ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EINVAL;
@@ -494,7 +494,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 
-	eeprom_buff = kmalloc(sizeof(uint16_t) *
+	eeprom_buff = kmalloc(sizeof(u16) *
 			(last_word - first_word + 1), GFP_KERNEL);
 	if (!eeprom_buff)
 		return -ENOMEM;
@@ -514,7 +514,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	for (i = 0; i < last_word - first_word + 1; i++)
 		le16_to_cpus(&eeprom_buff[i]);
 
-	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
 			eeprom->len);
 	kfree(eeprom_buff);
 
@@ -523,14 +523,14 @@ e1000_get_eeprom(struct net_device *netdev,
 
 static int
 e1000_set_eeprom(struct net_device *netdev,
-		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
+		 struct ethtool_eeprom *eeprom, u8 *bytes)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	uint16_t *eeprom_buff;
+	u16 *eeprom_buff;
 	void *ptr;
 	int max_len, first_word, last_word, ret_val = 0;
-	uint16_t i;
+	u16 i;
 
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
@@ -590,7 +590,7 @@ e1000_get_drvinfo(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
-	uint16_t eeprom_data;
+	u16 eeprom_data;
 
 	strncpy(drvinfo->driver, e1000_driver_name, 32);
 	strncpy(drvinfo->version, e1000_driver_version, 32);
@@ -674,13 +674,13 @@ e1000_set_ringparam(struct net_device *netdev,
 	adapter->tx_ring = txdr;
 	adapter->rx_ring = rxdr;
 
-	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
-	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
 			  E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
-	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
+	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
 			  E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
@@ -728,13 +728,13 @@ err_setup:
 	return err;
 }
 
-static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
-			     int reg, uint32_t mask, uint32_t write)
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, u32 mask, u32 write)
 {
-	static const uint32_t test[] =
+	static const u32 test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(test); i++) {
@@ -751,11 +751,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
 	return false;
 }
 
-static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
-			      int reg, uint32_t mask, uint32_t write)
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
 {
-	uint8_t __iomem *address = adapter->hw.hw_addr + reg;
-	uint32_t read;
+	u8 __iomem *address = adapter->hw.hw_addr + reg;
+	u32 read;
 
 	writel(write & mask, address);
 	read = readl(address);
@@ -788,10 +788,10 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
 	} while (0)
 
 static int
-e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint32_t value, before, after;
-	uint32_t i, toggle;
+	u32 value, before, after;
+	u32 i, toggle;
 
 	/* The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
@@ -884,11 +884,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 }
 
 static int
-e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
 {
-	uint16_t temp;
-	uint16_t checksum = 0;
-	uint16_t i;
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
@@ -901,7 +901,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (u16) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -919,12 +919,12 @@ e1000_test_intr(int irq, void *data)
 }
 
 static int
-e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 {
 	struct net_device *netdev = adapter->netdev;
-	uint32_t mask, i = 0;
+	u32 mask, i = 0;
 	bool shared_int = true;
-	uint32_t irq = adapter->pdev->irq;
+	u32 irq = adapter->pdev->irq;
 
 	*data = 0;
 
@@ -1070,7 +1070,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
 	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	uint32_t rctl;
+	u32 rctl;
 	int i, ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
@@ -1096,8 +1096,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	txdr->next_to_use = txdr->next_to_clean = 0;
 
 	E1000_WRITE_REG(&adapter->hw, TDBAL,
-			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
+			((u64) txdr->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, TDLEN,
 			txdr->count * sizeof(struct e1000_tx_desc));
 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
@@ -1153,8 +1153,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
 	E1000_WRITE_REG(&adapter->hw, RDBAL,
-			((uint64_t) rxdr->dma & 0xFFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
+			((u64) rxdr->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
 	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
 	E1000_WRITE_REG(&adapter->hw, RDT, 0);
@@ -1202,7 +1202,7 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
 static void
 e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg;
+	u16 phy_reg;
 
 	/* Because we reset the PHY above, we need to re-force TX_CLK in the
 	 * Extended PHY Specific Control Register to 25MHz clock. This
@@ -1226,8 +1226,8 @@ e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 static int
 e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg;
-	uint16_t phy_reg;
+	u32 ctrl_reg;
+	u16 phy_reg;
 
 	/* Setup the Device Control Register for PHY loopback test. */
 
@@ -1293,8 +1293,8 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint32_t ctrl_reg = 0;
-	uint32_t stat_reg = 0;
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
 
 	adapter->hw.autoneg = false;
 
@@ -1363,8 +1363,8 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_set_phy_loopback(struct e1000_adapter *adapter)
 {
-	uint16_t phy_reg = 0;
-	uint16_t count = 0;
+	u16 phy_reg = 0;
+	u16 count = 0;
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
@@ -1416,7 +1416,7 @@ static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
+	u32 rctl;
 
 	if (hw->media_type == e1000_media_type_fiber ||
 	    hw->media_type == e1000_media_type_internal_serdes) {
@@ -1451,8 +1451,8 @@ static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	uint32_t rctl;
-	uint16_t phy_reg;
+	u32 rctl;
+	u16 phy_reg;
 
 	rctl = E1000_READ_REG(hw, RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@@ -1578,7 +1578,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 }
 
 static int
-e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 {
 	/* PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active */
@@ -1603,7 +1603,7 @@ out:
 }
 
 static int
-e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
+e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 {
 	*data = 0;
 	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
@@ -1647,7 +1647,7 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
 
 static void
 e1000_diag_test(struct net_device *netdev,
-		struct ethtool_test *eth_test, uint64_t *data)
+		struct ethtool_test *eth_test, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	bool if_running = netif_running(netdev);
@@ -1657,9 +1657,9 @@ e1000_diag_test(struct net_device *netdev,
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
-		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
-		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
-		uint8_t autoneg = adapter->hw.autoneg;
+		u16 autoneg_advertised = adapter->hw.autoneg_advertised;
+		u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
+		u8 autoneg = adapter->hw.autoneg;
 
 		DPRINTK(HW, INFO, "offline testing starting\n");
 
@@ -1877,7 +1877,7 @@ e1000_led_blink_callback(unsigned long data)
 }
 
 static int
-e1000_phys_id(struct net_device *netdev, uint32_t data)
+e1000_phys_id(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
@@ -1927,7 +1927,7 @@ e1000_nway_reset(struct net_device *netdev)
 
 static void
 e1000_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+		struct ethtool_stats *stats, u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
@@ -1936,15 +1936,15 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 /* BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
-e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
-	uint8_t *p = data;
+	u8 *p = data;
 	int i;
 
 	switch (stringset) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index b64203458e9a..9a4b6cbddf2c 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -33,107 +33,107 @@
 
 #include "e1000_hw.h"
 
-static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
-static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
-static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
 static void e1000_release_software_semaphore(struct e1000_hw *hw);
 
-static uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
-static int32_t e1000_check_downshift(struct e1000_hw *hw);
-static int32_t e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
 static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
 static void e1000_clear_vfta(struct e1000_hw *hw);
-static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
-static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
 						  bool link_up);
-static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
-static int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
-static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank);
-static int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
-static int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
-static int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static int32_t e1000_get_software_flag(struct e1000_hw *hw);
-static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw);
-static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout);
-static int32_t e1000_id_led_init(struct e1000_hw *hw);
-static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
-static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
 static void e1000_init_rx_addrs(struct e1000_hw *hw);
 static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
 static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
-static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
-static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum);
-static int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
-static int32_t e1000_mng_write_commit(struct e1000_hw *hw);
-static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
-static int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
-static int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
-static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t *data);
-static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte);
-static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data);
-static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t *data);
-static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data);
-static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 static void e1000_release_software_flag(struct e1000_hw *hw);
-static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
 static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
-static int32_t e1000_wait_autoneg(struct e1000_hw *hw);
-static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
-static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
 static void e1000_phy_init_script(struct e1000_hw *hw);
-static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
-static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
-static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
-static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
-static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
-				     uint16_t count);
-static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
-static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
-static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
-				      uint16_t words, uint16_t *data);
-static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
-					    uint16_t offset, uint16_t words,
-					    uint16_t *data);
-static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
-static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
-static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
-				    uint16_t count);
-static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
-				      uint16_t phy_data);
-static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
-				     uint16_t *phy_data);
-static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
-static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+				     u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+				  u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw,
+					u16 offset, u16 words,
+					u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data,
+				    u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				  u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+				 u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
 static void e1000_release_eeprom(struct e1000_hw *hw);
 static void e1000_standby_eeprom(struct e1000_hw *hw);
-static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
-static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
-static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
128 | static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); | 128 | static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); |
129 | static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); | 129 | static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); |
130 | static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, | 130 | static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, |
131 | uint16_t duplex); | 131 | u16 duplex); |
132 | static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); | 132 | static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); |
133 | 133 | ||
134 | /* IGP cable length table */ | 134 | /* IGP cable length table */ |
135 | static const | 135 | static const |
136 | uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = | 136 | u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = |
137 | { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, | 137 | { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
138 | 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, | 138 | 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, |
139 | 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, | 139 | 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, |
@@ -144,7 +144,7 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = | |||
144 | 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; | 144 | 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; |
145 | 145 | ||
146 | static const | 146 | static const |
147 | uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = | 147 | u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = |
148 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, | 148 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, |
149 | 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, | 149 | 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, |
150 | 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, | 150 | 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, |
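The change running through every hunk in this file is a rename of the C99 fixed-width types (uint8_t, uint16_t, uint32_t, int32_t) to the kernel's native u8/u16/u32/s32 spellings, which kernel code picks up from linux/types.h. As a reading aid only, the stand-alone typedefs below approximate that mapping under the common ILP32/LP64 ABIs; they are not part of this commit.

#include <stdint.h>

/* Approximation of the kernel's fixed-width types, for reading this diff;
 * real kernel code includes <linux/types.h> instead of defining these. */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t   s8;
typedef int16_t  s16;
typedef int32_t  s32;
typedef int64_t  s64;

With that equivalence in mind, every replacement in the declarations and tables above is width-for-width identical; only the spelling changes.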
@@ -159,7 +159,7 @@ uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = | |||
159 | * | 159 | * |
160 | * hw - Struct containing variables accessed by shared code | 160 | * hw - Struct containing variables accessed by shared code |
161 | *****************************************************************************/ | 161 | *****************************************************************************/ |
162 | static int32_t | 162 | static s32 |
163 | e1000_set_phy_type(struct e1000_hw *hw) | 163 | e1000_set_phy_type(struct e1000_hw *hw) |
164 | { | 164 | { |
165 | DEBUGFUNC("e1000_set_phy_type"); | 165 | DEBUGFUNC("e1000_set_phy_type"); |
@@ -213,8 +213,8 @@ e1000_set_phy_type(struct e1000_hw *hw) | |||
213 | static void | 213 | static void |
214 | e1000_phy_init_script(struct e1000_hw *hw) | 214 | e1000_phy_init_script(struct e1000_hw *hw) |
215 | { | 215 | { |
216 | uint32_t ret_val; | 216 | u32 ret_val; |
217 | uint16_t phy_saved_data; | 217 | u16 phy_saved_data; |
218 | 218 | ||
219 | DEBUGFUNC("e1000_phy_init_script"); | 219 | DEBUGFUNC("e1000_phy_init_script"); |
220 | 220 | ||
@@ -272,7 +272,7 @@ e1000_phy_init_script(struct e1000_hw *hw) | |||
272 | e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); | 272 | e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); |
273 | 273 | ||
274 | if (hw->mac_type == e1000_82547) { | 274 | if (hw->mac_type == e1000_82547) { |
275 | uint16_t fused, fine, coarse; | 275 | u16 fused, fine, coarse; |
276 | 276 | ||
277 | /* Move to analog registers page */ | 277 | /* Move to analog registers page */ |
278 | e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); | 278 | e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); |
@@ -306,7 +306,7 @@ e1000_phy_init_script(struct e1000_hw *hw) | |||
306 | * | 306 | * |
307 | * hw - Struct containing variables accessed by shared code | 307 | * hw - Struct containing variables accessed by shared code |
308 | *****************************************************************************/ | 308 | *****************************************************************************/ |
309 | int32_t | 309 | s32 |
310 | e1000_set_mac_type(struct e1000_hw *hw) | 310 | e1000_set_mac_type(struct e1000_hw *hw) |
311 | { | 311 | { |
312 | DEBUGFUNC("e1000_set_mac_type"); | 312 | DEBUGFUNC("e1000_set_mac_type"); |
@@ -477,7 +477,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
477 | void | 477 | void |
478 | e1000_set_media_type(struct e1000_hw *hw) | 478 | e1000_set_media_type(struct e1000_hw *hw) |
479 | { | 479 | { |
480 | uint32_t status; | 480 | u32 status; |
481 | 481 | ||
482 | DEBUGFUNC("e1000_set_media_type"); | 482 | DEBUGFUNC("e1000_set_media_type"); |
483 | 483 | ||
@@ -528,17 +528,17 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
528 | * | 528 | * |
529 | * hw - Struct containing variables accessed by shared code | 529 | * hw - Struct containing variables accessed by shared code |
530 | *****************************************************************************/ | 530 | *****************************************************************************/ |
531 | int32_t | 531 | s32 |
532 | e1000_reset_hw(struct e1000_hw *hw) | 532 | e1000_reset_hw(struct e1000_hw *hw) |
533 | { | 533 | { |
534 | uint32_t ctrl; | 534 | u32 ctrl; |
535 | uint32_t ctrl_ext; | 535 | u32 ctrl_ext; |
536 | uint32_t icr; | 536 | u32 icr; |
537 | uint32_t manc; | 537 | u32 manc; |
538 | uint32_t led_ctrl; | 538 | u32 led_ctrl; |
539 | uint32_t timeout; | 539 | u32 timeout; |
540 | uint32_t extcnf_ctrl; | 540 | u32 extcnf_ctrl; |
541 | int32_t ret_val; | 541 | s32 ret_val; |
542 | 542 | ||
543 | DEBUGFUNC("e1000_reset_hw"); | 543 | DEBUGFUNC("e1000_reset_hw"); |
544 | 544 | ||
@@ -730,7 +730,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
730 | } | 730 | } |
731 | 731 | ||
732 | if (hw->mac_type == e1000_ich8lan) { | 732 | if (hw->mac_type == e1000_ich8lan) { |
733 | uint32_t kab = E1000_READ_REG(hw, KABGTXD); | 733 | u32 kab = E1000_READ_REG(hw, KABGTXD); |
734 | kab |= E1000_KABGTXD_BGSQLBIAS; | 734 | kab |= E1000_KABGTXD_BGSQLBIAS; |
735 | E1000_WRITE_REG(hw, KABGTXD, kab); | 735 | E1000_WRITE_REG(hw, KABGTXD, kab); |
736 | } | 736 | } |
@@ -752,10 +752,10 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
752 | { | 752 | { |
753 | if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { | 753 | if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { |
754 | /* Settings common to all PCI-express silicon */ | 754 | /* Settings common to all PCI-express silicon */ |
755 | uint32_t reg_ctrl, reg_ctrl_ext; | 755 | u32 reg_ctrl, reg_ctrl_ext; |
756 | uint32_t reg_tarc0, reg_tarc1; | 756 | u32 reg_tarc0, reg_tarc1; |
757 | uint32_t reg_tctl; | 757 | u32 reg_tctl; |
758 | uint32_t reg_txdctl, reg_txdctl1; | 758 | u32 reg_txdctl, reg_txdctl1; |
759 | 759 | ||
760 | /* link autonegotiation/sync workarounds */ | 760 | /* link autonegotiation/sync workarounds */ |
761 | reg_tarc0 = E1000_READ_REG(hw, TARC0); | 761 | reg_tarc0 = E1000_READ_REG(hw, TARC0); |
@@ -866,15 +866,15 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
866 | * configuration and flow control settings. Clears all on-chip counters. Leaves | 866 | * configuration and flow control settings. Clears all on-chip counters. Leaves |
867 | * the transmit and receive units disabled and uninitialized. | 867 | * the transmit and receive units disabled and uninitialized. |
868 | *****************************************************************************/ | 868 | *****************************************************************************/ |
869 | int32_t | 869 | s32 |
870 | e1000_init_hw(struct e1000_hw *hw) | 870 | e1000_init_hw(struct e1000_hw *hw) |
871 | { | 871 | { |
872 | uint32_t ctrl; | 872 | u32 ctrl; |
873 | uint32_t i; | 873 | u32 i; |
874 | int32_t ret_val; | 874 | s32 ret_val; |
875 | uint32_t mta_size; | 875 | u32 mta_size; |
876 | uint32_t reg_data; | 876 | u32 reg_data; |
877 | uint32_t ctrl_ext; | 877 | u32 ctrl_ext; |
878 | 878 | ||
879 | DEBUGFUNC("e1000_init_hw"); | 879 | DEBUGFUNC("e1000_init_hw"); |
880 | 880 | ||
@@ -1020,7 +1020,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
1020 | 1020 | ||
1021 | 1021 | ||
1022 | if (hw->mac_type == e1000_82573) { | 1022 | if (hw->mac_type == e1000_82573) { |
1023 | uint32_t gcr = E1000_READ_REG(hw, GCR); | 1023 | u32 gcr = E1000_READ_REG(hw, GCR); |
1024 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; | 1024 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; |
1025 | E1000_WRITE_REG(hw, GCR, gcr); | 1025 | E1000_WRITE_REG(hw, GCR, gcr); |
1026 | } | 1026 | } |
@@ -1054,11 +1054,11 @@ e1000_init_hw(struct e1000_hw *hw) | |||
1054 | * | 1054 | * |
1055 | * hw - Struct containing variables accessed by shared code. | 1055 | * hw - Struct containing variables accessed by shared code. |
1056 | *****************************************************************************/ | 1056 | *****************************************************************************/ |
1057 | static int32_t | 1057 | static s32 |
1058 | e1000_adjust_serdes_amplitude(struct e1000_hw *hw) | 1058 | e1000_adjust_serdes_amplitude(struct e1000_hw *hw) |
1059 | { | 1059 | { |
1060 | uint16_t eeprom_data; | 1060 | u16 eeprom_data; |
1061 | int32_t ret_val; | 1061 | s32 ret_val; |
1062 | 1062 | ||
1063 | DEBUGFUNC("e1000_adjust_serdes_amplitude"); | 1063 | DEBUGFUNC("e1000_adjust_serdes_amplitude"); |
1064 | 1064 | ||
@@ -1100,12 +1100,12 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw) | |||
1100 | * established. Assumes the hardware has previously been reset and the | 1100 | * established. Assumes the hardware has previously been reset and the |
1101 | * transmitter and receiver are not enabled. | 1101 | * transmitter and receiver are not enabled. |
1102 | *****************************************************************************/ | 1102 | *****************************************************************************/ |
1103 | int32_t | 1103 | s32 |
1104 | e1000_setup_link(struct e1000_hw *hw) | 1104 | e1000_setup_link(struct e1000_hw *hw) |
1105 | { | 1105 | { |
1106 | uint32_t ctrl_ext; | 1106 | u32 ctrl_ext; |
1107 | int32_t ret_val; | 1107 | s32 ret_val; |
1108 | uint16_t eeprom_data; | 1108 | u16 eeprom_data; |
1109 | 1109 | ||
1110 | DEBUGFUNC("e1000_setup_link"); | 1110 | DEBUGFUNC("e1000_setup_link"); |
1111 | 1111 | ||
@@ -1233,15 +1233,15 @@ e1000_setup_link(struct e1000_hw *hw) | |||
1233 | * link. Assumes the hardware has been previously reset and the transmitter | 1233 | * link. Assumes the hardware has been previously reset and the transmitter |
1234 | * and receiver are not enabled. | 1234 | * and receiver are not enabled. |
1235 | *****************************************************************************/ | 1235 | *****************************************************************************/ |
1236 | static int32_t | 1236 | static s32 |
1237 | e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | 1237 | e1000_setup_fiber_serdes_link(struct e1000_hw *hw) |
1238 | { | 1238 | { |
1239 | uint32_t ctrl; | 1239 | u32 ctrl; |
1240 | uint32_t status; | 1240 | u32 status; |
1241 | uint32_t txcw = 0; | 1241 | u32 txcw = 0; |
1242 | uint32_t i; | 1242 | u32 i; |
1243 | uint32_t signal = 0; | 1243 | u32 signal = 0; |
1244 | int32_t ret_val; | 1244 | s32 ret_val; |
1245 | 1245 | ||
1246 | DEBUGFUNC("e1000_setup_fiber_serdes_link"); | 1246 | DEBUGFUNC("e1000_setup_fiber_serdes_link"); |
1247 | 1247 | ||
@@ -1380,12 +1380,12 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1380 | * | 1380 | * |
1381 | * hw - Struct containing variables accessed by shared code | 1381 | * hw - Struct containing variables accessed by shared code |
1382 | ******************************************************************************/ | 1382 | ******************************************************************************/ |
1383 | static int32_t | 1383 | static s32 |
1384 | e1000_copper_link_preconfig(struct e1000_hw *hw) | 1384 | e1000_copper_link_preconfig(struct e1000_hw *hw) |
1385 | { | 1385 | { |
1386 | uint32_t ctrl; | 1386 | u32 ctrl; |
1387 | int32_t ret_val; | 1387 | s32 ret_val; |
1388 | uint16_t phy_data; | 1388 | u16 phy_data; |
1389 | 1389 | ||
1390 | DEBUGFUNC("e1000_copper_link_preconfig"); | 1390 | DEBUGFUNC("e1000_copper_link_preconfig"); |
1391 | 1391 | ||
@@ -1440,12 +1440,12 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1440 | * | 1440 | * |
1441 | * hw - Struct containing variables accessed by shared code | 1441 | * hw - Struct containing variables accessed by shared code |
1442 | *********************************************************************/ | 1442 | *********************************************************************/ |
1443 | static int32_t | 1443 | static s32 |
1444 | e1000_copper_link_igp_setup(struct e1000_hw *hw) | 1444 | e1000_copper_link_igp_setup(struct e1000_hw *hw) |
1445 | { | 1445 | { |
1446 | uint32_t led_ctrl; | 1446 | u32 led_ctrl; |
1447 | int32_t ret_val; | 1447 | s32 ret_val; |
1448 | uint16_t phy_data; | 1448 | u16 phy_data; |
1449 | 1449 | ||
1450 | DEBUGFUNC("e1000_copper_link_igp_setup"); | 1450 | DEBUGFUNC("e1000_copper_link_igp_setup"); |
1451 | 1451 | ||
@@ -1587,12 +1587,12 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1587 | * | 1587 | * |
1588 | * hw - Struct containing variables accessed by shared code | 1588 | * hw - Struct containing variables accessed by shared code |
1589 | *********************************************************************/ | 1589 | *********************************************************************/ |
1590 | static int32_t | 1590 | static s32 |
1591 | e1000_copper_link_ggp_setup(struct e1000_hw *hw) | 1591 | e1000_copper_link_ggp_setup(struct e1000_hw *hw) |
1592 | { | 1592 | { |
1593 | int32_t ret_val; | 1593 | s32 ret_val; |
1594 | uint16_t phy_data; | 1594 | u16 phy_data; |
1595 | uint32_t reg_data; | 1595 | u32 reg_data; |
1596 | 1596 | ||
1597 | DEBUGFUNC("e1000_copper_link_ggp_setup"); | 1597 | DEBUGFUNC("e1000_copper_link_ggp_setup"); |
1598 | 1598 | ||
@@ -1735,11 +1735,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) | |||
1735 | * | 1735 | * |
1736 | * hw - Struct containing variables accessed by shared code | 1736 | * hw - Struct containing variables accessed by shared code |
1737 | *********************************************************************/ | 1737 | *********************************************************************/ |
1738 | static int32_t | 1738 | static s32 |
1739 | e1000_copper_link_mgp_setup(struct e1000_hw *hw) | 1739 | e1000_copper_link_mgp_setup(struct e1000_hw *hw) |
1740 | { | 1740 | { |
1741 | int32_t ret_val; | 1741 | s32 ret_val; |
1742 | uint16_t phy_data; | 1742 | u16 phy_data; |
1743 | 1743 | ||
1744 | DEBUGFUNC("e1000_copper_link_mgp_setup"); | 1744 | DEBUGFUNC("e1000_copper_link_mgp_setup"); |
1745 | 1745 | ||
@@ -1839,11 +1839,11 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) | |||
1839 | * | 1839 | * |
1840 | * hw - Struct containing variables accessed by shared code | 1840 | * hw - Struct containing variables accessed by shared code |
1841 | *********************************************************************/ | 1841 | *********************************************************************/ |
1842 | static int32_t | 1842 | static s32 |
1843 | e1000_copper_link_autoneg(struct e1000_hw *hw) | 1843 | e1000_copper_link_autoneg(struct e1000_hw *hw) |
1844 | { | 1844 | { |
1845 | int32_t ret_val; | 1845 | s32 ret_val; |
1846 | uint16_t phy_data; | 1846 | u16 phy_data; |
1847 | 1847 | ||
1848 | DEBUGFUNC("e1000_copper_link_autoneg"); | 1848 | DEBUGFUNC("e1000_copper_link_autoneg"); |
1849 | 1849 | ||
@@ -1910,10 +1910,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1910 | * | 1910 | * |
1911 | * hw - Struct containing variables accessed by shared code | 1911 | * hw - Struct containing variables accessed by shared code |
1912 | ******************************************************************************/ | 1912 | ******************************************************************************/ |
1913 | static int32_t | 1913 | static s32 |
1914 | e1000_copper_link_postconfig(struct e1000_hw *hw) | 1914 | e1000_copper_link_postconfig(struct e1000_hw *hw) |
1915 | { | 1915 | { |
1916 | int32_t ret_val; | 1916 | s32 ret_val; |
1917 | DEBUGFUNC("e1000_copper_link_postconfig"); | 1917 | DEBUGFUNC("e1000_copper_link_postconfig"); |
1918 | 1918 | ||
1919 | if (hw->mac_type >= e1000_82544) { | 1919 | if (hw->mac_type >= e1000_82544) { |
@@ -1948,13 +1948,13 @@ e1000_copper_link_postconfig(struct e1000_hw *hw) | |||
1948 | * | 1948 | * |
1949 | * hw - Struct containing variables accessed by shared code | 1949 | * hw - Struct containing variables accessed by shared code |
1950 | ******************************************************************************/ | 1950 | ******************************************************************************/ |
1951 | static int32_t | 1951 | static s32 |
1952 | e1000_setup_copper_link(struct e1000_hw *hw) | 1952 | e1000_setup_copper_link(struct e1000_hw *hw) |
1953 | { | 1953 | { |
1954 | int32_t ret_val; | 1954 | s32 ret_val; |
1955 | uint16_t i; | 1955 | u16 i; |
1956 | uint16_t phy_data; | 1956 | u16 phy_data; |
1957 | uint16_t reg_data; | 1957 | u16 reg_data; |
1958 | 1958 | ||
1959 | DEBUGFUNC("e1000_setup_copper_link"); | 1959 | DEBUGFUNC("e1000_setup_copper_link"); |
1960 | 1960 | ||
@@ -2062,12 +2062,12 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
2062 | * | 2062 | * |
2063 | * hw - Struct containing variables accessed by shared code | 2063 | * hw - Struct containing variables accessed by shared code |
2064 | ******************************************************************************/ | 2064 | ******************************************************************************/ |
2065 | static int32_t | 2065 | static s32 |
2066 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) | 2066 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) |
2067 | { | 2067 | { |
2068 | int32_t ret_val = E1000_SUCCESS; | 2068 | s32 ret_val = E1000_SUCCESS; |
2069 | uint32_t tipg; | 2069 | u32 tipg; |
2070 | uint16_t reg_data; | 2070 | u16 reg_data; |
2071 | 2071 | ||
2072 | DEBUGFUNC("e1000_configure_kmrn_for_10_100"); | 2072 | DEBUGFUNC("e1000_configure_kmrn_for_10_100"); |
2073 | 2073 | ||
@@ -2098,12 +2098,12 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) | |||
2098 | return ret_val; | 2098 | return ret_val; |
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | static int32_t | 2101 | static s32 |
2102 | e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | 2102 | e1000_configure_kmrn_for_1000(struct e1000_hw *hw) |
2103 | { | 2103 | { |
2104 | int32_t ret_val = E1000_SUCCESS; | 2104 | s32 ret_val = E1000_SUCCESS; |
2105 | uint16_t reg_data; | 2105 | u16 reg_data; |
2106 | uint32_t tipg; | 2106 | u32 tipg; |
2107 | 2107 | ||
2108 | DEBUGFUNC("e1000_configure_kmrn_for_1000"); | 2108 | DEBUGFUNC("e1000_configure_kmrn_for_1000"); |
2109 | 2109 | ||
@@ -2135,12 +2135,12 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
2135 | * | 2135 | * |
2136 | * hw - Struct containing variables accessed by shared code | 2136 | * hw - Struct containing variables accessed by shared code |
2137 | ******************************************************************************/ | 2137 | ******************************************************************************/ |
2138 | int32_t | 2138 | s32 |
2139 | e1000_phy_setup_autoneg(struct e1000_hw *hw) | 2139 | e1000_phy_setup_autoneg(struct e1000_hw *hw) |
2140 | { | 2140 | { |
2141 | int32_t ret_val; | 2141 | s32 ret_val; |
2142 | uint16_t mii_autoneg_adv_reg; | 2142 | u16 mii_autoneg_adv_reg; |
2143 | uint16_t mii_1000t_ctrl_reg; | 2143 | u16 mii_1000t_ctrl_reg; |
2144 | 2144 | ||
2145 | DEBUGFUNC("e1000_phy_setup_autoneg"); | 2145 | DEBUGFUNC("e1000_phy_setup_autoneg"); |
2146 | 2146 | ||
@@ -2284,15 +2284,15 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
2284 | * | 2284 | * |
2285 | * hw - Struct containing variables accessed by shared code | 2285 | * hw - Struct containing variables accessed by shared code |
2286 | ******************************************************************************/ | 2286 | ******************************************************************************/ |
2287 | static int32_t | 2287 | static s32 |
2288 | e1000_phy_force_speed_duplex(struct e1000_hw *hw) | 2288 | e1000_phy_force_speed_duplex(struct e1000_hw *hw) |
2289 | { | 2289 | { |
2290 | uint32_t ctrl; | 2290 | u32 ctrl; |
2291 | int32_t ret_val; | 2291 | s32 ret_val; |
2292 | uint16_t mii_ctrl_reg; | 2292 | u16 mii_ctrl_reg; |
2293 | uint16_t mii_status_reg; | 2293 | u16 mii_status_reg; |
2294 | uint16_t phy_data; | 2294 | u16 phy_data; |
2295 | uint16_t i; | 2295 | u16 i; |
2296 | 2296 | ||
2297 | DEBUGFUNC("e1000_phy_force_speed_duplex"); | 2297 | DEBUGFUNC("e1000_phy_force_speed_duplex"); |
2298 | 2298 | ||
@@ -2538,7 +2538,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2538 | void | 2538 | void |
2539 | e1000_config_collision_dist(struct e1000_hw *hw) | 2539 | e1000_config_collision_dist(struct e1000_hw *hw) |
2540 | { | 2540 | { |
2541 | uint32_t tctl, coll_dist; | 2541 | u32 tctl, coll_dist; |
2542 | 2542 | ||
2543 | DEBUGFUNC("e1000_config_collision_dist"); | 2543 | DEBUGFUNC("e1000_config_collision_dist"); |
2544 | 2544 | ||
@@ -2565,12 +2565,12 @@ e1000_config_collision_dist(struct e1000_hw *hw) | |||
2565 | * The contents of the PHY register containing the needed information need to | 2565 | * The contents of the PHY register containing the needed information need to |
2566 | * be passed in. | 2566 | * be passed in. |
2567 | ******************************************************************************/ | 2567 | ******************************************************************************/ |
2568 | static int32_t | 2568 | static s32 |
2569 | e1000_config_mac_to_phy(struct e1000_hw *hw) | 2569 | e1000_config_mac_to_phy(struct e1000_hw *hw) |
2570 | { | 2570 | { |
2571 | uint32_t ctrl; | 2571 | u32 ctrl; |
2572 | int32_t ret_val; | 2572 | s32 ret_val; |
2573 | uint16_t phy_data; | 2573 | u16 phy_data; |
2574 | 2574 | ||
2575 | DEBUGFUNC("e1000_config_mac_to_phy"); | 2575 | DEBUGFUNC("e1000_config_mac_to_phy"); |
2576 | 2576 | ||
@@ -2624,10 +2624,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2624 | * by the PHY rather than the MAC. Software must also configure these | 2624 | * by the PHY rather than the MAC. Software must also configure these |
2625 | * bits when link is forced on a fiber connection. | 2625 | * bits when link is forced on a fiber connection. |
2626 | *****************************************************************************/ | 2626 | *****************************************************************************/ |
2627 | int32_t | 2627 | s32 |
2628 | e1000_force_mac_fc(struct e1000_hw *hw) | 2628 | e1000_force_mac_fc(struct e1000_hw *hw) |
2629 | { | 2629 | { |
2630 | uint32_t ctrl; | 2630 | u32 ctrl; |
2631 | 2631 | ||
2632 | DEBUGFUNC("e1000_force_mac_fc"); | 2632 | DEBUGFUNC("e1000_force_mac_fc"); |
2633 | 2633 | ||
@@ -2691,15 +2691,15 @@ e1000_force_mac_fc(struct e1000_hw *hw) | |||
2691 | * based on the flow control negotiated by the PHY. In TBI mode, the TFCE | 2691 | * based on the flow control negotiated by the PHY. In TBI mode, the TFCE |
2692 | * and RFCE bits will be automatically set to the negotiated flow control mode. | 2692 | * and RFCE bits will be automatically set to the negotiated flow control mode. |
2693 | *****************************************************************************/ | 2693 | *****************************************************************************/ |
2694 | static int32_t | 2694 | static s32 |
2695 | e1000_config_fc_after_link_up(struct e1000_hw *hw) | 2695 | e1000_config_fc_after_link_up(struct e1000_hw *hw) |
2696 | { | 2696 | { |
2697 | int32_t ret_val; | 2697 | s32 ret_val; |
2698 | uint16_t mii_status_reg; | 2698 | u16 mii_status_reg; |
2699 | uint16_t mii_nway_adv_reg; | 2699 | u16 mii_nway_adv_reg; |
2700 | uint16_t mii_nway_lp_ability_reg; | 2700 | u16 mii_nway_lp_ability_reg; |
2701 | uint16_t speed; | 2701 | u16 speed; |
2702 | uint16_t duplex; | 2702 | u16 duplex; |
2703 | 2703 | ||
2704 | DEBUGFUNC("e1000_config_fc_after_link_up"); | 2704 | DEBUGFUNC("e1000_config_fc_after_link_up"); |
2705 | 2705 | ||
@@ -2896,17 +2896,17 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2896 | * | 2896 | * |
2897 | * Called by any function that needs to check the link status of the adapter. | 2897 | * Called by any function that needs to check the link status of the adapter. |
2898 | *****************************************************************************/ | 2898 | *****************************************************************************/ |
2899 | int32_t | 2899 | s32 |
2900 | e1000_check_for_link(struct e1000_hw *hw) | 2900 | e1000_check_for_link(struct e1000_hw *hw) |
2901 | { | 2901 | { |
2902 | uint32_t rxcw = 0; | 2902 | u32 rxcw = 0; |
2903 | uint32_t ctrl; | 2903 | u32 ctrl; |
2904 | uint32_t status; | 2904 | u32 status; |
2905 | uint32_t rctl; | 2905 | u32 rctl; |
2906 | uint32_t icr; | 2906 | u32 icr; |
2907 | uint32_t signal = 0; | 2907 | u32 signal = 0; |
2908 | int32_t ret_val; | 2908 | s32 ret_val; |
2909 | uint16_t phy_data; | 2909 | u16 phy_data; |
2910 | 2910 | ||
2911 | DEBUGFUNC("e1000_check_for_link"); | 2911 | DEBUGFUNC("e1000_check_for_link"); |
2912 | 2912 | ||
@@ -3022,7 +3022,7 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3022 | * at gigabit speed, we turn on TBI compatibility. | 3022 | * at gigabit speed, we turn on TBI compatibility. |
3023 | */ | 3023 | */ |
3024 | if (hw->tbi_compatibility_en) { | 3024 | if (hw->tbi_compatibility_en) { |
3025 | uint16_t speed, duplex; | 3025 | u16 speed, duplex; |
3026 | ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); | 3026 | ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); |
3027 | if (ret_val) { | 3027 | if (ret_val) { |
3028 | DEBUGOUT("Error getting link speed and duplex\n"); | 3028 | DEBUGOUT("Error getting link speed and duplex\n"); |
@@ -3132,14 +3132,14 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3132 | * speed - Speed of the connection | 3132 | * speed - Speed of the connection |
3133 | * duplex - Duplex setting of the connection | 3133 | * duplex - Duplex setting of the connection |
3134 | *****************************************************************************/ | 3134 | *****************************************************************************/ |
3135 | int32_t | 3135 | s32 |
3136 | e1000_get_speed_and_duplex(struct e1000_hw *hw, | 3136 | e1000_get_speed_and_duplex(struct e1000_hw *hw, |
3137 | uint16_t *speed, | 3137 | u16 *speed, |
3138 | uint16_t *duplex) | 3138 | u16 *duplex) |
3139 | { | 3139 | { |
3140 | uint32_t status; | 3140 | u32 status; |
3141 | int32_t ret_val; | 3141 | s32 ret_val; |
3142 | uint16_t phy_data; | 3142 | u16 phy_data; |
3143 | 3143 | ||
3144 | DEBUGFUNC("e1000_get_speed_and_duplex"); | 3144 | DEBUGFUNC("e1000_get_speed_and_duplex"); |
3145 | 3145 | ||
@@ -3214,12 +3214,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
3214 | * | 3214 | * |
3215 | * hw - Struct containing variables accessed by shared code | 3215 | * hw - Struct containing variables accessed by shared code |
3216 | ******************************************************************************/ | 3216 | ******************************************************************************/ |
3217 | static int32_t | 3217 | static s32 |
3218 | e1000_wait_autoneg(struct e1000_hw *hw) | 3218 | e1000_wait_autoneg(struct e1000_hw *hw) |
3219 | { | 3219 | { |
3220 | int32_t ret_val; | 3220 | s32 ret_val; |
3221 | uint16_t i; | 3221 | u16 i; |
3222 | uint16_t phy_data; | 3222 | u16 phy_data; |
3223 | 3223 | ||
3224 | DEBUGFUNC("e1000_wait_autoneg"); | 3224 | DEBUGFUNC("e1000_wait_autoneg"); |
3225 | DEBUGOUT("Waiting for Auto-Neg to complete.\n"); | 3225 | DEBUGOUT("Waiting for Auto-Neg to complete.\n"); |
@@ -3251,7 +3251,7 @@ e1000_wait_autoneg(struct e1000_hw *hw) | |||
3251 | ******************************************************************************/ | 3251 | ******************************************************************************/ |
3252 | static void | 3252 | static void |
3253 | e1000_raise_mdi_clk(struct e1000_hw *hw, | 3253 | e1000_raise_mdi_clk(struct e1000_hw *hw, |
3254 | uint32_t *ctrl) | 3254 | u32 *ctrl) |
3255 | { | 3255 | { |
3256 | /* Raise the clock input to the Management Data Clock (by setting the MDC | 3256 | /* Raise the clock input to the Management Data Clock (by setting the MDC |
3257 | * bit), and then delay 10 microseconds. | 3257 | * bit), and then delay 10 microseconds. |
@@ -3269,7 +3269,7 @@ e1000_raise_mdi_clk(struct e1000_hw *hw, | |||
3269 | ******************************************************************************/ | 3269 | ******************************************************************************/ |
3270 | static void | 3270 | static void |
3271 | e1000_lower_mdi_clk(struct e1000_hw *hw, | 3271 | e1000_lower_mdi_clk(struct e1000_hw *hw, |
3272 | uint32_t *ctrl) | 3272 | u32 *ctrl) |
3273 | { | 3273 | { |
3274 | /* Lower the clock input to the Management Data Clock (by clearing the MDC | 3274 | /* Lower the clock input to the Management Data Clock (by clearing the MDC |
3275 | * bit), and then delay 10 microseconds. | 3275 | * bit), and then delay 10 microseconds. |
@@ -3290,11 +3290,11 @@ e1000_lower_mdi_clk(struct e1000_hw *hw, | |||
3290 | ******************************************************************************/ | 3290 | ******************************************************************************/ |
3291 | static void | 3291 | static void |
3292 | e1000_shift_out_mdi_bits(struct e1000_hw *hw, | 3292 | e1000_shift_out_mdi_bits(struct e1000_hw *hw, |
3293 | uint32_t data, | 3293 | u32 data, |
3294 | uint16_t count) | 3294 | u16 count) |
3295 | { | 3295 | { |
3296 | uint32_t ctrl; | 3296 | u32 ctrl; |
3297 | uint32_t mask; | 3297 | u32 mask; |
3298 | 3298 | ||
3299 | /* We need to shift "count" number of bits out to the PHY. So, the value | 3299 | /* We need to shift "count" number of bits out to the PHY. So, the value |
3300 | * in the "data" parameter will be shifted out to the PHY one bit at a | 3300 | * in the "data" parameter will be shifted out to the PHY one bit at a |
@@ -3338,12 +3338,12 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw, | |||
3338 | * | 3338 | * |
3339 | * Bits are shifted in in MSB to LSB order. | 3339 | * Bits are shifted in in MSB to LSB order. |
3340 | ******************************************************************************/ | 3340 | ******************************************************************************/ |
3341 | static uint16_t | 3341 | static u16 |
3342 | e1000_shift_in_mdi_bits(struct e1000_hw *hw) | 3342 | e1000_shift_in_mdi_bits(struct e1000_hw *hw) |
3343 | { | 3343 | { |
3344 | uint32_t ctrl; | 3344 | u32 ctrl; |
3345 | uint16_t data = 0; | 3345 | u16 data = 0; |
3346 | uint8_t i; | 3346 | u8 i; |
3347 | 3347 | ||
3348 | /* In order to read a register from the PHY, we need to shift in a total | 3348 | /* In order to read a register from the PHY, we need to shift in a total |
3349 | * of 18 bits from the PHY. The first two bit (turnaround) times are used | 3349 | * of 18 bits from the PHY. The first two bit (turnaround) times are used |
@@ -3384,13 +3384,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3384 | return data; | 3384 | return data; |
3385 | } | 3385 | } |
3386 | 3386 | ||
3387 | static int32_t | 3387 | static s32 |
3388 | e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | 3388 | e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) |
3389 | { | 3389 | { |
3390 | uint32_t swfw_sync = 0; | 3390 | u32 swfw_sync = 0; |
3391 | uint32_t swmask = mask; | 3391 | u32 swmask = mask; |
3392 | uint32_t fwmask = mask << 16; | 3392 | u32 fwmask = mask << 16; |
3393 | int32_t timeout = 200; | 3393 | s32 timeout = 200; |
3394 | 3394 | ||
3395 | DEBUGFUNC("e1000_swfw_sync_acquire"); | 3395 | DEBUGFUNC("e1000_swfw_sync_acquire"); |
3396 | 3396 | ||
@@ -3429,10 +3429,10 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | |||
3429 | } | 3429 | } |
3430 | 3430 | ||
3431 | static void | 3431 | static void |
3432 | e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | 3432 | e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) |
3433 | { | 3433 | { |
3434 | uint32_t swfw_sync; | 3434 | u32 swfw_sync; |
3435 | uint32_t swmask = mask; | 3435 | u32 swmask = mask; |
3436 | 3436 | ||
3437 | DEBUGFUNC("e1000_swfw_sync_release"); | 3437 | DEBUGFUNC("e1000_swfw_sync_release"); |
3438 | 3438 | ||
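The acquire path above polls the hardware software/firmware sync register and retries while either the software claim bits (swmask) or the firmware's copy of them shifted up by 16 (fwmask) read back set. A hypothetical helper stating that ownership test on its own, with invented names, would look like this:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* A resource is free only when both the software slice (low 16 bits) and the
 * firmware slice (same mask shifted up by 16) are clear in the sync word. */
static bool swfw_resource_free(uint32_t swfw_sync, uint16_t mask)
{
	uint32_t swmask = mask;
	uint32_t fwmask = (uint32_t)mask << 16;

	return (swfw_sync & (swmask | fwmask)) == 0;
}

int main(void)
{
	/* Bit 0 claimed by software and bit 16 by firmware: not free. */
	printf("%d\n", swfw_resource_free(0x00010001u, 0x0001));
	return 0;
}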
@@ -3464,13 +3464,13 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | |||
3464 | * hw - Struct containing variables accessed by shared code | 3464 | * hw - Struct containing variables accessed by shared code |
3465 | * reg_addr - address of the PHY register to read | 3465 | * reg_addr - address of the PHY register to read |
3466 | ******************************************************************************/ | 3466 | ******************************************************************************/ |
3467 | int32_t | 3467 | s32 |
3468 | e1000_read_phy_reg(struct e1000_hw *hw, | 3468 | e1000_read_phy_reg(struct e1000_hw *hw, |
3469 | uint32_t reg_addr, | 3469 | u32 reg_addr, |
3470 | uint16_t *phy_data) | 3470 | u16 *phy_data) |
3471 | { | 3471 | { |
3472 | uint32_t ret_val; | 3472 | u32 ret_val; |
3473 | uint16_t swfw; | 3473 | u16 swfw; |
3474 | 3474 | ||
3475 | DEBUGFUNC("e1000_read_phy_reg"); | 3475 | DEBUGFUNC("e1000_read_phy_reg"); |
3476 | 3476 | ||
@@ -3488,7 +3488,7 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3488 | hw->phy_type == e1000_phy_igp_2) && | 3488 | hw->phy_type == e1000_phy_igp_2) && |
3489 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3489 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3490 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3490 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
3491 | (uint16_t)reg_addr); | 3491 | (u16)reg_addr); |
3492 | if (ret_val) { | 3492 | if (ret_val) { |
3493 | e1000_swfw_sync_release(hw, swfw); | 3493 | e1000_swfw_sync_release(hw, swfw); |
3494 | return ret_val; | 3494 | return ret_val; |
@@ -3499,14 +3499,14 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3499 | /* Select Configuration Page */ | 3499 | /* Select Configuration Page */ |
3500 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | 3500 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { |
3501 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | 3501 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, |
3502 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | 3502 | (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT)); |
3503 | } else { | 3503 | } else { |
3504 | /* Use Alternative Page Select register to access | 3504 | /* Use Alternative Page Select register to access |
3505 | * registers 30 and 31 | 3505 | * registers 30 and 31 |
3506 | */ | 3506 | */ |
3507 | ret_val = e1000_write_phy_reg_ex(hw, | 3507 | ret_val = e1000_write_phy_reg_ex(hw, |
3508 | GG82563_PHY_PAGE_SELECT_ALT, | 3508 | GG82563_PHY_PAGE_SELECT_ALT, |
3509 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | 3509 | (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT)); |
3510 | } | 3510 | } |
3511 | 3511 | ||
3512 | if (ret_val) { | 3512 | if (ret_val) { |
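The page-select writes above exist because these PHYs expose more registers than the 5-bit MDIO register field can carry: the low five bits of the combined reg_addr go out on the wire, while the rest is written to a page-select register first (either the full address or the address shifted down, depending on the PHY family). The helper below is a hypothetical illustration of that split; the shift value and function name are made up here, and only the 0x1F mask mirrors the 5-bit MDIO field.

#include <stdint.h>
#include <stdio.h>

#define MDIO_REG_MASK  0x1Fu  /* 5-bit register field on the wire */
#define PHY_PAGE_SHIFT 5      /* illustrative: page bits live above the low 5 */

/* Split a driver-level "combined" PHY address into (page, register). */
static void split_phy_addr(uint32_t reg_addr, uint16_t *page, uint16_t *reg)
{
	*page = (uint16_t)(reg_addr >> PHY_PAGE_SHIFT);
	*reg  = (uint16_t)(reg_addr & MDIO_REG_MASK);
}

int main(void)
{
	uint16_t page, reg;

	split_phy_addr(0x2F5B, &page, &reg);   /* an address used earlier in this file */
	printf("page select value = 0x%x, register = %u\n", page, reg);
	return 0;
}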
@@ -3523,13 +3523,13 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3523 | return ret_val; | 3523 | return ret_val; |
3524 | } | 3524 | } |
3525 | 3525 | ||
3526 | static int32_t | 3526 | static s32 |
3527 | e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | 3527 | e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, |
3528 | uint16_t *phy_data) | 3528 | u16 *phy_data) |
3529 | { | 3529 | { |
3530 | uint32_t i; | 3530 | u32 i; |
3531 | uint32_t mdic = 0; | 3531 | u32 mdic = 0; |
3532 | const uint32_t phy_addr = 1; | 3532 | const u32 phy_addr = 1; |
3533 | 3533 | ||
3534 | DEBUGFUNC("e1000_read_phy_reg_ex"); | 3534 | DEBUGFUNC("e1000_read_phy_reg_ex"); |
3535 | 3535 | ||
@@ -3563,7 +3563,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | |||
3563 | DEBUGOUT("MDI Error\n"); | 3563 | DEBUGOUT("MDI Error\n"); |
3564 | return -E1000_ERR_PHY; | 3564 | return -E1000_ERR_PHY; |
3565 | } | 3565 | } |
3566 | *phy_data = (uint16_t) mdic; | 3566 | *phy_data = (u16) mdic; |
3567 | } else { | 3567 | } else { |
3568 | /* We must first send a preamble through the MDIO pin to signal the | 3568 | /* We must first send a preamble through the MDIO pin to signal the |
3569 | * beginning of an MII instruction. This is done by sending 32 | 3569 | * beginning of an MII instruction. This is done by sending 32 |
@@ -3603,12 +3603,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | |||
3603 | * reg_addr - address of the PHY register to write | 3603 | * reg_addr - address of the PHY register to write |
3604 | * data - data to write to the PHY | 3604 | * data - data to write to the PHY |
3605 | ******************************************************************************/ | 3605 | ******************************************************************************/ |
3606 | int32_t | 3606 | s32 |
3607 | e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, | 3607 | e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, |
3608 | uint16_t phy_data) | 3608 | u16 phy_data) |
3609 | { | 3609 | { |
3610 | uint32_t ret_val; | 3610 | u32 ret_val; |
3611 | uint16_t swfw; | 3611 | u16 swfw; |
3612 | 3612 | ||
3613 | DEBUGFUNC("e1000_write_phy_reg"); | 3613 | DEBUGFUNC("e1000_write_phy_reg"); |
3614 | 3614 | ||
@@ -3626,7 +3626,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, | |||
3626 | hw->phy_type == e1000_phy_igp_2) && | 3626 | hw->phy_type == e1000_phy_igp_2) && |
3627 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3627 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3628 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3628 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
3629 | (uint16_t)reg_addr); | 3629 | (u16)reg_addr); |
3630 | if (ret_val) { | 3630 | if (ret_val) { |
3631 | e1000_swfw_sync_release(hw, swfw); | 3631 | e1000_swfw_sync_release(hw, swfw); |
3632 | return ret_val; | 3632 | return ret_val; |
@@ -3637,14 +3637,14 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, | |||
3637 | /* Select Configuration Page */ | 3637 | /* Select Configuration Page */ |
3638 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { | 3638 | if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { |
3639 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, | 3639 | ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, |
3640 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | 3640 | (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT)); |
3641 | } else { | 3641 | } else { |
3642 | /* Use Alternative Page Select register to access | 3642 | /* Use Alternative Page Select register to access |
3643 | * registers 30 and 31 | 3643 | * registers 30 and 31 |
3644 | */ | 3644 | */ |
3645 | ret_val = e1000_write_phy_reg_ex(hw, | 3645 | ret_val = e1000_write_phy_reg_ex(hw, |
3646 | GG82563_PHY_PAGE_SELECT_ALT, | 3646 | GG82563_PHY_PAGE_SELECT_ALT, |
3647 | (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); | 3647 | (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT)); |
3648 | } | 3648 | } |
3649 | 3649 | ||
3650 | if (ret_val) { | 3650 | if (ret_val) { |
@@ -3661,13 +3661,13 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, | |||
3661 | return ret_val; | 3661 | return ret_val; |
3662 | } | 3662 | } |
3663 | 3663 | ||
3664 | static int32_t | 3664 | static s32 |
3665 | e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | 3665 | e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, |
3666 | uint16_t phy_data) | 3666 | u16 phy_data) |
3667 | { | 3667 | { |
3668 | uint32_t i; | 3668 | u32 i; |
3669 | uint32_t mdic = 0; | 3669 | u32 mdic = 0; |
3670 | const uint32_t phy_addr = 1; | 3670 | const u32 phy_addr = 1; |
3671 | 3671 | ||
3672 | DEBUGFUNC("e1000_write_phy_reg_ex"); | 3672 | DEBUGFUNC("e1000_write_phy_reg_ex"); |
3673 | 3673 | ||
@@ -3681,7 +3681,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | |||
3681 | * for the PHY register in the MDI Control register. The MAC will take | 3681 | * for the PHY register in the MDI Control register. The MAC will take |
3682 | * care of interfacing with the PHY to send the desired data. | 3682 | * care of interfacing with the PHY to send the desired data. |
3683 | */ | 3683 | */ |
3684 | mdic = (((uint32_t) phy_data) | | 3684 | mdic = (((u32) phy_data) | |
3685 | (reg_addr << E1000_MDIC_REG_SHIFT) | | 3685 | (reg_addr << E1000_MDIC_REG_SHIFT) | |
3686 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3686 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3687 | (E1000_MDIC_OP_WRITE)); | 3687 | (E1000_MDIC_OP_WRITE)); |
@@ -3715,7 +3715,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | |||
3715 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | | 3715 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | |
3716 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); | 3716 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); |
3717 | mdic <<= 16; | 3717 | mdic <<= 16; |
3718 | mdic |= (uint32_t) phy_data; | 3718 | mdic |= (u32) phy_data; |
3719 | 3719 | ||
3720 | e1000_shift_out_mdi_bits(hw, mdic, 32); | 3720 | e1000_shift_out_mdi_bits(hw, mdic, 32); |
3721 | } | 3721 | } |
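The two code paths above perform the same MII write either through the MDI Control register, where the MAC serializes the frame, or bit-banged through the MDI pins, where the driver builds the 16-bit header itself, prefixes a 32-bit preamble, and appends the 16 data bits; the earlier shift-in comments describe the matching read tail of 2 turnaround plus 16 data bits. The snippet below only reconstructs that frame layout for reference; the constant values are the generic Clause 22 ones and are assumptions here, not the driver's #defines.

#include <stdint.h>
#include <stdio.h>

/* Generic IEEE 802.3 Clause 22 field values (assumed, not from this file). */
#define MDIO_SOF        0x1u   /* start-of-frame "01" */
#define MDIO_OP_WRITE   0x1u
#define MDIO_TURNAROUND 0x2u   /* "10" for writes */

/* Build the 32 bits that follow the all-ones preamble for a register write,
 * mirroring the shift pattern used by the bit-bang path above. */
static uint32_t mdio_write_frame(uint32_t phy_addr, uint32_t reg_addr,
				 uint16_t data)
{
	uint32_t hdr = MDIO_TURNAROUND | (reg_addr << 2) | (phy_addr << 7) |
		       (MDIO_OP_WRITE << 12) | (MDIO_SOF << 14);

	return (hdr << 16) | data;   /* shifted out MSB first, 32 bits total */
}

int main(void)
{
	/* Example: write 0x1140 to register 0 of the PHY at address 1. */
	printf("frame after preamble = 0x%08x\n",
	       (unsigned)mdio_write_frame(1, 0, 0x1140));
	return 0;
}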
@@ -3723,13 +3723,13 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, | |||
3723 | return E1000_SUCCESS; | 3723 | return E1000_SUCCESS; |
3724 | } | 3724 | } |
3725 | 3725 | ||
3726 | static int32_t | 3726 | static s32 |
3727 | e1000_read_kmrn_reg(struct e1000_hw *hw, | 3727 | e1000_read_kmrn_reg(struct e1000_hw *hw, |
3728 | uint32_t reg_addr, | 3728 | u32 reg_addr, |
3729 | uint16_t *data) | 3729 | u16 *data) |
3730 | { | 3730 | { |
3731 | uint32_t reg_val; | 3731 | u32 reg_val; |
3732 | uint16_t swfw; | 3732 | u16 swfw; |
3733 | DEBUGFUNC("e1000_read_kmrn_reg"); | 3733 | DEBUGFUNC("e1000_read_kmrn_reg"); |
3734 | 3734 | ||
3735 | if ((hw->mac_type == e1000_80003es2lan) && | 3735 | if ((hw->mac_type == e1000_80003es2lan) && |
@@ -3750,19 +3750,19 @@ e1000_read_kmrn_reg(struct e1000_hw *hw, | |||
3750 | 3750 | ||
3751 | /* Read the data returned */ | 3751 | /* Read the data returned */ |
3752 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); | 3752 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); |
3753 | *data = (uint16_t)reg_val; | 3753 | *data = (u16)reg_val; |
3754 | 3754 | ||
3755 | e1000_swfw_sync_release(hw, swfw); | 3755 | e1000_swfw_sync_release(hw, swfw); |
3756 | return E1000_SUCCESS; | 3756 | return E1000_SUCCESS; |
3757 | } | 3757 | } |
3758 | 3758 | ||
3759 | static int32_t | 3759 | static s32 |
3760 | e1000_write_kmrn_reg(struct e1000_hw *hw, | 3760 | e1000_write_kmrn_reg(struct e1000_hw *hw, |
3761 | uint32_t reg_addr, | 3761 | u32 reg_addr, |
3762 | uint16_t data) | 3762 | u16 data) |
3763 | { | 3763 | { |
3764 | uint32_t reg_val; | 3764 | u32 reg_val; |
3765 | uint16_t swfw; | 3765 | u16 swfw; |
3766 | DEBUGFUNC("e1000_write_kmrn_reg"); | 3766 | DEBUGFUNC("e1000_write_kmrn_reg"); |
3767 | 3767 | ||
3768 | if ((hw->mac_type == e1000_80003es2lan) && | 3768 | if ((hw->mac_type == e1000_80003es2lan) && |
@@ -3788,13 +3788,13 @@ e1000_write_kmrn_reg(struct e1000_hw *hw, | |||
3788 | * | 3788 | * |
3789 | * hw - Struct containing variables accessed by shared code | 3789 | * hw - Struct containing variables accessed by shared code |
3790 | ******************************************************************************/ | 3790 | ******************************************************************************/ |
3791 | int32_t | 3791 | s32 |
3792 | e1000_phy_hw_reset(struct e1000_hw *hw) | 3792 | e1000_phy_hw_reset(struct e1000_hw *hw) |
3793 | { | 3793 | { |
3794 | uint32_t ctrl, ctrl_ext; | 3794 | u32 ctrl, ctrl_ext; |
3795 | uint32_t led_ctrl; | 3795 | u32 led_ctrl; |
3796 | int32_t ret_val; | 3796 | s32 ret_val; |
3797 | uint16_t swfw; | 3797 | u16 swfw; |
3798 | 3798 | ||
3799 | DEBUGFUNC("e1000_phy_hw_reset"); | 3799 | DEBUGFUNC("e1000_phy_hw_reset"); |
3800 | 3800 | ||
@@ -3882,11 +3882,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3882 | * | 3882 | * |
3883 | * Sets bit 15 of the MII Control register | 3883 | * Sets bit 15 of the MII Control register |
3884 | ******************************************************************************/ | 3884 | ******************************************************************************/ |
3885 | int32_t | 3885 | s32 |
3886 | e1000_phy_reset(struct e1000_hw *hw) | 3886 | e1000_phy_reset(struct e1000_hw *hw) |
3887 | { | 3887 | { |
3888 | int32_t ret_val; | 3888 | s32 ret_val; |
3889 | uint16_t phy_data; | 3889 | u16 phy_data; |
3890 | 3890 | ||
3891 | DEBUGFUNC("e1000_phy_reset"); | 3891 | DEBUGFUNC("e1000_phy_reset"); |
3892 | 3892 | ||
@@ -3937,9 +3937,9 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3937 | void | 3937 | void |
3938 | e1000_phy_powerdown_workaround(struct e1000_hw *hw) | 3938 | e1000_phy_powerdown_workaround(struct e1000_hw *hw) |
3939 | { | 3939 | { |
3940 | int32_t reg; | 3940 | s32 reg; |
3941 | uint16_t phy_data; | 3941 | u16 phy_data; |
3942 | int32_t retry = 0; | 3942 | s32 retry = 0; |
3943 | 3943 | ||
3944 | DEBUGFUNC("e1000_phy_powerdown_workaround"); | 3944 | DEBUGFUNC("e1000_phy_powerdown_workaround"); |
3945 | 3945 | ||
@@ -3987,13 +3987,13 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3987 | * | 3987 | * |
3988 | * hw - struct containing variables accessed by shared code | 3988 | * hw - struct containing variables accessed by shared code |
3989 | ******************************************************************************/ | 3989 | ******************************************************************************/ |
3990 | static int32_t | 3990 | static s32 |
3991 | e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | 3991 | e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) |
3992 | { | 3992 | { |
3993 | int32_t ret_val; | 3993 | s32 ret_val; |
3994 | int32_t reg; | 3994 | s32 reg; |
3995 | int32_t cnt; | 3995 | s32 cnt; |
3996 | uint16_t phy_data; | 3996 | u16 phy_data; |
3997 | 3997 | ||
3998 | if (hw->kmrn_lock_loss_workaround_disabled) | 3998 | if (hw->kmrn_lock_loss_workaround_disabled) |
3999 | return E1000_SUCCESS; | 3999 | return E1000_SUCCESS; |
@@ -4040,11 +4040,11 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | |||
4040 | * | 4040 | * |
4041 | * hw - Struct containing variables accessed by shared code | 4041 | * hw - Struct containing variables accessed by shared code |
4042 | ******************************************************************************/ | 4042 | ******************************************************************************/ |
4043 | static int32_t | 4043 | static s32 |
4044 | e1000_detect_gig_phy(struct e1000_hw *hw) | 4044 | e1000_detect_gig_phy(struct e1000_hw *hw) |
4045 | { | 4045 | { |
4046 | int32_t phy_init_status, ret_val; | 4046 | s32 phy_init_status, ret_val; |
4047 | uint16_t phy_id_high, phy_id_low; | 4047 | u16 phy_id_high, phy_id_low; |
4048 | bool match = false; | 4048 | bool match = false; |
4049 | 4049 | ||
4050 | DEBUGFUNC("e1000_detect_gig_phy"); | 4050 | DEBUGFUNC("e1000_detect_gig_phy"); |
@@ -4076,14 +4076,14 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
4076 | if (ret_val) | 4076 | if (ret_val) |
4077 | return ret_val; | 4077 | return ret_val; |
4078 | 4078 | ||
4079 | hw->phy_id = (uint32_t) (phy_id_high << 16); | 4079 | hw->phy_id = (u32) (phy_id_high << 16); |
4080 | udelay(20); | 4080 | udelay(20); |
4081 | ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); | 4081 | ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); |
4082 | if (ret_val) | 4082 | if (ret_val) |
4083 | return ret_val; | 4083 | return ret_val; |
4084 | 4084 | ||
4085 | hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK); | 4085 | hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); |
4086 | hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK; | 4086 | hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; |
4087 | 4087 | ||
4088 | switch (hw->mac_type) { | 4088 | switch (hw->mac_type) { |
4089 | case e1000_82543: | 4089 | case e1000_82543: |
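The assignments above build the 32-bit PHY identifier from the two 16-bit ID registers, splitting the low four bits of the second word off as the silicon revision. A worked example with made-up readouts follows; the 0xFFFFFFF0 mask is an assumption here that mirrors the usual revision mask, and the exact-match switch that comes next can then ignore stepping differences.

#include <stdint.h>
#include <stdio.h>

#define REVISION_MASK 0xFFFFFFF0u   /* assumed: low 4 bits carry the revision */

int main(void)
{
	uint16_t phy_id_high = 0x0141;   /* made-up PHY_ID1 readout */
	uint16_t phy_id_low  = 0x0C53;   /* made-up PHY_ID2 readout */
	uint32_t phy_id, phy_revision;

	phy_id  = (uint32_t)phy_id_high << 16;
	phy_id |= (uint32_t)phy_id_low & REVISION_MASK;
	phy_revision = (uint32_t)phy_id_low & ~REVISION_MASK;

	printf("phy_id = 0x%08x, revision = %u\n",
	       (unsigned)phy_id, (unsigned)phy_revision);
	return 0;
}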
@@ -4136,10 +4136,10 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
4136 | * | 4136 | * |
4137 | * hw - Struct containing variables accessed by shared code | 4137 | * hw - Struct containing variables accessed by shared code |
4138 | ******************************************************************************/ | 4138 | ******************************************************************************/ |
4139 | static int32_t | 4139 | static s32 |
4140 | e1000_phy_reset_dsp(struct e1000_hw *hw) | 4140 | e1000_phy_reset_dsp(struct e1000_hw *hw) |
4141 | { | 4141 | { |
4142 | int32_t ret_val; | 4142 | s32 ret_val; |
4143 | DEBUGFUNC("e1000_phy_reset_dsp"); | 4143 | DEBUGFUNC("e1000_phy_reset_dsp"); |
4144 | 4144 | ||
4145 | do { | 4145 | do { |
@@ -4163,12 +4163,12 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) | |||
4163 | * hw - Struct containing variables accessed by shared code | 4163 | * hw - Struct containing variables accessed by shared code |
4164 | * phy_info - PHY information structure | 4164 | * phy_info - PHY information structure |
4165 | ******************************************************************************/ | 4165 | ******************************************************************************/ |
4166 | static int32_t | 4166 | static s32 |
4167 | e1000_phy_igp_get_info(struct e1000_hw *hw, | 4167 | e1000_phy_igp_get_info(struct e1000_hw *hw, |
4168 | struct e1000_phy_info *phy_info) | 4168 | struct e1000_phy_info *phy_info) |
4169 | { | 4169 | { |
4170 | int32_t ret_val; | 4170 | s32 ret_val; |
4171 | uint16_t phy_data, min_length, max_length, average; | 4171 | u16 phy_data, min_length, max_length, average; |
4172 | e1000_rev_polarity polarity; | 4172 | e1000_rev_polarity polarity; |
4173 | 4173 | ||
4174 | DEBUGFUNC("e1000_phy_igp_get_info"); | 4174 | DEBUGFUNC("e1000_phy_igp_get_info"); |
@@ -4240,12 +4240,12 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
4240 | * hw - Struct containing variables accessed by shared code | 4240 | * hw - Struct containing variables accessed by shared code |
4241 | * phy_info - PHY information structure | 4241 | * phy_info - PHY information structure |
4242 | ******************************************************************************/ | 4242 | ******************************************************************************/ |
4243 | static int32_t | 4243 | static s32 |
4244 | e1000_phy_ife_get_info(struct e1000_hw *hw, | 4244 | e1000_phy_ife_get_info(struct e1000_hw *hw, |
4245 | struct e1000_phy_info *phy_info) | 4245 | struct e1000_phy_info *phy_info) |
4246 | { | 4246 | { |
4247 | int32_t ret_val; | 4247 | s32 ret_val; |
4248 | uint16_t phy_data; | 4248 | u16 phy_data; |
4249 | e1000_rev_polarity polarity; | 4249 | e1000_rev_polarity polarity; |
4250 | 4250 | ||
4251 | DEBUGFUNC("e1000_phy_ife_get_info"); | 4251 | DEBUGFUNC("e1000_phy_ife_get_info"); |
@@ -4290,12 +4290,12 @@ e1000_phy_ife_get_info(struct e1000_hw *hw, | |||
4290 | * hw - Struct containing variables accessed by shared code | 4290 | * hw - Struct containing variables accessed by shared code |
4291 | * phy_info - PHY information structure | 4291 | * phy_info - PHY information structure |
4292 | ******************************************************************************/ | 4292 | ******************************************************************************/ |
4293 | static int32_t | 4293 | static s32 |
4294 | e1000_phy_m88_get_info(struct e1000_hw *hw, | 4294 | e1000_phy_m88_get_info(struct e1000_hw *hw, |
4295 | struct e1000_phy_info *phy_info) | 4295 | struct e1000_phy_info *phy_info) |
4296 | { | 4296 | { |
4297 | int32_t ret_val; | 4297 | s32 ret_val; |
4298 | uint16_t phy_data; | 4298 | u16 phy_data; |
4299 | e1000_rev_polarity polarity; | 4299 | e1000_rev_polarity polarity; |
4300 | 4300 | ||
4301 | DEBUGFUNC("e1000_phy_m88_get_info"); | 4301 | DEBUGFUNC("e1000_phy_m88_get_info"); |
@@ -4369,12 +4369,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
4369 | * hw - Struct containing variables accessed by shared code | 4369 | * hw - Struct containing variables accessed by shared code |
4370 | * phy_info - PHY information structure | 4370 | * phy_info - PHY information structure |
4371 | ******************************************************************************/ | 4371 | ******************************************************************************/ |
4372 | int32_t | 4372 | s32 |
4373 | e1000_phy_get_info(struct e1000_hw *hw, | 4373 | e1000_phy_get_info(struct e1000_hw *hw, |
4374 | struct e1000_phy_info *phy_info) | 4374 | struct e1000_phy_info *phy_info) |
4375 | { | 4375 | { |
4376 | int32_t ret_val; | 4376 | s32 ret_val; |
4377 | uint16_t phy_data; | 4377 | u16 phy_data; |
4378 | 4378 | ||
4379 | DEBUGFUNC("e1000_phy_get_info"); | 4379 | DEBUGFUNC("e1000_phy_get_info"); |
4380 | 4380 | ||
@@ -4415,7 +4415,7 @@ e1000_phy_get_info(struct e1000_hw *hw, | |||
4415 | return e1000_phy_m88_get_info(hw, phy_info); | 4415 | return e1000_phy_m88_get_info(hw, phy_info); |
4416 | } | 4416 | } |
4417 | 4417 | ||
4418 | int32_t | 4418 | s32 |
4419 | e1000_validate_mdi_setting(struct e1000_hw *hw) | 4419 | e1000_validate_mdi_setting(struct e1000_hw *hw) |
4420 | { | 4420 | { |
4421 | DEBUGFUNC("e1000_validate_mdi_settings"); | 4421 | DEBUGFUNC("e1000_validate_mdi_settings"); |
@@ -4436,13 +4436,13 @@ e1000_validate_mdi_setting(struct e1000_hw *hw) | |||
4436 | * | 4436 | * |
4437 | * hw - Struct containing variables accessed by shared code | 4437 | * hw - Struct containing variables accessed by shared code |
4438 | *****************************************************************************/ | 4438 | *****************************************************************************/ |
4439 | int32_t | 4439 | s32 |
4440 | e1000_init_eeprom_params(struct e1000_hw *hw) | 4440 | e1000_init_eeprom_params(struct e1000_hw *hw) |
4441 | { | 4441 | { |
4442 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4442 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4443 | uint32_t eecd = E1000_READ_REG(hw, EECD); | 4443 | u32 eecd = E1000_READ_REG(hw, EECD); |
4444 | int32_t ret_val = E1000_SUCCESS; | 4444 | s32 ret_val = E1000_SUCCESS; |
4445 | uint16_t eeprom_size; | 4445 | u16 eeprom_size; |
4446 | 4446 | ||
4447 | DEBUGFUNC("e1000_init_eeprom_params"); | 4447 | DEBUGFUNC("e1000_init_eeprom_params"); |
4448 | 4448 | ||
@@ -4561,8 +4561,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4561 | break; | 4561 | break; |
4562 | case e1000_ich8lan: | 4562 | case e1000_ich8lan: |
4563 | { | 4563 | { |
4564 | int32_t i = 0; | 4564 | s32 i = 0; |
4565 | uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); | 4565 | u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); |
4566 | 4566 | ||
4567 | eeprom->type = e1000_eeprom_ich8; | 4567 | eeprom->type = e1000_eeprom_ich8; |
4568 | eeprom->use_eerd = false; | 4568 | eeprom->use_eerd = false; |
@@ -4586,7 +4586,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4586 | 4586 | ||
4587 | hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE; | 4587 | hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE; |
4588 | 4588 | ||
4589 | hw->flash_bank_size /= 2 * sizeof(uint16_t); | 4589 | hw->flash_bank_size /= 2 * sizeof(u16); |
4590 | 4590 | ||
4591 | break; | 4591 | break; |
4592 | } | 4592 | } |
@@ -4611,7 +4611,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4611 | if (eeprom_size) | 4611 | if (eeprom_size) |
4612 | eeprom_size++; | 4612 | eeprom_size++; |
4613 | } else { | 4613 | } else { |
4614 | eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> | 4614 | eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> |
4615 | E1000_EECD_SIZE_EX_SHIFT); | 4615 | E1000_EECD_SIZE_EX_SHIFT); |
4616 | } | 4616 | } |
4617 | 4617 | ||
@@ -4628,7 +4628,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4628 | *****************************************************************************/ | 4628 | *****************************************************************************/ |
4629 | static void | 4629 | static void |
4630 | e1000_raise_ee_clk(struct e1000_hw *hw, | 4630 | e1000_raise_ee_clk(struct e1000_hw *hw, |
4631 | uint32_t *eecd) | 4631 | u32 *eecd) |
4632 | { | 4632 | { |
4633 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then | 4633 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then |
4634 | * wait <delay> microseconds. | 4634 | * wait <delay> microseconds. |
@@ -4647,7 +4647,7 @@ e1000_raise_ee_clk(struct e1000_hw *hw, | |||
4647 | *****************************************************************************/ | 4647 | *****************************************************************************/ |
4648 | static void | 4648 | static void |
4649 | e1000_lower_ee_clk(struct e1000_hw *hw, | 4649 | e1000_lower_ee_clk(struct e1000_hw *hw, |
4650 | uint32_t *eecd) | 4650 | u32 *eecd) |
4651 | { | 4651 | { |
4652 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then | 4652 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then |
4653 | * wait 50 microseconds. | 4653 | * wait 50 microseconds. |
@@ -4667,12 +4667,12 @@ e1000_lower_ee_clk(struct e1000_hw *hw, | |||
4667 | *****************************************************************************/ | 4667 | *****************************************************************************/ |
4668 | static void | 4668 | static void |
4669 | e1000_shift_out_ee_bits(struct e1000_hw *hw, | 4669 | e1000_shift_out_ee_bits(struct e1000_hw *hw, |
4670 | uint16_t data, | 4670 | u16 data, |
4671 | uint16_t count) | 4671 | u16 count) |
4672 | { | 4672 | { |
4673 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4673 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4674 | uint32_t eecd; | 4674 | u32 eecd; |
4675 | uint32_t mask; | 4675 | u32 mask; |
4676 | 4676 | ||
4677 | /* We need to shift "count" bits out to the EEPROM. So, value in the | 4677 | /* We need to shift "count" bits out to the EEPROM. So, value in the |
4678 | * "data" parameter will be shifted out to the EEPROM one bit at a time. | 4678 | * "data" parameter will be shifted out to the EEPROM one bit at a time. |
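Editor's aside: the loop described in that comment walks a single mask bit from position count-1 down to 0, presenting one data bit per clock, MSB first. A minimal stand-alone sketch of that ordering in plain C; the EECD pin writes are replaced by a printed digit so it runs anywhere, and the helper name shift_out_bits is invented for the example:

#include <stdio.h>
#include <stdint.h>

/* Emit the low 'count' bits of 'data' most-significant bit first,
 * mirroring how e1000_shift_out_ee_bits() walks its mask. Each
 * "pin write" is printed instead of being OR'd into EECD. */
static void shift_out_bits(uint16_t data, uint16_t count)
{
        uint32_t mask = 1u << (count - 1);

        do {
                putchar((data & mask) ? '1' : '0');  /* DI level for this clock */
                mask >>= 1;                          /* next lower bit */
        } while (mask);
        putchar('\n');
}

int main(void)
{
        shift_out_bits(0x6, 3);      /* prints 110 */
        shift_out_bits(0x1234, 16);  /* prints 0001001000110100 */
        return 0;
}
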
@@ -4718,13 +4718,13 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, | |||
4718 | * | 4718 | * |
4719 | * hw - Struct containing variables accessed by shared code | 4719 | * hw - Struct containing variables accessed by shared code |
4720 | *****************************************************************************/ | 4720 | *****************************************************************************/ |
4721 | static uint16_t | 4721 | static u16 |
4722 | e1000_shift_in_ee_bits(struct e1000_hw *hw, | 4722 | e1000_shift_in_ee_bits(struct e1000_hw *hw, |
4723 | uint16_t count) | 4723 | u16 count) |
4724 | { | 4724 | { |
4725 | uint32_t eecd; | 4725 | u32 eecd; |
4726 | uint32_t i; | 4726 | u32 i; |
4727 | uint16_t data; | 4727 | u16 data; |
4728 | 4728 | ||
4729 | /* In order to read a register from the EEPROM, we need to shift 'count' | 4729 | /* In order to read a register from the EEPROM, we need to shift 'count' |
4730 | * bits in from the EEPROM. Bits are "shifted in" by raising the clock | 4730 | * bits in from the EEPROM. Bits are "shifted in" by raising the clock |
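The complementary shift-in loop builds its result the other way round: on every clock the accumulator is shifted left and the sampled data-out bit is OR'd into bit 0. A runnable sketch with a canned bit stream standing in for the EECD data-out pin (sample_do() and the bit array are invented for the example):

#include <stdio.h>
#include <stdint.h>

static const int canned_do[] = { 1, 0, 1, 1, 0, 0, 1, 0 };  /* pretend DO samples */
static unsigned int sample_pos;

static int sample_do(void)
{
        return canned_do[sample_pos++];
}

/* Accumulate 'count' bits MSB first, as e1000_shift_in_ee_bits() does. */
static uint16_t shift_in_bits(uint16_t count)
{
        uint16_t data = 0;
        uint16_t i;

        for (i = 0; i < count; i++) {
                data <<= 1;            /* make room for the incoming bit */
                if (sample_do())       /* in the driver: raise SK, read DO */
                        data |= 1;
        }
        return data;
}

int main(void)
{
        printf("0x%02x\n", shift_in_bits(8));  /* 10110010 -> 0xb2 */
        return 0;
}
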
@@ -4762,11 +4762,11 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw, | |||
4762 | * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This | 4762 | * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This |
4763 | * function should be called before issuing a command to the EEPROM. | 4763 | * function should be called before issuing a command to the EEPROM. |
4764 | *****************************************************************************/ | 4764 | *****************************************************************************/ |
4765 | static int32_t | 4765 | static s32 |
4766 | e1000_acquire_eeprom(struct e1000_hw *hw) | 4766 | e1000_acquire_eeprom(struct e1000_hw *hw) |
4767 | { | 4767 | { |
4768 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4768 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4769 | uint32_t eecd, i=0; | 4769 | u32 eecd, i=0; |
4770 | 4770 | ||
4771 | DEBUGFUNC("e1000_acquire_eeprom"); | 4771 | DEBUGFUNC("e1000_acquire_eeprom"); |
4772 | 4772 | ||
@@ -4825,7 +4825,7 @@ static void | |||
4825 | e1000_standby_eeprom(struct e1000_hw *hw) | 4825 | e1000_standby_eeprom(struct e1000_hw *hw) |
4826 | { | 4826 | { |
4827 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4827 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4828 | uint32_t eecd; | 4828 | u32 eecd; |
4829 | 4829 | ||
4830 | eecd = E1000_READ_REG(hw, EECD); | 4830 | eecd = E1000_READ_REG(hw, EECD); |
4831 | 4831 | ||
@@ -4873,7 +4873,7 @@ e1000_standby_eeprom(struct e1000_hw *hw) | |||
4873 | static void | 4873 | static void |
4874 | e1000_release_eeprom(struct e1000_hw *hw) | 4874 | e1000_release_eeprom(struct e1000_hw *hw) |
4875 | { | 4875 | { |
4876 | uint32_t eecd; | 4876 | u32 eecd; |
4877 | 4877 | ||
4878 | DEBUGFUNC("e1000_release_eeprom"); | 4878 | DEBUGFUNC("e1000_release_eeprom"); |
4879 | 4879 | ||
@@ -4921,11 +4921,11 @@ e1000_release_eeprom(struct e1000_hw *hw) | |||
4921 | * | 4921 | * |
4922 | * hw - Struct containing variables accessed by shared code | 4922 | * hw - Struct containing variables accessed by shared code |
4923 | *****************************************************************************/ | 4923 | *****************************************************************************/ |
4924 | static int32_t | 4924 | static s32 |
4925 | e1000_spi_eeprom_ready(struct e1000_hw *hw) | 4925 | e1000_spi_eeprom_ready(struct e1000_hw *hw) |
4926 | { | 4926 | { |
4927 | uint16_t retry_count = 0; | 4927 | u16 retry_count = 0; |
4928 | uint8_t spi_stat_reg; | 4928 | u8 spi_stat_reg; |
4929 | 4929 | ||
4930 | DEBUGFUNC("e1000_spi_eeprom_ready"); | 4930 | DEBUGFUNC("e1000_spi_eeprom_ready"); |
4931 | 4931 | ||
@@ -4938,7 +4938,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw) | |||
4938 | do { | 4938 | do { |
4939 | e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, | 4939 | e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, |
4940 | hw->eeprom.opcode_bits); | 4940 | hw->eeprom.opcode_bits); |
4941 | spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8); | 4941 | spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); |
4942 | if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) | 4942 | if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) |
4943 | break; | 4943 | break; |
4944 | 4944 | ||
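The ready poll above is a bounded retry loop: issue the read-status opcode, shift in one status byte, stop as soon as the busy bit reads clear, and give up after a fixed budget. A generic, runnable version of that pattern; BUSY_BIT, read_status_reg() and the retry budget are placeholders, not the driver's constants:

#include <stdio.h>

#define BUSY_BIT 0x01   /* placeholder for the write-in-progress status bit */

/* Fake status source: reports busy for the first few polls. */
static int polls;
static unsigned char read_status_reg(void)
{
        return (++polls < 4) ? BUSY_BIT : 0;
}

/* Bounded poll: returns 0 once the part reports ready, -1 on timeout. */
static int wait_spi_ready(int max_polls)
{
        int i;

        for (i = 0; i < max_polls; i++) {
                if (!(read_status_reg() & BUSY_BIT))
                        return 0;   /* write cycle finished */
                /* the real driver delays and puts the EEPROM in standby here */
        }
        return -1;
}

int main(void)
{
        printf("%s\n", wait_spi_ready(64) == 0 ? "ready" : "timeout");
        return 0;
}
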
@@ -4967,14 +4967,14 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw) | |||
4967 | * data - word read from the EEPROM | 4967 | * data - word read from the EEPROM |
4968 | * words - number of words to read | 4968 | * words - number of words to read |
4969 | *****************************************************************************/ | 4969 | *****************************************************************************/ |
4970 | int32_t | 4970 | s32 |
4971 | e1000_read_eeprom(struct e1000_hw *hw, | 4971 | e1000_read_eeprom(struct e1000_hw *hw, |
4972 | uint16_t offset, | 4972 | u16 offset, |
4973 | uint16_t words, | 4973 | u16 words, |
4974 | uint16_t *data) | 4974 | u16 *data) |
4975 | { | 4975 | { |
4976 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4976 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4977 | uint32_t i = 0; | 4977 | u32 i = 0; |
4978 | 4978 | ||
4979 | DEBUGFUNC("e1000_read_eeprom"); | 4979 | DEBUGFUNC("e1000_read_eeprom"); |
4980 | 4980 | ||
@@ -5012,8 +5012,8 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
5012 | /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have | 5012 | /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have |
5013 | * acquired the EEPROM at this point, so any returns should release it */ | 5013 | * acquired the EEPROM at this point, so any returns should release it */ |

5014 | if (eeprom->type == e1000_eeprom_spi) { | 5014 | if (eeprom->type == e1000_eeprom_spi) { |
5015 | uint16_t word_in; | 5015 | u16 word_in; |
5016 | uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; | 5016 | u8 read_opcode = EEPROM_READ_OPCODE_SPI; |
5017 | 5017 | ||
5018 | if (e1000_spi_eeprom_ready(hw)) { | 5018 | if (e1000_spi_eeprom_ready(hw)) { |
5019 | e1000_release_eeprom(hw); | 5019 | e1000_release_eeprom(hw); |
@@ -5028,7 +5028,7 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
5028 | 5028 | ||
5029 | /* Send the READ command (opcode + addr) */ | 5029 | /* Send the READ command (opcode + addr) */ |
5030 | e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); | 5030 | e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); |
5031 | e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits); | 5031 | e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits); |
5032 | 5032 | ||
5033 | /* Read the data. The address of the eeprom internally increments with | 5033 | /* Read the data. The address of the eeprom internally increments with |
5034 | * each byte (spi) being read, saving on the overhead of eeprom setup | 5034 | * each byte (spi) being read, saving on the overhead of eeprom setup |
@@ -5044,7 +5044,7 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
5044 | /* Send the READ command (opcode + addr) */ | 5044 | /* Send the READ command (opcode + addr) */ |
5045 | e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, | 5045 | e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, |
5046 | eeprom->opcode_bits); | 5046 | eeprom->opcode_bits); |
5047 | e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i), | 5047 | e1000_shift_out_ee_bits(hw, (u16)(offset + i), |
5048 | eeprom->address_bits); | 5048 | eeprom->address_bits); |
5049 | 5049 | ||
5050 | /* Read the data. For microwire, each word requires the overhead | 5050 | /* Read the data. For microwire, each word requires the overhead |
@@ -5068,14 +5068,14 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
5068 | * data - word read from the EEPROM | 5068 | * data - word read from the EEPROM |
5069 | * words - number of words to read | 5069 | * words - number of words to read |
5070 | *****************************************************************************/ | 5070 | *****************************************************************************/ |
5071 | static int32_t | 5071 | static s32 |
5072 | e1000_read_eeprom_eerd(struct e1000_hw *hw, | 5072 | e1000_read_eeprom_eerd(struct e1000_hw *hw, |
5073 | uint16_t offset, | 5073 | u16 offset, |
5074 | uint16_t words, | 5074 | u16 words, |
5075 | uint16_t *data) | 5075 | u16 *data) |
5076 | { | 5076 | { |
5077 | uint32_t i, eerd = 0; | 5077 | u32 i, eerd = 0; |
5078 | int32_t error = 0; | 5078 | s32 error = 0; |
5079 | 5079 | ||
5080 | for (i = 0; i < words; i++) { | 5080 | for (i = 0; i < words; i++) { |
5081 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + | 5081 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + |
@@ -5102,15 +5102,15 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw, | |||
5102 | * data - word read from the EEPROM | 5102 | * data - word read from the EEPROM |
5103 | * words - number of words to read | 5103 | * words - number of words to read |
5104 | *****************************************************************************/ | 5104 | *****************************************************************************/ |
5105 | static int32_t | 5105 | static s32 |
5106 | e1000_write_eeprom_eewr(struct e1000_hw *hw, | 5106 | e1000_write_eeprom_eewr(struct e1000_hw *hw, |
5107 | uint16_t offset, | 5107 | u16 offset, |
5108 | uint16_t words, | 5108 | u16 words, |
5109 | uint16_t *data) | 5109 | u16 *data) |
5110 | { | 5110 | { |
5111 | uint32_t register_value = 0; | 5111 | u32 register_value = 0; |
5112 | uint32_t i = 0; | 5112 | u32 i = 0; |
5113 | int32_t error = 0; | 5113 | s32 error = 0; |
5114 | 5114 | ||
5115 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) | 5115 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) |
5116 | return -E1000_ERR_SWFW_SYNC; | 5116 | return -E1000_ERR_SWFW_SYNC; |
@@ -5143,12 +5143,12 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
5143 | * | 5143 | * |
5144 | * hw - Struct containing variables accessed by shared code | 5144 | * hw - Struct containing variables accessed by shared code |
5145 | *****************************************************************************/ | 5145 | *****************************************************************************/ |
5146 | static int32_t | 5146 | static s32 |
5147 | e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | 5147 | e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) |
5148 | { | 5148 | { |
5149 | uint32_t attempts = 100000; | 5149 | u32 attempts = 100000; |
5150 | uint32_t i, reg = 0; | 5150 | u32 i, reg = 0; |
5151 | int32_t done = E1000_ERR_EEPROM; | 5151 | s32 done = E1000_ERR_EEPROM; |
5152 | 5152 | ||
5153 | for (i = 0; i < attempts; i++) { | 5153 | for (i = 0; i < attempts; i++) { |
5154 | if (eerd == E1000_EEPROM_POLL_READ) | 5154 | if (eerd == E1000_EEPROM_POLL_READ) |
@@ -5174,7 +5174,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | |||
5174 | static bool | 5174 | static bool |
5175 | e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | 5175 | e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) |
5176 | { | 5176 | { |
5177 | uint32_t eecd = 0; | 5177 | u32 eecd = 0; |
5178 | 5178 | ||
5179 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); | 5179 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); |
5180 | 5180 | ||
@@ -5204,11 +5204,11 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
5204 | * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is | 5204 | * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is |
5205 | * valid. | 5205 | * valid. |
5206 | *****************************************************************************/ | 5206 | *****************************************************************************/ |
5207 | int32_t | 5207 | s32 |
5208 | e1000_validate_eeprom_checksum(struct e1000_hw *hw) | 5208 | e1000_validate_eeprom_checksum(struct e1000_hw *hw) |
5209 | { | 5209 | { |
5210 | uint16_t checksum = 0; | 5210 | u16 checksum = 0; |
5211 | uint16_t i, eeprom_data; | 5211 | u16 i, eeprom_data; |
5212 | 5212 | ||
5213 | DEBUGFUNC("e1000_validate_eeprom_checksum"); | 5213 | DEBUGFUNC("e1000_validate_eeprom_checksum"); |
5214 | 5214 | ||
@@ -5252,7 +5252,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
5252 | checksum += eeprom_data; | 5252 | checksum += eeprom_data; |
5253 | } | 5253 | } |
5254 | 5254 | ||
5255 | if (checksum == (uint16_t) EEPROM_SUM) | 5255 | if (checksum == (u16) EEPROM_SUM) |
5256 | return E1000_SUCCESS; | 5256 | return E1000_SUCCESS; |
5257 | else { | 5257 | else { |
5258 | DEBUGOUT("EEPROM Checksum Invalid\n"); | 5258 | DEBUGOUT("EEPROM Checksum Invalid\n"); |
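The checksum rule quoted in that header comment can be checked in isolation: all 64 words (63 data words plus the stored checksum word) must sum to 0xBABA modulo 2^16. A tiny self-contained illustration; the image contents are made up, with word 63 pre-set to 0xBABA - (0x1111 + 0x2222 + 0x3333) = 0x5454 so the rule holds:

#include <stdio.h>
#include <stdint.h>

#define EEPROM_WORDS 64
#define EEPROM_SUM   0xBABA   /* value the 16-bit sum must come out to */

int main(void)
{
        uint16_t image[EEPROM_WORDS] = { 0x1111, 0x2222, 0x3333 };  /* rest zero */
        uint16_t sum = 0;
        int i;

        image[EEPROM_WORDS - 1] = 0x5454;   /* stored checksum word */

        for (i = 0; i < EEPROM_WORDS; i++)
                sum += image[i];            /* 16-bit wraparound is intended */

        printf("sum=0x%04x -> %s\n", sum,
               sum == EEPROM_SUM ? "checksum valid" : "checksum invalid");
        return 0;
}
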
@@ -5268,12 +5268,12 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
5268 | * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. | 5268 | * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. |
5269 | * Writes the difference to word offset 63 of the EEPROM. | 5269 | * Writes the difference to word offset 63 of the EEPROM. |
5270 | *****************************************************************************/ | 5270 | *****************************************************************************/ |
5271 | int32_t | 5271 | s32 |
5272 | e1000_update_eeprom_checksum(struct e1000_hw *hw) | 5272 | e1000_update_eeprom_checksum(struct e1000_hw *hw) |
5273 | { | 5273 | { |
5274 | uint32_t ctrl_ext; | 5274 | u32 ctrl_ext; |
5275 | uint16_t checksum = 0; | 5275 | u16 checksum = 0; |
5276 | uint16_t i, eeprom_data; | 5276 | u16 i, eeprom_data; |
5277 | 5277 | ||
5278 | DEBUGFUNC("e1000_update_eeprom_checksum"); | 5278 | DEBUGFUNC("e1000_update_eeprom_checksum"); |
5279 | 5279 | ||
@@ -5284,7 +5284,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5284 | } | 5284 | } |
5285 | checksum += eeprom_data; | 5285 | checksum += eeprom_data; |
5286 | } | 5286 | } |
5287 | checksum = (uint16_t) EEPROM_SUM - checksum; | 5287 | checksum = (u16) EEPROM_SUM - checksum; |
5288 | if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { | 5288 | if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { |
5289 | DEBUGOUT("EEPROM Write Error\n"); | 5289 | DEBUGOUT("EEPROM Write Error\n"); |
5290 | return -E1000_ERR_EEPROM; | 5290 | return -E1000_ERR_EEPROM; |
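The update path is the inverse of that check: sum the first 63 words, subtract the sum from 0xBABA (again modulo 2^16), and store the difference in word 63 so the validation pass balances. In miniature, with invented word values and a local EEPROM_CHECKSUM_WORD name standing in for the driver's register offset:

#include <stdio.h>
#include <stdint.h>

#define EEPROM_WORDS         64
#define EEPROM_CHECKSUM_WORD (EEPROM_WORDS - 1)   /* word offset 63 */
#define EEPROM_SUM           0xBABA

int main(void)
{
        uint16_t image[EEPROM_WORDS] = { 0xDEAD, 0xBEEF, 0x0042 };  /* rest zero */
        uint16_t sum = 0;
        int i;

        for (i = 0; i < EEPROM_CHECKSUM_WORD; i++)
                sum += image[i];
        image[EEPROM_CHECKSUM_WORD] = (uint16_t)(EEPROM_SUM - sum);

        /* Re-summing all 64 words now yields exactly 0xBABA. */
        sum = 0;
        for (i = 0; i < EEPROM_WORDS; i++)
                sum += image[i];
        printf("stored 0x%04x, total 0x%04x\n",
               image[EEPROM_CHECKSUM_WORD], sum);
        return 0;
}
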
@@ -5313,14 +5313,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5313 | * If e1000_update_eeprom_checksum is not called after this function, the | 5313 | * If e1000_update_eeprom_checksum is not called after this function, the |
5314 | * EEPROM will most likely contain an invalid checksum. | 5314 | * EEPROM will most likely contain an invalid checksum. |
5315 | *****************************************************************************/ | 5315 | *****************************************************************************/ |
5316 | int32_t | 5316 | s32 |
5317 | e1000_write_eeprom(struct e1000_hw *hw, | 5317 | e1000_write_eeprom(struct e1000_hw *hw, |
5318 | uint16_t offset, | 5318 | u16 offset, |
5319 | uint16_t words, | 5319 | u16 words, |
5320 | uint16_t *data) | 5320 | u16 *data) |
5321 | { | 5321 | { |
5322 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5322 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5323 | int32_t status = 0; | 5323 | s32 status = 0; |
5324 | 5324 | ||
5325 | DEBUGFUNC("e1000_write_eeprom"); | 5325 | DEBUGFUNC("e1000_write_eeprom"); |
5326 | 5326 | ||
@@ -5370,19 +5370,19 @@ e1000_write_eeprom(struct e1000_hw *hw, | |||
5370 | * data - pointer to array of 8 bit words to be written to the EEPROM | 5370 | * data - pointer to array of 8 bit words to be written to the EEPROM |
5371 | * | 5371 | * |
5372 | *****************************************************************************/ | 5372 | *****************************************************************************/ |
5373 | static int32_t | 5373 | static s32 |
5374 | e1000_write_eeprom_spi(struct e1000_hw *hw, | 5374 | e1000_write_eeprom_spi(struct e1000_hw *hw, |
5375 | uint16_t offset, | 5375 | u16 offset, |
5376 | uint16_t words, | 5376 | u16 words, |
5377 | uint16_t *data) | 5377 | u16 *data) |
5378 | { | 5378 | { |
5379 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5379 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5380 | uint16_t widx = 0; | 5380 | u16 widx = 0; |
5381 | 5381 | ||
5382 | DEBUGFUNC("e1000_write_eeprom_spi"); | 5382 | DEBUGFUNC("e1000_write_eeprom_spi"); |
5383 | 5383 | ||
5384 | while (widx < words) { | 5384 | while (widx < words) { |
5385 | uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI; | 5385 | u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; |
5386 | 5386 | ||
5387 | if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; | 5387 | if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; |
5388 | 5388 | ||
@@ -5401,14 +5401,14 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, | |||
5401 | /* Send the Write command (8-bit opcode + addr) */ | 5401 | /* Send the Write command (8-bit opcode + addr) */ |
5402 | e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); | 5402 | e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); |
5403 | 5403 | ||
5404 | e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2), | 5404 | e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2), |
5405 | eeprom->address_bits); | 5405 | eeprom->address_bits); |
5406 | 5406 | ||
5407 | /* Send the data */ | 5407 | /* Send the data */ |
5408 | 5408 | ||
5409 | /* Loop to allow for up to whole page write (32 bytes) of eeprom */ | 5409 | /* Loop to allow for up to whole page write (32 bytes) of eeprom */ |
5410 | while (widx < words) { | 5410 | while (widx < words) { |
5411 | uint16_t word_out = data[widx]; | 5411 | u16 word_out = data[widx]; |
5412 | word_out = (word_out >> 8) | (word_out << 8); | 5412 | word_out = (word_out >> 8) | (word_out << 8); |
5413 | e1000_shift_out_ee_bits(hw, word_out, 16); | 5413 | e1000_shift_out_ee_bits(hw, word_out, 16); |
5414 | widx++; | 5414 | widx++; |
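The word_out = (word_out >> 8) | (word_out << 8) step in that loop is a plain 16-bit byte swap: after it, the original low byte sits in the high half, so it is the byte clocked out first even though the (swapped) word is still shifted MSB first. Stand-alone:

#include <stdio.h>
#include <stdint.h>

/* 16-bit byte swap, as used before clocking a word out to the SPI part. */
static uint16_t swap16(uint16_t w)
{
        return (uint16_t)((w >> 8) | (w << 8));
}

int main(void)
{
        uint16_t word = 0xABCD;

        /* 0xABCD becomes 0xCDAB: byte 0xCD is transmitted first. */
        printf("0x%04x -> 0x%04x\n", word, swap16(word));
        return 0;
}
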
@@ -5436,16 +5436,16 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, | |||
5436 | * data - pointer to array of 16 bit words to be written to the EEPROM | 5436 | * data - pointer to array of 16 bit words to be written to the EEPROM |
5437 | * | 5437 | * |
5438 | *****************************************************************************/ | 5438 | *****************************************************************************/ |
5439 | static int32_t | 5439 | static s32 |
5440 | e1000_write_eeprom_microwire(struct e1000_hw *hw, | 5440 | e1000_write_eeprom_microwire(struct e1000_hw *hw, |
5441 | uint16_t offset, | 5441 | u16 offset, |
5442 | uint16_t words, | 5442 | u16 words, |
5443 | uint16_t *data) | 5443 | u16 *data) |
5444 | { | 5444 | { |
5445 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5445 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5446 | uint32_t eecd; | 5446 | u32 eecd; |
5447 | uint16_t words_written = 0; | 5447 | u16 words_written = 0; |
5448 | uint16_t i = 0; | 5448 | u16 i = 0; |
5449 | 5449 | ||
5450 | DEBUGFUNC("e1000_write_eeprom_microwire"); | 5450 | DEBUGFUNC("e1000_write_eeprom_microwire"); |
5451 | 5451 | ||
@@ -5456,9 +5456,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5456 | * EEPROM into write/erase mode. | 5456 | * EEPROM into write/erase mode. |
5457 | */ | 5457 | */ |
5458 | e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, | 5458 | e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, |
5459 | (uint16_t)(eeprom->opcode_bits + 2)); | 5459 | (u16)(eeprom->opcode_bits + 2)); |
5460 | 5460 | ||
5461 | e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); | 5461 | e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); |
5462 | 5462 | ||
5463 | /* Prepare the EEPROM */ | 5463 | /* Prepare the EEPROM */ |
5464 | e1000_standby_eeprom(hw); | 5464 | e1000_standby_eeprom(hw); |
@@ -5468,7 +5468,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5468 | e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, | 5468 | e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, |
5469 | eeprom->opcode_bits); | 5469 | eeprom->opcode_bits); |
5470 | 5470 | ||
5471 | e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written), | 5471 | e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), |
5472 | eeprom->address_bits); | 5472 | eeprom->address_bits); |
5473 | 5473 | ||
5474 | /* Send the data */ | 5474 | /* Send the data */ |
@@ -5506,9 +5506,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5506 | * EEPROM out of write/erase mode. | 5506 | * EEPROM out of write/erase mode. |
5507 | */ | 5507 | */ |
5508 | e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, | 5508 | e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, |
5509 | (uint16_t)(eeprom->opcode_bits + 2)); | 5509 | (u16)(eeprom->opcode_bits + 2)); |
5510 | 5510 | ||
5511 | e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); | 5511 | e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); |
5512 | 5512 | ||
5513 | return E1000_SUCCESS; | 5513 | return E1000_SUCCESS; |
5514 | } | 5514 | } |
@@ -5523,18 +5523,18 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5523 | * data - word read from the EEPROM | 5523 | * data - word read from the EEPROM |
5524 | * words - number of words to read | 5524 | * words - number of words to read |
5525 | *****************************************************************************/ | 5525 | *****************************************************************************/ |
5526 | static int32_t | 5526 | static s32 |
5527 | e1000_commit_shadow_ram(struct e1000_hw *hw) | 5527 | e1000_commit_shadow_ram(struct e1000_hw *hw) |
5528 | { | 5528 | { |
5529 | uint32_t attempts = 100000; | 5529 | u32 attempts = 100000; |
5530 | uint32_t eecd = 0; | 5530 | u32 eecd = 0; |
5531 | uint32_t flop = 0; | 5531 | u32 flop = 0; |
5532 | uint32_t i = 0; | 5532 | u32 i = 0; |
5533 | int32_t error = E1000_SUCCESS; | 5533 | s32 error = E1000_SUCCESS; |
5534 | uint32_t old_bank_offset = 0; | 5534 | u32 old_bank_offset = 0; |
5535 | uint32_t new_bank_offset = 0; | 5535 | u32 new_bank_offset = 0; |
5536 | uint8_t low_byte = 0; | 5536 | u8 low_byte = 0; |
5537 | uint8_t high_byte = 0; | 5537 | u8 high_byte = 0; |
5538 | bool sector_write_failed = false; | 5538 | bool sector_write_failed = false; |
5539 | 5539 | ||
5540 | if (hw->mac_type == e1000_82573) { | 5540 | if (hw->mac_type == e1000_82573) { |
@@ -5595,7 +5595,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5595 | * in the other NVM bank or a modified value stored | 5595 | * in the other NVM bank or a modified value stored |
5596 | * in the shadow RAM */ | 5596 | * in the shadow RAM */ |
5597 | if (hw->eeprom_shadow_ram[i].modified) { | 5597 | if (hw->eeprom_shadow_ram[i].modified) { |
5598 | low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; | 5598 | low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word; |
5599 | udelay(100); | 5599 | udelay(100); |
5600 | error = e1000_verify_write_ich8_byte(hw, | 5600 | error = e1000_verify_write_ich8_byte(hw, |
5601 | (i << 1) + new_bank_offset, low_byte); | 5601 | (i << 1) + new_bank_offset, low_byte); |
@@ -5604,7 +5604,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5604 | sector_write_failed = true; | 5604 | sector_write_failed = true; |
5605 | else { | 5605 | else { |
5606 | high_byte = | 5606 | high_byte = |
5607 | (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); | 5607 | (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); |
5608 | udelay(100); | 5608 | udelay(100); |
5609 | } | 5609 | } |
5610 | } else { | 5610 | } else { |
@@ -5687,11 +5687,11 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5687 | * | 5687 | * |
5688 | * hw - Struct containing variables accessed by shared code | 5688 | * hw - Struct containing variables accessed by shared code |
5689 | *****************************************************************************/ | 5689 | *****************************************************************************/ |
5690 | int32_t | 5690 | s32 |
5691 | e1000_read_mac_addr(struct e1000_hw * hw) | 5691 | e1000_read_mac_addr(struct e1000_hw * hw) |
5692 | { | 5692 | { |
5693 | uint16_t offset; | 5693 | u16 offset; |
5694 | uint16_t eeprom_data, i; | 5694 | u16 eeprom_data, i; |
5695 | 5695 | ||
5696 | DEBUGFUNC("e1000_read_mac_addr"); | 5696 | DEBUGFUNC("e1000_read_mac_addr"); |
5697 | 5697 | ||
@@ -5701,8 +5701,8 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
5701 | DEBUGOUT("EEPROM Read Error\n"); | 5701 | DEBUGOUT("EEPROM Read Error\n"); |
5702 | return -E1000_ERR_EEPROM; | 5702 | return -E1000_ERR_EEPROM; |
5703 | } | 5703 | } |
5704 | hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); | 5704 | hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); |
5705 | hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); | 5705 | hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8); |
5706 | } | 5706 | } |
5707 | 5707 | ||
5708 | switch (hw->mac_type) { | 5708 | switch (hw->mac_type) { |
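Each EEPROM word read in that loop carries two MAC-address octets, low byte first, so three words yield the six bytes of the permanent address. A quick stand-alone illustration with invented word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Pretend these came back from the EEPROM read: */
        uint16_t words[3] = { 0x3400, 0x7856, 0xBC9A };
        uint8_t mac[6];
        int i;

        for (i = 0; i < 3; i++) {
                mac[i * 2]     = (uint8_t)(words[i] & 0x00FF);  /* low byte first */
                mac[i * 2 + 1] = (uint8_t)(words[i] >> 8);
        }

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 00:34:56:78:9a:bc */
        return 0;
}
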
@@ -5734,8 +5734,8 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
5734 | static void | 5734 | static void |
5735 | e1000_init_rx_addrs(struct e1000_hw *hw) | 5735 | e1000_init_rx_addrs(struct e1000_hw *hw) |
5736 | { | 5736 | { |
5737 | uint32_t i; | 5737 | u32 i; |
5738 | uint32_t rar_num; | 5738 | u32 rar_num; |
5739 | 5739 | ||
5740 | DEBUGFUNC("e1000_init_rx_addrs"); | 5740 | DEBUGFUNC("e1000_init_rx_addrs"); |
5741 | 5741 | ||
@@ -5770,11 +5770,11 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5770 | * hw - Struct containing variables accessed by shared code | 5770 | * hw - Struct containing variables accessed by shared code |
5771 | * mc_addr - the multicast address to hash | 5771 | * mc_addr - the multicast address to hash |
5772 | *****************************************************************************/ | 5772 | *****************************************************************************/ |
5773 | uint32_t | 5773 | u32 |
5774 | e1000_hash_mc_addr(struct e1000_hw *hw, | 5774 | e1000_hash_mc_addr(struct e1000_hw *hw, |
5775 | uint8_t *mc_addr) | 5775 | u8 *mc_addr) |
5776 | { | 5776 | { |
5777 | uint32_t hash_value = 0; | 5777 | u32 hash_value = 0; |
5778 | 5778 | ||
5779 | /* The portion of the address that is used for the hash table is | 5779 | /* The portion of the address that is used for the hash table is |
5780 | * determined by the mc_filter_type setting. | 5780 | * determined by the mc_filter_type setting. |
@@ -5787,37 +5787,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5787 | case 0: | 5787 | case 0: |
5788 | if (hw->mac_type == e1000_ich8lan) { | 5788 | if (hw->mac_type == e1000_ich8lan) { |
5789 | /* [47:38] i.e. 0x158 for above example address */ | 5789 | /* [47:38] i.e. 0x158 for above example address */ |
5790 | hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); | 5790 | hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2)); |
5791 | } else { | 5791 | } else { |
5792 | /* [47:36] i.e. 0x563 for above example address */ | 5792 | /* [47:36] i.e. 0x563 for above example address */ |
5793 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | 5793 | hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); |
5794 | } | 5794 | } |
5795 | break; | 5795 | break; |
5796 | case 1: | 5796 | case 1: |
5797 | if (hw->mac_type == e1000_ich8lan) { | 5797 | if (hw->mac_type == e1000_ich8lan) { |
5798 | /* [46:37] i.e. 0x2B1 for above example address */ | 5798 | /* [46:37] i.e. 0x2B1 for above example address */ |
5799 | hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); | 5799 | hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3)); |
5800 | } else { | 5800 | } else { |
5801 | /* [46:35] i.e. 0xAC6 for above example address */ | 5801 | /* [46:35] i.e. 0xAC6 for above example address */ |
5802 | hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | 5802 | hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); |
5803 | } | 5803 | } |
5804 | break; | 5804 | break; |
5805 | case 2: | 5805 | case 2: |
5806 | if (hw->mac_type == e1000_ich8lan) { | 5806 | if (hw->mac_type == e1000_ich8lan) { |
5807 | /*[45:36] i.e. 0x163 for above example address */ | 5807 | /*[45:36] i.e. 0x163 for above example address */ |
5808 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | 5808 | hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); |
5809 | } else { | 5809 | } else { |
5810 | /* [45:34] i.e. 0x5D8 for above example address */ | 5810 | /* [45:34] i.e. 0x5D8 for above example address */ |
5811 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | 5811 | hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); |
5812 | } | 5812 | } |
5813 | break; | 5813 | break; |
5814 | case 3: | 5814 | case 3: |
5815 | if (hw->mac_type == e1000_ich8lan) { | 5815 | if (hw->mac_type == e1000_ich8lan) { |
5816 | /* [43:34] i.e. 0x18D for above example address */ | 5816 | /* [43:34] i.e. 0x18D for above example address */ |
5817 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | 5817 | hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); |
5818 | } else { | 5818 | } else { |
5819 | /* [43:32] i.e. 0x634 for above example address */ | 5819 | /* [43:32] i.e. 0x634 for above example address */ |
5820 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | 5820 | hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); |
5821 | } | 5821 | } |
5822 | break; | 5822 | break; |
5823 | } | 5823 | } |
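To see what those shift pairs compute, take filter type 0 on a non-ICH8 part: the 12-bit hash ([47:36] of the address, per the comment) is the high nibble of mc_addr[4] in the low four bits with all of mc_addr[5] above it. A worked example; the multicast address itself is invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Invented multicast address 01:00:5e:00:9a:bc */
        uint8_t mc_addr[6] = { 0x01, 0x00, 0x5E, 0x00, 0x9A, 0xBC };
        uint16_t hash;

        /* Filter type 0, non-ICH8 case from the switch above. */
        hash = (uint16_t)((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4));

        printf("hash = 0x%03x\n", hash);  /* (0x9A >> 4) | (0xBC << 4) = 0xbc9 */
        return 0;
}
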
@@ -5837,11 +5837,11 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5837 | *****************************************************************************/ | 5837 | *****************************************************************************/ |
5838 | void | 5838 | void |
5839 | e1000_mta_set(struct e1000_hw *hw, | 5839 | e1000_mta_set(struct e1000_hw *hw, |
5840 | uint32_t hash_value) | 5840 | u32 hash_value) |
5841 | { | 5841 | { |
5842 | uint32_t hash_bit, hash_reg; | 5842 | u32 hash_bit, hash_reg; |
5843 | uint32_t mta; | 5843 | u32 mta; |
5844 | uint32_t temp; | 5844 | u32 temp; |
5845 | 5845 | ||
5846 | /* The MTA is a register array of 128 32-bit registers. | 5846 | /* The MTA is a register array of 128 32-bit registers. |
5847 | * It is treated like an array of 4096 bits. We want to set | 5847 | * It is treated like an array of 4096 bits. We want to set |
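The driver's exact masking is not shown in this hunk, but the arithmetic implied by "128 32-bit registers treated as 4096 bits" is just a divide and modulo by 32: the upper bits of the hash pick which MTA register to touch and the low five bits pick the bit inside it. Sketch (register numbers only, no hardware access):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t hash_value = 0xBC9;                     /* e.g. the hash above */
        uint32_t hash_reg = (hash_value >> 5) & 0x7F;    /* which 32-bit register */
        uint32_t hash_bit = hash_value & 0x1F;           /* which bit inside it */

        printf("MTA[%u] |= 1 << %u\n",
               (unsigned)hash_reg, (unsigned)hash_bit);
        /* 0xBC9 -> register 94, bit 9 */
        return 0;
}
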
@@ -5886,18 +5886,18 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5886 | *****************************************************************************/ | 5886 | *****************************************************************************/ |
5887 | void | 5887 | void |
5888 | e1000_rar_set(struct e1000_hw *hw, | 5888 | e1000_rar_set(struct e1000_hw *hw, |
5889 | uint8_t *addr, | 5889 | u8 *addr, |
5890 | uint32_t index) | 5890 | u32 index) |
5891 | { | 5891 | { |
5892 | uint32_t rar_low, rar_high; | 5892 | u32 rar_low, rar_high; |
5893 | 5893 | ||
5894 | /* HW expects these in little endian so we reverse the byte order | 5894 | /* HW expects these in little endian so we reverse the byte order |
5895 | * from network order (big endian) to little endian | 5895 | * from network order (big endian) to little endian |
5896 | */ | 5896 | */ |
5897 | rar_low = ((uint32_t) addr[0] | | 5897 | rar_low = ((u32) addr[0] | |
5898 | ((uint32_t) addr[1] << 8) | | 5898 | ((u32) addr[1] << 8) | |
5899 | ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); | 5899 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); |
5900 | rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8)); | 5900 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); |
5901 | 5901 | ||
5902 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx | 5902 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx |
5903 | * unit hang. | 5903 | * unit hang. |
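The rar_low/rar_high packing above lays the six address octets out little-endian across one 32-bit value and one 16-bit-wide value, exactly as the comment says. Stand-alone, reusing the same invented address as earlier:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t addr[6] = { 0x00, 0x34, 0x56, 0x78, 0x9A, 0xBC };
        uint32_t rar_low, rar_high;

        rar_low  = (uint32_t)addr[0] |
                   ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) |
                   ((uint32_t)addr[3] << 24);
        rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        printf("rar_low = 0x%08x, rar_high = 0x%04x\n",
               (unsigned)rar_low, (unsigned)rar_high);
        /* rar_low = 0x78563400, rar_high = 0xbc9a */
        return 0;
}
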
@@ -5944,10 +5944,10 @@ e1000_rar_set(struct e1000_hw *hw, | |||
5944 | *****************************************************************************/ | 5944 | *****************************************************************************/ |
5945 | void | 5945 | void |
5946 | e1000_write_vfta(struct e1000_hw *hw, | 5946 | e1000_write_vfta(struct e1000_hw *hw, |
5947 | uint32_t offset, | 5947 | u32 offset, |
5948 | uint32_t value) | 5948 | u32 value) |
5949 | { | 5949 | { |
5950 | uint32_t temp; | 5950 | u32 temp; |
5951 | 5951 | ||
5952 | if (hw->mac_type == e1000_ich8lan) | 5952 | if (hw->mac_type == e1000_ich8lan) |
5953 | return; | 5953 | return; |
@@ -5972,10 +5972,10 @@ e1000_write_vfta(struct e1000_hw *hw, | |||
5972 | static void | 5972 | static void |
5973 | e1000_clear_vfta(struct e1000_hw *hw) | 5973 | e1000_clear_vfta(struct e1000_hw *hw) |
5974 | { | 5974 | { |
5975 | uint32_t offset; | 5975 | u32 offset; |
5976 | uint32_t vfta_value = 0; | 5976 | u32 vfta_value = 0; |
5977 | uint32_t vfta_offset = 0; | 5977 | u32 vfta_offset = 0; |
5978 | uint32_t vfta_bit_in_reg = 0; | 5978 | u32 vfta_bit_in_reg = 0; |
5979 | 5979 | ||
5980 | if (hw->mac_type == e1000_ich8lan) | 5980 | if (hw->mac_type == e1000_ich8lan) |
5981 | return; | 5981 | return; |
@@ -6003,15 +6003,15 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
6003 | } | 6003 | } |
6004 | } | 6004 | } |
6005 | 6005 | ||
6006 | static int32_t | 6006 | static s32 |
6007 | e1000_id_led_init(struct e1000_hw * hw) | 6007 | e1000_id_led_init(struct e1000_hw * hw) |
6008 | { | 6008 | { |
6009 | uint32_t ledctl; | 6009 | u32 ledctl; |
6010 | const uint32_t ledctl_mask = 0x000000FF; | 6010 | const u32 ledctl_mask = 0x000000FF; |
6011 | const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON; | 6011 | const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; |
6012 | const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF; | 6012 | const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; |
6013 | uint16_t eeprom_data, i, temp; | 6013 | u16 eeprom_data, i, temp; |
6014 | const uint16_t led_mask = 0x0F; | 6014 | const u16 led_mask = 0x0F; |
6015 | 6015 | ||
6016 | DEBUGFUNC("e1000_id_led_init"); | 6016 | DEBUGFUNC("e1000_id_led_init"); |
6017 | 6017 | ||
@@ -6086,11 +6086,11 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
6086 | * | 6086 | * |
6087 | * hw - Struct containing variables accessed by shared code | 6087 | * hw - Struct containing variables accessed by shared code |
6088 | *****************************************************************************/ | 6088 | *****************************************************************************/ |
6089 | int32_t | 6089 | s32 |
6090 | e1000_setup_led(struct e1000_hw *hw) | 6090 | e1000_setup_led(struct e1000_hw *hw) |
6091 | { | 6091 | { |
6092 | uint32_t ledctl; | 6092 | u32 ledctl; |
6093 | int32_t ret_val = E1000_SUCCESS; | 6093 | s32 ret_val = E1000_SUCCESS; |
6094 | 6094 | ||
6095 | DEBUGFUNC("e1000_setup_led"); | 6095 | DEBUGFUNC("e1000_setup_led"); |
6096 | 6096 | ||
@@ -6111,7 +6111,7 @@ e1000_setup_led(struct e1000_hw *hw) | |||
6111 | if (ret_val) | 6111 | if (ret_val) |
6112 | return ret_val; | 6112 | return ret_val; |
6113 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, | 6113 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, |
6114 | (uint16_t)(hw->phy_spd_default & | 6114 | (u16)(hw->phy_spd_default & |
6115 | ~IGP01E1000_GMII_SPD)); | 6115 | ~IGP01E1000_GMII_SPD)); |
6116 | if (ret_val) | 6116 | if (ret_val) |
6117 | return ret_val; | 6117 | return ret_val; |
@@ -6145,11 +6145,11 @@ e1000_setup_led(struct e1000_hw *hw) | |||
6145 | * | 6145 | * |
6146 | * hw - Struct containing variables accessed by shared code | 6146 | * hw - Struct containing variables accessed by shared code |
6147 | *****************************************************************************/ | 6147 | *****************************************************************************/ |
6148 | int32_t | 6148 | s32 |
6149 | e1000_blink_led_start(struct e1000_hw *hw) | 6149 | e1000_blink_led_start(struct e1000_hw *hw) |
6150 | { | 6150 | { |
6151 | int16_t i; | 6151 | s16 i; |
6152 | uint32_t ledctl_blink = 0; | 6152 | u32 ledctl_blink = 0; |
6153 | 6153 | ||
6154 | DEBUGFUNC("e1000_id_led_blink_on"); | 6154 | DEBUGFUNC("e1000_id_led_blink_on"); |
6155 | 6155 | ||
@@ -6180,10 +6180,10 @@ e1000_blink_led_start(struct e1000_hw *hw) | |||
6180 | * | 6180 | * |
6181 | * hw - Struct containing variables accessed by shared code | 6181 | * hw - Struct containing variables accessed by shared code |
6182 | *****************************************************************************/ | 6182 | *****************************************************************************/ |
6183 | int32_t | 6183 | s32 |
6184 | e1000_cleanup_led(struct e1000_hw *hw) | 6184 | e1000_cleanup_led(struct e1000_hw *hw) |
6185 | { | 6185 | { |
6186 | int32_t ret_val = E1000_SUCCESS; | 6186 | s32 ret_val = E1000_SUCCESS; |
6187 | 6187 | ||
6188 | DEBUGFUNC("e1000_cleanup_led"); | 6188 | DEBUGFUNC("e1000_cleanup_led"); |
6189 | 6189 | ||
@@ -6222,10 +6222,10 @@ e1000_cleanup_led(struct e1000_hw *hw) | |||
6222 | * | 6222 | * |
6223 | * hw - Struct containing variables accessed by shared code | 6223 | * hw - Struct containing variables accessed by shared code |
6224 | *****************************************************************************/ | 6224 | *****************************************************************************/ |
6225 | int32_t | 6225 | s32 |
6226 | e1000_led_on(struct e1000_hw *hw) | 6226 | e1000_led_on(struct e1000_hw *hw) |
6227 | { | 6227 | { |
6228 | uint32_t ctrl = E1000_READ_REG(hw, CTRL); | 6228 | u32 ctrl = E1000_READ_REG(hw, CTRL); |
6229 | 6229 | ||
6230 | DEBUGFUNC("e1000_led_on"); | 6230 | DEBUGFUNC("e1000_led_on"); |
6231 | 6231 | ||
@@ -6273,10 +6273,10 @@ e1000_led_on(struct e1000_hw *hw) | |||
6273 | * | 6273 | * |
6274 | * hw - Struct containing variables accessed by shared code | 6274 | * hw - Struct containing variables accessed by shared code |
6275 | *****************************************************************************/ | 6275 | *****************************************************************************/ |
6276 | int32_t | 6276 | s32 |
6277 | e1000_led_off(struct e1000_hw *hw) | 6277 | e1000_led_off(struct e1000_hw *hw) |
6278 | { | 6278 | { |
6279 | uint32_t ctrl = E1000_READ_REG(hw, CTRL); | 6279 | u32 ctrl = E1000_READ_REG(hw, CTRL); |
6280 | 6280 | ||
6281 | DEBUGFUNC("e1000_led_off"); | 6281 | DEBUGFUNC("e1000_led_off"); |
6282 | 6282 | ||
@@ -6327,7 +6327,7 @@ e1000_led_off(struct e1000_hw *hw) | |||
6327 | static void | 6327 | static void |
6328 | e1000_clear_hw_cntrs(struct e1000_hw *hw) | 6328 | e1000_clear_hw_cntrs(struct e1000_hw *hw) |
6329 | { | 6329 | { |
6330 | volatile uint32_t temp; | 6330 | volatile u32 temp; |
6331 | 6331 | ||
6332 | temp = E1000_READ_REG(hw, CRCERRS); | 6332 | temp = E1000_READ_REG(hw, CRCERRS); |
6333 | temp = E1000_READ_REG(hw, SYMERRS); | 6333 | temp = E1000_READ_REG(hw, SYMERRS); |
@@ -6495,10 +6495,10 @@ e1000_update_adaptive(struct e1000_hw *hw) | |||
6495 | void | 6495 | void |
6496 | e1000_tbi_adjust_stats(struct e1000_hw *hw, | 6496 | e1000_tbi_adjust_stats(struct e1000_hw *hw, |
6497 | struct e1000_hw_stats *stats, | 6497 | struct e1000_hw_stats *stats, |
6498 | uint32_t frame_len, | 6498 | u32 frame_len, |
6499 | uint8_t *mac_addr) | 6499 | u8 *mac_addr) |
6500 | { | 6500 | { |
6501 | uint64_t carry_bit; | 6501 | u64 carry_bit; |
6502 | 6502 | ||
6503 | /* First adjust the frame length. */ | 6503 | /* First adjust the frame length. */ |
6504 | frame_len--; | 6504 | frame_len--; |
@@ -6527,7 +6527,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw, | |||
6527 | * since the test for a multicast frame will test positive on | 6527 | * since the test for a multicast frame will test positive on |
6528 | * a broadcast frame. | 6528 | * a broadcast frame. |
6529 | */ | 6529 | */ |
6530 | if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) | 6530 | if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff)) |
6531 | /* Broadcast packet */ | 6531 | /* Broadcast packet */ |
6532 | stats->bprc++; | 6532 | stats->bprc++; |
6533 | else if (*mac_addr & 0x01) | 6533 | else if (*mac_addr & 0x01) |
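The broadcast-before-multicast ordering in that test matters because ff:ff:ff:ff:ff:ff also has the group bit (bit 0 of the first octet) set; testing the multicast bit first would misclassify broadcasts. A compact way to see the same classification outside the driver; the two-byte broadcast shortcut mirrors what the code above does rather than a full six-byte compare:

#include <stdio.h>
#include <stdint.h>

static const char *classify(const uint8_t *mac)
{
        /* Same order as the stats adjustment: broadcast test first,
         * then the group/individual bit. */
        if (mac[0] == 0xff && mac[1] == 0xff)
                return "broadcast";
        if (mac[0] & 0x01)
                return "multicast";
        return "unicast";
}

int main(void)
{
        uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

        printf("%s %s %s\n", classify(bcast), classify(mcast), classify(ucast));
        return 0;
}
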
@@ -6573,9 +6573,9 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw, | |||
6573 | void | 6573 | void |
6574 | e1000_get_bus_info(struct e1000_hw *hw) | 6574 | e1000_get_bus_info(struct e1000_hw *hw) |
6575 | { | 6575 | { |
6576 | int32_t ret_val; | 6576 | s32 ret_val; |
6577 | uint16_t pci_ex_link_status; | 6577 | u16 pci_ex_link_status; |
6578 | uint32_t status; | 6578 | u32 status; |
6579 | 6579 | ||
6580 | switch (hw->mac_type) { | 6580 | switch (hw->mac_type) { |
6581 | case e1000_82542_rev2_0: | 6581 | case e1000_82542_rev2_0: |
@@ -6647,8 +6647,8 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
6647 | *****************************************************************************/ | 6647 | *****************************************************************************/ |
6648 | static void | 6648 | static void |
6649 | e1000_write_reg_io(struct e1000_hw *hw, | 6649 | e1000_write_reg_io(struct e1000_hw *hw, |
6650 | uint32_t offset, | 6650 | u32 offset, |
6651 | uint32_t value) | 6651 | u32 value) |
6652 | { | 6652 | { |
6653 | unsigned long io_addr = hw->io_base; | 6653 | unsigned long io_addr = hw->io_base; |
6654 | unsigned long io_data = hw->io_base + 4; | 6654 | unsigned long io_data = hw->io_base + 4; |
@@ -6672,15 +6672,15 @@ e1000_write_reg_io(struct e1000_hw *hw, | |||
6672 | * register to the minimum and maximum range. | 6672 | * register to the minimum and maximum range. |
6673 | * For IGP phy's, the function calculates the range by the AGC registers. | 6673 | * For IGP phy's, the function calculates the range by the AGC registers. |
6674 | *****************************************************************************/ | 6674 | *****************************************************************************/ |
6675 | static int32_t | 6675 | static s32 |
6676 | e1000_get_cable_length(struct e1000_hw *hw, | 6676 | e1000_get_cable_length(struct e1000_hw *hw, |
6677 | uint16_t *min_length, | 6677 | u16 *min_length, |
6678 | uint16_t *max_length) | 6678 | u16 *max_length) |
6679 | { | 6679 | { |
6680 | int32_t ret_val; | 6680 | s32 ret_val; |
6681 | uint16_t agc_value = 0; | 6681 | u16 agc_value = 0; |
6682 | uint16_t i, phy_data; | 6682 | u16 i, phy_data; |
6683 | uint16_t cable_length; | 6683 | u16 cable_length; |
6684 | 6684 | ||
6685 | DEBUGFUNC("e1000_get_cable_length"); | 6685 | DEBUGFUNC("e1000_get_cable_length"); |
6686 | 6686 | ||
@@ -6751,9 +6751,9 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6751 | break; | 6751 | break; |
6752 | } | 6752 | } |
6753 | } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ | 6753 | } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ |
6754 | uint16_t cur_agc_value; | 6754 | u16 cur_agc_value; |
6755 | uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; | 6755 | u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; |
6756 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6756 | u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
6757 | {IGP01E1000_PHY_AGC_A, | 6757 | {IGP01E1000_PHY_AGC_A, |
6758 | IGP01E1000_PHY_AGC_B, | 6758 | IGP01E1000_PHY_AGC_B, |
6759 | IGP01E1000_PHY_AGC_C, | 6759 | IGP01E1000_PHY_AGC_C, |
@@ -6799,9 +6799,9 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6799 | IGP01E1000_AGC_RANGE; | 6799 | IGP01E1000_AGC_RANGE; |
6800 | } else if (hw->phy_type == e1000_phy_igp_2 || | 6800 | } else if (hw->phy_type == e1000_phy_igp_2 || |
6801 | hw->phy_type == e1000_phy_igp_3) { | 6801 | hw->phy_type == e1000_phy_igp_3) { |
6802 | uint16_t cur_agc_index, max_agc_index = 0; | 6802 | u16 cur_agc_index, max_agc_index = 0; |
6803 | uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; | 6803 | u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; |
6804 | uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = | 6804 | u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = |
6805 | {IGP02E1000_PHY_AGC_A, | 6805 | {IGP02E1000_PHY_AGC_A, |
6806 | IGP02E1000_PHY_AGC_B, | 6806 | IGP02E1000_PHY_AGC_B, |
6807 | IGP02E1000_PHY_AGC_C, | 6807 | IGP02E1000_PHY_AGC_C, |
@@ -6863,12 +6863,12 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6863 | * return 0. If the link speed is 1000 Mbps the polarity status is in the | 6863 | * return 0. If the link speed is 1000 Mbps the polarity status is in the |
6864 | * IGP01E1000_PHY_PCS_INIT_REG. | 6864 | * IGP01E1000_PHY_PCS_INIT_REG. |
6865 | *****************************************************************************/ | 6865 | *****************************************************************************/ |
6866 | static int32_t | 6866 | static s32 |
6867 | e1000_check_polarity(struct e1000_hw *hw, | 6867 | e1000_check_polarity(struct e1000_hw *hw, |
6868 | e1000_rev_polarity *polarity) | 6868 | e1000_rev_polarity *polarity) |
6869 | { | 6869 | { |
6870 | int32_t ret_val; | 6870 | s32 ret_val; |
6871 | uint16_t phy_data; | 6871 | u16 phy_data; |
6872 | 6872 | ||
6873 | DEBUGFUNC("e1000_check_polarity"); | 6873 | DEBUGFUNC("e1000_check_polarity"); |
6874 | 6874 | ||
@@ -6939,11 +6939,11 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6939 | * Link Health register. In IGP this bit is latched high, so the driver must | 6939 | * Link Health register. In IGP this bit is latched high, so the driver must |
6940 | * read it immediately after link is established. | 6940 | * read it immediately after link is established. |
6941 | *****************************************************************************/ | 6941 | *****************************************************************************/ |
6942 | static int32_t | 6942 | static s32 |
6943 | e1000_check_downshift(struct e1000_hw *hw) | 6943 | e1000_check_downshift(struct e1000_hw *hw) |
6944 | { | 6944 | { |
6945 | int32_t ret_val; | 6945 | s32 ret_val; |
6946 | uint16_t phy_data; | 6946 | u16 phy_data; |
6947 | 6947 | ||
6948 | DEBUGFUNC("e1000_check_downshift"); | 6948 | DEBUGFUNC("e1000_check_downshift"); |
6949 | 6949 | ||
@@ -6985,18 +6985,18 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6985 | * | 6985 | * |
6986 | ****************************************************************************/ | 6986 | ****************************************************************************/ |
6987 | 6987 | ||
6988 | static int32_t | 6988 | static s32 |
6989 | e1000_config_dsp_after_link_change(struct e1000_hw *hw, | 6989 | e1000_config_dsp_after_link_change(struct e1000_hw *hw, |
6990 | bool link_up) | 6990 | bool link_up) |
6991 | { | 6991 | { |
6992 | int32_t ret_val; | 6992 | s32 ret_val; |
6993 | uint16_t phy_data, phy_saved_data, speed, duplex, i; | 6993 | u16 phy_data, phy_saved_data, speed, duplex, i; |
6994 | uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6994 | u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
6995 | {IGP01E1000_PHY_AGC_PARAM_A, | 6995 | {IGP01E1000_PHY_AGC_PARAM_A, |
6996 | IGP01E1000_PHY_AGC_PARAM_B, | 6996 | IGP01E1000_PHY_AGC_PARAM_B, |
6997 | IGP01E1000_PHY_AGC_PARAM_C, | 6997 | IGP01E1000_PHY_AGC_PARAM_C, |
6998 | IGP01E1000_PHY_AGC_PARAM_D}; | 6998 | IGP01E1000_PHY_AGC_PARAM_D}; |
6999 | uint16_t min_length, max_length; | 6999 | u16 min_length, max_length; |
7000 | 7000 | ||
7001 | DEBUGFUNC("e1000_config_dsp_after_link_change"); | 7001 | DEBUGFUNC("e1000_config_dsp_after_link_change"); |
7002 | 7002 | ||
@@ -7038,8 +7038,8 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, | |||
7038 | if ((hw->ffe_config_state == e1000_ffe_config_enabled) && | 7038 | if ((hw->ffe_config_state == e1000_ffe_config_enabled) && |
7039 | (min_length < e1000_igp_cable_length_50)) { | 7039 | (min_length < e1000_igp_cable_length_50)) { |
7040 | 7040 | ||
7041 | uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; | 7041 | u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; |
7042 | uint32_t idle_errs = 0; | 7042 | u32 idle_errs = 0; |
7043 | 7043 | ||
7044 | /* clear previous idle error counts */ | 7044 | /* clear previous idle error counts */ |
7045 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, | 7045 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, |
@@ -7173,11 +7173,11 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, | |||
7173 | * | 7173 | * |
7174 | * hw - Struct containing variables accessed by shared code | 7174 | * hw - Struct containing variables accessed by shared code |
7175 | ****************************************************************************/ | 7175 | ****************************************************************************/ |
7176 | static int32_t | 7176 | static s32 |
7177 | e1000_set_phy_mode(struct e1000_hw *hw) | 7177 | e1000_set_phy_mode(struct e1000_hw *hw) |
7178 | { | 7178 | { |
7179 | int32_t ret_val; | 7179 | s32 ret_val; |
7180 | uint16_t eeprom_data; | 7180 | u16 eeprom_data; |
7181 | 7181 | ||
7182 | DEBUGFUNC("e1000_set_phy_mode"); | 7182 | DEBUGFUNC("e1000_set_phy_mode"); |
7183 | 7183 | ||
@@ -7218,13 +7218,13 @@ e1000_set_phy_mode(struct e1000_hw *hw) | |||
7218 | * | 7218 | * |
7219 | ****************************************************************************/ | 7219 | ****************************************************************************/ |
7220 | 7220 | ||
7221 | static int32_t | 7221 | static s32 |
7222 | e1000_set_d3_lplu_state(struct e1000_hw *hw, | 7222 | e1000_set_d3_lplu_state(struct e1000_hw *hw, |
7223 | bool active) | 7223 | bool active) |
7224 | { | 7224 | { |
7225 | uint32_t phy_ctrl = 0; | 7225 | u32 phy_ctrl = 0; |
7226 | int32_t ret_val; | 7226 | s32 ret_val; |
7227 | uint16_t phy_data; | 7227 | u16 phy_data; |
7228 | DEBUGFUNC("e1000_set_d3_lplu_state"); | 7228 | DEBUGFUNC("e1000_set_d3_lplu_state"); |
7229 | 7229 | ||
7230 | if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 | 7230 | if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 |
@@ -7348,13 +7348,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
7348 | * | 7348 | * |
7349 | ****************************************************************************/ | 7349 | ****************************************************************************/ |
7350 | 7350 | ||
7351 | static int32_t | 7351 | static s32 |
7352 | e1000_set_d0_lplu_state(struct e1000_hw *hw, | 7352 | e1000_set_d0_lplu_state(struct e1000_hw *hw, |
7353 | bool active) | 7353 | bool active) |
7354 | { | 7354 | { |
7355 | uint32_t phy_ctrl = 0; | 7355 | u32 phy_ctrl = 0; |
7356 | int32_t ret_val; | 7356 | s32 ret_val; |
7357 | uint16_t phy_data; | 7357 | u16 phy_data; |
7358 | DEBUGFUNC("e1000_set_d0_lplu_state"); | 7358 | DEBUGFUNC("e1000_set_d0_lplu_state"); |
7359 | 7359 | ||
7360 | if (hw->mac_type <= e1000_82547_rev_2) | 7360 | if (hw->mac_type <= e1000_82547_rev_2) |
@@ -7439,12 +7439,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
7439 | * | 7439 | * |
7440 | * hw - Struct containing variables accessed by shared code | 7440 | * hw - Struct containing variables accessed by shared code |
7441 | *****************************************************************************/ | 7441 | *****************************************************************************/ |
7442 | static int32_t | 7442 | static s32 |
7443 | e1000_set_vco_speed(struct e1000_hw *hw) | 7443 | e1000_set_vco_speed(struct e1000_hw *hw) |
7444 | { | 7444 | { |
7445 | int32_t ret_val; | 7445 | s32 ret_val; |
7446 | uint16_t default_page = 0; | 7446 | u16 default_page = 0; |
7447 | uint16_t phy_data; | 7447 | u16 phy_data; |
7448 | 7448 | ||
7449 | DEBUGFUNC("e1000_set_vco_speed"); | 7449 | DEBUGFUNC("e1000_set_vco_speed"); |
7450 | 7450 | ||
@@ -7503,18 +7503,18 @@ e1000_set_vco_speed(struct e1000_hw *hw) | |||
7503 | * | 7503 | * |
7504 | * returns: - E1000_SUCCESS . | 7504 | * returns: - E1000_SUCCESS . |
7505 | ****************************************************************************/ | 7505 | ****************************************************************************/ |
7506 | static int32_t | 7506 | static s32 |
7507 | e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) | 7507 | e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer) |
7508 | { | 7508 | { |
7509 | uint8_t i; | 7509 | u8 i; |
7510 | uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; | 7510 | u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET; |
7511 | uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; | 7511 | u8 length = E1000_MNG_DHCP_COOKIE_LENGTH; |
7512 | 7512 | ||
7513 | length = (length >> 2); | 7513 | length = (length >> 2); |
7514 | offset = (offset >> 2); | 7514 | offset = (offset >> 2); |
7515 | 7515 | ||
7516 | for (i = 0; i < length; i++) { | 7516 | for (i = 0; i < length; i++) { |
7517 | *((uint32_t *) buffer + i) = | 7517 | *((u32 *) buffer + i) = |
7518 | E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); | 7518 | E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); |
7519 | } | 7519 | } |
7520 | return E1000_SUCCESS; | 7520 | return E1000_SUCCESS; |
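Aside, not part of the patch: the hunk above only renames types, but the loop it touches is worth a gloss — the byte offset and length of the management cookie are converted to 32-bit-word units with `>> 2`, and the destination byte buffer is then filled one dword at a time. A minimal standalone sketch of that pattern, in which copy_host_if_block and the read_dword callback are hypothetical names standing in for the real E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, ...) access:

#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;

/* Stand-in for E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, idx); assumption only. */
typedef u32 (*read_dword_fn)(u32 idx);

static void copy_host_if_block(read_dword_fn read_dword, u8 *buffer,
                               u32 byte_offset, u8 byte_length)
{
        u32 offset = byte_offset >> 2;  /* bytes -> dwords, as in the hunk above */
        u8 length = byte_length >> 2;
        u8 i;

        for (i = 0; i < length; i++)
                ((u32 *)buffer)[i] = read_dword(offset + i);
}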
@@ -7530,11 +7530,11 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) | |||
7530 | * timeout | 7530 | * timeout |
7531 | * - E1000_SUCCESS for success. | 7531 | * - E1000_SUCCESS for success. |
7532 | ****************************************************************************/ | 7532 | ****************************************************************************/ |
7533 | static int32_t | 7533 | static s32 |
7534 | e1000_mng_enable_host_if(struct e1000_hw * hw) | 7534 | e1000_mng_enable_host_if(struct e1000_hw * hw) |
7535 | { | 7535 | { |
7536 | uint32_t hicr; | 7536 | u32 hicr; |
7537 | uint8_t i; | 7537 | u8 i; |
7538 | 7538 | ||
7539 | /* Check that the host interface is enabled. */ | 7539 | /* Check that the host interface is enabled. */ |
7540 | hicr = E1000_READ_REG(hw, HICR); | 7540 | hicr = E1000_READ_REG(hw, HICR); |
@@ -7564,14 +7564,14 @@ e1000_mng_enable_host_if(struct e1000_hw * hw) | |||
7564 | * | 7564 | * |
7565 | * returns - E1000_SUCCESS for success. | 7565 | * returns - E1000_SUCCESS for success. |
7566 | ****************************************************************************/ | 7566 | ****************************************************************************/ |
7567 | static int32_t | 7567 | static s32 |
7568 | e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, | 7568 | e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, |
7569 | uint16_t length, uint16_t offset, uint8_t *sum) | 7569 | u16 length, u16 offset, u8 *sum) |
7570 | { | 7570 | { |
7571 | uint8_t *tmp; | 7571 | u8 *tmp; |
7572 | uint8_t *bufptr = buffer; | 7572 | u8 *bufptr = buffer; |
7573 | uint32_t data = 0; | 7573 | u32 data = 0; |
7574 | uint16_t remaining, i, j, prev_bytes; | 7574 | u16 remaining, i, j, prev_bytes; |
7575 | 7575 | ||
7576 | /* sum = only sum of the data and it is not checksum */ | 7576 | /* sum = only sum of the data and it is not checksum */ |
7577 | 7577 | ||
@@ -7579,14 +7579,14 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, | |||
7579 | return -E1000_ERR_PARAM; | 7579 | return -E1000_ERR_PARAM; |
7580 | } | 7580 | } |
7581 | 7581 | ||
7582 | tmp = (uint8_t *)&data; | 7582 | tmp = (u8 *)&data; |
7583 | prev_bytes = offset & 0x3; | 7583 | prev_bytes = offset & 0x3; |
7584 | offset &= 0xFFFC; | 7584 | offset &= 0xFFFC; |
7585 | offset >>= 2; | 7585 | offset >>= 2; |
7586 | 7586 | ||
7587 | if (prev_bytes) { | 7587 | if (prev_bytes) { |
7588 | data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); | 7588 | data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); |
7589 | for (j = prev_bytes; j < sizeof(uint32_t); j++) { | 7589 | for (j = prev_bytes; j < sizeof(u32); j++) { |
7590 | *(tmp + j) = *bufptr++; | 7590 | *(tmp + j) = *bufptr++; |
7591 | *sum += *(tmp + j); | 7591 | *sum += *(tmp + j); |
7592 | } | 7592 | } |
@@ -7604,7 +7604,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, | |||
7604 | /* The device driver writes the relevant command block into the | 7604 | /* The device driver writes the relevant command block into the |
7605 | * ram area. */ | 7605 | * ram area. */ |
7606 | for (i = 0; i < length; i++) { | 7606 | for (i = 0; i < length; i++) { |
7607 | for (j = 0; j < sizeof(uint32_t); j++) { | 7607 | for (j = 0; j < sizeof(u32); j++) { |
7608 | *(tmp + j) = *bufptr++; | 7608 | *(tmp + j) = *bufptr++; |
7609 | *sum += *(tmp + j); | 7609 | *sum += *(tmp + j); |
7610 | } | 7610 | } |
@@ -7612,7 +7612,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, | |||
7612 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); | 7612 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); |
7613 | } | 7613 | } |
7614 | if (remaining) { | 7614 | if (remaining) { |
7615 | for (j = 0; j < sizeof(uint32_t); j++) { | 7615 | for (j = 0; j < sizeof(u32); j++) { |
7616 | if (j < remaining) | 7616 | if (j < remaining) |
7617 | *(tmp + j) = *bufptr++; | 7617 | *(tmp + j) = *bufptr++; |
7618 | else | 7618 | else |
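Aside, not part of the patch: the hunks above all fall inside e1000_mng_host_if_write(), which packs a byte buffer into 32-bit writes to the host-interface RAM — an unaligned leading fragment is merged into the existing dword (the prev_bytes path), full dwords are then written, and a trailing remainder is zero-padded, with a plain byte sum accumulated along the way. A self-contained sketch of that packing under stated assumptions: host_if_write_sketch and the rd/wr callbacks are hypothetical, standing in for the E1000_READ/WRITE_REG_ARRAY_DWORD accessors, and the real function's loop bookkeeping may differ in detail from what the visible hunks show.

#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Hypothetical accessors; in the driver these are macros over HOST_IF. */
typedef u32 (*read_dword_fn)(u32 idx);
typedef void (*write_dword_fn)(u32 idx, u32 val);

static void host_if_write_sketch(read_dword_fn rd, write_dword_fn wr,
                                 const u8 *buf, u16 length, u16 offset, u8 *sum)
{
        u32 data;
        u8 *tmp = (u8 *)&data;
        u16 prev_bytes = offset & 0x3;  /* bytes already used in the first dword */
        u16 i, j;

        offset >>= 2;                   /* byte offset -> dword index */

        if (prev_bytes) {
                data = rd(offset);      /* read-modify-write the partial head dword */
                for (j = prev_bytes; j < sizeof(u32) && length; j++, length--) {
                        tmp[j] = *buf++;
                        *sum += tmp[j];
                }
                wr(offset, data);
                offset++;
        }

        for (i = 0; i < length / sizeof(u32); i++) {    /* full dwords */
                for (j = 0; j < sizeof(u32); j++) {
                        tmp[j] = *buf++;
                        *sum += tmp[j];
                }
                wr(offset + i, data);
        }

        if (length % sizeof(u32)) {                     /* zero-padded tail */
                data = 0;
                for (j = 0; j < length % sizeof(u32); j++) {
                        tmp[j] = *buf++;
                        *sum += tmp[j];
                }
                wr(offset + length / sizeof(u32), data);
        }
}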
@@ -7632,23 +7632,23 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, | |||
7632 | * | 7632 | * |
7633 | * returns - E1000_SUCCESS for success. | 7633 | * returns - E1000_SUCCESS for success. |
7634 | ****************************************************************************/ | 7634 | ****************************************************************************/ |
7635 | static int32_t | 7635 | static s32 |
7636 | e1000_mng_write_cmd_header(struct e1000_hw * hw, | 7636 | e1000_mng_write_cmd_header(struct e1000_hw * hw, |
7637 | struct e1000_host_mng_command_header * hdr) | 7637 | struct e1000_host_mng_command_header * hdr) |
7638 | { | 7638 | { |
7639 | uint16_t i; | 7639 | u16 i; |
7640 | uint8_t sum; | 7640 | u8 sum; |
7641 | uint8_t *buffer; | 7641 | u8 *buffer; |
7642 | 7642 | ||
7643 | /* Write the whole command header structure which includes sum of | 7643 | /* Write the whole command header structure which includes sum of |
7644 | * the buffer */ | 7644 | * the buffer */ |
7645 | 7645 | ||
7646 | uint16_t length = sizeof(struct e1000_host_mng_command_header); | 7646 | u16 length = sizeof(struct e1000_host_mng_command_header); |
7647 | 7647 | ||
7648 | sum = hdr->checksum; | 7648 | sum = hdr->checksum; |
7649 | hdr->checksum = 0; | 7649 | hdr->checksum = 0; |
7650 | 7650 | ||
7651 | buffer = (uint8_t *) hdr; | 7651 | buffer = (u8 *) hdr; |
7652 | i = length; | 7652 | i = length; |
7653 | while (i--) | 7653 | while (i--) |
7654 | sum += buffer[i]; | 7654 | sum += buffer[i]; |
@@ -7658,7 +7658,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
7658 | length >>= 2; | 7658 | length >>= 2; |
7659 | /* The device driver writes the relevant command block into the ram area. */ | 7659 | /* The device driver writes the relevant command block into the ram area. */ |
7660 | for (i = 0; i < length; i++) { | 7660 | for (i = 0; i < length; i++) { |
7661 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); | 7661 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i)); |
7662 | E1000_WRITE_FLUSH(hw); | 7662 | E1000_WRITE_FLUSH(hw); |
7663 | } | 7663 | } |
7664 | 7664 | ||
@@ -7672,10 +7672,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
7672 | * | 7672 | * |
7673 | * returns - E1000_SUCCESS for success. | 7673 | * returns - E1000_SUCCESS for success. |
7674 | ****************************************************************************/ | 7674 | ****************************************************************************/ |
7675 | static int32_t | 7675 | static s32 |
7676 | e1000_mng_write_commit(struct e1000_hw * hw) | 7676 | e1000_mng_write_commit(struct e1000_hw * hw) |
7677 | { | 7677 | { |
7678 | uint32_t hicr; | 7678 | u32 hicr; |
7679 | 7679 | ||
7680 | hicr = E1000_READ_REG(hw, HICR); | 7680 | hicr = E1000_READ_REG(hw, HICR); |
7681 | /* Setting this bit tells the ARC that a new command is pending. */ | 7681 | /* Setting this bit tells the ARC that a new command is pending. */ |
@@ -7693,7 +7693,7 @@ e1000_mng_write_commit(struct e1000_hw * hw) | |||
7693 | bool | 7693 | bool |
7694 | e1000_check_mng_mode(struct e1000_hw *hw) | 7694 | e1000_check_mng_mode(struct e1000_hw *hw) |
7695 | { | 7695 | { |
7696 | uint32_t fwsm; | 7696 | u32 fwsm; |
7697 | 7697 | ||
7698 | fwsm = E1000_READ_REG(hw, FWSM); | 7698 | fwsm = E1000_READ_REG(hw, FWSM); |
7699 | 7699 | ||
@@ -7712,11 +7712,11 @@ e1000_check_mng_mode(struct e1000_hw *hw) | |||
7712 | /***************************************************************************** | 7712 | /***************************************************************************** |
7713 | * This function writes the dhcp info . | 7713 | * This function writes the dhcp info . |
7714 | ****************************************************************************/ | 7714 | ****************************************************************************/ |
7715 | int32_t | 7715 | s32 |
7716 | e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, | 7716 | e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer, |
7717 | uint16_t length) | 7717 | u16 length) |
7718 | { | 7718 | { |
7719 | int32_t ret_val; | 7719 | s32 ret_val; |
7720 | struct e1000_host_mng_command_header hdr; | 7720 | struct e1000_host_mng_command_header hdr; |
7721 | 7721 | ||
7722 | hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; | 7722 | hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; |
@@ -7744,11 +7744,11 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, | |||
7744 | * | 7744 | * |
7745 | * returns - checksum of buffer contents. | 7745 | * returns - checksum of buffer contents. |
7746 | ****************************************************************************/ | 7746 | ****************************************************************************/ |
7747 | static uint8_t | 7747 | static u8 |
7748 | e1000_calculate_mng_checksum(char *buffer, uint32_t length) | 7748 | e1000_calculate_mng_checksum(char *buffer, u32 length) |
7749 | { | 7749 | { |
7750 | uint8_t sum = 0; | 7750 | u8 sum = 0; |
7751 | uint32_t i; | 7751 | u32 i; |
7752 | 7752 | ||
7753 | if (!buffer) | 7753 | if (!buffer) |
7754 | return 0; | 7754 | return 0; |
@@ -7756,7 +7756,7 @@ e1000_calculate_mng_checksum(char *buffer, uint32_t length) | |||
7756 | for (i=0; i < length; i++) | 7756 | for (i=0; i < length; i++) |
7757 | sum += buffer[i]; | 7757 | sum += buffer[i]; |
7758 | 7758 | ||
7759 | return (uint8_t) (0 - sum); | 7759 | return (u8) (0 - sum); |
7760 | } | 7760 | } |
7761 | 7761 | ||
7762 | /***************************************************************************** | 7762 | /***************************************************************************** |
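Aside, not part of the patch: e1000_calculate_mng_checksum(), touched just above, returns the two's complement of the running byte sum, so a buffer with its checksum byte appended sums to zero modulo 256. A standalone compute/verify sketch of that property (mng_checksum mirrors the arithmetic shown in the hunk; the cookie contents are made up for the test):

#include <stdint.h>
#include <assert.h>

typedef uint8_t  u8;
typedef uint32_t u32;

/* Same arithmetic as e1000_calculate_mng_checksum() above. */
static u8 mng_checksum(const char *buffer, u32 length)
{
        u8 sum = 0;
        u32 i;

        if (!buffer)
                return 0;
        for (i = 0; i < length; i++)
                sum += buffer[i];
        return (u8)(0 - sum);
}

int main(void)
{
        char cookie[16] = "dhcp-cookie";       /* arbitrary example payload */
        u8 csum = mng_checksum(cookie, sizeof(cookie) - 1);
        u8 total = 0;
        u32 i;

        /* Appending the checksum makes the byte sum wrap to zero. */
        cookie[sizeof(cookie) - 1] = (char)csum;
        for (i = 0; i < sizeof(cookie); i++)
                total += (u8)cookie[i];
        assert(total == 0);
        return 0;
}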
@@ -7769,10 +7769,10 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
7769 | { | 7769 | { |
7770 | /* called in init as well as watchdog timer functions */ | 7770 | /* called in init as well as watchdog timer functions */ |
7771 | 7771 | ||
7772 | int32_t ret_val, checksum; | 7772 | s32 ret_val, checksum; |
7773 | bool tx_filter = false; | 7773 | bool tx_filter = false; |
7774 | struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); | 7774 | struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); |
7775 | uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); | 7775 | u8 *buffer = (u8 *) &(hw->mng_cookie); |
7776 | 7776 | ||
7777 | if (e1000_check_mng_mode(hw)) { | 7777 | if (e1000_check_mng_mode(hw)) { |
7778 | ret_val = e1000_mng_enable_host_if(hw); | 7778 | ret_val = e1000_mng_enable_host_if(hw); |
@@ -7806,11 +7806,11 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
7806 | * returns: - true/false | 7806 | * returns: - true/false |
7807 | * | 7807 | * |
7808 | *****************************************************************************/ | 7808 | *****************************************************************************/ |
7809 | uint32_t | 7809 | u32 |
7810 | e1000_enable_mng_pass_thru(struct e1000_hw *hw) | 7810 | e1000_enable_mng_pass_thru(struct e1000_hw *hw) |
7811 | { | 7811 | { |
7812 | uint32_t manc; | 7812 | u32 manc; |
7813 | uint32_t fwsm, factps; | 7813 | u32 fwsm, factps; |
7814 | 7814 | ||
7815 | if (hw->asf_firmware_present) { | 7815 | if (hw->asf_firmware_present) { |
7816 | manc = E1000_READ_REG(hw, MANC); | 7816 | manc = E1000_READ_REG(hw, MANC); |
@@ -7832,12 +7832,12 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw) | |||
7832 | return false; | 7832 | return false; |
7833 | } | 7833 | } |
7834 | 7834 | ||
7835 | static int32_t | 7835 | static s32 |
7836 | e1000_polarity_reversal_workaround(struct e1000_hw *hw) | 7836 | e1000_polarity_reversal_workaround(struct e1000_hw *hw) |
7837 | { | 7837 | { |
7838 | int32_t ret_val; | 7838 | s32 ret_val; |
7839 | uint16_t mii_status_reg; | 7839 | u16 mii_status_reg; |
7840 | uint16_t i; | 7840 | u16 i; |
7841 | 7841 | ||
7842 | /* Polarity reversal workaround for forced 10F/10H links. */ | 7842 | /* Polarity reversal workaround for forced 10F/10H links. */ |
7843 | 7843 | ||
@@ -7929,7 +7929,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw) | |||
7929 | static void | 7929 | static void |
7930 | e1000_set_pci_express_master_disable(struct e1000_hw *hw) | 7930 | e1000_set_pci_express_master_disable(struct e1000_hw *hw) |
7931 | { | 7931 | { |
7932 | uint32_t ctrl; | 7932 | u32 ctrl; |
7933 | 7933 | ||
7934 | DEBUGFUNC("e1000_set_pci_express_master_disable"); | 7934 | DEBUGFUNC("e1000_set_pci_express_master_disable"); |
7935 | 7935 | ||
@@ -7952,10 +7952,10 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7952 | * E1000_SUCCESS master requests disabled. | 7952 | * E1000_SUCCESS master requests disabled. |
7953 | * | 7953 | * |
7954 | ******************************************************************************/ | 7954 | ******************************************************************************/ |
7955 | int32_t | 7955 | s32 |
7956 | e1000_disable_pciex_master(struct e1000_hw *hw) | 7956 | e1000_disable_pciex_master(struct e1000_hw *hw) |
7957 | { | 7957 | { |
7958 | int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ | 7958 | s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ |
7959 | 7959 | ||
7960 | DEBUGFUNC("e1000_disable_pciex_master"); | 7960 | DEBUGFUNC("e1000_disable_pciex_master"); |
7961 | 7961 | ||
@@ -7990,10 +7990,10 @@ e1000_disable_pciex_master(struct e1000_hw *hw) | |||
7990 | * E1000_SUCCESS at any other case. | 7990 | * E1000_SUCCESS at any other case. |
7991 | * | 7991 | * |
7992 | ******************************************************************************/ | 7992 | ******************************************************************************/ |
7993 | static int32_t | 7993 | static s32 |
7994 | e1000_get_auto_rd_done(struct e1000_hw *hw) | 7994 | e1000_get_auto_rd_done(struct e1000_hw *hw) |
7995 | { | 7995 | { |
7996 | int32_t timeout = AUTO_READ_DONE_TIMEOUT; | 7996 | s32 timeout = AUTO_READ_DONE_TIMEOUT; |
7997 | 7997 | ||
7998 | DEBUGFUNC("e1000_get_auto_rd_done"); | 7998 | DEBUGFUNC("e1000_get_auto_rd_done"); |
7999 | 7999 | ||
@@ -8038,11 +8038,11 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
8038 | * E1000_SUCCESS at any other case. | 8038 | * E1000_SUCCESS at any other case. |
8039 | * | 8039 | * |
8040 | ***************************************************************************/ | 8040 | ***************************************************************************/ |
8041 | static int32_t | 8041 | static s32 |
8042 | e1000_get_phy_cfg_done(struct e1000_hw *hw) | 8042 | e1000_get_phy_cfg_done(struct e1000_hw *hw) |
8043 | { | 8043 | { |
8044 | int32_t timeout = PHY_CFG_TIMEOUT; | 8044 | s32 timeout = PHY_CFG_TIMEOUT; |
8045 | uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; | 8045 | u32 cfg_mask = E1000_EEPROM_CFG_DONE; |
8046 | 8046 | ||
8047 | DEBUGFUNC("e1000_get_phy_cfg_done"); | 8047 | DEBUGFUNC("e1000_get_phy_cfg_done"); |
8048 | 8048 | ||
@@ -8085,11 +8085,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
8085 | * E1000_SUCCESS at any other case. | 8085 | * E1000_SUCCESS at any other case. |
8086 | * | 8086 | * |
8087 | ***************************************************************************/ | 8087 | ***************************************************************************/ |
8088 | static int32_t | 8088 | static s32 |
8089 | e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | 8089 | e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) |
8090 | { | 8090 | { |
8091 | int32_t timeout; | 8091 | s32 timeout; |
8092 | uint32_t swsm; | 8092 | u32 swsm; |
8093 | 8093 | ||
8094 | DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); | 8094 | DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); |
8095 | 8095 | ||
@@ -8138,7 +8138,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8138 | static void | 8138 | static void |
8139 | e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | 8139 | e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) |
8140 | { | 8140 | { |
8141 | uint32_t swsm; | 8141 | u32 swsm; |
8142 | 8142 | ||
8143 | DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); | 8143 | DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); |
8144 | 8144 | ||
@@ -8164,11 +8164,11 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8164 | * E1000_SUCCESS at any other case. | 8164 | * E1000_SUCCESS at any other case. |
8165 | * | 8165 | * |
8166 | ***************************************************************************/ | 8166 | ***************************************************************************/ |
8167 | static int32_t | 8167 | static s32 |
8168 | e1000_get_software_semaphore(struct e1000_hw *hw) | 8168 | e1000_get_software_semaphore(struct e1000_hw *hw) |
8169 | { | 8169 | { |
8170 | int32_t timeout = hw->eeprom.word_size + 1; | 8170 | s32 timeout = hw->eeprom.word_size + 1; |
8171 | uint32_t swsm; | 8171 | u32 swsm; |
8172 | 8172 | ||
8173 | DEBUGFUNC("e1000_get_software_semaphore"); | 8173 | DEBUGFUNC("e1000_get_software_semaphore"); |
8174 | 8174 | ||
@@ -8203,7 +8203,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) | |||
8203 | static void | 8203 | static void |
8204 | e1000_release_software_semaphore(struct e1000_hw *hw) | 8204 | e1000_release_software_semaphore(struct e1000_hw *hw) |
8205 | { | 8205 | { |
8206 | uint32_t swsm; | 8206 | u32 swsm; |
8207 | 8207 | ||
8208 | DEBUGFUNC("e1000_release_software_semaphore"); | 8208 | DEBUGFUNC("e1000_release_software_semaphore"); |
8209 | 8209 | ||
@@ -8228,11 +8228,11 @@ e1000_release_software_semaphore(struct e1000_hw *hw) | |||
8228 | * E1000_SUCCESS | 8228 | * E1000_SUCCESS |
8229 | * | 8229 | * |
8230 | *****************************************************************************/ | 8230 | *****************************************************************************/ |
8231 | int32_t | 8231 | s32 |
8232 | e1000_check_phy_reset_block(struct e1000_hw *hw) | 8232 | e1000_check_phy_reset_block(struct e1000_hw *hw) |
8233 | { | 8233 | { |
8234 | uint32_t manc = 0; | 8234 | u32 manc = 0; |
8235 | uint32_t fwsm = 0; | 8235 | u32 fwsm = 0; |
8236 | 8236 | ||
8237 | if (hw->mac_type == e1000_ich8lan) { | 8237 | if (hw->mac_type == e1000_ich8lan) { |
8238 | fwsm = E1000_READ_REG(hw, FWSM); | 8238 | fwsm = E1000_READ_REG(hw, FWSM); |
@@ -8246,10 +8246,10 @@ e1000_check_phy_reset_block(struct e1000_hw *hw) | |||
8246 | E1000_BLK_PHY_RESET : E1000_SUCCESS; | 8246 | E1000_BLK_PHY_RESET : E1000_SUCCESS; |
8247 | } | 8247 | } |
8248 | 8248 | ||
8249 | static uint8_t | 8249 | static u8 |
8250 | e1000_arc_subsystem_valid(struct e1000_hw *hw) | 8250 | e1000_arc_subsystem_valid(struct e1000_hw *hw) |
8251 | { | 8251 | { |
8252 | uint32_t fwsm; | 8252 | u32 fwsm; |
8253 | 8253 | ||
8254 | /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC | 8254 | /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC |
8255 | * may not be provided a DMA clock when no manageability features are | 8255 | * may not be provided a DMA clock when no manageability features are |
@@ -8283,10 +8283,10 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
8283 | * returns: E1000_SUCCESS | 8283 | * returns: E1000_SUCCESS |
8284 | * | 8284 | * |
8285 | *****************************************************************************/ | 8285 | *****************************************************************************/ |
8286 | static int32_t | 8286 | static s32 |
8287 | e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) | 8287 | e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) |
8288 | { | 8288 | { |
8289 | uint32_t gcr_reg = 0; | 8289 | u32 gcr_reg = 0; |
8290 | 8290 | ||
8291 | DEBUGFUNC("e1000_set_pci_ex_no_snoop"); | 8291 | DEBUGFUNC("e1000_set_pci_ex_no_snoop"); |
8292 | 8292 | ||
@@ -8303,7 +8303,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) | |||
8303 | E1000_WRITE_REG(hw, GCR, gcr_reg); | 8303 | E1000_WRITE_REG(hw, GCR, gcr_reg); |
8304 | } | 8304 | } |
8305 | if (hw->mac_type == e1000_ich8lan) { | 8305 | if (hw->mac_type == e1000_ich8lan) { |
8306 | uint32_t ctrl_ext; | 8306 | u32 ctrl_ext; |
8307 | 8307 | ||
8308 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); | 8308 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); |
8309 | 8309 | ||
@@ -8324,11 +8324,11 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) | |||
8324 | * hw: Struct containing variables accessed by shared code | 8324 | * hw: Struct containing variables accessed by shared code |
8325 | * | 8325 | * |
8326 | ***************************************************************************/ | 8326 | ***************************************************************************/ |
8327 | static int32_t | 8327 | static s32 |
8328 | e1000_get_software_flag(struct e1000_hw *hw) | 8328 | e1000_get_software_flag(struct e1000_hw *hw) |
8329 | { | 8329 | { |
8330 | int32_t timeout = PHY_CFG_TIMEOUT; | 8330 | s32 timeout = PHY_CFG_TIMEOUT; |
8331 | uint32_t extcnf_ctrl; | 8331 | u32 extcnf_ctrl; |
8332 | 8332 | ||
8333 | DEBUGFUNC("e1000_get_software_flag"); | 8333 | DEBUGFUNC("e1000_get_software_flag"); |
8334 | 8334 | ||
@@ -8366,7 +8366,7 @@ e1000_get_software_flag(struct e1000_hw *hw) | |||
8366 | static void | 8366 | static void |
8367 | e1000_release_software_flag(struct e1000_hw *hw) | 8367 | e1000_release_software_flag(struct e1000_hw *hw) |
8368 | { | 8368 | { |
8369 | uint32_t extcnf_ctrl; | 8369 | u32 extcnf_ctrl; |
8370 | 8370 | ||
8371 | DEBUGFUNC("e1000_release_software_flag"); | 8371 | DEBUGFUNC("e1000_release_software_flag"); |
8372 | 8372 | ||
@@ -8388,16 +8388,16 @@ e1000_release_software_flag(struct e1000_hw *hw) | |||
8388 | * data - word read from the EEPROM | 8388 | * data - word read from the EEPROM |
8389 | * words - number of words to read | 8389 | * words - number of words to read |
8390 | *****************************************************************************/ | 8390 | *****************************************************************************/ |
8391 | static int32_t | 8391 | static s32 |
8392 | e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | 8392 | e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, |
8393 | uint16_t *data) | 8393 | u16 *data) |
8394 | { | 8394 | { |
8395 | int32_t error = E1000_SUCCESS; | 8395 | s32 error = E1000_SUCCESS; |
8396 | uint32_t flash_bank = 0; | 8396 | u32 flash_bank = 0; |
8397 | uint32_t act_offset = 0; | 8397 | u32 act_offset = 0; |
8398 | uint32_t bank_offset = 0; | 8398 | u32 bank_offset = 0; |
8399 | uint16_t word = 0; | 8399 | u16 word = 0; |
8400 | uint16_t i = 0; | 8400 | u16 i = 0; |
8401 | 8401 | ||
8402 | /* We need to know which is the valid flash bank. In the event | 8402 | /* We need to know which is the valid flash bank. In the event |
8403 | * that we didn't allocate eeprom_shadow_ram, we may not be | 8403 | * that we didn't allocate eeprom_shadow_ram, we may not be |
@@ -8444,12 +8444,12 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | |||
8444 | * words - number of words to write | 8444 | * words - number of words to write |
8445 | * data - words to write to the EEPROM | 8445 | * data - words to write to the EEPROM |
8446 | *****************************************************************************/ | 8446 | *****************************************************************************/ |
8447 | static int32_t | 8447 | static s32 |
8448 | e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | 8448 | e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, |
8449 | uint16_t *data) | 8449 | u16 *data) |
8450 | { | 8450 | { |
8451 | uint32_t i = 0; | 8451 | u32 i = 0; |
8452 | int32_t error = E1000_SUCCESS; | 8452 | s32 error = E1000_SUCCESS; |
8453 | 8453 | ||
8454 | error = e1000_get_software_flag(hw); | 8454 | error = e1000_get_software_flag(hw); |
8455 | if (error != E1000_SUCCESS) | 8455 | if (error != E1000_SUCCESS) |
@@ -8491,12 +8491,12 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | |||
8491 | * | 8491 | * |
8492 | * hw - The pointer to the hw structure | 8492 | * hw - The pointer to the hw structure |
8493 | ****************************************************************************/ | 8493 | ****************************************************************************/ |
8494 | static int32_t | 8494 | static s32 |
8495 | e1000_ich8_cycle_init(struct e1000_hw *hw) | 8495 | e1000_ich8_cycle_init(struct e1000_hw *hw) |
8496 | { | 8496 | { |
8497 | union ich8_hws_flash_status hsfsts; | 8497 | union ich8_hws_flash_status hsfsts; |
8498 | int32_t error = E1000_ERR_EEPROM; | 8498 | s32 error = E1000_ERR_EEPROM; |
8499 | int32_t i = 0; | 8499 | s32 i = 0; |
8500 | 8500 | ||
8501 | DEBUGFUNC("e1000_ich8_cycle_init"); | 8501 | DEBUGFUNC("e1000_ich8_cycle_init"); |
8502 | 8502 | ||
@@ -8558,13 +8558,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8558 | * | 8558 | * |
8559 | * hw - The pointer to the hw structure | 8559 | * hw - The pointer to the hw structure |
8560 | ****************************************************************************/ | 8560 | ****************************************************************************/ |
8561 | static int32_t | 8561 | static s32 |
8562 | e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) | 8562 | e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout) |
8563 | { | 8563 | { |
8564 | union ich8_hws_flash_ctrl hsflctl; | 8564 | union ich8_hws_flash_ctrl hsflctl; |
8565 | union ich8_hws_flash_status hsfsts; | 8565 | union ich8_hws_flash_status hsfsts; |
8566 | int32_t error = E1000_ERR_EEPROM; | 8566 | s32 error = E1000_ERR_EEPROM; |
8567 | uint32_t i = 0; | 8567 | u32 i = 0; |
8568 | 8568 | ||
8569 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | 8569 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ |
8570 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); | 8570 | hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); |
@@ -8593,16 +8593,16 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) | |||
8593 | * size - Size of data to read, 1=byte 2=word | 8593 | * size - Size of data to read, 1=byte 2=word |
8594 | * data - Pointer to the word to store the value read. | 8594 | * data - Pointer to the word to store the value read. |
8595 | *****************************************************************************/ | 8595 | *****************************************************************************/ |
8596 | static int32_t | 8596 | static s32 |
8597 | e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | 8597 | e1000_read_ich8_data(struct e1000_hw *hw, u32 index, |
8598 | uint32_t size, uint16_t* data) | 8598 | u32 size, u16* data) |
8599 | { | 8599 | { |
8600 | union ich8_hws_flash_status hsfsts; | 8600 | union ich8_hws_flash_status hsfsts; |
8601 | union ich8_hws_flash_ctrl hsflctl; | 8601 | union ich8_hws_flash_ctrl hsflctl; |
8602 | uint32_t flash_linear_address; | 8602 | u32 flash_linear_address; |
8603 | uint32_t flash_data = 0; | 8603 | u32 flash_data = 0; |
8604 | int32_t error = -E1000_ERR_EEPROM; | 8604 | s32 error = -E1000_ERR_EEPROM; |
8605 | int32_t count = 0; | 8605 | s32 count = 0; |
8606 | 8606 | ||
8607 | DEBUGFUNC("e1000_read_ich8_data"); | 8607 | DEBUGFUNC("e1000_read_ich8_data"); |
8608 | 8608 | ||
@@ -8640,9 +8640,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8640 | if (error == E1000_SUCCESS) { | 8640 | if (error == E1000_SUCCESS) { |
8641 | flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0); | 8641 | flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0); |
8642 | if (size == 1) { | 8642 | if (size == 1) { |
8643 | *data = (uint8_t)(flash_data & 0x000000FF); | 8643 | *data = (u8)(flash_data & 0x000000FF); |
8644 | } else if (size == 2) { | 8644 | } else if (size == 2) { |
8645 | *data = (uint16_t)(flash_data & 0x0000FFFF); | 8645 | *data = (u16)(flash_data & 0x0000FFFF); |
8646 | } | 8646 | } |
8647 | break; | 8647 | break; |
8648 | } else { | 8648 | } else { |
@@ -8672,16 +8672,16 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | |||
8672 | * size - Size of data to read, 1=byte 2=word | 8672 | * size - Size of data to read, 1=byte 2=word |
8673 | * data - The byte(s) to write to the NVM. | 8673 | * data - The byte(s) to write to the NVM. |
8674 | *****************************************************************************/ | 8674 | *****************************************************************************/ |
8675 | static int32_t | 8675 | static s32 |
8676 | e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | 8676 | e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, |
8677 | uint16_t data) | 8677 | u16 data) |
8678 | { | 8678 | { |
8679 | union ich8_hws_flash_status hsfsts; | 8679 | union ich8_hws_flash_status hsfsts; |
8680 | union ich8_hws_flash_ctrl hsflctl; | 8680 | union ich8_hws_flash_ctrl hsflctl; |
8681 | uint32_t flash_linear_address; | 8681 | u32 flash_linear_address; |
8682 | uint32_t flash_data = 0; | 8682 | u32 flash_data = 0; |
8683 | int32_t error = -E1000_ERR_EEPROM; | 8683 | s32 error = -E1000_ERR_EEPROM; |
8684 | int32_t count = 0; | 8684 | s32 count = 0; |
8685 | 8685 | ||
8686 | DEBUGFUNC("e1000_write_ich8_data"); | 8686 | DEBUGFUNC("e1000_write_ich8_data"); |
8687 | 8687 | ||
@@ -8710,9 +8710,9 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | |||
8710 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); | 8710 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); |
8711 | 8711 | ||
8712 | if (size == 1) | 8712 | if (size == 1) |
8713 | flash_data = (uint32_t)data & 0x00FF; | 8713 | flash_data = (u32)data & 0x00FF; |
8714 | else | 8714 | else |
8715 | flash_data = (uint32_t)data; | 8715 | flash_data = (u32)data; |
8716 | 8716 | ||
8717 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); | 8717 | E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); |
8718 | 8718 | ||
@@ -8747,15 +8747,15 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | |||
8747 | * index - The index of the byte to read. | 8747 | * index - The index of the byte to read. |
8748 | * data - Pointer to a byte to store the value read. | 8748 | * data - Pointer to a byte to store the value read. |
8749 | *****************************************************************************/ | 8749 | *****************************************************************************/ |
8750 | static int32_t | 8750 | static s32 |
8751 | e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) | 8751 | e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data) |
8752 | { | 8752 | { |
8753 | int32_t status = E1000_SUCCESS; | 8753 | s32 status = E1000_SUCCESS; |
8754 | uint16_t word = 0; | 8754 | u16 word = 0; |
8755 | 8755 | ||
8756 | status = e1000_read_ich8_data(hw, index, 1, &word); | 8756 | status = e1000_read_ich8_data(hw, index, 1, &word); |
8757 | if (status == E1000_SUCCESS) { | 8757 | if (status == E1000_SUCCESS) { |
8758 | *data = (uint8_t)word; | 8758 | *data = (u8)word; |
8759 | } | 8759 | } |
8760 | 8760 | ||
8761 | return status; | 8761 | return status; |
@@ -8770,11 +8770,11 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) | |||
8770 | * index - The index of the byte to write. | 8770 | * index - The index of the byte to write. |
8771 | * byte - The byte to write to the NVM. | 8771 | * byte - The byte to write to the NVM. |
8772 | *****************************************************************************/ | 8772 | *****************************************************************************/ |
8773 | static int32_t | 8773 | static s32 |
8774 | e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) | 8774 | e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte) |
8775 | { | 8775 | { |
8776 | int32_t error = E1000_SUCCESS; | 8776 | s32 error = E1000_SUCCESS; |
8777 | int32_t program_retries = 0; | 8777 | s32 program_retries = 0; |
8778 | 8778 | ||
8779 | DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index); | 8779 | DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index); |
8780 | 8780 | ||
@@ -8803,11 +8803,11 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) | |||
8803 | * index - The index of the byte to read. | 8803 | * index - The index of the byte to read. |
8804 | * data - The byte to write to the NVM. | 8804 | * data - The byte to write to the NVM. |
8805 | *****************************************************************************/ | 8805 | *****************************************************************************/ |
8806 | static int32_t | 8806 | static s32 |
8807 | e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) | 8807 | e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data) |
8808 | { | 8808 | { |
8809 | int32_t status = E1000_SUCCESS; | 8809 | s32 status = E1000_SUCCESS; |
8810 | uint16_t word = (uint16_t)data; | 8810 | u16 word = (u16)data; |
8811 | 8811 | ||
8812 | status = e1000_write_ich8_data(hw, index, 1, word); | 8812 | status = e1000_write_ich8_data(hw, index, 1, word); |
8813 | 8813 | ||
@@ -8821,10 +8821,10 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) | |||
8821 | * index - The starting byte index of the word to read. | 8821 | * index - The starting byte index of the word to read. |
8822 | * data - Pointer to a word to store the value read. | 8822 | * data - Pointer to a word to store the value read. |
8823 | *****************************************************************************/ | 8823 | *****************************************************************************/ |
8824 | static int32_t | 8824 | static s32 |
8825 | e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) | 8825 | e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data) |
8826 | { | 8826 | { |
8827 | int32_t status = E1000_SUCCESS; | 8827 | s32 status = E1000_SUCCESS; |
8828 | status = e1000_read_ich8_data(hw, index, 2, data); | 8828 | status = e1000_read_ich8_data(hw, index, 2, data); |
8829 | return status; | 8829 | return status; |
8830 | } | 8830 | } |
@@ -8840,19 +8840,19 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) | |||
8840 | * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the | 8840 | * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the |
8841 | * bank size may be 4, 8 or 64 KBytes | 8841 | * bank size may be 4, 8 or 64 KBytes |
8842 | *****************************************************************************/ | 8842 | *****************************************************************************/ |
8843 | static int32_t | 8843 | static s32 |
8844 | e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | 8844 | e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank) |
8845 | { | 8845 | { |
8846 | union ich8_hws_flash_status hsfsts; | 8846 | union ich8_hws_flash_status hsfsts; |
8847 | union ich8_hws_flash_ctrl hsflctl; | 8847 | union ich8_hws_flash_ctrl hsflctl; |
8848 | uint32_t flash_linear_address; | 8848 | u32 flash_linear_address; |
8849 | int32_t count = 0; | 8849 | s32 count = 0; |
8850 | int32_t error = E1000_ERR_EEPROM; | 8850 | s32 error = E1000_ERR_EEPROM; |
8851 | int32_t iteration; | 8851 | s32 iteration; |
8852 | int32_t sub_sector_size = 0; | 8852 | s32 sub_sector_size = 0; |
8853 | int32_t bank_size; | 8853 | s32 bank_size; |
8854 | int32_t j = 0; | 8854 | s32 j = 0; |
8855 | int32_t error_flag = 0; | 8855 | s32 error_flag = 0; |
8856 | 8856 | ||
8857 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); | 8857 | hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); |
8858 | 8858 | ||
@@ -8930,16 +8930,16 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) | |||
8930 | return error; | 8930 | return error; |
8931 | } | 8931 | } |
8932 | 8932 | ||
8933 | static int32_t | 8933 | static s32 |
8934 | e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | 8934 | e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, |
8935 | uint32_t cnf_base_addr, uint32_t cnf_size) | 8935 | u32 cnf_base_addr, u32 cnf_size) |
8936 | { | 8936 | { |
8937 | uint32_t ret_val = E1000_SUCCESS; | 8937 | u32 ret_val = E1000_SUCCESS; |
8938 | uint16_t word_addr, reg_data, reg_addr; | 8938 | u16 word_addr, reg_data, reg_addr; |
8939 | uint16_t i; | 8939 | u16 i; |
8940 | 8940 | ||
8941 | /* cnf_base_addr is in DWORD */ | 8941 | /* cnf_base_addr is in DWORD */ |
8942 | word_addr = (uint16_t)(cnf_base_addr << 1); | 8942 | word_addr = (u16)(cnf_base_addr << 1); |
8943 | 8943 | ||
8944 | /* cnf_size is returned in size of dwords */ | 8944 | /* cnf_size is returned in size of dwords */ |
8945 | for (i = 0; i < cnf_size; i++) { | 8945 | for (i = 0; i < cnf_size; i++) { |
@@ -8955,7 +8955,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | |||
8955 | if (ret_val != E1000_SUCCESS) | 8955 | if (ret_val != E1000_SUCCESS) |
8956 | return ret_val; | 8956 | return ret_val; |
8957 | 8957 | ||
8958 | ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data); | 8958 | ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data); |
8959 | 8959 | ||
8960 | e1000_release_software_flag(hw); | 8960 | e1000_release_software_flag(hw); |
8961 | } | 8961 | } |
@@ -8972,10 +8972,10 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | |||
8972 | * | 8972 | * |
8973 | * hw: Struct containing variables accessed by shared code | 8973 | * hw: Struct containing variables accessed by shared code |
8974 | *****************************************************************************/ | 8974 | *****************************************************************************/ |
8975 | static int32_t | 8975 | static s32 |
8976 | e1000_init_lcd_from_nvm(struct e1000_hw *hw) | 8976 | e1000_init_lcd_from_nvm(struct e1000_hw *hw) |
8977 | { | 8977 | { |
8978 | uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; | 8978 | u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop; |
8979 | 8979 | ||
8980 | if (hw->phy_type != e1000_phy_igp_3) | 8980 | if (hw->phy_type != e1000_phy_igp_3) |
8981 | return E1000_SUCCESS; | 8981 | return E1000_SUCCESS; |
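Aside, not part of the patch: everything in this file, and in e1000_hw.h below, is the same mechanical substitution — the C99 fixed-width names are replaced by the kernel's u8/u16/u32/u64 and s32, which have identical widths and signedness, so no behaviour changes. In kernel code these come from <linux/types.h>; a standalone width check, with the typedefs declared locally only for the sake of the example:

#include <stdint.h>

/* Standalone stand-ins for the <linux/types.h> definitions. */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int32_t  s32;

_Static_assert(sizeof(u8)  == 1, "u8 is one byte");
_Static_assert(sizeof(u16) == 2, "u16 is two bytes");
_Static_assert(sizeof(u32) == 4, "u32 is four bytes");
_Static_assert(sizeof(u64) == 8, "u64 is eight bytes");
_Static_assert(sizeof(s32) == 4 && (s32)-1 < 0, "s32 is a signed 32-bit type");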
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 572a7b6dc12e..99fce2c5dd26 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -100,7 +100,7 @@ typedef enum { | |||
100 | } e1000_fc_type; | 100 | } e1000_fc_type; |
101 | 101 | ||
102 | struct e1000_shadow_ram { | 102 | struct e1000_shadow_ram { |
103 | uint16_t eeprom_word; | 103 | u16 eeprom_word; |
104 | bool modified; | 104 | bool modified; |
105 | }; | 105 | }; |
106 | 106 | ||
@@ -263,17 +263,17 @@ struct e1000_phy_info { | |||
263 | }; | 263 | }; |
264 | 264 | ||
265 | struct e1000_phy_stats { | 265 | struct e1000_phy_stats { |
266 | uint32_t idle_errors; | 266 | u32 idle_errors; |
267 | uint32_t receive_errors; | 267 | u32 receive_errors; |
268 | }; | 268 | }; |
269 | 269 | ||
270 | struct e1000_eeprom_info { | 270 | struct e1000_eeprom_info { |
271 | e1000_eeprom_type type; | 271 | e1000_eeprom_type type; |
272 | uint16_t word_size; | 272 | u16 word_size; |
273 | uint16_t opcode_bits; | 273 | u16 opcode_bits; |
274 | uint16_t address_bits; | 274 | u16 address_bits; |
275 | uint16_t delay_usec; | 275 | u16 delay_usec; |
276 | uint16_t page_size; | 276 | u16 page_size; |
277 | bool use_eerd; | 277 | bool use_eerd; |
278 | bool use_eewr; | 278 | bool use_eewr; |
279 | }; | 279 | }; |
@@ -308,34 +308,34 @@ typedef enum { | |||
308 | 308 | ||
309 | /* Function prototypes */ | 309 | /* Function prototypes */ |
310 | /* Initialization */ | 310 | /* Initialization */ |
311 | int32_t e1000_reset_hw(struct e1000_hw *hw); | 311 | s32 e1000_reset_hw(struct e1000_hw *hw); |
312 | int32_t e1000_init_hw(struct e1000_hw *hw); | 312 | s32 e1000_init_hw(struct e1000_hw *hw); |
313 | int32_t e1000_set_mac_type(struct e1000_hw *hw); | 313 | s32 e1000_set_mac_type(struct e1000_hw *hw); |
314 | void e1000_set_media_type(struct e1000_hw *hw); | 314 | void e1000_set_media_type(struct e1000_hw *hw); |
315 | 315 | ||
316 | /* Link Configuration */ | 316 | /* Link Configuration */ |
317 | int32_t e1000_setup_link(struct e1000_hw *hw); | 317 | s32 e1000_setup_link(struct e1000_hw *hw); |
318 | int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw); | 318 | s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); |
319 | void e1000_config_collision_dist(struct e1000_hw *hw); | 319 | void e1000_config_collision_dist(struct e1000_hw *hw); |
320 | int32_t e1000_check_for_link(struct e1000_hw *hw); | 320 | s32 e1000_check_for_link(struct e1000_hw *hw); |
321 | int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t *speed, uint16_t *duplex); | 321 | s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); |
322 | int32_t e1000_force_mac_fc(struct e1000_hw *hw); | 322 | s32 e1000_force_mac_fc(struct e1000_hw *hw); |
323 | 323 | ||
324 | /* PHY */ | 324 | /* PHY */ |
325 | int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); | 325 | s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data); |
326 | int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | 326 | s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); |
327 | int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | 327 | s32 e1000_phy_hw_reset(struct e1000_hw *hw); |
328 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 328 | s32 e1000_phy_reset(struct e1000_hw *hw); |
329 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 329 | s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
330 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 330 | s32 e1000_validate_mdi_setting(struct e1000_hw *hw); |
331 | 331 | ||
332 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); | 332 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); |
333 | 333 | ||
334 | /* EEPROM Functions */ | 334 | /* EEPROM Functions */ |
335 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); | 335 | s32 e1000_init_eeprom_params(struct e1000_hw *hw); |
336 | 336 | ||
337 | /* MNG HOST IF functions */ | 337 | /* MNG HOST IF functions */ |
338 | uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); | 338 | u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw); |
339 | 339 | ||
340 | #define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 | 340 | #define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 |
341 | #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ | 341 | #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ |
@@ -354,80 +354,80 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); | |||
354 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F | 354 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F |
355 | 355 | ||
356 | struct e1000_host_mng_command_header { | 356 | struct e1000_host_mng_command_header { |
357 | uint8_t command_id; | 357 | u8 command_id; |
358 | uint8_t checksum; | 358 | u8 checksum; |
359 | uint16_t reserved1; | 359 | u16 reserved1; |
360 | uint16_t reserved2; | 360 | u16 reserved2; |
361 | uint16_t command_length; | 361 | u16 command_length; |
362 | }; | 362 | }; |
363 | 363 | ||
364 | struct e1000_host_mng_command_info { | 364 | struct e1000_host_mng_command_info { |
365 | struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ | 365 | struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ |
366 | uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ | 366 | u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ |
367 | }; | 367 | }; |
368 | #ifdef __BIG_ENDIAN | 368 | #ifdef __BIG_ENDIAN |
369 | struct e1000_host_mng_dhcp_cookie{ | 369 | struct e1000_host_mng_dhcp_cookie{ |
370 | uint32_t signature; | 370 | u32 signature; |
371 | uint16_t vlan_id; | 371 | u16 vlan_id; |
372 | uint8_t reserved0; | 372 | u8 reserved0; |
373 | uint8_t status; | 373 | u8 status; |
374 | uint32_t reserved1; | 374 | u32 reserved1; |
375 | uint8_t checksum; | 375 | u8 checksum; |
376 | uint8_t reserved3; | 376 | u8 reserved3; |
377 | uint16_t reserved2; | 377 | u16 reserved2; |
378 | }; | 378 | }; |
379 | #else | 379 | #else |
380 | struct e1000_host_mng_dhcp_cookie{ | 380 | struct e1000_host_mng_dhcp_cookie{ |
381 | uint32_t signature; | 381 | u32 signature; |
382 | uint8_t status; | 382 | u8 status; |
383 | uint8_t reserved0; | 383 | u8 reserved0; |
384 | uint16_t vlan_id; | 384 | u16 vlan_id; |
385 | uint32_t reserved1; | 385 | u32 reserved1; |
386 | uint16_t reserved2; | 386 | u16 reserved2; |
387 | uint8_t reserved3; | 387 | u8 reserved3; |
388 | uint8_t checksum; | 388 | u8 checksum; |
389 | }; | 389 | }; |
390 | #endif | 390 | #endif |
391 | 391 | ||
392 | int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, | 392 | s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, |
393 | uint16_t length); | 393 | u16 length); |
394 | bool e1000_check_mng_mode(struct e1000_hw *hw); | 394 | bool e1000_check_mng_mode(struct e1000_hw *hw); |
395 | bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); | 395 | bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); |
396 | int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); | 396 | s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data); |
397 | int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); | 397 | s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); |
398 | int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); | 398 | s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); |
399 | int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); | 399 | s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data); |
400 | int32_t e1000_read_mac_addr(struct e1000_hw * hw); | 400 | s32 e1000_read_mac_addr(struct e1000_hw * hw); |
401 | 401 | ||
402 | /* Filters (multicast, vlan, receive) */ | 402 | /* Filters (multicast, vlan, receive) */ |
403 | uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); | 403 | u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); |
404 | void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); | 404 | void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); |
405 | void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); | 405 | void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); |
406 | void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value); | 406 | void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); |
407 | 407 | ||
408 | /* LED functions */ | 408 | /* LED functions */ |
409 | int32_t e1000_setup_led(struct e1000_hw *hw); | 409 | s32 e1000_setup_led(struct e1000_hw *hw); |
410 | int32_t e1000_cleanup_led(struct e1000_hw *hw); | 410 | s32 e1000_cleanup_led(struct e1000_hw *hw); |
411 | int32_t e1000_led_on(struct e1000_hw *hw); | 411 | s32 e1000_led_on(struct e1000_hw *hw); |
412 | int32_t e1000_led_off(struct e1000_hw *hw); | 412 | s32 e1000_led_off(struct e1000_hw *hw); |
413 | int32_t e1000_blink_led_start(struct e1000_hw *hw); | 413 | s32 e1000_blink_led_start(struct e1000_hw *hw); |
414 | 414 | ||
415 | /* Adaptive IFS Functions */ | 415 | /* Adaptive IFS Functions */ |
416 | 416 | ||
417 | /* Everything else */ | 417 | /* Everything else */ |
418 | void e1000_reset_adaptive(struct e1000_hw *hw); | 418 | void e1000_reset_adaptive(struct e1000_hw *hw); |
419 | void e1000_update_adaptive(struct e1000_hw *hw); | 419 | void e1000_update_adaptive(struct e1000_hw *hw); |
420 | void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr); | 420 | void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr); |
421 | void e1000_get_bus_info(struct e1000_hw *hw); | 421 | void e1000_get_bus_info(struct e1000_hw *hw); |
422 | void e1000_pci_set_mwi(struct e1000_hw *hw); | 422 | void e1000_pci_set_mwi(struct e1000_hw *hw); |
423 | void e1000_pci_clear_mwi(struct e1000_hw *hw); | 423 | void e1000_pci_clear_mwi(struct e1000_hw *hw); |
424 | int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value); | 424 | s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); |
425 | void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); | 425 | void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); |
426 | int e1000_pcix_get_mmrbc(struct e1000_hw *hw); | 426 | int e1000_pcix_get_mmrbc(struct e1000_hw *hw); |
427 | /* Port I/O is only supported on 82544 and newer */ | 427 | /* Port I/O is only supported on 82544 and newer */ |
428 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); | 428 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); |
429 | int32_t e1000_disable_pciex_master(struct e1000_hw *hw); | 429 | s32 e1000_disable_pciex_master(struct e1000_hw *hw); |
430 | int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | 430 | s32 e1000_check_phy_reset_block(struct e1000_hw *hw); |
431 | 431 | ||
432 | 432 | ||
433 | #define E1000_READ_REG_IO(a, reg) \ | 433 | #define E1000_READ_REG_IO(a, reg) \ |
@@ -596,8 +596,8 @@ struct e1000_rx_desc { | |||
596 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ | 596 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ |
597 | __le16 length; /* Length of data DMAed into data buffer */ | 597 | __le16 length; /* Length of data DMAed into data buffer */ |
598 | __le16 csum; /* Packet checksum */ | 598 | __le16 csum; /* Packet checksum */ |
599 | uint8_t status; /* Descriptor status */ | 599 | u8 status; /* Descriptor status */ |
600 | uint8_t errors; /* Descriptor Errors */ | 600 | u8 errors; /* Descriptor Errors */ |
601 | __le16 special; | 601 | __le16 special; |
602 | }; | 602 | }; |
603 | 603 | ||
@@ -718,15 +718,15 @@ struct e1000_tx_desc { | |||
718 | __le32 data; | 718 | __le32 data; |
719 | struct { | 719 | struct { |
720 | __le16 length; /* Data buffer length */ | 720 | __le16 length; /* Data buffer length */ |
721 | uint8_t cso; /* Checksum offset */ | 721 | u8 cso; /* Checksum offset */ |
722 | uint8_t cmd; /* Descriptor control */ | 722 | u8 cmd; /* Descriptor control */ |
723 | } flags; | 723 | } flags; |
724 | } lower; | 724 | } lower; |
725 | union { | 725 | union { |
726 | __le32 data; | 726 | __le32 data; |
727 | struct { | 727 | struct { |
728 | uint8_t status; /* Descriptor status */ | 728 | u8 status; /* Descriptor status */ |
729 | uint8_t css; /* Checksum start */ | 729 | u8 css; /* Checksum start */ |
730 | __le16 special; | 730 | __le16 special; |
731 | } fields; | 731 | } fields; |
732 | } upper; | 732 | } upper; |
@@ -759,16 +759,16 @@ struct e1000_context_desc { | |||
759 | union { | 759 | union { |
760 | __le32 ip_config; | 760 | __le32 ip_config; |
761 | struct { | 761 | struct { |
762 | uint8_t ipcss; /* IP checksum start */ | 762 | u8 ipcss; /* IP checksum start */ |
763 | uint8_t ipcso; /* IP checksum offset */ | 763 | u8 ipcso; /* IP checksum offset */ |
764 | __le16 ipcse; /* IP checksum end */ | 764 | __le16 ipcse; /* IP checksum end */ |
765 | } ip_fields; | 765 | } ip_fields; |
766 | } lower_setup; | 766 | } lower_setup; |
767 | union { | 767 | union { |
768 | __le32 tcp_config; | 768 | __le32 tcp_config; |
769 | struct { | 769 | struct { |
770 | uint8_t tucss; /* TCP checksum start */ | 770 | u8 tucss; /* TCP checksum start */ |
771 | uint8_t tucso; /* TCP checksum offset */ | 771 | u8 tucso; /* TCP checksum offset */ |
772 | __le16 tucse; /* TCP checksum end */ | 772 | __le16 tucse; /* TCP checksum end */ |
773 | } tcp_fields; | 773 | } tcp_fields; |
774 | } upper_setup; | 774 | } upper_setup; |
@@ -776,8 +776,8 @@ struct e1000_context_desc { | |||
776 | union { | 776 | union { |
777 | __le32 data; | 777 | __le32 data; |
778 | struct { | 778 | struct { |
779 | uint8_t status; /* Descriptor status */ | 779 | u8 status; /* Descriptor status */ |
780 | uint8_t hdr_len; /* Header length */ | 780 | u8 hdr_len; /* Header length */ |
781 | __le16 mss; /* Maximum segment size */ | 781 | __le16 mss; /* Maximum segment size */ |
782 | } fields; | 782 | } fields; |
783 | } tcp_seg_setup; | 783 | } tcp_seg_setup; |
@@ -790,15 +790,15 @@ struct e1000_data_desc { | |||
790 | __le32 data; | 790 | __le32 data; |
791 | struct { | 791 | struct { |
792 | __le16 length; /* Data buffer length */ | 792 | __le16 length; /* Data buffer length */ |
793 | uint8_t typ_len_ext; /* */ | 793 | u8 typ_len_ext; /* */ |
794 | uint8_t cmd; /* */ | 794 | u8 cmd; /* */ |
795 | } flags; | 795 | } flags; |
796 | } lower; | 796 | } lower; |
797 | union { | 797 | union { |
798 | __le32 data; | 798 | __le32 data; |
799 | struct { | 799 | struct { |
800 | uint8_t status; /* Descriptor status */ | 800 | u8 status; /* Descriptor status */ |
801 | uint8_t popts; /* Packet Options */ | 801 | u8 popts; /* Packet Options */ |
802 | __le16 special; /* */ | 802 | __le16 special; /* */ |
803 | } fields; | 803 | } fields; |
804 | } upper; | 804 | } upper; |
@@ -825,8 +825,8 @@ struct e1000_rar { | |||
825 | 825 | ||
826 | /* IPv4 Address Table Entry */ | 826 | /* IPv4 Address Table Entry */ |
827 | struct e1000_ipv4_at_entry { | 827 | struct e1000_ipv4_at_entry { |
828 | volatile uint32_t ipv4_addr; /* IP Address (RW) */ | 828 | volatile u32 ipv4_addr; /* IP Address (RW) */ |
829 | volatile uint32_t reserved; | 829 | volatile u32 reserved; |
830 | }; | 830 | }; |
831 | 831 | ||
832 | /* Four wakeup IP addresses are supported */ | 832 | /* Four wakeup IP addresses are supported */ |
@@ -837,25 +837,25 @@ struct e1000_ipv4_at_entry { | |||
837 | 837 | ||
838 | /* IPv6 Address Table Entry */ | 838 | /* IPv6 Address Table Entry */ |
839 | struct e1000_ipv6_at_entry { | 839 | struct e1000_ipv6_at_entry { |
840 | volatile uint8_t ipv6_addr[16]; | 840 | volatile u8 ipv6_addr[16]; |
841 | }; | 841 | }; |
842 | 842 | ||
843 | /* Flexible Filter Length Table Entry */ | 843 | /* Flexible Filter Length Table Entry */ |
844 | struct e1000_fflt_entry { | 844 | struct e1000_fflt_entry { |
845 | volatile uint32_t length; /* Flexible Filter Length (RW) */ | 845 | volatile u32 length; /* Flexible Filter Length (RW) */ |
846 | volatile uint32_t reserved; | 846 | volatile u32 reserved; |
847 | }; | 847 | }; |
848 | 848 | ||
849 | /* Flexible Filter Mask Table Entry */ | 849 | /* Flexible Filter Mask Table Entry */ |
850 | struct e1000_ffmt_entry { | 850 | struct e1000_ffmt_entry { |
851 | volatile uint32_t mask; /* Flexible Filter Mask (RW) */ | 851 | volatile u32 mask; /* Flexible Filter Mask (RW) */ |
852 | volatile uint32_t reserved; | 852 | volatile u32 reserved; |
853 | }; | 853 | }; |
854 | 854 | ||
855 | /* Flexible Filter Value Table Entry */ | 855 | /* Flexible Filter Value Table Entry */ |
856 | struct e1000_ffvt_entry { | 856 | struct e1000_ffvt_entry { |
857 | volatile uint32_t value; /* Flexible Filter Value (RW) */ | 857 | volatile u32 value; /* Flexible Filter Value (RW) */ |
858 | volatile uint32_t reserved; | 858 | volatile u32 reserved; |
859 | }; | 859 | }; |
860 | 860 | ||
861 | /* Four Flexible Filters are supported */ | 861 | /* Four Flexible Filters are supported */ |
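The e1000_hw.h hunks above, and those that follow, mechanically replace the driver's uint8_t/uint16_t/uint32_t/uint64_t spellings with the kernel's native u8/u16/u32/u64 types; struct layout and register semantics are unchanged because the widths are identical. A minimal compile-time sketch of that equivalence (the helper name is hypothetical, not part of the patch):

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */
	#include <linux/types.h>	/* u8, u16, u32, u64 and the uintXX_t aliases */

	/* Hypothetical check: the short kernel types match the uintXX_t
	 * names they replace, so the conversion cannot change layout.
	 */
	static inline void e1000_check_type_widths(void)
	{
		BUILD_BUG_ON(sizeof(u8)  != sizeof(uint8_t));
		BUILD_BUG_ON(sizeof(u16) != sizeof(uint16_t));
		BUILD_BUG_ON(sizeof(u32) != sizeof(uint32_t));
		BUILD_BUG_ON(sizeof(u64) != sizeof(uint64_t));
	}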
@@ -1309,89 +1309,89 @@ struct e1000_ffvt_entry { | |||
1309 | 1309 | ||
1310 | /* Statistics counters collected by the MAC */ | 1310 | /* Statistics counters collected by the MAC */ |
1311 | struct e1000_hw_stats { | 1311 | struct e1000_hw_stats { |
1312 | uint64_t crcerrs; | 1312 | u64 crcerrs; |
1313 | uint64_t algnerrc; | 1313 | u64 algnerrc; |
1314 | uint64_t symerrs; | 1314 | u64 symerrs; |
1315 | uint64_t rxerrc; | 1315 | u64 rxerrc; |
1316 | uint64_t txerrc; | 1316 | u64 txerrc; |
1317 | uint64_t mpc; | 1317 | u64 mpc; |
1318 | uint64_t scc; | 1318 | u64 scc; |
1319 | uint64_t ecol; | 1319 | u64 ecol; |
1320 | uint64_t mcc; | 1320 | u64 mcc; |
1321 | uint64_t latecol; | 1321 | u64 latecol; |
1322 | uint64_t colc; | 1322 | u64 colc; |
1323 | uint64_t dc; | 1323 | u64 dc; |
1324 | uint64_t tncrs; | 1324 | u64 tncrs; |
1325 | uint64_t sec; | 1325 | u64 sec; |
1326 | uint64_t cexterr; | 1326 | u64 cexterr; |
1327 | uint64_t rlec; | 1327 | u64 rlec; |
1328 | uint64_t xonrxc; | 1328 | u64 xonrxc; |
1329 | uint64_t xontxc; | 1329 | u64 xontxc; |
1330 | uint64_t xoffrxc; | 1330 | u64 xoffrxc; |
1331 | uint64_t xofftxc; | 1331 | u64 xofftxc; |
1332 | uint64_t fcruc; | 1332 | u64 fcruc; |
1333 | uint64_t prc64; | 1333 | u64 prc64; |
1334 | uint64_t prc127; | 1334 | u64 prc127; |
1335 | uint64_t prc255; | 1335 | u64 prc255; |
1336 | uint64_t prc511; | 1336 | u64 prc511; |
1337 | uint64_t prc1023; | 1337 | u64 prc1023; |
1338 | uint64_t prc1522; | 1338 | u64 prc1522; |
1339 | uint64_t gprc; | 1339 | u64 gprc; |
1340 | uint64_t bprc; | 1340 | u64 bprc; |
1341 | uint64_t mprc; | 1341 | u64 mprc; |
1342 | uint64_t gptc; | 1342 | u64 gptc; |
1343 | uint64_t gorcl; | 1343 | u64 gorcl; |
1344 | uint64_t gorch; | 1344 | u64 gorch; |
1345 | uint64_t gotcl; | 1345 | u64 gotcl; |
1346 | uint64_t gotch; | 1346 | u64 gotch; |
1347 | uint64_t rnbc; | 1347 | u64 rnbc; |
1348 | uint64_t ruc; | 1348 | u64 ruc; |
1349 | uint64_t rfc; | 1349 | u64 rfc; |
1350 | uint64_t roc; | 1350 | u64 roc; |
1351 | uint64_t rlerrc; | 1351 | u64 rlerrc; |
1352 | uint64_t rjc; | 1352 | u64 rjc; |
1353 | uint64_t mgprc; | 1353 | u64 mgprc; |
1354 | uint64_t mgpdc; | 1354 | u64 mgpdc; |
1355 | uint64_t mgptc; | 1355 | u64 mgptc; |
1356 | uint64_t torl; | 1356 | u64 torl; |
1357 | uint64_t torh; | 1357 | u64 torh; |
1358 | uint64_t totl; | 1358 | u64 totl; |
1359 | uint64_t toth; | 1359 | u64 toth; |
1360 | uint64_t tpr; | 1360 | u64 tpr; |
1361 | uint64_t tpt; | 1361 | u64 tpt; |
1362 | uint64_t ptc64; | 1362 | u64 ptc64; |
1363 | uint64_t ptc127; | 1363 | u64 ptc127; |
1364 | uint64_t ptc255; | 1364 | u64 ptc255; |
1365 | uint64_t ptc511; | 1365 | u64 ptc511; |
1366 | uint64_t ptc1023; | 1366 | u64 ptc1023; |
1367 | uint64_t ptc1522; | 1367 | u64 ptc1522; |
1368 | uint64_t mptc; | 1368 | u64 mptc; |
1369 | uint64_t bptc; | 1369 | u64 bptc; |
1370 | uint64_t tsctc; | 1370 | u64 tsctc; |
1371 | uint64_t tsctfc; | 1371 | u64 tsctfc; |
1372 | uint64_t iac; | 1372 | u64 iac; |
1373 | uint64_t icrxptc; | 1373 | u64 icrxptc; |
1374 | uint64_t icrxatc; | 1374 | u64 icrxatc; |
1375 | uint64_t ictxptc; | 1375 | u64 ictxptc; |
1376 | uint64_t ictxatc; | 1376 | u64 ictxatc; |
1377 | uint64_t ictxqec; | 1377 | u64 ictxqec; |
1378 | uint64_t ictxqmtc; | 1378 | u64 ictxqmtc; |
1379 | uint64_t icrxdmtc; | 1379 | u64 icrxdmtc; |
1380 | uint64_t icrxoc; | 1380 | u64 icrxoc; |
1381 | }; | 1381 | }; |
1382 | 1382 | ||
1383 | /* Structure containing variables used by the shared code (e1000_hw.c) */ | 1383 | /* Structure containing variables used by the shared code (e1000_hw.c) */ |
1384 | struct e1000_hw { | 1384 | struct e1000_hw { |
1385 | uint8_t __iomem *hw_addr; | 1385 | u8 __iomem *hw_addr; |
1386 | uint8_t __iomem *flash_address; | 1386 | u8 __iomem *flash_address; |
1387 | e1000_mac_type mac_type; | 1387 | e1000_mac_type mac_type; |
1388 | e1000_phy_type phy_type; | 1388 | e1000_phy_type phy_type; |
1389 | uint32_t phy_init_script; | 1389 | u32 phy_init_script; |
1390 | e1000_media_type media_type; | 1390 | e1000_media_type media_type; |
1391 | void *back; | 1391 | void *back; |
1392 | struct e1000_shadow_ram *eeprom_shadow_ram; | 1392 | struct e1000_shadow_ram *eeprom_shadow_ram; |
1393 | uint32_t flash_bank_size; | 1393 | u32 flash_bank_size; |
1394 | uint32_t flash_base_addr; | 1394 | u32 flash_base_addr; |
1395 | e1000_fc_type fc; | 1395 | e1000_fc_type fc; |
1396 | e1000_bus_speed bus_speed; | 1396 | e1000_bus_speed bus_speed; |
1397 | e1000_bus_width bus_width; | 1397 | e1000_bus_width bus_width; |
@@ -1400,51 +1400,51 @@ struct e1000_hw { | |||
1400 | e1000_ms_type master_slave; | 1400 | e1000_ms_type master_slave; |
1401 | e1000_ms_type original_master_slave; | 1401 | e1000_ms_type original_master_slave; |
1402 | e1000_ffe_config ffe_config_state; | 1402 | e1000_ffe_config ffe_config_state; |
1403 | uint32_t asf_firmware_present; | 1403 | u32 asf_firmware_present; |
1404 | uint32_t eeprom_semaphore_present; | 1404 | u32 eeprom_semaphore_present; |
1405 | uint32_t swfw_sync_present; | 1405 | u32 swfw_sync_present; |
1406 | uint32_t swfwhw_semaphore_present; | 1406 | u32 swfwhw_semaphore_present; |
1407 | unsigned long io_base; | 1407 | unsigned long io_base; |
1408 | uint32_t phy_id; | 1408 | u32 phy_id; |
1409 | uint32_t phy_revision; | 1409 | u32 phy_revision; |
1410 | uint32_t phy_addr; | 1410 | u32 phy_addr; |
1411 | uint32_t original_fc; | 1411 | u32 original_fc; |
1412 | uint32_t txcw; | 1412 | u32 txcw; |
1413 | uint32_t autoneg_failed; | 1413 | u32 autoneg_failed; |
1414 | uint32_t max_frame_size; | 1414 | u32 max_frame_size; |
1415 | uint32_t min_frame_size; | 1415 | u32 min_frame_size; |
1416 | uint32_t mc_filter_type; | 1416 | u32 mc_filter_type; |
1417 | uint32_t num_mc_addrs; | 1417 | u32 num_mc_addrs; |
1418 | uint32_t collision_delta; | 1418 | u32 collision_delta; |
1419 | uint32_t tx_packet_delta; | 1419 | u32 tx_packet_delta; |
1420 | uint32_t ledctl_default; | 1420 | u32 ledctl_default; |
1421 | uint32_t ledctl_mode1; | 1421 | u32 ledctl_mode1; |
1422 | uint32_t ledctl_mode2; | 1422 | u32 ledctl_mode2; |
1423 | bool tx_pkt_filtering; | 1423 | bool tx_pkt_filtering; |
1424 | struct e1000_host_mng_dhcp_cookie mng_cookie; | 1424 | struct e1000_host_mng_dhcp_cookie mng_cookie; |
1425 | uint16_t phy_spd_default; | 1425 | u16 phy_spd_default; |
1426 | uint16_t autoneg_advertised; | 1426 | u16 autoneg_advertised; |
1427 | uint16_t pci_cmd_word; | 1427 | u16 pci_cmd_word; |
1428 | uint16_t fc_high_water; | 1428 | u16 fc_high_water; |
1429 | uint16_t fc_low_water; | 1429 | u16 fc_low_water; |
1430 | uint16_t fc_pause_time; | 1430 | u16 fc_pause_time; |
1431 | uint16_t current_ifs_val; | 1431 | u16 current_ifs_val; |
1432 | uint16_t ifs_min_val; | 1432 | u16 ifs_min_val; |
1433 | uint16_t ifs_max_val; | 1433 | u16 ifs_max_val; |
1434 | uint16_t ifs_step_size; | 1434 | u16 ifs_step_size; |
1435 | uint16_t ifs_ratio; | 1435 | u16 ifs_ratio; |
1436 | uint16_t device_id; | 1436 | u16 device_id; |
1437 | uint16_t vendor_id; | 1437 | u16 vendor_id; |
1438 | uint16_t subsystem_id; | 1438 | u16 subsystem_id; |
1439 | uint16_t subsystem_vendor_id; | 1439 | u16 subsystem_vendor_id; |
1440 | uint8_t revision_id; | 1440 | u8 revision_id; |
1441 | uint8_t autoneg; | 1441 | u8 autoneg; |
1442 | uint8_t mdix; | 1442 | u8 mdix; |
1443 | uint8_t forced_speed_duplex; | 1443 | u8 forced_speed_duplex; |
1444 | uint8_t wait_autoneg_complete; | 1444 | u8 wait_autoneg_complete; |
1445 | uint8_t dma_fairness; | 1445 | u8 dma_fairness; |
1446 | uint8_t mac_addr[NODE_ADDRESS_SIZE]; | 1446 | u8 mac_addr[NODE_ADDRESS_SIZE]; |
1447 | uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; | 1447 | u8 perm_mac_addr[NODE_ADDRESS_SIZE]; |
1448 | bool disable_polarity_correction; | 1448 | bool disable_polarity_correction; |
1449 | bool speed_downgraded; | 1449 | bool speed_downgraded; |
1450 | e1000_smart_speed smart_speed; | 1450 | e1000_smart_speed smart_speed; |
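Within struct e1000_hw, mac_addr and perm_mac_addr stay plain byte arrays (now u8 instead of uint8_t), so the generic etherdevice helpers continue to apply. A hedged sketch of how such an array is typically validated and adopted (hypothetical helper, not taken from this patch):

	#include <linux/etherdevice.h>	/* is_valid_ether_addr() */
	#include <linux/string.h>

	/* Hypothetical: adopt the EEPROM-provided address only if it is valid. */
	static void e1000_adopt_perm_mac(struct e1000_hw *hw)
	{
		if (is_valid_ether_addr(hw->perm_mac_addr))
			memcpy(hw->mac_addr, hw->perm_mac_addr,
			       NODE_ADDRESS_SIZE);
	}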
@@ -2165,14 +2165,14 @@ typedef enum { | |||
2165 | #define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ | 2165 | #define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ |
2166 | 2166 | ||
2167 | struct e1000_host_command_header { | 2167 | struct e1000_host_command_header { |
2168 | uint8_t command_id; | 2168 | u8 command_id; |
2169 | uint8_t command_length; | 2169 | u8 command_length; |
2170 | uint8_t command_options; /* I/F bits for command, status for return */ | 2170 | u8 command_options; /* I/F bits for command, status for return */ |
2171 | uint8_t checksum; | 2171 | u8 checksum; |
2172 | }; | 2172 | }; |
2173 | struct e1000_host_command_info { | 2173 | struct e1000_host_command_info { |
2174 | struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ | 2174 | struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ |
2175 | uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can be 0..252 bytes long */ | 2175 | u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can be 0..252 bytes long */ |
2176 | }; | 2176 | }; |
2177 | 2177 | ||
2178 | /* Host SMB register #0 */ | 2178 | /* Host SMB register #0 */ |
@@ -2495,7 +2495,7 @@ struct e1000_host_command_info { | |||
2495 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ | 2495 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ |
2496 | #define PHY_CFG_TIMEOUT 100 | 2496 | #define PHY_CFG_TIMEOUT 100 |
2497 | 2497 | ||
2498 | #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) | 2498 | #define E1000_TX_BUFFER_SIZE ((u32)1514) |
2499 | 2499 | ||
2500 | /* The carrier extension symbol, as received by the NIC. */ | 2500 | /* The carrier extension symbol, as received by the NIC. */ |
2501 | #define CARRIER_EXTENSION 0x0F | 2501 | #define CARRIER_EXTENSION 0x0F |
@@ -3312,68 +3312,68 @@ struct e1000_host_command_info { | |||
3312 | /* Offset 04h HSFSTS */ | 3312 | /* Offset 04h HSFSTS */ |
3313 | union ich8_hws_flash_status { | 3313 | union ich8_hws_flash_status { |
3314 | struct ich8_hsfsts { | 3314 | struct ich8_hsfsts { |
3315 | #ifdef E1000_BIG_ENDIAN | 3315 | #ifdef __BIG_ENDIAN |
3316 | uint16_t reserved2 :6; | 3316 | u16 reserved2 :6; |
3317 | uint16_t fldesvalid :1; | 3317 | u16 fldesvalid :1; |
3318 | uint16_t flockdn :1; | 3318 | u16 flockdn :1; |
3319 | uint16_t flcdone :1; | 3319 | u16 flcdone :1; |
3320 | uint16_t flcerr :1; | 3320 | u16 flcerr :1; |
3321 | uint16_t dael :1; | 3321 | u16 dael :1; |
3322 | uint16_t berasesz :2; | 3322 | u16 berasesz :2; |
3323 | uint16_t flcinprog :1; | 3323 | u16 flcinprog :1; |
3324 | uint16_t reserved1 :2; | 3324 | u16 reserved1 :2; |
3325 | #else | 3325 | #else |
3326 | uint16_t flcdone :1; /* bit 0 Flash Cycle Done */ | 3326 | u16 flcdone :1; /* bit 0 Flash Cycle Done */ |
3327 | uint16_t flcerr :1; /* bit 1 Flash Cycle Error */ | 3327 | u16 flcerr :1; /* bit 1 Flash Cycle Error */ |
3328 | uint16_t dael :1; /* bit 2 Direct Access error Log */ | 3328 | u16 dael :1; /* bit 2 Direct Access error Log */ |
3329 | uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */ | 3329 | u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */ |
3330 | uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */ | 3330 | u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */ |
1331 | uint16_t reserved1 :2; /* bit 7:6 Reserved */ | 1331 | u16 reserved1 :2; /* bit 7:6 Reserved */ |
1332 | uint16_t reserved2 :6; /* bit 13:8 Reserved */ | 1332 | u16 reserved2 :6; /* bit 13:8 Reserved */ |
3333 | uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */ | 3333 | u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ |
3334 | uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */ | 3334 | u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */ |
3335 | #endif | 3335 | #endif |
3336 | } hsf_status; | 3336 | } hsf_status; |
3337 | uint16_t regval; | 3337 | u16 regval; |
3338 | }; | 3338 | }; |
3339 | 3339 | ||
3340 | /* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ | 3340 | /* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ |
3341 | /* Offset 06h FLCTL */ | 3341 | /* Offset 06h FLCTL */ |
3342 | union ich8_hws_flash_ctrl { | 3342 | union ich8_hws_flash_ctrl { |
3343 | struct ich8_hsflctl { | 3343 | struct ich8_hsflctl { |
3344 | #ifdef E1000_BIG_ENDIAN | 3344 | #ifdef __BIG_ENDIAN |
3345 | uint16_t fldbcount :2; | 3345 | u16 fldbcount :2; |
3346 | uint16_t flockdn :6; | 3346 | u16 flockdn :6; |
3347 | uint16_t flcgo :1; | 3347 | u16 flcgo :1; |
3348 | uint16_t flcycle :2; | 3348 | u16 flcycle :2; |
3349 | uint16_t reserved :5; | 3349 | u16 reserved :5; |
3350 | #else | 3350 | #else |
3351 | uint16_t flcgo :1; /* 0 Flash Cycle Go */ | 3351 | u16 flcgo :1; /* 0 Flash Cycle Go */ |
3352 | uint16_t flcycle :2; /* 2:1 Flash Cycle */ | 3352 | u16 flcycle :2; /* 2:1 Flash Cycle */ |
3353 | uint16_t reserved :5; /* 7:3 Reserved */ | 3353 | u16 reserved :5; /* 7:3 Reserved */ |
3354 | uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */ | 3354 | u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ |
3355 | uint16_t flockdn :6; /* 15:10 Reserved */ | 3355 | u16 flockdn :6; /* 15:10 Reserved */ |
3356 | #endif | 3356 | #endif |
3357 | } hsf_ctrl; | 3357 | } hsf_ctrl; |
3358 | uint16_t regval; | 3358 | u16 regval; |
3359 | }; | 3359 | }; |
3360 | 3360 | ||
3361 | /* ICH8 Flash Region Access Permissions */ | 3361 | /* ICH8 Flash Region Access Permissions */ |
3362 | union ich8_hws_flash_regacc { | 3362 | union ich8_hws_flash_regacc { |
3363 | struct ich8_flracc { | 3363 | struct ich8_flracc { |
3364 | #ifdef E1000_BIG_ENDIAN | 3364 | #ifdef __BIG_ENDIAN |
3365 | uint32_t gmwag :8; | 3365 | u32 gmwag :8; |
3366 | uint32_t gmrag :8; | 3366 | u32 gmrag :8; |
3367 | uint32_t grwa :8; | 3367 | u32 grwa :8; |
3368 | uint32_t grra :8; | 3368 | u32 grra :8; |
3369 | #else | 3369 | #else |
3370 | uint32_t grra :8; /* 0:7 GbE region Read Access */ | 3370 | u32 grra :8; /* 0:7 GbE region Read Access */ |
3371 | uint32_t grwa :8; /* 8:15 GbE region Write Access */ | 3371 | u32 grwa :8; /* 8:15 GbE region Write Access */ |
3372 | uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */ | 3372 | u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ |
3373 | uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */ | 3373 | u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ |
3374 | #endif | 3374 | #endif |
3375 | } hsf_flregacc; | 3375 | } hsf_flregacc; |
3376 | uint16_t regval; | 3376 | u16 regval; |
3377 | }; | 3377 | }; |
3378 | 3378 | ||
3379 | /* Miscellaneous PHY bit definitions. */ | 3379 | /* Miscellaneous PHY bit definitions. */ |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 757d02f443a5..59579b1d8843 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -127,7 +127,7 @@ int e1000_up(struct e1000_adapter *adapter); | |||
127 | void e1000_down(struct e1000_adapter *adapter); | 127 | void e1000_down(struct e1000_adapter *adapter); |
128 | void e1000_reinit_locked(struct e1000_adapter *adapter); | 128 | void e1000_reinit_locked(struct e1000_adapter *adapter); |
129 | void e1000_reset(struct e1000_adapter *adapter); | 129 | void e1000_reset(struct e1000_adapter *adapter); |
130 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); | 130 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); |
131 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | 131 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); |
132 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | 132 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); |
133 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | 133 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); |
@@ -203,8 +203,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | |||
203 | struct sk_buff *skb); | 203 | struct sk_buff *skb); |
204 | 204 | ||
205 | static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); | 205 | static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); |
206 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 206 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); |
207 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); | 207 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); |
208 | static void e1000_restore_vlan(struct e1000_adapter *adapter); | 208 | static void e1000_restore_vlan(struct e1000_adapter *adapter); |
209 | 209 | ||
210 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | 210 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); |
@@ -368,8 +368,8 @@ static void | |||
368 | e1000_update_mng_vlan(struct e1000_adapter *adapter) | 368 | e1000_update_mng_vlan(struct e1000_adapter *adapter) |
369 | { | 369 | { |
370 | struct net_device *netdev = adapter->netdev; | 370 | struct net_device *netdev = adapter->netdev; |
371 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; | 371 | u16 vid = adapter->hw.mng_cookie.vlan_id; |
372 | uint16_t old_vid = adapter->mng_vlan_id; | 372 | u16 old_vid = adapter->mng_vlan_id; |
373 | if (adapter->vlgrp) { | 373 | if (adapter->vlgrp) { |
374 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | 374 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { |
375 | if (adapter->hw.mng_cookie.status & | 375 | if (adapter->hw.mng_cookie.status & |
@@ -379,7 +379,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
379 | } else | 379 | } else |
380 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 380 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
381 | 381 | ||
382 | if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && | 382 | if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && |
383 | (vid != old_vid) && | 383 | (vid != old_vid) && |
384 | !vlan_group_get_device(adapter->vlgrp, old_vid)) | 384 | !vlan_group_get_device(adapter->vlgrp, old_vid)) |
385 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 385 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
@@ -402,8 +402,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
402 | static void | 402 | static void |
403 | e1000_release_hw_control(struct e1000_adapter *adapter) | 403 | e1000_release_hw_control(struct e1000_adapter *adapter) |
404 | { | 404 | { |
405 | uint32_t ctrl_ext; | 405 | u32 ctrl_ext; |
406 | uint32_t swsm; | 406 | u32 swsm; |
407 | 407 | ||
408 | /* Let firmware take over control of h/w */ | 408 | /* Let firmware take over control of h/w */ |
409 | switch (adapter->hw.mac_type) { | 409 | switch (adapter->hw.mac_type) { |
@@ -439,8 +439,8 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
439 | static void | 439 | static void |
440 | e1000_get_hw_control(struct e1000_adapter *adapter) | 440 | e1000_get_hw_control(struct e1000_adapter *adapter) |
441 | { | 441 | { |
442 | uint32_t ctrl_ext; | 442 | u32 ctrl_ext; |
443 | uint32_t swsm; | 443 | u32 swsm; |
444 | 444 | ||
445 | /* Let firmware know the driver has taken over */ | 445 | /* Let firmware know the driver has taken over */ |
446 | switch (adapter->hw.mac_type) { | 446 | switch (adapter->hw.mac_type) { |
@@ -466,7 +466,7 @@ static void | |||
466 | e1000_init_manageability(struct e1000_adapter *adapter) | 466 | e1000_init_manageability(struct e1000_adapter *adapter) |
467 | { | 467 | { |
468 | if (adapter->en_mng_pt) { | 468 | if (adapter->en_mng_pt) { |
469 | uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); | 469 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); |
470 | 470 | ||
471 | /* disable hardware interception of ARP */ | 471 | /* disable hardware interception of ARP */ |
472 | manc &= ~(E1000_MANC_ARP_EN); | 472 | manc &= ~(E1000_MANC_ARP_EN); |
@@ -475,7 +475,7 @@ e1000_init_manageability(struct e1000_adapter *adapter) | |||
475 | /* this will probably generate destination unreachable messages | 475 | /* this will probably generate destination unreachable messages |
476 | * from the host OS, but the packets will be handled on SMBUS */ | 476 | * from the host OS, but the packets will be handled on SMBUS */ |
477 | if (adapter->hw.has_manc2h) { | 477 | if (adapter->hw.has_manc2h) { |
478 | uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H); | 478 | u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H); |
479 | 479 | ||
480 | manc |= E1000_MANC_EN_MNG2HOST; | 480 | manc |= E1000_MANC_EN_MNG2HOST; |
481 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | 481 | #define E1000_MNG2HOST_PORT_623 (1 << 5) |
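The manageability setup in this hunk is a plain read-modify-write of the MANC register through the accessors used throughout this file; only the local variable types change from uint32_t to u32. A condensed sketch of that pattern (hypothetical helper that mirrors, but does not reproduce, the code above):

	static void e1000_manc_sketch(struct e1000_adapter *adapter)
	{
		u32 manc = E1000_READ_REG(&adapter->hw, MANC);

		manc &= ~E1000_MANC_ARP_EN;	/* no hardware ARP interception */
		manc |= E1000_MANC_EN_MNG2HOST;	/* hand management frames to the host */
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}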
@@ -493,7 +493,7 @@ static void | |||
493 | e1000_release_manageability(struct e1000_adapter *adapter) | 493 | e1000_release_manageability(struct e1000_adapter *adapter) |
494 | { | 494 | { |
495 | if (adapter->en_mng_pt) { | 495 | if (adapter->en_mng_pt) { |
496 | uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); | 496 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); |
497 | 497 | ||
498 | /* re-enable hardware interception of ARP */ | 498 | /* re-enable hardware interception of ARP */ |
499 | manc |= E1000_MANC_ARP_EN; | 499 | manc |= E1000_MANC_ARP_EN; |
@@ -566,7 +566,7 @@ int e1000_up(struct e1000_adapter *adapter) | |||
566 | 566 | ||
567 | void e1000_power_up_phy(struct e1000_adapter *adapter) | 567 | void e1000_power_up_phy(struct e1000_adapter *adapter) |
568 | { | 568 | { |
569 | uint16_t mii_reg = 0; | 569 | u16 mii_reg = 0; |
570 | 570 | ||
571 | /* Just clear the power down bit to wake the phy back up */ | 571 | /* Just clear the power down bit to wake the phy back up */ |
572 | if (adapter->hw.media_type == e1000_media_type_copper) { | 572 | if (adapter->hw.media_type == e1000_media_type_copper) { |
@@ -587,7 +587,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
587 | * (c) SoL/IDER session is active */ | 587 | * (c) SoL/IDER session is active */ |
588 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | 588 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && |
589 | adapter->hw.media_type == e1000_media_type_copper) { | 589 | adapter->hw.media_type == e1000_media_type_copper) { |
590 | uint16_t mii_reg = 0; | 590 | u16 mii_reg = 0; |
591 | 591 | ||
592 | switch (adapter->hw.mac_type) { | 592 | switch (adapter->hw.mac_type) { |
593 | case e1000_82540: | 593 | case e1000_82540: |
@@ -667,8 +667,8 @@ e1000_reinit_locked(struct e1000_adapter *adapter) | |||
667 | void | 667 | void |
668 | e1000_reset(struct e1000_adapter *adapter) | 668 | e1000_reset(struct e1000_adapter *adapter) |
669 | { | 669 | { |
670 | uint32_t pba = 0, tx_space, min_tx_space, min_rx_space; | 670 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; |
671 | uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; | 671 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; |
672 | bool legacy_pba_adjust = false; | 672 | bool legacy_pba_adjust = false; |
673 | 673 | ||
674 | /* Repartition Pba for greater than 9k mtu | 674 | /* Repartition Pba for greater than 9k mtu |
@@ -815,7 +815,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
815 | adapter->hw.mac_type <= e1000_82547_rev_2 && | 815 | adapter->hw.mac_type <= e1000_82547_rev_2 && |
816 | adapter->hw.autoneg == 1 && | 816 | adapter->hw.autoneg == 1 && |
817 | adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { | 817 | adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { |
818 | uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 818 | u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
819 | /* clear phy power management bit if we are in gig only mode, | 819 | /* clear phy power management bit if we are in gig only mode, |
820 | * which if enabled will attempt negotiation to 100Mb, which | 820 | * which if enabled will attempt negotiation to 100Mb, which |
821 | * can cause a loss of link at power off or driver unload */ | 821 | * can cause a loss of link at power off or driver unload */ |
@@ -832,7 +832,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
832 | if (!adapter->smart_power_down && | 832 | if (!adapter->smart_power_down && |
833 | (adapter->hw.mac_type == e1000_82571 || | 833 | (adapter->hw.mac_type == e1000_82571 || |
834 | adapter->hw.mac_type == e1000_82572)) { | 834 | adapter->hw.mac_type == e1000_82572)) { |
835 | uint16_t phy_data = 0; | 835 | u16 phy_data = 0; |
836 | /* speed up time to link by disabling smart power down, ignore | 836 | /* speed up time to link by disabling smart power down, ignore |
837 | * the return value of this function because there is nothing | 837 | * the return value of this function because there is nothing |
838 | * different we would do if it failed */ | 838 | * different we would do if it failed */ |
@@ -926,8 +926,8 @@ e1000_probe(struct pci_dev *pdev, | |||
926 | static int cards_found = 0; | 926 | static int cards_found = 0; |
927 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ | 927 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ |
928 | int i, err, pci_using_dac; | 928 | int i, err, pci_using_dac; |
929 | uint16_t eeprom_data = 0; | 929 | u16 eeprom_data = 0; |
930 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; | 930 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
931 | DECLARE_MAC_BUF(mac); | 931 | DECLARE_MAC_BUF(mac); |
932 | 932 | ||
933 | if ((err = pci_enable_device(pdev))) | 933 | if ((err = pci_enable_device(pdev))) |
@@ -1702,10 +1702,10 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | |||
1702 | static void | 1702 | static void |
1703 | e1000_configure_tx(struct e1000_adapter *adapter) | 1703 | e1000_configure_tx(struct e1000_adapter *adapter) |
1704 | { | 1704 | { |
1705 | uint64_t tdba; | 1705 | u64 tdba; |
1706 | struct e1000_hw *hw = &adapter->hw; | 1706 | struct e1000_hw *hw = &adapter->hw; |
1707 | uint32_t tdlen, tctl, tipg, tarc; | 1707 | u32 tdlen, tctl, tipg, tarc; |
1708 | uint32_t ipgr1, ipgr2; | 1708 | u32 ipgr1, ipgr2; |
1709 | 1709 | ||
1710 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 1710 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
1711 | 1711 | ||
@@ -1947,10 +1947,10 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | |||
1947 | static void | 1947 | static void |
1948 | e1000_setup_rctl(struct e1000_adapter *adapter) | 1948 | e1000_setup_rctl(struct e1000_adapter *adapter) |
1949 | { | 1949 | { |
1950 | uint32_t rctl, rfctl; | 1950 | u32 rctl, rfctl; |
1951 | uint32_t psrctl = 0; | 1951 | u32 psrctl = 0; |
1952 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT | 1952 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
1953 | uint32_t pages = 0; | 1953 | u32 pages = 0; |
1954 | #endif | 1954 | #endif |
1955 | 1955 | ||
1956 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 1956 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
@@ -2065,9 +2065,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2065 | static void | 2065 | static void |
2066 | e1000_configure_rx(struct e1000_adapter *adapter) | 2066 | e1000_configure_rx(struct e1000_adapter *adapter) |
2067 | { | 2067 | { |
2068 | uint64_t rdba; | 2068 | u64 rdba; |
2069 | struct e1000_hw *hw = &adapter->hw; | 2069 | struct e1000_hw *hw = &adapter->hw; |
2070 | uint32_t rdlen, rctl, rxcsum, ctrl_ext; | 2070 | u32 rdlen, rctl, rxcsum, ctrl_ext; |
2071 | 2071 | ||
2072 | if (adapter->rx_ps_pages) { | 2072 | if (adapter->rx_ps_pages) { |
2073 | /* this is a 32 byte descriptor */ | 2073 | /* this is a 32 byte descriptor */ |
@@ -2387,7 +2387,7 @@ static void | |||
2387 | e1000_enter_82542_rst(struct e1000_adapter *adapter) | 2387 | e1000_enter_82542_rst(struct e1000_adapter *adapter) |
2388 | { | 2388 | { |
2389 | struct net_device *netdev = adapter->netdev; | 2389 | struct net_device *netdev = adapter->netdev; |
2390 | uint32_t rctl; | 2390 | u32 rctl; |
2391 | 2391 | ||
2392 | e1000_pci_clear_mwi(&adapter->hw); | 2392 | e1000_pci_clear_mwi(&adapter->hw); |
2393 | 2393 | ||
@@ -2405,7 +2405,7 @@ static void | |||
2405 | e1000_leave_82542_rst(struct e1000_adapter *adapter) | 2405 | e1000_leave_82542_rst(struct e1000_adapter *adapter) |
2406 | { | 2406 | { |
2407 | struct net_device *netdev = adapter->netdev; | 2407 | struct net_device *netdev = adapter->netdev; |
2408 | uint32_t rctl; | 2408 | u32 rctl; |
2409 | 2409 | ||
2410 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 2410 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
2411 | rctl &= ~E1000_RCTL_RST; | 2411 | rctl &= ~E1000_RCTL_RST; |
@@ -2490,8 +2490,8 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2490 | struct e1000_hw *hw = &adapter->hw; | 2490 | struct e1000_hw *hw = &adapter->hw; |
2491 | struct dev_addr_list *uc_ptr; | 2491 | struct dev_addr_list *uc_ptr; |
2492 | struct dev_addr_list *mc_ptr; | 2492 | struct dev_addr_list *mc_ptr; |
2493 | uint32_t rctl; | 2493 | u32 rctl; |
2494 | uint32_t hash_value; | 2494 | u32 hash_value; |
2495 | int i, rar_entries = E1000_RAR_ENTRIES; | 2495 | int i, rar_entries = E1000_RAR_ENTRIES; |
2496 | int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? | 2496 | int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? |
2497 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | 2497 | E1000_NUM_MTA_REGISTERS_ICH8LAN : |
@@ -2595,7 +2595,7 @@ e1000_82547_tx_fifo_stall(unsigned long data) | |||
2595 | { | 2595 | { |
2596 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2596 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2597 | struct net_device *netdev = adapter->netdev; | 2597 | struct net_device *netdev = adapter->netdev; |
2598 | uint32_t tctl; | 2598 | u32 tctl; |
2599 | 2599 | ||
2600 | if (atomic_read(&adapter->tx_fifo_stall)) { | 2600 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2601 | if ((E1000_READ_REG(&adapter->hw, TDT) == | 2601 | if ((E1000_READ_REG(&adapter->hw, TDT) == |
@@ -2637,8 +2637,8 @@ e1000_watchdog(unsigned long data) | |||
2637 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2637 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2638 | struct net_device *netdev = adapter->netdev; | 2638 | struct net_device *netdev = adapter->netdev; |
2639 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2639 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2640 | uint32_t link, tctl; | 2640 | u32 link, tctl; |
2641 | int32_t ret_val; | 2641 | s32 ret_val; |
2642 | 2642 | ||
2643 | ret_val = e1000_check_for_link(&adapter->hw); | 2643 | ret_val = e1000_check_for_link(&adapter->hw); |
2644 | if ((ret_val == E1000_ERR_PHY) && | 2644 | if ((ret_val == E1000_ERR_PHY) && |
@@ -2663,7 +2663,7 @@ e1000_watchdog(unsigned long data) | |||
2663 | 2663 | ||
2664 | if (link) { | 2664 | if (link) { |
2665 | if (!netif_carrier_ok(netdev)) { | 2665 | if (!netif_carrier_ok(netdev)) { |
2666 | uint32_t ctrl; | 2666 | u32 ctrl; |
2667 | bool txb2b = true; | 2667 | bool txb2b = true; |
2668 | e1000_get_speed_and_duplex(&adapter->hw, | 2668 | e1000_get_speed_and_duplex(&adapter->hw, |
2669 | &adapter->link_speed, | 2669 | &adapter->link_speed, |
@@ -2700,7 +2700,7 @@ e1000_watchdog(unsigned long data) | |||
2700 | if ((adapter->hw.mac_type == e1000_82571 || | 2700 | if ((adapter->hw.mac_type == e1000_82571 || |
2701 | adapter->hw.mac_type == e1000_82572) && | 2701 | adapter->hw.mac_type == e1000_82572) && |
2702 | !txb2b) { | 2702 | !txb2b) { |
2703 | uint32_t tarc0; | 2703 | u32 tarc0; |
2704 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | 2704 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); |
2705 | tarc0 &= ~(1 << 21); | 2705 | tarc0 &= ~(1 << 21); |
2706 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | 2706 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); |
@@ -2742,7 +2742,7 @@ e1000_watchdog(unsigned long data) | |||
2742 | /* make sure the receive unit is started */ | 2742 | /* make sure the receive unit is started */ |
2743 | if (adapter->hw.rx_needs_kicking) { | 2743 | if (adapter->hw.rx_needs_kicking) { |
2744 | struct e1000_hw *hw = &adapter->hw; | 2744 | struct e1000_hw *hw = &adapter->hw; |
2745 | uint32_t rctl = E1000_READ_REG(hw, RCTL); | 2745 | u32 rctl = E1000_READ_REG(hw, RCTL); |
2746 | E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); | 2746 | E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); |
2747 | } | 2747 | } |
2748 | } | 2748 | } |
@@ -2832,7 +2832,7 @@ enum latency_range { | |||
2832 | * @bytes: the number of bytes during this measurement interval | 2832 | * @bytes: the number of bytes during this measurement interval |
2833 | **/ | 2833 | **/ |
2834 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | 2834 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, |
2835 | uint16_t itr_setting, | 2835 | u16 itr_setting, |
2836 | int packets, | 2836 | int packets, |
2837 | int bytes) | 2837 | int bytes) |
2838 | { | 2838 | { |
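Only the itr_setting parameter of e1000_update_itr() changes type here (uint16_t to u16); the function itself buckets the interval's packet and byte counts into one of the driver's latency_range values for adaptive interrupt throttling. A deliberately simplified sketch of that kind of classifier (helper name and thresholds invented for illustration, not the driver's actual values; the bucket names come from enum latency_range):

	static unsigned int itr_bucket(u16 itr_setting, int packets, int bytes)
	{
		if (packets == 0)
			return itr_setting;	/* nothing seen, keep current range */
		if (bytes / packets > 1200)
			return bulk_latency;	/* large frames dominate the interval */
		return packets < 5 ? low_latency : lowest_latency;
	}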
@@ -2884,8 +2884,8 @@ update_itr_done: | |||
2884 | static void e1000_set_itr(struct e1000_adapter *adapter) | 2884 | static void e1000_set_itr(struct e1000_adapter *adapter) |
2885 | { | 2885 | { |
2886 | struct e1000_hw *hw = &adapter->hw; | 2886 | struct e1000_hw *hw = &adapter->hw; |
2887 | uint16_t current_itr; | 2887 | u16 current_itr; |
2888 | uint32_t new_itr = adapter->itr; | 2888 | u32 new_itr = adapter->itr; |
2889 | 2889 | ||
2890 | if (unlikely(hw->mac_type < e1000_82540)) | 2890 | if (unlikely(hw->mac_type < e1000_82540)) |
2891 | return; | 2891 | return; |
@@ -2959,9 +2959,9 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2959 | struct e1000_context_desc *context_desc; | 2959 | struct e1000_context_desc *context_desc; |
2960 | struct e1000_buffer *buffer_info; | 2960 | struct e1000_buffer *buffer_info; |
2961 | unsigned int i; | 2961 | unsigned int i; |
2962 | uint32_t cmd_length = 0; | 2962 | u32 cmd_length = 0; |
2963 | uint16_t ipcse = 0, tucse, mss; | 2963 | u16 ipcse = 0, tucse, mss; |
2964 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2964 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
2965 | int err; | 2965 | int err; |
2966 | 2966 | ||
2967 | if (skb_is_gso(skb)) { | 2967 | if (skb_is_gso(skb)) { |
@@ -3032,7 +3032,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3032 | struct e1000_context_desc *context_desc; | 3032 | struct e1000_context_desc *context_desc; |
3033 | struct e1000_buffer *buffer_info; | 3033 | struct e1000_buffer *buffer_info; |
3034 | unsigned int i; | 3034 | unsigned int i; |
3035 | uint8_t css; | 3035 | u8 css; |
3036 | 3036 | ||
3037 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 3037 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
3038 | css = skb_transport_offset(skb); | 3038 | css = skb_transport_offset(skb); |
@@ -3177,7 +3177,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3177 | { | 3177 | { |
3178 | struct e1000_tx_desc *tx_desc = NULL; | 3178 | struct e1000_tx_desc *tx_desc = NULL; |
3179 | struct e1000_buffer *buffer_info; | 3179 | struct e1000_buffer *buffer_info; |
3180 | uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | 3180 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
3181 | unsigned int i; | 3181 | unsigned int i; |
3182 | 3182 | ||
3183 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { | 3183 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
@@ -3241,8 +3241,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3241 | static int | 3241 | static int |
3242 | e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) | 3242 | e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) |
3243 | { | 3243 | { |
3244 | uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; | 3244 | u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; |
3245 | uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; | 3245 | u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; |
3246 | 3246 | ||
3247 | skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); | 3247 | skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); |
3248 | 3248 | ||
@@ -3269,7 +3269,7 @@ static int | |||
3269 | e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | 3269 | e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) |
3270 | { | 3270 | { |
3271 | struct e1000_hw *hw = &adapter->hw; | 3271 | struct e1000_hw *hw = &adapter->hw; |
3272 | uint16_t length, offset; | 3272 | u16 length, offset; |
3273 | if (vlan_tx_tag_present(skb)) { | 3273 | if (vlan_tx_tag_present(skb)) { |
3274 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | 3274 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
3275 | ( adapter->hw.mng_cookie.status & | 3275 | ( adapter->hw.mng_cookie.status & |
@@ -3280,17 +3280,17 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
3280 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 3280 | struct ethhdr *eth = (struct ethhdr *) skb->data; |
3281 | if ((htons(ETH_P_IP) == eth->h_proto)) { | 3281 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
3282 | const struct iphdr *ip = | 3282 | const struct iphdr *ip = |
3283 | (struct iphdr *)((uint8_t *)skb->data+14); | 3283 | (struct iphdr *)((u8 *)skb->data+14); |
3284 | if (IPPROTO_UDP == ip->protocol) { | 3284 | if (IPPROTO_UDP == ip->protocol) { |
3285 | struct udphdr *udp = | 3285 | struct udphdr *udp = |
3286 | (struct udphdr *)((uint8_t *)ip + | 3286 | (struct udphdr *)((u8 *)ip + |
3287 | (ip->ihl << 2)); | 3287 | (ip->ihl << 2)); |
3288 | if (ntohs(udp->dest) == 67) { | 3288 | if (ntohs(udp->dest) == 67) { |
3289 | offset = (uint8_t *)udp + 8 - skb->data; | 3289 | offset = (u8 *)udp + 8 - skb->data; |
3290 | length = skb->len - offset; | 3290 | length = skb->len - offset; |
3291 | 3291 | ||
3292 | return e1000_mng_write_dhcp_info(hw, | 3292 | return e1000_mng_write_dhcp_info(hw, |
3293 | (uint8_t *)udp + 8, | 3293 | (u8 *)udp + 8, |
3294 | length); | 3294 | length); |
3295 | } | 3295 | } |
3296 | } | 3296 | } |
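The DHCP-snooping path above walks the frame by hand: the IP header starts 14 bytes in (after the Ethernet header), UDP starts ihl*4 bytes after that, and the payload handed to e1000_mng_write_dhcp_info() starts 8 bytes (the UDP header) later; the casts simply move from uint8_t * to u8 *. A compressed sketch of the same offset arithmetic (hypothetical helper, assuming an untagged IPv4/UDP frame as in the hunk):

	static u16 e1000_dhcp_payload(struct sk_buff *skb, u8 **payload)
	{
		const struct iphdr *ip = (struct iphdr *)(skb->data + 14);
		struct udphdr *udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));

		*payload = (u8 *)udp + sizeof(struct udphdr);	/* skip 8-byte UDP header */
		return skb->len - (*payload - skb->data);
	}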
@@ -3370,7 +3370,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3370 | * overrun the FIFO, adjust the max buffer len if mss | 3370 | * overrun the FIFO, adjust the max buffer len if mss |
3371 | * drops. */ | 3371 | * drops. */ |
3372 | if (mss) { | 3372 | if (mss) { |
3373 | uint8_t hdr_len; | 3373 | u8 hdr_len; |
3374 | max_per_txd = min(mss << 2, max_per_txd); | 3374 | max_per_txd = min(mss << 2, max_per_txd); |
3375 | max_txd_pwr = fls(max_per_txd) - 1; | 3375 | max_txd_pwr = fls(max_per_txd) - 1; |
3376 | 3376 | ||
@@ -3557,7 +3557,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3557 | { | 3557 | { |
3558 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3558 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3559 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 3559 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
3560 | uint16_t eeprom_data = 0; | 3560 | u16 eeprom_data = 0; |
3561 | 3561 | ||
3562 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 3562 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
3563 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3563 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
@@ -3652,7 +3652,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3652 | struct e1000_hw *hw = &adapter->hw; | 3652 | struct e1000_hw *hw = &adapter->hw; |
3653 | struct pci_dev *pdev = adapter->pdev; | 3653 | struct pci_dev *pdev = adapter->pdev; |
3654 | unsigned long flags; | 3654 | unsigned long flags; |
3655 | uint16_t phy_tmp; | 3655 | u16 phy_tmp; |
3656 | 3656 | ||
3657 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | 3657 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
3658 | 3658 | ||
@@ -3829,7 +3829,7 @@ e1000_intr_msi(int irq, void *data) | |||
3829 | #ifndef CONFIG_E1000_NAPI | 3829 | #ifndef CONFIG_E1000_NAPI |
3830 | int i; | 3830 | int i; |
3831 | #endif | 3831 | #endif |
3832 | uint32_t icr = E1000_READ_REG(hw, ICR); | 3832 | u32 icr = E1000_READ_REG(hw, ICR); |
3833 | 3833 | ||
3834 | /* in NAPI mode read ICR disables interrupts using IAM */ | 3834 | /* in NAPI mode read ICR disables interrupts using IAM */ |
3835 | 3835 | ||
@@ -3841,7 +3841,7 @@ e1000_intr_msi(int irq, void *data) | |||
3841 | if (netif_carrier_ok(netdev) && | 3841 | if (netif_carrier_ok(netdev) && |
3842 | (adapter->hw.mac_type == e1000_80003es2lan)) { | 3842 | (adapter->hw.mac_type == e1000_80003es2lan)) { |
3843 | /* disable receives */ | 3843 | /* disable receives */ |
3844 | uint32_t rctl = E1000_READ_REG(hw, RCTL); | 3844 | u32 rctl = E1000_READ_REG(hw, RCTL); |
3845 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 3845 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); |
3846 | } | 3846 | } |
3847 | /* guard against interrupt when we're going down */ | 3847 | /* guard against interrupt when we're going down */ |
@@ -3888,7 +3888,7 @@ e1000_intr(int irq, void *data) | |||
3888 | struct net_device *netdev = data; | 3888 | struct net_device *netdev = data; |
3889 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3889 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3890 | struct e1000_hw *hw = &adapter->hw; | 3890 | struct e1000_hw *hw = &adapter->hw; |
3891 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); | 3891 | u32 rctl, icr = E1000_READ_REG(hw, ICR); |
3892 | #ifndef CONFIG_E1000_NAPI | 3892 | #ifndef CONFIG_E1000_NAPI |
3893 | int i; | 3893 | int i; |
3894 | #endif | 3894 | #endif |
@@ -4139,11 +4139,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4139 | 4139 | ||
4140 | static void | 4140 | static void |
4141 | e1000_rx_checksum(struct e1000_adapter *adapter, | 4141 | e1000_rx_checksum(struct e1000_adapter *adapter, |
4142 | uint32_t status_err, uint32_t csum, | 4142 | u32 status_err, u32 csum, |
4143 | struct sk_buff *skb) | 4143 | struct sk_buff *skb) |
4144 | { | 4144 | { |
4145 | uint16_t status = (uint16_t)status_err; | 4145 | u16 status = (u16)status_err; |
4146 | uint8_t errors = (uint8_t)(status_err >> 24); | 4146 | u8 errors = (u8)(status_err >> 24); |
4147 | skb->ip_summed = CHECKSUM_NONE; | 4147 | skb->ip_summed = CHECKSUM_NONE; |
4148 | 4148 | ||
4149 | /* 82543 or newer only */ | 4149 | /* 82543 or newer only */ |
@@ -4200,8 +4200,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4200 | struct e1000_rx_desc *rx_desc, *next_rxd; | 4200 | struct e1000_rx_desc *rx_desc, *next_rxd; |
4201 | struct e1000_buffer *buffer_info, *next_buffer; | 4201 | struct e1000_buffer *buffer_info, *next_buffer; |
4202 | unsigned long flags; | 4202 | unsigned long flags; |
4203 | uint32_t length; | 4203 | u32 length; |
4204 | uint8_t last_byte; | 4204 | u8 last_byte; |
4205 | unsigned int i; | 4205 | unsigned int i; |
4206 | int cleaned_count = 0; | 4206 | int cleaned_count = 0; |
4207 | bool cleaned = false; | 4207 | bool cleaned = false; |
@@ -4301,8 +4301,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4301 | 4301 | ||
4302 | /* Receive Checksum Offload */ | 4302 | /* Receive Checksum Offload */ |
4303 | e1000_rx_checksum(adapter, | 4303 | e1000_rx_checksum(adapter, |
4304 | (uint32_t)(status) | | 4304 | (u32)(status) | |
4305 | ((uint32_t)(rx_desc->errors) << 24), | 4305 | ((u32)(rx_desc->errors) << 24), |
4306 | le16_to_cpu(rx_desc->csum), skb); | 4306 | le16_to_cpu(rx_desc->csum), skb); |
4307 | 4307 | ||
4308 | skb->protocol = eth_type_trans(skb, netdev); | 4308 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -4376,7 +4376,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4376 | struct e1000_ps_page_dma *ps_page_dma; | 4376 | struct e1000_ps_page_dma *ps_page_dma; |
4377 | struct sk_buff *skb; | 4377 | struct sk_buff *skb; |
4378 | unsigned int i, j; | 4378 | unsigned int i, j; |
4379 | uint32_t length, staterr; | 4379 | u32 length, staterr; |
4380 | int cleaned_count = 0; | 4380 | int cleaned_count = 0; |
4381 | bool cleaned = false; | 4381 | bool cleaned = false; |
4382 | unsigned int total_rx_bytes=0, total_rx_packets=0; | 4382 | unsigned int total_rx_bytes=0, total_rx_packets=0; |
@@ -4759,8 +4759,8 @@ no_buffers: | |||
4759 | static void | 4759 | static void |
4760 | e1000_smartspeed(struct e1000_adapter *adapter) | 4760 | e1000_smartspeed(struct e1000_adapter *adapter) |
4761 | { | 4761 | { |
4762 | uint16_t phy_status; | 4762 | u16 phy_status; |
4763 | uint16_t phy_ctrl; | 4763 | u16 phy_ctrl; |
4764 | 4764 | ||
4765 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || | 4765 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || |
4766 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) | 4766 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) |
@@ -4839,8 +4839,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4839 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4839 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4840 | struct mii_ioctl_data *data = if_mii(ifr); | 4840 | struct mii_ioctl_data *data = if_mii(ifr); |
4841 | int retval; | 4841 | int retval; |
4842 | uint16_t mii_reg; | 4842 | u16 mii_reg; |
4843 | uint16_t spddplx; | 4843 | u16 spddplx; |
4844 | unsigned long flags; | 4844 | unsigned long flags; |
4845 | 4845 | ||
4846 | if (adapter->hw.media_type != e1000_media_type_copper) | 4846 | if (adapter->hw.media_type != e1000_media_type_copper) |
@@ -4959,11 +4959,11 @@ e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) | |||
4959 | pcix_set_mmrbc(adapter->pdev, mmrbc); | 4959 | pcix_set_mmrbc(adapter->pdev, mmrbc); |
4960 | } | 4960 | } |
4961 | 4961 | ||
4962 | int32_t | 4962 | s32 |
4963 | e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) | 4963 | e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) |
4964 | { | 4964 | { |
4965 | struct e1000_adapter *adapter = hw->back; | 4965 | struct e1000_adapter *adapter = hw->back; |
4966 | uint16_t cap_offset; | 4966 | u16 cap_offset; |
4967 | 4967 | ||
4968 | cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); | 4968 | cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); |
4969 | if (!cap_offset) | 4969 | if (!cap_offset) |
@@ -4975,7 +4975,7 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) | |||
4975 | } | 4975 | } |
4976 | 4976 | ||
4977 | void | 4977 | void |
4978 | e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) | 4978 | e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) |
4979 | { | 4979 | { |
4980 | outl(value, port); | 4980 | outl(value, port); |
4981 | } | 4981 | } |
@@ -4984,7 +4984,7 @@ static void | |||
4984 | e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | 4984 | e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) |
4985 | { | 4985 | { |
4986 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4986 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4987 | uint32_t ctrl, rctl; | 4987 | u32 ctrl, rctl; |
4988 | 4988 | ||
4989 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4989 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4990 | e1000_irq_disable(adapter); | 4990 | e1000_irq_disable(adapter); |
@@ -5016,7 +5016,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
5016 | rctl &= ~E1000_RCTL_VFE; | 5016 | rctl &= ~E1000_RCTL_VFE; |
5017 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 5017 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
5018 | if (adapter->mng_vlan_id != | 5018 | if (adapter->mng_vlan_id != |
5019 | (uint16_t)E1000_MNG_VLAN_NONE) { | 5019 | (u16)E1000_MNG_VLAN_NONE) { |
5020 | e1000_vlan_rx_kill_vid(netdev, | 5020 | e1000_vlan_rx_kill_vid(netdev, |
5021 | adapter->mng_vlan_id); | 5021 | adapter->mng_vlan_id); |
5022 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 5022 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
@@ -5029,10 +5029,10 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
5029 | } | 5029 | } |
5030 | 5030 | ||
5031 | static void | 5031 | static void |
5032 | e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | 5032 | e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
5033 | { | 5033 | { |
5034 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5034 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5035 | uint32_t vfta, index; | 5035 | u32 vfta, index; |
5036 | 5036 | ||
5037 | if ((adapter->hw.mng_cookie.status & | 5037 | if ((adapter->hw.mng_cookie.status & |
5038 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 5038 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
@@ -5046,10 +5046,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | |||
5046 | } | 5046 | } |
5047 | 5047 | ||
5048 | static void | 5048 | static void |
5049 | e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | 5049 | e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
5050 | { | 5050 | { |
5051 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5051 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5052 | uint32_t vfta, index; | 5052 | u32 vfta, index; |
5053 | 5053 | ||
5054 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 5054 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
5055 | e1000_irq_disable(adapter); | 5055 | e1000_irq_disable(adapter); |
@@ -5078,7 +5078,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
5078 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 5078 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
5079 | 5079 | ||
5080 | if (adapter->vlgrp) { | 5080 | if (adapter->vlgrp) { |
5081 | uint16_t vid; | 5081 | u16 vid; |
5082 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 5082 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
5083 | if (!vlan_group_get_device(adapter->vlgrp, vid)) | 5083 | if (!vlan_group_get_device(adapter->vlgrp, vid)) |
5084 | continue; | 5084 | continue; |
@@ -5088,7 +5088,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
5088 | } | 5088 | } |
5089 | 5089 | ||
5090 | int | 5090 | int |
5091 | e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | 5091 | e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
5092 | { | 5092 | { |
5093 | adapter->hw.autoneg = 0; | 5093 | adapter->hw.autoneg = 0; |
5094 | 5094 | ||
@@ -5129,8 +5129,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5129 | { | 5129 | { |
5130 | struct net_device *netdev = pci_get_drvdata(pdev); | 5130 | struct net_device *netdev = pci_get_drvdata(pdev); |
5131 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5131 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5132 | uint32_t ctrl, ctrl_ext, rctl, status; | 5132 | u32 ctrl, ctrl_ext, rctl, status; |
5133 | uint32_t wufc = adapter->wol; | 5133 | u32 wufc = adapter->wol; |
5134 | #ifdef CONFIG_PM | 5134 | #ifdef CONFIG_PM |
5135 | int retval = 0; | 5135 | int retval = 0; |
5136 | #endif | 5136 | #endif |
@@ -5227,7 +5227,7 @@ e1000_resume(struct pci_dev *pdev) | |||
5227 | { | 5227 | { |
5228 | struct net_device *netdev = pci_get_drvdata(pdev); | 5228 | struct net_device *netdev = pci_get_drvdata(pdev); |
5229 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5229 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5230 | uint32_t err; | 5230 | u32 err; |
5231 | 5231 | ||
5232 | pci_set_power_state(pdev, PCI_D0); | 5232 | pci_set_power_state(pdev, PCI_D0); |
5233 | pci_restore_state(pdev); | 5233 | pci_restore_state(pdev); |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index f7e1619b974e..01c88664bad3 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -171,6 +171,10 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) | |||
171 | * for setting word_size. | 171 | * for setting word_size. |
172 | */ | 172 | */ |
173 | size += NVM_WORD_SIZE_BASE_SHIFT; | 173 | size += NVM_WORD_SIZE_BASE_SHIFT; |
174 | |||
175 | /* EEPROM access above 16k is unsupported */ | ||
176 | if (size > 14) | ||
177 | size = 14; | ||
174 | nvm->word_size = 1 << size; | 178 | nvm->word_size = 1 << size; |
175 | break; | 179 | break; |
176 | } | 180 | } |
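The added clamp caps the derived EEPROM word count at 1 << 14. Worked through with the surrounding code, and assuming NVM_WORD_SIZE_BASE_SHIFT is 6 as defined elsewhere in e1000e: an EECD size field of 9 would otherwise give 1 << 15 = 32768 words, which these parts cannot address, so the clamp keeps word_size at 16384 (16K) words:

	size += NVM_WORD_SIZE_BASE_SHIFT;	/* e.g. 9 + 6 = 15               */
	if (size > 14)				/* 1 << 14 == 16384 (16K) words  */
		size = 14;
	nvm->word_size = 1 << size;		/* clamped to 16K words at most  */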
@@ -244,7 +248,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
244 | return 0; | 248 | return 0; |
245 | } | 249 | } |
246 | 250 | ||
247 | static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter) | 251 | static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) |
248 | { | 252 | { |
249 | struct e1000_hw *hw = &adapter->hw; | 253 | struct e1000_hw *hw = &adapter->hw; |
250 | static int global_quad_port_a; /* global port a indication */ | 254 | static int global_quad_port_a; /* global port a indication */ |
@@ -832,19 +836,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) | |||
832 | ret_val = e1000_setup_link_82571(hw); | 836 | ret_val = e1000_setup_link_82571(hw); |
833 | 837 | ||
834 | /* Set the transmit descriptor write-back policy */ | 838 | /* Set the transmit descriptor write-back policy */ |
835 | reg_data = er32(TXDCTL); | 839 | reg_data = er32(TXDCTL(0)); |
836 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | 840 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | |
837 | E1000_TXDCTL_FULL_TX_DESC_WB | | 841 | E1000_TXDCTL_FULL_TX_DESC_WB | |
838 | E1000_TXDCTL_COUNT_DESC; | 842 | E1000_TXDCTL_COUNT_DESC; |
839 | ew32(TXDCTL, reg_data); | 843 | ew32(TXDCTL(0), reg_data); |
840 | 844 | ||
841 | /* ...for both queues. */ | 845 | /* ...for both queues. */ |
842 | if (mac->type != e1000_82573) { | 846 | if (mac->type != e1000_82573) { |
843 | reg_data = er32(TXDCTL1); | 847 | reg_data = er32(TXDCTL(1)); |
844 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | 848 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | |
845 | E1000_TXDCTL_FULL_TX_DESC_WB | | 849 | E1000_TXDCTL_FULL_TX_DESC_WB | |
846 | E1000_TXDCTL_COUNT_DESC; | 850 | E1000_TXDCTL_COUNT_DESC; |
847 | ew32(TXDCTL1, reg_data); | 851 | ew32(TXDCTL(1), reg_data); |
848 | } else { | 852 | } else { |
849 | e1000e_enable_tx_pkt_filtering(hw); | 853 | e1000e_enable_tx_pkt_filtering(hw); |
850 | reg_data = er32(GCR); | 854 | reg_data = er32(GCR); |
@@ -874,17 +878,17 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | |||
874 | u32 reg; | 878 | u32 reg; |
875 | 879 | ||
876 | /* Transmit Descriptor Control 0 */ | 880 | /* Transmit Descriptor Control 0 */ |
877 | reg = er32(TXDCTL); | 881 | reg = er32(TXDCTL(0)); |
878 | reg |= (1 << 22); | 882 | reg |= (1 << 22); |
879 | ew32(TXDCTL, reg); | 883 | ew32(TXDCTL(0), reg); |
880 | 884 | ||
881 | /* Transmit Descriptor Control 1 */ | 885 | /* Transmit Descriptor Control 1 */ |
882 | reg = er32(TXDCTL1); | 886 | reg = er32(TXDCTL(1)); |
883 | reg |= (1 << 22); | 887 | reg |= (1 << 22); |
884 | ew32(TXDCTL1, reg); | 888 | ew32(TXDCTL(1), reg); |
885 | 889 | ||
886 | /* Transmit Arbitration Control 0 */ | 890 | /* Transmit Arbitration Control 0 */ |
887 | reg = er32(TARC0); | 891 | reg = er32(TARC(0)); |
888 | reg &= ~(0xF << 27); /* 30:27 */ | 892 | reg &= ~(0xF << 27); /* 30:27 */ |
889 | switch (hw->mac.type) { | 893 | switch (hw->mac.type) { |
890 | case e1000_82571: | 894 | case e1000_82571: |
@@ -894,10 +898,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | |||
894 | default: | 898 | default: |
895 | break; | 899 | break; |
896 | } | 900 | } |
897 | ew32(TARC0, reg); | 901 | ew32(TARC(0), reg); |
898 | 902 | ||
899 | /* Transmit Arbitration Control 1 */ | 903 | /* Transmit Arbitration Control 1 */ |
900 | reg = er32(TARC1); | 904 | reg = er32(TARC(1)); |
901 | switch (hw->mac.type) { | 905 | switch (hw->mac.type) { |
902 | case e1000_82571: | 906 | case e1000_82571: |
903 | case e1000_82572: | 907 | case e1000_82572: |
@@ -907,7 +911,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | |||
907 | reg &= ~(1 << 28); | 911 | reg &= ~(1 << 28); |
908 | else | 912 | else |
909 | reg |= (1 << 28); | 913 | reg |= (1 << 28); |
910 | ew32(TARC1, reg); | 914 | ew32(TARC(1), reg); |
911 | break; | 915 | break; |
912 | default: | 916 | default: |
913 | break; | 917 | break; |
@@ -1333,7 +1337,7 @@ struct e1000_info e1000_82571_info = { | |||
1333 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ | 1337 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ |
1334 | | FLAG_APME_CHECK_PORT_B, | 1338 | | FLAG_APME_CHECK_PORT_B, |
1335 | .pba = 38, | 1339 | .pba = 38, |
1336 | .get_invariants = e1000_get_invariants_82571, | 1340 | .get_variants = e1000_get_variants_82571, |
1337 | .mac_ops = &e82571_mac_ops, | 1341 | .mac_ops = &e82571_mac_ops, |
1338 | .phy_ops = &e82_phy_ops_igp, | 1342 | .phy_ops = &e82_phy_ops_igp, |
1339 | .nvm_ops = &e82571_nvm_ops, | 1343 | .nvm_ops = &e82571_nvm_ops, |
@@ -1351,7 +1355,7 @@ struct e1000_info e1000_82572_info = { | |||
1351 | | FLAG_HAS_STATS_ICR_ICT | 1355 | | FLAG_HAS_STATS_ICR_ICT |
1352 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ | 1356 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ |
1353 | .pba = 38, | 1357 | .pba = 38, |
1354 | .get_invariants = e1000_get_invariants_82571, | 1358 | .get_variants = e1000_get_variants_82571, |
1355 | .mac_ops = &e82571_mac_ops, | 1359 | .mac_ops = &e82571_mac_ops, |
1356 | .phy_ops = &e82_phy_ops_igp, | 1360 | .phy_ops = &e82_phy_ops_igp, |
1357 | .nvm_ops = &e82571_nvm_ops, | 1361 | .nvm_ops = &e82571_nvm_ops, |
@@ -1371,7 +1375,7 @@ struct e1000_info e1000_82573_info = { | |||
1371 | | FLAG_HAS_ERT | 1375 | | FLAG_HAS_ERT |
1372 | | FLAG_HAS_SWSM_ON_LOAD, | 1376 | | FLAG_HAS_SWSM_ON_LOAD, |
1373 | .pba = 20, | 1377 | .pba = 20, |
1374 | .get_invariants = e1000_get_invariants_82571, | 1378 | .get_variants = e1000_get_variants_82571, |
1375 | .mac_ops = &e82571_mac_ops, | 1379 | .mac_ops = &e82571_mac_ops, |
1376 | .phy_ops = &e82_phy_ops_m88, | 1380 | .phy_ops = &e82_phy_ops_m88, |
1377 | .nvm_ops = &e82571_nvm_ops, | 1381 | .nvm_ops = &e82571_nvm_ops, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index b941a6b509c4..5a89dff52264 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -271,7 +271,7 @@ struct e1000_info { | |||
271 | enum e1000_mac_type mac; | 271 | enum e1000_mac_type mac; |
272 | unsigned int flags; | 272 | unsigned int flags; |
273 | u32 pba; | 273 | u32 pba; |
274 | s32 (*get_invariants)(struct e1000_adapter *); | 274 | s32 (*get_variants)(struct e1000_adapter *); |
275 | struct e1000_mac_operations *mac_ops; | 275 | struct e1000_mac_operations *mac_ops; |
276 | struct e1000_phy_operations *phy_ops; | 276 | struct e1000_phy_operations *phy_ops; |
277 | struct e1000_nvm_operations *nvm_ops; | 277 | struct e1000_nvm_operations *nvm_ops; |
@@ -357,7 +357,7 @@ extern struct e1000_info e1000_ich8_info; | |||
357 | extern struct e1000_info e1000_ich9_info; | 357 | extern struct e1000_info e1000_ich9_info; |
358 | extern struct e1000_info e1000_es2_info; | 358 | extern struct e1000_info e1000_es2_info; |
359 | 359 | ||
360 | extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num); | 360 | extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); |
361 | 361 | ||
362 | extern s32 e1000e_commit_phy(struct e1000_hw *hw); | 362 | extern s32 e1000e_commit_phy(struct e1000_hw *hw); |
363 | 363 | ||
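The hunk above renames the per-family hook in struct e1000_info from get_invariants to get_variants; the probe path (see the netdev.c hunk further down, err = ei->get_variants(adapter)) is its only caller. A stand-alone sketch of that dispatch, with the structure trimmed to the one renamed member and a dummy callback standing in for the real 82571/es2lan/ich8lan routines:

#include <stdio.h>

typedef int s32;
struct e1000_adapter;                  /* opaque here; the real struct is large */

struct e1000_info {
	s32 (*get_variants)(struct e1000_adapter *);   /* renamed hook */
};

static s32 demo_get_variants(struct e1000_adapter *adapter)
{
	(void)adapter;        /* the real callbacks set up MAC/PHY/NVM parameters */
	return 0;
}

static const struct e1000_info demo_info = { .get_variants = demo_get_variants };

int main(void)
{
	s32 err = demo_info.get_variants(NULL);   /* mirrors err = ei->get_variants(adapter) */
	printf("get_variants -> %d\n", err);
	return 0;
}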
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index e3f4aeefeae2..d59a99ae44be 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -178,6 +178,10 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) | |||
178 | * for setting word_size. | 178 | * for setting word_size. |
179 | */ | 179 | */ |
180 | size += NVM_WORD_SIZE_BASE_SHIFT; | 180 | size += NVM_WORD_SIZE_BASE_SHIFT; |
181 | |||
182 | /* EEPROM access above 16k is unsupported */ | ||
183 | if (size > 14) | ||
184 | size = 14; | ||
181 | nvm->word_size = 1 << size; | 185 | nvm->word_size = 1 << size; |
182 | 186 | ||
183 | return 0; | 187 | return 0; |
@@ -234,7 +238,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
234 | return 0; | 238 | return 0; |
235 | } | 239 | } |
236 | 240 | ||
237 | static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter) | 241 | static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) |
238 | { | 242 | { |
239 | struct e1000_hw *hw = &adapter->hw; | 243 | struct e1000_hw *hw = &adapter->hw; |
240 | s32 rc; | 244 | s32 rc; |
@@ -788,16 +792,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
788 | ret_val = e1000e_setup_link(hw); | 792 | ret_val = e1000e_setup_link(hw); |
789 | 793 | ||
790 | /* Set the transmit descriptor write-back policy */ | 794 | /* Set the transmit descriptor write-back policy */ |
791 | reg_data = er32(TXDCTL); | 795 | reg_data = er32(TXDCTL(0)); |
792 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | 796 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | |
793 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | 797 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; |
794 | ew32(TXDCTL, reg_data); | 798 | ew32(TXDCTL(0), reg_data); |
795 | 799 | ||
796 | /* ...for both queues. */ | 800 | /* ...for both queues. */ |
797 | reg_data = er32(TXDCTL1); | 801 | reg_data = er32(TXDCTL(1)); |
798 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | | 802 | reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | |
799 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; | 803 | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; |
800 | ew32(TXDCTL1, reg_data); | 804 | ew32(TXDCTL(1), reg_data); |
801 | 805 | ||
802 | /* Enable retransmit on late collisions */ | 806 | /* Enable retransmit on late collisions */ |
803 | reg_data = er32(TCTL); | 807 | reg_data = er32(TCTL); |
@@ -842,29 +846,29 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) | |||
842 | u32 reg; | 846 | u32 reg; |
843 | 847 | ||
844 | /* Transmit Descriptor Control 0 */ | 848 | /* Transmit Descriptor Control 0 */ |
845 | reg = er32(TXDCTL); | 849 | reg = er32(TXDCTL(0)); |
846 | reg |= (1 << 22); | 850 | reg |= (1 << 22); |
847 | ew32(TXDCTL, reg); | 851 | ew32(TXDCTL(0), reg); |
848 | 852 | ||
849 | /* Transmit Descriptor Control 1 */ | 853 | /* Transmit Descriptor Control 1 */ |
850 | reg = er32(TXDCTL1); | 854 | reg = er32(TXDCTL(1)); |
851 | reg |= (1 << 22); | 855 | reg |= (1 << 22); |
852 | ew32(TXDCTL1, reg); | 856 | ew32(TXDCTL(1), reg); |
853 | 857 | ||
854 | /* Transmit Arbitration Control 0 */ | 858 | /* Transmit Arbitration Control 0 */ |
855 | reg = er32(TARC0); | 859 | reg = er32(TARC(0)); |
856 | reg &= ~(0xF << 27); /* 30:27 */ | 860 | reg &= ~(0xF << 27); /* 30:27 */ |
857 | if (hw->phy.media_type != e1000_media_type_copper) | 861 | if (hw->phy.media_type != e1000_media_type_copper) |
858 | reg &= ~(1 << 20); | 862 | reg &= ~(1 << 20); |
859 | ew32(TARC0, reg); | 863 | ew32(TARC(0), reg); |
860 | 864 | ||
861 | /* Transmit Arbitration Control 1 */ | 865 | /* Transmit Arbitration Control 1 */ |
862 | reg = er32(TARC1); | 866 | reg = er32(TARC(1)); |
863 | if (er32(TCTL) & E1000_TCTL_MULR) | 867 | if (er32(TCTL) & E1000_TCTL_MULR) |
864 | reg &= ~(1 << 28); | 868 | reg &= ~(1 << 28); |
865 | else | 869 | else |
866 | reg |= (1 << 28); | 870 | reg |= (1 << 28); |
867 | ew32(TARC1, reg); | 871 | ew32(TARC(1), reg); |
868 | } | 872 | } |
869 | 873 | ||
870 | /** | 874 | /** |
@@ -1239,7 +1243,7 @@ struct e1000_info e1000_es2_info = { | |||
1239 | | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | 1243 | | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ |
1240 | | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, | 1244 | | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, |
1241 | .pba = 38, | 1245 | .pba = 38, |
1242 | .get_invariants = e1000_get_invariants_80003es2lan, | 1246 | .get_variants = e1000_get_variants_80003es2lan, |
1243 | .mac_ops = &es2_mac_ops, | 1247 | .mac_ops = &es2_mac_ops, |
1244 | .phy_ops = &es2_phy_ops, | 1248 | .phy_ops = &es2_phy_ops, |
1245 | .nvm_ops = &es2_nvm_ops, | 1249 | .nvm_ops = &es2_nvm_ops, |
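The new clamp in e1000_init_nvm_params_80003es2lan() caps the size exponent at 14 before word_size = 1 << size, i.e. at most 16384 16-bit words, which is what the "EEPROM access above 16k is unsupported" comment refers to. A tiny stand-alone illustration of that arithmetic (the starting exponent is made up):

#include <stdio.h>

int main(void)
{
	unsigned size = 15;            /* hypothetical oversized exponent */

	if (size > 14)                 /* same clamp as the hunk above */
		size = 14;

	printf("word_size = %u words\n", 1u << size);   /* 16384 = 16K words */
	return 0;
}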
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 4ae00567bba6..6d1b257bbda6 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -641,10 +641,17 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
641 | tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | 641 | tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); |
642 | if (!tx_ring) | 642 | if (!tx_ring) |
643 | goto err_alloc_tx; | 643 | goto err_alloc_tx; |
644 | /* | ||
645 | * use a memcpy to save any previously configured | ||
646 | * items like napi structs from having to be | ||
647 | * reinitialized | ||
648 | */ | ||
649 | memcpy(tx_ring, tx_old, sizeof(struct e1000_ring)); | ||
644 | 650 | ||
645 | rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | 651 | rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); |
646 | if (!rx_ring) | 652 | if (!rx_ring) |
647 | goto err_alloc_rx; | 653 | goto err_alloc_rx; |
654 | memcpy(rx_ring, rx_old, sizeof(struct e1000_ring)); | ||
648 | 655 | ||
649 | adapter->tx_ring = tx_ring; | 656 | adapter->tx_ring = tx_ring; |
650 | adapter->rx_ring = rx_ring; | 657 | adapter->rx_ring = rx_ring; |
@@ -700,61 +707,55 @@ err_setup: | |||
700 | return err; | 707 | return err; |
701 | } | 708 | } |
702 | 709 | ||
703 | static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, | 710 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, |
704 | int reg, int offset, u32 mask, u32 write) | 711 | int reg, int offset, u32 mask, u32 write) |
705 | { | 712 | { |
706 | int i; | 713 | u32 pat, val; |
707 | u32 read; | ||
708 | static const u32 test[] = | 714 | static const u32 test[] = |
709 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | 715 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; |
710 | for (i = 0; i < ARRAY_SIZE(test); i++) { | 716 | for (pat = 0; pat < ARRAY_SIZE(test); pat++) { |
711 | E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, | 717 | E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, |
712 | (test[i] & write)); | 718 | (test[pat] & write)); |
713 | read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); | 719 | val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); |
714 | if (read != (test[i] & write & mask)) { | 720 | if (val != (test[pat] & write & mask)) { |
715 | ndev_err(adapter->netdev, "pattern test reg %04X " | 721 | ndev_err(adapter->netdev, "pattern test reg %04X " |
716 | "failed: got 0x%08X expected 0x%08X\n", | 722 | "failed: got 0x%08X expected 0x%08X\n", |
717 | reg + offset, | 723 | reg + offset, |
718 | read, (test[i] & write & mask)); | 724 | val, (test[pat] & write & mask)); |
719 | *data = reg; | 725 | *data = reg; |
720 | return true; | 726 | return 1; |
721 | } | 727 | } |
722 | } | 728 | } |
723 | return false; | 729 | return 0; |
724 | } | 730 | } |
725 | 731 | ||
726 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | 732 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, |
727 | int reg, u32 mask, u32 write) | 733 | int reg, u32 mask, u32 write) |
728 | { | 734 | { |
729 | u32 read; | 735 | u32 val; |
730 | __ew32(&adapter->hw, reg, write & mask); | 736 | __ew32(&adapter->hw, reg, write & mask); |
731 | read = __er32(&adapter->hw, reg); | 737 | val = __er32(&adapter->hw, reg); |
732 | if ((write & mask) != (read & mask)) { | 738 | if ((write & mask) != (val & mask)) { |
733 | ndev_err(adapter->netdev, "set/check reg %04X test failed: " | 739 | ndev_err(adapter->netdev, "set/check reg %04X test failed: " |
734 | "got 0x%08X expected 0x%08X\n", reg, (read & mask), | 740 | "got 0x%08X expected 0x%08X\n", reg, (val & mask), |
735 | (write & mask)); | 741 | (write & mask)); |
736 | *data = reg; | 742 | *data = reg; |
737 | return true; | 743 | return 1; |
738 | } | 744 | } |
739 | return false; | 745 | return 0; |
740 | } | 746 | } |
741 | 747 | #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ | |
742 | #define REG_PATTERN_TEST(R, M, W) \ | 748 | do { \ |
743 | do { \ | 749 | if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ |
744 | if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \ | 750 | return 1; \ |
745 | return 1; \ | ||
746 | } while (0) | 751 | } while (0) |
752 | #define REG_PATTERN_TEST(reg, mask, write) \ | ||
753 | REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) | ||
747 | 754 | ||
748 | #define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \ | 755 | #define REG_SET_AND_CHECK(reg, mask, write) \ |
749 | do { \ | 756 | do { \ |
750 | if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \ | 757 | if (reg_set_and_check(adapter, data, reg, mask, write)) \ |
751 | return 1; \ | 758 | return 1; \ |
752 | } while (0) | ||
753 | |||
754 | #define REG_SET_AND_CHECK(R, M, W) \ | ||
755 | do { \ | ||
756 | if (reg_set_and_check(adapter, data, R, M, W)) \ | ||
757 | return 1; \ | ||
758 | } while (0) | 759 | } while (0) |
759 | 760 | ||
760 | static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | 761 | static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) |
@@ -1038,7 +1039,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1038 | struct pci_dev *pdev = adapter->pdev; | 1039 | struct pci_dev *pdev = adapter->pdev; |
1039 | struct e1000_hw *hw = &adapter->hw; | 1040 | struct e1000_hw *hw = &adapter->hw; |
1040 | u32 rctl; | 1041 | u32 rctl; |
1041 | int size; | ||
1042 | int i; | 1042 | int i; |
1043 | int ret_val; | 1043 | int ret_val; |
1044 | 1044 | ||
@@ -1047,13 +1047,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1047 | if (!tx_ring->count) | 1047 | if (!tx_ring->count) |
1048 | tx_ring->count = E1000_DEFAULT_TXD; | 1048 | tx_ring->count = E1000_DEFAULT_TXD; |
1049 | 1049 | ||
1050 | size = tx_ring->count * sizeof(struct e1000_buffer); | 1050 | tx_ring->buffer_info = kcalloc(tx_ring->count, |
1051 | tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); | 1051 | sizeof(struct e1000_buffer), |
1052 | if (!tx_ring->buffer_info) { | 1052 | GFP_KERNEL); |
1053 | if (!(tx_ring->buffer_info)) { | ||
1053 | ret_val = 1; | 1054 | ret_val = 1; |
1054 | goto err_nomem; | 1055 | goto err_nomem; |
1055 | } | 1056 | } |
1056 | memset(tx_ring->buffer_info, 0, size); | ||
1057 | 1057 | ||
1058 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); | 1058 | tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); |
1059 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 1059 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
@@ -1063,21 +1063,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1063 | ret_val = 2; | 1063 | ret_val = 2; |
1064 | goto err_nomem; | 1064 | goto err_nomem; |
1065 | } | 1065 | } |
1066 | memset(tx_ring->desc, 0, tx_ring->size); | ||
1067 | tx_ring->next_to_use = 0; | 1066 | tx_ring->next_to_use = 0; |
1068 | tx_ring->next_to_clean = 0; | 1067 | tx_ring->next_to_clean = 0; |
1069 | 1068 | ||
1070 | ew32(TDBAL, | 1069 | ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); |
1071 | ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); | ||
1072 | ew32(TDBAH, ((u64) tx_ring->dma >> 32)); | 1070 | ew32(TDBAH, ((u64) tx_ring->dma >> 32)); |
1073 | ew32(TDLEN, | 1071 | ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); |
1074 | tx_ring->count * sizeof(struct e1000_tx_desc)); | ||
1075 | ew32(TDH, 0); | 1072 | ew32(TDH, 0); |
1076 | ew32(TDT, 0); | 1073 | ew32(TDT, 0); |
1077 | ew32(TCTL, | 1074 | ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | |
1078 | E1000_TCTL_PSP | E1000_TCTL_EN | | 1075 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | |
1079 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | 1076 | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); |
1080 | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1081 | 1077 | ||
1082 | for (i = 0; i < tx_ring->count; i++) { | 1078 | for (i = 0; i < tx_ring->count; i++) { |
1083 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); | 1079 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); |
@@ -1099,12 +1095,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1099 | ret_val = 4; | 1095 | ret_val = 4; |
1100 | goto err_nomem; | 1096 | goto err_nomem; |
1101 | } | 1097 | } |
1102 | tx_desc->buffer_addr = cpu_to_le64( | 1098 | tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); |
1103 | tx_ring->buffer_info[i].dma); | ||
1104 | tx_desc->lower.data = cpu_to_le32(skb->len); | 1099 | tx_desc->lower.data = cpu_to_le32(skb->len); |
1105 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | 1100 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | |
1106 | E1000_TXD_CMD_IFCS | | 1101 | E1000_TXD_CMD_IFCS | |
1107 | E1000_TXD_CMD_RPS); | 1102 | E1000_TXD_CMD_RS); |
1108 | tx_desc->upper.data = 0; | 1103 | tx_desc->upper.data = 0; |
1109 | } | 1104 | } |
1110 | 1105 | ||
@@ -1113,13 +1108,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1113 | if (!rx_ring->count) | 1108 | if (!rx_ring->count) |
1114 | rx_ring->count = E1000_DEFAULT_RXD; | 1109 | rx_ring->count = E1000_DEFAULT_RXD; |
1115 | 1110 | ||
1116 | size = rx_ring->count * sizeof(struct e1000_buffer); | 1111 | rx_ring->buffer_info = kcalloc(rx_ring->count, |
1117 | rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); | 1112 | sizeof(struct e1000_buffer), |
1118 | if (!rx_ring->buffer_info) { | 1113 | GFP_KERNEL); |
1114 | if (!(rx_ring->buffer_info)) { | ||
1119 | ret_val = 5; | 1115 | ret_val = 5; |
1120 | goto err_nomem; | 1116 | goto err_nomem; |
1121 | } | 1117 | } |
1122 | memset(rx_ring->buffer_info, 0, size); | ||
1123 | 1118 | ||
1124 | rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); | 1119 | rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); |
1125 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, | 1120 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, |
@@ -1128,7 +1123,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1128 | ret_val = 6; | 1123 | ret_val = 6; |
1129 | goto err_nomem; | 1124 | goto err_nomem; |
1130 | } | 1125 | } |
1131 | memset(rx_ring->desc, 0, rx_ring->size); | ||
1132 | rx_ring->next_to_use = 0; | 1126 | rx_ring->next_to_use = 0; |
1133 | rx_ring->next_to_clean = 0; | 1127 | rx_ring->next_to_clean = 0; |
1134 | 1128 | ||
@@ -1140,6 +1134,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1140 | ew32(RDH, 0); | 1134 | ew32(RDH, 0); |
1141 | ew32(RDT, 0); | 1135 | ew32(RDT, 0); |
1142 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | 1136 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | |
1137 | E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | | ||
1138 | E1000_RCTL_SBP | E1000_RCTL_SECRC | | ||
1143 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1139 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1144 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1140 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); |
1145 | ew32(RCTL, rctl); | 1141 | ew32(RCTL, rctl); |
@@ -1203,7 +1199,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1203 | 1199 | ||
1204 | ctrl_reg = er32(CTRL); | 1200 | ctrl_reg = er32(CTRL); |
1205 | 1201 | ||
1206 | if (hw->phy.type == e1000_phy_ife) { | 1202 | switch (hw->phy.type) { |
1203 | case e1000_phy_ife: | ||
1207 | /* force 100, set loopback */ | 1204 | /* force 100, set loopback */ |
1208 | e1e_wphy(hw, PHY_CONTROL, 0x6100); | 1205 | e1e_wphy(hw, PHY_CONTROL, 0x6100); |
1209 | 1206 | ||
@@ -1213,9 +1210,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1213 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1210 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
1214 | E1000_CTRL_SPD_100 |/* Force Speed to 100 */ | 1211 | E1000_CTRL_SPD_100 |/* Force Speed to 100 */ |
1215 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1212 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1216 | } else { | 1213 | break; |
1214 | default: | ||
1217 | /* force 1000, set loopback */ | 1215 | /* force 1000, set loopback */ |
1218 | e1e_wphy(hw, PHY_CONTROL, 0x4140); | 1216 | e1e_wphy(hw, PHY_CONTROL, 0x4140); |
1217 | mdelay(250); | ||
1219 | 1218 | ||
1220 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | 1219 | /* Now set up the MAC to the same speed/duplex as the PHY. */ |
1221 | ctrl_reg = er32(CTRL); | 1220 | ctrl_reg = er32(CTRL); |
@@ -1224,6 +1223,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1224 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1223 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
1225 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | 1224 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ |
1226 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1225 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1226 | |||
1227 | if ((adapter->hw.mac.type == e1000_ich8lan) || | ||
1228 | (adapter->hw.mac.type == e1000_ich9lan)) | ||
1229 | ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ | ||
1227 | } | 1230 | } |
1228 | 1231 | ||
1229 | if (hw->phy.media_type == e1000_media_type_copper && | 1232 | if (hw->phy.media_type == e1000_media_type_copper && |
@@ -1325,7 +1328,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) | |||
1325 | #define KMRNCTRLSTA_OPMODE (0x1F << 16) | 1328 | #define KMRNCTRLSTA_OPMODE (0x1F << 16) |
1326 | #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 | 1329 | #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 |
1327 | ew32(KMRNCTRLSTA, | 1330 | ew32(KMRNCTRLSTA, |
1328 | (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); | 1331 | (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); |
1329 | 1332 | ||
1330 | return 0; | 1333 | return 0; |
1331 | } | 1334 | } |
@@ -1451,8 +1454,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1451 | l = 0; | 1454 | l = 0; |
1452 | for (j = 0; j <= lc; j++) { /* loop count loop */ | 1455 | for (j = 0; j <= lc; j++) { /* loop count loop */ |
1453 | for (i = 0; i < 64; i++) { /* send the packets */ | 1456 | for (i = 0; i < 64; i++) { /* send the packets */ |
1454 | e1000_create_lbtest_frame( | 1457 | e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, |
1455 | tx_ring->buffer_info[i].skb, 1024); | 1458 | 1024); |
1456 | pci_dma_sync_single_for_device(pdev, | 1459 | pci_dma_sync_single_for_device(pdev, |
1457 | tx_ring->buffer_info[k].dma, | 1460 | tx_ring->buffer_info[k].dma, |
1458 | tx_ring->buffer_info[k].length, | 1461 | tx_ring->buffer_info[k].length, |
@@ -1487,7 +1490,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1487 | ret_val = 13; /* ret_val is the same as mis-compare */ | 1490 | ret_val = 13; /* ret_val is the same as mis-compare */ |
1488 | break; | 1491 | break; |
1489 | } | 1492 | } |
1490 | if (jiffies >= (time + 2)) { | 1493 | if (jiffies >= (time + 20)) { |
1491 | ret_val = 14; /* error code for time out error */ | 1494 | ret_val = 14; /* error code for time out error */ |
1492 | break; | 1495 | break; |
1493 | } | 1496 | } |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 0b4145a73229..53f1ac6327fa 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -93,6 +93,8 @@ enum e1e_registers { | |||
93 | E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ | 93 | E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ |
94 | E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ | 94 | E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ |
95 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ | 95 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ |
96 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ | ||
97 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) | ||
96 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ | 98 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ |
97 | 99 | ||
98 | /* Convenience macros | 100 | /* Convenience macros |
@@ -111,11 +113,11 @@ enum e1e_registers { | |||
111 | E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ | 113 | E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ |
112 | E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ | 114 | E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ |
113 | E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ | 115 | E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ |
114 | E1000_TXDCTL = 0x03828, /* Tx Descriptor Control - RW */ | 116 | E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ |
117 | #define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) | ||
115 | E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ | 118 | E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ |
116 | E1000_TARC0 = 0x03840, /* Tx Arbitration Count (0) */ | 119 | E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ |
117 | E1000_TXDCTL1 = 0x03928, /* Tx Descriptor Control (1) - RW */ | 120 | #define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) |
118 | E1000_TARC1 = 0x03940, /* Tx Arbitration Count (1) */ | ||
119 | E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ | 121 | E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ |
120 | E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ | 122 | E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ |
121 | E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ | 123 | E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ |
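The hw.h hunk above replaces the fixed TXDCTL/TXDCTL1 and TARC0/TARC1 enum entries with _BASE values plus an (_n << 8) offset macro, which is what lets the other files index the per-queue registers as TXDCTL(0)/TXDCTL(1) and TARC(0)/TARC(1). A quick stand-alone check that the macros reproduce the old addresses (constants copied from the enum above; the asserts are only illustrative):

#include <assert.h>

#define E1000_TXDCTL_BASE 0x03828
#define E1000_TXDCTL(_n)  (E1000_TXDCTL_BASE + ((_n) << 8))
#define E1000_TARC_BASE   0x03840
#define E1000_TARC(_n)    (E1000_TARC_BASE + ((_n) << 8))

int main(void)
{
	assert(E1000_TXDCTL(0) == 0x03828);   /* old E1000_TXDCTL  */
	assert(E1000_TXDCTL(1) == 0x03928);   /* old E1000_TXDCTL1 */
	assert(E1000_TARC(0)   == 0x03840);   /* old E1000_TARC0   */
	assert(E1000_TARC(1)   == 0x03940);   /* old E1000_TARC1   */
	return 0;
}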
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index e358a773e67a..768485dbb2c6 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -316,7 +316,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
316 | return 0; | 316 | return 0; |
317 | } | 317 | } |
318 | 318 | ||
319 | static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter) | 319 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) |
320 | { | 320 | { |
321 | struct e1000_hw *hw = &adapter->hw; | 321 | struct e1000_hw *hw = &adapter->hw; |
322 | s32 rc; | 322 | s32 rc; |
@@ -1753,18 +1753,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
1753 | ret_val = e1000_setup_link_ich8lan(hw); | 1753 | ret_val = e1000_setup_link_ich8lan(hw); |
1754 | 1754 | ||
1755 | /* Set the transmit descriptor write-back policy for both queues */ | 1755 | /* Set the transmit descriptor write-back policy for both queues */ |
1756 | txdctl = er32(TXDCTL); | 1756 | txdctl = er32(TXDCTL(0)); |
1757 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | 1757 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | |
1758 | E1000_TXDCTL_FULL_TX_DESC_WB; | 1758 | E1000_TXDCTL_FULL_TX_DESC_WB; |
1759 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | 1759 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | |
1760 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | 1760 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; |
1761 | ew32(TXDCTL, txdctl); | 1761 | ew32(TXDCTL(0), txdctl); |
1762 | txdctl = er32(TXDCTL1); | 1762 | txdctl = er32(TXDCTL(1)); |
1763 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | | 1763 | txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | |
1764 | E1000_TXDCTL_FULL_TX_DESC_WB; | 1764 | E1000_TXDCTL_FULL_TX_DESC_WB; |
1765 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | | 1765 | txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | |
1766 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | 1766 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; |
1767 | ew32(TXDCTL1, txdctl); | 1767 | ew32(TXDCTL(1), txdctl); |
1768 | 1768 | ||
1769 | /* | 1769 | /* |
1770 | * ICH8 has opposite polarity of no_snoop bits. | 1770 | * ICH8 has opposite polarity of no_snoop bits. |
@@ -1807,30 +1807,30 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | |||
1807 | ew32(CTRL_EXT, reg); | 1807 | ew32(CTRL_EXT, reg); |
1808 | 1808 | ||
1809 | /* Transmit Descriptor Control 0 */ | 1809 | /* Transmit Descriptor Control 0 */ |
1810 | reg = er32(TXDCTL); | 1810 | reg = er32(TXDCTL(0)); |
1811 | reg |= (1 << 22); | 1811 | reg |= (1 << 22); |
1812 | ew32(TXDCTL, reg); | 1812 | ew32(TXDCTL(0), reg); |
1813 | 1813 | ||
1814 | /* Transmit Descriptor Control 1 */ | 1814 | /* Transmit Descriptor Control 1 */ |
1815 | reg = er32(TXDCTL1); | 1815 | reg = er32(TXDCTL(1)); |
1816 | reg |= (1 << 22); | 1816 | reg |= (1 << 22); |
1817 | ew32(TXDCTL1, reg); | 1817 | ew32(TXDCTL(1), reg); |
1818 | 1818 | ||
1819 | /* Transmit Arbitration Control 0 */ | 1819 | /* Transmit Arbitration Control 0 */ |
1820 | reg = er32(TARC0); | 1820 | reg = er32(TARC(0)); |
1821 | if (hw->mac.type == e1000_ich8lan) | 1821 | if (hw->mac.type == e1000_ich8lan) |
1822 | reg |= (1 << 28) | (1 << 29); | 1822 | reg |= (1 << 28) | (1 << 29); |
1823 | reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); | 1823 | reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); |
1824 | ew32(TARC0, reg); | 1824 | ew32(TARC(0), reg); |
1825 | 1825 | ||
1826 | /* Transmit Arbitration Control 1 */ | 1826 | /* Transmit Arbitration Control 1 */ |
1827 | reg = er32(TARC1); | 1827 | reg = er32(TARC(1)); |
1828 | if (er32(TCTL) & E1000_TCTL_MULR) | 1828 | if (er32(TCTL) & E1000_TCTL_MULR) |
1829 | reg &= ~(1 << 28); | 1829 | reg &= ~(1 << 28); |
1830 | else | 1830 | else |
1831 | reg |= (1 << 28); | 1831 | reg |= (1 << 28); |
1832 | reg |= (1 << 24) | (1 << 26) | (1 << 30); | 1832 | reg |= (1 << 24) | (1 << 26) | (1 << 30); |
1833 | ew32(TARC1, reg); | 1833 | ew32(TARC(1), reg); |
1834 | 1834 | ||
1835 | /* Device Status */ | 1835 | /* Device Status */ |
1836 | if (hw->mac.type == e1000_ich8lan) { | 1836 | if (hw->mac.type == e1000_ich8lan) { |
@@ -2253,7 +2253,7 @@ struct e1000_info e1000_ich8_info = { | |||
2253 | | FLAG_HAS_FLASH | 2253 | | FLAG_HAS_FLASH |
2254 | | FLAG_APME_IN_WUC, | 2254 | | FLAG_APME_IN_WUC, |
2255 | .pba = 8, | 2255 | .pba = 8, |
2256 | .get_invariants = e1000_get_invariants_ich8lan, | 2256 | .get_variants = e1000_get_variants_ich8lan, |
2257 | .mac_ops = &ich8_mac_ops, | 2257 | .mac_ops = &ich8_mac_ops, |
2258 | .phy_ops = &ich8_phy_ops, | 2258 | .phy_ops = &ich8_phy_ops, |
2259 | .nvm_ops = &ich8_nvm_ops, | 2259 | .nvm_ops = &ich8_nvm_ops, |
@@ -2270,7 +2270,7 @@ struct e1000_info e1000_ich9_info = { | |||
2270 | | FLAG_HAS_FLASH | 2270 | | FLAG_HAS_FLASH |
2271 | | FLAG_APME_IN_WUC, | 2271 | | FLAG_APME_IN_WUC, |
2272 | .pba = 10, | 2272 | .pba = 10, |
2273 | .get_invariants = e1000_get_invariants_ich8lan, | 2273 | .get_variants = e1000_get_variants_ich8lan, |
2274 | .mac_ops = &ich8_mac_ops, | 2274 | .mac_ops = &ich8_mac_ops, |
2275 | .phy_ops = &ich8_phy_ops, | 2275 | .phy_ops = &ich8_phy_ops, |
2276 | .nvm_ops = &ich8_nvm_ops, | 2276 | .nvm_ops = &ich8_nvm_ops, |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index ea3ff6369c86..f1f4e9dfd0a0 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -2477,7 +2477,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) | |||
2477 | return ret_val; | 2477 | return ret_val; |
2478 | } | 2478 | } |
2479 | 2479 | ||
2480 | s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) | 2480 | s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) |
2481 | { | 2481 | { |
2482 | s32 ret_val; | 2482 | s32 ret_val; |
2483 | u16 nvm_data; | 2483 | u16 nvm_data; |
@@ -2487,14 +2487,14 @@ s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) | |||
2487 | hw_dbg(hw, "NVM Read Error\n"); | 2487 | hw_dbg(hw, "NVM Read Error\n"); |
2488 | return ret_val; | 2488 | return ret_val; |
2489 | } | 2489 | } |
2490 | *part_num = (u32)(nvm_data << 16); | 2490 | *pba_num = (u32)(nvm_data << 16); |
2491 | 2491 | ||
2492 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); | 2492 | ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
2493 | if (ret_val) { | 2493 | if (ret_val) { |
2494 | hw_dbg(hw, "NVM Read Error\n"); | 2494 | hw_dbg(hw, "NVM Read Error\n"); |
2495 | return ret_val; | 2495 | return ret_val; |
2496 | } | 2496 | } |
2497 | *part_num |= nvm_data; | 2497 | *pba_num |= nvm_data; |
2498 | 2498 | ||
2499 | return 0; | 2499 | return 0; |
2500 | } | 2500 | } |
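The renamed e1000e_read_pba_num() packs two 16-bit NVM words into a single 32-bit value; e1000_print_device_info() in the netdev.c hunk below then prints it as "PBA No: %06x-%03x" from bits 31:8 and 7:0. A stand-alone sketch of that packing with made-up NVM words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t word0 = 0xc2e1, word1 = 0x3205;   /* hypothetical NVM contents */
	uint32_t pba_num;

	pba_num  = (uint32_t)word0 << 16;          /* word at NVM_PBA_OFFSET_0 */
	pba_num |= word1;                          /* word at NVM_PBA_OFFSET_1 */

	/* same split the driver logs */
	printf("PBA No: %06x-%03x\n",
	       (unsigned)(pba_num >> 8), (unsigned)(pba_num & 0xff));
	return 0;
}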
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d70bde03619e..c8dc47fd132a 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1639,24 +1639,24 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1639 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | 1639 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1640 | 1640 | ||
1641 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | 1641 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { |
1642 | tarc = er32(TARC0); | 1642 | tarc = er32(TARC(0)); |
1643 | /* | 1643 | /* |
1644 | * set the speed mode bit, we'll clear it if we're not at | 1644 | * set the speed mode bit, we'll clear it if we're not at |
1645 | * gigabit link later | 1645 | * gigabit link later |
1646 | */ | 1646 | */ |
1647 | #define SPEED_MODE_BIT (1 << 21) | 1647 | #define SPEED_MODE_BIT (1 << 21) |
1648 | tarc |= SPEED_MODE_BIT; | 1648 | tarc |= SPEED_MODE_BIT; |
1649 | ew32(TARC0, tarc); | 1649 | ew32(TARC(0), tarc); |
1650 | } | 1650 | } |
1651 | 1651 | ||
1652 | /* errata: program both queues to unweighted RR */ | 1652 | /* errata: program both queues to unweighted RR */ |
1653 | if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { | 1653 | if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { |
1654 | tarc = er32(TARC0); | 1654 | tarc = er32(TARC(0)); |
1655 | tarc |= 1; | 1655 | tarc |= 1; |
1656 | ew32(TARC0, tarc); | 1656 | ew32(TARC(0), tarc); |
1657 | tarc = er32(TARC1); | 1657 | tarc = er32(TARC(1)); |
1658 | tarc |= 1; | 1658 | tarc |= 1; |
1659 | ew32(TARC1, tarc); | 1659 | ew32(TARC(1), tarc); |
1660 | } | 1660 | } |
1661 | 1661 | ||
1662 | e1000e_config_collision_dist(hw); | 1662 | e1000e_config_collision_dist(hw); |
@@ -2775,9 +2775,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2775 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && | 2775 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && |
2776 | !txb2b) { | 2776 | !txb2b) { |
2777 | u32 tarc0; | 2777 | u32 tarc0; |
2778 | tarc0 = er32(TARC0); | 2778 | tarc0 = er32(TARC(0)); |
2779 | tarc0 &= ~SPEED_MODE_BIT; | 2779 | tarc0 &= ~SPEED_MODE_BIT; |
2780 | ew32(TARC0, tarc0); | 2780 | ew32(TARC(0), tarc0); |
2781 | } | 2781 | } |
2782 | 2782 | ||
2783 | /* | 2783 | /* |
@@ -3824,7 +3824,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) | |||
3824 | { | 3824 | { |
3825 | struct e1000_hw *hw = &adapter->hw; | 3825 | struct e1000_hw *hw = &adapter->hw; |
3826 | struct net_device *netdev = adapter->netdev; | 3826 | struct net_device *netdev = adapter->netdev; |
3827 | u32 part_num; | 3827 | u32 pba_num; |
3828 | 3828 | ||
3829 | /* print bus type/speed/width info */ | 3829 | /* print bus type/speed/width info */ |
3830 | ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " | 3830 | ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " |
@@ -3839,10 +3839,10 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) | |||
3839 | ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", | 3839 | ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", |
3840 | (hw->phy.type == e1000_phy_ife) | 3840 | (hw->phy.type == e1000_phy_ife) |
3841 | ? "10/100" : "1000"); | 3841 | ? "10/100" : "1000"); |
3842 | e1000e_read_part_num(hw, &part_num); | 3842 | e1000e_read_pba_num(hw, &pba_num); |
3843 | ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 3843 | ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
3844 | hw->mac.type, hw->phy.type, | 3844 | hw->mac.type, hw->phy.type, |
3845 | (part_num >> 8), (part_num & 0xff)); | 3845 | (pba_num >> 8), (pba_num & 0xff)); |
3846 | } | 3846 | } |
3847 | 3847 | ||
3848 | /** | 3848 | /** |
@@ -3974,7 +3974,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
3974 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); | 3974 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); |
3975 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | 3975 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); |
3976 | 3976 | ||
3977 | err = ei->get_invariants(adapter); | 3977 | err = ei->get_variants(adapter); |
3978 | if (err) | 3978 | if (err) |
3979 | goto err_hw_init; | 3979 | goto err_hw_init; |
3980 | 3980 | ||
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 58b71e60204e..43b5f30743c2 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c | |||
@@ -198,7 +198,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev) | |||
198 | struct phy_device *phydev; | 198 | struct phy_device *phydev; |
199 | char phy_id[BUS_ID_SIZE]; | 199 | char phy_id[BUS_ID_SIZE]; |
200 | 200 | ||
201 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, | 201 | snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", |
202 | (unsigned int)dev->base_addr, priv->phy_addr); | 202 | (unsigned int)dev->base_addr, priv->phy_addr); |
203 | 203 | ||
204 | priv->link = PHY_DOWN; | 204 | priv->link = PHY_DOWN; |
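Here the PHY id string is built with an explicit "%x:%02x" instead of PHY_ID_FMT, presumably so it keeps matching the MDIO bus id that fec_mpc52xx_phy.c (next file) now writes with a bare "%x". A stand-alone sketch of the resulting string, using a made-up register base and PHY address:

#include <stdio.h>

int main(void)
{
	unsigned long base_addr = 0xf0003000;   /* hypothetical FEC register base */
	int phy_addr = 1;                       /* hypothetical PHY address */
	char phy_id[32];                        /* stand-in for BUS_ID_SIZE */

	snprintf(phy_id, sizeof(phy_id), "%x:%02x",
		 (unsigned int)base_addr, phy_addr);
	printf("%s\n", phy_id);                 /* "f0003000:01" */
	return 0;
}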
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 6a3ac4ea97e9..956836fc5ec0 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -124,7 +124,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i | |||
124 | goto out_free; | 124 | goto out_free; |
125 | } | 125 | } |
126 | 126 | ||
127 | bus->id = res.start; | 127 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); |
128 | bus->priv = priv; | 128 | bus->priv = priv; |
129 | 129 | ||
130 | bus->dev = dev; | 130 | bus->dev = dev; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 3338b115fa66..8c4214b0ee1f 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -29,90 +29,6 @@ | |||
29 | * along with this program; if not, write to the Free Software | 29 | * along with this program; if not, write to the Free Software |
30 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 30 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
31 | * | 31 | * |
32 | * Changelog: | ||
33 | * 0.01: 05 Oct 2003: First release that compiles without warnings. | ||
34 | * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs. | ||
35 | * Check all PCI BARs for the register window. | ||
36 | * udelay added to mii_rw. | ||
37 | * 0.03: 06 Oct 2003: Initialize dev->irq. | ||
38 | * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks. | ||
39 | * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout. | ||
40 | * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated, | ||
41 | * irq mask updated | ||
42 | * 0.07: 14 Oct 2003: Further irq mask updates. | ||
43 | * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill | ||
44 | * added into irq handler, NULL check for drain_ring. | ||
45 | * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the | ||
46 | * requested interrupt sources. | ||
47 | * 0.10: 20 Oct 2003: First cleanup for release. | ||
48 | * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased. | ||
49 | * MAC Address init fix, set_multicast cleanup. | ||
50 | * 0.12: 23 Oct 2003: Cleanups for release. | ||
51 | * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10. | ||
52 | * Set link speed correctly. start rx before starting | ||
53 | * tx (nv_start_rx sets the link speed). | ||
54 | * 0.14: 25 Oct 2003: Nic dependant irq mask. | ||
55 | * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during | ||
56 | * open. | ||
57 | * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size | ||
58 | * increased to 1628 bytes. | ||
59 | * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from | ||
60 | * the tx length. | ||
61 | * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats | ||
62 | * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac | ||
63 | * addresses, really stop rx if already running | ||
64 | * in nv_start_rx, clean up a bit. | ||
65 | * 0.20: 07 Dec 2003: alloc fixes | ||
66 | * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. | ||
67 | * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup | ||
68 | * on close. | ||
69 | * 0.23: 26 Jan 2004: various small cleanups | ||
70 | * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces | ||
71 | * 0.25: 09 Mar 2004: wol support | ||
72 | * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes | ||
73 | * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, | ||
74 | * added CK804/MCP04 device IDs, code fixes | ||
75 | * for registers, link status and other minor fixes. | ||
76 | * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe | ||
77 | * 0.29: 31 Aug 2004: Add backup timer for link change notification. | ||
78 | * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset | ||
79 | * into nv_close, otherwise reenabling for wol can | ||
80 | * cause DMA to kfree'd memory. | ||
81 | * 0.31: 14 Nov 2004: ethtool support for getting/setting link | ||
82 | * capabilities. | ||
83 | * 0.32: 16 Apr 2005: RX_ERROR4 handling added. | ||
84 | * 0.33: 16 May 2005: Support for MCP51 added. | ||
85 | * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. | ||
86 | * 0.35: 26 Jun 2005: Support for MCP55 added. | ||
87 | * 0.36: 28 Jun 2005: Add jumbo frame support. | ||
88 | * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list | ||
89 | * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of | ||
90 | * per-packet flags. | ||
91 | * 0.39: 18 Jul 2005: Add 64bit descriptor support. | ||
92 | * 0.40: 19 Jul 2005: Add support for mac address change. | ||
93 | * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead | ||
94 | * of nv_remove | ||
95 | * 0.42: 06 Aug 2005: Fix lack of link speed initialization | ||
96 | * in the second (and later) nv_open call | ||
97 | * 0.43: 10 Aug 2005: Add support for tx checksum. | ||
98 | * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. | ||
99 | * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check | ||
100 | * 0.46: 20 Oct 2005: Add irq optimization modes. | ||
101 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. | ||
102 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single | ||
103 | * 0.49: 10 Dec 2005: Fix tso for large buffers. | ||
104 | * 0.50: 20 Jan 2006: Add 8021pq tagging support. | ||
105 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. | ||
106 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | ||
107 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. | ||
108 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. | ||
109 | * 0.55: 22 Mar 2006: Add flow control (pause frame). | ||
110 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. | ||
111 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. | ||
112 | * 0.58: 30 Oct 2006: Added support for sideband management unit. | ||
113 | * 0.59: 30 Oct 2006: Added support for recoverable error. | ||
114 | * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats. | ||
115 | * | ||
116 | * Known bugs: | 32 | * Known bugs: |
117 | * We suspect that on some hardware no TX done interrupts are generated. | 33 | * We suspect that on some hardware no TX done interrupts are generated. |
118 | * This means recovery from netif_stop_queue only happens if the hw timer | 34 | * This means recovery from netif_stop_queue only happens if the hw timer |
@@ -123,11 +39,6 @@ | |||
123 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 39 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
124 | * superfluous timer interrupts from the nic. | 40 | * superfluous timer interrupts from the nic. |
125 | */ | 41 | */ |
126 | #ifdef CONFIG_FORCEDETH_NAPI | ||
127 | #define DRIVERNAPI "-NAPI" | ||
128 | #else | ||
129 | #define DRIVERNAPI | ||
130 | #endif | ||
131 | #define FORCEDETH_VERSION "0.61" | 42 | #define FORCEDETH_VERSION "0.61" |
132 | #define DRV_NAME "forcedeth" | 43 | #define DRV_NAME "forcedeth" |
133 | 44 | ||
@@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) | |||
930 | return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; | 841 | return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; |
931 | } | 842 | } |
932 | 843 | ||
844 | static bool nv_optimized(struct fe_priv *np) | ||
845 | { | ||
846 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | ||
847 | return false; | ||
848 | return true; | ||
849 | } | ||
850 | |||
933 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, | 851 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, |
934 | int delay, int delaymax, const char *msg) | 852 | int delay, int delaymax, const char *msg) |
935 | { | 853 | { |
@@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |||
966 | struct fe_priv *np = get_nvpriv(dev); | 884 | struct fe_priv *np = get_nvpriv(dev); |
967 | u8 __iomem *base = get_hwbase(dev); | 885 | u8 __iomem *base = get_hwbase(dev); |
968 | 886 | ||
969 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 887 | if (!nv_optimized(np)) { |
970 | if (rxtx_flags & NV_SETUP_RX_RING) { | 888 | if (rxtx_flags & NV_SETUP_RX_RING) { |
971 | writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); | 889 | writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); |
972 | } | 890 | } |
@@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev) | |||
989 | { | 907 | { |
990 | struct fe_priv *np = get_nvpriv(dev); | 908 | struct fe_priv *np = get_nvpriv(dev); |
991 | 909 | ||
992 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 910 | if (!nv_optimized(np)) { |
993 | if (np->rx_ring.orig) | 911 | if (np->rx_ring.orig) |
994 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), | 912 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
995 | np->rx_ring.orig, np->ring_addr); | 913 | np->rx_ring.orig, np->ring_addr); |
@@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev) | |||
1435 | base + NvRegTransmitPoll); | 1353 | base + NvRegTransmitPoll); |
1436 | } | 1354 | } |
1437 | 1355 | ||
1356 | static void nv_start_rxtx(struct net_device *dev) | ||
1357 | { | ||
1358 | nv_start_rx(dev); | ||
1359 | nv_start_tx(dev); | ||
1360 | } | ||
1361 | |||
1362 | static void nv_stop_rxtx(struct net_device *dev) | ||
1363 | { | ||
1364 | nv_stop_rx(dev); | ||
1365 | nv_stop_tx(dev); | ||
1366 | } | ||
1367 | |||
1438 | static void nv_txrx_reset(struct net_device *dev) | 1368 | static void nv_txrx_reset(struct net_device *dev) |
1439 | { | 1369 | { |
1440 | struct fe_priv *np = netdev_priv(dev); | 1370 | struct fe_priv *np = netdev_priv(dev); |
@@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data) | |||
1657 | } else { | 1587 | } else { |
1658 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | 1588 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
1659 | } | 1589 | } |
1660 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1590 | if (!nv_optimized(np)) |
1661 | retcode = nv_alloc_rx(dev); | 1591 | retcode = nv_alloc_rx(dev); |
1662 | else | 1592 | else |
1663 | retcode = nv_alloc_rx_optimized(dev); | 1593 | retcode = nv_alloc_rx_optimized(dev); |
@@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev) | |||
1682 | { | 1612 | { |
1683 | struct fe_priv *np = netdev_priv(dev); | 1613 | struct fe_priv *np = netdev_priv(dev); |
1684 | int i; | 1614 | int i; |
1615 | |||
1685 | np->get_rx = np->put_rx = np->first_rx = np->rx_ring; | 1616 | np->get_rx = np->put_rx = np->first_rx = np->rx_ring; |
1686 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1617 | |
1618 | if (!nv_optimized(np)) | ||
1687 | np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; | 1619 | np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; |
1688 | else | 1620 | else |
1689 | np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; | 1621 | np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; |
@@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev) | |||
1691 | np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; | 1623 | np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; |
1692 | 1624 | ||
1693 | for (i = 0; i < np->rx_ring_size; i++) { | 1625 | for (i = 0; i < np->rx_ring_size; i++) { |
1694 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1626 | if (!nv_optimized(np)) { |
1695 | np->rx_ring.orig[i].flaglen = 0; | 1627 | np->rx_ring.orig[i].flaglen = 0; |
1696 | np->rx_ring.orig[i].buf = 0; | 1628 | np->rx_ring.orig[i].buf = 0; |
1697 | } else { | 1629 | } else { |
@@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev) | |||
1709 | { | 1641 | { |
1710 | struct fe_priv *np = netdev_priv(dev); | 1642 | struct fe_priv *np = netdev_priv(dev); |
1711 | int i; | 1643 | int i; |
1644 | |||
1712 | np->get_tx = np->put_tx = np->first_tx = np->tx_ring; | 1645 | np->get_tx = np->put_tx = np->first_tx = np->tx_ring; |
1713 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1646 | |
1647 | if (!nv_optimized(np)) | ||
1714 | np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; | 1648 | np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; |
1715 | else | 1649 | else |
1716 | np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; | 1650 | np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; |
@@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev) | |||
1721 | np->tx_end_flip = NULL; | 1655 | np->tx_end_flip = NULL; |
1722 | 1656 | ||
1723 | for (i = 0; i < np->tx_ring_size; i++) { | 1657 | for (i = 0; i < np->tx_ring_size; i++) { |
1724 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1658 | if (!nv_optimized(np)) { |
1725 | np->tx_ring.orig[i].flaglen = 0; | 1659 | np->tx_ring.orig[i].flaglen = 0; |
1726 | np->tx_ring.orig[i].buf = 0; | 1660 | np->tx_ring.orig[i].buf = 0; |
1727 | } else { | 1661 | } else { |
@@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev) | |||
1744 | 1678 | ||
1745 | nv_init_tx(dev); | 1679 | nv_init_tx(dev); |
1746 | nv_init_rx(dev); | 1680 | nv_init_rx(dev); |
1747 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1681 | |
1682 | if (!nv_optimized(np)) | ||
1748 | return nv_alloc_rx(dev); | 1683 | return nv_alloc_rx(dev); |
1749 | else | 1684 | else |
1750 | return nv_alloc_rx_optimized(dev); | 1685 | return nv_alloc_rx_optimized(dev); |
@@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev) | |||
1775 | unsigned int i; | 1710 | unsigned int i; |
1776 | 1711 | ||
1777 | for (i = 0; i < np->tx_ring_size; i++) { | 1712 | for (i = 0; i < np->tx_ring_size; i++) { |
1778 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1713 | if (!nv_optimized(np)) { |
1779 | np->tx_ring.orig[i].flaglen = 0; | 1714 | np->tx_ring.orig[i].flaglen = 0; |
1780 | np->tx_ring.orig[i].buf = 0; | 1715 | np->tx_ring.orig[i].buf = 0; |
1781 | } else { | 1716 | } else { |
@@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev) | |||
1802 | int i; | 1737 | int i; |
1803 | 1738 | ||
1804 | for (i = 0; i < np->rx_ring_size; i++) { | 1739 | for (i = 0; i < np->rx_ring_size; i++) { |
1805 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1740 | if (!nv_optimized(np)) { |
1806 | np->rx_ring.orig[i].flaglen = 0; | 1741 | np->rx_ring.orig[i].flaglen = 0; |
1807 | np->rx_ring.orig[i].buf = 0; | 1742 | np->rx_ring.orig[i].buf = 0; |
1808 | } else { | 1743 | } else { |
@@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev) | |||
1823 | } | 1758 | } |
1824 | } | 1759 | } |
1825 | 1760 | ||
1826 | static void drain_ring(struct net_device *dev) | 1761 | static void nv_drain_rxtx(struct net_device *dev) |
1827 | { | 1762 | { |
1828 | nv_drain_tx(dev); | 1763 | nv_drain_tx(dev); |
1829 | nv_drain_rx(dev); | 1764 | nv_drain_rx(dev); |
@@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
2260 | } | 2195 | } |
2261 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); | 2196 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); |
2262 | for (i=0;i<np->tx_ring_size;i+= 4) { | 2197 | for (i=0;i<np->tx_ring_size;i+= 4) { |
2263 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 2198 | if (!nv_optimized(np)) { |
2264 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | 2199 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", |
2265 | i, | 2200 | i, |
2266 | le32_to_cpu(np->tx_ring.orig[i].buf), | 2201 | le32_to_cpu(np->tx_ring.orig[i].buf), |
@@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
2296 | nv_stop_tx(dev); | 2231 | nv_stop_tx(dev); |
2297 | 2232 | ||
2298 | /* 2) check that the packets were not sent already: */ | 2233 | /* 2) check that the packets were not sent already: */ |
2299 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 2234 | if (!nv_optimized(np)) |
2300 | nv_tx_done(dev); | 2235 | nv_tx_done(dev); |
2301 | else | 2236 | else |
2302 | nv_tx_done_optimized(dev, np->tx_ring_size); | 2237 | nv_tx_done_optimized(dev, np->tx_ring_size); |
@@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
2663 | netif_tx_lock_bh(dev); | 2598 | netif_tx_lock_bh(dev); |
2664 | spin_lock(&np->lock); | 2599 | spin_lock(&np->lock); |
2665 | /* stop engines */ | 2600 | /* stop engines */ |
2666 | nv_stop_rx(dev); | 2601 | nv_stop_rxtx(dev); |
2667 | nv_stop_tx(dev); | ||
2668 | nv_txrx_reset(dev); | 2602 | nv_txrx_reset(dev); |
2669 | /* drain rx queue */ | 2603 | /* drain rx queue */ |
2670 | nv_drain_rx(dev); | 2604 | nv_drain_rxtx(dev); |
2671 | nv_drain_tx(dev); | ||
2672 | /* reinit driver view of the rx queue */ | 2605 | /* reinit driver view of the rx queue */ |
2673 | set_bufsize(dev); | 2606 | set_bufsize(dev); |
2674 | if (nv_init_ring(dev)) { | 2607 | if (nv_init_ring(dev)) { |
@@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
2685 | pci_push(base); | 2618 | pci_push(base); |
2686 | 2619 | ||
2687 | /* restart rx engine */ | 2620 | /* restart rx engine */ |
2688 | nv_start_rx(dev); | 2621 | nv_start_rxtx(dev); |
2689 | nv_start_tx(dev); | ||
2690 | spin_unlock(&np->lock); | 2622 | spin_unlock(&np->lock); |
2691 | netif_tx_unlock_bh(dev); | 2623 | netif_tx_unlock_bh(dev); |
2692 | nv_enable_irq(dev); | 2624 | nv_enable_irq(dev); |
@@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) | |||
3393 | unsigned long flags; | 3325 | unsigned long flags; |
3394 | int pkts, retcode; | 3326 | int pkts, retcode; |
3395 | 3327 | ||
3396 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3328 | if (!nv_optimized(np)) { |
3397 | pkts = nv_rx_process(dev, budget); | 3329 | pkts = nv_rx_process(dev, budget); |
3398 | retcode = nv_alloc_rx(dev); | 3330 | retcode = nv_alloc_rx(dev); |
3399 | } else { | 3331 | } else { |
@@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test) | |||
3634 | if (intr_test) { | 3566 | if (intr_test) { |
3635 | handler = nv_nic_irq_test; | 3567 | handler = nv_nic_irq_test; |
3636 | } else { | 3568 | } else { |
3637 | if (np->desc_ver == DESC_VER_3) | 3569 | if (nv_optimized(np)) |
3638 | handler = nv_nic_irq_optimized; | 3570 | handler = nv_nic_irq_optimized; |
3639 | else | 3571 | else |
3640 | handler = nv_nic_irq; | 3572 | handler = nv_nic_irq; |
@@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data) | |||
3787 | netif_tx_lock_bh(dev); | 3719 | netif_tx_lock_bh(dev); |
3788 | spin_lock(&np->lock); | 3720 | spin_lock(&np->lock); |
3789 | /* stop engines */ | 3721 | /* stop engines */ |
3790 | nv_stop_rx(dev); | 3722 | nv_stop_rxtx(dev); |
3791 | nv_stop_tx(dev); | ||
3792 | nv_txrx_reset(dev); | 3723 | nv_txrx_reset(dev); |
3793 | /* drain rx queue */ | 3724 | /* drain rx queue */ |
3794 | nv_drain_rx(dev); | 3725 | nv_drain_rxtx(dev); |
3795 | nv_drain_tx(dev); | ||
3796 | /* reinit driver view of the rx queue */ | 3726 | /* reinit driver view of the rx queue */ |
3797 | set_bufsize(dev); | 3727 | set_bufsize(dev); |
3798 | if (nv_init_ring(dev)) { | 3728 | if (nv_init_ring(dev)) { |
@@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data) | |||
3809 | pci_push(base); | 3739 | pci_push(base); |
3810 | 3740 | ||
3811 | /* restart rx engine */ | 3741 | /* restart rx engine */ |
3812 | nv_start_rx(dev); | 3742 | nv_start_rxtx(dev); |
3813 | nv_start_tx(dev); | ||
3814 | spin_unlock(&np->lock); | 3743 | spin_unlock(&np->lock); |
3815 | netif_tx_unlock_bh(dev); | 3744 | netif_tx_unlock_bh(dev); |
3816 | } | 3745 | } |
@@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data) | |||
3821 | pci_push(base); | 3750 | pci_push(base); |
3822 | 3751 | ||
3823 | if (!using_multi_irqs(dev)) { | 3752 | if (!using_multi_irqs(dev)) { |
3824 | if (np->desc_ver == DESC_VER_3) | 3753 | if (nv_optimized(np)) |
3825 | nv_nic_irq_optimized(0, dev); | 3754 | nv_nic_irq_optimized(0, dev); |
3826 | else | 3755 | else |
3827 | nv_nic_irq(0, dev); | 3756 | nv_nic_irq(0, dev); |
@@ -4019,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
4019 | netif_tx_lock_bh(dev); | 3948 | netif_tx_lock_bh(dev); |
4020 | spin_lock(&np->lock); | 3949 | spin_lock(&np->lock); |
4021 | /* stop engines */ | 3950 | /* stop engines */ |
4022 | nv_stop_rx(dev); | 3951 | nv_stop_rxtx(dev); |
4023 | nv_stop_tx(dev); | ||
4024 | spin_unlock(&np->lock); | 3952 | spin_unlock(&np->lock); |
4025 | netif_tx_unlock_bh(dev); | 3953 | netif_tx_unlock_bh(dev); |
4026 | } | 3954 | } |
@@ -4126,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
4126 | } | 4054 | } |
4127 | 4055 | ||
4128 | if (netif_running(dev)) { | 4056 | if (netif_running(dev)) { |
4129 | nv_start_rx(dev); | 4057 | nv_start_rxtx(dev); |
4130 | nv_start_tx(dev); | ||
4131 | nv_enable_irq(dev); | 4058 | nv_enable_irq(dev); |
4132 | } | 4059 | } |
4133 | 4060 | ||
@@ -4170,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev) | |||
4170 | netif_tx_lock_bh(dev); | 4097 | netif_tx_lock_bh(dev); |
4171 | spin_lock(&np->lock); | 4098 | spin_lock(&np->lock); |
4172 | /* stop engines */ | 4099 | /* stop engines */ |
4173 | nv_stop_rx(dev); | 4100 | nv_stop_rxtx(dev); |
4174 | nv_stop_tx(dev); | ||
4175 | spin_unlock(&np->lock); | 4101 | spin_unlock(&np->lock); |
4176 | netif_tx_unlock_bh(dev); | 4102 | netif_tx_unlock_bh(dev); |
4177 | printk(KERN_INFO "%s: link down.\n", dev->name); | 4103 | printk(KERN_INFO "%s: link down.\n", dev->name); |
@@ -4191,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev) | |||
4191 | } | 4117 | } |
4192 | 4118 | ||
4193 | if (netif_running(dev)) { | 4119 | if (netif_running(dev)) { |
4194 | nv_start_rx(dev); | 4120 | nv_start_rxtx(dev); |
4195 | nv_start_tx(dev); | ||
4196 | nv_enable_irq(dev); | 4121 | nv_enable_irq(dev); |
4197 | } | 4122 | } |
4198 | ret = 0; | 4123 | ret = 0; |
@@ -4249,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
4249 | } | 4174 | } |
4250 | 4175 | ||
4251 | /* allocate new rings */ | 4176 | /* allocate new rings */ |
4252 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4177 | if (!nv_optimized(np)) { |
4253 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | 4178 | rxtx_ring = pci_alloc_consistent(np->pci_dev, |
4254 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | 4179 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), |
4255 | &ring_addr); | 4180 | &ring_addr); |
@@ -4262,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
4262 | tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); | 4187 | tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); |
4263 | if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { | 4188 | if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { |
4264 | /* fall back to old rings */ | 4189 | /* fall back to old rings */ |
4265 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4190 | if (!nv_optimized(np)) { |
4266 | if (rxtx_ring) | 4191 | if (rxtx_ring) |
4267 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | 4192 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), |
4268 | rxtx_ring, ring_addr); | 4193 | rxtx_ring, ring_addr); |
@@ -4283,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
4283 | netif_tx_lock_bh(dev); | 4208 | netif_tx_lock_bh(dev); |
4284 | spin_lock(&np->lock); | 4209 | spin_lock(&np->lock); |
4285 | /* stop engines */ | 4210 | /* stop engines */ |
4286 | nv_stop_rx(dev); | 4211 | nv_stop_rxtx(dev); |
4287 | nv_stop_tx(dev); | ||
4288 | nv_txrx_reset(dev); | 4212 | nv_txrx_reset(dev); |
4289 | /* drain queues */ | 4213 | /* drain queues */ |
4290 | nv_drain_rx(dev); | 4214 | nv_drain_rxtx(dev); |
4291 | nv_drain_tx(dev); | ||
4292 | /* delete queues */ | 4215 | /* delete queues */ |
4293 | free_rings(dev); | 4216 | free_rings(dev); |
4294 | } | 4217 | } |
@@ -4296,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
4296 | /* set new values */ | 4219 | /* set new values */ |
4297 | np->rx_ring_size = ring->rx_pending; | 4220 | np->rx_ring_size = ring->rx_pending; |
4298 | np->tx_ring_size = ring->tx_pending; | 4221 | np->tx_ring_size = ring->tx_pending; |
4299 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4222 | |
4223 | if (!nv_optimized(np)) { | ||
4300 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | 4224 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; |
4301 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | 4225 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; |
4302 | } else { | 4226 | } else { |
@@ -4328,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
4328 | pci_push(base); | 4252 | pci_push(base); |
4329 | 4253 | ||
4330 | /* restart engines */ | 4254 | /* restart engines */ |
4331 | nv_start_rx(dev); | 4255 | nv_start_rxtx(dev); |
4332 | nv_start_tx(dev); | ||
4333 | spin_unlock(&np->lock); | 4256 | spin_unlock(&np->lock); |
4334 | netif_tx_unlock_bh(dev); | 4257 | netif_tx_unlock_bh(dev); |
4335 | nv_enable_irq(dev); | 4258 | nv_enable_irq(dev); |
@@ -4370,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* | |||
4370 | netif_tx_lock_bh(dev); | 4293 | netif_tx_lock_bh(dev); |
4371 | spin_lock(&np->lock); | 4294 | spin_lock(&np->lock); |
4372 | /* stop engines */ | 4295 | /* stop engines */ |
4373 | nv_stop_rx(dev); | 4296 | nv_stop_rxtx(dev); |
4374 | nv_stop_tx(dev); | ||
4375 | spin_unlock(&np->lock); | 4297 | spin_unlock(&np->lock); |
4376 | netif_tx_unlock_bh(dev); | 4298 | netif_tx_unlock_bh(dev); |
4377 | } | 4299 | } |
@@ -4412,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* | |||
4412 | } | 4334 | } |
4413 | 4335 | ||
4414 | if (netif_running(dev)) { | 4336 | if (netif_running(dev)) { |
4415 | nv_start_rx(dev); | 4337 | nv_start_rxtx(dev); |
4416 | nv_start_tx(dev); | ||
4417 | nv_enable_irq(dev); | 4338 | nv_enable_irq(dev); |
4418 | } | 4339 | } |
4419 | return 0; | 4340 | return 0; |
@@ -4649,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
4649 | pci_push(base); | 4570 | pci_push(base); |
4650 | 4571 | ||
4651 | /* restart rx engine */ | 4572 | /* restart rx engine */ |
4652 | nv_start_rx(dev); | 4573 | nv_start_rxtx(dev); |
4653 | nv_start_tx(dev); | ||
4654 | 4574 | ||
4655 | /* setup packet for tx */ | 4575 | /* setup packet for tx */ |
4656 | pkt_len = ETH_DATA_LEN; | 4576 | pkt_len = ETH_DATA_LEN; |
@@ -4668,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
4668 | for (i = 0; i < pkt_len; i++) | 4588 | for (i = 0; i < pkt_len; i++) |
4669 | pkt_data[i] = (u8)(i & 0xff); | 4589 | pkt_data[i] = (u8)(i & 0xff); |
4670 | 4590 | ||
4671 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4591 | if (!nv_optimized(np)) { |
4672 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); | 4592 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
4673 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | 4593 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); |
4674 | } else { | 4594 | } else { |
@@ -4682,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
4682 | msleep(500); | 4602 | msleep(500); |
4683 | 4603 | ||
4684 | /* check for rx of the packet */ | 4604 | /* check for rx of the packet */ |
4685 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 4605 | if (!nv_optimized(np)) { |
4686 | flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); | 4606 | flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); |
4687 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); | 4607 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); |
4688 | 4608 | ||
@@ -4728,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev) | |||
4728 | dev_kfree_skb_any(tx_skb); | 4648 | dev_kfree_skb_any(tx_skb); |
4729 | out: | 4649 | out: |
4730 | /* stop engines */ | 4650 | /* stop engines */ |
4731 | nv_stop_rx(dev); | 4651 | nv_stop_rxtx(dev); |
4732 | nv_stop_tx(dev); | ||
4733 | nv_txrx_reset(dev); | 4652 | nv_txrx_reset(dev); |
4734 | /* drain rx queue */ | 4653 | /* drain rx queue */ |
4735 | nv_drain_rx(dev); | 4654 | nv_drain_rxtx(dev); |
4736 | nv_drain_tx(dev); | ||
4737 | 4655 | ||
4738 | if (netif_running(dev)) { | 4656 | if (netif_running(dev)) { |
4739 | writel(misc1_flags, base + NvRegMisc1); | 4657 | writel(misc1_flags, base + NvRegMisc1); |
@@ -4771,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
4771 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | 4689 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); |
4772 | } | 4690 | } |
4773 | /* stop engines */ | 4691 | /* stop engines */ |
4774 | nv_stop_rx(dev); | 4692 | nv_stop_rxtx(dev); |
4775 | nv_stop_tx(dev); | ||
4776 | nv_txrx_reset(dev); | 4693 | nv_txrx_reset(dev); |
4777 | /* drain rx queue */ | 4694 | /* drain rx queue */ |
4778 | nv_drain_rx(dev); | 4695 | nv_drain_rxtx(dev); |
4779 | nv_drain_tx(dev); | ||
4780 | spin_unlock_irq(&np->lock); | 4696 | spin_unlock_irq(&np->lock); |
4781 | netif_tx_unlock_bh(dev); | 4697 | netif_tx_unlock_bh(dev); |
4782 | } | 4698 | } |
@@ -4817,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
4817 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | 4733 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
4818 | pci_push(base); | 4734 | pci_push(base); |
4819 | /* restart rx engine */ | 4735 | /* restart rx engine */ |
4820 | nv_start_rx(dev); | 4736 | nv_start_rxtx(dev); |
4821 | nv_start_tx(dev); | ||
4822 | netif_start_queue(dev); | 4737 | netif_start_queue(dev); |
4823 | #ifdef CONFIG_FORCEDETH_NAPI | 4738 | #ifdef CONFIG_FORCEDETH_NAPI |
4824 | napi_enable(&np->napi); | 4739 | napi_enable(&np->napi); |
@@ -5047,8 +4962,7 @@ static int nv_open(struct net_device *dev) | |||
5047 | * to init hw */ | 4962 | * to init hw */ |
5048 | np->linkspeed = 0; | 4963 | np->linkspeed = 0; |
5049 | ret = nv_update_linkspeed(dev); | 4964 | ret = nv_update_linkspeed(dev); |
5050 | nv_start_rx(dev); | 4965 | nv_start_rxtx(dev); |
5051 | nv_start_tx(dev); | ||
5052 | netif_start_queue(dev); | 4966 | netif_start_queue(dev); |
5053 | #ifdef CONFIG_FORCEDETH_NAPI | 4967 | #ifdef CONFIG_FORCEDETH_NAPI |
5054 | napi_enable(&np->napi); | 4968 | napi_enable(&np->napi); |
@@ -5072,7 +4986,7 @@ static int nv_open(struct net_device *dev) | |||
5072 | 4986 | ||
5073 | return 0; | 4987 | return 0; |
5074 | out_drain: | 4988 | out_drain: |
5075 | drain_ring(dev); | 4989 | nv_drain_rxtx(dev); |
5076 | return ret; | 4990 | return ret; |
5077 | } | 4991 | } |
5078 | 4992 | ||
@@ -5095,8 +5009,7 @@ static int nv_close(struct net_device *dev) | |||
5095 | 5009 | ||
5096 | netif_stop_queue(dev); | 5010 | netif_stop_queue(dev); |
5097 | spin_lock_irq(&np->lock); | 5011 | spin_lock_irq(&np->lock); |
5098 | nv_stop_tx(dev); | 5012 | nv_stop_rxtx(dev); |
5099 | nv_stop_rx(dev); | ||
5100 | nv_txrx_reset(dev); | 5013 | nv_txrx_reset(dev); |
5101 | 5014 | ||
5102 | /* disable interrupts on the nic or we will lock up */ | 5015 | /* disable interrupts on the nic or we will lock up */ |
@@ -5109,7 +5022,7 @@ static int nv_close(struct net_device *dev) | |||
5109 | 5022 | ||
5110 | nv_free_irq(dev); | 5023 | nv_free_irq(dev); |
5111 | 5024 | ||
5112 | drain_ring(dev); | 5025 | nv_drain_rxtx(dev); |
5113 | 5026 | ||
5114 | if (np->wolenabled) { | 5027 | if (np->wolenabled) { |
5115 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | 5028 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); |
@@ -5269,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5269 | np->rx_ring_size = RX_RING_DEFAULT; | 5182 | np->rx_ring_size = RX_RING_DEFAULT; |
5270 | np->tx_ring_size = TX_RING_DEFAULT; | 5183 | np->tx_ring_size = TX_RING_DEFAULT; |
5271 | 5184 | ||
5272 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 5185 | if (!nv_optimized(np)) { |
5273 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, | 5186 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, |
5274 | sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), | 5187 | sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
5275 | &np->ring_addr); | 5188 | &np->ring_addr); |
@@ -5291,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5291 | 5204 | ||
5292 | dev->open = nv_open; | 5205 | dev->open = nv_open; |
5293 | dev->stop = nv_close; | 5206 | dev->stop = nv_close; |
5294 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 5207 | |
5208 | if (!nv_optimized(np)) | ||
5295 | dev->hard_start_xmit = nv_start_xmit; | 5209 | dev->hard_start_xmit = nv_start_xmit; |
5296 | else | 5210 | else |
5297 | dev->hard_start_xmit = nv_start_xmit_optimized; | 5211 | dev->hard_start_xmit = nv_start_xmit_optimized; |
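Across the forcedeth.c hunks above, the change is a straight consolidation: every back-to-back nv_start_rx()/nv_start_tx(), nv_stop_rx()/nv_stop_tx() and nv_drain_rx()/nv_drain_tx() pair (and the old drain_ring() calls) becomes a single *_rxtx() helper, and the repeated `desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2` test is folded into nv_optimized(). Note that pre-existing comments such as "/* restart rx engine */" are carried over unchanged even though the helper now restarts both engines. The helper definitions are added earlier in the patch and are not visible in this excerpt; the sketch below shows what they presumably reduce to, inferred from the call sites here, and it leans on forcedeth's own driver-internal functions and struct fe_priv rather than being self-contained.

```c
/*
 * Presumed shape of the new forcedeth helpers (their definitions are added
 * earlier in the patch and not shown in this excerpt); inferred from the
 * call sites above, so treat this as a sketch rather than the exact code.
 */
static inline int nv_optimized(struct fe_priv *np)
{
	/* DESC_VER_1/2 use the legacy descriptor format; DESC_VER_3 is "optimized" */
	return !(np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_drain_rxtx(struct net_device *dev)	/* takes over from drain_ring() */
{
	/* order follows the old call sites above */
	nv_drain_rx(dev);
	nv_drain_tx(dev);
}
```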
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 940e2041ba38..67b4b0728fce 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1178,7 +1178,7 @@ static int __devinit find_phy(struct device_node *np, | |||
1178 | 1178 | ||
1179 | data = of_get_property(np, "fixed-link", NULL); | 1179 | data = of_get_property(np, "fixed-link", NULL); |
1180 | if (data) { | 1180 | if (data) { |
1181 | snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data); | 1181 | snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data); |
1182 | return 0; | 1182 | return 0; |
1183 | } | 1183 | } |
1184 | 1184 | ||
@@ -1202,7 +1202,7 @@ static int __devinit find_phy(struct device_node *np, | |||
1202 | if (!data || len != 4) | 1202 | if (!data || len != 4) |
1203 | goto out_put_mdio; | 1203 | goto out_put_mdio; |
1204 | 1204 | ||
1205 | snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data); | 1205 | snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data); |
1206 | 1206 | ||
1207 | out_put_mdio: | 1207 | out_put_mdio: |
1208 | of_node_put(mdionode); | 1208 | of_node_put(mdionode); |
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index b8e4a736a130..1620030cd33c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -130,7 +130,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, | |||
130 | * we get is an int, and the odds of multiple bitbang mdio buses | 130 | * we get is an int, and the odds of multiple bitbang mdio buses |
131 | * is low enough that it's not worth going too crazy. | 131 | * is low enough that it's not worth going too crazy. |
132 | */ | 132 | */ |
133 | bus->id = res.start; | 133 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); |
134 | 134 | ||
135 | data = of_get_property(np, "fsl,mdio-pin", &len); | 135 | data = of_get_property(np, "fsl,mdio-pin", &len); |
136 | if (!data || len != 4) | 136 | if (!data || len != 4) |
@@ -307,7 +307,7 @@ static int __devinit fs_enet_mdio_probe(struct device *dev) | |||
307 | return -ENOMEM; | 307 | return -ENOMEM; |
308 | 308 | ||
309 | new_bus->name = "BB MII Bus", | 309 | new_bus->name = "BB MII Bus", |
310 | new_bus->id = pdev->id; | 310 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); |
311 | 311 | ||
312 | new_bus->phy_mask = ~0x9; | 312 | new_bus->phy_mask = ~0x9; |
313 | pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; | 313 | pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; |
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a89cf15090b8..ba75efc9f5b5 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -196,7 +196,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, | |||
196 | if (ret) | 196 | if (ret) |
197 | return ret; | 197 | return ret; |
198 | 198 | ||
199 | new_bus->id = res.start; | 199 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); |
200 | 200 | ||
201 | fec->fecp = ioremap(res.start, res.end - res.start + 1); | 201 | fec->fecp = ioremap(res.start, res.end - res.start + 1); |
202 | if (!fec->fecp) | 202 | if (!fec->fecp) |
@@ -309,7 +309,7 @@ static int __devinit fs_enet_fec_mdio_probe(struct device *dev) | |||
309 | new_bus->read = &fs_enet_fec_mii_read, | 309 | new_bus->read = &fs_enet_fec_mii_read, |
310 | new_bus->write = &fs_enet_fec_mii_write, | 310 | new_bus->write = &fs_enet_fec_mii_write, |
311 | new_bus->reset = &fs_enet_fec_mii_reset, | 311 | new_bus->reset = &fs_enet_fec_mii_reset, |
312 | new_bus->id = pdev->id; | 312 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); |
313 | 313 | ||
314 | pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; | 314 | pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; |
315 | 315 | ||
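The ep8248e, gpio_mdio, fsl_soc, fs_enet and (further down) gianfar_mii hunks all follow from the same underlying change: the MDIO bus identifier fields consumed by the PHY layer are now fixed-size strings rather than ints, so every plain `bus->id = <number>` assignment becomes an snprintf() into a MII_BUS_ID_SIZE buffer, and composite PHY identifiers are formatted as hex "bus:addr" pairs via "%x:%02x". A self-contained illustration of the resulting naming convention follows; the buffer size and sample values are assumptions for the example, not taken from the patch.

```c
/*
 * Illustration only: the MDIO bus id is text, and a PHY is addressed as
 * "<busid>:<addr>".  MII_BUS_ID_SIZE and the sample values are made up here.
 */
#include <stdio.h>

#define MII_BUS_ID_SIZE 17	/* stand-in; the real constant lives in <linux/phy.h> */

int main(void)
{
	char bus_id[MII_BUS_ID_SIZE];
	char phy_id[32];
	unsigned int mdio_base = 0xe0024520;	/* hypothetical MDIO "reg" address */
	unsigned int phy_addr  = 1;		/* hypothetical PHY address on that bus */

	/* what snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start) produces */
	snprintf(bus_id, sizeof(bus_id), "%x", mdio_base);

	/* what snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data) produces */
	snprintf(phy_id, sizeof(phy_id), "%x:%02x", mdio_base, phy_addr);

	printf("bus id: %s\n", bus_id);	/* "e0024520"    */
	printf("phy id: %s\n", phy_id);	/* "e0024520:01" */
	return 0;
}
```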
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 601f93e482c6..c8c3df737d73 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev) | |||
1250 | } | 1250 | } |
1251 | 1251 | ||
1252 | /* Interrupt Handler for Transmit complete */ | 1252 | /* Interrupt Handler for Transmit complete */ |
1253 | static irqreturn_t gfar_transmit(int irq, void *dev_id) | 1253 | int gfar_clean_tx_ring(struct net_device *dev) |
1254 | { | 1254 | { |
1255 | struct net_device *dev = (struct net_device *) dev_id; | ||
1256 | struct gfar_private *priv = netdev_priv(dev); | ||
1257 | struct txbd8 *bdp; | 1255 | struct txbd8 *bdp; |
1256 | struct gfar_private *priv = netdev_priv(dev); | ||
1257 | int howmany = 0; | ||
1258 | 1258 | ||
1259 | /* Clear IEVENT */ | ||
1260 | gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); | ||
1261 | |||
1262 | /* Lock priv */ | ||
1263 | spin_lock(&priv->txlock); | ||
1264 | bdp = priv->dirty_tx; | 1259 | bdp = priv->dirty_tx; |
1265 | while ((bdp->status & TXBD_READY) == 0) { | 1260 | while ((bdp->status & TXBD_READY) == 0) { |
1266 | /* If dirty_tx and cur_tx are the same, then either the */ | 1261 | /* If dirty_tx and cur_tx are the same, then either the */ |
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1269 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) | 1264 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) |
1270 | break; | 1265 | break; |
1271 | 1266 | ||
1272 | dev->stats.tx_packets++; | 1267 | howmany++; |
1273 | 1268 | ||
1274 | /* Deferred means some collisions occurred during transmit, */ | 1269 | /* Deferred means some collisions occurred during transmit, */ |
1275 | /* but we eventually sent the packet. */ | 1270 | /* but we eventually sent the packet. */ |
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1278 | 1273 | ||
1279 | /* Free the sk buffer associated with this TxBD */ | 1274 | /* Free the sk buffer associated with this TxBD */ |
1280 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); | 1275 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); |
1276 | |||
1281 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; | 1277 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; |
1282 | priv->skb_dirtytx = | 1278 | priv->skb_dirtytx = |
1283 | (priv->skb_dirtytx + | 1279 | (priv->skb_dirtytx + |
1284 | 1) & TX_RING_MOD_MASK(priv->tx_ring_size); | 1280 | 1) & TX_RING_MOD_MASK(priv->tx_ring_size); |
1285 | 1281 | ||
1282 | /* Clean BD length for empty detection */ | ||
1283 | bdp->length = 0; | ||
1284 | |||
1286 | /* update bdp to point at next bd in the ring (wrapping if necessary) */ | 1285 | /* update bdp to point at next bd in the ring (wrapping if necessary) */ |
1287 | if (bdp->status & TXBD_WRAP) | 1286 | if (bdp->status & TXBD_WRAP) |
1288 | bdp = priv->tx_bd_base; | 1287 | bdp = priv->tx_bd_base; |
@@ -1297,6 +1296,25 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1297 | netif_wake_queue(dev); | 1296 | netif_wake_queue(dev); |
1298 | } /* while ((bdp->status & TXBD_READY) == 0) */ | 1297 | } /* while ((bdp->status & TXBD_READY) == 0) */ |
1299 | 1298 | ||
1299 | dev->stats.tx_packets += howmany; | ||
1300 | |||
1301 | return howmany; | ||
1302 | } | ||
1303 | |||
1304 | /* Interrupt Handler for Transmit complete */ | ||
1305 | static irqreturn_t gfar_transmit(int irq, void *dev_id) | ||
1306 | { | ||
1307 | struct net_device *dev = (struct net_device *) dev_id; | ||
1308 | struct gfar_private *priv = netdev_priv(dev); | ||
1309 | |||
1310 | /* Clear IEVENT */ | ||
1311 | gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); | ||
1312 | |||
1313 | /* Lock priv */ | ||
1314 | spin_lock(&priv->txlock); | ||
1315 | |||
1316 | gfar_clean_tx_ring(dev); | ||
1317 | |||
1300 | /* If we are coalescing the interrupts, reset the timer */ | 1318 | /* If we are coalescing the interrupts, reset the timer */ |
1301 | /* Otherwise, clear it */ | 1319 | /* Otherwise, clear it */ |
1302 | if (likely(priv->txcoalescing)) { | 1320 | if (likely(priv->txcoalescing)) { |
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id) | |||
1392 | unsigned long flags; | 1410 | unsigned long flags; |
1393 | #endif | 1411 | #endif |
1394 | 1412 | ||
1395 | /* Clear IEVENT, so rx interrupt isn't called again | ||
1396 | * because of this interrupt */ | ||
1397 | gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); | ||
1398 | |||
1399 | /* support NAPI */ | 1413 | /* support NAPI */ |
1400 | #ifdef CONFIG_GFAR_NAPI | 1414 | #ifdef CONFIG_GFAR_NAPI |
1415 | /* Clear IEVENT, so interrupts aren't called again | ||
1416 | * because of the packets that have already arrived */ | ||
1417 | gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); | ||
1418 | |||
1401 | if (netif_rx_schedule_prep(dev, &priv->napi)) { | 1419 | if (netif_rx_schedule_prep(dev, &priv->napi)) { |
1402 | tempval = gfar_read(&priv->regs->imask); | 1420 | tempval = gfar_read(&priv->regs->imask); |
1403 | tempval &= IMASK_RX_DISABLED; | 1421 | tempval &= IMASK_RTX_DISABLED; |
1404 | gfar_write(&priv->regs->imask, tempval); | 1422 | gfar_write(&priv->regs->imask, tempval); |
1405 | 1423 | ||
1406 | __netif_rx_schedule(dev, &priv->napi); | 1424 | __netif_rx_schedule(dev, &priv->napi); |
@@ -1411,6 +1429,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id) | |||
1411 | gfar_read(&priv->regs->imask)); | 1429 | gfar_read(&priv->regs->imask)); |
1412 | } | 1430 | } |
1413 | #else | 1431 | #else |
1432 | /* Clear IEVENT, so rx interrupt isn't called again | ||
1433 | * because of this interrupt */ | ||
1434 | gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); | ||
1414 | 1435 | ||
1415 | spin_lock_irqsave(&priv->rxlock, flags); | 1436 | spin_lock_irqsave(&priv->rxlock, flags); |
1416 | gfar_clean_rx_ring(dev, priv->rx_ring_size); | 1437 | gfar_clean_rx_ring(dev, priv->rx_ring_size); |
@@ -1580,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
1580 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); | 1601 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); |
1581 | struct net_device *dev = priv->dev; | 1602 | struct net_device *dev = priv->dev; |
1582 | int howmany; | 1603 | int howmany; |
1604 | unsigned long flags; | ||
1605 | |||
1606 | /* If we fail to get the lock, don't bother with the TX BDs */ | ||
1607 | if (spin_trylock_irqsave(&priv->txlock, flags)) { | ||
1608 | gfar_clean_tx_ring(dev); | ||
1609 | spin_unlock_irqrestore(&priv->txlock, flags); | ||
1610 | } | ||
1583 | 1611 | ||
1584 | howmany = gfar_clean_rx_ring(dev, budget); | 1612 | howmany = gfar_clean_rx_ring(dev, budget); |
1585 | 1613 | ||
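The gianfar.c hunks factor the TX-completion loop out of gfar_transmit() into gfar_clean_tx_ring() so that, under CONFIG_GFAR_NAPI, the poll routine can reap finished TX descriptors alongside RX work, and gfar_receive() now acknowledges and masks both RX and TX events (IEVENT_RTX_MASK / IMASK_RTX_DISABLED) when it schedules NAPI. Condensed from the hunks above (budget bookkeeping and the interrupt re-enable path at the end of the function are omitted), the resulting poll flow is roughly:

```c
/*
 * Condensed view of gfar_poll() after this change, paraphrased from the
 * hunks above; statistics and the NAPI-complete / IMASK restore path are
 * left out for brevity.
 */
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	unsigned long flags;
	int howmany;

	/* TX reclaim is opportunistic: if the TX interrupt path holds txlock,
	 * skip it rather than spin in softirq context */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	/* ... complete NAPI and restore IMASK_DEFAULT when howmany < budget ... */
	return howmany;
}
```

Using spin_trylock_irqsave() keeps the poll loop from blocking on gfar_transmit(), which takes the same txlock from hard-IRQ context; when the lock is contended, completed TX buffers are simply reaped on a later poll or by the TX interrupt itself.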
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ea8671f87bce..0d0883609469 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -126,9 +126,16 @@ extern const char gfar_driver_version[]; | |||
126 | #define DEFAULT_TXCOUNT 16 | 126 | #define DEFAULT_TXCOUNT 16 |
127 | #define DEFAULT_TXTIME 21 | 127 | #define DEFAULT_TXTIME 21 |
128 | 128 | ||
129 | #define DEFAULT_RXTIME 21 | ||
130 | |||
131 | /* Non NAPI Case */ | ||
132 | #ifndef CONFIG_GFAR_NAPI | ||
129 | #define DEFAULT_RX_COALESCE 1 | 133 | #define DEFAULT_RX_COALESCE 1 |
130 | #define DEFAULT_RXCOUNT 16 | 134 | #define DEFAULT_RXCOUNT 16 |
131 | #define DEFAULT_RXTIME 21 | 135 | #else |
136 | #define DEFAULT_RX_COALESCE 0 | ||
137 | #define DEFAULT_RXCOUNT 0 | ||
138 | #endif /* CONFIG_GFAR_NAPI */ | ||
132 | 139 | ||
133 | #define TBIPA_VALUE 0x1f | 140 | #define TBIPA_VALUE 0x1f |
134 | #define MIIMCFG_INIT_VALUE 0x00000007 | 141 | #define MIIMCFG_INIT_VALUE 0x00000007 |
@@ -242,6 +249,7 @@ extern const char gfar_driver_version[]; | |||
242 | #define IEVENT_PERR 0x00000001 | 249 | #define IEVENT_PERR 0x00000001 |
243 | #define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) | 250 | #define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) |
244 | #define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) | 251 | #define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) |
252 | #define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK) | ||
245 | #define IEVENT_ERR_MASK \ | 253 | #define IEVENT_ERR_MASK \ |
246 | (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ | 254 | (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ |
247 | IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ | 255 | IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ |
@@ -269,11 +277,12 @@ extern const char gfar_driver_version[]; | |||
269 | #define IMASK_FIQ 0x00000004 | 277 | #define IMASK_FIQ 0x00000004 |
270 | #define IMASK_DPE 0x00000002 | 278 | #define IMASK_DPE 0x00000002 |
271 | #define IMASK_PERR 0x00000001 | 279 | #define IMASK_PERR 0x00000001 |
272 | #define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY) | ||
273 | #define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ | 280 | #define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ |
274 | IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ | 281 | IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ |
275 | IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ | 282 | IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ |
276 | | IMASK_PERR) | 283 | | IMASK_PERR) |
284 | #define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ | ||
285 | & IMASK_DEFAULT) | ||
277 | 286 | ||
278 | /* Fifo management */ | 287 | /* Fifo management */ |
279 | #define FIFO_TX_THR_MASK 0x01ff | 288 | #define FIFO_TX_THR_MASK 0x01ff |
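On the header side, IEVENT_RTX_MASK simply ORs the existing RX and TX event masks, and IMASK_RX_DISABLED gives way to IMASK_RTX_DISABLED, which starts from IMASK_DEFAULT and clears the RX-frame, TX-frame and busy bits while a NAPI poll is pending, leaving the other interrupt sources enabled. A toy program showing the shape of that mask arithmetic (the bit positions are invented for the example, not the real MPC85xx register values):

```c
/*
 * Toy illustration of the IMASK_RTX_DISABLED arithmetic.  The bit positions
 * are invented; only the shape of the expression matches the header change.
 */
#include <stdio.h>

#define RXFEN0   (1u << 7)
#define TXFEN    (1u << 23)
#define BSY      (1u << 29)
#define EBERR    (1u << 0)
#define BABR     (1u << 1)
#define DEFAULT  (RXFEN0 | TXFEN | BSY | EBERR | BABR)

int main(void)
{
	/* same shape as (~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) & IMASK_DEFAULT */
	unsigned int rtx_disabled = (~(RXFEN0 | TXFEN | BSY)) & DEFAULT;

	/* RX-frame, TX-frame and busy bits are cleared; the rest stay armed */
	printf("default      = %#010x\n", DEFAULT);
	printf("rtx_disabled = %#010x\n", rtx_disabled);
	return 0;
}
```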
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 24327629bf03..b8898927236a 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -173,7 +173,7 @@ int gfar_mdio_probe(struct device *dev) | |||
173 | new_bus->read = &gfar_mdio_read, | 173 | new_bus->read = &gfar_mdio_read, |
174 | new_bus->write = &gfar_mdio_write, | 174 | new_bus->write = &gfar_mdio_write, |
175 | new_bus->reset = &gfar_mdio_reset, | 175 | new_bus->reset = &gfar_mdio_reset, |
176 | new_bus->id = pdev->id; | 176 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); |
177 | 177 | ||
178 | pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; | 178 | pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; |
179 | 179 | ||
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f2fff90d2c9d..16f9c756aa46 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -117,8 +117,8 @@ struct ixgb_buffer { | |||
117 | struct sk_buff *skb; | 117 | struct sk_buff *skb; |
118 | dma_addr_t dma; | 118 | dma_addr_t dma; |
119 | unsigned long time_stamp; | 119 | unsigned long time_stamp; |
120 | uint16_t length; | 120 | u16 length; |
121 | uint16_t next_to_watch; | 121 | u16 next_to_watch; |
122 | }; | 122 | }; |
123 | 123 | ||
124 | struct ixgb_desc_ring { | 124 | struct ixgb_desc_ring { |
@@ -152,11 +152,11 @@ struct ixgb_desc_ring { | |||
152 | struct ixgb_adapter { | 152 | struct ixgb_adapter { |
153 | struct timer_list watchdog_timer; | 153 | struct timer_list watchdog_timer; |
154 | struct vlan_group *vlgrp; | 154 | struct vlan_group *vlgrp; |
155 | uint32_t bd_number; | 155 | u32 bd_number; |
156 | uint32_t rx_buffer_len; | 156 | u32 rx_buffer_len; |
157 | uint32_t part_num; | 157 | u32 part_num; |
158 | uint16_t link_speed; | 158 | u16 link_speed; |
159 | uint16_t link_duplex; | 159 | u16 link_duplex; |
160 | spinlock_t tx_lock; | 160 | spinlock_t tx_lock; |
161 | struct work_struct tx_timeout_task; | 161 | struct work_struct tx_timeout_task; |
162 | 162 | ||
@@ -167,19 +167,19 @@ struct ixgb_adapter { | |||
167 | struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; | 167 | struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; |
168 | unsigned int restart_queue; | 168 | unsigned int restart_queue; |
169 | unsigned long timeo_start; | 169 | unsigned long timeo_start; |
170 | uint32_t tx_cmd_type; | 170 | u32 tx_cmd_type; |
171 | uint64_t hw_csum_tx_good; | 171 | u64 hw_csum_tx_good; |
172 | uint64_t hw_csum_tx_error; | 172 | u64 hw_csum_tx_error; |
173 | uint32_t tx_int_delay; | 173 | u32 tx_int_delay; |
174 | uint32_t tx_timeout_count; | 174 | u32 tx_timeout_count; |
175 | bool tx_int_delay_enable; | 175 | bool tx_int_delay_enable; |
176 | bool detect_tx_hung; | 176 | bool detect_tx_hung; |
177 | 177 | ||
178 | /* RX */ | 178 | /* RX */ |
179 | struct ixgb_desc_ring rx_ring; | 179 | struct ixgb_desc_ring rx_ring; |
180 | uint64_t hw_csum_rx_error; | 180 | u64 hw_csum_rx_error; |
181 | uint64_t hw_csum_rx_good; | 181 | u64 hw_csum_rx_good; |
182 | uint32_t rx_int_delay; | 182 | u32 rx_int_delay; |
183 | bool rx_csum; | 183 | bool rx_csum; |
184 | 184 | ||
185 | /* OS defined structs */ | 185 | /* OS defined structs */ |
@@ -192,7 +192,7 @@ struct ixgb_adapter { | |||
192 | struct ixgb_hw hw; | 192 | struct ixgb_hw hw; |
193 | u16 msg_enable; | 193 | u16 msg_enable; |
194 | struct ixgb_hw_stats stats; | 194 | struct ixgb_hw_stats stats; |
195 | uint32_t alloc_rx_buff_failed; | 195 | u32 alloc_rx_buff_failed; |
196 | bool have_msi; | 196 | bool have_msi; |
197 | unsigned long flags; | 197 | unsigned long flags; |
198 | }; | 198 | }; |
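The ixgb hunks from here on are a mechanical type cleanup: the C99 uintN_t names are replaced by the kernel's native u8/u16/u32/u64 aliases, with no change in width or behaviour. For reference, the aliases correspond roughly to the following (simplified; the authoritative definitions are per-architecture):

```c
/*
 * Simplified correspondence only -- the real definitions live in the
 * per-architecture <asm/types.h> headers (u64 may be unsigned long on
 * some 64-bit architectures).
 */
typedef unsigned char      u8;	/* was uint8_t  */
typedef unsigned short     u16;	/* was uint16_t */
typedef unsigned int       u32;	/* was uint32_t */
typedef unsigned long long u64;	/* was uint64_t */
```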
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 8e9302fc8865..2f7ed52c7502 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -29,11 +29,11 @@ | |||
29 | #include "ixgb_hw.h" | 29 | #include "ixgb_hw.h" |
30 | #include "ixgb_ee.h" | 30 | #include "ixgb_ee.h" |
31 | /* Local prototypes */ | 31 | /* Local prototypes */ |
32 | static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw); | 32 | static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); |
33 | 33 | ||
34 | static void ixgb_shift_out_bits(struct ixgb_hw *hw, | 34 | static void ixgb_shift_out_bits(struct ixgb_hw *hw, |
35 | uint16_t data, | 35 | u16 data, |
36 | uint16_t count); | 36 | u16 count); |
37 | static void ixgb_standby_eeprom(struct ixgb_hw *hw); | 37 | static void ixgb_standby_eeprom(struct ixgb_hw *hw); |
38 | 38 | ||
39 | static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); | 39 | static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); |
@@ -48,7 +48,7 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); | |||
48 | *****************************************************************************/ | 48 | *****************************************************************************/ |
49 | static void | 49 | static void |
50 | ixgb_raise_clock(struct ixgb_hw *hw, | 50 | ixgb_raise_clock(struct ixgb_hw *hw, |
51 | uint32_t *eecd_reg) | 51 | u32 *eecd_reg) |
52 | { | 52 | { |
53 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then | 53 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then |
54 | * wait 50 microseconds. | 54 | * wait 50 microseconds. |
@@ -67,7 +67,7 @@ ixgb_raise_clock(struct ixgb_hw *hw, | |||
67 | *****************************************************************************/ | 67 | *****************************************************************************/ |
68 | static void | 68 | static void |
69 | ixgb_lower_clock(struct ixgb_hw *hw, | 69 | ixgb_lower_clock(struct ixgb_hw *hw, |
70 | uint32_t *eecd_reg) | 70 | u32 *eecd_reg) |
71 | { | 71 | { |
72 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then | 72 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then |
73 | * wait 50 microseconds. | 73 | * wait 50 microseconds. |
@@ -87,11 +87,11 @@ ixgb_lower_clock(struct ixgb_hw *hw, | |||
87 | *****************************************************************************/ | 87 | *****************************************************************************/ |
88 | static void | 88 | static void |
89 | ixgb_shift_out_bits(struct ixgb_hw *hw, | 89 | ixgb_shift_out_bits(struct ixgb_hw *hw, |
90 | uint16_t data, | 90 | u16 data, |
91 | uint16_t count) | 91 | u16 count) |
92 | { | 92 | { |
93 | uint32_t eecd_reg; | 93 | u32 eecd_reg; |
94 | uint32_t mask; | 94 | u32 mask; |
95 | 95 | ||
96 | /* We need to shift "count" bits out to the EEPROM. So, value in the | 96 | /* We need to shift "count" bits out to the EEPROM. So, value in the |
97 | * "data" parameter will be shifted out to the EEPROM one bit at a time. | 97 | * "data" parameter will be shifted out to the EEPROM one bit at a time. |
@@ -133,12 +133,12 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, | |||
133 | * | 133 | * |
134 | * hw - Struct containing variables accessed by shared code | 134 | * hw - Struct containing variables accessed by shared code |
135 | *****************************************************************************/ | 135 | *****************************************************************************/ |
136 | static uint16_t | 136 | static u16 |
137 | ixgb_shift_in_bits(struct ixgb_hw *hw) | 137 | ixgb_shift_in_bits(struct ixgb_hw *hw) |
138 | { | 138 | { |
139 | uint32_t eecd_reg; | 139 | u32 eecd_reg; |
140 | uint32_t i; | 140 | u32 i; |
141 | uint16_t data; | 141 | u16 data; |
142 | 142 | ||
143 | /* In order to read a register from the EEPROM, we need to shift 16 bits | 143 | /* In order to read a register from the EEPROM, we need to shift 16 bits |
144 | * in from the EEPROM. Bits are "shifted in" by raising the clock input to | 144 | * in from the EEPROM. Bits are "shifted in" by raising the clock input to |
@@ -179,7 +179,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw) | |||
179 | static void | 179 | static void |
180 | ixgb_setup_eeprom(struct ixgb_hw *hw) | 180 | ixgb_setup_eeprom(struct ixgb_hw *hw) |
181 | { | 181 | { |
182 | uint32_t eecd_reg; | 182 | u32 eecd_reg; |
183 | 183 | ||
184 | eecd_reg = IXGB_READ_REG(hw, EECD); | 184 | eecd_reg = IXGB_READ_REG(hw, EECD); |
185 | 185 | ||
@@ -201,7 +201,7 @@ ixgb_setup_eeprom(struct ixgb_hw *hw) | |||
201 | static void | 201 | static void |
202 | ixgb_standby_eeprom(struct ixgb_hw *hw) | 202 | ixgb_standby_eeprom(struct ixgb_hw *hw) |
203 | { | 203 | { |
204 | uint32_t eecd_reg; | 204 | u32 eecd_reg; |
205 | 205 | ||
206 | eecd_reg = IXGB_READ_REG(hw, EECD); | 206 | eecd_reg = IXGB_READ_REG(hw, EECD); |
207 | 207 | ||
@@ -235,7 +235,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw) | |||
235 | static void | 235 | static void |
236 | ixgb_clock_eeprom(struct ixgb_hw *hw) | 236 | ixgb_clock_eeprom(struct ixgb_hw *hw) |
237 | { | 237 | { |
238 | uint32_t eecd_reg; | 238 | u32 eecd_reg; |
239 | 239 | ||
240 | eecd_reg = IXGB_READ_REG(hw, EECD); | 240 | eecd_reg = IXGB_READ_REG(hw, EECD); |
241 | 241 | ||
@@ -259,7 +259,7 @@ ixgb_clock_eeprom(struct ixgb_hw *hw) | |||
259 | static void | 259 | static void |
260 | ixgb_cleanup_eeprom(struct ixgb_hw *hw) | 260 | ixgb_cleanup_eeprom(struct ixgb_hw *hw) |
261 | { | 261 | { |
262 | uint32_t eecd_reg; | 262 | u32 eecd_reg; |
263 | 263 | ||
264 | eecd_reg = IXGB_READ_REG(hw, EECD); | 264 | eecd_reg = IXGB_READ_REG(hw, EECD); |
265 | 265 | ||
@@ -285,8 +285,8 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw) | |||
285 | static bool | 285 | static bool |
286 | ixgb_wait_eeprom_command(struct ixgb_hw *hw) | 286 | ixgb_wait_eeprom_command(struct ixgb_hw *hw) |
287 | { | 287 | { |
288 | uint32_t eecd_reg; | 288 | u32 eecd_reg; |
289 | uint32_t i; | 289 | u32 i; |
290 | 290 | ||
291 | /* Toggle the CS line. This in effect tells to EEPROM to actually execute | 291 | /* Toggle the CS line. This in effect tells to EEPROM to actually execute |
292 | * the command in question. | 292 | * the command in question. |
@@ -325,13 +325,13 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) | |||
325 | bool | 325 | bool |
326 | ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) | 326 | ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) |
327 | { | 327 | { |
328 | uint16_t checksum = 0; | 328 | u16 checksum = 0; |
329 | uint16_t i; | 329 | u16 i; |
330 | 330 | ||
331 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) | 331 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) |
332 | checksum += ixgb_read_eeprom(hw, i); | 332 | checksum += ixgb_read_eeprom(hw, i); |
333 | 333 | ||
334 | if(checksum == (uint16_t) EEPROM_SUM) | 334 | if(checksum == (u16) EEPROM_SUM) |
335 | return (true); | 335 | return (true); |
336 | else | 336 | else |
337 | return (false); | 337 | return (false); |
@@ -348,13 +348,13 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) | |||
348 | void | 348 | void |
349 | ixgb_update_eeprom_checksum(struct ixgb_hw *hw) | 349 | ixgb_update_eeprom_checksum(struct ixgb_hw *hw) |
350 | { | 350 | { |
351 | uint16_t checksum = 0; | 351 | u16 checksum = 0; |
352 | uint16_t i; | 352 | u16 i; |
353 | 353 | ||
354 | for(i = 0; i < EEPROM_CHECKSUM_REG; i++) | 354 | for(i = 0; i < EEPROM_CHECKSUM_REG; i++) |
355 | checksum += ixgb_read_eeprom(hw, i); | 355 | checksum += ixgb_read_eeprom(hw, i); |
356 | 356 | ||
357 | checksum = (uint16_t) EEPROM_SUM - checksum; | 357 | checksum = (u16) EEPROM_SUM - checksum; |
358 | 358 | ||
359 | ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); | 359 | ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); |
360 | return; | 360 | return; |
@@ -372,7 +372,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw) | |||
372 | * | 372 | * |
373 | *****************************************************************************/ | 373 | *****************************************************************************/ |
374 | void | 374 | void |
375 | ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) | 375 | ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) |
376 | { | 376 | { |
377 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | 377 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; |
378 | 378 | ||
@@ -425,11 +425,11 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) | |||
425 | * Returns: | 425 | * Returns: |
426 | * The 16-bit value read from the eeprom | 426 | * The 16-bit value read from the eeprom |
427 | *****************************************************************************/ | 427 | *****************************************************************************/ |
428 | uint16_t | 428 | u16 |
429 | ixgb_read_eeprom(struct ixgb_hw *hw, | 429 | ixgb_read_eeprom(struct ixgb_hw *hw, |
430 | uint16_t offset) | 430 | u16 offset) |
431 | { | 431 | { |
432 | uint16_t data; | 432 | u16 data; |
433 | 433 | ||
434 | /* Prepare the EEPROM for reading */ | 434 | /* Prepare the EEPROM for reading */ |
435 | ixgb_setup_eeprom(hw); | 435 | ixgb_setup_eeprom(hw); |
@@ -463,8 +463,8 @@ ixgb_read_eeprom(struct ixgb_hw *hw, | |||
463 | bool | 463 | bool |
464 | ixgb_get_eeprom_data(struct ixgb_hw *hw) | 464 | ixgb_get_eeprom_data(struct ixgb_hw *hw) |
465 | { | 465 | { |
466 | uint16_t i; | 466 | u16 i; |
467 | uint16_t checksum = 0; | 467 | u16 checksum = 0; |
468 | struct ixgb_ee_map_type *ee_map; | 468 | struct ixgb_ee_map_type *ee_map; |
469 | 469 | ||
470 | DEBUGFUNC("ixgb_get_eeprom_data"); | 470 | DEBUGFUNC("ixgb_get_eeprom_data"); |
@@ -473,13 +473,13 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw) | |||
473 | 473 | ||
474 | DEBUGOUT("ixgb_ee: Reading eeprom data\n"); | 474 | DEBUGOUT("ixgb_ee: Reading eeprom data\n"); |
475 | for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { | 475 | for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { |
476 | uint16_t ee_data; | 476 | u16 ee_data; |
477 | ee_data = ixgb_read_eeprom(hw, i); | 477 | ee_data = ixgb_read_eeprom(hw, i); |
478 | checksum += ee_data; | 478 | checksum += ee_data; |
479 | hw->eeprom[i] = cpu_to_le16(ee_data); | 479 | hw->eeprom[i] = cpu_to_le16(ee_data); |
480 | } | 480 | } |
481 | 481 | ||
482 | if (checksum != (uint16_t) EEPROM_SUM) { | 482 | if (checksum != (u16) EEPROM_SUM) { |
483 | DEBUGOUT("ixgb_ee: Checksum invalid.\n"); | 483 | DEBUGOUT("ixgb_ee: Checksum invalid.\n"); |
484 | /* clear the init_ctrl_reg_1 to signify that the cache is | 484 | /* clear the init_ctrl_reg_1 to signify that the cache is |
485 | * invalidated */ | 485 | * invalidated */ |
@@ -529,7 +529,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) | |||
529 | * Word at indexed offset in eeprom, if valid, 0 otherwise. | 529 | * Word at indexed offset in eeprom, if valid, 0 otherwise. |
530 | ******************************************************************************/ | 530 | ******************************************************************************/ |
531 | __le16 | 531 | __le16 |
532 | ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) | 532 | ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) |
533 | { | 533 | { |
534 | 534 | ||
535 | if ((index < IXGB_EEPROM_SIZE) && | 535 | if ((index < IXGB_EEPROM_SIZE) && |
@@ -550,7 +550,7 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) | |||
550 | ******************************************************************************/ | 550 | ******************************************************************************/ |
551 | void | 551 | void |
552 | ixgb_get_ee_mac_addr(struct ixgb_hw *hw, | 552 | ixgb_get_ee_mac_addr(struct ixgb_hw *hw, |
553 | uint8_t *mac_addr) | 553 | u8 *mac_addr) |
554 | { | 554 | { |
555 | int i; | 555 | int i; |
556 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | 556 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; |
@@ -574,7 +574,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw, | |||
574 | * Returns: | 574 | * Returns: |
575 | * PBA number if EEPROM contents are valid, 0 otherwise | 575 | * PBA number if EEPROM contents are valid, 0 otherwise |
576 | ******************************************************************************/ | 576 | ******************************************************************************/ |
577 | uint32_t | 577 | u32 |
578 | ixgb_get_ee_pba_number(struct ixgb_hw *hw) | 578 | ixgb_get_ee_pba_number(struct ixgb_hw *hw) |
579 | { | 579 | { |
580 | if (ixgb_check_and_get_eeprom_data(hw) == true) | 580 | if (ixgb_check_and_get_eeprom_data(hw) == true) |
@@ -593,7 +593,7 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw) | |||
593 | * Returns: | 593 | * Returns: |
594 | * Device Id if EEPROM contents are valid, 0 otherwise | 594 | * Device Id if EEPROM contents are valid, 0 otherwise |
595 | ******************************************************************************/ | 595 | ******************************************************************************/ |
596 | uint16_t | 596 | u16 |
597 | ixgb_get_ee_device_id(struct ixgb_hw *hw) | 597 | ixgb_get_ee_device_id(struct ixgb_hw *hw) |
598 | { | 598 | { |
599 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; | 599 | struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; |
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index da62f58276fa..4b7bd0d4a8a9 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -75,7 +75,7 @@ | |||
75 | 75 | ||
76 | /* EEPROM structure */ | 76 | /* EEPROM structure */ |
77 | struct ixgb_ee_map_type { | 77 | struct ixgb_ee_map_type { |
78 | uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; | 78 | u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; |
79 | __le16 compatibility; | 79 | __le16 compatibility; |
80 | __le16 reserved1[4]; | 80 | __le16 reserved1[4]; |
81 | __le32 pba_number; | 81 | __le32 pba_number; |
@@ -88,19 +88,19 @@ struct ixgb_ee_map_type { | |||
88 | __le16 oem_reserved[16]; | 88 | __le16 oem_reserved[16]; |
89 | __le16 swdpins_reg; | 89 | __le16 swdpins_reg; |
90 | __le16 circuit_ctrl_reg; | 90 | __le16 circuit_ctrl_reg; |
91 | uint8_t d3_power; | 91 | u8 d3_power; |
92 | uint8_t d0_power; | 92 | u8 d0_power; |
93 | __le16 reserved2[28]; | 93 | __le16 reserved2[28]; |
94 | __le16 checksum; | 94 | __le16 checksum; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | /* EEPROM Functions */ | 97 | /* EEPROM Functions */ |
98 | uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg); | 98 | u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg); |
99 | 99 | ||
100 | bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); | 100 | bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); |
101 | 101 | ||
102 | void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); | 102 | void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); |
103 | 103 | ||
104 | void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data); | 104 | void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data); |
105 | 105 | ||
106 | #endif /* IXGB_EE_H */ | 106 | #endif /* IXGB_EE_H */ |
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 45ddf804fe5e..8464d8a013b0 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -185,7 +185,7 @@ ixgb_set_pauseparam(struct net_device *netdev, | |||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
188 | static uint32_t | 188 | static u32 |
189 | ixgb_get_rx_csum(struct net_device *netdev) | 189 | ixgb_get_rx_csum(struct net_device *netdev) |
190 | { | 190 | { |
191 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 191 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
@@ -194,7 +194,7 @@ ixgb_get_rx_csum(struct net_device *netdev) | |||
194 | } | 194 | } |
195 | 195 | ||
196 | static int | 196 | static int |
197 | ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) | 197 | ixgb_set_rx_csum(struct net_device *netdev, u32 data) |
198 | { | 198 | { |
199 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 199 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
200 | 200 | ||
@@ -209,14 +209,14 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) | |||
209 | return 0; | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
212 | static uint32_t | 212 | static u32 |
213 | ixgb_get_tx_csum(struct net_device *netdev) | 213 | ixgb_get_tx_csum(struct net_device *netdev) |
214 | { | 214 | { |
215 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | 215 | return (netdev->features & NETIF_F_HW_CSUM) != 0; |
216 | } | 216 | } |
217 | 217 | ||
218 | static int | 218 | static int |
219 | ixgb_set_tx_csum(struct net_device *netdev, uint32_t data) | 219 | ixgb_set_tx_csum(struct net_device *netdev, u32 data) |
220 | { | 220 | { |
221 | if (data) | 221 | if (data) |
222 | netdev->features |= NETIF_F_HW_CSUM; | 222 | netdev->features |= NETIF_F_HW_CSUM; |
@@ -227,7 +227,7 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data) | |||
227 | } | 227 | } |
228 | 228 | ||
229 | static int | 229 | static int |
230 | ixgb_set_tso(struct net_device *netdev, uint32_t data) | 230 | ixgb_set_tso(struct net_device *netdev, u32 data) |
231 | { | 231 | { |
232 | if(data) | 232 | if(data) |
233 | netdev->features |= NETIF_F_TSO; | 233 | netdev->features |= NETIF_F_TSO; |
@@ -236,7 +236,7 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data) | |||
236 | return 0; | 236 | return 0; |
237 | } | 237 | } |
238 | 238 | ||
239 | static uint32_t | 239 | static u32 |
240 | ixgb_get_msglevel(struct net_device *netdev) | 240 | ixgb_get_msglevel(struct net_device *netdev) |
241 | { | 241 | { |
242 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 242 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
@@ -244,7 +244,7 @@ ixgb_get_msglevel(struct net_device *netdev) | |||
244 | } | 244 | } |
245 | 245 | ||
246 | static void | 246 | static void |
247 | ixgb_set_msglevel(struct net_device *netdev, uint32_t data) | 247 | ixgb_set_msglevel(struct net_device *netdev, u32 data) |
248 | { | 248 | { |
249 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 249 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
250 | adapter->msg_enable = data; | 250 | adapter->msg_enable = data; |
@@ -254,7 +254,7 @@ ixgb_set_msglevel(struct net_device *netdev, uint32_t data) | |||
254 | static int | 254 | static int |
255 | ixgb_get_regs_len(struct net_device *netdev) | 255 | ixgb_get_regs_len(struct net_device *netdev) |
256 | { | 256 | { |
257 | #define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t) | 257 | #define IXGB_REG_DUMP_LEN 136*sizeof(u32) |
258 | return IXGB_REG_DUMP_LEN; | 258 | return IXGB_REG_DUMP_LEN; |
259 | } | 259 | } |
260 | 260 | ||
@@ -264,9 +264,9 @@ ixgb_get_regs(struct net_device *netdev, | |||
264 | { | 264 | { |
265 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 265 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
266 | struct ixgb_hw *hw = &adapter->hw; | 266 | struct ixgb_hw *hw = &adapter->hw; |
267 | uint32_t *reg = p; | 267 | u32 *reg = p; |
268 | uint32_t *reg_start = reg; | 268 | u32 *reg_start = reg; |
269 | uint8_t i; | 269 | u8 i; |
270 | 270 | ||
271 | /* the 1 (one) below indicates an attempt at versioning, if the | 271 | /* the 1 (one) below indicates an attempt at versioning, if the |
272 | * interface in ethtool or the driver changes, this 1 should be | 272 | * interface in ethtool or the driver changes, this 1 should be |
@@ -395,7 +395,7 @@ ixgb_get_regs(struct net_device *netdev, | |||
395 | *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ | 395 | *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ |
396 | *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ | 396 | *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ |
397 | 397 | ||
398 | regs->len = (reg - reg_start) * sizeof(uint32_t); | 398 | regs->len = (reg - reg_start) * sizeof(u32); |
399 | } | 399 | } |
400 | 400 | ||
401 | static int | 401 | static int |
@@ -407,7 +407,7 @@ ixgb_get_eeprom_len(struct net_device *netdev) | |||
407 | 407 | ||
408 | static int | 408 | static int |
409 | ixgb_get_eeprom(struct net_device *netdev, | 409 | ixgb_get_eeprom(struct net_device *netdev, |
410 | struct ethtool_eeprom *eeprom, uint8_t *bytes) | 410 | struct ethtool_eeprom *eeprom, u8 *bytes) |
411 | { | 411 | { |
412 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 412 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
413 | struct ixgb_hw *hw = &adapter->hw; | 413 | struct ixgb_hw *hw = &adapter->hw; |
@@ -445,7 +445,7 @@ ixgb_get_eeprom(struct net_device *netdev, | |||
445 | eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); | 445 | eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); |
446 | } | 446 | } |
447 | 447 | ||
448 | memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1), | 448 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), |
449 | eeprom->len); | 449 | eeprom->len); |
450 | kfree(eeprom_buff); | 450 | kfree(eeprom_buff); |
451 | 451 | ||
@@ -455,14 +455,14 @@ geeprom_error: | |||
455 | 455 | ||
456 | static int | 456 | static int |
457 | ixgb_set_eeprom(struct net_device *netdev, | 457 | ixgb_set_eeprom(struct net_device *netdev, |
458 | struct ethtool_eeprom *eeprom, uint8_t *bytes) | 458 | struct ethtool_eeprom *eeprom, u8 *bytes) |
459 | { | 459 | { |
460 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 460 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
461 | struct ixgb_hw *hw = &adapter->hw; | 461 | struct ixgb_hw *hw = &adapter->hw; |
462 | uint16_t *eeprom_buff; | 462 | u16 *eeprom_buff; |
463 | void *ptr; | 463 | void *ptr; |
464 | int max_len, first_word, last_word; | 464 | int max_len, first_word, last_word; |
465 | uint16_t i; | 465 | u16 i; |
466 | 466 | ||
467 | if(eeprom->len == 0) | 467 | if(eeprom->len == 0) |
468 | return -EINVAL; | 468 | return -EINVAL; |
@@ -563,12 +563,12 @@ ixgb_set_ringparam(struct net_device *netdev, | |||
563 | if(netif_running(adapter->netdev)) | 563 | if(netif_running(adapter->netdev)) |
564 | ixgb_down(adapter, true); | 564 | ixgb_down(adapter, true); |
565 | 565 | ||
566 | rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD); | 566 | rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); |
567 | rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD); | 567 | rxdr->count = min(rxdr->count,(u32)MAX_RXD); |
568 | rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); | 568 | rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); |
569 | 569 | ||
570 | txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD); | 570 | txdr->count = max(ring->tx_pending,(u32)MIN_TXD); |
571 | txdr->count = min(txdr->count,(uint32_t)MAX_TXD); | 571 | txdr->count = min(txdr->count,(u32)MAX_TXD); |
572 | txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); | 572 | txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); |
573 | 573 | ||
574 | if(netif_running(adapter->netdev)) { | 574 | if(netif_running(adapter->netdev)) { |
@@ -624,7 +624,7 @@ ixgb_led_blink_callback(unsigned long data) | |||
624 | } | 624 | } |
625 | 625 | ||
626 | static int | 626 | static int |
627 | ixgb_phys_id(struct net_device *netdev, uint32_t data) | 627 | ixgb_phys_id(struct net_device *netdev, u32 data) |
628 | { | 628 | { |
629 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 629 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
630 | 630 | ||
@@ -660,7 +660,7 @@ ixgb_get_sset_count(struct net_device *netdev, int sset) | |||
660 | 660 | ||
661 | static void | 661 | static void |
662 | ixgb_get_ethtool_stats(struct net_device *netdev, | 662 | ixgb_get_ethtool_stats(struct net_device *netdev, |
663 | struct ethtool_stats *stats, uint64_t *data) | 663 | struct ethtool_stats *stats, u64 *data) |
664 | { | 664 | { |
665 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 665 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
666 | int i; | 666 | int i; |
@@ -669,12 +669,12 @@ ixgb_get_ethtool_stats(struct net_device *netdev, | |||
669 | for(i = 0; i < IXGB_STATS_LEN; i++) { | 669 | for(i = 0; i < IXGB_STATS_LEN; i++) { |
670 | char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; | 670 | char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; |
671 | data[i] = (ixgb_gstrings_stats[i].sizeof_stat == | 671 | data[i] = (ixgb_gstrings_stats[i].sizeof_stat == |
672 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | 672 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
673 | } | 673 | } |
674 | } | 674 | } |
675 | 675 | ||
676 | static void | 676 | static void |
677 | ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | 677 | ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) |
678 | { | 678 | { |
679 | int i; | 679 | int i; |
680 | 680 | ||
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 8a04bbd258a6..04d2003e24e1 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -35,9 +35,9 @@ | |||
35 | 35 | ||
36 | /* Local function prototypes */ | 36 | /* Local function prototypes */ |
37 | 37 | ||
38 | static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr); | 38 | static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); |
39 | 39 | ||
40 | static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value); | 40 | static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value); |
41 | 41 | ||
42 | static void ixgb_get_bus_info(struct ixgb_hw *hw); | 42 | static void ixgb_get_bus_info(struct ixgb_hw *hw); |
43 | 43 | ||
@@ -55,18 +55,18 @@ static void ixgb_clear_vfta(struct ixgb_hw *hw); | |||
55 | 55 | ||
56 | static void ixgb_init_rx_addrs(struct ixgb_hw *hw); | 56 | static void ixgb_init_rx_addrs(struct ixgb_hw *hw); |
57 | 57 | ||
58 | static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw, | 58 | static u16 ixgb_read_phy_reg(struct ixgb_hw *hw, |
59 | uint32_t reg_address, | 59 | u32 reg_address, |
60 | uint32_t phy_address, | 60 | u32 phy_address, |
61 | uint32_t device_type); | 61 | u32 device_type); |
62 | 62 | ||
63 | static bool ixgb_setup_fc(struct ixgb_hw *hw); | 63 | static bool ixgb_setup_fc(struct ixgb_hw *hw); |
64 | 64 | ||
65 | static bool mac_addr_valid(uint8_t *mac_addr); | 65 | static bool mac_addr_valid(u8 *mac_addr); |
66 | 66 | ||
67 | static uint32_t ixgb_mac_reset(struct ixgb_hw *hw) | 67 | static u32 ixgb_mac_reset(struct ixgb_hw *hw) |
68 | { | 68 | { |
69 | uint32_t ctrl_reg; | 69 | u32 ctrl_reg; |
70 | 70 | ||
71 | ctrl_reg = IXGB_CTRL0_RST | | 71 | ctrl_reg = IXGB_CTRL0_RST | |
72 | IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ | 72 | IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ |
@@ -117,8 +117,8 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw) | |||
117 | bool | 117 | bool |
118 | ixgb_adapter_stop(struct ixgb_hw *hw) | 118 | ixgb_adapter_stop(struct ixgb_hw *hw) |
119 | { | 119 | { |
120 | uint32_t ctrl_reg; | 120 | u32 ctrl_reg; |
121 | uint32_t icr_reg; | 121 | u32 icr_reg; |
122 | 122 | ||
123 | DEBUGFUNC("ixgb_adapter_stop"); | 123 | DEBUGFUNC("ixgb_adapter_stop"); |
124 | 124 | ||
@@ -179,8 +179,8 @@ ixgb_adapter_stop(struct ixgb_hw *hw) | |||
179 | static ixgb_xpak_vendor | 179 | static ixgb_xpak_vendor |
180 | ixgb_identify_xpak_vendor(struct ixgb_hw *hw) | 180 | ixgb_identify_xpak_vendor(struct ixgb_hw *hw) |
181 | { | 181 | { |
182 | uint32_t i; | 182 | u32 i; |
183 | uint16_t vendor_name[5]; | 183 | u16 vendor_name[5]; |
184 | ixgb_xpak_vendor xpak_vendor; | 184 | ixgb_xpak_vendor xpak_vendor; |
185 | 185 | ||
186 | DEBUGFUNC("ixgb_identify_xpak_vendor"); | 186 | DEBUGFUNC("ixgb_identify_xpak_vendor"); |
@@ -292,8 +292,8 @@ ixgb_identify_phy(struct ixgb_hw *hw) | |||
292 | bool | 292 | bool |
293 | ixgb_init_hw(struct ixgb_hw *hw) | 293 | ixgb_init_hw(struct ixgb_hw *hw) |
294 | { | 294 | { |
295 | uint32_t i; | 295 | u32 i; |
296 | uint32_t ctrl_reg; | 296 | u32 ctrl_reg; |
297 | bool status; | 297 | bool status; |
298 | 298 | ||
299 | DEBUGFUNC("ixgb_init_hw"); | 299 | DEBUGFUNC("ixgb_init_hw"); |
@@ -377,7 +377,7 @@ ixgb_init_hw(struct ixgb_hw *hw) | |||
377 | static void | 377 | static void |
378 | ixgb_init_rx_addrs(struct ixgb_hw *hw) | 378 | ixgb_init_rx_addrs(struct ixgb_hw *hw) |
379 | { | 379 | { |
380 | uint32_t i; | 380 | u32 i; |
381 | 381 | ||
382 | DEBUGFUNC("ixgb_init_rx_addrs"); | 382 | DEBUGFUNC("ixgb_init_rx_addrs"); |
383 | 383 | ||
@@ -437,13 +437,13 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw) | |||
437 | *****************************************************************************/ | 437 | *****************************************************************************/ |
438 | void | 438 | void |
439 | ixgb_mc_addr_list_update(struct ixgb_hw *hw, | 439 | ixgb_mc_addr_list_update(struct ixgb_hw *hw, |
440 | uint8_t *mc_addr_list, | 440 | u8 *mc_addr_list, |
441 | uint32_t mc_addr_count, | 441 | u32 mc_addr_count, |
442 | uint32_t pad) | 442 | u32 pad) |
443 | { | 443 | { |
444 | uint32_t hash_value; | 444 | u32 hash_value; |
445 | uint32_t i; | 445 | u32 i; |
446 | uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */ | 446 | u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ |
447 | 447 | ||
448 | DEBUGFUNC("ixgb_mc_addr_list_update"); | 448 | DEBUGFUNC("ixgb_mc_addr_list_update"); |
449 | 449 | ||
@@ -515,11 +515,11 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, | |||
515 | * Returns: | 515 | * Returns: |
516 | * The hash value | 516 | * The hash value |
517 | *****************************************************************************/ | 517 | *****************************************************************************/ |
518 | static uint32_t | 518 | static u32 |
519 | ixgb_hash_mc_addr(struct ixgb_hw *hw, | 519 | ixgb_hash_mc_addr(struct ixgb_hw *hw, |
520 | uint8_t *mc_addr) | 520 | u8 *mc_addr) |
521 | { | 521 | { |
522 | uint32_t hash_value = 0; | 522 | u32 hash_value = 0; |
523 | 523 | ||
524 | DEBUGFUNC("ixgb_hash_mc_addr"); | 524 | DEBUGFUNC("ixgb_hash_mc_addr"); |
525 | 525 | ||
@@ -533,18 +533,18 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw, | |||
533 | case 0: | 533 | case 0: |
534 | /* [47:36] i.e. 0x563 for above example address */ | 534 | /* [47:36] i.e. 0x563 for above example address */ |
535 | hash_value = | 535 | hash_value = |
536 | ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | 536 | ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); |
537 | break; | 537 | break; |
538 | case 1: /* [46:35] i.e. 0xAC6 for above example address */ | 538 | case 1: /* [46:35] i.e. 0xAC6 for above example address */ |
539 | hash_value = | 539 | hash_value = |
540 | ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | 540 | ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); |
541 | break; | 541 | break; |
542 | case 2: /* [45:34] i.e. 0x5D8 for above example address */ | 542 | case 2: /* [45:34] i.e. 0x5D8 for above example address */ |
543 | hash_value = | 543 | hash_value = |
544 | ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | 544 | ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); |
545 | break; | 545 | break; |
546 | case 3: /* [43:32] i.e. 0x634 for above example address */ | 546 | case 3: /* [43:32] i.e. 0x634 for above example address */ |
547 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | 547 | hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); |
548 | break; | 548 | break; |
549 | default: | 549 | default: |
550 | /* Invalid mc_filter_type, what should we do? */ | 550 | /* Invalid mc_filter_type, what should we do? */ |
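The switch above selects which 12 bits of the destination MAC are used as the multicast hash, e.g. bits [47:36] for filter type 0. A rough standalone sketch of the type-0 extraction plus the table-register/bit split described in the following hunk; the example address is arbitrary (not the one from the driver comment), and the upper-7/lower-5 split is an assumption suggested by the "128 32-bit registers" comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Arbitrary multicast address for illustration. */
	uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x12, 0x63, 0x56 };
	uint32_t hash_value;

	/* Filter type 0: bits [47:36], i.e. the top 12 bits of the last
	 * two bytes (mc_addr[5] being the most significant byte here). */
	hash_value = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);

	/* The MTA is 128 x 32-bit registers = 4096 bits: assume the upper
	 * 7 bits of the hash pick the register, the lower 5 pick the bit. */
	uint32_t hash_reg = (hash_value >> 5) & 0x7F;
	uint32_t hash_bit = hash_value & 0x1F;

	printf("hash 0x%03x -> MTA register %u, bit %u\n",
	       (unsigned)hash_value, (unsigned)hash_reg, (unsigned)hash_bit);
	return 0;
}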
@@ -565,10 +565,10 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw, | |||
565 | *****************************************************************************/ | 565 | *****************************************************************************/ |
566 | static void | 566 | static void |
567 | ixgb_mta_set(struct ixgb_hw *hw, | 567 | ixgb_mta_set(struct ixgb_hw *hw, |
568 | uint32_t hash_value) | 568 | u32 hash_value) |
569 | { | 569 | { |
570 | uint32_t hash_bit, hash_reg; | 570 | u32 hash_bit, hash_reg; |
571 | uint32_t mta_reg; | 571 | u32 mta_reg; |
572 | 572 | ||
573 | /* The MTA is a register array of 128 32-bit registers. | 573 | /* The MTA is a register array of 128 32-bit registers. |
574 | * It is treated like an array of 4096 bits. We want to set | 574 | * It is treated like an array of 4096 bits. We want to set |
@@ -599,23 +599,23 @@ ixgb_mta_set(struct ixgb_hw *hw, | |||
599 | *****************************************************************************/ | 599 | *****************************************************************************/ |
600 | void | 600 | void |
601 | ixgb_rar_set(struct ixgb_hw *hw, | 601 | ixgb_rar_set(struct ixgb_hw *hw, |
602 | uint8_t *addr, | 602 | u8 *addr, |
603 | uint32_t index) | 603 | u32 index) |
604 | { | 604 | { |
605 | uint32_t rar_low, rar_high; | 605 | u32 rar_low, rar_high; |
606 | 606 | ||
607 | DEBUGFUNC("ixgb_rar_set"); | 607 | DEBUGFUNC("ixgb_rar_set"); |
608 | 608 | ||
609 | /* HW expects these in little endian so we reverse the byte order | 609 | /* HW expects these in little endian so we reverse the byte order |
610 | * from network order (big endian) to little endian | 610 | * from network order (big endian) to little endian |
611 | */ | 611 | */ |
612 | rar_low = ((uint32_t) addr[0] | | 612 | rar_low = ((u32) addr[0] | |
613 | ((uint32_t)addr[1] << 8) | | 613 | ((u32)addr[1] << 8) | |
614 | ((uint32_t)addr[2] << 16) | | 614 | ((u32)addr[2] << 16) | |
615 | ((uint32_t)addr[3] << 24)); | 615 | ((u32)addr[3] << 24)); |
616 | 616 | ||
617 | rar_high = ((uint32_t) addr[4] | | 617 | rar_high = ((u32) addr[4] | |
618 | ((uint32_t)addr[5] << 8) | | 618 | ((u32)addr[5] << 8) | |
619 | IXGB_RAH_AV); | 619 | IXGB_RAH_AV); |
620 | 620 | ||
621 | IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 621 | IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
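As the comment above notes, the receive-address registers want the MAC in little-endian order, so the six network-order bytes are packed into a low and a high 32-bit word. A small sketch of that packing; the RAH_AV value and the example address are assumptions for illustration (the real valid bit is IXGB_RAH_AV):

#include <stdio.h>
#include <stdint.h>

#define RAH_AV 0x80000000u  /* assumed "address valid" bit for the sketch */

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* Bytes 0..3 go into the low register, least significant byte first. */
	uint32_t rar_low = (uint32_t)addr[0] |
			   ((uint32_t)addr[1] << 8) |
			   ((uint32_t)addr[2] << 16) |
			   ((uint32_t)addr[3] << 24);

	/* Bytes 4..5 plus the valid bit go into the high register. */
	uint32_t rar_high = (uint32_t)addr[4] |
			    ((uint32_t)addr[5] << 8) |
			    RAH_AV;

	printf("RAL = 0x%08x, RAH = 0x%08x\n",
	       (unsigned)rar_low, (unsigned)rar_high);
	return 0;
}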
@@ -632,8 +632,8 @@ ixgb_rar_set(struct ixgb_hw *hw, | |||
632 | *****************************************************************************/ | 632 | *****************************************************************************/ |
633 | void | 633 | void |
634 | ixgb_write_vfta(struct ixgb_hw *hw, | 634 | ixgb_write_vfta(struct ixgb_hw *hw, |
635 | uint32_t offset, | 635 | u32 offset, |
636 | uint32_t value) | 636 | u32 value) |
637 | { | 637 | { |
638 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 638 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
639 | return; | 639 | return; |
@@ -647,7 +647,7 @@ ixgb_write_vfta(struct ixgb_hw *hw, | |||
647 | static void | 647 | static void |
648 | ixgb_clear_vfta(struct ixgb_hw *hw) | 648 | ixgb_clear_vfta(struct ixgb_hw *hw) |
649 | { | 649 | { |
650 | uint32_t offset; | 650 | u32 offset; |
651 | 651 | ||
652 | for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) | 652 | for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) |
653 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); | 653 | IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); |
@@ -663,8 +663,8 @@ ixgb_clear_vfta(struct ixgb_hw *hw) | |||
663 | static bool | 663 | static bool |
664 | ixgb_setup_fc(struct ixgb_hw *hw) | 664 | ixgb_setup_fc(struct ixgb_hw *hw) |
665 | { | 665 | { |
666 | uint32_t ctrl_reg; | 666 | u32 ctrl_reg; |
667 | uint32_t pap_reg = 0; /* by default, assume no pause time */ | 667 | u32 pap_reg = 0; /* by default, assume no pause time */ |
668 | bool status = true; | 668 | bool status = true; |
669 | 669 | ||
670 | DEBUGFUNC("ixgb_setup_fc"); | 670 | DEBUGFUNC("ixgb_setup_fc"); |
@@ -762,15 +762,15 @@ ixgb_setup_fc(struct ixgb_hw *hw) | |||
762 | * This requires that first an address cycle command is sent, followed by a | 762 | * This requires that first an address cycle command is sent, followed by a |
763 | * read command. | 763 | * read command. |
764 | *****************************************************************************/ | 764 | *****************************************************************************/ |
765 | static uint16_t | 765 | static u16 |
766 | ixgb_read_phy_reg(struct ixgb_hw *hw, | 766 | ixgb_read_phy_reg(struct ixgb_hw *hw, |
767 | uint32_t reg_address, | 767 | u32 reg_address, |
768 | uint32_t phy_address, | 768 | u32 phy_address, |
769 | uint32_t device_type) | 769 | u32 device_type) |
770 | { | 770 | { |
771 | uint32_t i; | 771 | u32 i; |
772 | uint32_t data; | 772 | u32 data; |
773 | uint32_t command = 0; | 773 | u32 command = 0; |
774 | 774 | ||
775 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); | 775 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); |
776 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); | 776 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); |
@@ -835,7 +835,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, | |||
835 | */ | 835 | */ |
836 | data = IXGB_READ_REG(hw, MSRWD); | 836 | data = IXGB_READ_REG(hw, MSRWD); |
837 | data >>= IXGB_MSRWD_READ_DATA_SHIFT; | 837 | data >>= IXGB_MSRWD_READ_DATA_SHIFT; |
838 | return((uint16_t) data); | 838 | return((u16) data); |
839 | } | 839 | } |
840 | 840 | ||
841 | /****************************************************************************** | 841 | /****************************************************************************** |
@@ -857,20 +857,20 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, | |||
857 | *****************************************************************************/ | 857 | *****************************************************************************/ |
858 | static void | 858 | static void |
859 | ixgb_write_phy_reg(struct ixgb_hw *hw, | 859 | ixgb_write_phy_reg(struct ixgb_hw *hw, |
860 | uint32_t reg_address, | 860 | u32 reg_address, |
861 | uint32_t phy_address, | 861 | u32 phy_address, |
862 | uint32_t device_type, | 862 | u32 device_type, |
863 | uint16_t data) | 863 | u16 data) |
864 | { | 864 | { |
865 | uint32_t i; | 865 | u32 i; |
866 | uint32_t command = 0; | 866 | u32 command = 0; |
867 | 867 | ||
868 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); | 868 | ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); |
869 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); | 869 | ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); |
870 | ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); | 870 | ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); |
871 | 871 | ||
872 | /* Put the data in the MDIO Read/Write Data register */ | 872 | /* Put the data in the MDIO Read/Write Data register */ |
873 | IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data); | 873 | IXGB_WRITE_REG(hw, MSRWD, (u32)data); |
874 | 874 | ||
875 | /* Setup and write the address cycle command */ | 875 | /* Setup and write the address cycle command */ |
876 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | | 876 | command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | |
@@ -939,8 +939,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw, | |||
939 | void | 939 | void |
940 | ixgb_check_for_link(struct ixgb_hw *hw) | 940 | ixgb_check_for_link(struct ixgb_hw *hw) |
941 | { | 941 | { |
942 | uint32_t status_reg; | 942 | u32 status_reg; |
943 | uint32_t xpcss_reg; | 943 | u32 xpcss_reg; |
944 | 944 | ||
945 | DEBUGFUNC("ixgb_check_for_link"); | 945 | DEBUGFUNC("ixgb_check_for_link"); |
946 | 946 | ||
@@ -975,7 +975,7 @@ ixgb_check_for_link(struct ixgb_hw *hw) | |||
975 | *****************************************************************************/ | 975 | *****************************************************************************/ |
976 | bool ixgb_check_for_bad_link(struct ixgb_hw *hw) | 976 | bool ixgb_check_for_bad_link(struct ixgb_hw *hw) |
977 | { | 977 | { |
978 | uint32_t newLFC, newRFC; | 978 | u32 newLFC, newRFC; |
979 | bool bad_link_returncode = false; | 979 | bool bad_link_returncode = false; |
980 | 980 | ||
981 | if (hw->phy_type == ixgb_phy_type_txn17401) { | 981 | if (hw->phy_type == ixgb_phy_type_txn17401) { |
@@ -1002,7 +1002,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw) | |||
1002 | static void | 1002 | static void |
1003 | ixgb_clear_hw_cntrs(struct ixgb_hw *hw) | 1003 | ixgb_clear_hw_cntrs(struct ixgb_hw *hw) |
1004 | { | 1004 | { |
1005 | volatile uint32_t temp_reg; | 1005 | volatile u32 temp_reg; |
1006 | 1006 | ||
1007 | DEBUGFUNC("ixgb_clear_hw_cntrs"); | 1007 | DEBUGFUNC("ixgb_clear_hw_cntrs"); |
1008 | 1008 | ||
@@ -1083,7 +1083,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw) | |||
1083 | void | 1083 | void |
1084 | ixgb_led_on(struct ixgb_hw *hw) | 1084 | ixgb_led_on(struct ixgb_hw *hw) |
1085 | { | 1085 | { |
1086 | uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); | 1086 | u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); |
1087 | 1087 | ||
1088 | /* To turn on the LED, clear software-definable pin 0 (SDP0). */ | 1088 | /* To turn on the LED, clear software-definable pin 0 (SDP0). */ |
1089 | ctrl0_reg &= ~IXGB_CTRL0_SDP0; | 1089 | ctrl0_reg &= ~IXGB_CTRL0_SDP0; |
@@ -1099,7 +1099,7 @@ ixgb_led_on(struct ixgb_hw *hw) | |||
1099 | void | 1099 | void |
1100 | ixgb_led_off(struct ixgb_hw *hw) | 1100 | ixgb_led_off(struct ixgb_hw *hw) |
1101 | { | 1101 | { |
1102 | uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); | 1102 | u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); |
1103 | 1103 | ||
1104 | /* To turn off the LED, set software-definable pin 0 (SDP0). */ | 1104 | /* To turn off the LED, set software-definable pin 0 (SDP0). */ |
1105 | ctrl0_reg |= IXGB_CTRL0_SDP0; | 1105 | ctrl0_reg |= IXGB_CTRL0_SDP0; |
@@ -1115,7 +1115,7 @@ ixgb_led_off(struct ixgb_hw *hw) | |||
1115 | static void | 1115 | static void |
1116 | ixgb_get_bus_info(struct ixgb_hw *hw) | 1116 | ixgb_get_bus_info(struct ixgb_hw *hw) |
1117 | { | 1117 | { |
1118 | uint32_t status_reg; | 1118 | u32 status_reg; |
1119 | 1119 | ||
1120 | status_reg = IXGB_READ_REG(hw, STATUS); | 1120 | status_reg = IXGB_READ_REG(hw, STATUS); |
1121 | 1121 | ||
@@ -1155,7 +1155,7 @@ ixgb_get_bus_info(struct ixgb_hw *hw) | |||
1155 | * | 1155 | * |
1156 | *****************************************************************************/ | 1156 | *****************************************************************************/ |
1157 | static bool | 1157 | static bool |
1158 | mac_addr_valid(uint8_t *mac_addr) | 1158 | mac_addr_valid(u8 *mac_addr) |
1159 | { | 1159 | { |
1160 | bool is_valid = true; | 1160 | bool is_valid = true; |
1161 | DEBUGFUNC("mac_addr_valid"); | 1161 | DEBUGFUNC("mac_addr_valid"); |
@@ -1193,8 +1193,8 @@ static bool | |||
1193 | ixgb_link_reset(struct ixgb_hw *hw) | 1193 | ixgb_link_reset(struct ixgb_hw *hw) |
1194 | { | 1194 | { |
1195 | bool link_status = false; | 1195 | bool link_status = false; |
1196 | uint8_t wait_retries = MAX_RESET_ITERATIONS; | 1196 | u8 wait_retries = MAX_RESET_ITERATIONS; |
1197 | uint8_t lrst_retries = MAX_RESET_ITERATIONS; | 1197 | u8 lrst_retries = MAX_RESET_ITERATIONS; |
1198 | 1198 | ||
1199 | do { | 1199 | do { |
1200 | /* Reset the link */ | 1200 | /* Reset the link */ |
@@ -1224,7 +1224,7 @@ static void | |||
1224 | ixgb_optics_reset(struct ixgb_hw *hw) | 1224 | ixgb_optics_reset(struct ixgb_hw *hw) |
1225 | { | 1225 | { |
1226 | if (hw->phy_type == ixgb_phy_type_txn17401) { | 1226 | if (hw->phy_type == ixgb_phy_type_txn17401) { |
1227 | uint16_t mdio_reg; | 1227 | u16 mdio_reg; |
1228 | 1228 | ||
1229 | ixgb_write_phy_reg(hw, | 1229 | ixgb_write_phy_reg(hw, |
1230 | MDIO_PMA_PMD_CR1, | 1230 | MDIO_PMA_PMD_CR1, |
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h index d4e95665ce9e..39cfa47bea69 100644 --- a/drivers/net/ixgb/ixgb_hw.h +++ b/drivers/net/ixgb/ixgb_hw.h | |||
@@ -538,8 +538,8 @@ struct ixgb_rx_desc { | |||
538 | __le64 buff_addr; | 538 | __le64 buff_addr; |
539 | __le16 length; | 539 | __le16 length; |
540 | __le16 reserved; | 540 | __le16 reserved; |
541 | uint8_t status; | 541 | u8 status; |
542 | uint8_t errors; | 542 | u8 errors; |
543 | __le16 special; | 543 | __le16 special; |
544 | }; | 544 | }; |
545 | 545 | ||
@@ -570,8 +570,8 @@ struct ixgb_rx_desc { | |||
570 | struct ixgb_tx_desc { | 570 | struct ixgb_tx_desc { |
571 | __le64 buff_addr; | 571 | __le64 buff_addr; |
572 | __le32 cmd_type_len; | 572 | __le32 cmd_type_len; |
573 | uint8_t status; | 573 | u8 status; |
574 | uint8_t popts; | 574 | u8 popts; |
575 | __le16 vlan; | 575 | __le16 vlan; |
576 | }; | 576 | }; |
577 | 577 | ||
@@ -595,15 +595,15 @@ struct ixgb_tx_desc { | |||
595 | #define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ | 595 | #define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ |
596 | 596 | ||
597 | struct ixgb_context_desc { | 597 | struct ixgb_context_desc { |
598 | uint8_t ipcss; | 598 | u8 ipcss; |
599 | uint8_t ipcso; | 599 | u8 ipcso; |
600 | __le16 ipcse; | 600 | __le16 ipcse; |
601 | uint8_t tucss; | 601 | u8 tucss; |
602 | uint8_t tucso; | 602 | u8 tucso; |
603 | __le16 tucse; | 603 | __le16 tucse; |
604 | __le32 cmd_type_len; | 604 | __le32 cmd_type_len; |
605 | uint8_t status; | 605 | u8 status; |
606 | uint8_t hdr_len; | 606 | u8 hdr_len; |
607 | __le16 mss; | 607 | __le16 mss; |
608 | }; | 608 | }; |
609 | 609 | ||
@@ -637,32 +637,32 @@ struct ixgb_context_desc { | |||
637 | 637 | ||
638 | /* This structure takes a 64k flash and maps it for identification commands */ | 638 | /* This structure takes a 64k flash and maps it for identification commands */ |
639 | struct ixgb_flash_buffer { | 639 | struct ixgb_flash_buffer { |
640 | uint8_t manufacturer_id; | 640 | u8 manufacturer_id; |
641 | uint8_t device_id; | 641 | u8 device_id; |
642 | uint8_t filler1[0x2AA8]; | 642 | u8 filler1[0x2AA8]; |
643 | uint8_t cmd2; | 643 | u8 cmd2; |
644 | uint8_t filler2[0x2AAA]; | 644 | u8 filler2[0x2AAA]; |
645 | uint8_t cmd1; | 645 | u8 cmd1; |
646 | uint8_t filler3[0xAAAA]; | 646 | u8 filler3[0xAAAA]; |
647 | }; | 647 | }; |
648 | 648 | ||
649 | /* | 649 | /* |
650 | * This is a little-endian specific check. | 650 | * This is a little-endian specific check. |
651 | */ | 651 | */ |
652 | #define IS_MULTICAST(Address) \ | 652 | #define IS_MULTICAST(Address) \ |
653 | (bool)(((uint8_t *)(Address))[0] & ((uint8_t)0x01)) | 653 | (bool)(((u8 *)(Address))[0] & ((u8)0x01)) |
654 | 654 | ||
655 | /* | 655 | /* |
656 | * Check whether an address is broadcast. | 656 | * Check whether an address is broadcast. |
657 | */ | 657 | */ |
658 | #define IS_BROADCAST(Address) \ | 658 | #define IS_BROADCAST(Address) \ |
659 | ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff))) | 659 | ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff))) |
660 | 660 | ||
661 | /* Flow control parameters */ | 661 | /* Flow control parameters */ |
662 | struct ixgb_fc { | 662 | struct ixgb_fc { |
663 | uint32_t high_water; /* Flow Control High-water */ | 663 | u32 high_water; /* Flow Control High-water */ |
664 | uint32_t low_water; /* Flow Control Low-water */ | 664 | u32 low_water; /* Flow Control Low-water */ |
665 | uint16_t pause_time; /* Flow Control Pause timer */ | 665 | u16 pause_time; /* Flow Control Pause timer */ |
666 | bool send_xon; /* Flow control send XON */ | 666 | bool send_xon; /* Flow control send XON */ |
667 | ixgb_fc_type type; /* Type of flow control */ | 667 | ixgb_fc_type type; /* Type of flow control */ |
668 | }; | 668 | }; |
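The two macros above encode the usual Ethernet address tests: multicast is bit 0 of the first octet, and the broadcast check here only looks at the first two octets. A standalone sketch of both checks, with a full six-byte broadcast test added for comparison:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool is_multicast(const uint8_t *a)
{
	return a[0] & 0x01;                       /* group bit of the first octet */
}

static bool is_broadcast_full(const uint8_t *a)
{
	for (int i = 0; i < 6; i++)               /* all six octets must be 0xff */
		if (a[i] != 0xff)
			return false;
	return true;
}

int main(void)
{
	uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };

	printf("bcast: multicast=%d broadcast=%d\n",
	       is_multicast(bcast), is_broadcast_full(bcast));
	printf("ucast: multicast=%d broadcast=%d\n",
	       is_multicast(ucast), is_broadcast_full(ucast));
	return 0;
}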
@@ -685,101 +685,101 @@ struct ixgb_bus { | |||
685 | }; | 685 | }; |
686 | 686 | ||
687 | struct ixgb_hw { | 687 | struct ixgb_hw { |
688 | uint8_t __iomem *hw_addr;/* Base Address of the hardware */ | 688 | u8 __iomem *hw_addr;/* Base Address of the hardware */ |
689 | void *back; /* Pointer to OS-dependent struct */ | 689 | void *back; /* Pointer to OS-dependent struct */ |
690 | struct ixgb_fc fc; /* Flow control parameters */ | 690 | struct ixgb_fc fc; /* Flow control parameters */ |
691 | struct ixgb_bus bus; /* Bus parameters */ | 691 | struct ixgb_bus bus; /* Bus parameters */ |
692 | uint32_t phy_id; /* Phy Identifier */ | 692 | u32 phy_id; /* Phy Identifier */ |
693 | uint32_t phy_addr; /* XGMII address of Phy */ | 693 | u32 phy_addr; /* XGMII address of Phy */ |
694 | ixgb_mac_type mac_type; /* Identifier for MAC controller */ | 694 | ixgb_mac_type mac_type; /* Identifier for MAC controller */ |
695 | ixgb_phy_type phy_type; /* Transceiver/phy identifier */ | 695 | ixgb_phy_type phy_type; /* Transceiver/phy identifier */ |
696 | uint32_t max_frame_size; /* Maximum frame size supported */ | 696 | u32 max_frame_size; /* Maximum frame size supported */ |
697 | uint32_t mc_filter_type; /* Multicast filter hash type */ | 697 | u32 mc_filter_type; /* Multicast filter hash type */ |
698 | uint32_t num_mc_addrs; /* Number of current Multicast addrs */ | 698 | u32 num_mc_addrs; /* Number of current Multicast addrs */ |
699 | uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ | 699 | u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ |
700 | uint32_t num_tx_desc; /* Number of Transmit descriptors */ | 700 | u32 num_tx_desc; /* Number of Transmit descriptors */ |
701 | uint32_t num_rx_desc; /* Number of Receive descriptors */ | 701 | u32 num_rx_desc; /* Number of Receive descriptors */ |
702 | uint32_t rx_buffer_size; /* Size of Receive buffer */ | 702 | u32 rx_buffer_size; /* Size of Receive buffer */ |
703 | bool link_up; /* true if link is valid */ | 703 | bool link_up; /* true if link is valid */ |
704 | bool adapter_stopped; /* State of adapter */ | 704 | bool adapter_stopped; /* State of adapter */ |
705 | uint16_t device_id; /* device id from PCI configuration space */ | 705 | u16 device_id; /* device id from PCI configuration space */ |
706 | uint16_t vendor_id; /* vendor id from PCI configuration space */ | 706 | u16 vendor_id; /* vendor id from PCI configuration space */ |
707 | uint8_t revision_id; /* revision id from PCI configuration space */ | 707 | u8 revision_id; /* revision id from PCI configuration space */ |
708 | uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ | 708 | u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ |
709 | uint16_t subsystem_id; /* subsystem id from PCI configuration space */ | 709 | u16 subsystem_id; /* subsystem id from PCI configuration space */ |
710 | uint32_t bar0; /* Base Address registers */ | 710 | u32 bar0; /* Base Address registers */ |
711 | uint32_t bar1; | 711 | u32 bar1; |
712 | uint32_t bar2; | 712 | u32 bar2; |
713 | uint32_t bar3; | 713 | u32 bar3; |
714 | uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */ | 714 | u16 pci_cmd_word; /* PCI command register id from PCI configuration space */ |
715 | __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ | 715 | __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ |
716 | unsigned long io_base; /* Our I/O mapped location */ | 716 | unsigned long io_base; /* Our I/O mapped location */ |
717 | uint32_t lastLFC; | 717 | u32 lastLFC; |
718 | uint32_t lastRFC; | 718 | u32 lastRFC; |
719 | }; | 719 | }; |
720 | 720 | ||
721 | /* Statistics reported by the hardware */ | 721 | /* Statistics reported by the hardware */ |
722 | struct ixgb_hw_stats { | 722 | struct ixgb_hw_stats { |
723 | uint64_t tprl; | 723 | u64 tprl; |
724 | uint64_t tprh; | 724 | u64 tprh; |
725 | uint64_t gprcl; | 725 | u64 gprcl; |
726 | uint64_t gprch; | 726 | u64 gprch; |
727 | uint64_t bprcl; | 727 | u64 bprcl; |
728 | uint64_t bprch; | 728 | u64 bprch; |
729 | uint64_t mprcl; | 729 | u64 mprcl; |
730 | uint64_t mprch; | 730 | u64 mprch; |
731 | uint64_t uprcl; | 731 | u64 uprcl; |
732 | uint64_t uprch; | 732 | u64 uprch; |
733 | uint64_t vprcl; | 733 | u64 vprcl; |
734 | uint64_t vprch; | 734 | u64 vprch; |
735 | uint64_t jprcl; | 735 | u64 jprcl; |
736 | uint64_t jprch; | 736 | u64 jprch; |
737 | uint64_t gorcl; | 737 | u64 gorcl; |
738 | uint64_t gorch; | 738 | u64 gorch; |
739 | uint64_t torl; | 739 | u64 torl; |
740 | uint64_t torh; | 740 | u64 torh; |
741 | uint64_t rnbc; | 741 | u64 rnbc; |
742 | uint64_t ruc; | 742 | u64 ruc; |
743 | uint64_t roc; | 743 | u64 roc; |
744 | uint64_t rlec; | 744 | u64 rlec; |
745 | uint64_t crcerrs; | 745 | u64 crcerrs; |
746 | uint64_t icbc; | 746 | u64 icbc; |
747 | uint64_t ecbc; | 747 | u64 ecbc; |
748 | uint64_t mpc; | 748 | u64 mpc; |
749 | uint64_t tptl; | 749 | u64 tptl; |
750 | uint64_t tpth; | 750 | u64 tpth; |
751 | uint64_t gptcl; | 751 | u64 gptcl; |
752 | uint64_t gptch; | 752 | u64 gptch; |
753 | uint64_t bptcl; | 753 | u64 bptcl; |
754 | uint64_t bptch; | 754 | u64 bptch; |
755 | uint64_t mptcl; | 755 | u64 mptcl; |
756 | uint64_t mptch; | 756 | u64 mptch; |
757 | uint64_t uptcl; | 757 | u64 uptcl; |
758 | uint64_t uptch; | 758 | u64 uptch; |
759 | uint64_t vptcl; | 759 | u64 vptcl; |
760 | uint64_t vptch; | 760 | u64 vptch; |
761 | uint64_t jptcl; | 761 | u64 jptcl; |
762 | uint64_t jptch; | 762 | u64 jptch; |
763 | uint64_t gotcl; | 763 | u64 gotcl; |
764 | uint64_t gotch; | 764 | u64 gotch; |
765 | uint64_t totl; | 765 | u64 totl; |
766 | uint64_t toth; | 766 | u64 toth; |
767 | uint64_t dc; | 767 | u64 dc; |
768 | uint64_t plt64c; | 768 | u64 plt64c; |
769 | uint64_t tsctc; | 769 | u64 tsctc; |
770 | uint64_t tsctfc; | 770 | u64 tsctfc; |
771 | uint64_t ibic; | 771 | u64 ibic; |
772 | uint64_t rfc; | 772 | u64 rfc; |
773 | uint64_t lfc; | 773 | u64 lfc; |
774 | uint64_t pfrc; | 774 | u64 pfrc; |
775 | uint64_t pftc; | 775 | u64 pftc; |
776 | uint64_t mcfrc; | 776 | u64 mcfrc; |
777 | uint64_t mcftc; | 777 | u64 mcftc; |
778 | uint64_t xonrxc; | 778 | u64 xonrxc; |
779 | uint64_t xontxc; | 779 | u64 xontxc; |
780 | uint64_t xoffrxc; | 780 | u64 xoffrxc; |
781 | uint64_t xofftxc; | 781 | u64 xofftxc; |
782 | uint64_t rjc; | 782 | u64 rjc; |
783 | }; | 783 | }; |
784 | 784 | ||
785 | /* Function Prototypes */ | 785 | /* Function Prototypes */ |
@@ -790,34 +790,34 @@ extern void ixgb_check_for_link(struct ixgb_hw *hw); | |||
790 | extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); | 790 | extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); |
791 | 791 | ||
792 | extern void ixgb_rar_set(struct ixgb_hw *hw, | 792 | extern void ixgb_rar_set(struct ixgb_hw *hw, |
793 | uint8_t *addr, | 793 | u8 *addr, |
794 | uint32_t index); | 794 | u32 index); |
795 | 795 | ||
796 | 796 | ||
797 | /* Filters (multicast, vlan, receive) */ | 797 | /* Filters (multicast, vlan, receive) */ |
798 | extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, | 798 | extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, |
799 | uint8_t *mc_addr_list, | 799 | u8 *mc_addr_list, |
800 | uint32_t mc_addr_count, | 800 | u32 mc_addr_count, |
801 | uint32_t pad); | 801 | u32 pad); |
802 | 802 | ||
803 | /* Vfta functions */ | 803 | /* Vfta functions */ |
804 | extern void ixgb_write_vfta(struct ixgb_hw *hw, | 804 | extern void ixgb_write_vfta(struct ixgb_hw *hw, |
805 | uint32_t offset, | 805 | u32 offset, |
806 | uint32_t value); | 806 | u32 value); |
807 | 807 | ||
808 | /* Access functions to eeprom data */ | 808 | /* Access functions to eeprom data */ |
809 | void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); | 809 | void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr); |
810 | uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); | 810 | u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw); |
811 | uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); | 811 | u16 ixgb_get_ee_device_id(struct ixgb_hw *hw); |
812 | bool ixgb_get_eeprom_data(struct ixgb_hw *hw); | 812 | bool ixgb_get_eeprom_data(struct ixgb_hw *hw); |
813 | __le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); | 813 | __le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index); |
814 | 814 | ||
815 | /* Everything else */ | 815 | /* Everything else */ |
816 | void ixgb_led_on(struct ixgb_hw *hw); | 816 | void ixgb_led_on(struct ixgb_hw *hw); |
817 | void ixgb_led_off(struct ixgb_hw *hw); | 817 | void ixgb_led_off(struct ixgb_hw *hw); |
818 | void ixgb_write_pci_cfg(struct ixgb_hw *hw, | 818 | void ixgb_write_pci_cfg(struct ixgb_hw *hw, |
819 | uint32_t reg, | 819 | u32 reg, |
820 | uint16_t * value); | 820 | u16 * value); |
821 | 821 | ||
822 | 822 | ||
823 | #endif /* _IXGB_HW_H_ */ | 823 | #endif /* _IXGB_HW_H_ */ |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c68b182af008..cb8daddafa29 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -108,8 +108,8 @@ static void ixgb_tx_timeout(struct net_device *dev); | |||
108 | static void ixgb_tx_timeout_task(struct work_struct *work); | 108 | static void ixgb_tx_timeout_task(struct work_struct *work); |
109 | static void ixgb_vlan_rx_register(struct net_device *netdev, | 109 | static void ixgb_vlan_rx_register(struct net_device *netdev, |
110 | struct vlan_group *grp); | 110 | struct vlan_group *grp); |
111 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 111 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); |
112 | static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); | 112 | static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); |
113 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); | 113 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); |
114 | 114 | ||
115 | #ifdef CONFIG_NET_POLL_CONTROLLER | 115 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -271,7 +271,7 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
271 | 271 | ||
272 | if(hw->max_frame_size > | 272 | if(hw->max_frame_size > |
273 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { | 273 | IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { |
274 | uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0); | 274 | u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); |
275 | 275 | ||
276 | if(!(ctrl0 & IXGB_CTRL0_JFE)) { | 276 | if(!(ctrl0 & IXGB_CTRL0_JFE)) { |
277 | ctrl0 |= IXGB_CTRL0_JFE; | 277 | ctrl0 |= IXGB_CTRL0_JFE; |
@@ -718,9 +718,9 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
718 | static void | 718 | static void |
719 | ixgb_configure_tx(struct ixgb_adapter *adapter) | 719 | ixgb_configure_tx(struct ixgb_adapter *adapter) |
720 | { | 720 | { |
721 | uint64_t tdba = adapter->tx_ring.dma; | 721 | u64 tdba = adapter->tx_ring.dma; |
722 | uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); | 722 | u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); |
723 | uint32_t tctl; | 723 | u32 tctl; |
724 | struct ixgb_hw *hw = &adapter->hw; | 724 | struct ixgb_hw *hw = &adapter->hw; |
725 | 725 | ||
726 | /* Setup the Base and Length of the Tx Descriptor Ring | 726 | /* Setup the Base and Length of the Tx Descriptor Ring |
@@ -806,7 +806,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
806 | static void | 806 | static void |
807 | ixgb_setup_rctl(struct ixgb_adapter *adapter) | 807 | ixgb_setup_rctl(struct ixgb_adapter *adapter) |
808 | { | 808 | { |
809 | uint32_t rctl; | 809 | u32 rctl; |
810 | 810 | ||
811 | rctl = IXGB_READ_REG(&adapter->hw, RCTL); | 811 | rctl = IXGB_READ_REG(&adapter->hw, RCTL); |
812 | 812 | ||
@@ -841,12 +841,12 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter) | |||
841 | static void | 841 | static void |
842 | ixgb_configure_rx(struct ixgb_adapter *adapter) | 842 | ixgb_configure_rx(struct ixgb_adapter *adapter) |
843 | { | 843 | { |
844 | uint64_t rdba = adapter->rx_ring.dma; | 844 | u64 rdba = adapter->rx_ring.dma; |
845 | uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); | 845 | u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); |
846 | struct ixgb_hw *hw = &adapter->hw; | 846 | struct ixgb_hw *hw = &adapter->hw; |
847 | uint32_t rctl; | 847 | u32 rctl; |
848 | uint32_t rxcsum; | 848 | u32 rxcsum; |
849 | uint32_t rxdctl; | 849 | u32 rxdctl; |
850 | 850 | ||
851 | /* make sure receives are disabled while setting up the descriptors */ | 851 | /* make sure receives are disabled while setting up the descriptors */ |
852 | 852 | ||
@@ -1079,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev) | |||
1079 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1079 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
1080 | struct ixgb_hw *hw = &adapter->hw; | 1080 | struct ixgb_hw *hw = &adapter->hw; |
1081 | struct dev_mc_list *mc_ptr; | 1081 | struct dev_mc_list *mc_ptr; |
1082 | uint32_t rctl; | 1082 | u32 rctl; |
1083 | int i; | 1083 | int i; |
1084 | 1084 | ||
1085 | /* Check for Promiscuous and All Multicast modes */ | 1085 | /* Check for Promiscuous and All Multicast modes */ |
@@ -1099,7 +1099,7 @@ ixgb_set_multi(struct net_device *netdev) | |||
1099 | rctl |= IXGB_RCTL_MPE; | 1099 | rctl |= IXGB_RCTL_MPE; |
1100 | IXGB_WRITE_REG(hw, RCTL, rctl); | 1100 | IXGB_WRITE_REG(hw, RCTL, rctl); |
1101 | } else { | 1101 | } else { |
1102 | uint8_t mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * | 1102 | u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * |
1103 | IXGB_ETH_LENGTH_OF_ADDRESS]; | 1103 | IXGB_ETH_LENGTH_OF_ADDRESS]; |
1104 | 1104 | ||
1105 | IXGB_WRITE_REG(hw, RCTL, rctl); | 1105 | IXGB_WRITE_REG(hw, RCTL, rctl); |
@@ -1183,8 +1183,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1183 | { | 1183 | { |
1184 | struct ixgb_context_desc *context_desc; | 1184 | struct ixgb_context_desc *context_desc; |
1185 | unsigned int i; | 1185 | unsigned int i; |
1186 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 1186 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
1187 | uint16_t ipcse, tucse, mss; | 1187 | u16 ipcse, tucse, mss; |
1188 | int err; | 1188 | int err; |
1189 | 1189 | ||
1190 | if (likely(skb_is_gso(skb))) { | 1190 | if (likely(skb_is_gso(skb))) { |
@@ -1249,7 +1249,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1249 | { | 1249 | { |
1250 | struct ixgb_context_desc *context_desc; | 1250 | struct ixgb_context_desc *context_desc; |
1251 | unsigned int i; | 1251 | unsigned int i; |
1252 | uint8_t css, cso; | 1252 | u8 css, cso; |
1253 | 1253 | ||
1254 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | 1254 | if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { |
1255 | struct ixgb_buffer *buffer_info; | 1255 | struct ixgb_buffer *buffer_info; |
@@ -1265,7 +1265,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1265 | context_desc->tucso = cso; | 1265 | context_desc->tucso = cso; |
1266 | context_desc->tucse = 0; | 1266 | context_desc->tucse = 0; |
1267 | /* zero out any previously existing data in one instruction */ | 1267 | /* zero out any previously existing data in one instruction */ |
1268 | *(uint32_t *)&(context_desc->ipcss) = 0; | 1268 | *(u32 *)&(context_desc->ipcss) = 0; |
1269 | context_desc->status = 0; | 1269 | context_desc->status = 0; |
1270 | context_desc->hdr_len = 0; | 1270 | context_desc->hdr_len = 0; |
1271 | context_desc->mss = 0; | 1271 | context_desc->mss = 0; |
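The *(u32 *)&(context_desc->ipcss) = 0 line above clears the two one-byte fields ipcss/ipcso and the two-byte ipcse with a single 32-bit store. A small sketch of the same trick on a stand-in struct; it relies on the fields being contiguous and suitably aligned, which the hardware descriptor layout guarantees for ixgb but is only an assumption for the struct below, and memcpy is used here instead of the driver's direct cast purely to keep the sketch strictly portable:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ctx_desc {                 /* loosely modelled on ixgb_context_desc */
	uint8_t  ipcss;
	uint8_t  ipcso;
	uint16_t ipcse;
	uint8_t  status;
};

int main(void)
{
	struct ctx_desc d = { 0x11, 0x22, 0x3344, 0x55 };

	/* One 32-bit store clears ipcss, ipcso and ipcse together. */
	uint32_t zero = 0;
	memcpy(&d.ipcss, &zero, sizeof(zero));

	printf("ipcss=%d ipcso=%d ipcse=%d status=0x%x\n",
	       d.ipcss, d.ipcso, d.ipcse, d.status);
	return 0;
}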
@@ -1372,9 +1372,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | |||
1372 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | 1372 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; |
1373 | struct ixgb_tx_desc *tx_desc = NULL; | 1373 | struct ixgb_tx_desc *tx_desc = NULL; |
1374 | struct ixgb_buffer *buffer_info; | 1374 | struct ixgb_buffer *buffer_info; |
1375 | uint32_t cmd_type_len = adapter->tx_cmd_type; | 1375 | u32 cmd_type_len = adapter->tx_cmd_type; |
1376 | uint8_t status = 0; | 1376 | u8 status = 0; |
1377 | uint8_t popts = 0; | 1377 | u8 popts = 0; |
1378 | unsigned int i; | 1378 | unsigned int i; |
1379 | 1379 | ||
1380 | if(tx_flags & IXGB_TX_FLAGS_TSO) { | 1380 | if(tx_flags & IXGB_TX_FLAGS_TSO) { |
@@ -1750,7 +1750,7 @@ ixgb_intr(int irq, void *data) | |||
1750 | struct net_device *netdev = data; | 1750 | struct net_device *netdev = data; |
1751 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1751 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
1752 | struct ixgb_hw *hw = &adapter->hw; | 1752 | struct ixgb_hw *hw = &adapter->hw; |
1753 | uint32_t icr = IXGB_READ_REG(hw, ICR); | 1753 | u32 icr = IXGB_READ_REG(hw, ICR); |
1754 | #ifndef CONFIG_IXGB_NAPI | 1754 | #ifndef CONFIG_IXGB_NAPI |
1755 | unsigned int i; | 1755 | unsigned int i; |
1756 | #endif | 1756 | #endif |
@@ -1843,7 +1843,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | |||
1843 | 1843 | ||
1844 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | 1844 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); |
1845 | 1845 | ||
1846 | *(uint32_t *)&(tx_desc->status) = 0; | 1846 | *(u32 *)&(tx_desc->status) = 0; |
1847 | 1847 | ||
1848 | cleaned = (i == eop); | 1848 | cleaned = (i == eop); |
1849 | if(++i == tx_ring->count) i = 0; | 1849 | if(++i == tx_ring->count) i = 0; |
@@ -1948,7 +1948,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1948 | struct pci_dev *pdev = adapter->pdev; | 1948 | struct pci_dev *pdev = adapter->pdev; |
1949 | struct ixgb_rx_desc *rx_desc, *next_rxd; | 1949 | struct ixgb_rx_desc *rx_desc, *next_rxd; |
1950 | struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; | 1950 | struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; |
1951 | uint32_t length; | 1951 | u32 length; |
1952 | unsigned int i, j; | 1952 | unsigned int i, j; |
1953 | bool cleaned = false; | 1953 | bool cleaned = false; |
1954 | 1954 | ||
@@ -2166,7 +2166,7 @@ static void | |||
2166 | ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | 2166 | ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) |
2167 | { | 2167 | { |
2168 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 2168 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
2169 | uint32_t ctrl, rctl; | 2169 | u32 ctrl, rctl; |
2170 | 2170 | ||
2171 | ixgb_irq_disable(adapter); | 2171 | ixgb_irq_disable(adapter); |
2172 | adapter->vlgrp = grp; | 2172 | adapter->vlgrp = grp; |
@@ -2203,10 +2203,10 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
2203 | } | 2203 | } |
2204 | 2204 | ||
2205 | static void | 2205 | static void |
2206 | ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | 2206 | ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
2207 | { | 2207 | { |
2208 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 2208 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
2209 | uint32_t vfta, index; | 2209 | u32 vfta, index; |
2210 | 2210 | ||
2211 | /* add VID to filter table */ | 2211 | /* add VID to filter table */ |
2212 | 2212 | ||
@@ -2217,10 +2217,10 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | |||
2217 | } | 2217 | } |
2218 | 2218 | ||
2219 | static void | 2219 | static void |
2220 | ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | 2220 | ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
2221 | { | 2221 | { |
2222 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 2222 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
2223 | uint32_t vfta, index; | 2223 | u32 vfta, index; |
2224 | 2224 | ||
2225 | ixgb_irq_disable(adapter); | 2225 | ixgb_irq_disable(adapter); |
2226 | 2226 | ||
@@ -2244,7 +2244,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
2244 | ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 2244 | ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
2245 | 2245 | ||
2246 | if(adapter->vlgrp) { | 2246 | if(adapter->vlgrp) { |
2247 | uint16_t vid; | 2247 | u16 vid; |
2248 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 2248 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
2249 | if(!vlan_group_get_device(adapter->vlgrp, vid)) | 2249 | if(!vlan_group_get_device(adapter->vlgrp, vid)) |
2250 | continue; | 2250 | continue; |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 489c7c3b90d9..d513bb8a4902 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -246,7 +246,7 @@ static int macb_mii_init(struct macb *bp) | |||
246 | bp->mii_bus.read = &macb_mdio_read; | 246 | bp->mii_bus.read = &macb_mdio_read; |
247 | bp->mii_bus.write = &macb_mdio_write; | 247 | bp->mii_bus.write = &macb_mdio_write; |
248 | bp->mii_bus.reset = &macb_mdio_reset; | 248 | bp->mii_bus.reset = &macb_mdio_reset; |
249 | bp->mii_bus.id = bp->pdev->id; | 249 | snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id); |
250 | bp->mii_bus.priv = bp; | 250 | bp->mii_bus.priv = bp; |
251 | bp->mii_bus.dev = &bp->dev->dev; | 251 | bp->mii_bus.dev = &bp->dev->dev; |
252 | pdata = bp->pdev->dev.platform_data; | 252 | pdata = bp->pdev->dev.platform_data; |
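The macb hunk follows the same shape as the other MDIO changes in this series: the bus identifier is now a fixed-size string, so the integer platform-device id is rendered into it with snprintf(). A tiny sketch of that formatting; the MII_BUS_ID_SIZE value below is assumed for the example, the real constant comes from linux/phy.h:

#include <stdio.h>

#define MII_BUS_ID_SIZE 17   /* assumed size for the sketch */

int main(void)
{
	char bus_id[MII_BUS_ID_SIZE];
	int pdev_id = 0x1;

	/* Same formatting the driver uses: plain hex, no prefix. */
	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", pdev_id);
	printf("mii_bus id = \"%s\"\n", bus_id);
	return 0;
}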
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index d65cadef4d22..601ffd69ebc8 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> | 3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> |
4 | * | 4 | * |
5 | * Based on the 64360 driver from: | 5 | * Based on the 64360 driver from: |
6 | * Copyright (C) 2002 rabeeh@galileo.co.il | 6 | * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> |
7 | * Rabeeh Khoury <rabeeh@marvell.com> | ||
7 | * | 8 | * |
8 | * Copyright (C) 2003 PMC-Sierra, Inc., | 9 | * Copyright (C) 2003 PMC-Sierra, Inc., |
9 | * written by Manish Lachwani | 10 | * written by Manish Lachwani |
@@ -16,6 +17,9 @@ | |||
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | 17 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
17 | * <sjhill@realitydiluted.com> | 18 | * <sjhill@realitydiluted.com> |
18 | * | 19 | * |
20 | * Copyright (C) 2007-2008 Marvell Semiconductor | ||
21 | * Lennert Buytenhek <buytenh@marvell.com> | ||
22 | * | ||
19 | * This program is free software; you can redistribute it and/or | 23 | * This program is free software; you can redistribute it and/or |
20 | * modify it under the terms of the GNU General Public License | 24 | * modify it under the terms of the GNU General Public License |
21 | * as published by the Free Software Foundation; either version 2 | 25 | * as published by the Free Software Foundation; either version 2 |
@@ -63,20 +67,6 @@ | |||
63 | #define MV643XX_TX_FAST_REFILL | 67 | #define MV643XX_TX_FAST_REFILL |
64 | #undef MV643XX_COAL | 68 | #undef MV643XX_COAL |
65 | 69 | ||
66 | /* | ||
67 | * Number of RX / TX descriptors on RX / TX rings. | ||
68 | * Note that allocating RX descriptors is done by allocating the RX | ||
69 | * ring AND a preallocated RX buffers (skb's) for each descriptor. | ||
70 | * The TX descriptors only allocates the TX descriptors ring, | ||
71 | * with no pre allocated TX buffers (skb's are allocated by higher layers. | ||
72 | */ | ||
73 | |||
74 | /* Default TX ring size is 1000 descriptors */ | ||
75 | #define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000 | ||
76 | |||
77 | /* Default RX ring size is 400 descriptors */ | ||
78 | #define MV643XX_DEFAULT_RX_QUEUE_SIZE 400 | ||
79 | |||
80 | #define MV643XX_TX_COAL 100 | 70 | #define MV643XX_TX_COAL 100 |
81 | #ifdef MV643XX_COAL | 71 | #ifdef MV643XX_COAL |
82 | #define MV643XX_RX_COAL 100 | 72 | #define MV643XX_RX_COAL 100 |
@@ -434,14 +424,6 @@ typedef enum _eth_func_ret_status { | |||
434 | ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ | 424 | ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ |
435 | } ETH_FUNC_RET_STATUS; | 425 | } ETH_FUNC_RET_STATUS; |
436 | 426 | ||
437 | typedef enum _eth_target { | ||
438 | ETH_TARGET_DRAM, | ||
439 | ETH_TARGET_DEVICE, | ||
440 | ETH_TARGET_CBS, | ||
441 | ETH_TARGET_PCI0, | ||
442 | ETH_TARGET_PCI1 | ||
443 | } ETH_TARGET; | ||
444 | |||
445 | /* These are for big-endian machines. Little endian needs different | 427 | /* These are for big-endian machines. Little endian needs different |
446 | * definitions. | 428 | * definitions. |
447 | */ | 429 | */ |
@@ -586,43 +568,44 @@ struct mv643xx_private { | |||
586 | 568 | ||
587 | /* Static function declarations */ | 569 | /* Static function declarations */ |
588 | static void eth_port_init(struct mv643xx_private *mp); | 570 | static void eth_port_init(struct mv643xx_private *mp); |
589 | static void eth_port_reset(unsigned int eth_port_num); | 571 | static void eth_port_reset(struct mv643xx_private *mp); |
590 | static void eth_port_start(struct net_device *dev); | 572 | static void eth_port_start(struct net_device *dev); |
591 | 573 | ||
592 | static void ethernet_phy_reset(unsigned int eth_port_num); | 574 | static void ethernet_phy_reset(struct mv643xx_private *mp); |
593 | 575 | ||
594 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | 576 | static void eth_port_write_smi_reg(struct mv643xx_private *mp, |
595 | unsigned int phy_reg, unsigned int value); | 577 | unsigned int phy_reg, unsigned int value); |
596 | 578 | ||
597 | static void eth_port_read_smi_reg(unsigned int eth_port_num, | 579 | static void eth_port_read_smi_reg(struct mv643xx_private *mp, |
598 | unsigned int phy_reg, unsigned int *value); | 580 | unsigned int phy_reg, unsigned int *value); |
599 | 581 | ||
600 | static void eth_clear_mib_counters(unsigned int eth_port_num); | 582 | static void eth_clear_mib_counters(struct mv643xx_private *mp); |
601 | 583 | ||
602 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | 584 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |
603 | struct pkt_info *p_pkt_info); | 585 | struct pkt_info *p_pkt_info); |
604 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | 586 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, |
605 | struct pkt_info *p_pkt_info); | 587 | struct pkt_info *p_pkt_info); |
606 | 588 | ||
607 | static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); | 589 | static void eth_port_uc_addr_get(struct mv643xx_private *mp, |
608 | static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); | 590 | unsigned char *p_addr); |
591 | static void eth_port_uc_addr_set(struct mv643xx_private *mp, | ||
592 | unsigned char *p_addr); | ||
609 | static void eth_port_set_multicast_list(struct net_device *); | 593 | static void eth_port_set_multicast_list(struct net_device *); |
610 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | 594 | static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp, |
611 | unsigned int queues); | 595 | unsigned int queues); |
612 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | 596 | static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp, |
613 | unsigned int queues); | 597 | unsigned int queues); |
614 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); | 598 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp); |
615 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); | 599 | static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp); |
616 | static int mv643xx_eth_open(struct net_device *); | 600 | static int mv643xx_eth_open(struct net_device *); |
617 | static int mv643xx_eth_stop(struct net_device *); | 601 | static int mv643xx_eth_stop(struct net_device *); |
618 | static int mv643xx_eth_change_mtu(struct net_device *, int); | 602 | static void eth_port_init_mac_tables(struct mv643xx_private *mp); |
619 | static void eth_port_init_mac_tables(unsigned int eth_port_num); | ||
620 | #ifdef MV643XX_NAPI | 603 | #ifdef MV643XX_NAPI |
621 | static int mv643xx_poll(struct napi_struct *napi, int budget); | 604 | static int mv643xx_poll(struct napi_struct *napi, int budget); |
622 | #endif | 605 | #endif |
623 | static int ethernet_phy_get(unsigned int eth_port_num); | 606 | static int ethernet_phy_get(struct mv643xx_private *mp); |
624 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 607 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr); |
625 | static int ethernet_phy_detect(unsigned int eth_port_num); | 608 | static int ethernet_phy_detect(struct mv643xx_private *mp); |
626 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); | 609 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); |
627 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); | 610 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); |
628 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 611 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
@@ -636,12 +619,12 @@ static void __iomem *mv643xx_eth_base; | |||
636 | /* used to protect SMI_REG, which is shared across ports */ | 619 | /* used to protect SMI_REG, which is shared across ports */ |
637 | static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); | 620 | static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); |
638 | 621 | ||
639 | static inline u32 mv_read(int offset) | 622 | static inline u32 rdl(struct mv643xx_private *mp, int offset) |
640 | { | 623 | { |
641 | return readl(mv643xx_eth_base + offset); | 624 | return readl(mv643xx_eth_base + offset); |
642 | } | 625 | } |
643 | 626 | ||
644 | static inline void mv_write(int offset, u32 data) | 627 | static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) |
645 | { | 628 | { |
646 | writel(data, mv643xx_eth_base + offset); | 629 | writel(data, mv643xx_eth_base + offset); |
647 | } | 630 | } |
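The rdl()/wrl() helpers above replace the old global mv_read()/mv_write() accessors; passing the private struct is what lets later code stop relying on a file-scope base address. A hedged sketch of the same accessor shape over a plain memory buffer, a userspace stand-in for readl/writel, which of course perform real MMIO in the kernel:

#include <stdio.h>
#include <stdint.h>

struct fake_port {
	uint32_t regs[64];   /* stand-in for the ioremapped register window */
};

static inline uint32_t rdl(struct fake_port *mp, int offset)
{
	return mp->regs[offset / 4];          /* readl(base + offset) in the driver */
}

static inline void wrl(struct fake_port *mp, int offset, uint32_t data)
{
	mp->regs[offset / 4] = data;          /* writel(data, base + offset) */
}

int main(void)
{
	struct fake_port port = { { 0 } };

	wrl(&port, 0x10, 0xdeadbeef);
	printf("offset 0x10 reads back 0x%08x\n", (unsigned)rdl(&port, 0x10));
	return 0;
}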
@@ -659,18 +642,19 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | |||
659 | return -EINVAL; | 642 | return -EINVAL; |
660 | 643 | ||
661 | dev->mtu = new_mtu; | 644 | dev->mtu = new_mtu; |
645 | if (!netif_running(dev)) | ||
646 | return 0; | ||
647 | |||
662 | /* | 648 | /* |
663 | * Stop then re-open the interface. This will allocate RX skb's with | 649 | * Stop and then re-open the interface. This will allocate RX |
664 | * the new MTU. | 650 | * skbs of the new MTU. |
665 | * There is a possible danger that the open will not successed, due | 651 | * There is a possible danger that the open will not succeed, |
666 | * to memory is full, which might fail the open function. | 652 | * due to memory being full, which might fail the open function. |
667 | */ | 653 | */ |
668 | if (netif_running(dev)) { | 654 | mv643xx_eth_stop(dev); |
669 | mv643xx_eth_stop(dev); | 655 | if (mv643xx_eth_open(dev)) { |
670 | if (mv643xx_eth_open(dev)) | 656 | printk(KERN_ERR "%s: Fatal error on opening device\n", |
671 | printk(KERN_ERR | 657 | dev->name); |
672 | "%s: Fatal error on opening device\n", | ||
673 | dev->name); | ||
674 | } | 658 | } |
675 | 659 | ||
676 | return 0; | 660 | return 0; |
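The reworked change_mtu above returns early when the interface is down and otherwise restarts it so receive buffers are reallocated at the new size. A compact sketch of that control flow; netif_running, the stop/open pair, the error report and the MTU bounds are all trivial stand-ins for illustration:

#include <stdio.h>
#include <stdbool.h>

static bool running = true;

static void dev_stop(void) { printf("stop\n"); }
static int  dev_open(void) { printf("open: allocate RX buffers for new MTU\n"); return 0; }

static int change_mtu(int *mtu, int new_mtu)
{
	if (new_mtu < 64 || new_mtu > 9500)   /* illustrative bounds only */
		return -1;

	*mtu = new_mtu;
	if (!running)                          /* nothing to restart */
		return 0;

	dev_stop();
	if (dev_open())
		printf("fatal error on re-opening device\n");
	return 0;
}

int main(void)
{
	int mtu = 1500;
	return change_mtu(&mtu, 9000);
}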
@@ -748,10 +732,9 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data) | |||
748 | static void mv643xx_eth_update_mac_address(struct net_device *dev) | 732 | static void mv643xx_eth_update_mac_address(struct net_device *dev) |
749 | { | 733 | { |
750 | struct mv643xx_private *mp = netdev_priv(dev); | 734 | struct mv643xx_private *mp = netdev_priv(dev); |
751 | unsigned int port_num = mp->port_num; | ||
752 | 735 | ||
753 | eth_port_init_mac_tables(port_num); | 736 | eth_port_init_mac_tables(mp); |
754 | eth_port_uc_addr_set(port_num, dev->dev_addr); | 737 | eth_port_uc_addr_set(mp, dev->dev_addr); |
755 | } | 738 | } |
756 | 739 | ||
757 | /* | 740 | /* |
@@ -767,12 +750,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev) | |||
767 | struct mv643xx_private *mp = netdev_priv(dev); | 750 | struct mv643xx_private *mp = netdev_priv(dev); |
768 | u32 config_reg; | 751 | u32 config_reg; |
769 | 752 | ||
770 | config_reg = mv_read(PORT_CONFIG_REG(mp->port_num)); | 753 | config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num)); |
771 | if (dev->flags & IFF_PROMISC) | 754 | if (dev->flags & IFF_PROMISC) |
772 | config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; | 755 | config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; |
773 | else | 756 | else |
774 | config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; | 757 | config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; |
775 | mv_write(PORT_CONFIG_REG(mp->port_num), config_reg); | 758 | wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg); |
776 | 759 | ||
777 | eth_port_set_multicast_list(dev); | 760 | eth_port_set_multicast_list(dev); |
778 | } | 761 | } |
@@ -826,14 +809,14 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) | |||
826 | { | 809 | { |
827 | struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, | 810 | struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, |
828 | tx_timeout_task); | 811 | tx_timeout_task); |
829 | struct net_device *dev = mp->mii.dev; /* yuck */ | 812 | struct net_device *dev = mp->dev; |
830 | 813 | ||
831 | if (!netif_running(dev)) | 814 | if (!netif_running(dev)) |
832 | return; | 815 | return; |
833 | 816 | ||
834 | netif_stop_queue(dev); | 817 | netif_stop_queue(dev); |
835 | 818 | ||
836 | eth_port_reset(mp->port_num); | 819 | eth_port_reset(mp); |
837 | eth_port_start(dev); | 820 | eth_port_start(dev); |
838 | 821 | ||
839 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | 822 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) |
@@ -845,7 +828,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) | |||
845 | * | 828 | * |
846 | * If force is non-zero, frees uncompleted descriptors as well | 829 | * If force is non-zero, frees uncompleted descriptors as well |
847 | */ | 830 | */ |
848 | int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) | 831 | static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) |
849 | { | 832 | { |
850 | struct mv643xx_private *mp = netdev_priv(dev); | 833 | struct mv643xx_private *mp = netdev_priv(dev); |
851 | struct eth_tx_desc *desc; | 834 | struct eth_tx_desc *desc; |
@@ -1008,7 +991,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev, | |||
1008 | u32 o_pscr, n_pscr; | 991 | u32 o_pscr, n_pscr; |
1009 | unsigned int queues; | 992 | unsigned int queues; |
1010 | 993 | ||
1011 | o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); | 994 | o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); |
1012 | n_pscr = o_pscr; | 995 | n_pscr = o_pscr; |
1013 | 996 | ||
1014 | /* clear speed, duplex and rx buffer size fields */ | 997 | /* clear speed, duplex and rx buffer size fields */ |
@@ -1031,16 +1014,16 @@ static void mv643xx_eth_update_pscr(struct net_device *dev, | |||
1031 | 1014 | ||
1032 | if (n_pscr != o_pscr) { | 1015 | if (n_pscr != o_pscr) { |
1033 | if ((o_pscr & SERIAL_PORT_ENABLE) == 0) | 1016 | if ((o_pscr & SERIAL_PORT_ENABLE) == 0) |
1034 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); | 1017 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
1035 | else { | 1018 | else { |
1036 | queues = mv643xx_eth_port_disable_tx(port_num); | 1019 | queues = mv643xx_eth_port_disable_tx(mp); |
1037 | 1020 | ||
1038 | o_pscr &= ~SERIAL_PORT_ENABLE; | 1021 | o_pscr &= ~SERIAL_PORT_ENABLE; |
1039 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr); | 1022 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr); |
1040 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); | 1023 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
1041 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); | 1024 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr); |
1042 | if (queues) | 1025 | if (queues) |
1043 | mv643xx_eth_port_enable_tx(port_num, queues); | 1026 | mv643xx_eth_port_enable_tx(mp, queues); |
1044 | } | 1027 | } |
1045 | } | 1028 | } |
1046 | } | 1029 | } |
@@ -1064,13 +1047,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1064 | unsigned int port_num = mp->port_num; | 1047 | unsigned int port_num = mp->port_num; |
1065 | 1048 | ||
1066 | /* Read interrupt cause registers */ | 1049 | /* Read interrupt cause registers */ |
1067 | eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) & | 1050 | eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) & |
1068 | ETH_INT_UNMASK_ALL; | 1051 | ETH_INT_UNMASK_ALL; |
1069 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { | 1052 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { |
1070 | eth_int_cause_ext = mv_read( | 1053 | eth_int_cause_ext = rdl(mp, |
1071 | INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | 1054 | INTERRUPT_CAUSE_EXTEND_REG(port_num)) & |
1072 | ETH_INT_UNMASK_ALL_EXT; | 1055 | ETH_INT_UNMASK_ALL_EXT; |
1073 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), | 1056 | wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), |
1074 | ~eth_int_cause_ext); | 1057 | ~eth_int_cause_ext); |
1075 | } | 1058 | } |
1076 | 1059 | ||
@@ -1081,8 +1064,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1081 | if (mii_link_ok(&mp->mii)) { | 1064 | if (mii_link_ok(&mp->mii)) { |
1082 | mii_ethtool_gset(&mp->mii, &cmd); | 1065 | mii_ethtool_gset(&mp->mii, &cmd); |
1083 | mv643xx_eth_update_pscr(dev, &cmd); | 1066 | mv643xx_eth_update_pscr(dev, &cmd); |
1084 | mv643xx_eth_port_enable_tx(port_num, | 1067 | mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED); |
1085 | ETH_TX_QUEUES_ENABLED); | ||
1086 | if (!netif_carrier_ok(dev)) { | 1068 | if (!netif_carrier_ok(dev)) { |
1087 | netif_carrier_on(dev); | 1069 | netif_carrier_on(dev); |
1088 | if (mp->tx_ring_size - mp->tx_desc_count >= | 1070 | if (mp->tx_ring_size - mp->tx_desc_count >= |
@@ -1098,10 +1080,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1098 | #ifdef MV643XX_NAPI | 1080 | #ifdef MV643XX_NAPI |
1099 | if (eth_int_cause & ETH_INT_CAUSE_RX) { | 1081 | if (eth_int_cause & ETH_INT_CAUSE_RX) { |
1100 | /* schedule the NAPI poll routine to maintain port */ | 1082 | /* schedule the NAPI poll routine to maintain port */ |
1101 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 1083 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1102 | 1084 | ||
1103 | /* wait for previous write to complete */ | 1085 | /* wait for previous write to complete */ |
1104 | mv_read(INTERRUPT_MASK_REG(port_num)); | 1086 | rdl(mp, INTERRUPT_MASK_REG(port_num)); |
1105 | 1087 | ||
1106 | netif_rx_schedule(dev, &mp->napi); | 1088 | netif_rx_schedule(dev, &mp->napi); |
1107 | } | 1089 | } |
@@ -1136,7 +1118,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1136 | * , and the required delay of the interrupt in usec. | 1118 | * , and the required delay of the interrupt in usec. |
1137 | * | 1119 | * |
1138 | * INPUT: | 1120 | * INPUT: |
1139 | * unsigned int eth_port_num Ethernet port number | 1121 | * struct mv643xx_private *mp Ethernet port |
1140 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units | 1122 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units |
1141 | * unsigned int delay Delay in usec | 1123 | * unsigned int delay Delay in usec |
1142 | * | 1124 | * |
@@ -1147,15 +1129,16 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1147 | * The interrupt coalescing value set in the gigE port. | 1129 | * The interrupt coalescing value set in the gigE port. |
1148 | * | 1130 | * |
1149 | */ | 1131 | */ |
1150 | static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, | 1132 | static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, |
1151 | unsigned int t_clk, unsigned int delay) | 1133 | unsigned int t_clk, unsigned int delay) |
1152 | { | 1134 | { |
1135 | unsigned int port_num = mp->port_num; | ||
1153 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; | 1136 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; |
1154 | 1137 | ||
1155 | /* Set RX Coalescing mechanism */ | 1138 | /* Set RX Coalescing mechanism */ |
1156 | mv_write(SDMA_CONFIG_REG(eth_port_num), | 1139 | wrl(mp, SDMA_CONFIG_REG(port_num), |
1157 | ((coal & 0x3fff) << 8) | | 1140 | ((coal & 0x3fff) << 8) | |
1158 | (mv_read(SDMA_CONFIG_REG(eth_port_num)) | 1141 | (rdl(mp, SDMA_CONFIG_REG(port_num)) |
1159 | & 0xffc000ff)); | 1142 | & 0xffc000ff)); |
1160 | 1143 | ||
1161 | return coal; | 1144 | return coal; |
@@ -1174,7 +1157,7 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, | |||
1174 | * MV-643xx chip and the required delay in the interrupt in uSec | 1157 | * MV-643xx chip and the required delay in the interrupt in uSec |
1175 | * | 1158 | * |
1176 | * INPUT: | 1159 | * INPUT: |
1177 | * unsigned int eth_port_num Ethernet port number | 1160 | * struct mv643xx_private *mp Ethernet port |
1178 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units | 1161 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units |
1179 | * unsigned int delay Delay in uSeconds | 1162 | * unsigned int delay Delay in uSeconds |
1180 | * | 1163 | * |
@@ -1185,13 +1168,14 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, | |||
1185 | * The interrupt coalescing value set in the gigE port. | 1168 | * The interrupt coalescing value set in the gigE port. |
1186 | * | 1169 | * |
1187 | */ | 1170 | */ |
1188 | static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, | 1171 | static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, |
1189 | unsigned int t_clk, unsigned int delay) | 1172 | unsigned int t_clk, unsigned int delay) |
1190 | { | 1173 | { |
1191 | unsigned int coal; | 1174 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; |
1192 | coal = ((t_clk / 1000000) * delay) / 64; | 1175 | |
1193 | /* Set TX Coalescing mechanism */ | 1176 | /* Set TX Coalescing mechanism */ |
1194 | mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4); | 1177 | wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4); |
1178 | |||
1195 | return coal; | 1179 | return coal; |
1196 | } | 1180 | } |
1197 | 1181 | ||
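Both coalescing helpers above reduce to the same arithmetic: the requested delay in microseconds is converted into t_clk cycles and divided by 64, one coalescing unit being 64 t_clk ticks. A worked example, assuming the 133000000 Hz t_clk that mv643xx_eth_open() passes in (the 100 usec delay is illustrative only, not a driver default):

	/* Worked example of the coalescing value, not driver code:
	 *   t_clk = 133000000 Hz, delay = 100 usec
	 *   coal  = ((133000000 / 1000000) * 100) / 64
	 *         = (133 * 100) / 64
	 *         = 207   (integer division)
	 * For rx the value lands in bits 8..21 of SDMA_CONFIG_REG (the
	 * 0x3fff mask above); for tx it is written shifted left by 4
	 * into TX_FIFO_URGENT_THRESHOLD_REG. */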
@@ -1327,16 +1311,15 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
1327 | int err; | 1311 | int err; |
1328 | 1312 | ||
1329 | /* Clear any pending ethernet port interrupts */ | 1313 | /* Clear any pending ethernet port interrupts */ |
1330 | mv_write(INTERRUPT_CAUSE_REG(port_num), 0); | 1314 | wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0); |
1331 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1315 | wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1332 | /* wait for previous write to complete */ | 1316 | /* wait for previous write to complete */ |
1333 | mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num)); | 1317 | rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num)); |
1334 | 1318 | ||
1335 | err = request_irq(dev->irq, mv643xx_eth_int_handler, | 1319 | err = request_irq(dev->irq, mv643xx_eth_int_handler, |
1336 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); | 1320 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); |
1337 | if (err) { | 1321 | if (err) { |
1338 | printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", | 1322 | printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name); |
1339 | port_num); | ||
1340 | return -EAGAIN; | 1323 | return -EAGAIN; |
1341 | } | 1324 | } |
1342 | 1325 | ||
@@ -1430,17 +1413,17 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
1430 | 1413 | ||
1431 | #ifdef MV643XX_COAL | 1414 | #ifdef MV643XX_COAL |
1432 | mp->rx_int_coal = | 1415 | mp->rx_int_coal = |
1433 | eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL); | 1416 | eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL); |
1434 | #endif | 1417 | #endif |
1435 | 1418 | ||
1436 | mp->tx_int_coal = | 1419 | mp->tx_int_coal = |
1437 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | 1420 | eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL); |
1438 | 1421 | ||
1439 | /* Unmask phy and link status changes interrupts */ | 1422 | /* Unmask phy and link status changes interrupts */ |
1440 | mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); | 1423 | wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); |
1441 | 1424 | ||
1442 | /* Unmask RX buffer and TX end interrupt */ | 1425 | /* Unmask RX buffer and TX end interrupt */ |
1443 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 1426 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1444 | 1427 | ||
1445 | return 0; | 1428 | return 0; |
1446 | 1429 | ||
@@ -1459,7 +1442,7 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev) | |||
1459 | struct mv643xx_private *mp = netdev_priv(dev); | 1442 | struct mv643xx_private *mp = netdev_priv(dev); |
1460 | 1443 | ||
1461 | /* Stop Tx Queues */ | 1444 | /* Stop Tx Queues */ |
1462 | mv643xx_eth_port_disable_tx(mp->port_num); | 1445 | mv643xx_eth_port_disable_tx(mp); |
1463 | 1446 | ||
1464 | /* Free outstanding skb's on TX ring */ | 1447 | /* Free outstanding skb's on TX ring */ |
1465 | mv643xx_eth_free_all_tx_descs(dev); | 1448 | mv643xx_eth_free_all_tx_descs(dev); |
@@ -1477,11 +1460,10 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev) | |||
1477 | static void mv643xx_eth_free_rx_rings(struct net_device *dev) | 1460 | static void mv643xx_eth_free_rx_rings(struct net_device *dev) |
1478 | { | 1461 | { |
1479 | struct mv643xx_private *mp = netdev_priv(dev); | 1462 | struct mv643xx_private *mp = netdev_priv(dev); |
1480 | unsigned int port_num = mp->port_num; | ||
1481 | int curr; | 1463 | int curr; |
1482 | 1464 | ||
1483 | /* Stop RX Queues */ | 1465 | /* Stop RX Queues */ |
1484 | mv643xx_eth_port_disable_rx(port_num); | 1466 | mv643xx_eth_port_disable_rx(mp); |
1485 | 1467 | ||
1486 | /* Free preallocated skb's on RX rings */ | 1468 | /* Free preallocated skb's on RX rings */ |
1487 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { | 1469 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { |
@@ -1520,9 +1502,9 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
1520 | unsigned int port_num = mp->port_num; | 1502 | unsigned int port_num = mp->port_num; |
1521 | 1503 | ||
1522 | /* Mask all interrupts on ethernet port */ | 1504 | /* Mask all interrupts on ethernet port */ |
1523 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 1505 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1524 | /* wait for previous write to complete */ | 1506 | /* wait for previous write to complete */ |
1525 | mv_read(INTERRUPT_MASK_REG(port_num)); | 1507 | rdl(mp, INTERRUPT_MASK_REG(port_num)); |
1526 | 1508 | ||
1527 | #ifdef MV643XX_NAPI | 1509 | #ifdef MV643XX_NAPI |
1528 | napi_disable(&mp->napi); | 1510 | napi_disable(&mp->napi); |
@@ -1530,7 +1512,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
1530 | netif_carrier_off(dev); | 1512 | netif_carrier_off(dev); |
1531 | netif_stop_queue(dev); | 1513 | netif_stop_queue(dev); |
1532 | 1514 | ||
1533 | eth_port_reset(mp->port_num); | 1515 | eth_port_reset(mp); |
1534 | 1516 | ||
1535 | mv643xx_eth_free_tx_rings(dev); | 1517 | mv643xx_eth_free_tx_rings(dev); |
1536 | mv643xx_eth_free_rx_rings(dev); | 1518 | mv643xx_eth_free_rx_rings(dev); |
@@ -1561,15 +1543,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget) | |||
1561 | #endif | 1543 | #endif |
1562 | 1544 | ||
1563 | work_done = 0; | 1545 | work_done = 0; |
1564 | if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | 1546 | if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) |
1565 | != (u32) mp->rx_used_desc_q) | 1547 | != (u32) mp->rx_used_desc_q) |
1566 | work_done = mv643xx_eth_receive_queue(dev, budget); | 1548 | work_done = mv643xx_eth_receive_queue(dev, budget); |
1567 | 1549 | ||
1568 | if (work_done < budget) { | 1550 | if (work_done < budget) { |
1569 | netif_rx_complete(dev, napi); | 1551 | netif_rx_complete(dev, napi); |
1570 | mv_write(INTERRUPT_CAUSE_REG(port_num), 0); | 1552 | wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0); |
1571 | mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1553 | wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1572 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 1554 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1573 | } | 1555 | } |
1574 | 1556 | ||
1575 | return work_done; | 1557 | return work_done; |
@@ -1723,7 +1705,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | |||
1723 | 1705 | ||
1724 | /* ensure all descriptors are written before poking hardware */ | 1706 | /* ensure all descriptors are written before poking hardware */ |
1725 | wmb(); | 1707 | wmb(); |
1726 | mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); | 1708 | mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED); |
1727 | 1709 | ||
1728 | mp->tx_desc_count += nr_frags + 1; | 1710 | mp->tx_desc_count += nr_frags + 1; |
1729 | } | 1711 | } |
@@ -1739,25 +1721,23 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1739 | unsigned long flags; | 1721 | unsigned long flags; |
1740 | 1722 | ||
1741 | BUG_ON(netif_queue_stopped(dev)); | 1723 | BUG_ON(netif_queue_stopped(dev)); |
1742 | BUG_ON(skb == NULL); | 1724 | |
1725 | if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { | ||
1726 | stats->tx_dropped++; | ||
1727 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1728 | "unaligned fragment\n", dev->name); | ||
1729 | return NETDEV_TX_BUSY; | ||
1730 | } | ||
1731 | |||
1732 | spin_lock_irqsave(&mp->lock, flags); | ||
1743 | 1733 | ||
1744 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { | 1734 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { |
1745 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); | 1735 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); |
1746 | netif_stop_queue(dev); | 1736 | netif_stop_queue(dev); |
1747 | return 1; | 1737 | spin_unlock_irqrestore(&mp->lock, flags); |
1748 | } | 1738 | return NETDEV_TX_BUSY; |
1749 | |||
1750 | if (has_tiny_unaligned_frags(skb)) { | ||
1751 | if (__skb_linearize(skb)) { | ||
1752 | stats->tx_dropped++; | ||
1753 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1754 | "unaligned fragment\n", dev->name); | ||
1755 | return 1; | ||
1756 | } | ||
1757 | } | 1739 | } |
1758 | 1740 | ||
1759 | spin_lock_irqsave(&mp->lock, flags); | ||
1760 | |||
1761 | eth_tx_submit_descs_for_skb(mp, skb); | 1741 | eth_tx_submit_descs_for_skb(mp, skb); |
1762 | stats->tx_bytes += skb->len; | 1742 | stats->tx_bytes += skb->len; |
1763 | stats->tx_packets++; | 1743 | stats->tx_packets++; |
@@ -1768,7 +1748,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1768 | 1748 | ||
1769 | spin_unlock_irqrestore(&mp->lock, flags); | 1749 | spin_unlock_irqrestore(&mp->lock, flags); |
1770 | 1750 | ||
1771 | return 0; /* success */ | 1751 | return NETDEV_TX_OK; |
1772 | } | 1752 | } |
1773 | 1753 | ||
1774 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1754 | #ifdef CONFIG_NET_POLL_CONTROLLER |
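Besides threading mp through, the start_xmit hunks above make two behavioural edits: the linearize check for tiny unaligned fragments now runs before the spinlock is taken, and the bare 0/1 return values become the NETDEV_TX_* codes the core expects from hard_start_xmit. A condensed paraphrase of the resulting flow, with error-path logging and statistics omitted (see the hunks for the real body):

	static int xmit_flow(struct sk_buff *skb, struct net_device *dev)
	{
		struct mv643xx_private *mp = netdev_priv(dev);
		unsigned long flags;

		/* copy-if-needed happens before mp->lock is taken */
		if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb))
			return NETDEV_TX_BUSY;

		spin_lock_irqsave(&mp->lock, flags);

		if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
			netif_stop_queue(dev);
			spin_unlock_irqrestore(&mp->lock, flags);
			return NETDEV_TX_BUSY;	/* core will retry the skb */
		}

		eth_tx_submit_descs_for_skb(mp, skb);

		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_OK;		/* skb consumed */
	}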
@@ -1777,13 +1757,13 @@ static void mv643xx_netpoll(struct net_device *netdev) | |||
1777 | struct mv643xx_private *mp = netdev_priv(netdev); | 1757 | struct mv643xx_private *mp = netdev_priv(netdev); |
1778 | int port_num = mp->port_num; | 1758 | int port_num = mp->port_num; |
1779 | 1759 | ||
1780 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 1760 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1781 | /* wait for previous write to complete */ | 1761 | /* wait for previous write to complete */ |
1782 | mv_read(INTERRUPT_MASK_REG(port_num)); | 1762 | rdl(mp, INTERRUPT_MASK_REG(port_num)); |
1783 | 1763 | ||
1784 | mv643xx_eth_int_handler(netdev->irq, netdev); | 1764 | mv643xx_eth_int_handler(netdev->irq, netdev); |
1785 | 1765 | ||
1786 | mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 1766 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1787 | } | 1767 | } |
1788 | #endif | 1768 | #endif |
1789 | 1769 | ||
@@ -1900,7 +1880,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1900 | port_num = mp->port_num = pd->port_number; | 1880 | port_num = mp->port_num = pd->port_number; |
1901 | 1881 | ||
1902 | /* set default config values */ | 1882 | /* set default config values */ |
1903 | eth_port_uc_addr_get(port_num, dev->dev_addr); | 1883 | eth_port_uc_addr_get(mp, dev->dev_addr); |
1904 | mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1884 | mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1905 | mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1885 | mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1906 | 1886 | ||
@@ -1908,7 +1888,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1908 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 1888 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1909 | 1889 | ||
1910 | if (pd->phy_addr || pd->force_phy_addr) | 1890 | if (pd->phy_addr || pd->force_phy_addr) |
1911 | ethernet_phy_set(port_num, pd->phy_addr); | 1891 | ethernet_phy_set(mp, pd->phy_addr); |
1912 | 1892 | ||
1913 | if (pd->rx_queue_size) | 1893 | if (pd->rx_queue_size) |
1914 | mp->rx_ring_size = pd->rx_queue_size; | 1894 | mp->rx_ring_size = pd->rx_queue_size; |
@@ -1933,19 +1913,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1933 | mp->mii.dev = dev; | 1913 | mp->mii.dev = dev; |
1934 | mp->mii.mdio_read = mv643xx_mdio_read; | 1914 | mp->mii.mdio_read = mv643xx_mdio_read; |
1935 | mp->mii.mdio_write = mv643xx_mdio_write; | 1915 | mp->mii.mdio_write = mv643xx_mdio_write; |
1936 | mp->mii.phy_id = ethernet_phy_get(port_num); | 1916 | mp->mii.phy_id = ethernet_phy_get(mp); |
1937 | mp->mii.phy_id_mask = 0x3f; | 1917 | mp->mii.phy_id_mask = 0x3f; |
1938 | mp->mii.reg_num_mask = 0x1f; | 1918 | mp->mii.reg_num_mask = 0x1f; |
1939 | 1919 | ||
1940 | err = ethernet_phy_detect(port_num); | 1920 | err = ethernet_phy_detect(mp); |
1941 | if (err) { | 1921 | if (err) { |
1942 | pr_debug("MV643xx ethernet port %d: " | 1922 | pr_debug("%s: No PHY detected at addr %d\n", |
1943 | "No PHY detected at addr %d\n", | 1923 | dev->name, ethernet_phy_get(mp)); |
1944 | port_num, ethernet_phy_get(port_num)); | ||
1945 | goto out; | 1924 | goto out; |
1946 | } | 1925 | } |
1947 | 1926 | ||
1948 | ethernet_phy_reset(port_num); | 1927 | ethernet_phy_reset(mp); |
1949 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | 1928 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); |
1950 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | 1929 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); |
1951 | mv643xx_eth_update_pscr(dev, &cmd); | 1930 | mv643xx_eth_update_pscr(dev, &cmd); |
@@ -2006,9 +1985,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev) | |||
2006 | 1985 | ||
2007 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) | 1986 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) |
2008 | { | 1987 | { |
1988 | static int mv643xx_version_printed = 0; | ||
2009 | struct resource *res; | 1989 | struct resource *res; |
2010 | 1990 | ||
2011 | printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); | 1991 | if (!mv643xx_version_printed++) |
1992 | printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); | ||
2012 | 1993 | ||
2013 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1994 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2014 | if (res == NULL) | 1995 | if (res == NULL) |
@@ -2037,10 +2018,10 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) | |||
2037 | unsigned int port_num = mp->port_num; | 2018 | unsigned int port_num = mp->port_num; |
2038 | 2019 | ||
2039 | /* Mask all interrupts on ethernet port */ | 2020 | /* Mask all interrupts on ethernet port */ |
2040 | mv_write(INTERRUPT_MASK_REG(port_num), 0); | 2021 | wrl(mp, INTERRUPT_MASK_REG(port_num), 0); |
2041 | mv_read (INTERRUPT_MASK_REG(port_num)); | 2022 | rdl(mp, INTERRUPT_MASK_REG(port_num)); |
2042 | 2023 | ||
2043 | eth_port_reset(port_num); | 2024 | eth_port_reset(mp); |
2044 | } | 2025 | } |
2045 | 2026 | ||
2046 | static struct platform_driver mv643xx_eth_driver = { | 2027 | static struct platform_driver mv643xx_eth_driver = { |
@@ -2229,12 +2210,9 @@ MODULE_ALIAS("platform:mv643xx_eth"); | |||
2229 | * return_info Tx/Rx user resource return information. | 2210 | * return_info Tx/Rx user resource return information. |
2230 | */ | 2211 | */ |
2231 | 2212 | ||
2232 | /* PHY routines */ | ||
2233 | static int ethernet_phy_get(unsigned int eth_port_num); | ||
2234 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | ||
2235 | |||
2236 | /* Ethernet Port routines */ | 2213 | /* Ethernet Port routines */ |
2237 | static void eth_port_set_filter_table_entry(int table, unsigned char entry); | 2214 | static void eth_port_set_filter_table_entry(struct mv643xx_private *mp, |
2215 | int table, unsigned char entry); | ||
2238 | 2216 | ||
2239 | /* | 2217 | /* |
2240 | * eth_port_init - Initialize the Ethernet port driver | 2218 | * eth_port_init - Initialize the Ethernet port driver |
@@ -2264,9 +2242,9 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
2264 | { | 2242 | { |
2265 | mp->rx_resource_err = 0; | 2243 | mp->rx_resource_err = 0; |
2266 | 2244 | ||
2267 | eth_port_reset(mp->port_num); | 2245 | eth_port_reset(mp); |
2268 | 2246 | ||
2269 | eth_port_init_mac_tables(mp->port_num); | 2247 | eth_port_init_mac_tables(mp); |
2270 | } | 2248 | } |
2271 | 2249 | ||
2272 | /* | 2250 | /* |
@@ -2306,28 +2284,28 @@ static void eth_port_start(struct net_device *dev) | |||
2306 | 2284 | ||
2307 | /* Assignment of Tx CTRP of given queue */ | 2285 | /* Assignment of Tx CTRP of given queue */ |
2308 | tx_curr_desc = mp->tx_curr_desc_q; | 2286 | tx_curr_desc = mp->tx_curr_desc_q; |
2309 | mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num), | 2287 | wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num), |
2310 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | 2288 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); |
2311 | 2289 | ||
2312 | /* Assignment of Rx CRDP of given queue */ | 2290 | /* Assignment of Rx CRDP of given queue */ |
2313 | rx_curr_desc = mp->rx_curr_desc_q; | 2291 | rx_curr_desc = mp->rx_curr_desc_q; |
2314 | mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num), | 2292 | wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num), |
2315 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 2293 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
2316 | 2294 | ||
2317 | /* Add the assigned Ethernet address to the port's address table */ | 2295 | /* Add the assigned Ethernet address to the port's address table */ |
2318 | eth_port_uc_addr_set(port_num, dev->dev_addr); | 2296 | eth_port_uc_addr_set(mp, dev->dev_addr); |
2319 | 2297 | ||
2320 | /* Assign port configuration and command. */ | 2298 | /* Assign port configuration and command. */ |
2321 | mv_write(PORT_CONFIG_REG(port_num), | 2299 | wrl(mp, PORT_CONFIG_REG(port_num), |
2322 | PORT_CONFIG_DEFAULT_VALUE); | 2300 | PORT_CONFIG_DEFAULT_VALUE); |
2323 | 2301 | ||
2324 | mv_write(PORT_CONFIG_EXTEND_REG(port_num), | 2302 | wrl(mp, PORT_CONFIG_EXTEND_REG(port_num), |
2325 | PORT_CONFIG_EXTEND_DEFAULT_VALUE); | 2303 | PORT_CONFIG_EXTEND_DEFAULT_VALUE); |
2326 | 2304 | ||
2327 | pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); | 2305 | pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); |
2328 | 2306 | ||
2329 | pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); | 2307 | pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); |
2330 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2308 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); |
2331 | 2309 | ||
2332 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | | 2310 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
2333 | DISABLE_AUTO_NEG_SPEED_GMII | | 2311 | DISABLE_AUTO_NEG_SPEED_GMII | |
@@ -2335,32 +2313,34 @@ static void eth_port_start(struct net_device *dev) | |||
2335 | DO_NOT_FORCE_LINK_FAIL | | 2313 | DO_NOT_FORCE_LINK_FAIL | |
2336 | SERIAL_PORT_CONTROL_RESERVED; | 2314 | SERIAL_PORT_CONTROL_RESERVED; |
2337 | 2315 | ||
2338 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2316 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); |
2339 | 2317 | ||
2340 | pscr |= SERIAL_PORT_ENABLE; | 2318 | pscr |= SERIAL_PORT_ENABLE; |
2341 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); | 2319 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); |
2342 | 2320 | ||
2343 | /* Assign port SDMA configuration */ | 2321 | /* Assign port SDMA configuration */ |
2344 | mv_write(SDMA_CONFIG_REG(port_num), | 2322 | wrl(mp, SDMA_CONFIG_REG(port_num), |
2345 | PORT_SDMA_CONFIG_DEFAULT_VALUE); | 2323 | PORT_SDMA_CONFIG_DEFAULT_VALUE); |
2346 | 2324 | ||
2347 | /* Enable port Rx. */ | 2325 | /* Enable port Rx. */ |
2348 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); | 2326 | mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED); |
2349 | 2327 | ||
2350 | /* Disable port bandwidth limits by clearing MTU register */ | 2328 | /* Disable port bandwidth limits by clearing MTU register */ |
2351 | mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0); | 2329 | wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0); |
2352 | 2330 | ||
2353 | /* save phy settings across reset */ | 2331 | /* save phy settings across reset */ |
2354 | mv643xx_get_settings(dev, ðtool_cmd); | 2332 | mv643xx_get_settings(dev, ðtool_cmd); |
2355 | ethernet_phy_reset(mp->port_num); | 2333 | ethernet_phy_reset(mp); |
2356 | mv643xx_set_settings(dev, ðtool_cmd); | 2334 | mv643xx_set_settings(dev, ðtool_cmd); |
2357 | } | 2335 | } |
2358 | 2336 | ||
2359 | /* | 2337 | /* |
2360 | * eth_port_uc_addr_set - Write a MAC address into the port's hw registers | 2338 | * eth_port_uc_addr_set - Write a MAC address into the port's hw registers |
2361 | */ | 2339 | */ |
2362 | static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr) | 2340 | static void eth_port_uc_addr_set(struct mv643xx_private *mp, |
2341 | unsigned char *p_addr) | ||
2363 | { | 2342 | { |
2343 | unsigned int port_num = mp->port_num; | ||
2364 | unsigned int mac_h; | 2344 | unsigned int mac_h; |
2365 | unsigned int mac_l; | 2345 | unsigned int mac_l; |
2366 | int table; | 2346 | int table; |
@@ -2369,24 +2349,26 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr) | |||
2369 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | 2349 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | |
2370 | (p_addr[3] << 0); | 2350 | (p_addr[3] << 0); |
2371 | 2351 | ||
2372 | mv_write(MAC_ADDR_LOW(port_num), mac_l); | 2352 | wrl(mp, MAC_ADDR_LOW(port_num), mac_l); |
2373 | mv_write(MAC_ADDR_HIGH(port_num), mac_h); | 2353 | wrl(mp, MAC_ADDR_HIGH(port_num), mac_h); |
2374 | 2354 | ||
2375 | /* Accept frames with this address */ | 2355 | /* Accept frames with this address */ |
2376 | table = DA_FILTER_UNICAST_TABLE_BASE(port_num); | 2356 | table = DA_FILTER_UNICAST_TABLE_BASE(port_num); |
2377 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); | 2357 | eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f); |
2378 | } | 2358 | } |
2379 | 2359 | ||
2380 | /* | 2360 | /* |
2381 | * eth_port_uc_addr_get - Read the MAC address from the port's hw registers | 2361 | * eth_port_uc_addr_get - Read the MAC address from the port's hw registers |
2382 | */ | 2362 | */ |
2383 | static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr) | 2363 | static void eth_port_uc_addr_get(struct mv643xx_private *mp, |
2364 | unsigned char *p_addr) | ||
2384 | { | 2365 | { |
2366 | unsigned int port_num = mp->port_num; | ||
2385 | unsigned int mac_h; | 2367 | unsigned int mac_h; |
2386 | unsigned int mac_l; | 2368 | unsigned int mac_l; |
2387 | 2369 | ||
2388 | mac_h = mv_read(MAC_ADDR_HIGH(port_num)); | 2370 | mac_h = rdl(mp, MAC_ADDR_HIGH(port_num)); |
2389 | mac_l = mv_read(MAC_ADDR_LOW(port_num)); | 2371 | mac_l = rdl(mp, MAC_ADDR_LOW(port_num)); |
2390 | 2372 | ||
2391 | p_addr[0] = (mac_h >> 24) & 0xff; | 2373 | p_addr[0] = (mac_h >> 24) & 0xff; |
2392 | p_addr[1] = (mac_h >> 16) & 0xff; | 2374 | p_addr[1] = (mac_h >> 16) & 0xff; |
@@ -2405,7 +2387,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr) | |||
2405 | * 3-1 Queue (ETH_Q0=0) | 2387 | * 3-1 Queue (ETH_Q0=0) |
2406 | * 7-4 Reserved = 0; | 2388 | * 7-4 Reserved = 0; |
2407 | */ | 2389 | */ |
2408 | static void eth_port_set_filter_table_entry(int table, unsigned char entry) | 2390 | static void eth_port_set_filter_table_entry(struct mv643xx_private *mp, |
2391 | int table, unsigned char entry) | ||
2409 | { | 2392 | { |
2410 | unsigned int table_reg; | 2393 | unsigned int table_reg; |
2411 | unsigned int tbl_offset; | 2394 | unsigned int tbl_offset; |
@@ -2415,9 +2398,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry) | |||
2415 | reg_offset = entry % 4; /* Entry offset within the register */ | 2398 | reg_offset = entry % 4; /* Entry offset within the register */ |
2416 | 2399 | ||
2417 | /* Set "accepts frame bit" at specified table entry */ | 2400 | /* Set "accepts frame bit" at specified table entry */ |
2418 | table_reg = mv_read(table + tbl_offset); | 2401 | table_reg = rdl(mp, table + tbl_offset); |
2419 | table_reg |= 0x01 << (8 * reg_offset); | 2402 | table_reg |= 0x01 << (8 * reg_offset); |
2420 | mv_write(table + tbl_offset, table_reg); | 2403 | wrl(mp, table + tbl_offset, table_reg); |
2421 | } | 2404 | } |
2422 | 2405 | ||
2423 | /* | 2406 | /* |
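For reference, the filter-table indexing in the hunk above resolves as four one-byte entries per 32-bit register: tbl_offset (computed just outside the hunk, presumably by rounding the entry down to a register boundary) selects the register, reg_offset selects the byte lane, and bit 0 of that lane is the "accept frame" flag. A worked example, not driver code:

	/* Unicast entry 0x0b (low nibble of the last MAC address byte): */
	unsigned char entry      = 0x0b;
	unsigned int  tbl_offset = (entry / 4) * 4;          /* = 8: third register of the table */
	unsigned int  reg_offset = entry % 4;                /* = 3: highest byte lane           */
	unsigned int  accept     = 0x01 << (8 * reg_offset); /* = 0x01000000, OR'd into the reg  */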
@@ -2434,8 +2417,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry) | |||
2434 | * In either case, eth_port_set_filter_table_entry() is then called | 2417 | * In either case, eth_port_set_filter_table_entry() is then called |
2435 | * to set the actual table entry. | 2418 | * to set the actual table entry. |
2436 | */ | 2419 | */ |
2437 | static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) | 2420 | static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr) |
2438 | { | 2421 | { |
2422 | unsigned int port_num = mp->port_num; | ||
2439 | unsigned int mac_h; | 2423 | unsigned int mac_h; |
2440 | unsigned int mac_l; | 2424 | unsigned int mac_l; |
2441 | unsigned char crc_result = 0; | 2425 | unsigned char crc_result = 0; |
@@ -2446,9 +2430,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) | |||
2446 | 2430 | ||
2447 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && | 2431 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && |
2448 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { | 2432 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { |
2449 | table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2433 | table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num); |
2450 | (eth_port_num); | 2434 | eth_port_set_filter_table_entry(mp, table, p_addr[5]); |
2451 | eth_port_set_filter_table_entry(table, p_addr[5]); | ||
2452 | return; | 2435 | return; |
2453 | } | 2436 | } |
2454 | 2437 | ||
@@ -2520,8 +2503,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) | |||
2520 | for (i = 0; i < 8; i++) | 2503 | for (i = 0; i < 8; i++) |
2521 | crc_result = crc_result | (crc[i] << i); | 2504 | crc_result = crc_result | (crc[i] << i); |
2522 | 2505 | ||
2523 | table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); | 2506 | table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num); |
2524 | eth_port_set_filter_table_entry(table, crc_result); | 2507 | eth_port_set_filter_table_entry(mp, table, crc_result); |
2525 | } | 2508 | } |
2526 | 2509 | ||
2527 | /* | 2510 | /* |
@@ -2550,7 +2533,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2550 | * 3-1 Queue ETH_Q0=0 | 2533 | * 3-1 Queue ETH_Q0=0 |
2551 | * 7-4 Reserved = 0; | 2534 | * 7-4 Reserved = 0; |
2552 | */ | 2535 | */ |
2553 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | 2536 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); |
2554 | 2537 | ||
2555 | /* Set all entries in DA filter other multicast | 2538 | /* Set all entries in DA filter other multicast |
2556 | * table (Ex_dFOMT) | 2539 | * table (Ex_dFOMT) |
@@ -2560,7 +2543,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2560 | * 3-1 Queue ETH_Q0=0 | 2543 | * 3-1 Queue ETH_Q0=0 |
2561 | * 7-4 Reserved = 0; | 2544 | * 7-4 Reserved = 0; |
2562 | */ | 2545 | */ |
2563 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | 2546 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); |
2564 | } | 2547 | } |
2565 | return; | 2548 | return; |
2566 | } | 2549 | } |
@@ -2570,11 +2553,11 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2570 | */ | 2553 | */ |
2571 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2554 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2572 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2555 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
2573 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2556 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE |
2574 | (eth_port_num) + table_index, 0); | 2557 | (eth_port_num) + table_index, 0); |
2575 | 2558 | ||
2576 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | 2559 | /* Clear DA filter other multicast table (Ex_dFOMT) */ |
2577 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE | 2560 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE |
2578 | (eth_port_num) + table_index, 0); | 2561 | (eth_port_num) + table_index, 0); |
2579 | } | 2562 | } |
2580 | 2563 | ||
@@ -2583,7 +2566,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2583 | (i < 256) && (mc_list != NULL) && (i < dev->mc_count); | 2566 | (i < 256) && (mc_list != NULL) && (i < dev->mc_count); |
2584 | i++, mc_list = mc_list->next) | 2567 | i++, mc_list = mc_list->next) |
2585 | if (mc_list->dmi_addrlen == 6) | 2568 | if (mc_list->dmi_addrlen == 6) |
2586 | eth_port_mc_addr(eth_port_num, mc_list->dmi_addr); | 2569 | eth_port_mc_addr(mp, mc_list->dmi_addr); |
2587 | } | 2570 | } |
2588 | 2571 | ||
2589 | /* | 2572 | /* |
@@ -2594,7 +2577,7 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2594 | * Other Multicast) and set each entry to 0. | 2577 | * Other Multicast) and set each entry to 0. |
2595 | * | 2578 | * |
2596 | * INPUT: | 2579 | * INPUT: |
2597 | * unsigned int eth_port_num Ethernet Port number. | 2580 | * struct mv643xx_private *mp Ethernet Port. |
2598 | * | 2581 | * |
2599 | * OUTPUT: | 2582 | * OUTPUT: |
2600 | * Multicast and Unicast packets are rejected. | 2583 | * Multicast and Unicast packets are rejected. |
@@ -2602,22 +2585,23 @@ static void eth_port_set_multicast_list(struct net_device *dev) | |||
2602 | * RETURN: | 2585 | * RETURN: |
2603 | * None. | 2586 | * None. |
2604 | */ | 2587 | */ |
2605 | static void eth_port_init_mac_tables(unsigned int eth_port_num) | 2588 | static void eth_port_init_mac_tables(struct mv643xx_private *mp) |
2606 | { | 2589 | { |
2590 | unsigned int port_num = mp->port_num; | ||
2607 | int table_index; | 2591 | int table_index; |
2608 | 2592 | ||
2609 | /* Clear DA filter unicast table (Ex_dFUT) */ | 2593 | /* Clear DA filter unicast table (Ex_dFUT) */ |
2610 | for (table_index = 0; table_index <= 0xC; table_index += 4) | 2594 | for (table_index = 0; table_index <= 0xC; table_index += 4) |
2611 | mv_write(DA_FILTER_UNICAST_TABLE_BASE | 2595 | wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) + |
2612 | (eth_port_num) + table_index, 0); | 2596 | table_index, 0); |
2613 | 2597 | ||
2614 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2598 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2615 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2599 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
2616 | mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | 2600 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) + |
2617 | (eth_port_num) + table_index, 0); | 2601 | table_index, 0); |
2618 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | 2602 | /* Clear DA filter other multicast table (Ex_dFOMT) */ |
2619 | mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE | 2603 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) + |
2620 | (eth_port_num) + table_index, 0); | 2604 | table_index, 0); |
2621 | } | 2605 | } |
2622 | } | 2606 | } |
2623 | 2607 | ||
@@ -2629,7 +2613,7 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2629 | * A read from the MIB counter will reset the counter. | 2613 | * A read from the MIB counter will reset the counter. |
2630 | * | 2614 | * |
2631 | * INPUT: | 2615 | * INPUT: |
2632 | * unsigned int eth_port_num Ethernet Port number. | 2616 | * struct mv643xx_private *mp Ethernet Port. |
2633 | * | 2617 | * |
2634 | * OUTPUT: | 2618 | * OUTPUT: |
2635 | * After reading all MIB counters, the counters resets. | 2619 | * After reading all MIB counters, the counters resets. |
@@ -2638,19 +2622,20 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2638 | * MIB counter value. | 2622 | * MIB counter value. |
2639 | * | 2623 | * |
2640 | */ | 2624 | */ |
2641 | static void eth_clear_mib_counters(unsigned int eth_port_num) | 2625 | static void eth_clear_mib_counters(struct mv643xx_private *mp) |
2642 | { | 2626 | { |
2627 | unsigned int port_num = mp->port_num; | ||
2643 | int i; | 2628 | int i; |
2644 | 2629 | ||
2645 | /* Perform dummy reads from MIB counters */ | 2630 | /* Perform dummy reads from MIB counters */ |
2646 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; | 2631 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; |
2647 | i += 4) | 2632 | i += 4) |
2648 | mv_read(MIB_COUNTERS_BASE(eth_port_num) + i); | 2633 | rdl(mp, MIB_COUNTERS_BASE(port_num) + i); |
2649 | } | 2634 | } |
2650 | 2635 | ||
2651 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) | 2636 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) |
2652 | { | 2637 | { |
2653 | return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset); | 2638 | return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset); |
2654 | } | 2639 | } |
2655 | 2640 | ||
2656 | static void eth_update_mib_counters(struct mv643xx_private *mp) | 2641 | static void eth_update_mib_counters(struct mv643xx_private *mp) |
@@ -2686,7 +2671,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp) | |||
2686 | * the specified port. | 2671 | * the specified port. |
2687 | * | 2672 | * |
2688 | * INPUT: | 2673 | * INPUT: |
2689 | * unsigned int eth_port_num Ethernet Port number. | 2674 | * struct mv643xx_private *mp Ethernet Port. |
2690 | * | 2675 | * |
2691 | * OUTPUT: | 2676 | * OUTPUT: |
2692 | * None | 2677 | * None |
@@ -2696,22 +2681,22 @@ static void eth_update_mib_counters(struct mv643xx_private *mp) | |||
2696 | * -ENODEV on failure | 2681 | * -ENODEV on failure |
2697 | * | 2682 | * |
2698 | */ | 2683 | */ |
2699 | static int ethernet_phy_detect(unsigned int port_num) | 2684 | static int ethernet_phy_detect(struct mv643xx_private *mp) |
2700 | { | 2685 | { |
2701 | unsigned int phy_reg_data0; | 2686 | unsigned int phy_reg_data0; |
2702 | int auto_neg; | 2687 | int auto_neg; |
2703 | 2688 | ||
2704 | eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); | 2689 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); |
2705 | auto_neg = phy_reg_data0 & 0x1000; | 2690 | auto_neg = phy_reg_data0 & 0x1000; |
2706 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ | 2691 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ |
2707 | eth_port_write_smi_reg(port_num, 0, phy_reg_data0); | 2692 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); |
2708 | 2693 | ||
2709 | eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); | 2694 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); |
2710 | if ((phy_reg_data0 & 0x1000) == auto_neg) | 2695 | if ((phy_reg_data0 & 0x1000) == auto_neg) |
2711 | return -ENODEV; /* change didn't take */ | 2696 | return -ENODEV; /* change didn't take */ |
2712 | 2697 | ||
2713 | phy_reg_data0 ^= 0x1000; | 2698 | phy_reg_data0 ^= 0x1000; |
2714 | eth_port_write_smi_reg(port_num, 0, phy_reg_data0); | 2699 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); |
2715 | return 0; | 2700 | return 0; |
2716 | } | 2701 | } |
2717 | 2702 | ||
@@ -2722,7 +2707,7 @@ static int ethernet_phy_detect(unsigned int port_num) | |||
2722 | * This routine returns the given ethernet port PHY address. | 2707 | * This routine returns the given ethernet port PHY address. |
2723 | * | 2708 | * |
2724 | * INPUT: | 2709 | * INPUT: |
2725 | * unsigned int eth_port_num Ethernet Port number. | 2710 | * struct mv643xx_private *mp Ethernet Port. |
2726 | * | 2711 | * |
2727 | * OUTPUT: | 2712 | * OUTPUT: |
2728 | * None. | 2713 | * None. |
@@ -2731,13 +2716,13 @@ static int ethernet_phy_detect(unsigned int port_num) | |||
2731 | * PHY address. | 2716 | * PHY address. |
2732 | * | 2717 | * |
2733 | */ | 2718 | */ |
2734 | static int ethernet_phy_get(unsigned int eth_port_num) | 2719 | static int ethernet_phy_get(struct mv643xx_private *mp) |
2735 | { | 2720 | { |
2736 | unsigned int reg_data; | 2721 | unsigned int reg_data; |
2737 | 2722 | ||
2738 | reg_data = mv_read(PHY_ADDR_REG); | 2723 | reg_data = rdl(mp, PHY_ADDR_REG); |
2739 | 2724 | ||
2740 | return ((reg_data >> (5 * eth_port_num)) & 0x1f); | 2725 | return ((reg_data >> (5 * mp->port_num)) & 0x1f); |
2741 | } | 2726 | } |
2742 | 2727 | ||
2743 | /* | 2728 | /* |
@@ -2747,7 +2732,7 @@ static int ethernet_phy_get(unsigned int eth_port_num) | |||
2747 | * This routine sets the given ethernet port PHY address. | 2732 | * This routine sets the given ethernet port PHY address. |
2748 | * | 2733 | * |
2749 | * INPUT: | 2734 | * INPUT: |
2750 | * unsigned int eth_port_num Ethernet Port number. | 2735 | * struct mv643xx_private *mp Ethernet Port. |
2751 | * int phy_addr PHY address. | 2736 | * int phy_addr PHY address. |
2752 | * | 2737 | * |
2753 | * OUTPUT: | 2738 | * OUTPUT: |
@@ -2757,15 +2742,15 @@ static int ethernet_phy_get(unsigned int eth_port_num) | |||
2757 | * None. | 2742 | * None. |
2758 | * | 2743 | * |
2759 | */ | 2744 | */ |
2760 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) | 2745 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) |
2761 | { | 2746 | { |
2762 | u32 reg_data; | 2747 | u32 reg_data; |
2763 | int addr_shift = 5 * eth_port_num; | 2748 | int addr_shift = 5 * mp->port_num; |
2764 | 2749 | ||
2765 | reg_data = mv_read(PHY_ADDR_REG); | 2750 | reg_data = rdl(mp, PHY_ADDR_REG); |
2766 | reg_data &= ~(0x1f << addr_shift); | 2751 | reg_data &= ~(0x1f << addr_shift); |
2767 | reg_data |= (phy_addr & 0x1f) << addr_shift; | 2752 | reg_data |= (phy_addr & 0x1f) << addr_shift; |
2768 | mv_write(PHY_ADDR_REG, reg_data); | 2753 | wrl(mp, PHY_ADDR_REG, reg_data); |
2769 | } | 2754 | } |
2770 | 2755 | ||
2771 | /* | 2756 | /* |
@@ -2775,7 +2760,7 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) | |||
2775 | * This routine utilizes the SMI interface to reset the ethernet port PHY. | 2760 | * This routine utilizes the SMI interface to reset the ethernet port PHY. |
2776 | * | 2761 | * |
2777 | * INPUT: | 2762 | * INPUT: |
2778 | * unsigned int eth_port_num Ethernet Port number. | 2763 | * struct mv643xx_private *mp Ethernet Port. |
2779 | * | 2764 | * |
2780 | * OUTPUT: | 2765 | * OUTPUT: |
2781 | * The PHY is reset. | 2766 | * The PHY is reset. |
@@ -2784,51 +2769,52 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) | |||
2784 | * None. | 2769 | * None. |
2785 | * | 2770 | * |
2786 | */ | 2771 | */ |
2787 | static void ethernet_phy_reset(unsigned int eth_port_num) | 2772 | static void ethernet_phy_reset(struct mv643xx_private *mp) |
2788 | { | 2773 | { |
2789 | unsigned int phy_reg_data; | 2774 | unsigned int phy_reg_data; |
2790 | 2775 | ||
2791 | /* Reset the PHY */ | 2776 | /* Reset the PHY */ |
2792 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | 2777 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); |
2793 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | 2778 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ |
2794 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); | 2779 | eth_port_write_smi_reg(mp, 0, phy_reg_data); |
2795 | 2780 | ||
2796 | /* wait for PHY to come out of reset */ | 2781 | /* wait for PHY to come out of reset */ |
2797 | do { | 2782 | do { |
2798 | udelay(1); | 2783 | udelay(1); |
2799 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | 2784 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); |
2800 | } while (phy_reg_data & 0x8000); | 2785 | } while (phy_reg_data & 0x8000); |
2801 | } | 2786 | } |
2802 | 2787 | ||
2803 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | 2788 | static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp, |
2804 | unsigned int queues) | 2789 | unsigned int queues) |
2805 | { | 2790 | { |
2806 | mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); | 2791 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues); |
2807 | } | 2792 | } |
2808 | 2793 | ||
2809 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | 2794 | static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp, |
2810 | unsigned int queues) | 2795 | unsigned int queues) |
2811 | { | 2796 | { |
2812 | mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues); | 2797 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues); |
2813 | } | 2798 | } |
2814 | 2799 | ||
2815 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | 2800 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp) |
2816 | { | 2801 | { |
2802 | unsigned int port_num = mp->port_num; | ||
2817 | u32 queues; | 2803 | u32 queues; |
2818 | 2804 | ||
2819 | /* Stop Tx port activity. Check port Tx activity. */ | 2805 | /* Stop Tx port activity. Check port Tx activity. */ |
2820 | queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; | 2806 | queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; |
2821 | if (queues) { | 2807 | if (queues) { |
2822 | /* Issue stop command for active queues only */ | 2808 | /* Issue stop command for active queues only */ |
2823 | mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); | 2809 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); |
2824 | 2810 | ||
2825 | /* Wait for all Tx activity to terminate. */ | 2811 | /* Wait for all Tx activity to terminate. */ |
2826 | /* Check port cause register that all Tx queues are stopped */ | 2812 | /* Check port cause register that all Tx queues are stopped */ |
2827 | while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) | 2813 | while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) |
2828 | udelay(PHY_WAIT_MICRO_SECONDS); | 2814 | udelay(PHY_WAIT_MICRO_SECONDS); |
2829 | 2815 | ||
2830 | /* Wait for Tx FIFO to empty */ | 2816 | /* Wait for Tx FIFO to empty */ |
2831 | while (mv_read(PORT_STATUS_REG(port_num)) & | 2817 | while (rdl(mp, PORT_STATUS_REG(port_num)) & |
2832 | ETH_PORT_TX_FIFO_EMPTY) | 2818 | ETH_PORT_TX_FIFO_EMPTY) |
2833 | udelay(PHY_WAIT_MICRO_SECONDS); | 2819 | udelay(PHY_WAIT_MICRO_SECONDS); |
2834 | } | 2820 | } |
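The enable/disable pairs above also document the queue command convention of these registers: a mask written into bits 0-7 enables the corresponding queues, the same mask shifted into bits 8-15 requests a stop, and the low byte reads back as the set of queues still running. A sketch of the stop-and-wait pattern both disable paths follow (an illustrative generalisation, not a helper from the patch; the Tx variant additionally waits for the FIFO to drain):

	static u32 port_disable_queues(struct mv643xx_private *mp, int reg)
	{
		u32 queues = rdl(mp, reg) & 0xFF;	/* queues still enabled  */

		if (queues) {
			wrl(mp, reg, queues << 8);	/* request stop          */
			while (rdl(mp, reg) & 0xFF)	/* wait for them to idle */
				udelay(PHY_WAIT_MICRO_SECONDS);
		}
		return queues;				/* saved for re-enable   */
	}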
@@ -2836,19 +2822,20 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | |||
2836 | return queues; | 2822 | return queues; |
2837 | } | 2823 | } |
2838 | 2824 | ||
2839 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | 2825 | static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp) |
2840 | { | 2826 | { |
2827 | unsigned int port_num = mp->port_num; | ||
2841 | u32 queues; | 2828 | u32 queues; |
2842 | 2829 | ||
2843 | /* Stop Rx port activity. Check port Rx activity. */ | 2830 | /* Stop Rx port activity. Check port Rx activity. */ |
2844 | queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; | 2831 | queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; |
2845 | if (queues) { | 2832 | if (queues) { |
2846 | /* Issue stop command for active queues only */ | 2833 | /* Issue stop command for active queues only */ |
2847 | mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); | 2834 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); |
2848 | 2835 | ||
2849 | /* Wait for all Rx activity to terminate. */ | 2836 | /* Wait for all Rx activity to terminate. */ |
2850 | /* Check port cause register that all Rx queues are stopped */ | 2837 | /* Check port cause register that all Rx queues are stopped */ |
2851 | while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) | 2838 | while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) |
2852 | udelay(PHY_WAIT_MICRO_SECONDS); | 2839 | udelay(PHY_WAIT_MICRO_SECONDS); |
2853 | } | 2840 | } |
2854 | 2841 | ||
@@ -2864,7 +2851,7 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | |||
2864 | * idle state after this command is performed and the port is disabled. | 2851 | * idle state after this command is performed and the port is disabled. |
2865 | * | 2852 | * |
2866 | * INPUT: | 2853 | * INPUT: |
2867 | * unsigned int eth_port_num Ethernet Port number. | 2854 | * struct mv643xx_private *mp Ethernet Port. |
2868 | * | 2855 | * |
2869 | * OUTPUT: | 2856 | * OUTPUT: |
2870 | * Channel activity is halted. | 2857 | * Channel activity is halted. |
@@ -2873,22 +2860,23 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | |||
2873 | * None. | 2860 | * None. |
2874 | * | 2861 | * |
2875 | */ | 2862 | */ |
2876 | static void eth_port_reset(unsigned int port_num) | 2863 | static void eth_port_reset(struct mv643xx_private *mp) |
2877 | { | 2864 | { |
2865 | unsigned int port_num = mp->port_num; | ||
2878 | unsigned int reg_data; | 2866 | unsigned int reg_data; |
2879 | 2867 | ||
2880 | mv643xx_eth_port_disable_tx(port_num); | 2868 | mv643xx_eth_port_disable_tx(mp); |
2881 | mv643xx_eth_port_disable_rx(port_num); | 2869 | mv643xx_eth_port_disable_rx(mp); |
2882 | 2870 | ||
2883 | /* Clear all MIB counters */ | 2871 | /* Clear all MIB counters */ |
2884 | eth_clear_mib_counters(port_num); | 2872 | eth_clear_mib_counters(mp); |
2885 | 2873 | ||
2886 | /* Reset the Enable bit in the Configuration Register */ | 2874 | /* Reset the Enable bit in the Configuration Register */ |
2887 | reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); | 2875 | reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); |
2888 | reg_data &= ~(SERIAL_PORT_ENABLE | | 2876 | reg_data &= ~(SERIAL_PORT_ENABLE | |
2889 | DO_NOT_FORCE_LINK_FAIL | | 2877 | DO_NOT_FORCE_LINK_FAIL | |
2890 | FORCE_LINK_PASS); | 2878 | FORCE_LINK_PASS); |
2891 | mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data); | 2879 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
2892 | } | 2880 | } |
2893 | 2881 | ||
2894 | 2882 | ||
@@ -2900,7 +2888,7 @@ static void eth_port_reset(unsigned int port_num) | |||
2900 | * order to perform PHY register read. | 2888 | * order to perform PHY register read. |
2901 | * | 2889 | * |
2902 | * INPUT: | 2890 | * INPUT: |
2903 | * unsigned int port_num Ethernet Port number. | 2891 | * struct mv643xx_private *mp Ethernet Port. |
2904 | * unsigned int phy_reg PHY register address offset. | 2892 | * unsigned int phy_reg PHY register address offset. |
2905 | * unsigned int *value Register value buffer. | 2893 | * unsigned int *value Register value buffer. |
2906 | * | 2894 | * |
@@ -2912,10 +2900,10 @@ static void eth_port_reset(unsigned int port_num) | |||
2912 | * true otherwise. | 2900 | * true otherwise. |
2913 | * | 2901 | * |
2914 | */ | 2902 | */ |
2915 | static void eth_port_read_smi_reg(unsigned int port_num, | 2903 | static void eth_port_read_smi_reg(struct mv643xx_private *mp, |
2916 | unsigned int phy_reg, unsigned int *value) | 2904 | unsigned int phy_reg, unsigned int *value) |
2917 | { | 2905 | { |
2918 | int phy_addr = ethernet_phy_get(port_num); | 2906 | int phy_addr = ethernet_phy_get(mp); |
2919 | unsigned long flags; | 2907 | unsigned long flags; |
2920 | int i; | 2908 | int i; |
2921 | 2909 | ||
@@ -2923,27 +2911,27 @@ static void eth_port_read_smi_reg(unsigned int port_num, | |||
2923 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | 2911 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); |
2924 | 2912 | ||
2925 | /* wait for the SMI register to become available */ | 2913 | /* wait for the SMI register to become available */ |
2926 | for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { | 2914 | for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { |
2927 | if (i == PHY_WAIT_ITERATIONS) { | 2915 | if (i == PHY_WAIT_ITERATIONS) { |
2928 | printk("mv643xx PHY busy timeout, port %d\n", port_num); | 2916 | printk("%s: PHY busy timeout\n", mp->dev->name); |
2929 | goto out; | 2917 | goto out; |
2930 | } | 2918 | } |
2931 | udelay(PHY_WAIT_MICRO_SECONDS); | 2919 | udelay(PHY_WAIT_MICRO_SECONDS); |
2932 | } | 2920 | } |
2933 | 2921 | ||
2934 | mv_write(SMI_REG, | 2922 | wrl(mp, SMI_REG, |
2935 | (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); | 2923 | (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); |
2936 | 2924 | ||
2937 | /* now wait for the data to be valid */ | 2925 | /* now wait for the data to be valid */ |
2938 | for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) { | 2926 | for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) { |
2939 | if (i == PHY_WAIT_ITERATIONS) { | 2927 | if (i == PHY_WAIT_ITERATIONS) { |
2940 | printk("mv643xx PHY read timeout, port %d\n", port_num); | 2928 | printk("%s: PHY read timeout\n", mp->dev->name); |
2941 | goto out; | 2929 | goto out; |
2942 | } | 2930 | } |
2943 | udelay(PHY_WAIT_MICRO_SECONDS); | 2931 | udelay(PHY_WAIT_MICRO_SECONDS); |
2944 | } | 2932 | } |
2945 | 2933 | ||
2946 | *value = mv_read(SMI_REG) & 0xffff; | 2934 | *value = rdl(mp, SMI_REG) & 0xffff; |
2947 | out: | 2935 | out: |
2948 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | 2936 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); |
2949 | } | 2937 | } |
@@ -2956,7 +2944,7 @@ out: | |||
2956 | * order to perform writes to PHY registers. | 2944 | * order to perform writes to PHY registers. |
2957 | * | 2945 | * |
2958 | * INPUT: | 2946 | * INPUT: |
2959 | * unsigned int eth_port_num Ethernet Port number. | 2947 | * struct mv643xx_private *mp Ethernet Port. |
2960 | * unsigned int phy_reg PHY register address offset. | 2948 | * unsigned int phy_reg PHY register address offset. |
2961 | * unsigned int value Register value. | 2949 | * unsigned int value Register value. |
2962 | * | 2950 | * |
@@ -2968,29 +2956,28 @@ out: | |||
2968 | * true otherwise. | 2956 | * true otherwise. |
2969 | * | 2957 | * |
2970 | */ | 2958 | */ |
2971 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | 2959 | static void eth_port_write_smi_reg(struct mv643xx_private *mp, |
2972 | unsigned int phy_reg, unsigned int value) | 2960 | unsigned int phy_reg, unsigned int value) |
2973 | { | 2961 | { |
2974 | int phy_addr; | 2962 | int phy_addr; |
2975 | int i; | 2963 | int i; |
2976 | unsigned long flags; | 2964 | unsigned long flags; |
2977 | 2965 | ||
2978 | phy_addr = ethernet_phy_get(eth_port_num); | 2966 | phy_addr = ethernet_phy_get(mp); |
2979 | 2967 | ||
2980 | /* the SMI register is a shared resource */ | 2968 | /* the SMI register is a shared resource */ |
2981 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | 2969 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); |
2982 | 2970 | ||
2983 | /* wait for the SMI register to become available */ | 2971 | /* wait for the SMI register to become available */ |
2984 | for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { | 2972 | for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { |
2985 | if (i == PHY_WAIT_ITERATIONS) { | 2973 | if (i == PHY_WAIT_ITERATIONS) { |
2986 | printk("mv643xx PHY busy timeout, port %d\n", | 2974 | printk("%s: PHY busy timeout\n", mp->dev->name); |
2987 | eth_port_num); | ||
2988 | goto out; | 2975 | goto out; |
2989 | } | 2976 | } |
2990 | udelay(PHY_WAIT_MICRO_SECONDS); | 2977 | udelay(PHY_WAIT_MICRO_SECONDS); |
2991 | } | 2978 | } |
2992 | 2979 | ||
2993 | mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) | | 2980 | wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) | |
2994 | ETH_SMI_OPCODE_WRITE | (value & 0xffff)); | 2981 | ETH_SMI_OPCODE_WRITE | (value & 0xffff)); |
2995 | out: | 2982 | out: |
2996 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | 2983 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); |
@@ -3001,17 +2988,17 @@ out: | |||
3001 | */ | 2988 | */ |
3002 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) | 2989 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) |
3003 | { | 2990 | { |
3004 | int val; | ||
3005 | struct mv643xx_private *mp = netdev_priv(dev); | 2991 | struct mv643xx_private *mp = netdev_priv(dev); |
2992 | int val; | ||
3006 | 2993 | ||
3007 | eth_port_read_smi_reg(mp->port_num, location, &val); | 2994 | eth_port_read_smi_reg(mp, location, &val); |
3008 | return val; | 2995 | return val; |
3009 | } | 2996 | } |
3010 | 2997 | ||
3011 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) | 2998 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
3012 | { | 2999 | { |
3013 | struct mv643xx_private *mp = netdev_priv(dev); | 3000 | struct mv643xx_private *mp = netdev_priv(dev); |
3014 | eth_port_write_smi_reg(mp->port_num, location, val); | 3001 | eth_port_write_smi_reg(mp, location, val); |
3015 | } | 3002 | } |
3016 | 3003 | ||
3017 | /* | 3004 | /* |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 900ab5d2ba70..46119bb3770a 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -786,7 +786,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
786 | struct netdev_private *np; | 786 | struct netdev_private *np; |
787 | int i, option, irq, chip_idx = ent->driver_data; | 787 | int i, option, irq, chip_idx = ent->driver_data; |
788 | static int find_cnt = -1; | 788 | static int find_cnt = -1; |
789 | unsigned long iostart, iosize; | 789 | resource_size_t iostart; |
790 | unsigned long iosize; | ||
790 | void __iomem *ioaddr; | 791 | void __iomem *ioaddr; |
791 | const int pcibar = 1; /* PCI base address register */ | 792 | const int pcibar = 1; /* PCI base address register */ |
792 | int prev_eedata; | 793 | int prev_eedata; |
@@ -946,10 +947,11 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
946 | goto err_create_file; | 947 | goto err_create_file; |
947 | 948 | ||
948 | if (netif_msg_drv(np)) { | 949 | if (netif_msg_drv(np)) { |
949 | printk(KERN_INFO "natsemi %s: %s at %#08lx " | 950 | printk(KERN_INFO "natsemi %s: %s at %#08llx " |
950 | "(%s), %s, IRQ %d", | 951 | "(%s), %s, IRQ %d", |
951 | dev->name, natsemi_pci_info[chip_idx].name, iostart, | 952 | dev->name, natsemi_pci_info[chip_idx].name, |
952 | pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq); | 953 | (unsigned long long)iostart, pci_name(np->pci_dev), |
954 | print_mac(mac, dev->dev_addr), irq); | ||
953 | if (dev->if_port == PORT_TP) | 955 | if (dev->if_port == PORT_TP) |
954 | printk(", port TP.\n"); | 956 | printk(", port TP.\n"); |
955 | else if (np->ignore_phy) | 957 | else if (np->ignore_phy) |
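The natsemi hunk above switches iostart to resource_size_t, whose width depends on the platform, so the printk casts it to unsigned long long and uses a fixed %llx format. A hedged sketch of the same idiom; report_bar() is illustrative, not driver code.

	static void report_bar(struct pci_dev *pdev, int bar)
	{
		resource_size_t start = pci_resource_start(pdev, bar);

		/* resource_size_t may be 32 or 64 bits; widen it explicitly */
		printk(KERN_INFO "%s: BAR %d at %#08llx\n",
		       pci_name(pdev), bar, (unsigned long long)start);
	}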
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 7f20a03623a0..8cb29f5b1038 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -95,23 +95,6 @@ | |||
95 | 95 | ||
96 | #define ADDR_IN_WINDOW1(off) \ | 96 | #define ADDR_IN_WINDOW1(off) \ |
97 | ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 | 97 | ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 |
98 | /* | ||
99 | * In netxen_nic_down(), we must wait for any pending callback requests into | ||
100 | * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be | ||
101 | * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK() | ||
102 | * does this synchronization. | ||
103 | * | ||
104 | * Normally, schedule_work()/flush_scheduled_work() could have worked, but | ||
105 | * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off() | ||
106 | * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a | ||
107 | * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause | ||
108 | * linkwatch_event() to be executed which also attempts to acquire the rtnl | ||
109 | * lock thus causing a deadlock. | ||
110 | */ | ||
111 | |||
112 | #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp) | ||
113 | #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq) | ||
114 | extern struct workqueue_struct *netxen_workq; | ||
115 | 98 | ||
116 | /* | 99 | /* |
117 | * normalize a 64MB crb address to 32MB PCI window | 100 | * normalize a 64MB crb address to 32MB PCI window |
@@ -1050,7 +1033,6 @@ void netxen_halt_pegs(struct netxen_adapter *adapter); | |||
1050 | int netxen_rom_se(struct netxen_adapter *adapter, int addr); | 1033 | int netxen_rom_se(struct netxen_adapter *adapter, int addr); |
1051 | 1034 | ||
1052 | /* Functions from netxen_nic_isr.c */ | 1035 | /* Functions from netxen_nic_isr.c */ |
1053 | int netxen_nic_link_ok(struct netxen_adapter *adapter); | ||
1054 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); | 1036 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); |
1055 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); | 1037 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); |
1056 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | 1038 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, |
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c index c81313b717bd..f487615f4063 100644 --- a/drivers/net/netxen/netxen_nic_isr.c +++ b/drivers/net/netxen/netxen_nic_isr.c | |||
@@ -172,6 +172,7 @@ void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter) | |||
172 | netxen_nic_isr_other(adapter); | 172 | netxen_nic_isr_other(adapter); |
173 | } | 173 | } |
174 | 174 | ||
175 | #if 0 | ||
175 | int netxen_nic_link_ok(struct netxen_adapter *adapter) | 176 | int netxen_nic_link_ok(struct netxen_adapter *adapter) |
176 | { | 177 | { |
177 | switch (adapter->ahw.board_type) { | 178 | switch (adapter->ahw.board_type) { |
@@ -189,6 +190,7 @@ int netxen_nic_link_ok(struct netxen_adapter *adapter) | |||
189 | 190 | ||
190 | return 0; | 191 | return 0; |
191 | } | 192 | } |
193 | #endif /* 0 */ | ||
192 | 194 | ||
193 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) | 195 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) |
194 | { | 196 | { |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index a8fb439a4d03..7144c255ce54 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | |||
86 | 86 | ||
87 | MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); | 87 | MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); |
88 | 88 | ||
89 | struct workqueue_struct *netxen_workq; | 89 | /* |
90 | * In netxen_nic_down(), we must wait for any pending callback requests into | ||
91 | * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be | ||
92 | * reenabled right after it is deleted in netxen_nic_down(). | ||
93 | * FLUSH_SCHEDULED_WORK() does this synchronization. | ||
94 | * | ||
95 | * Normally, schedule_work()/flush_scheduled_work() could have worked, but | ||
96 | * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off() | ||
97 | * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a | ||
98 | * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause | ||
99 | * linkwatch_event() to be executed which also attempts to acquire the rtnl | ||
100 | * lock thus causing a deadlock. | ||
101 | */ | ||
102 | |||
103 | static struct workqueue_struct *netxen_workq; | ||
104 | #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp) | ||
105 | #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq) | ||
106 | |||
90 | static void netxen_watchdog(unsigned long); | 107 | static void netxen_watchdog(unsigned long); |
91 | 108 | ||
92 | static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, | 109 | static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, |
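The comment moved into netxen_nic_main.c above explains why flush_scheduled_work() cannot be used under the rtnl lock. A minimal sketch of the private-workqueue pattern it describes, with purely illustrative names; the real driver keeps its queue in netxen_workq and wraps it in the SCHEDULE_WORK()/FLUSH_SCHEDULED_WORK() macros shown in the hunk.

	static struct workqueue_struct *example_wq;

	static void example_watchdog_task(struct work_struct *work)
	{
		/* periodic link/health checks would run here */
	}

	static DECLARE_WORK(example_work, example_watchdog_task);

	static int example_up(void)
	{
		example_wq = create_singlethread_workqueue("example");
		if (!example_wq)
			return -ENOMEM;
		queue_work(example_wq, &example_work);
		return 0;
	}

	static void example_down(void)
	{
		/* waits only for work on our own queue, not on keventd */
		flush_workqueue(example_wq);
		destroy_workqueue(example_wq);
	}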
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 2e39e0285d8f..bcd7f9814ed8 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -1012,7 +1012,7 @@ static int pasemi_mac_phy_init(struct net_device *dev) | |||
1012 | goto err; | 1012 | goto err; |
1013 | 1013 | ||
1014 | phy_id = *prop; | 1014 | phy_id = *prop; |
1015 | snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id); | 1015 | snprintf(mac->phy_id, BUS_ID_SIZE, "%x:%02x", (int)r.start, phy_id); |
1016 | 1016 | ||
1017 | of_node_put(phy_dn); | 1017 | of_node_put(phy_dn); |
1018 | 1018 | ||
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index f5310ed3760d..60c5cfe96918 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
@@ -176,6 +176,20 @@ static struct phy_driver bcm5461_driver = { | |||
176 | .driver = { .owner = THIS_MODULE }, | 176 | .driver = { .owner = THIS_MODULE }, |
177 | }; | 177 | }; |
178 | 178 | ||
179 | static struct phy_driver bcm5464_driver = { | ||
180 | .phy_id = 0x002060b0, | ||
181 | .phy_id_mask = 0xfffffff0, | ||
182 | .name = "Broadcom BCM5464", | ||
183 | .features = PHY_GBIT_FEATURES, | ||
184 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
185 | .config_init = bcm54xx_config_init, | ||
186 | .config_aneg = genphy_config_aneg, | ||
187 | .read_status = genphy_read_status, | ||
188 | .ack_interrupt = bcm54xx_ack_interrupt, | ||
189 | .config_intr = bcm54xx_config_intr, | ||
190 | .driver = { .owner = THIS_MODULE }, | ||
191 | }; | ||
192 | |||
179 | static struct phy_driver bcm5481_driver = { | 193 | static struct phy_driver bcm5481_driver = { |
180 | .phy_id = 0x0143bca0, | 194 | .phy_id = 0x0143bca0, |
181 | .phy_id_mask = 0xfffffff0, | 195 | .phy_id_mask = 0xfffffff0, |
@@ -217,6 +231,9 @@ static int __init broadcom_init(void) | |||
217 | ret = phy_driver_register(&bcm5461_driver); | 231 | ret = phy_driver_register(&bcm5461_driver); |
218 | if (ret) | 232 | if (ret) |
219 | goto out_5461; | 233 | goto out_5461; |
234 | ret = phy_driver_register(&bcm5464_driver); | ||
235 | if (ret) | ||
236 | goto out_5464; | ||
220 | ret = phy_driver_register(&bcm5481_driver); | 237 | ret = phy_driver_register(&bcm5481_driver); |
221 | if (ret) | 238 | if (ret) |
222 | goto out_5481; | 239 | goto out_5481; |
@@ -228,6 +245,8 @@ static int __init broadcom_init(void) | |||
228 | out_5482: | 245 | out_5482: |
229 | phy_driver_unregister(&bcm5481_driver); | 246 | phy_driver_unregister(&bcm5481_driver); |
230 | out_5481: | 247 | out_5481: |
248 | phy_driver_unregister(&bcm5464_driver); | ||
249 | out_5464: | ||
231 | phy_driver_unregister(&bcm5461_driver); | 250 | phy_driver_unregister(&bcm5461_driver); |
232 | out_5461: | 251 | out_5461: |
233 | phy_driver_unregister(&bcm5421_driver); | 252 | phy_driver_unregister(&bcm5421_driver); |
@@ -241,6 +260,7 @@ static void __exit broadcom_exit(void) | |||
241 | { | 260 | { |
242 | phy_driver_unregister(&bcm5482_driver); | 261 | phy_driver_unregister(&bcm5482_driver); |
243 | phy_driver_unregister(&bcm5481_driver); | 262 | phy_driver_unregister(&bcm5481_driver); |
263 | phy_driver_unregister(&bcm5464_driver); | ||
244 | phy_driver_unregister(&bcm5461_driver); | 264 | phy_driver_unregister(&bcm5461_driver); |
245 | phy_driver_unregister(&bcm5421_driver); | 265 | phy_driver_unregister(&bcm5421_driver); |
246 | phy_driver_unregister(&bcm5411_driver); | 266 | phy_driver_unregister(&bcm5411_driver); |
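The broadcom.c hunks above slot the new BCM5464 entry into an existing register/unwind chain. A condensed sketch of that error-handling idiom with stand-in drivers: each failed phy_driver_register() unregisters, in reverse order, everything registered before it.

	static struct phy_driver driver_a, driver_b, driver_c;	/* stand-ins */

	static int __init example_phy_init(void)
	{
		int ret;

		ret = phy_driver_register(&driver_a);
		if (ret)
			goto out;
		ret = phy_driver_register(&driver_b);
		if (ret)
			goto out_a;
		ret = phy_driver_register(&driver_c);
		if (ret)
			goto out_b;
		return 0;

	out_b:
		phy_driver_unregister(&driver_b);
	out_a:
		phy_driver_unregister(&driver_a);
	out:
		return ret;
	}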
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index ca9b040f9ad9..4e07956a483b 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c | |||
@@ -213,7 +213,7 @@ static int __init fixed_mdio_bus_init(void) | |||
213 | goto err_pdev; | 213 | goto err_pdev; |
214 | } | 214 | } |
215 | 215 | ||
216 | fmb->mii_bus.id = 0; | 216 | snprintf(fmb->mii_bus.id, MII_BUS_ID_SIZE, "0"); |
217 | fmb->mii_bus.name = "Fixed MDIO Bus"; | 217 | fmb->mii_bus.name = "Fixed MDIO Bus"; |
218 | fmb->mii_bus.dev = &pdev->dev; | 218 | fmb->mii_bus.dev = &pdev->dev; |
219 | fmb->mii_bus.read = &fixed_mdio_read; | 219 | fmb->mii_bus.read = &fixed_mdio_read; |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f4c4fd85425f..8b1121b02f98 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -86,35 +86,55 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | |||
86 | EXPORT_SYMBOL(phy_device_create); | 86 | EXPORT_SYMBOL(phy_device_create); |
87 | 87 | ||
88 | /** | 88 | /** |
89 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct | 89 | * get_phy_id - reads the specified addr for its ID. |
90 | * @bus: the target MII bus | 90 | * @bus: the target MII bus |
91 | * @addr: PHY address on the MII bus | 91 | * @addr: PHY address on the MII bus |
92 | * @phy_id: where to store the ID retrieved. | ||
92 | * | 93 | * |
93 | * Description: Reads the ID registers of the PHY at @addr on the | 94 | * Description: Reads the ID registers of the PHY at @addr on the |
94 | * @bus, then allocates and returns the phy_device to represent it. | 95 | * @bus, stores it in @phy_id and returns zero on success. |
95 | */ | 96 | */ |
96 | struct phy_device * get_phy_device(struct mii_bus *bus, int addr) | 97 | int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) |
97 | { | 98 | { |
98 | int phy_reg; | 99 | int phy_reg; |
99 | u32 phy_id; | ||
100 | struct phy_device *dev = NULL; | ||
101 | 100 | ||
102 | /* Grab the bits from PHYIR1, and put them | 101 | /* Grab the bits from PHYIR1, and put them |
103 | * in the upper half */ | 102 | * in the upper half */ |
104 | phy_reg = bus->read(bus, addr, MII_PHYSID1); | 103 | phy_reg = bus->read(bus, addr, MII_PHYSID1); |
105 | 104 | ||
106 | if (phy_reg < 0) | 105 | if (phy_reg < 0) |
107 | return ERR_PTR(phy_reg); | 106 | return -EIO; |
108 | 107 | ||
109 | phy_id = (phy_reg & 0xffff) << 16; | 108 | *phy_id = (phy_reg & 0xffff) << 16; |
110 | 109 | ||
111 | /* Grab the bits from PHYIR2, and put them in the lower half */ | 110 | /* Grab the bits from PHYIR2, and put them in the lower half */ |
112 | phy_reg = bus->read(bus, addr, MII_PHYSID2); | 111 | phy_reg = bus->read(bus, addr, MII_PHYSID2); |
113 | 112 | ||
114 | if (phy_reg < 0) | 113 | if (phy_reg < 0) |
115 | return ERR_PTR(phy_reg); | 114 | return -EIO; |
115 | |||
116 | *phy_id |= (phy_reg & 0xffff); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct | ||
123 | * @bus: the target MII bus | ||
124 | * @addr: PHY address on the MII bus | ||
125 | * | ||
126 | * Description: Reads the ID registers of the PHY at @addr on the | ||
127 | * @bus, then allocates and returns the phy_device to represent it. | ||
128 | */ | ||
129 | struct phy_device * get_phy_device(struct mii_bus *bus, int addr) | ||
130 | { | ||
131 | struct phy_device *dev = NULL; | ||
132 | u32 phy_id; | ||
133 | int r; | ||
116 | 134 | ||
117 | phy_id |= (phy_reg & 0xffff); | 135 | r = get_phy_id(bus, addr, &phy_id); |
136 | if (r) | ||
137 | return ERR_PTR(r); | ||
118 | 138 | ||
119 | /* If the phy_id is all Fs, there is no device there */ | 139 | /* If the phy_id is all Fs, there is no device there */ |
120 | if (0xffffffff == phy_id) | 140 | if (0xffffffff == phy_id) |
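The phy_device.c hunks above split ID probing out into get_phy_id(), which composes the 32-bit ID from MII_PHYSID1/MII_PHYSID2 and returns -EIO on an MDIO failure. A hedged sketch of a caller walking a bus with it; example_scan() is illustrative and assumes get_phy_id() is visible to the caller, and an ID of all ones means no PHY answered at that address.

	static void example_scan(struct mii_bus *bus)
	{
		int addr;

		for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
			u32 id;

			if (get_phy_id(bus, addr, &id))
				continue;	/* MDIO read failed */
			if (id == 0xffffffff)
				continue;	/* no device at this address */
			printk(KERN_INFO "PHY found at %s:%02x, id 0x%08x\n",
			       bus->id, addr, id);
		}
	}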
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 487f9d2ac5b4..5986cec17f19 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -88,31 +88,31 @@ static int sb1000_close(struct net_device *dev); | |||
88 | 88 | ||
89 | 89 | ||
90 | /* SB1000 hardware routines to be used during open/configuration phases */ | 90 | /* SB1000 hardware routines to be used during open/configuration phases */ |
91 | static inline int card_wait_for_busy_clear(const int ioaddr[], | 91 | static int card_wait_for_busy_clear(const int ioaddr[], |
92 | const char* name); | 92 | const char* name); |
93 | static inline int card_wait_for_ready(const int ioaddr[], const char* name, | 93 | static int card_wait_for_ready(const int ioaddr[], const char* name, |
94 | unsigned char in[]); | 94 | unsigned char in[]); |
95 | static int card_send_command(const int ioaddr[], const char* name, | 95 | static int card_send_command(const int ioaddr[], const char* name, |
96 | const unsigned char out[], unsigned char in[]); | 96 | const unsigned char out[], unsigned char in[]); |
97 | 97 | ||
98 | /* SB1000 hardware routines to be used during frame rx interrupt */ | 98 | /* SB1000 hardware routines to be used during frame rx interrupt */ |
99 | static inline int sb1000_wait_for_ready(const int ioaddr[], const char* name); | 99 | static int sb1000_wait_for_ready(const int ioaddr[], const char* name); |
100 | static inline int sb1000_wait_for_ready_clear(const int ioaddr[], | 100 | static int sb1000_wait_for_ready_clear(const int ioaddr[], |
101 | const char* name); | 101 | const char* name); |
102 | static inline void sb1000_send_command(const int ioaddr[], const char* name, | 102 | static void sb1000_send_command(const int ioaddr[], const char* name, |
103 | const unsigned char out[]); | 103 | const unsigned char out[]); |
104 | static inline void sb1000_read_status(const int ioaddr[], unsigned char in[]); | 104 | static void sb1000_read_status(const int ioaddr[], unsigned char in[]); |
105 | static inline void sb1000_issue_read_command(const int ioaddr[], | 105 | static void sb1000_issue_read_command(const int ioaddr[], |
106 | const char* name); | 106 | const char* name); |
107 | 107 | ||
108 | /* SB1000 commands for open/configuration */ | 108 | /* SB1000 commands for open/configuration */ |
109 | static inline int sb1000_reset(const int ioaddr[], const char* name); | 109 | static int sb1000_reset(const int ioaddr[], const char* name); |
110 | static inline int sb1000_check_CRC(const int ioaddr[], const char* name); | 110 | static int sb1000_check_CRC(const int ioaddr[], const char* name); |
111 | static inline int sb1000_start_get_set_command(const int ioaddr[], | 111 | static inline int sb1000_start_get_set_command(const int ioaddr[], |
112 | const char* name); | 112 | const char* name); |
113 | static inline int sb1000_end_get_set_command(const int ioaddr[], | 113 | static int sb1000_end_get_set_command(const int ioaddr[], |
114 | const char* name); | 114 | const char* name); |
115 | static inline int sb1000_activate(const int ioaddr[], const char* name); | 115 | static int sb1000_activate(const int ioaddr[], const char* name); |
116 | static int sb1000_get_firmware_version(const int ioaddr[], | 116 | static int sb1000_get_firmware_version(const int ioaddr[], |
117 | const char* name, unsigned char version[], int do_end); | 117 | const char* name, unsigned char version[], int do_end); |
118 | static int sb1000_get_frequency(const int ioaddr[], const char* name, | 118 | static int sb1000_get_frequency(const int ioaddr[], const char* name, |
@@ -125,8 +125,8 @@ static int sb1000_set_PIDs(const int ioaddr[], const char* name, | |||
125 | const short PID[]); | 125 | const short PID[]); |
126 | 126 | ||
127 | /* SB1000 commands for frame rx interrupt */ | 127 | /* SB1000 commands for frame rx interrupt */ |
128 | static inline int sb1000_rx(struct net_device *dev); | 128 | static int sb1000_rx(struct net_device *dev); |
129 | static inline void sb1000_error_dpc(struct net_device *dev); | 129 | static void sb1000_error_dpc(struct net_device *dev); |
130 | 130 | ||
131 | static const struct pnp_device_id sb1000_pnp_ids[] = { | 131 | static const struct pnp_device_id sb1000_pnp_ids[] = { |
132 | { "GIC1000", 0 }, | 132 | { "GIC1000", 0 }, |
@@ -250,7 +250,7 @@ static struct pnp_driver sb1000_driver = { | |||
250 | static const int TimeOutJiffies = (875 * HZ) / 100; | 250 | static const int TimeOutJiffies = (875 * HZ) / 100; |
251 | 251 | ||
252 | /* Card Wait For Busy Clear (cannot be used during an interrupt) */ | 252 | /* Card Wait For Busy Clear (cannot be used during an interrupt) */ |
253 | static inline int | 253 | static int |
254 | card_wait_for_busy_clear(const int ioaddr[], const char* name) | 254 | card_wait_for_busy_clear(const int ioaddr[], const char* name) |
255 | { | 255 | { |
256 | unsigned char a; | 256 | unsigned char a; |
@@ -274,7 +274,7 @@ card_wait_for_busy_clear(const int ioaddr[], const char* name) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /* Card Wait For Ready (cannot be used during an interrupt) */ | 276 | /* Card Wait For Ready (cannot be used during an interrupt) */ |
277 | static inline int | 277 | static int |
278 | card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) | 278 | card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) |
279 | { | 279 | { |
280 | unsigned char a; | 280 | unsigned char a; |
@@ -354,7 +354,7 @@ card_send_command(const int ioaddr[], const char* name, | |||
354 | static const int Sb1000TimeOutJiffies = 7 * HZ; | 354 | static const int Sb1000TimeOutJiffies = 7 * HZ; |
355 | 355 | ||
356 | /* Card Wait For Ready (to be used during frame rx) */ | 356 | /* Card Wait For Ready (to be used during frame rx) */ |
357 | static inline int | 357 | static int |
358 | sb1000_wait_for_ready(const int ioaddr[], const char* name) | 358 | sb1000_wait_for_ready(const int ioaddr[], const char* name) |
359 | { | 359 | { |
360 | unsigned long timeout; | 360 | unsigned long timeout; |
@@ -380,7 +380,7 @@ sb1000_wait_for_ready(const int ioaddr[], const char* name) | |||
380 | } | 380 | } |
381 | 381 | ||
382 | /* Card Wait For Ready Clear (to be used during frame rx) */ | 382 | /* Card Wait For Ready Clear (to be used during frame rx) */ |
383 | static inline int | 383 | static int |
384 | sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) | 384 | sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) |
385 | { | 385 | { |
386 | unsigned long timeout; | 386 | unsigned long timeout; |
@@ -405,7 +405,7 @@ sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) | |||
405 | } | 405 | } |
406 | 406 | ||
407 | /* Card Send Command (to be used during frame rx) */ | 407 | /* Card Send Command (to be used during frame rx) */ |
408 | static inline void | 408 | static void |
409 | sb1000_send_command(const int ioaddr[], const char* name, | 409 | sb1000_send_command(const int ioaddr[], const char* name, |
410 | const unsigned char out[]) | 410 | const unsigned char out[]) |
411 | { | 411 | { |
@@ -422,7 +422,7 @@ sb1000_send_command(const int ioaddr[], const char* name, | |||
422 | } | 422 | } |
423 | 423 | ||
424 | /* Card Read Status (to be used during frame rx) */ | 424 | /* Card Read Status (to be used during frame rx) */ |
425 | static inline void | 425 | static void |
426 | sb1000_read_status(const int ioaddr[], unsigned char in[]) | 426 | sb1000_read_status(const int ioaddr[], unsigned char in[]) |
427 | { | 427 | { |
428 | in[1] = inb(ioaddr[0] + 1); | 428 | in[1] = inb(ioaddr[0] + 1); |
@@ -434,10 +434,10 @@ sb1000_read_status(const int ioaddr[], unsigned char in[]) | |||
434 | } | 434 | } |
435 | 435 | ||
436 | /* Issue Read Command (to be used during frame rx) */ | 436 | /* Issue Read Command (to be used during frame rx) */ |
437 | static inline void | 437 | static void |
438 | sb1000_issue_read_command(const int ioaddr[], const char* name) | 438 | sb1000_issue_read_command(const int ioaddr[], const char* name) |
439 | { | 439 | { |
440 | const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; | 440 | static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; |
441 | 441 | ||
442 | sb1000_wait_for_ready_clear(ioaddr, name); | 442 | sb1000_wait_for_ready_clear(ioaddr, name); |
443 | outb(0xa0, ioaddr[0] + 6); | 443 | outb(0xa0, ioaddr[0] + 6); |
@@ -450,12 +450,13 @@ sb1000_issue_read_command(const int ioaddr[], const char* name) | |||
450 | * SB1000 commands for open/configuration | 450 | * SB1000 commands for open/configuration |
451 | */ | 451 | */ |
452 | /* reset SB1000 card */ | 452 | /* reset SB1000 card */ |
453 | static inline int | 453 | static int |
454 | sb1000_reset(const int ioaddr[], const char* name) | 454 | sb1000_reset(const int ioaddr[], const char* name) |
455 | { | 455 | { |
456 | static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; | ||
457 | |||
456 | unsigned char st[7]; | 458 | unsigned char st[7]; |
457 | int port, status; | 459 | int port, status; |
458 | const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; | ||
459 | 460 | ||
460 | port = ioaddr[1] + 6; | 461 | port = ioaddr[1] + 6; |
461 | outb(0x4, port); | 462 | outb(0x4, port); |
@@ -479,12 +480,13 @@ sb1000_reset(const int ioaddr[], const char* name) | |||
479 | } | 480 | } |
480 | 481 | ||
481 | /* check SB1000 firmware CRC */ | 482 | /* check SB1000 firmware CRC */ |
482 | static inline int | 483 | static int |
483 | sb1000_check_CRC(const int ioaddr[], const char* name) | 484 | sb1000_check_CRC(const int ioaddr[], const char* name) |
484 | { | 485 | { |
486 | static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00}; | ||
487 | |||
485 | unsigned char st[7]; | 488 | unsigned char st[7]; |
486 | int crc, status; | 489 | int crc, status; |
487 | const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00}; | ||
488 | 490 | ||
489 | /* check CRC */ | 491 | /* check CRC */ |
490 | if ((status = card_send_command(ioaddr, name, Command0, st))) | 492 | if ((status = card_send_command(ioaddr, name, Command0, st))) |
@@ -498,32 +500,35 @@ sb1000_check_CRC(const int ioaddr[], const char* name) | |||
498 | static inline int | 500 | static inline int |
499 | sb1000_start_get_set_command(const int ioaddr[], const char* name) | 501 | sb1000_start_get_set_command(const int ioaddr[], const char* name) |
500 | { | 502 | { |
503 | static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00}; | ||
504 | |||
501 | unsigned char st[7]; | 505 | unsigned char st[7]; |
502 | const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00}; | ||
503 | 506 | ||
504 | return card_send_command(ioaddr, name, Command0, st); | 507 | return card_send_command(ioaddr, name, Command0, st); |
505 | } | 508 | } |
506 | 509 | ||
507 | static inline int | 510 | static int |
508 | sb1000_end_get_set_command(const int ioaddr[], const char* name) | 511 | sb1000_end_get_set_command(const int ioaddr[], const char* name) |
509 | { | 512 | { |
513 | static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00}; | ||
514 | static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
515 | |||
510 | unsigned char st[7]; | 516 | unsigned char st[7]; |
511 | int status; | 517 | int status; |
512 | const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00}; | ||
513 | const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
514 | 518 | ||
515 | if ((status = card_send_command(ioaddr, name, Command0, st))) | 519 | if ((status = card_send_command(ioaddr, name, Command0, st))) |
516 | return status; | 520 | return status; |
517 | return card_send_command(ioaddr, name, Command1, st); | 521 | return card_send_command(ioaddr, name, Command1, st); |
518 | } | 522 | } |
519 | 523 | ||
520 | static inline int | 524 | static int |
521 | sb1000_activate(const int ioaddr[], const char* name) | 525 | sb1000_activate(const int ioaddr[], const char* name) |
522 | { | 526 | { |
527 | static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00}; | ||
528 | static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; | ||
529 | |||
523 | unsigned char st[7]; | 530 | unsigned char st[7]; |
524 | int status; | 531 | int status; |
525 | const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00}; | ||
526 | const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; | ||
527 | 532 | ||
528 | ssleep(1); | 533 | ssleep(1); |
529 | if ((status = card_send_command(ioaddr, name, Command0, st))) | 534 | if ((status = card_send_command(ioaddr, name, Command0, st))) |
@@ -544,9 +549,10 @@ static int | |||
544 | sb1000_get_firmware_version(const int ioaddr[], const char* name, | 549 | sb1000_get_firmware_version(const int ioaddr[], const char* name, |
545 | unsigned char version[], int do_end) | 550 | unsigned char version[], int do_end) |
546 | { | 551 | { |
552 | static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00}; | ||
553 | |||
547 | unsigned char st[7]; | 554 | unsigned char st[7]; |
548 | int status; | 555 | int status; |
549 | const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00}; | ||
550 | 556 | ||
551 | if ((status = sb1000_start_get_set_command(ioaddr, name))) | 557 | if ((status = sb1000_start_get_set_command(ioaddr, name))) |
552 | return status; | 558 | return status; |
@@ -566,9 +572,10 @@ sb1000_get_firmware_version(const int ioaddr[], const char* name, | |||
566 | static int | 572 | static int |
567 | sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) | 573 | sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) |
568 | { | 574 | { |
575 | static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00}; | ||
576 | |||
569 | unsigned char st[7]; | 577 | unsigned char st[7]; |
570 | int status; | 578 | int status; |
571 | const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00}; | ||
572 | 579 | ||
573 | udelay(1000); | 580 | udelay(1000); |
574 | if ((status = sb1000_start_get_set_command(ioaddr, name))) | 581 | if ((status = sb1000_start_get_set_command(ioaddr, name))) |
@@ -613,12 +620,13 @@ sb1000_set_frequency(const int ioaddr[], const char* name, int frequency) | |||
613 | static int | 620 | static int |
614 | sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) | 621 | sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) |
615 | { | 622 | { |
623 | static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00}; | ||
624 | static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00}; | ||
625 | static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00}; | ||
626 | static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00}; | ||
627 | |||
616 | unsigned char st[7]; | 628 | unsigned char st[7]; |
617 | int status; | 629 | int status; |
618 | const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00}; | ||
619 | const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00}; | ||
620 | const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00}; | ||
621 | const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00}; | ||
622 | 630 | ||
623 | udelay(1000); | 631 | udelay(1000); |
624 | if ((status = sb1000_start_get_set_command(ioaddr, name))) | 632 | if ((status = sb1000_start_get_set_command(ioaddr, name))) |
@@ -647,6 +655,8 @@ sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) | |||
647 | static int | 655 | static int |
648 | sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) | 656 | sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) |
649 | { | 657 | { |
658 | static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; | ||
659 | |||
650 | unsigned char st[7]; | 660 | unsigned char st[7]; |
651 | short p; | 661 | short p; |
652 | int status; | 662 | int status; |
@@ -654,7 +664,6 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) | |||
654 | unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; | 664 | unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; |
655 | unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; | 665 | unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; |
656 | unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; | 666 | unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; |
657 | const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; | ||
658 | 667 | ||
659 | udelay(1000); | 668 | udelay(1000); |
660 | if ((status = sb1000_start_get_set_command(ioaddr, name))) | 669 | if ((status = sb1000_start_get_set_command(ioaddr, name))) |
@@ -694,7 +703,7 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) | |||
694 | } | 703 | } |
695 | 704 | ||
696 | 705 | ||
697 | static inline void | 706 | static void |
698 | sb1000_print_status_buffer(const char* name, unsigned char st[], | 707 | sb1000_print_status_buffer(const char* name, unsigned char st[], |
699 | unsigned char buffer[], int size) | 708 | unsigned char buffer[], int size) |
700 | { | 709 | { |
@@ -725,7 +734,7 @@ sb1000_print_status_buffer(const char* name, unsigned char st[], | |||
725 | /* receive a single frame and assemble datagram | 734 | /* receive a single frame and assemble datagram |
726 | * (this is the heart of the interrupt routine) | 735 | * (this is the heart of the interrupt routine) |
727 | */ | 736 | */ |
728 | static inline int | 737 | static int |
729 | sb1000_rx(struct net_device *dev) | 738 | sb1000_rx(struct net_device *dev) |
730 | { | 739 | { |
731 | 740 | ||
@@ -888,14 +897,15 @@ dropped_frame: | |||
888 | return -1; | 897 | return -1; |
889 | } | 898 | } |
890 | 899 | ||
891 | static inline void | 900 | static void |
892 | sb1000_error_dpc(struct net_device *dev) | 901 | sb1000_error_dpc(struct net_device *dev) |
893 | { | 902 | { |
903 | static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00}; | ||
904 | |||
894 | char *name; | 905 | char *name; |
895 | unsigned char st[5]; | 906 | unsigned char st[5]; |
896 | int ioaddr[2]; | 907 | int ioaddr[2]; |
897 | struct sb1000_private *lp = netdev_priv(dev); | 908 | struct sb1000_private *lp = netdev_priv(dev); |
898 | const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00}; | ||
899 | const int ErrorDpcCounterInitialize = 200; | 909 | const int ErrorDpcCounterInitialize = 200; |
900 | 910 | ||
901 | ioaddr[0] = dev->base_addr; | 911 | ioaddr[0] = dev->base_addr; |
@@ -1077,14 +1087,15 @@ sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1077 | /* SB1000 interrupt handler. */ | 1087 | /* SB1000 interrupt handler. */ |
1078 | static irqreturn_t sb1000_interrupt(int irq, void *dev_id) | 1088 | static irqreturn_t sb1000_interrupt(int irq, void *dev_id) |
1079 | { | 1089 | { |
1090 | static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00}; | ||
1091 | static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; | ||
1092 | |||
1080 | char *name; | 1093 | char *name; |
1081 | unsigned char st; | 1094 | unsigned char st; |
1082 | int ioaddr[2]; | 1095 | int ioaddr[2]; |
1083 | struct net_device *dev = dev_id; | 1096 | struct net_device *dev = dev_id; |
1084 | struct sb1000_private *lp = netdev_priv(dev); | 1097 | struct sb1000_private *lp = netdev_priv(dev); |
1085 | 1098 | ||
1086 | const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00}; | ||
1087 | const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; | ||
1088 | const int MaxRxErrorCount = 6; | 1099 | const int MaxRxErrorCount = 6; |
1089 | 1100 | ||
1090 | ioaddr[0] = dev->base_addr; | 1101 | ioaddr[0] = dev->base_addr; |
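Besides dropping the forced inlining, the sb1000.c hunks above turn the per-function command byte arrays into static const objects, so they are emitted once into read-only data instead of being built on the stack at each call. A small sketch of the idiom; example_send_reset() is illustrative and reuses card_send_command() from the diff.

	static int example_send_reset(const int ioaddr[], const char *name)
	{
		/* lives in rodata, shared by every call */
		static const unsigned char Command0[6] =
			{ 0x80, 0x16, 0x00, 0x00, 0x00, 0x00 };
		unsigned char st[7];

		return card_send_command(ioaddr, name, Command0, st);
	}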
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 7b53d658e337..888b7dec9866 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -2374,7 +2374,7 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
2374 | dev->name, base, print_mac(mac, eaddr)); | 2374 | dev->name, base, print_mac(mac, eaddr)); |
2375 | 2375 | ||
2376 | sc->mii_bus.name = sbmac_mdio_string; | 2376 | sc->mii_bus.name = sbmac_mdio_string; |
2377 | sc->mii_bus.id = idx; | 2377 | snprintf(sc->mii_bus.id, MII_BUS_ID_SIZE, "%x", idx); |
2378 | sc->mii_bus.priv = sc; | 2378 | sc->mii_bus.priv = sc; |
2379 | sc->mii_bus.read = sbmac_mii_read; | 2379 | sc->mii_bus.read = sbmac_mii_read; |
2380 | sc->mii_bus.write = sbmac_mii_write; | 2380 | sc->mii_bus.write = sbmac_mii_write; |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 15fcee55284e..f64a860029b7 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -311,7 +311,6 @@ struct sc92031_priv { | |||
311 | 311 | ||
312 | /* for dev->get_stats */ | 312 | /* for dev->get_stats */ |
313 | long rx_value; | 313 | long rx_value; |
314 | struct net_device_stats stats; | ||
315 | }; | 314 | }; |
316 | 315 | ||
317 | /* I don't know which registers can be safely read; however, I can guess | 316 | /* I don't know which registers can be safely read; however, I can guess |
@@ -421,7 +420,7 @@ static void _sc92031_tx_clear(struct net_device *dev) | |||
421 | 420 | ||
422 | while (priv->tx_head - priv->tx_tail > 0) { | 421 | while (priv->tx_head - priv->tx_tail > 0) { |
423 | priv->tx_tail++; | 422 | priv->tx_tail++; |
424 | priv->stats.tx_dropped++; | 423 | dev->stats.tx_dropped++; |
425 | } | 424 | } |
426 | priv->tx_head = priv->tx_tail = 0; | 425 | priv->tx_head = priv->tx_tail = 0; |
427 | } | 426 | } |
@@ -676,27 +675,27 @@ static void _sc92031_tx_tasklet(struct net_device *dev) | |||
676 | priv->tx_tail++; | 675 | priv->tx_tail++; |
677 | 676 | ||
678 | if (tx_status & TxStatOK) { | 677 | if (tx_status & TxStatOK) { |
679 | priv->stats.tx_bytes += tx_status & 0x1fff; | 678 | dev->stats.tx_bytes += tx_status & 0x1fff; |
680 | priv->stats.tx_packets++; | 679 | dev->stats.tx_packets++; |
681 | /* Note: TxCarrierLost is always asserted at 100mbps. */ | 680 | /* Note: TxCarrierLost is always asserted at 100mbps. */ |
682 | priv->stats.collisions += (tx_status >> 22) & 0xf; | 681 | dev->stats.collisions += (tx_status >> 22) & 0xf; |
683 | } | 682 | } |
684 | 683 | ||
685 | if (tx_status & (TxOutOfWindow | TxAborted)) { | 684 | if (tx_status & (TxOutOfWindow | TxAborted)) { |
686 | priv->stats.tx_errors++; | 685 | dev->stats.tx_errors++; |
687 | 686 | ||
688 | if (tx_status & TxAborted) | 687 | if (tx_status & TxAborted) |
689 | priv->stats.tx_aborted_errors++; | 688 | dev->stats.tx_aborted_errors++; |
690 | 689 | ||
691 | if (tx_status & TxCarrierLost) | 690 | if (tx_status & TxCarrierLost) |
692 | priv->stats.tx_carrier_errors++; | 691 | dev->stats.tx_carrier_errors++; |
693 | 692 | ||
694 | if (tx_status & TxOutOfWindow) | 693 | if (tx_status & TxOutOfWindow) |
695 | priv->stats.tx_window_errors++; | 694 | dev->stats.tx_window_errors++; |
696 | } | 695 | } |
697 | 696 | ||
698 | if (tx_status & TxUnderrun) | 697 | if (tx_status & TxUnderrun) |
699 | priv->stats.tx_fifo_errors++; | 698 | dev->stats.tx_fifo_errors++; |
700 | } | 699 | } |
701 | 700 | ||
702 | if (priv->tx_tail != old_tx_tail) | 701 | if (priv->tx_tail != old_tx_tail) |
@@ -704,27 +703,29 @@ static void _sc92031_tx_tasklet(struct net_device *dev) | |||
704 | netif_wake_queue(dev); | 703 | netif_wake_queue(dev); |
705 | } | 704 | } |
706 | 705 | ||
707 | static void _sc92031_rx_tasklet_error(u32 rx_status, | 706 | static void _sc92031_rx_tasklet_error(struct net_device *dev, |
708 | struct sc92031_priv *priv, unsigned rx_size) | 707 | u32 rx_status, unsigned rx_size) |
709 | { | 708 | { |
710 | if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { | 709 | if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { |
711 | priv->stats.rx_errors++; | 710 | dev->stats.rx_errors++; |
712 | priv->stats.rx_length_errors++; | 711 | dev->stats.rx_length_errors++; |
713 | } | 712 | } |
714 | 713 | ||
715 | if (!(rx_status & RxStatesOK)) { | 714 | if (!(rx_status & RxStatesOK)) { |
716 | priv->stats.rx_errors++; | 715 | dev->stats.rx_errors++; |
717 | 716 | ||
718 | if (rx_status & (RxHugeFrame | RxSmallFrame)) | 717 | if (rx_status & (RxHugeFrame | RxSmallFrame)) |
719 | priv->stats.rx_length_errors++; | 718 | dev->stats.rx_length_errors++; |
720 | 719 | ||
721 | if (rx_status & RxBadAlign) | 720 | if (rx_status & RxBadAlign) |
722 | priv->stats.rx_frame_errors++; | 721 | dev->stats.rx_frame_errors++; |
723 | 722 | ||
724 | if (!(rx_status & RxCRCOK)) | 723 | if (!(rx_status & RxCRCOK)) |
725 | priv->stats.rx_crc_errors++; | 724 | dev->stats.rx_crc_errors++; |
726 | } else | 725 | } else { |
726 | struct sc92031_priv *priv = netdev_priv(dev); | ||
727 | priv->rx_loss++; | 727 | priv->rx_loss++; |
728 | } | ||
728 | } | 729 | } |
729 | 730 | ||
730 | static void _sc92031_rx_tasklet(struct net_device *dev) | 731 | static void _sc92031_rx_tasklet(struct net_device *dev) |
@@ -783,7 +784,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
783 | || rx_size > (MAX_ETH_FRAME_SIZE + 4) | 784 | || rx_size > (MAX_ETH_FRAME_SIZE + 4) |
784 | || rx_size < 16 | 785 | || rx_size < 16 |
785 | || !(rx_status & RxStatesOK))) { | 786 | || !(rx_status & RxStatesOK))) { |
786 | _sc92031_rx_tasklet_error(rx_status, priv, rx_size); | 787 | _sc92031_rx_tasklet_error(dev, rx_status, rx_size); |
787 | break; | 788 | break; |
788 | } | 789 | } |
789 | 790 | ||
@@ -795,7 +796,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
795 | 796 | ||
796 | rx_len -= rx_size_align + 4; | 797 | rx_len -= rx_size_align + 4; |
797 | 798 | ||
798 | skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); | 799 | skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); |
799 | if (unlikely(!skb)) { | 800 | if (unlikely(!skb)) { |
800 | if (printk_ratelimit()) | 801 | if (printk_ratelimit()) |
801 | printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", | 802 | printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", |
@@ -818,11 +819,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
818 | dev->last_rx = jiffies; | 819 | dev->last_rx = jiffies; |
819 | netif_rx(skb); | 820 | netif_rx(skb); |
820 | 821 | ||
821 | priv->stats.rx_bytes += pkt_size; | 822 | dev->stats.rx_bytes += pkt_size; |
822 | priv->stats.rx_packets++; | 823 | dev->stats.rx_packets++; |
823 | 824 | ||
824 | if (rx_status & Rx_Multicast) | 825 | if (rx_status & Rx_Multicast) |
825 | priv->stats.multicast++; | 826 | dev->stats.multicast++; |
826 | 827 | ||
827 | next: | 828 | next: |
828 | rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; | 829 | rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; |
@@ -835,13 +836,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev) | |||
835 | 836 | ||
836 | static void _sc92031_link_tasklet(struct net_device *dev) | 837 | static void _sc92031_link_tasklet(struct net_device *dev) |
837 | { | 838 | { |
838 | struct sc92031_priv *priv = netdev_priv(dev); | ||
839 | |||
840 | if (_sc92031_check_media(dev)) | 839 | if (_sc92031_check_media(dev)) |
841 | netif_wake_queue(dev); | 840 | netif_wake_queue(dev); |
842 | else { | 841 | else { |
843 | netif_stop_queue(dev); | 842 | netif_stop_queue(dev); |
844 | priv->stats.tx_carrier_errors++; | 843 | dev->stats.tx_carrier_errors++; |
845 | } | 844 | } |
846 | } | 845 | } |
847 | 846 | ||
@@ -866,11 +865,11 @@ static void sc92031_tasklet(unsigned long data) | |||
866 | _sc92031_rx_tasklet(dev); | 865 | _sc92031_rx_tasklet(dev); |
867 | 866 | ||
868 | if (intr_status & RxOverflow) | 867 | if (intr_status & RxOverflow) |
869 | priv->stats.rx_errors++; | 868 | dev->stats.rx_errors++; |
870 | 869 | ||
871 | if (intr_status & TimeOut) { | 870 | if (intr_status & TimeOut) { |
872 | priv->stats.rx_errors++; | 871 | dev->stats.rx_errors++; |
873 | priv->stats.rx_length_errors++; | 872 | dev->stats.rx_length_errors++; |
874 | } | 873 | } |
875 | 874 | ||
876 | if (intr_status & (LinkFail | LinkOK)) | 875 | if (intr_status & (LinkFail | LinkOK)) |
@@ -936,38 +935,36 @@ static struct net_device_stats *sc92031_get_stats(struct net_device *dev) | |||
936 | 935 | ||
937 | if (temp == 0xffff) { | 936 | if (temp == 0xffff) { |
938 | priv->rx_value += temp; | 937 | priv->rx_value += temp; |
939 | priv->stats.rx_fifo_errors = priv->rx_value; | 938 | dev->stats.rx_fifo_errors = priv->rx_value; |
940 | } else { | 939 | } else |
941 | priv->stats.rx_fifo_errors = temp + priv->rx_value; | 940 | dev->stats.rx_fifo_errors = temp + priv->rx_value; |
942 | } | ||
943 | 941 | ||
944 | spin_unlock_bh(&priv->lock); | 942 | spin_unlock_bh(&priv->lock); |
945 | } | 943 | } |
946 | 944 | ||
947 | return &priv->stats; | 945 | return &dev->stats; |
948 | } | 946 | } |
949 | 947 | ||
950 | static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | 948 | static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) |
951 | { | 949 | { |
952 | int err = 0; | ||
953 | struct sc92031_priv *priv = netdev_priv(dev); | 950 | struct sc92031_priv *priv = netdev_priv(dev); |
954 | void __iomem *port_base = priv->port_base; | 951 | void __iomem *port_base = priv->port_base; |
955 | |||
956 | unsigned len; | 952 | unsigned len; |
957 | unsigned entry; | 953 | unsigned entry; |
958 | u32 tx_status; | 954 | u32 tx_status; |
959 | 955 | ||
956 | if (skb_padto(skb, ETH_ZLEN)) | ||
957 | return NETDEV_TX_OK; | ||
958 | |||
960 | if (unlikely(skb->len > TX_BUF_SIZE)) { | 959 | if (unlikely(skb->len > TX_BUF_SIZE)) { |
961 | err = -EMSGSIZE; | 960 | dev->stats.tx_dropped++; |
962 | priv->stats.tx_dropped++; | ||
963 | goto out; | 961 | goto out; |
964 | } | 962 | } |
965 | 963 | ||
966 | spin_lock(&priv->lock); | 964 | spin_lock(&priv->lock); |
967 | 965 | ||
968 | if (unlikely(!netif_carrier_ok(dev))) { | 966 | if (unlikely(!netif_carrier_ok(dev))) { |
969 | err = -ENOLINK; | 967 | dev->stats.tx_dropped++; |
970 | priv->stats.tx_dropped++; | ||
971 | goto out_unlock; | 968 | goto out_unlock; |
972 | } | 969 | } |
973 | 970 | ||
@@ -978,11 +975,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
978 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); | 975 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); |
979 | 976 | ||
980 | len = skb->len; | 977 | len = skb->len; |
981 | if (unlikely(len < ETH_ZLEN)) { | ||
982 | memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, | ||
983 | 0, ETH_ZLEN - len); | ||
984 | len = ETH_ZLEN; | ||
985 | } | ||
986 | 978 | ||
987 | wmb(); | 979 | wmb(); |
988 | 980 | ||
@@ -1009,7 +1001,7 @@ out_unlock: | |||
1009 | out: | 1001 | out: |
1010 | dev_kfree_skb(skb); | 1002 | dev_kfree_skb(skb); |
1011 | 1003 | ||
1012 | return err; | 1004 | return NETDEV_TX_OK; |
1013 | } | 1005 | } |
1014 | 1006 | ||
1015 | static int sc92031_open(struct net_device *dev) | 1007 | static int sc92031_open(struct net_device *dev) |
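The sc92031 transmit-path hunks above replace manual zero-padding with skb_padto() and make every exit return NETDEV_TX_OK, counting drops in dev->stats instead of handing an error code back to the stack. A hedged sketch of that shape; example_start_xmit() and EXAMPLE_TX_BUF_SIZE are illustrative, not driver code.

	#define EXAMPLE_TX_BUF_SIZE	1536	/* hypothetical per-slot buffer size */

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* pads to ETH_ZLEN; frees the skb itself if that fails */
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;

		if (skb->len > EXAMPLE_TX_BUF_SIZE) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* ... copy skb->data into the device ring and kick the hardware ... */

		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}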
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index bccae7e5c6ad..477671606273 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1399,6 +1399,8 @@ spider_net_link_reset(struct net_device *netdev) | |||
1399 | spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); | 1399 | spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); |
1400 | 1400 | ||
1401 | /* reset phy and setup aneg */ | 1401 | /* reset phy and setup aneg */ |
1402 | card->aneg_count = 0; | ||
1403 | card->medium = BCM54XX_COPPER; | ||
1402 | spider_net_setup_aneg(card); | 1404 | spider_net_setup_aneg(card); |
1403 | mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); | 1405 | mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); |
1404 | 1406 | ||
@@ -1413,18 +1415,12 @@ spider_net_link_reset(struct net_device *netdev) | |||
1413 | * found when an interrupt is presented | 1415 | * found when an interrupt is presented |
1414 | */ | 1416 | */ |
1415 | static void | 1417 | static void |
1416 | spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) | 1418 | spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, |
1419 | u32 error_reg1, u32 error_reg2) | ||
1417 | { | 1420 | { |
1418 | u32 error_reg1, error_reg2; | ||
1419 | u32 i; | 1421 | u32 i; |
1420 | int show_error = 1; | 1422 | int show_error = 1; |
1421 | 1423 | ||
1422 | error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); | ||
1423 | error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); | ||
1424 | |||
1425 | error_reg1 &= SPIDER_NET_INT1_MASK_VALUE; | ||
1426 | error_reg2 &= SPIDER_NET_INT2_MASK_VALUE; | ||
1427 | |||
1428 | /* check GHIINT0STS ************************************/ | 1424 | /* check GHIINT0STS ************************************/ |
1429 | if (status_reg) | 1425 | if (status_reg) |
1430 | for (i = 0; i < 32; i++) | 1426 | for (i = 0; i < 32; i++) |
@@ -1654,12 +1650,15 @@ spider_net_interrupt(int irq, void *ptr) | |||
1654 | { | 1650 | { |
1655 | struct net_device *netdev = ptr; | 1651 | struct net_device *netdev = ptr; |
1656 | struct spider_net_card *card = netdev_priv(netdev); | 1652 | struct spider_net_card *card = netdev_priv(netdev); |
1657 | u32 status_reg; | 1653 | u32 status_reg, error_reg1, error_reg2; |
1658 | 1654 | ||
1659 | status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); | 1655 | status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); |
1660 | status_reg &= SPIDER_NET_INT0_MASK_VALUE; | 1656 | error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); |
1657 | error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); | ||
1661 | 1658 | ||
1662 | if (!status_reg) | 1659 | if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) && |
1660 | !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) && | ||
1661 | !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE)) | ||
1663 | return IRQ_NONE; | 1662 | return IRQ_NONE; |
1664 | 1663 | ||
1665 | if (status_reg & SPIDER_NET_RXINT ) { | 1664 | if (status_reg & SPIDER_NET_RXINT ) { |
@@ -1674,7 +1673,8 @@ spider_net_interrupt(int irq, void *ptr) | |||
1674 | spider_net_link_reset(netdev); | 1673 | spider_net_link_reset(netdev); |
1675 | 1674 | ||
1676 | if (status_reg & SPIDER_NET_ERRINT ) | 1675 | if (status_reg & SPIDER_NET_ERRINT ) |
1677 | spider_net_handle_error_irq(card, status_reg); | 1676 | spider_net_handle_error_irq(card, status_reg, |
1677 | error_reg1, error_reg2); | ||
1678 | 1678 | ||
1679 | /* clear interrupt sources */ | 1679 | /* clear interrupt sources */ |
1680 | spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); | 1680 | spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); |
@@ -1982,6 +1982,8 @@ spider_net_open(struct net_device *netdev) | |||
1982 | goto init_firmware_failed; | 1982 | goto init_firmware_failed; |
1983 | 1983 | ||
1984 | /* start probing with copper */ | 1984 | /* start probing with copper */ |
1985 | card->aneg_count = 0; | ||
1986 | card->medium = BCM54XX_COPPER; | ||
1985 | spider_net_setup_aneg(card); | 1987 | spider_net_setup_aneg(card); |
1986 | if (card->phy.def->phy_id) | 1988 | if (card->phy.def->phy_id) |
1987 | mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); | 1989 | mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); |
@@ -2043,7 +2045,8 @@ static void spider_net_link_phy(unsigned long data) | |||
2043 | /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ | 2045 | /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ |
2044 | if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { | 2046 | if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { |
2045 | 2047 | ||
2046 | pr_info("%s: link is down trying to bring it up\n", card->netdev->name); | 2048 | pr_debug("%s: link is down trying to bring it up\n", |
2049 | card->netdev->name); | ||
2047 | 2050 | ||
2048 | switch (card->medium) { | 2051 | switch (card->medium) { |
2049 | case BCM54XX_COPPER: | 2052 | case BCM54XX_COPPER: |
@@ -2094,9 +2097,10 @@ static void spider_net_link_phy(unsigned long data) | |||
2094 | 2097 | ||
2095 | card->aneg_count = 0; | 2098 | card->aneg_count = 0; |
2096 | 2099 | ||
2097 | pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n", | 2100 | pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n", |
2098 | phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half", | 2101 | card->netdev->name, phy->speed, |
2099 | phy->autoneg==1 ? "" : "no "); | 2102 | phy->duplex == 1 ? "Full" : "Half", |
2103 | phy->autoneg == 1 ? "" : "no "); | ||
2100 | 2104 | ||
2101 | return; | 2105 | return; |
2102 | } | 2106 | } |
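The spider_net.c hunks above are spread across several functions; pulled together, the new interrupt entry reads all three status registers and claims the interrupt only if at least one has a bit set under its mask, so error-only interrupts are no longer ignored. A hedged sketch consolidating that logic, using only names visible in the diff; example_interrupt() is illustrative.

	static irqreturn_t example_interrupt(int irq, void *ptr)
	{
		struct net_device *netdev = ptr;
		struct spider_net_card *card = netdev_priv(netdev);
		u32 status, err1, err2;

		status = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
		err1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
		err2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);

		if (!(status & SPIDER_NET_INT0_MASK_VALUE) &&
		    !(err1 & SPIDER_NET_INT1_MASK_VALUE) &&
		    !(err2 & SPIDER_NET_INT2_MASK_VALUE))
			return IRQ_NONE;	/* not ours */

		/* ... dispatch rx/tx/link/error handling as the driver does ... */
		return IRQ_HANDLED;
	}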
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index e1d05c0f47eb..05f74cbdd617 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -52,7 +52,7 @@ extern char spider_net_driver_name[]; | |||
52 | 52 | ||
53 | #define SPIDER_NET_TX_TIMER (HZ/5) | 53 | #define SPIDER_NET_TX_TIMER (HZ/5) |
54 | #define SPIDER_NET_ANEG_TIMER (HZ) | 54 | #define SPIDER_NET_ANEG_TIMER (HZ) |
55 | #define SPIDER_NET_ANEG_TIMEOUT 2 | 55 | #define SPIDER_NET_ANEG_TIMEOUT 5 |
56 | 56 | ||
57 | #define SPIDER_NET_RX_CSUM_DEFAULT 1 | 57 | #define SPIDER_NET_RX_CSUM_DEFAULT 1 |
58 | 58 | ||
@@ -159,9 +159,8 @@ extern char spider_net_driver_name[]; | |||
159 | 159 | ||
160 | /** interrupt mask registers */ | 160 | /** interrupt mask registers */ |
161 | #define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 | 161 | #define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 |
162 | #define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7 | 162 | #define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2 |
163 | /* no MAC aborts -> auto retransmission */ | 163 | #define SPIDER_NET_INT2_MASK_VALUE 0x000003f1 |
164 | #define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1 | ||
165 | 164 | ||
166 | /* we rely on flagged descriptor interrupts */ | 165 | /* we rely on flagged descriptor interrupts */ |
167 | #define SPIDER_NET_FRAMENUM_VALUE 0x00000000 | 166 | #define SPIDER_NET_FRAMENUM_VALUE 0x00000000 |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 370d329d15d9..10e4e85da3fc 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -23,9 +23,9 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #ifdef TC35815_NAPI | 25 | #ifdef TC35815_NAPI |
26 | #define DRV_VERSION "1.36-NAPI" | 26 | #define DRV_VERSION "1.37-NAPI" |
27 | #else | 27 | #else |
28 | #define DRV_VERSION "1.36" | 28 | #define DRV_VERSION "1.37" |
29 | #endif | 29 | #endif |
30 | static const char *version = "tc35815.c:v" DRV_VERSION "\n"; | 30 | static const char *version = "tc35815.c:v" DRV_VERSION "\n"; |
31 | #define MODNAME "tc35815" | 31 | #define MODNAME "tc35815" |
@@ -47,8 +47,8 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n"; | |||
47 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
48 | #include <linux/delay.h> | 48 | #include <linux/delay.h> |
49 | #include <linux/pci.h> | 49 | #include <linux/pci.h> |
50 | #include <linux/mii.h> | 50 | #include <linux/phy.h> |
51 | #include <linux/ethtool.h> | 51 | #include <linux/workqueue.h> |
52 | #include <linux/platform_device.h> | 52 | #include <linux/platform_device.h> |
53 | #include <asm/io.h> | 53 | #include <asm/io.h> |
54 | #include <asm/byteorder.h> | 54 | #include <asm/byteorder.h> |
@@ -60,16 +60,16 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n"; | |||
60 | #define WORKAROUND_100HALF_PROMISC | 60 | #define WORKAROUND_100HALF_PROMISC |
61 | /* #define TC35815_USE_PACKEDBUFFER */ | 61 | /* #define TC35815_USE_PACKEDBUFFER */ |
62 | 62 | ||
63 | typedef enum { | 63 | enum tc35815_chiptype { |
64 | TC35815CF = 0, | 64 | TC35815CF = 0, |
65 | TC35815_NWU, | 65 | TC35815_NWU, |
66 | TC35815_TX4939, | 66 | TC35815_TX4939, |
67 | } board_t; | 67 | }; |
68 | 68 | ||
69 | /* indexed by board_t, above */ | 69 | /* indexed by tc35815_chiptype, above */ |
70 | static const struct { | 70 | static const struct { |
71 | const char *name; | 71 | const char *name; |
72 | } board_info[] __devinitdata = { | 72 | } chip_info[] __devinitdata = { |
73 | { "TOSHIBA TC35815CF 10/100BaseTX" }, | 73 | { "TOSHIBA TC35815CF 10/100BaseTX" }, |
74 | { "TOSHIBA TC35815 with Wake on LAN" }, | 74 | { "TOSHIBA TC35815 with Wake on LAN" }, |
75 | { "TOSHIBA TC35815/TX4939" }, | 75 | { "TOSHIBA TC35815/TX4939" }, |
@@ -81,209 +81,208 @@ static const struct pci_device_id tc35815_pci_tbl[] = { | |||
81 | {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, | 81 | {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, |
82 | {0,} | 82 | {0,} |
83 | }; | 83 | }; |
84 | MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl); | 84 | MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl); |
85 | 85 | ||
86 | /* see MODULE_PARM_DESC */ | 86 | /* see MODULE_PARM_DESC */ |
87 | static struct tc35815_options { | 87 | static struct tc35815_options { |
88 | int speed; | 88 | int speed; |
89 | int duplex; | 89 | int duplex; |
90 | int doforce; | ||
91 | } options; | 90 | } options; |
92 | 91 | ||
93 | /* | 92 | /* |
94 | * Registers | 93 | * Registers |
95 | */ | 94 | */ |
96 | struct tc35815_regs { | 95 | struct tc35815_regs { |
97 | volatile __u32 DMA_Ctl; /* 0x00 */ | 96 | __u32 DMA_Ctl; /* 0x00 */ |
98 | volatile __u32 TxFrmPtr; | 97 | __u32 TxFrmPtr; |
99 | volatile __u32 TxThrsh; | 98 | __u32 TxThrsh; |
100 | volatile __u32 TxPollCtr; | 99 | __u32 TxPollCtr; |
101 | volatile __u32 BLFrmPtr; | 100 | __u32 BLFrmPtr; |
102 | volatile __u32 RxFragSize; | 101 | __u32 RxFragSize; |
103 | volatile __u32 Int_En; | 102 | __u32 Int_En; |
104 | volatile __u32 FDA_Bas; | 103 | __u32 FDA_Bas; |
105 | volatile __u32 FDA_Lim; /* 0x20 */ | 104 | __u32 FDA_Lim; /* 0x20 */ |
106 | volatile __u32 Int_Src; | 105 | __u32 Int_Src; |
107 | volatile __u32 unused0[2]; | 106 | __u32 unused0[2]; |
108 | volatile __u32 PauseCnt; | 107 | __u32 PauseCnt; |
109 | volatile __u32 RemPauCnt; | 108 | __u32 RemPauCnt; |
110 | volatile __u32 TxCtlFrmStat; | 109 | __u32 TxCtlFrmStat; |
111 | volatile __u32 unused1; | 110 | __u32 unused1; |
112 | volatile __u32 MAC_Ctl; /* 0x40 */ | 111 | __u32 MAC_Ctl; /* 0x40 */ |
113 | volatile __u32 CAM_Ctl; | 112 | __u32 CAM_Ctl; |
114 | volatile __u32 Tx_Ctl; | 113 | __u32 Tx_Ctl; |
115 | volatile __u32 Tx_Stat; | 114 | __u32 Tx_Stat; |
116 | volatile __u32 Rx_Ctl; | 115 | __u32 Rx_Ctl; |
117 | volatile __u32 Rx_Stat; | 116 | __u32 Rx_Stat; |
118 | volatile __u32 MD_Data; | 117 | __u32 MD_Data; |
119 | volatile __u32 MD_CA; | 118 | __u32 MD_CA; |
120 | volatile __u32 CAM_Adr; /* 0x60 */ | 119 | __u32 CAM_Adr; /* 0x60 */ |
121 | volatile __u32 CAM_Data; | 120 | __u32 CAM_Data; |
122 | volatile __u32 CAM_Ena; | 121 | __u32 CAM_Ena; |
123 | volatile __u32 PROM_Ctl; | 122 | __u32 PROM_Ctl; |
124 | volatile __u32 PROM_Data; | 123 | __u32 PROM_Data; |
125 | volatile __u32 Algn_Cnt; | 124 | __u32 Algn_Cnt; |
126 | volatile __u32 CRC_Cnt; | 125 | __u32 CRC_Cnt; |
127 | volatile __u32 Miss_Cnt; | 126 | __u32 Miss_Cnt; |
128 | }; | 127 | }; |
129 | 128 | ||
130 | /* | 129 | /* |
131 | * Bit assignments | 130 | * Bit assignments |
132 | */ | 131 | */ |
133 | /* DMA_Ctl bit asign ------------------------------------------------------- */ | 132 | /* DMA_Ctl bit asign ------------------------------------------------------- */ |
134 | #define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ | 133 | #define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ |
135 | #define DMA_RxAlign_1 0x00400000 | 134 | #define DMA_RxAlign_1 0x00400000 |
136 | #define DMA_RxAlign_2 0x00800000 | 135 | #define DMA_RxAlign_2 0x00800000 |
137 | #define DMA_RxAlign_3 0x00c00000 | 136 | #define DMA_RxAlign_3 0x00c00000 |
138 | #define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ | 137 | #define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ |
139 | #define DMA_IntMask 0x00040000 /* 1:Interupt mask */ | 138 | #define DMA_IntMask 0x00040000 /* 1:Interupt mask */ |
140 | #define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ | 139 | #define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ |
141 | #define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ | 140 | #define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ |
142 | #define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ | 141 | #define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ |
143 | #define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ | 142 | #define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ |
144 | #define DMA_TestMode 0x00002000 /* 1:Test Mode */ | 143 | #define DMA_TestMode 0x00002000 /* 1:Test Mode */ |
145 | #define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ | 144 | #define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ |
146 | #define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ | 145 | #define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ |
147 | 146 | ||
148 | /* RxFragSize bit asign ---------------------------------------------------- */ | 147 | /* RxFragSize bit asign ---------------------------------------------------- */ |
149 | #define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ | 148 | #define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ |
150 | #define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ | 149 | #define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ |
151 | 150 | ||
152 | /* MAC_Ctl bit asign ------------------------------------------------------- */ | 151 | /* MAC_Ctl bit asign ------------------------------------------------------- */ |
153 | #define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ | 152 | #define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ |
154 | #define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ | 153 | #define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ |
155 | #define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ | 154 | #define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ |
156 | #define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ | 155 | #define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ |
157 | #define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ | 156 | #define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ |
158 | #define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ | 157 | #define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ |
159 | #define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ | 158 | #define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ |
160 | #define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ | 159 | #define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ |
161 | #define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ | 160 | #define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ |
162 | #define MAC_Reset 0x00000004 /* 1:Software Reset */ | 161 | #define MAC_Reset 0x00000004 /* 1:Software Reset */ |
163 | #define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ | 162 | #define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ |
164 | #define MAC_HaltReq 0x00000001 /* 1:Halt request */ | 163 | #define MAC_HaltReq 0x00000001 /* 1:Halt request */ |
165 | 164 | ||
166 | /* PROM_Ctl bit asign ------------------------------------------------------ */ | 165 | /* PROM_Ctl bit asign ------------------------------------------------------ */ |
167 | #define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ | 166 | #define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ |
168 | #define PROM_Read 0x00004000 /*10:Read operation */ | 167 | #define PROM_Read 0x00004000 /*10:Read operation */ |
169 | #define PROM_Write 0x00002000 /*01:Write operation */ | 168 | #define PROM_Write 0x00002000 /*01:Write operation */ |
170 | #define PROM_Erase 0x00006000 /*11:Erase operation */ | 169 | #define PROM_Erase 0x00006000 /*11:Erase operation */ |
171 | /*00:Enable or Disable Writting, */ | 170 | /*00:Enable or Disable Writting, */ |
172 | /* as specified in PROM_Addr. */ | 171 | /* as specified in PROM_Addr. */ |
173 | #define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ | 172 | #define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ |
174 | /*00xxxx: disable */ | 173 | /*00xxxx: disable */ |
175 | 174 | ||
176 | /* CAM_Ctl bit asign ------------------------------------------------------- */ | 175 | /* CAM_Ctl bit asign ------------------------------------------------------- */ |
177 | #define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ | 176 | #define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ |
178 | #define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ | 177 | #define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ |
179 | /* accept other */ | 178 | /* accept other */ |
180 | #define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ | 179 | #define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ |
181 | #define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ | 180 | #define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ |
182 | #define CAM_StationAcc 0x00000001 /* 1:unicast accept */ | 181 | #define CAM_StationAcc 0x00000001 /* 1:unicast accept */ |
183 | 182 | ||
184 | /* CAM_Ena bit asign ------------------------------------------------------- */ | 183 | /* CAM_Ena bit asign ------------------------------------------------------- */ |
185 | #define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ | 184 | #define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ |
186 | #define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ | 185 | #define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ |
187 | #define CAM_Ena_Bit(index) (1<<(index)) | 186 | #define CAM_Ena_Bit(index) (1 << (index)) |
188 | #define CAM_ENTRY_DESTINATION 0 | 187 | #define CAM_ENTRY_DESTINATION 0 |
189 | #define CAM_ENTRY_SOURCE 1 | 188 | #define CAM_ENTRY_SOURCE 1 |
190 | #define CAM_ENTRY_MACCTL 20 | 189 | #define CAM_ENTRY_MACCTL 20 |
191 | 190 | ||
192 | /* Tx_Ctl bit asign -------------------------------------------------------- */ | 191 | /* Tx_Ctl bit asign -------------------------------------------------------- */ |
193 | #define Tx_En 0x00000001 /* 1:Transmit enable */ | 192 | #define Tx_En 0x00000001 /* 1:Transmit enable */ |
194 | #define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ | 193 | #define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ |
195 | #define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ | 194 | #define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ |
196 | #define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ | 195 | #define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ |
197 | #define Tx_FBack 0x00000010 /* 1:Fast Back-off */ | 196 | #define Tx_FBack 0x00000010 /* 1:Fast Back-off */ |
198 | #define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ | 197 | #define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ |
199 | #define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ | 198 | #define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ |
200 | #define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ | 199 | #define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ |
201 | #define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ | 200 | #define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ |
202 | #define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ | 201 | #define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ |
203 | #define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ | 202 | #define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ |
204 | #define Tx_EnComp 0x00004000 /* 1:Enable Completion */ | 203 | #define Tx_EnComp 0x00004000 /* 1:Enable Completion */ |
205 | 204 | ||
206 | /* Tx_Stat bit asign ------------------------------------------------------- */ | 205 | /* Tx_Stat bit asign ------------------------------------------------------- */ |
207 | #define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ | 206 | #define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ |
208 | #define Tx_ExColl 0x00000010 /* Excessive Collision */ | 207 | #define Tx_ExColl 0x00000010 /* Excessive Collision */ |
209 | #define Tx_TXDefer 0x00000020 /* Transmit Defered */ | 208 | #define Tx_TXDefer 0x00000020 /* Transmit Defered */ |
210 | #define Tx_Paused 0x00000040 /* Transmit Paused */ | 209 | #define Tx_Paused 0x00000040 /* Transmit Paused */ |
211 | #define Tx_IntTx 0x00000080 /* Interrupt on Tx */ | 210 | #define Tx_IntTx 0x00000080 /* Interrupt on Tx */ |
212 | #define Tx_Under 0x00000100 /* Underrun */ | 211 | #define Tx_Under 0x00000100 /* Underrun */ |
213 | #define Tx_Defer 0x00000200 /* Deferral */ | 212 | #define Tx_Defer 0x00000200 /* Deferral */ |
214 | #define Tx_NCarr 0x00000400 /* No Carrier */ | 213 | #define Tx_NCarr 0x00000400 /* No Carrier */ |
215 | #define Tx_10Stat 0x00000800 /* 10Mbps Status */ | 214 | #define Tx_10Stat 0x00000800 /* 10Mbps Status */ |
216 | #define Tx_LateColl 0x00001000 /* Late Collision */ | 215 | #define Tx_LateColl 0x00001000 /* Late Collision */ |
217 | #define Tx_TxPar 0x00002000 /* Tx Parity Error */ | 216 | #define Tx_TxPar 0x00002000 /* Tx Parity Error */ |
218 | #define Tx_Comp 0x00004000 /* Completion */ | 217 | #define Tx_Comp 0x00004000 /* Completion */ |
219 | #define Tx_Halted 0x00008000 /* Tx Halted */ | 218 | #define Tx_Halted 0x00008000 /* Tx Halted */ |
220 | #define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ | 219 | #define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ |
221 | 220 | ||
222 | /* Rx_Ctl bit asign -------------------------------------------------------- */ | 221 | /* Rx_Ctl bit asign -------------------------------------------------------- */ |
223 | #define Rx_EnGood 0x00004000 /* 1:Enable Good */ | 222 | #define Rx_EnGood 0x00004000 /* 1:Enable Good */ |
224 | #define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ | 223 | #define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ |
225 | #define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ | 224 | #define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ |
226 | #define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ | 225 | #define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ |
227 | #define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ | 226 | #define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ |
228 | #define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ | 227 | #define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ |
229 | #define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ | 228 | #define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ |
230 | #define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ | 229 | #define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ |
231 | #define Rx_ShortEn 0x00000008 /* 1:Short Enable */ | 230 | #define Rx_ShortEn 0x00000008 /* 1:Short Enable */ |
232 | #define Rx_LongEn 0x00000004 /* 1:Long Enable */ | 231 | #define Rx_LongEn 0x00000004 /* 1:Long Enable */ |
233 | #define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ | 232 | #define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ |
234 | #define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ | 233 | #define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ |
235 | 234 | ||
236 | /* Rx_Stat bit asign ------------------------------------------------------- */ | 235 | /* Rx_Stat bit asign ------------------------------------------------------- */ |
237 | #define Rx_Halted 0x00008000 /* Rx Halted */ | 236 | #define Rx_Halted 0x00008000 /* Rx Halted */ |
238 | #define Rx_Good 0x00004000 /* Rx Good */ | 237 | #define Rx_Good 0x00004000 /* Rx Good */ |
239 | #define Rx_RxPar 0x00002000 /* Rx Parity Error */ | 238 | #define Rx_RxPar 0x00002000 /* Rx Parity Error */ |
240 | /* 0x00001000 not use */ | 239 | /* 0x00001000 not use */ |
241 | #define Rx_LongErr 0x00000800 /* Rx Long Error */ | 240 | #define Rx_LongErr 0x00000800 /* Rx Long Error */ |
242 | #define Rx_Over 0x00000400 /* Rx Overflow */ | 241 | #define Rx_Over 0x00000400 /* Rx Overflow */ |
243 | #define Rx_CRCErr 0x00000200 /* Rx CRC Error */ | 242 | #define Rx_CRCErr 0x00000200 /* Rx CRC Error */ |
244 | #define Rx_Align 0x00000100 /* Rx Alignment Error */ | 243 | #define Rx_Align 0x00000100 /* Rx Alignment Error */ |
245 | #define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ | 244 | #define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ |
246 | #define Rx_IntRx 0x00000040 /* Rx Interrupt */ | 245 | #define Rx_IntRx 0x00000040 /* Rx Interrupt */ |
247 | #define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ | 246 | #define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ |
248 | 247 | ||
249 | #define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ | 248 | #define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ |
250 | 249 | ||
251 | /* Int_En bit asign -------------------------------------------------------- */ | 250 | /* Int_En bit asign -------------------------------------------------------- */ |
252 | #define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ | 251 | #define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ |
253 | #define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Control Complete Enable */ | 252 | #define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */ |
254 | #define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ | 253 | #define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ |
255 | #define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ | 254 | #define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ |
256 | #define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ | 255 | #define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ |
257 | #define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ | 256 | #define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ |
258 | #define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ | 257 | #define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ |
259 | #define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ | 258 | #define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ |
260 | #define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ | 259 | #define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ |
261 | #define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ | 260 | #define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ |
262 | #define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ | 261 | #define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ |
263 | #define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ | 262 | #define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ |
264 | /* Exhausted Enable */ | 263 | /* Exhausted Enable */ |
265 | 264 | ||
266 | /* Int_Src bit asign ------------------------------------------------------- */ | 265 | /* Int_Src bit asign ------------------------------------------------------- */ |
267 | #define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ | 266 | #define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ |
268 | #define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ | 267 | #define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ |
269 | #define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ | 268 | #define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ |
270 | #define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ | 269 | #define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ |
271 | #define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ | 270 | #define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ |
272 | #define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ | 271 | #define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ |
273 | #define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ | 272 | #define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ |
274 | #define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ | 273 | #define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ |
275 | #define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ | 274 | #define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ |
276 | #define Int_SWInt 0x00000020 /* 1:Software request & Clear */ | 275 | #define Int_SWInt 0x00000020 /* 1:Software request & Clear */ |
277 | #define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ | 276 | #define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ |
278 | #define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ | 277 | #define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ |
279 | #define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ | 278 | #define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ |
280 | #define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ | 279 | #define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ |
281 | #define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ | 280 | #define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ |
282 | 281 | ||
283 | /* MD_CA bit asign --------------------------------------------------------- */ | 282 | /* MD_CA bit asign --------------------------------------------------------- */ |
284 | #define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */ | 283 | #define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */ |
285 | #define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ | 284 | #define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ |
286 | #define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ | 285 | #define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ |
287 | 286 | ||
288 | 287 | ||
289 | /* | 288 | /* |
@@ -307,24 +306,24 @@ struct BDesc { | |||
307 | #define FD_ALIGN 16 | 306 | #define FD_ALIGN 16 |
308 | 307 | ||
309 | /* Frame Descripter bit asign ---------------------------------------------- */ | 308 | /* Frame Descripter bit asign ---------------------------------------------- */ |
310 | #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ | 309 | #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ |
311 | #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ | 310 | #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ |
312 | #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ | 311 | #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ |
313 | #define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ | 312 | #define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ |
314 | #define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ | 313 | #define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ |
315 | #define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ | 314 | #define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ |
316 | #define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ | 315 | #define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ |
317 | #define FD_FrmOpt_Packing 0x04000000 /* Rx only */ | 316 | #define FD_FrmOpt_Packing 0x04000000 /* Rx only */ |
318 | #define FD_CownsFD 0x80000000 /* FD Controller owner bit */ | 317 | #define FD_CownsFD 0x80000000 /* FD Controller owner bit */ |
319 | #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ | 318 | #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ |
320 | #define FD_BDCnt_SHIFT 16 | 319 | #define FD_BDCnt_SHIFT 16 |
321 | 320 | ||
322 | /* Buffer Descripter bit asign --------------------------------------------- */ | 321 | /* Buffer Descripter bit asign --------------------------------------------- */ |
323 | #define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */ | 322 | #define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */ |
324 | #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ | 323 | #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ |
325 | #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ | 324 | #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ |
326 | #define BD_CownsBD 0x80000000 /* BD Controller owner bit */ | 325 | #define BD_CownsBD 0x80000000 /* BD Controller owner bit */ |
327 | #define BD_RxBDID_SHIFT 16 | 326 | #define BD_RxBDID_SHIFT 16 |
328 | #define BD_RxBDSeqN_SHIFT 24 | 327 | #define BD_RxBDSeqN_SHIFT 24 |
329 | 328 | ||
330 | 329 | ||
@@ -348,13 +347,15 @@ struct BDesc { | |||
348 | Int_STargAbtEn | \ | 347 | Int_STargAbtEn | \ |
349 | Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ | 348 | Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ |
350 | #define DMA_CTL_CMD DMA_BURST_SIZE | 349 | #define DMA_CTL_CMD DMA_BURST_SIZE |
351 | #define HAVE_DMA_RXALIGN(lp) likely((lp)->boardtype != TC35815CF) | 350 | #define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF) |
352 | 351 | ||
353 | /* Tuning parameters */ | 352 | /* Tuning parameters */ |
354 | #define DMA_BURST_SIZE 32 | 353 | #define DMA_BURST_SIZE 32 |
355 | #define TX_THRESHOLD 1024 | 354 | #define TX_THRESHOLD 1024 |
356 | #define TX_THRESHOLD_MAX 1536 /* used threshold with packet max byte for low pci transfer ability.*/ | 355 | /* used threshold with packet max byte for low pci transfer ability.*/ |
357 | #define TX_THRESHOLD_KEEP_LIMIT 10 /* setting threshold max value when overrun error occured this count. */ | 356 | #define TX_THRESHOLD_MAX 1536 |
357 | /* setting threshold max value when overrun error occured this count. */ | ||
358 | #define TX_THRESHOLD_KEEP_LIMIT 10 | ||
358 | 359 | ||
359 | /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ | 360 | /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ |
360 | #ifdef TC35815_USE_PACKEDBUFFER | 361 | #ifdef TC35815_USE_PACKEDBUFFER |
@@ -396,21 +397,12 @@ struct FrFD { | |||
396 | }; | 397 | }; |
397 | 398 | ||
398 | 399 | ||
399 | #define tc_readl(addr) readl(addr) | 400 | #define tc_readl(addr) ioread32(addr) |
400 | #define tc_writel(d, addr) writel(d, addr) | 401 | #define tc_writel(d, addr) iowrite32(d, addr) |
401 | 402 | ||
402 | #define TC35815_TX_TIMEOUT msecs_to_jiffies(400) | 403 | #define TC35815_TX_TIMEOUT msecs_to_jiffies(400) |
403 | 404 | ||
404 | /* Timer state engine. */ | 405 | /* Information that need to be kept for each controller. */ |
405 | enum tc35815_timer_state { | ||
406 | arbwait = 0, /* Waiting for auto negotiation to complete. */ | ||
407 | lupwait = 1, /* Auto-neg complete, awaiting link-up status. */ | ||
408 | ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */ | ||
409 | asleep = 3, /* Time inactive. */ | ||
410 | lcheck = 4, /* Check link status. */ | ||
411 | }; | ||
412 | |||
413 | /* Information that need to be kept for each board. */ | ||
414 | struct tc35815_local { | 406 | struct tc35815_local { |
415 | struct pci_dev *pci_dev; | 407 | struct pci_dev *pci_dev; |
416 | 408 | ||
@@ -418,12 +410,11 @@ struct tc35815_local { | |||
418 | struct napi_struct napi; | 410 | struct napi_struct napi; |
419 | 411 | ||
420 | /* statistics */ | 412 | /* statistics */ |
421 | struct net_device_stats stats; | ||
422 | struct { | 413 | struct { |
423 | int max_tx_qlen; | 414 | int max_tx_qlen; |
424 | int tx_ints; | 415 | int tx_ints; |
425 | int rx_ints; | 416 | int rx_ints; |
426 | int tx_underrun; | 417 | int tx_underrun; |
427 | } lstats; | 418 | } lstats; |
428 | 419 | ||
429 | /* Tx control lock. This protects the transmit buffer ring | 420 | /* Tx control lock. This protects the transmit buffer ring |
@@ -433,12 +424,12 @@ struct tc35815_local { | |||
433 | */ | 424 | */ |
434 | spinlock_t lock; | 425 | spinlock_t lock; |
435 | 426 | ||
436 | int phy_addr; | 427 | struct mii_bus mii_bus; |
437 | int fullduplex; | 428 | struct phy_device *phy_dev; |
438 | unsigned short saved_lpa; | 429 | int duplex; |
439 | struct timer_list timer; | 430 | int speed; |
440 | enum tc35815_timer_state timer_state; /* State of auto-neg timer. */ | 431 | int link; |
441 | unsigned int timer_ticks; /* Number of clicks at each state */ | 432 | struct work_struct restart_work; |
442 | 433 | ||
443 | /* | 434 | /* |
444 | * Transmitting: Batch Mode. | 435 | * Transmitting: Batch Mode. |
@@ -452,7 +443,7 @@ struct tc35815_local { | |||
452 | * RX_BUF_NUM BD in Free Buffer FD. | 443 | * RX_BUF_NUM BD in Free Buffer FD. |
453 | * One Free Buffer BD has ETH_FRAME_LEN data buffer. | 444 | * One Free Buffer BD has ETH_FRAME_LEN data buffer. |
454 | */ | 445 | */ |
455 | void * fd_buf; /* for TxFD, RxFD, FrFD */ | 446 | void *fd_buf; /* for TxFD, RxFD, FrFD */ |
456 | dma_addr_t fd_buf_dma; | 447 | dma_addr_t fd_buf_dma; |
457 | struct TxFD *tfd_base; | 448 | struct TxFD *tfd_base; |
458 | unsigned int tfd_start; | 449 | unsigned int tfd_start; |
@@ -463,7 +454,7 @@ struct tc35815_local { | |||
463 | struct FrFD *fbl_ptr; | 454 | struct FrFD *fbl_ptr; |
464 | #ifdef TC35815_USE_PACKEDBUFFER | 455 | #ifdef TC35815_USE_PACKEDBUFFER |
465 | unsigned char fbl_curid; | 456 | unsigned char fbl_curid; |
466 | void * data_buf[RX_BUF_NUM]; /* packing */ | 457 | void *data_buf[RX_BUF_NUM]; /* packing */ |
467 | dma_addr_t data_buf_dma[RX_BUF_NUM]; | 458 | dma_addr_t data_buf_dma[RX_BUF_NUM]; |
468 | struct { | 459 | struct { |
469 | struct sk_buff *skb; | 460 | struct sk_buff *skb; |
@@ -476,10 +467,8 @@ struct tc35815_local { | |||
476 | dma_addr_t skb_dma; | 467 | dma_addr_t skb_dma; |
477 | } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; | 468 | } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; |
478 | #endif | 469 | #endif |
479 | struct mii_if_info mii; | ||
480 | unsigned short mii_id[2]; | ||
481 | u32 msg_enable; | 470 | u32 msg_enable; |
482 | board_t boardtype; | 471 | enum tc35815_chiptype chiptype; |
483 | }; | 472 | }; |
484 | 473 | ||
485 | static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) | 474 | static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) |
@@ -506,13 +495,14 @@ static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) | |||
506 | } | 495 | } |
507 | 496 | ||
508 | #define TC35815_DMA_SYNC_ONDEMAND | 497 | #define TC35815_DMA_SYNC_ONDEMAND |
509 | static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | 498 | static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) |
510 | { | 499 | { |
511 | #ifdef TC35815_DMA_SYNC_ONDEMAND | 500 | #ifdef TC35815_DMA_SYNC_ONDEMAND |
512 | void *buf; | 501 | void *buf; |
513 | /* pci_map + pci_dma_sync will be more effective than | 502 | /* pci_map + pci_dma_sync will be more effective than |
514 | * pci_alloc_consistent on some archs. */ | 503 | * pci_alloc_consistent on some archs. */ |
515 | if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL) | 504 | buf = (void *)__get_free_page(GFP_ATOMIC); |
505 | if (!buf) | ||
516 | return NULL; | 506 | return NULL; |
517 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | 507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, |
518 | PCI_DMA_FROMDEVICE); | 508 | PCI_DMA_FROMDEVICE); |
@@ -577,7 +567,7 @@ static void tc35815_txdone(struct net_device *dev); | |||
577 | static int tc35815_close(struct net_device *dev); | 567 | static int tc35815_close(struct net_device *dev); |
578 | static struct net_device_stats *tc35815_get_stats(struct net_device *dev); | 568 | static struct net_device_stats *tc35815_get_stats(struct net_device *dev); |
579 | static void tc35815_set_multicast_list(struct net_device *dev); | 569 | static void tc35815_set_multicast_list(struct net_device *dev); |
580 | static void tc35815_tx_timeout(struct net_device *dev); | 570 | static void tc35815_tx_timeout(struct net_device *dev); |
581 | static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 571 | static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
582 | #ifdef CONFIG_NET_POLL_CONTROLLER | 572 | #ifdef CONFIG_NET_POLL_CONTROLLER |
583 | static void tc35815_poll_controller(struct net_device *dev); | 573 | static void tc35815_poll_controller(struct net_device *dev); |
@@ -585,21 +575,225 @@ static void tc35815_poll_controller(struct net_device *dev); | |||
585 | static const struct ethtool_ops tc35815_ethtool_ops; | 575 | static const struct ethtool_ops tc35815_ethtool_ops; |
586 | 576 | ||
587 | /* Example routines you must write ;->. */ | 577 | /* Example routines you must write ;->. */ |
588 | static void tc35815_chip_reset(struct net_device *dev); | 578 | static void tc35815_chip_reset(struct net_device *dev); |
589 | static void tc35815_chip_init(struct net_device *dev); | 579 | static void tc35815_chip_init(struct net_device *dev); |
590 | static void tc35815_find_phy(struct net_device *dev); | ||
591 | static void tc35815_phy_chip_init(struct net_device *dev); | ||
592 | 580 | ||
593 | #ifdef DEBUG | 581 | #ifdef DEBUG |
594 | static void panic_queues(struct net_device *dev); | 582 | static void panic_queues(struct net_device *dev); |
595 | #endif | 583 | #endif |
596 | 584 | ||
597 | static void tc35815_timer(unsigned long data); | 585 | static void tc35815_restart_work(struct work_struct *work); |
598 | static void tc35815_start_auto_negotiation(struct net_device *dev, | 586 | |
599 | struct ethtool_cmd *ep); | 587 | static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
600 | static int tc_mdio_read(struct net_device *dev, int phy_id, int location); | 588 | { |
601 | static void tc_mdio_write(struct net_device *dev, int phy_id, int location, | 589 | struct net_device *dev = bus->priv; |
602 | int val); | 590 | struct tc35815_regs __iomem *tr = |
591 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
592 | unsigned long timeout = jiffies + 10; | ||
593 | |||
594 | tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); | ||
595 | while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { | ||
596 | if (time_after(jiffies, timeout)) | ||
597 | return -EIO; | ||
598 | cpu_relax(); | ||
599 | } | ||
600 | return tc_readl(&tr->MD_Data) & 0xffff; | ||
601 | } | ||
602 | |||
603 | static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) | ||
604 | { | ||
605 | struct net_device *dev = bus->priv; | ||
606 | struct tc35815_regs __iomem *tr = | ||
607 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
608 | unsigned long timeout = jiffies + 10; | ||
609 | |||
610 | tc_writel(val, &tr->MD_Data); | ||
611 | tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f), | ||
612 | &tr->MD_CA); | ||
613 | while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { | ||
614 | if (time_after(jiffies, timeout)) | ||
615 | return -EIO; | ||
616 | cpu_relax(); | ||
617 | } | ||
618 | return 0; | ||
619 | } | ||
620 | |||
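For context, these two hooks are what the phylib core drives once the bus is registered in tc_mii_init() below. A rough illustration of how a caller would probe a PHY through them — not part of the patch, example_read_phy_id is an assumed name, and it relies on the driver's existing <linux/phy.h> include:

/* Illustrative only: read the PHY ID registers through the bus->read
 * hook that is wired up to tc_mdio_read() above. */
static int example_read_phy_id(struct mii_bus *bus, int addr)
{
	int id1 = bus->read(bus, addr, MII_PHYSID1);
	int id2 = bus->read(bus, addr, MII_PHYSID2);

	if (id1 < 0 || id2 < 0)
		return -ENODEV;	/* e.g. tc_mdio_read() timed out */
	return (id1 << 16) | (id2 & 0xffff);
}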
621 | static void tc_handle_link_change(struct net_device *dev) | ||
622 | { | ||
623 | struct tc35815_local *lp = netdev_priv(dev); | ||
624 | struct phy_device *phydev = lp->phy_dev; | ||
625 | unsigned long flags; | ||
626 | int status_change = 0; | ||
627 | |||
628 | spin_lock_irqsave(&lp->lock, flags); | ||
629 | if (phydev->link && | ||
630 | (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { | ||
631 | struct tc35815_regs __iomem *tr = | ||
632 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
633 | u32 reg; | ||
634 | |||
635 | reg = tc_readl(&tr->MAC_Ctl); | ||
636 | reg |= MAC_HaltReq; | ||
637 | tc_writel(reg, &tr->MAC_Ctl); | ||
638 | if (phydev->duplex == DUPLEX_FULL) | ||
639 | reg |= MAC_FullDup; | ||
640 | else | ||
641 | reg &= ~MAC_FullDup; | ||
642 | tc_writel(reg, &tr->MAC_Ctl); | ||
643 | reg &= ~MAC_HaltReq; | ||
644 | tc_writel(reg, &tr->MAC_Ctl); | ||
645 | |||
646 | /* | ||
647 | * TX4939 PCFG.SPEEDn bit will be changed on | ||
648 | * NETDEV_CHANGE event. | ||
649 | */ | ||
650 | |||
651 | #if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR) | ||
652 | /* | ||
653 | * WORKAROUND: enable LostCrS only if half duplex | ||
654 | * operation. | ||
655 | * (TX4939 does not have EnLCarr) | ||
656 | */ | ||
657 | if (phydev->duplex == DUPLEX_HALF && | ||
658 | lp->chiptype != TC35815_TX4939) | ||
659 | tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, | ||
660 | &tr->Tx_Ctl); | ||
661 | #endif | ||
662 | |||
663 | lp->speed = phydev->speed; | ||
664 | lp->duplex = phydev->duplex; | ||
665 | status_change = 1; | ||
666 | } | ||
667 | |||
668 | if (phydev->link != lp->link) { | ||
669 | if (phydev->link) { | ||
670 | #ifdef WORKAROUND_100HALF_PROMISC | ||
671 | /* delayed promiscuous enabling */ | ||
672 | if (dev->flags & IFF_PROMISC) | ||
673 | tc35815_set_multicast_list(dev); | ||
674 | #endif | ||
675 | netif_schedule(dev); | ||
676 | } else { | ||
677 | lp->speed = 0; | ||
678 | lp->duplex = -1; | ||
679 | } | ||
680 | lp->link = phydev->link; | ||
681 | |||
682 | status_change = 1; | ||
683 | } | ||
684 | spin_unlock_irqrestore(&lp->lock, flags); | ||
685 | |||
686 | if (status_change && netif_msg_link(lp)) { | ||
687 | phy_print_status(phydev); | ||
688 | #ifdef DEBUG | ||
689 | printk(KERN_DEBUG | ||
690 | "%s: MII BMCR %04x BMSR %04x LPA %04x\n", | ||
691 | dev->name, | ||
692 | phy_read(phydev, MII_BMCR), | ||
693 | phy_read(phydev, MII_BMSR), | ||
694 | phy_read(phydev, MII_LPA)); | ||
695 | #endif | ||
696 | } | ||
697 | } | ||
698 | |||
699 | static int tc_mii_probe(struct net_device *dev) | ||
700 | { | ||
701 | struct tc35815_local *lp = netdev_priv(dev); | ||
702 | struct phy_device *phydev = NULL; | ||
703 | int phy_addr; | ||
704 | u32 dropmask; | ||
705 | |||
706 | /* find the first phy */ | ||
707 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | ||
708 | if (lp->mii_bus.phy_map[phy_addr]) { | ||
709 | if (phydev) { | ||
710 | printk(KERN_ERR "%s: multiple PHYs found\n", | ||
711 | dev->name); | ||
712 | return -EINVAL; | ||
713 | } | ||
714 | phydev = lp->mii_bus.phy_map[phy_addr]; | ||
715 | break; | ||
716 | } | ||
717 | } | ||
718 | |||
719 | if (!phydev) { | ||
720 | printk(KERN_ERR "%s: no PHY found\n", dev->name); | ||
721 | return -ENODEV; | ||
722 | } | ||
723 | |||
724 | /* attach the mac to the phy */ | ||
725 | phydev = phy_connect(dev, phydev->dev.bus_id, | ||
726 | &tc_handle_link_change, 0, | ||
727 | lp->chiptype == TC35815_TX4939 ? | ||
728 | PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); | ||
729 | if (IS_ERR(phydev)) { | ||
730 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | ||
731 | return PTR_ERR(phydev); | ||
732 | } | ||
733 | printk(KERN_INFO "%s: attached PHY driver [%s] " | ||
734 | "(mii_bus:phy_addr=%s, id=%x)\n", | ||
735 | dev->name, phydev->drv->name, phydev->dev.bus_id, | ||
736 | phydev->phy_id); | ||
737 | |||
738 | /* mask with MAC supported features */ | ||
739 | phydev->supported &= PHY_BASIC_FEATURES; | ||
740 | dropmask = 0; | ||
741 | if (options.speed == 10) | ||
742 | dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; | ||
743 | else if (options.speed == 100) | ||
744 | dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; | ||
745 | if (options.duplex == 1) | ||
746 | dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; | ||
747 | else if (options.duplex == 2) | ||
748 | dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; | ||
749 | phydev->supported &= ~dropmask; | ||
750 | phydev->advertising = phydev->supported; | ||
751 | |||
752 | lp->link = 0; | ||
753 | lp->speed = 0; | ||
754 | lp->duplex = -1; | ||
755 | lp->phy_dev = phydev; | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static int tc_mii_init(struct net_device *dev) | ||
761 | { | ||
762 | struct tc35815_local *lp = netdev_priv(dev); | ||
763 | int err; | ||
764 | int i; | ||
765 | |||
766 | lp->mii_bus.name = "tc35815_mii_bus"; | ||
767 | lp->mii_bus.read = tc_mdio_read; | ||
768 | lp->mii_bus.write = tc_mdio_write; | ||
769 | snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "%x", | ||
770 | (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); | ||
771 | lp->mii_bus.priv = dev; | ||
772 | lp->mii_bus.dev = &lp->pci_dev->dev; | ||
773 | lp->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
774 | if (!lp->mii_bus.irq) { | ||
775 | err = -ENOMEM; | ||
776 | goto err_out; | ||
777 | } | ||
778 | |||
779 | for (i = 0; i < PHY_MAX_ADDR; i++) | ||
780 | lp->mii_bus.irq[i] = PHY_POLL; | ||
781 | |||
782 | err = mdiobus_register(&lp->mii_bus); | ||
783 | if (err) | ||
784 | goto err_out_free_mdio_irq; | ||
785 | err = tc_mii_probe(dev); | ||
786 | if (err) | ||
787 | goto err_out_unregister_bus; | ||
788 | return 0; | ||
789 | |||
790 | err_out_unregister_bus: | ||
791 | mdiobus_unregister(&lp->mii_bus); | ||
792 | err_out_free_mdio_irq: | ||
793 | kfree(lp->mii_bus.irq); | ||
794 | err_out: | ||
795 | return err; | ||
796 | } | ||
603 | 797 | ||
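tc_mii_init() above is invoked from the probe path (see tc35815_init_one() further down). The usual phylib counterpart, not visible in this hunk, is to start and stop the PHY state machine from the netdev open/close handlers; a hedged sketch of that shape, with assumed function names:

/* Sketch only: the conventional open/close pairing for a phylib driver;
 * this hunk does not show tc35815's actual open/close changes. */
static int example_open(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	/* ... ring setup, request_irq(), chip init ... */
	phy_start(lp->phy_dev);	/* kicks autonegotiation; results are
				 * reported via tc_handle_link_change() */
	netif_start_queue(dev);
	return 0;
}

static int example_close(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	phy_stop(lp->phy_dev);
	/* ... free_irq(), ring teardown ... */
	return 0;
}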
604 | #ifdef CONFIG_CPU_TX49XX | 798 | #ifdef CONFIG_CPU_TX49XX |
605 | /* | 799 | /* |
@@ -617,7 +811,7 @@ static int __devinit tc35815_mac_match(struct device *dev, void *data) | |||
617 | 811 | ||
618 | static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) | 812 | static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) |
619 | { | 813 | { |
620 | struct tc35815_local *lp = dev->priv; | 814 | struct tc35815_local *lp = netdev_priv(dev); |
621 | struct device *pd = bus_find_device(&platform_bus_type, NULL, | 815 | struct device *pd = bus_find_device(&platform_bus_type, NULL, |
622 | lp->pci_dev, tc35815_mac_match); | 816 | lp->pci_dev, tc35815_mac_match); |
623 | if (pd) { | 817 | if (pd) { |
@@ -635,7 +829,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) | |||
635 | } | 829 | } |
636 | #endif | 830 | #endif |
637 | 831 | ||
638 | static int __devinit tc35815_init_dev_addr (struct net_device *dev) | 832 | static int __devinit tc35815_init_dev_addr(struct net_device *dev) |
639 | { | 833 | { |
640 | struct tc35815_regs __iomem *tr = | 834 | struct tc35815_regs __iomem *tr = |
641 | (struct tc35815_regs __iomem *)dev->base_addr; | 835 | (struct tc35815_regs __iomem *)dev->base_addr; |
@@ -657,21 +851,21 @@ static int __devinit tc35815_init_dev_addr (struct net_device *dev) | |||
657 | return 0; | 851 | return 0; |
658 | } | 852 | } |
659 | 853 | ||
660 | static int __devinit tc35815_init_one (struct pci_dev *pdev, | 854 | static int __devinit tc35815_init_one(struct pci_dev *pdev, |
661 | const struct pci_device_id *ent) | 855 | const struct pci_device_id *ent) |
662 | { | 856 | { |
663 | void __iomem *ioaddr = NULL; | 857 | void __iomem *ioaddr = NULL; |
664 | struct net_device *dev; | 858 | struct net_device *dev; |
665 | struct tc35815_local *lp; | 859 | struct tc35815_local *lp; |
666 | int rc; | 860 | int rc; |
667 | unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; | 861 | DECLARE_MAC_BUF(mac); |
668 | 862 | ||
669 | static int printed_version; | 863 | static int printed_version; |
670 | if (!printed_version++) { | 864 | if (!printed_version++) { |
671 | printk(version); | 865 | printk(version); |
672 | dev_printk(KERN_DEBUG, &pdev->dev, | 866 | dev_printk(KERN_DEBUG, &pdev->dev, |
673 | "speed:%d duplex:%d doforce:%d\n", | 867 | "speed:%d duplex:%d\n", |
674 | options.speed, options.duplex, options.doforce); | 868 | options.speed, options.duplex); |
675 | } | 869 | } |
676 | 870 | ||
677 | if (!pdev->irq) { | 871 | if (!pdev->irq) { |
@@ -680,55 +874,24 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
680 | } | 874 | } |
681 | 875 | ||
682 | /* dev zeroed in alloc_etherdev */ | 876 | /* dev zeroed in alloc_etherdev */ |
683 | dev = alloc_etherdev (sizeof (*lp)); | 877 | dev = alloc_etherdev(sizeof(*lp)); |
684 | if (dev == NULL) { | 878 | if (dev == NULL) { |
685 | dev_err(&pdev->dev, "unable to alloc new ethernet\n"); | 879 | dev_err(&pdev->dev, "unable to alloc new ethernet\n"); |
686 | return -ENOMEM; | 880 | return -ENOMEM; |
687 | } | 881 | } |
688 | SET_NETDEV_DEV(dev, &pdev->dev); | 882 | SET_NETDEV_DEV(dev, &pdev->dev); |
689 | lp = dev->priv; | 883 | lp = netdev_priv(dev); |
690 | lp->dev = dev; | 884 | lp->dev = dev; |
691 | 885 | ||
692 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ | 886 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
693 | rc = pci_enable_device (pdev); | 887 | rc = pcim_enable_device(pdev); |
694 | if (rc) | 888 | if (rc) |
695 | goto err_out; | 889 | goto err_out; |
696 | 890 | rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME); | |
697 | mmio_start = pci_resource_start (pdev, 1); | ||
698 | mmio_end = pci_resource_end (pdev, 1); | ||
699 | mmio_flags = pci_resource_flags (pdev, 1); | ||
700 | mmio_len = pci_resource_len (pdev, 1); | ||
701 | |||
702 | /* set this immediately, we need to know before | ||
703 | * we talk to the chip directly */ | ||
704 | |||
705 | /* make sure PCI base addr 1 is MMIO */ | ||
706 | if (!(mmio_flags & IORESOURCE_MEM)) { | ||
707 | dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); | ||
708 | rc = -ENODEV; | ||
709 | goto err_out; | ||
710 | } | ||
711 | |||
712 | /* check for weird/broken PCI region reporting */ | ||
713 | if ((mmio_len < sizeof(struct tc35815_regs))) { | ||
714 | dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); | ||
715 | rc = -ENODEV; | ||
716 | goto err_out; | ||
717 | } | ||
718 | |||
719 | rc = pci_request_regions (pdev, MODNAME); | ||
720 | if (rc) | 891 | if (rc) |
721 | goto err_out; | 892 | goto err_out; |
722 | 893 | pci_set_master(pdev); | |
723 | pci_set_master (pdev); | 894 | ioaddr = pcim_iomap_table(pdev)[1]; |
724 | |||
725 | /* ioremap MMIO region */ | ||
726 | ioaddr = ioremap (mmio_start, mmio_len); | ||
727 | if (ioaddr == NULL) { | ||
728 | dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); | ||
729 | rc = -EIO; | ||
730 | goto err_out_free_res; | ||
731 | } | ||
732 | 895 | ||
733 | /* Initialize the device structure. */ | 896 | /* Initialize the device structure. */ |
734 | dev->open = tc35815_open; | 897 | dev->open = tc35815_open; |
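The probe conversion above swaps the hand-rolled resource handling (explicit region checks, pci_request_regions(), ioremap() and the matching unwind paths) for managed devres helpers, which release the BAR mapping and the device enable automatically on detach. A minimal sketch of that pattern, with illustrative names only:

static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);		/* auto-disabled on detach */
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << 1, "example"); /* request+map BAR 1 */
	if (rc)
		return rc;
	pci_set_master(pdev);
	regs = pcim_iomap_table(pdev)[1];	/* mapped cookie for BAR 1 */
	/* ... program the device through regs; no explicit iounmap or
	 * pci_release_regions() is needed on the error/removal paths ... */
	return 0;
}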
@@ -748,11 +911,12 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
748 | #endif | 911 | #endif |
749 | 912 | ||
750 | dev->irq = pdev->irq; | 913 | dev->irq = pdev->irq; |
751 | dev->base_addr = (unsigned long) ioaddr; | 914 | dev->base_addr = (unsigned long)ioaddr; |
752 | 915 | ||
916 | INIT_WORK(&lp->restart_work, tc35815_restart_work); | ||
753 | spin_lock_init(&lp->lock); | 917 | spin_lock_init(&lp->lock); |
754 | lp->pci_dev = pdev; | 918 | lp->pci_dev = pdev; |
755 | lp->boardtype = ent->driver_data; | 919 | lp->chiptype = ent->driver_data; |
756 | 920 | ||
757 | lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; | 921 | lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; |
758 | pci_set_drvdata(pdev, dev); | 922 | pci_set_drvdata(pdev, dev); |
@@ -766,68 +930,49 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, | |||
766 | random_ether_addr(dev->dev_addr); | 930 | random_ether_addr(dev->dev_addr); |
767 | } | 931 | } |
768 | 932 | ||
769 | rc = register_netdev (dev); | 933 | rc = register_netdev(dev); |
770 | if (rc) | 934 | if (rc) |
771 | goto err_out_unmap; | 935 | goto err_out; |
772 | 936 | ||
773 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 937 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
774 | printk(KERN_INFO "%s: %s at 0x%lx, " | 938 | printk(KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", |
775 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " | ||
776 | "IRQ %d\n", | ||
777 | dev->name, | 939 | dev->name, |
778 | board_info[ent->driver_data].name, | 940 | chip_info[ent->driver_data].name, |
779 | dev->base_addr, | 941 | dev->base_addr, |
780 | dev->dev_addr[0], dev->dev_addr[1], | 942 | print_mac(mac, dev->dev_addr), |
781 | dev->dev_addr[2], dev->dev_addr[3], | ||
782 | dev->dev_addr[4], dev->dev_addr[5], | ||
783 | dev->irq); | 943 | dev->irq); |
784 | 944 | ||
785 | setup_timer(&lp->timer, tc35815_timer, (unsigned long) dev); | 945 | rc = tc_mii_init(dev); |
786 | lp->mii.dev = dev; | 946 | if (rc) |
787 | lp->mii.mdio_read = tc_mdio_read; | 947 | goto err_out_unregister; |
788 | lp->mii.mdio_write = tc_mdio_write; | ||
789 | lp->mii.phy_id_mask = 0x1f; | ||
790 | lp->mii.reg_num_mask = 0x1f; | ||
791 | tc35815_find_phy(dev); | ||
792 | lp->mii.phy_id = lp->phy_addr; | ||
793 | lp->mii.full_duplex = 0; | ||
794 | lp->mii.force_media = 0; | ||
795 | 948 | ||
796 | return 0; | 949 | return 0; |
797 | 950 | ||
798 | err_out_unmap: | 951 | err_out_unregister: |
799 | iounmap(ioaddr); | 952 | unregister_netdev(dev); |
800 | err_out_free_res: | ||
801 | pci_release_regions (pdev); | ||
802 | err_out: | 953 | err_out: |
803 | free_netdev (dev); | 954 | free_netdev(dev); |
804 | return rc; | 955 | return rc; |
805 | } | 956 | } |
806 | 957 | ||
807 | 958 | ||
808 | static void __devexit tc35815_remove_one (struct pci_dev *pdev) | 959 | static void __devexit tc35815_remove_one(struct pci_dev *pdev) |
809 | { | 960 | { |
810 | struct net_device *dev = pci_get_drvdata (pdev); | 961 | struct net_device *dev = pci_get_drvdata(pdev); |
811 | unsigned long mmio_addr; | 962 | struct tc35815_local *lp = netdev_priv(dev); |
812 | |||
813 | mmio_addr = dev->base_addr; | ||
814 | |||
815 | unregister_netdev (dev); | ||
816 | |||
817 | if (mmio_addr) { | ||
818 | iounmap ((void __iomem *)mmio_addr); | ||
819 | pci_release_regions (pdev); | ||
820 | } | ||
821 | |||
822 | free_netdev (dev); | ||
823 | 963 | ||
824 | pci_set_drvdata (pdev, NULL); | 964 | phy_disconnect(lp->phy_dev); |
965 | mdiobus_unregister(&lp->mii_bus); | ||
966 | kfree(lp->mii_bus.irq); | ||
967 | unregister_netdev(dev); | ||
968 | free_netdev(dev); | ||
969 | pci_set_drvdata(pdev, NULL); | ||
825 | } | 970 | } |
826 | 971 | ||
827 | static int | 972 | static int |
828 | tc35815_init_queues(struct net_device *dev) | 973 | tc35815_init_queues(struct net_device *dev) |
829 | { | 974 | { |
830 | struct tc35815_local *lp = dev->priv; | 975 | struct tc35815_local *lp = netdev_priv(dev); |
831 | int i; | 976 | int i; |
832 | unsigned long fd_addr; | 977 | unsigned long fd_addr; |
833 | 978 | ||
@@ -838,11 +983,17 @@ tc35815_init_queues(struct net_device *dev) | |||
838 | sizeof(struct TxFD) * TX_FD_NUM > | 983 | sizeof(struct TxFD) * TX_FD_NUM > |
839 | PAGE_SIZE * FD_PAGE_NUM); | 984 | PAGE_SIZE * FD_PAGE_NUM); |
840 | 985 | ||
841 | if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0) | 986 | lp->fd_buf = pci_alloc_consistent(lp->pci_dev, |
987 | PAGE_SIZE * FD_PAGE_NUM, | ||
988 | &lp->fd_buf_dma); | ||
989 | if (!lp->fd_buf) | ||
842 | return -ENOMEM; | 990 | return -ENOMEM; |
843 | for (i = 0; i < RX_BUF_NUM; i++) { | 991 | for (i = 0; i < RX_BUF_NUM; i++) { |
844 | #ifdef TC35815_USE_PACKEDBUFFER | 992 | #ifdef TC35815_USE_PACKEDBUFFER |
845 | if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) { | 993 | lp->data_buf[i] = |
994 | alloc_rxbuf_page(lp->pci_dev, | ||
995 | &lp->data_buf_dma[i]); | ||
996 | if (!lp->data_buf[i]) { | ||
846 | while (--i >= 0) { | 997 | while (--i >= 0) { |
847 | free_rxbuf_page(lp->pci_dev, | 998 | free_rxbuf_page(lp->pci_dev, |
848 | lp->data_buf[i], | 999 | lp->data_buf[i], |
@@ -885,18 +1036,17 @@ tc35815_init_queues(struct net_device *dev) | |||
885 | #endif | 1036 | #endif |
886 | printk("\n"); | 1037 | printk("\n"); |
887 | } else { | 1038 | } else { |
888 | for (i = 0; i < FD_PAGE_NUM; i++) { | 1039 | for (i = 0; i < FD_PAGE_NUM; i++) |
889 | clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE)); | 1040 | clear_page((void *)((unsigned long)lp->fd_buf + |
890 | } | 1041 | i * PAGE_SIZE)); |
891 | } | 1042 | } |
892 | fd_addr = (unsigned long)lp->fd_buf; | 1043 | fd_addr = (unsigned long)lp->fd_buf; |
893 | 1044 | ||
894 | /* Free Descriptors (for Receive) */ | 1045 | /* Free Descriptors (for Receive) */ |
895 | lp->rfd_base = (struct RxFD *)fd_addr; | 1046 | lp->rfd_base = (struct RxFD *)fd_addr; |
896 | fd_addr += sizeof(struct RxFD) * RX_FD_NUM; | 1047 | fd_addr += sizeof(struct RxFD) * RX_FD_NUM; |
897 | for (i = 0; i < RX_FD_NUM; i++) { | 1048 | for (i = 0; i < RX_FD_NUM; i++) |
898 | lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); | 1049 | lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); |
899 | } | ||
900 | lp->rfd_cur = lp->rfd_base; | 1050 | lp->rfd_cur = lp->rfd_base; |
901 | lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); | 1051 | lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); |
902 | 1052 | ||
@@ -964,7 +1114,7 @@ tc35815_init_queues(struct net_device *dev) | |||
964 | static void | 1114 | static void |
965 | tc35815_clear_queues(struct net_device *dev) | 1115 | tc35815_clear_queues(struct net_device *dev) |
966 | { | 1116 | { |
967 | struct tc35815_local *lp = dev->priv; | 1117 | struct tc35815_local *lp = netdev_priv(dev); |
968 | int i; | 1118 | int i; |
969 | 1119 | ||
970 | for (i = 0; i < TX_FD_NUM; i++) { | 1120 | for (i = 0; i < TX_FD_NUM; i++) { |
@@ -995,7 +1145,7 @@ tc35815_clear_queues(struct net_device *dev) | |||
995 | static void | 1145 | static void |
996 | tc35815_free_queues(struct net_device *dev) | 1146 | tc35815_free_queues(struct net_device *dev) |
997 | { | 1147 | { |
998 | struct tc35815_local *lp = dev->priv; | 1148 | struct tc35815_local *lp = netdev_priv(dev); |
999 | int i; | 1149 | int i; |
1000 | 1150 | ||
1001 | if (lp->tfd_base) { | 1151 | if (lp->tfd_base) { |
@@ -1076,7 +1226,7 @@ dump_rxfd(struct RxFD *fd) | |||
1076 | le32_to_cpu(fd->fd.FDStat), | 1226 | le32_to_cpu(fd->fd.FDStat), |
1077 | le32_to_cpu(fd->fd.FDCtl)); | 1227 | le32_to_cpu(fd->fd.FDCtl)); |
1078 | if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) | 1228 | if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) |
1079 | return 0; | 1229 | return 0; |
1080 | printk("BD: "); | 1230 | printk("BD: "); |
1081 | for (i = 0; i < bd_count; i++) | 1231 | for (i = 0; i < bd_count; i++) |
1082 | printk(" %08x %08x", | 1232 | printk(" %08x %08x", |
@@ -1109,7 +1259,7 @@ dump_frfd(struct FrFD *fd) | |||
1109 | static void | 1259 | static void |
1110 | panic_queues(struct net_device *dev) | 1260 | panic_queues(struct net_device *dev) |
1111 | { | 1261 | { |
1112 | struct tc35815_local *lp = dev->priv; | 1262 | struct tc35815_local *lp = netdev_priv(dev); |
1113 | int i; | 1263 | int i; |
1114 | 1264 | ||
1115 | printk("TxFD base %p, start %u, end %u\n", | 1265 | printk("TxFD base %p, start %u, end %u\n", |
@@ -1128,42 +1278,33 @@ panic_queues(struct net_device *dev) | |||
1128 | } | 1278 | } |
1129 | #endif | 1279 | #endif |
1130 | 1280 | ||
1131 | static void print_eth(char *add) | 1281 | static void print_eth(const u8 *add) |
1132 | { | 1282 | { |
1133 | int i; | 1283 | DECLARE_MAC_BUF(mac); |
1134 | 1284 | ||
1135 | printk("print_eth(%p)\n", add); | 1285 | printk(KERN_DEBUG "print_eth(%p)\n", add); |
1136 | for (i = 0; i < 6; i++) | 1286 | printk(KERN_DEBUG " %s =>", print_mac(mac, add + 6)); |
1137 | printk(" %2.2X", (unsigned char) add[i + 6]); | 1287 | printk(KERN_CONT " %s : %02x%02x\n", |
1138 | printk(" =>"); | 1288 | print_mac(mac, add), add[12], add[13]); |
1139 | for (i = 0; i < 6; i++) | ||
1140 | printk(" %2.2X", (unsigned char) add[i]); | ||
1141 | printk(" : %2.2X%2.2X\n", (unsigned char) add[12], (unsigned char) add[13]); | ||
1142 | } | 1289 | } |
1143 | 1290 | ||
1144 | static int tc35815_tx_full(struct net_device *dev) | 1291 | static int tc35815_tx_full(struct net_device *dev) |
1145 | { | 1292 | { |
1146 | struct tc35815_local *lp = dev->priv; | 1293 | struct tc35815_local *lp = netdev_priv(dev); |
1147 | return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); | 1294 | return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); |
1148 | } | 1295 | } |
1149 | 1296 | ||
1150 | static void tc35815_restart(struct net_device *dev) | 1297 | static void tc35815_restart(struct net_device *dev) |
1151 | { | 1298 | { |
1152 | struct tc35815_local *lp = dev->priv; | 1299 | struct tc35815_local *lp = netdev_priv(dev); |
1153 | int pid = lp->phy_addr; | 1300 | |
1154 | int do_phy_reset = 1; | 1301 | if (lp->phy_dev) { |
1155 | del_timer(&lp->timer); /* Kill if running */ | ||
1156 | |||
1157 | if (lp->mii_id[0] == 0x0016 && (lp->mii_id[1] & 0xfc00) == 0xf800) { | ||
1158 | /* Resetting PHY cause problem on some chip... (SEEQ 80221) */ | ||
1159 | do_phy_reset = 0; | ||
1160 | } | ||
1161 | if (do_phy_reset) { | ||
1162 | int timeout; | 1302 | int timeout; |
1163 | tc_mdio_write(dev, pid, MII_BMCR, BMCR_RESET); | 1303 | |
1304 | phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET); | ||
1164 | timeout = 100; | 1305 | timeout = 100; |
1165 | while (--timeout) { | 1306 | while (--timeout) { |
1166 | if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_RESET)) | 1307 | if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET)) |
1167 | break; | 1308 | break; |
1168 | udelay(1); | 1309 | udelay(1); |
1169 | } | 1310 | } |
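The rewritten print_eth() switches to the MAC-formatting helpers available in this kernel generation: DECLARE_MAC_BUF() reserves a small stack buffer, print_mac() renders a six-byte address into it as the usual colon-separated string, and KERN_CONT continues the same log line. A minimal sketch of the same idiom; dump_frame_header() is a made-up name and the layout simply follows a raw Ethernet header rather than the driver's exact output:

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/if_ether.h>

    /* hypothetical helper, printing dst/src/type of a raw Ethernet header */
    static void dump_frame_header(const u8 *hdr)
    {
        DECLARE_MAC_BUF(mac);           /* scratch buffer for the formatted address */

        printk(KERN_DEBUG "dst %s", print_mac(mac, hdr));
        printk(KERN_CONT " src %s type %02x%02x\n",
               print_mac(mac, hdr + 6), hdr[12], hdr[13]);
    }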
@@ -1171,16 +1312,40 @@ static void tc35815_restart(struct net_device *dev) | |||
1171 | printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); | 1312 | printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); |
1172 | } | 1313 | } |
1173 | 1314 | ||
1315 | spin_lock_irq(&lp->lock); | ||
1174 | tc35815_chip_reset(dev); | 1316 | tc35815_chip_reset(dev); |
1175 | tc35815_clear_queues(dev); | 1317 | tc35815_clear_queues(dev); |
1176 | tc35815_chip_init(dev); | 1318 | tc35815_chip_init(dev); |
1177 | /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ | 1319 | /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ |
1178 | tc35815_set_multicast_list(dev); | 1320 | tc35815_set_multicast_list(dev); |
1321 | spin_unlock_irq(&lp->lock); | ||
1322 | |||
1323 | netif_wake_queue(dev); | ||
1324 | } | ||
1325 | |||
1326 | static void tc35815_restart_work(struct work_struct *work) | ||
1327 | { | ||
1328 | struct tc35815_local *lp = | ||
1329 | container_of(work, struct tc35815_local, restart_work); | ||
1330 | struct net_device *dev = lp->dev; | ||
1331 | |||
1332 | tc35815_restart(dev); | ||
1333 | } | ||
1334 | |||
1335 | static void tc35815_schedule_restart(struct net_device *dev) | ||
1336 | { | ||
1337 | struct tc35815_local *lp = netdev_priv(dev); | ||
1338 | struct tc35815_regs __iomem *tr = | ||
1339 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
1340 | |||
1341 | /* disable interrupts */ | ||
1342 | tc_writel(0, &tr->Int_En); | ||
1343 | tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); | ||
1344 | schedule_work(&lp->restart_work); | ||
1179 | } | 1345 | } |
1180 | 1346 | ||
1181 | static void tc35815_tx_timeout(struct net_device *dev) | 1347 | static void tc35815_tx_timeout(struct net_device *dev) |
1182 | { | 1348 | { |
1183 | struct tc35815_local *lp = dev->priv; | ||
1184 | struct tc35815_regs __iomem *tr = | 1349 | struct tc35815_regs __iomem *tr = |
1185 | (struct tc35815_regs __iomem *)dev->base_addr; | 1350 | (struct tc35815_regs __iomem *)dev->base_addr; |
1186 | 1351 | ||
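Several hunks here introduce a deferred restart: tc35815_restart() now pokes the PHY through phy_write()/phy_read() and busy-waits for BMCR_RESET to clear, work that is better kept out of timer and interrupt context, so it is wrapped in a work item and merely scheduled (with chip interrupts masked first) wherever the old code called it directly. A stripped-down sketch of the pattern; the names are stand-ins for the driver's own:

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct example_priv {                   /* stand-in for struct tc35815_local */
        struct net_device *dev;
        struct work_struct restart_work;
    };

    static void example_do_restart(struct net_device *dev)
    {
        /* PHY reset, chip reset, ring re-init: free to poll and take locks */
    }

    static void example_restart_work(struct work_struct *work)
    {
        struct example_priv *lp =
            container_of(work, struct example_priv, restart_work);

        example_do_restart(lp->dev);
    }

    static void example_setup(struct example_priv *lp)      /* at probe time */
    {
        INIT_WORK(&lp->restart_work, example_restart_work);
    }

    static void example_trigger_restart(struct example_priv *lp)
    {
        schedule_work(&lp->restart_work);   /* safe from timer/IRQ context */
    }

tc35815_close() later pairs this with cancel_work_sync(), so a queued restart cannot run against a chip that has just been shut down.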
@@ -1188,28 +1353,12 @@ static void tc35815_tx_timeout(struct net_device *dev) | |||
1188 | dev->name, tc_readl(&tr->Tx_Stat)); | 1353 | dev->name, tc_readl(&tr->Tx_Stat)); |
1189 | 1354 | ||
1190 | /* Try to restart the adaptor. */ | 1355 | /* Try to restart the adaptor. */ |
1191 | spin_lock_irq(&lp->lock); | 1356 | tc35815_schedule_restart(dev); |
1192 | tc35815_restart(dev); | 1357 | dev->stats.tx_errors++; |
1193 | spin_unlock_irq(&lp->lock); | ||
1194 | |||
1195 | lp->stats.tx_errors++; | ||
1196 | |||
1197 | /* If we have space available to accept new transmit | ||
1198 | * requests, wake up the queueing layer. This would | ||
1199 | * be the case if the chipset_init() call above just | ||
1200 | * flushes out the tx queue and empties it. | ||
1201 | * | ||
1202 | * If instead, the tx queue is retained then the | ||
1203 | * netif_wake_queue() call should be placed in the | ||
1204 | * TX completion interrupt handler of the driver instead | ||
1205 | * of here. | ||
1206 | */ | ||
1207 | if (!tc35815_tx_full(dev)) | ||
1208 | netif_wake_queue(dev); | ||
1209 | } | 1358 | } |
1210 | 1359 | ||
1211 | /* | 1360 | /* |
1212 | * Open/initialize the board. This is called (in the current kernel) | 1361 | * Open/initialize the controller. This is called (in the current kernel) |
1213 | * sometime after booting when the 'ifconfig' program is run. | 1362 | * sometime after booting when the 'ifconfig' program is run. |
1214 | * | 1363 | * |
1215 | * This routine should set everything up anew at each open, even | 1364 | * This routine should set everything up anew at each open, even |
@@ -1219,17 +1368,16 @@ static void tc35815_tx_timeout(struct net_device *dev) | |||
1219 | static int | 1368 | static int |
1220 | tc35815_open(struct net_device *dev) | 1369 | tc35815_open(struct net_device *dev) |
1221 | { | 1370 | { |
1222 | struct tc35815_local *lp = dev->priv; | 1371 | struct tc35815_local *lp = netdev_priv(dev); |
1223 | 1372 | ||
1224 | /* | 1373 | /* |
1225 | * This is used if the interrupt line can be turned off (shared). | 1374 | * This is used if the interrupt line can be turned off (shared). |
1226 | * See 3c503.c for an example of selecting the IRQ at config-time. | 1375 | * See 3c503.c for an example of selecting the IRQ at config-time. |
1227 | */ | 1376 | */ |
1228 | if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) { | 1377 | if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, |
1378 | dev->name, dev)) | ||
1229 | return -EAGAIN; | 1379 | return -EAGAIN; |
1230 | } | ||
1231 | 1380 | ||
1232 | del_timer(&lp->timer); /* Kill if running */ | ||
1233 | tc35815_chip_reset(dev); | 1381 | tc35815_chip_reset(dev); |
1234 | 1382 | ||
1235 | if (tc35815_init_queues(dev) != 0) { | 1383 | if (tc35815_init_queues(dev) != 0) { |
@@ -1246,6 +1394,9 @@ tc35815_open(struct net_device *dev) | |||
1246 | tc35815_chip_init(dev); | 1394 | tc35815_chip_init(dev); |
1247 | spin_unlock_irq(&lp->lock); | 1395 | spin_unlock_irq(&lp->lock); |
1248 | 1396 | ||
1397 | /* schedule a link state check */ | ||
1398 | phy_start(lp->phy_dev); | ||
1399 | |||
1249 | /* We are now ready to accept transmit requests from | 1400 | /* We are now ready to accept transmit requests from |
1250 | * the queueing layer of the networking. | 1401 | * the queueing layer of the networking. |
1251 | */ | 1402 | */ |
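With phylib in charge, opening the interface only has to start the PHY state machine: phy_start() re-arms link polling, and the adjust_link handler attached when the PHY was connected (elsewhere in this patch) reconfigures the MAC once the link settles. A bare-bones probe/open pairing as a sketch; the phy_connect() signature is the one this kernel generation used, and the "0:01" bus id string is a placeholder, not taken from the driver:

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/phy.h>

    struct example_priv {
        struct phy_device *phy_dev;
    };

    static void example_adjust_link(struct net_device *dev)
    {
        /* called by phylib whenever link, speed or duplex changes */
    }

    static int example_attach_phy(struct net_device *dev)
    {
        struct example_priv *lp = netdev_priv(dev);
        struct phy_device *phydev;

        phydev = phy_connect(dev, "0:01", &example_adjust_link,
                             0, PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev))
            return PTR_ERR(phydev);
        lp->phy_dev = phydev;
        return 0;
    }

    static int example_open(struct net_device *dev)
    {
        struct example_priv *lp = netdev_priv(dev);

        /* request the IRQ, set up rings, program the MAC, then: */
        phy_start(lp->phy_dev);             /* schedule link state checks */
        return 0;
    }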
@@ -1261,7 +1412,7 @@ tc35815_open(struct net_device *dev) | |||
1261 | */ | 1412 | */ |
1262 | static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) | 1413 | static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) |
1263 | { | 1414 | { |
1264 | struct tc35815_local *lp = dev->priv; | 1415 | struct tc35815_local *lp = netdev_priv(dev); |
1265 | struct TxFD *txfd; | 1416 | struct TxFD *txfd; |
1266 | unsigned long flags; | 1417 | unsigned long flags; |
1267 | 1418 | ||
@@ -1366,7 +1517,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) | |||
1366 | panic("%s: Too many fatal errors.", dev->name); | 1517 | panic("%s: Too many fatal errors.", dev->name); |
1367 | printk(KERN_WARNING "%s: Resetting ...\n", dev->name); | 1518 | printk(KERN_WARNING "%s: Resetting ...\n", dev->name); |
1368 | /* Try to restart the adaptor. */ | 1519 | /* Try to restart the adaptor. */ |
1369 | tc35815_restart(dev); | 1520 | tc35815_schedule_restart(dev); |
1370 | } | 1521 | } |
1371 | 1522 | ||
1372 | #ifdef TC35815_NAPI | 1523 | #ifdef TC35815_NAPI |
@@ -1375,7 +1526,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) | |||
1375 | static int tc35815_do_interrupt(struct net_device *dev, u32 status) | 1526 | static int tc35815_do_interrupt(struct net_device *dev, u32 status) |
1376 | #endif | 1527 | #endif |
1377 | { | 1528 | { |
1378 | struct tc35815_local *lp = dev->priv; | 1529 | struct tc35815_local *lp = netdev_priv(dev); |
1379 | struct tc35815_regs __iomem *tr = | 1530 | struct tc35815_regs __iomem *tr = |
1380 | (struct tc35815_regs __iomem *)dev->base_addr; | 1531 | (struct tc35815_regs __iomem *)dev->base_addr; |
1381 | int ret = -1; | 1532 | int ret = -1; |
@@ -1392,7 +1543,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status) | |||
1392 | printk(KERN_WARNING | 1543 | printk(KERN_WARNING |
1393 | "%s: Free Descriptor Area Exhausted (%#x).\n", | 1544 | "%s: Free Descriptor Area Exhausted (%#x).\n", |
1394 | dev->name, status); | 1545 | dev->name, status); |
1395 | lp->stats.rx_dropped++; | 1546 | dev->stats.rx_dropped++; |
1396 | ret = 0; | 1547 | ret = 0; |
1397 | } | 1548 | } |
1398 | if (status & Int_IntBLEx) { | 1549 | if (status & Int_IntBLEx) { |
@@ -1401,14 +1552,14 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status) | |||
1401 | printk(KERN_WARNING | 1552 | printk(KERN_WARNING |
1402 | "%s: Buffer List Exhausted (%#x).\n", | 1553 | "%s: Buffer List Exhausted (%#x).\n", |
1403 | dev->name, status); | 1554 | dev->name, status); |
1404 | lp->stats.rx_dropped++; | 1555 | dev->stats.rx_dropped++; |
1405 | ret = 0; | 1556 | ret = 0; |
1406 | } | 1557 | } |
1407 | if (status & Int_IntExBD) { | 1558 | if (status & Int_IntExBD) { |
1408 | printk(KERN_WARNING | 1559 | printk(KERN_WARNING |
1409 | "%s: Excessive Buffer Descriptiors (%#x).\n", | 1560 | "%s: Excessive Buffer Descriptiors (%#x).\n", |
1410 | dev->name, status); | 1561 | dev->name, status); |
1411 | lp->stats.rx_length_errors++; | 1562 | dev->stats.rx_length_errors++; |
1412 | ret = 0; | 1563 | ret = 0; |
1413 | } | 1564 | } |
1414 | 1565 | ||
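From here on the driver stops keeping its own struct net_device_stats copy and bumps the counters embedded in struct net_device directly, which is why every lp->stats reference becomes dev->stats; the get_stats() hunk further down then simply returns &dev->stats after refreshing the one counter only the hardware tracks. A sketch of that accessor; the miss-counter helper is hypothetical:

    #include <linux/netdevice.h>

    /* hypothetical: read the chip's cumulative missed-frame count */
    static unsigned long example_read_miss_counter(struct net_device *dev)
    {
        return 0;                           /* stub for the sketch */
    }

    static struct net_device_stats *example_get_stats(struct net_device *dev)
    {
        /* software counters are already accumulated in dev->stats;
         * only hardware-kept counters need refreshing here */
        if (netif_running(dev))
            dev->stats.rx_missed_errors = example_read_miss_counter(dev);
        return &dev->stats;
    }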
@@ -1492,7 +1643,7 @@ static void | |||
1492 | tc35815_rx(struct net_device *dev) | 1643 | tc35815_rx(struct net_device *dev) |
1493 | #endif | 1644 | #endif |
1494 | { | 1645 | { |
1495 | struct tc35815_local *lp = dev->priv; | 1646 | struct tc35815_local *lp = netdev_priv(dev); |
1496 | unsigned int fdctl; | 1647 | unsigned int fdctl; |
1497 | int i; | 1648 | int i; |
1498 | int buf_free_count = 0; | 1649 | int buf_free_count = 0; |
@@ -1532,7 +1683,7 @@ tc35815_rx(struct net_device *dev) | |||
1532 | if (skb == NULL) { | 1683 | if (skb == NULL) { |
1533 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", | 1684 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", |
1534 | dev->name); | 1685 | dev->name); |
1535 | lp->stats.rx_dropped++; | 1686 | dev->stats.rx_dropped++; |
1536 | break; | 1687 | break; |
1537 | } | 1688 | } |
1538 | skb_reserve(skb, 2); /* 16 bit alignment */ | 1689 | skb_reserve(skb, 2); /* 16 bit alignment */ |
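The "Memory squeeze" branch and the skb_reserve(skb, 2) just below it are the conventional receive-buffer recipe: reserve two bytes so the IP header that follows the 14-byte Ethernet header ends up word aligned, and on allocation failure count rx_dropped and move on rather than stalling the ring. Condensed into a sketch, with the buffer length as a placeholder:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define EXAMPLE_RX_BUF_SIZE 1600        /* placeholder length */

    static struct sk_buff *example_alloc_rx_skb(struct net_device *dev)
    {
        struct sk_buff *skb = dev_alloc_skb(EXAMPLE_RX_BUF_SIZE + 2);

        if (!skb) {
            dev->stats.rx_dropped++;        /* drop, retry on a later pass */
            return NULL;
        }
        skb_reserve(skb, 2);    /* 14-byte MAC header + 2 => IP header aligned */
        skb->dev = dev;
        return skb;
    }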
@@ -1602,10 +1753,10 @@ tc35815_rx(struct net_device *dev) | |||
1602 | netif_rx(skb); | 1753 | netif_rx(skb); |
1603 | #endif | 1754 | #endif |
1604 | dev->last_rx = jiffies; | 1755 | dev->last_rx = jiffies; |
1605 | lp->stats.rx_packets++; | 1756 | dev->stats.rx_packets++; |
1606 | lp->stats.rx_bytes += pkt_len; | 1757 | dev->stats.rx_bytes += pkt_len; |
1607 | } else { | 1758 | } else { |
1608 | lp->stats.rx_errors++; | 1759 | dev->stats.rx_errors++; |
1609 | printk(KERN_DEBUG "%s: Rx error (status %x)\n", | 1760 | printk(KERN_DEBUG "%s: Rx error (status %x)\n", |
1610 | dev->name, status & Rx_Stat_Mask); | 1761 | dev->name, status & Rx_Stat_Mask); |
1611 | /* WORKAROUND: LongErr and CRCErr means Overflow. */ | 1762 | /* WORKAROUND: LongErr and CRCErr means Overflow. */ |
@@ -1613,10 +1764,14 @@ tc35815_rx(struct net_device *dev) | |||
1613 | status &= ~(Rx_LongErr|Rx_CRCErr); | 1764 | status &= ~(Rx_LongErr|Rx_CRCErr); |
1614 | status |= Rx_Over; | 1765 | status |= Rx_Over; |
1615 | } | 1766 | } |
1616 | if (status & Rx_LongErr) lp->stats.rx_length_errors++; | 1767 | if (status & Rx_LongErr) |
1617 | if (status & Rx_Over) lp->stats.rx_fifo_errors++; | 1768 | dev->stats.rx_length_errors++; |
1618 | if (status & Rx_CRCErr) lp->stats.rx_crc_errors++; | 1769 | if (status & Rx_Over) |
1619 | if (status & Rx_Align) lp->stats.rx_frame_errors++; | 1770 | dev->stats.rx_fifo_errors++; |
1771 | if (status & Rx_CRCErr) | ||
1772 | dev->stats.rx_crc_errors++; | ||
1773 | if (status & Rx_Align) | ||
1774 | dev->stats.rx_frame_errors++; | ||
1620 | } | 1775 | } |
1621 | 1776 | ||
1622 | if (bd_count > 0) { | 1777 | if (bd_count > 0) { |
@@ -1772,40 +1927,39 @@ static int tc35815_poll(struct napi_struct *napi, int budget) | |||
1772 | static void | 1927 | static void |
1773 | tc35815_check_tx_stat(struct net_device *dev, int status) | 1928 | tc35815_check_tx_stat(struct net_device *dev, int status) |
1774 | { | 1929 | { |
1775 | struct tc35815_local *lp = dev->priv; | 1930 | struct tc35815_local *lp = netdev_priv(dev); |
1776 | const char *msg = NULL; | 1931 | const char *msg = NULL; |
1777 | 1932 | ||
1778 | /* count collisions */ | 1933 | /* count collisions */ |
1779 | if (status & Tx_ExColl) | 1934 | if (status & Tx_ExColl) |
1780 | lp->stats.collisions += 16; | 1935 | dev->stats.collisions += 16; |
1781 | if (status & Tx_TxColl_MASK) | 1936 | if (status & Tx_TxColl_MASK) |
1782 | lp->stats.collisions += status & Tx_TxColl_MASK; | 1937 | dev->stats.collisions += status & Tx_TxColl_MASK; |
1783 | 1938 | ||
1784 | #ifndef NO_CHECK_CARRIER | 1939 | #ifndef NO_CHECK_CARRIER |
1785 | /* TX4939 does not have NCarr */ | 1940 | /* TX4939 does not have NCarr */ |
1786 | if (lp->boardtype == TC35815_TX4939) | 1941 | if (lp->chiptype == TC35815_TX4939) |
1787 | status &= ~Tx_NCarr; | 1942 | status &= ~Tx_NCarr; |
1788 | #ifdef WORKAROUND_LOSTCAR | 1943 | #ifdef WORKAROUND_LOSTCAR |
1789 | /* WORKAROUND: ignore LostCrS in full duplex operation */ | 1944 | /* WORKAROUND: ignore LostCrS in full duplex operation */ |
1790 | if ((lp->timer_state != asleep && lp->timer_state != lcheck) | 1945 | if (!lp->link || lp->duplex == DUPLEX_FULL) |
1791 | || lp->fullduplex) | ||
1792 | status &= ~Tx_NCarr; | 1946 | status &= ~Tx_NCarr; |
1793 | #endif | 1947 | #endif |
1794 | #endif | 1948 | #endif |
1795 | 1949 | ||
1796 | if (!(status & TX_STA_ERR)) { | 1950 | if (!(status & TX_STA_ERR)) { |
1797 | /* no error. */ | 1951 | /* no error. */ |
1798 | lp->stats.tx_packets++; | 1952 | dev->stats.tx_packets++; |
1799 | return; | 1953 | return; |
1800 | } | 1954 | } |
1801 | 1955 | ||
1802 | lp->stats.tx_errors++; | 1956 | dev->stats.tx_errors++; |
1803 | if (status & Tx_ExColl) { | 1957 | if (status & Tx_ExColl) { |
1804 | lp->stats.tx_aborted_errors++; | 1958 | dev->stats.tx_aborted_errors++; |
1805 | msg = "Excessive Collision."; | 1959 | msg = "Excessive Collision."; |
1806 | } | 1960 | } |
1807 | if (status & Tx_Under) { | 1961 | if (status & Tx_Under) { |
1808 | lp->stats.tx_fifo_errors++; | 1962 | dev->stats.tx_fifo_errors++; |
1809 | msg = "Tx FIFO Underrun."; | 1963 | msg = "Tx FIFO Underrun."; |
1810 | if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { | 1964 | if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { |
1811 | lp->lstats.tx_underrun++; | 1965 | lp->lstats.tx_underrun++; |
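The carrier workaround now keys off lp->link and lp->duplex instead of the old timer state machine; these fields are presumably kept current by the phylib adjust_link callback (not visible in this hunk), so hot paths such as the Tx status handler never have to touch MDIO. A sketch of how such a handler typically caches the state; only the field names are taken from the code above, the rest is an assumption:

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    struct example_priv {
        struct phy_device *phy_dev;
        spinlock_t lock;
        int link;
        int speed;
        int duplex;
    };

    static void example_adjust_link(struct net_device *dev)
    {
        struct example_priv *lp = netdev_priv(dev);
        struct phy_device *phydev = lp->phy_dev;
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);
        if (phydev->link != lp->link || phydev->speed != lp->speed ||
            phydev->duplex != lp->duplex) {
            /* reprogram MAC duplex and carrier handling here */
            lp->link = phydev->link;
            lp->speed = phydev->speed;
            lp->duplex = phydev->duplex;
            phy_print_status(phydev);
        }
        spin_unlock_irqrestore(&lp->lock, flags);
    }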
@@ -1818,25 +1972,25 @@ tc35815_check_tx_stat(struct net_device *dev, int status) | |||
1818 | } | 1972 | } |
1819 | } | 1973 | } |
1820 | if (status & Tx_Defer) { | 1974 | if (status & Tx_Defer) { |
1821 | lp->stats.tx_fifo_errors++; | 1975 | dev->stats.tx_fifo_errors++; |
1822 | msg = "Excessive Deferral."; | 1976 | msg = "Excessive Deferral."; |
1823 | } | 1977 | } |
1824 | #ifndef NO_CHECK_CARRIER | 1978 | #ifndef NO_CHECK_CARRIER |
1825 | if (status & Tx_NCarr) { | 1979 | if (status & Tx_NCarr) { |
1826 | lp->stats.tx_carrier_errors++; | 1980 | dev->stats.tx_carrier_errors++; |
1827 | msg = "Lost Carrier Sense."; | 1981 | msg = "Lost Carrier Sense."; |
1828 | } | 1982 | } |
1829 | #endif | 1983 | #endif |
1830 | if (status & Tx_LateColl) { | 1984 | if (status & Tx_LateColl) { |
1831 | lp->stats.tx_aborted_errors++; | 1985 | dev->stats.tx_aborted_errors++; |
1832 | msg = "Late Collision."; | 1986 | msg = "Late Collision."; |
1833 | } | 1987 | } |
1834 | if (status & Tx_TxPar) { | 1988 | if (status & Tx_TxPar) { |
1835 | lp->stats.tx_fifo_errors++; | 1989 | dev->stats.tx_fifo_errors++; |
1836 | msg = "Transmit Parity Error."; | 1990 | msg = "Transmit Parity Error."; |
1837 | } | 1991 | } |
1838 | if (status & Tx_SQErr) { | 1992 | if (status & Tx_SQErr) { |
1839 | lp->stats.tx_heartbeat_errors++; | 1993 | dev->stats.tx_heartbeat_errors++; |
1840 | msg = "Signal Quality Error."; | 1994 | msg = "Signal Quality Error."; |
1841 | } | 1995 | } |
1842 | if (msg && netif_msg_tx_err(lp)) | 1996 | if (msg && netif_msg_tx_err(lp)) |
@@ -1849,7 +2003,7 @@ tc35815_check_tx_stat(struct net_device *dev, int status) | |||
1849 | static void | 2003 | static void |
1850 | tc35815_txdone(struct net_device *dev) | 2004 | tc35815_txdone(struct net_device *dev) |
1851 | { | 2005 | { |
1852 | struct tc35815_local *lp = dev->priv; | 2006 | struct tc35815_local *lp = netdev_priv(dev); |
1853 | struct TxFD *txfd; | 2007 | struct TxFD *txfd; |
1854 | unsigned int fdctl; | 2008 | unsigned int fdctl; |
1855 | 2009 | ||
@@ -1878,7 +2032,7 @@ tc35815_txdone(struct net_device *dev) | |||
1878 | BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); | 2032 | BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); |
1879 | #endif | 2033 | #endif |
1880 | if (skb) { | 2034 | if (skb) { |
1881 | lp->stats.tx_bytes += skb->len; | 2035 | dev->stats.tx_bytes += skb->len; |
1882 | pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); | 2036 | pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); |
1883 | lp->tx_skbs[lp->tfd_end].skb = NULL; | 2037 | lp->tx_skbs[lp->tfd_end].skb = NULL; |
1884 | lp->tx_skbs[lp->tfd_end].skb_dma = 0; | 2038 | lp->tx_skbs[lp->tfd_end].skb_dma = 0; |
@@ -1904,7 +2058,7 @@ tc35815_txdone(struct net_device *dev) | |||
1904 | struct tc35815_regs __iomem *tr = | 2058 | struct tc35815_regs __iomem *tr = |
1905 | (struct tc35815_regs __iomem *)dev->base_addr; | 2059 | (struct tc35815_regs __iomem *)dev->base_addr; |
1906 | int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; | 2060 | int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; |
1907 | struct TxFD* txhead = &lp->tfd_base[head]; | 2061 | struct TxFD *txhead = &lp->tfd_base[head]; |
1908 | int qlen = (lp->tfd_start + TX_FD_NUM | 2062 | int qlen = (lp->tfd_start + TX_FD_NUM |
1909 | - lp->tfd_end) % TX_FD_NUM; | 2063 | - lp->tfd_end) % TX_FD_NUM; |
1910 | 2064 | ||
@@ -1939,7 +2093,7 @@ tc35815_txdone(struct net_device *dev) | |||
1939 | * condition, and space has now been made available, | 2093 | * condition, and space has now been made available, |
1940 | * wake up the queue. | 2094 | * wake up the queue. |
1941 | */ | 2095 | */ |
1942 | if (netif_queue_stopped(dev) && ! tc35815_tx_full(dev)) | 2096 | if (netif_queue_stopped(dev) && !tc35815_tx_full(dev)) |
1943 | netif_wake_queue(dev); | 2097 | netif_wake_queue(dev); |
1944 | } | 2098 | } |
1945 | 2099 | ||
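tc35815_tx_full() and this wake-up at the end of tc35815_txdone() are the two halves of the usual ring discipline: the ring is reported full while one slot still remains, so the start and end indices can never collide ambiguously, and the queue is only re-enabled once completions have freed space. The arithmetic as a tiny self-contained example (TX_FD_NUM stands in for the real ring size):

    #include <stdio.h>

    #define TX_FD_NUM 8     /* ring size; one slot is sacrificed as a guard */

    static int ring_full(unsigned int start, unsigned int end)
    {
        return (start + 1) % TX_FD_NUM == end;
    }

    static unsigned int ring_used(unsigned int start, unsigned int end)
    {
        return (start + TX_FD_NUM - end) % TX_FD_NUM;
    }

    int main(void)
    {
        unsigned int start = 0, end = 0;    /* start: next free, end: next to reclaim */

        while (!ring_full(start, end))
            start = (start + 1) % TX_FD_NUM;        /* queue a frame */
        printf("full with %u of %d slots used\n", ring_used(start, end), TX_FD_NUM);

        end = (end + 1) % TX_FD_NUM;                /* one Tx completion ... */
        printf("still full? %d\n", ring_full(start, end));  /* ... reopens the queue */
        return 0;
    }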
@@ -1947,16 +2101,17 @@ tc35815_txdone(struct net_device *dev) | |||
1947 | static int | 2101 | static int |
1948 | tc35815_close(struct net_device *dev) | 2102 | tc35815_close(struct net_device *dev) |
1949 | { | 2103 | { |
1950 | struct tc35815_local *lp = dev->priv; | 2104 | struct tc35815_local *lp = netdev_priv(dev); |
1951 | 2105 | ||
1952 | netif_stop_queue(dev); | 2106 | netif_stop_queue(dev); |
1953 | #ifdef TC35815_NAPI | 2107 | #ifdef TC35815_NAPI |
1954 | napi_disable(&lp->napi); | 2108 | napi_disable(&lp->napi); |
1955 | #endif | 2109 | #endif |
2110 | if (lp->phy_dev) | ||
2111 | phy_stop(lp->phy_dev); | ||
2112 | cancel_work_sync(&lp->restart_work); | ||
1956 | 2113 | ||
1957 | /* Flush the Tx and disable Rx here. */ | 2114 | /* Flush the Tx and disable Rx here. */ |
1958 | |||
1959 | del_timer(&lp->timer); /* Kill if running */ | ||
1960 | tc35815_chip_reset(dev); | 2115 | tc35815_chip_reset(dev); |
1961 | free_irq(dev->irq, dev); | 2116 | free_irq(dev->irq, dev); |
1962 | 2117 | ||
@@ -1972,34 +2127,30 @@ tc35815_close(struct net_device *dev) | |||
1972 | */ | 2127 | */ |
1973 | static struct net_device_stats *tc35815_get_stats(struct net_device *dev) | 2128 | static struct net_device_stats *tc35815_get_stats(struct net_device *dev) |
1974 | { | 2129 | { |
1975 | struct tc35815_local *lp = dev->priv; | ||
1976 | struct tc35815_regs __iomem *tr = | 2130 | struct tc35815_regs __iomem *tr = |
1977 | (struct tc35815_regs __iomem *)dev->base_addr; | 2131 | (struct tc35815_regs __iomem *)dev->base_addr; |
1978 | if (netif_running(dev)) { | 2132 | if (netif_running(dev)) |
1979 | /* Update the statistics from the device registers. */ | 2133 | /* Update the statistics from the device registers. */ |
1980 | lp->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt); | 2134 | dev->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt); |
1981 | } | ||
1982 | 2135 | ||
1983 | return &lp->stats; | 2136 | return &dev->stats; |
1984 | } | 2137 | } |
1985 | 2138 | ||
1986 | static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) | 2139 | static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) |
1987 | { | 2140 | { |
1988 | struct tc35815_local *lp = dev->priv; | 2141 | struct tc35815_local *lp = netdev_priv(dev); |
1989 | struct tc35815_regs __iomem *tr = | 2142 | struct tc35815_regs __iomem *tr = |
1990 | (struct tc35815_regs __iomem *)dev->base_addr; | 2143 | (struct tc35815_regs __iomem *)dev->base_addr; |
1991 | int cam_index = index * 6; | 2144 | int cam_index = index * 6; |
1992 | u32 cam_data; | 2145 | u32 cam_data; |
1993 | u32 saved_addr; | 2146 | u32 saved_addr; |
2147 | DECLARE_MAC_BUF(mac); | ||
2148 | |||
1994 | saved_addr = tc_readl(&tr->CAM_Adr); | 2149 | saved_addr = tc_readl(&tr->CAM_Adr); |
1995 | 2150 | ||
1996 | if (netif_msg_hw(lp)) { | 2151 | if (netif_msg_hw(lp)) |
1997 | int i; | 2152 | printk(KERN_DEBUG "%s: CAM %d: %s\n", |
1998 | printk(KERN_DEBUG "%s: CAM %d:", dev->name, index); | 2153 | dev->name, index, print_mac(mac, addr)); |
1999 | for (i = 0; i < 6; i++) | ||
2000 | printk(" %02x", addr[i]); | ||
2001 | printk("\n"); | ||
2002 | } | ||
2003 | if (index & 1) { | 2154 | if (index & 1) { |
2004 | /* read modify write */ | 2155 | /* read modify write */ |
2005 | tc_writel(cam_index - 2, &tr->CAM_Adr); | 2156 | tc_writel(cam_index - 2, &tr->CAM_Adr); |
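tc35815_set_cam_entry() (the tail of which is shown above) now logs the address with print_mac() like the other debug paths; the surrounding arithmetic is untouched. The CAM appears to be addressed in aligned 32-bit words while each entry is six bytes long, which is why odd entries begin with a read-modify-write two bytes before their nominal offset. That inference, spelled out as a standalone example rather than taken from a datasheet:

    #include <stdio.h>

    int main(void)
    {
        int index;

        for (index = 0; index < 4; index++) {
            int byte_off = index * 6;       /* each CAM entry is 6 bytes */
            int word_off = byte_off & ~3;   /* aligned word holding the first byte */

            /* for odd entries word_off equals byte_off - 2, matching the
             * read-modify-write of CAM_Adr in the driver */
            printf("entry %d: bytes %2d-%2d, first word at %2d (%s)\n",
                   index, byte_off, byte_off + 5, word_off,
                   (byte_off & 3) ? "read-modify-write" : "aligned");
        }
        return 0;
    }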
@@ -2039,28 +2190,24 @@ tc35815_set_multicast_list(struct net_device *dev) | |||
2039 | struct tc35815_regs __iomem *tr = | 2190 | struct tc35815_regs __iomem *tr = |
2040 | (struct tc35815_regs __iomem *)dev->base_addr; | 2191 | (struct tc35815_regs __iomem *)dev->base_addr; |
2041 | 2192 | ||
2042 | if (dev->flags&IFF_PROMISC) | 2193 | if (dev->flags & IFF_PROMISC) { |
2043 | { | ||
2044 | #ifdef WORKAROUND_100HALF_PROMISC | 2194 | #ifdef WORKAROUND_100HALF_PROMISC |
2045 | /* With some (all?) 100MHalf HUB, controller will hang | 2195 | /* With some (all?) 100MHalf HUB, controller will hang |
2046 | * if we enabled promiscuous mode before linkup... */ | 2196 | * if we enabled promiscuous mode before linkup... */ |
2047 | struct tc35815_local *lp = dev->priv; | 2197 | struct tc35815_local *lp = netdev_priv(dev); |
2048 | int pid = lp->phy_addr; | 2198 | |
2049 | if (!(tc_mdio_read(dev, pid, MII_BMSR) & BMSR_LSTATUS)) | 2199 | if (!lp->link) |
2050 | return; | 2200 | return; |
2051 | #endif | 2201 | #endif |
2052 | /* Enable promiscuous mode */ | 2202 | /* Enable promiscuous mode */ |
2053 | tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); | 2203 | tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); |
2054 | } | 2204 | } else if ((dev->flags & IFF_ALLMULTI) || |
2055 | else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3) | 2205 | dev->mc_count > CAM_ENTRY_MAX - 3) { |
2056 | { | ||
2057 | /* CAM 0, 1, 20 are reserved. */ | 2206 | /* CAM 0, 1, 20 are reserved. */ |
2058 | /* Disable promiscuous mode, use normal mode. */ | 2207 | /* Disable promiscuous mode, use normal mode. */ |
2059 | tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); | 2208 | tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); |
2060 | } | 2209 | } else if (dev->mc_count) { |
2061 | else if(dev->mc_count) | 2210 | struct dev_mc_list *cur_addr = dev->mc_list; |
2062 | { | ||
2063 | struct dev_mc_list* cur_addr = dev->mc_list; | ||
2064 | int i; | 2211 | int i; |
2065 | int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); | 2212 | int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); |
2066 | 2213 | ||
@@ -2075,8 +2222,7 @@ tc35815_set_multicast_list(struct net_device *dev) | |||
2075 | } | 2222 | } |
2076 | tc_writel(ena_bits, &tr->CAM_Ena); | 2223 | tc_writel(ena_bits, &tr->CAM_Ena); |
2077 | tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); | 2224 | tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); |
2078 | } | 2225 | } else { |
2079 | else { | ||
2080 | tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); | 2226 | tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); |
2081 | tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); | 2227 | tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); |
2082 | } | 2228 | } |
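The reindented set_multicast_list keeps the standard decision ladder of this kernel generation: promiscuous mode first (still deferred until link-up because of the 100-half hub workaround), then accept-all-multicast when IFF_ALLMULTI is set or the list would overflow the CAM, then one CAM entry per address walked via the old dev->mc_list chain, and finally station-plus-broadcast only. A skeleton of that ladder with the hardware writes reduced to comments; the slot limit is a placeholder:

    #include <linux/netdevice.h>

    #define EXAMPLE_MC_MAX 18               /* placeholder: filter slots available */

    static void example_set_rx_mode(struct net_device *dev)
    {
        if (dev->flags & IFF_PROMISC) {
            /* accept every frame */
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   dev->mc_count > EXAMPLE_MC_MAX) {
            /* accept all multicast, keep normal unicast filtering */
        } else if (dev->mc_count) {
            struct dev_mc_list *mc;

            for (mc = dev->mc_list; mc; mc = mc->next) {
                /* program mc->dmi_addr into the next free filter slot */
            }
        } else {
            /* own station address and broadcast only */
        }
    }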
@@ -2084,7 +2230,7 @@ tc35815_set_multicast_list(struct net_device *dev) | |||
2084 | 2230 | ||
2085 | static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 2231 | static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2086 | { | 2232 | { |
2087 | struct tc35815_local *lp = dev->priv; | 2233 | struct tc35815_local *lp = netdev_priv(dev); |
2088 | strcpy(info->driver, MODNAME); | 2234 | strcpy(info->driver, MODNAME); |
2089 | strcpy(info->version, DRV_VERSION); | 2235 | strcpy(info->version, DRV_VERSION); |
2090 | strcpy(info->bus_info, pci_name(lp->pci_dev)); | 2236 | strcpy(info->bus_info, pci_name(lp->pci_dev)); |
@@ -2092,78 +2238,37 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * | |||
2092 | 2238 | ||
2093 | static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 2239 | static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2094 | { | 2240 | { |
2095 | struct tc35815_local *lp = dev->priv; | 2241 | struct tc35815_local *lp = netdev_priv(dev); |
2096 | spin_lock_irq(&lp->lock); | ||
2097 | mii_ethtool_gset(&lp->mii, cmd); | ||
2098 | spin_unlock_irq(&lp->lock); | ||
2099 | return 0; | ||
2100 | } | ||
2101 | |||
2102 | static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2103 | { | ||
2104 | struct tc35815_local *lp = dev->priv; | ||
2105 | int rc; | ||
2106 | #if 1 /* use our negotiation method... */ | ||
2107 | /* Verify the settings we care about. */ | ||
2108 | if (cmd->autoneg != AUTONEG_ENABLE && | ||
2109 | cmd->autoneg != AUTONEG_DISABLE) | ||
2110 | return -EINVAL; | ||
2111 | if (cmd->autoneg == AUTONEG_DISABLE && | ||
2112 | ((cmd->speed != SPEED_100 && | ||
2113 | cmd->speed != SPEED_10) || | ||
2114 | (cmd->duplex != DUPLEX_HALF && | ||
2115 | cmd->duplex != DUPLEX_FULL))) | ||
2116 | return -EINVAL; | ||
2117 | 2242 | ||
2118 | /* Ok, do it to it. */ | 2243 | if (!lp->phy_dev) |
2119 | spin_lock_irq(&lp->lock); | 2244 | return -ENODEV; |
2120 | del_timer(&lp->timer); | 2245 | return phy_ethtool_gset(lp->phy_dev, cmd); |
2121 | tc35815_start_auto_negotiation(dev, cmd); | ||
2122 | spin_unlock_irq(&lp->lock); | ||
2123 | rc = 0; | ||
2124 | #else | ||
2125 | spin_lock_irq(&lp->lock); | ||
2126 | rc = mii_ethtool_sset(&lp->mii, cmd); | ||
2127 | spin_unlock_irq(&lp->lock); | ||
2128 | #endif | ||
2129 | return rc; | ||
2130 | } | 2246 | } |
2131 | 2247 | ||
2132 | static int tc35815_nway_reset(struct net_device *dev) | 2248 | static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2133 | { | 2249 | { |
2134 | struct tc35815_local *lp = dev->priv; | 2250 | struct tc35815_local *lp = netdev_priv(dev); |
2135 | int rc; | ||
2136 | spin_lock_irq(&lp->lock); | ||
2137 | rc = mii_nway_restart(&lp->mii); | ||
2138 | spin_unlock_irq(&lp->lock); | ||
2139 | return rc; | ||
2140 | } | ||
2141 | 2251 | ||
2142 | static u32 tc35815_get_link(struct net_device *dev) | 2252 | if (!lp->phy_dev) |
2143 | { | 2253 | return -ENODEV; |
2144 | struct tc35815_local *lp = dev->priv; | 2254 | return phy_ethtool_sset(lp->phy_dev, cmd); |
2145 | int rc; | ||
2146 | spin_lock_irq(&lp->lock); | ||
2147 | rc = mii_link_ok(&lp->mii); | ||
2148 | spin_unlock_irq(&lp->lock); | ||
2149 | return rc; | ||
2150 | } | 2255 | } |
2151 | 2256 | ||
2152 | static u32 tc35815_get_msglevel(struct net_device *dev) | 2257 | static u32 tc35815_get_msglevel(struct net_device *dev) |
2153 | { | 2258 | { |
2154 | struct tc35815_local *lp = dev->priv; | 2259 | struct tc35815_local *lp = netdev_priv(dev); |
2155 | return lp->msg_enable; | 2260 | return lp->msg_enable; |
2156 | } | 2261 | } |
2157 | 2262 | ||
2158 | static void tc35815_set_msglevel(struct net_device *dev, u32 datum) | 2263 | static void tc35815_set_msglevel(struct net_device *dev, u32 datum) |
2159 | { | 2264 | { |
2160 | struct tc35815_local *lp = dev->priv; | 2265 | struct tc35815_local *lp = netdev_priv(dev); |
2161 | lp->msg_enable = datum; | 2266 | lp->msg_enable = datum; |
2162 | } | 2267 | } |
2163 | 2268 | ||
2164 | static int tc35815_get_sset_count(struct net_device *dev, int sset) | 2269 | static int tc35815_get_sset_count(struct net_device *dev, int sset) |
2165 | { | 2270 | { |
2166 | struct tc35815_local *lp = dev->priv; | 2271 | struct tc35815_local *lp = netdev_priv(dev); |
2167 | 2272 | ||
2168 | switch (sset) { | 2273 | switch (sset) { |
2169 | case ETH_SS_STATS: | 2274 | case ETH_SS_STATS: |
@@ -2175,7 +2280,7 @@ static int tc35815_get_sset_count(struct net_device *dev, int sset) | |||
2175 | 2280 | ||
2176 | static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) | 2281 | static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) |
2177 | { | 2282 | { |
2178 | struct tc35815_local *lp = dev->priv; | 2283 | struct tc35815_local *lp = netdev_priv(dev); |
2179 | data[0] = lp->lstats.max_tx_qlen; | 2284 | data[0] = lp->lstats.max_tx_qlen; |
2180 | data[1] = lp->lstats.tx_ints; | 2285 | data[1] = lp->lstats.tx_ints; |
2181 | data[2] = lp->lstats.rx_ints; | 2286 | data[2] = lp->lstats.rx_ints; |
@@ -2200,8 +2305,7 @@ static const struct ethtool_ops tc35815_ethtool_ops = { | |||
2200 | .get_drvinfo = tc35815_get_drvinfo, | 2305 | .get_drvinfo = tc35815_get_drvinfo, |
2201 | .get_settings = tc35815_get_settings, | 2306 | .get_settings = tc35815_get_settings, |
2202 | .set_settings = tc35815_set_settings, | 2307 | .set_settings = tc35815_set_settings, |
2203 | .nway_reset = tc35815_nway_reset, | 2308 | .get_link = ethtool_op_get_link, |
2204 | .get_link = tc35815_get_link, | ||
2205 | .get_msglevel = tc35815_get_msglevel, | 2309 | .get_msglevel = tc35815_get_msglevel, |
2206 | .set_msglevel = tc35815_set_msglevel, | 2310 | .set_msglevel = tc35815_set_msglevel, |
2207 | .get_strings = tc35815_get_strings, | 2311 | .get_strings = tc35815_get_strings, |
@@ -2211,611 +2315,13 @@ static const struct ethtool_ops tc35815_ethtool_ops = { | |||
2211 | 2315 | ||
2212 | static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 2316 | static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2213 | { | 2317 | { |
2214 | struct tc35815_local *lp = dev->priv; | 2318 | struct tc35815_local *lp = netdev_priv(dev); |
2215 | int rc; | ||
2216 | 2319 | ||
2217 | if (!netif_running(dev)) | 2320 | if (!netif_running(dev)) |
2218 | return -EINVAL; | 2321 | return -EINVAL; |
2219 | 2322 | if (!lp->phy_dev) | |
2220 | spin_lock_irq(&lp->lock); | 2323 | return -ENODEV; |
2221 | rc = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); | 2324 | return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd); |
2222 | spin_unlock_irq(&lp->lock); | ||
2223 | |||
2224 | return rc; | ||
2225 | } | ||
2226 | |||
2227 | static int tc_mdio_read(struct net_device *dev, int phy_id, int location) | ||
2228 | { | ||
2229 | struct tc35815_regs __iomem *tr = | ||
2230 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
2231 | u32 data; | ||
2232 | tc_writel(MD_CA_Busy | (phy_id << 5) | location, &tr->MD_CA); | ||
2233 | while (tc_readl(&tr->MD_CA) & MD_CA_Busy) | ||
2234 | ; | ||
2235 | data = tc_readl(&tr->MD_Data); | ||
2236 | return data & 0xffff; | ||
2237 | } | ||
2238 | |||
2239 | static void tc_mdio_write(struct net_device *dev, int phy_id, int location, | ||
2240 | int val) | ||
2241 | { | ||
2242 | struct tc35815_regs __iomem *tr = | ||
2243 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
2244 | tc_writel(val, &tr->MD_Data); | ||
2245 | tc_writel(MD_CA_Busy | MD_CA_Wr | (phy_id << 5) | location, &tr->MD_CA); | ||
2246 | while (tc_readl(&tr->MD_CA) & MD_CA_Busy) | ||
2247 | ; | ||
2248 | } | ||
2249 | |||
2250 | /* Auto negotiation. The scheme is very simple. We have a timer routine | ||
2251 | * that keeps watching the auto negotiation process as it progresses. | ||
2252 | * The DP83840 is first told to start doing it's thing, we set up the time | ||
2253 | * and place the timer state machine in it's initial state. | ||
2254 | * | ||
2255 | * Here the timer peeks at the DP83840 status registers at each click to see | ||
2256 | * if the auto negotiation has completed, we assume here that the DP83840 PHY | ||
2257 | * will time out at some point and just tell us what (didn't) happen. For | ||
2258 | * complete coverage we only allow so many of the ticks at this level to run, | ||
2259 | * when this has expired we print a warning message and try another strategy. | ||
2260 | * This "other" strategy is to force the interface into various speed/duplex | ||
2261 | * configurations and we stop when we see a link-up condition before the | ||
2262 | * maximum number of "peek" ticks have occurred. | ||
2263 | * | ||
2264 | * Once a valid link status has been detected we configure the BigMAC and | ||
2265 | * the rest of the Happy Meal to speak the most efficient protocol we could | ||
2266 | * get a clean link for. The priority for link configurations, highest first | ||
2267 | * is: | ||
2268 | * 100 Base-T Full Duplex | ||
2269 | * 100 Base-T Half Duplex | ||
2270 | * 10 Base-T Full Duplex | ||
2271 | * 10 Base-T Half Duplex | ||
2272 | * | ||
2273 | * We start a new timer now, after a successful auto negotiation status has | ||
2274 | * been detected. This timer just waits for the link-up bit to get set in | ||
2275 | * the BMCR of the DP83840. When this occurs we print a kernel log message | ||
2276 | * describing the link type in use and the fact that it is up. | ||
2277 | * | ||
2278 | * If a fatal error of some sort is signalled and detected in the interrupt | ||
2279 | * service routine, and the chip is reset, or the link is ifconfig'd down | ||
2280 | * and then back up, this entire process repeats itself all over again. | ||
2281 | */ | ||
2282 | /* Note: Above comments are come from sunhme driver. */ | ||
2283 | |||
2284 | static int tc35815_try_next_permutation(struct net_device *dev) | ||
2285 | { | ||
2286 | struct tc35815_local *lp = dev->priv; | ||
2287 | int pid = lp->phy_addr; | ||
2288 | unsigned short bmcr; | ||
2289 | |||
2290 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2291 | |||
2292 | /* Downgrade from full to half duplex. Only possible via ethtool. */ | ||
2293 | if (bmcr & BMCR_FULLDPLX) { | ||
2294 | bmcr &= ~BMCR_FULLDPLX; | ||
2295 | printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr); | ||
2296 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2297 | return 0; | ||
2298 | } | ||
2299 | |||
2300 | /* Downgrade from 100 to 10. */ | ||
2301 | if (bmcr & BMCR_SPEED100) { | ||
2302 | bmcr &= ~BMCR_SPEED100; | ||
2303 | printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr); | ||
2304 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2305 | return 0; | ||
2306 | } | ||
2307 | |||
2308 | /* We've tried everything. */ | ||
2309 | return -1; | ||
2310 | } | ||
2311 | |||
2312 | static void | ||
2313 | tc35815_display_link_mode(struct net_device *dev) | ||
2314 | { | ||
2315 | struct tc35815_local *lp = dev->priv; | ||
2316 | int pid = lp->phy_addr; | ||
2317 | unsigned short lpa, bmcr; | ||
2318 | char *speed = "", *duplex = ""; | ||
2319 | |||
2320 | lpa = tc_mdio_read(dev, pid, MII_LPA); | ||
2321 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2322 | if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL))) | ||
2323 | speed = "100Mb/s"; | ||
2324 | else | ||
2325 | speed = "10Mb/s"; | ||
2326 | if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL))) | ||
2327 | duplex = "Full Duplex"; | ||
2328 | else | ||
2329 | duplex = "Half Duplex"; | ||
2330 | |||
2331 | if (netif_msg_link(lp)) | ||
2332 | printk(KERN_INFO "%s: Link is up at %s, %s.\n", | ||
2333 | dev->name, speed, duplex); | ||
2334 | printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n", | ||
2335 | dev->name, | ||
2336 | bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa); | ||
2337 | } | ||
2338 | |||
2339 | static void tc35815_display_forced_link_mode(struct net_device *dev) | ||
2340 | { | ||
2341 | struct tc35815_local *lp = dev->priv; | ||
2342 | int pid = lp->phy_addr; | ||
2343 | unsigned short bmcr; | ||
2344 | char *speed = "", *duplex = ""; | ||
2345 | |||
2346 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2347 | if (bmcr & BMCR_SPEED100) | ||
2348 | speed = "100Mb/s"; | ||
2349 | else | ||
2350 | speed = "10Mb/s"; | ||
2351 | if (bmcr & BMCR_FULLDPLX) | ||
2352 | duplex = "Full Duplex.\n"; | ||
2353 | else | ||
2354 | duplex = "Half Duplex.\n"; | ||
2355 | |||
2356 | if (netif_msg_link(lp)) | ||
2357 | printk(KERN_INFO "%s: Link has been forced up at %s, %s", | ||
2358 | dev->name, speed, duplex); | ||
2359 | } | ||
2360 | |||
2361 | static void tc35815_set_link_modes(struct net_device *dev) | ||
2362 | { | ||
2363 | struct tc35815_local *lp = dev->priv; | ||
2364 | struct tc35815_regs __iomem *tr = | ||
2365 | (struct tc35815_regs __iomem *)dev->base_addr; | ||
2366 | int pid = lp->phy_addr; | ||
2367 | unsigned short bmcr, lpa; | ||
2368 | int speed; | ||
2369 | |||
2370 | if (lp->timer_state == arbwait) { | ||
2371 | lpa = tc_mdio_read(dev, pid, MII_LPA); | ||
2372 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2373 | printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n", | ||
2374 | dev->name, | ||
2375 | bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa); | ||
2376 | if (!(lpa & (LPA_10HALF | LPA_10FULL | | ||
2377 | LPA_100HALF | LPA_100FULL))) { | ||
2378 | /* fall back to 10HALF */ | ||
2379 | printk(KERN_INFO "%s: bad ability %04x - falling back to 10HD.\n", | ||
2380 | dev->name, lpa); | ||
2381 | lpa = LPA_10HALF; | ||
2382 | } | ||
2383 | if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL))) | ||
2384 | lp->fullduplex = 1; | ||
2385 | else | ||
2386 | lp->fullduplex = 0; | ||
2387 | if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL))) | ||
2388 | speed = 100; | ||
2389 | else | ||
2390 | speed = 10; | ||
2391 | } else { | ||
2392 | /* Forcing a link mode. */ | ||
2393 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2394 | if (bmcr & BMCR_FULLDPLX) | ||
2395 | lp->fullduplex = 1; | ||
2396 | else | ||
2397 | lp->fullduplex = 0; | ||
2398 | if (bmcr & BMCR_SPEED100) | ||
2399 | speed = 100; | ||
2400 | else | ||
2401 | speed = 10; | ||
2402 | } | ||
2403 | |||
2404 | tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_HaltReq, &tr->MAC_Ctl); | ||
2405 | if (lp->fullduplex) { | ||
2406 | tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_FullDup, &tr->MAC_Ctl); | ||
2407 | } else { | ||
2408 | tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_FullDup, &tr->MAC_Ctl); | ||
2409 | } | ||
2410 | tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_HaltReq, &tr->MAC_Ctl); | ||
2411 | |||
2412 | /* TX4939 PCFG.SPEEDn bit will be changed on NETDEV_CHANGE event. */ | ||
2413 | |||
2414 | #ifndef NO_CHECK_CARRIER | ||
2415 | /* TX4939 does not have EnLCarr */ | ||
2416 | if (lp->boardtype != TC35815_TX4939) { | ||
2417 | #ifdef WORKAROUND_LOSTCAR | ||
2418 | /* WORKAROUND: enable LostCrS only if half duplex operation */ | ||
2419 | if (!lp->fullduplex && lp->boardtype != TC35815_TX4939) | ||
2420 | tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, &tr->Tx_Ctl); | ||
2421 | #endif | ||
2422 | } | ||
2423 | #endif | ||
2424 | lp->mii.full_duplex = lp->fullduplex; | ||
2425 | } | ||
2426 | |||
2427 | static void tc35815_timer(unsigned long data) | ||
2428 | { | ||
2429 | struct net_device *dev = (struct net_device *)data; | ||
2430 | struct tc35815_local *lp = dev->priv; | ||
2431 | int pid = lp->phy_addr; | ||
2432 | unsigned short bmsr, bmcr, lpa; | ||
2433 | int restart_timer = 0; | ||
2434 | |||
2435 | spin_lock_irq(&lp->lock); | ||
2436 | |||
2437 | lp->timer_ticks++; | ||
2438 | switch (lp->timer_state) { | ||
2439 | case arbwait: | ||
2440 | /* | ||
2441 | * Only allow for 5 ticks, thats 10 seconds and much too | ||
2442 | * long to wait for arbitration to complete. | ||
2443 | */ | ||
2444 | /* TC35815 need more times... */ | ||
2445 | if (lp->timer_ticks >= 10) { | ||
2446 | /* Enter force mode. */ | ||
2447 | if (!options.doforce) { | ||
2448 | printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful," | ||
2449 | " cable probblem?\n", dev->name); | ||
2450 | /* Try to restart the adaptor. */ | ||
2451 | tc35815_restart(dev); | ||
2452 | goto out; | ||
2453 | } | ||
2454 | printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful," | ||
2455 | " trying force link mode\n", dev->name); | ||
2456 | printk(KERN_DEBUG "%s: BMCR %x BMSR %x\n", dev->name, | ||
2457 | tc_mdio_read(dev, pid, MII_BMCR), | ||
2458 | tc_mdio_read(dev, pid, MII_BMSR)); | ||
2459 | bmcr = BMCR_SPEED100; | ||
2460 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2461 | |||
2462 | /* | ||
2463 | * OK, seems we need do disable the transceiver | ||
2464 | * for the first tick to make sure we get an | ||
2465 | * accurate link state at the second tick. | ||
2466 | */ | ||
2467 | |||
2468 | lp->timer_state = ltrywait; | ||
2469 | lp->timer_ticks = 0; | ||
2470 | restart_timer = 1; | ||
2471 | } else { | ||
2472 | /* Anything interesting happen? */ | ||
2473 | bmsr = tc_mdio_read(dev, pid, MII_BMSR); | ||
2474 | if (bmsr & BMSR_ANEGCOMPLETE) { | ||
2475 | /* Just what we've been waiting for... */ | ||
2476 | tc35815_set_link_modes(dev); | ||
2477 | |||
2478 | /* | ||
2479 | * Success, at least so far, advance our state | ||
2480 | * engine. | ||
2481 | */ | ||
2482 | lp->timer_state = lupwait; | ||
2483 | restart_timer = 1; | ||
2484 | } else { | ||
2485 | restart_timer = 1; | ||
2486 | } | ||
2487 | } | ||
2488 | break; | ||
2489 | |||
2490 | case lupwait: | ||
2491 | /* | ||
2492 | * Auto negotiation was successful and we are awaiting a | ||
2493 | * link up status. I have decided to let this timer run | ||
2494 | * forever until some sort of error is signalled, reporting | ||
2495 | * a message to the user at 10 second intervals. | ||
2496 | */ | ||
2497 | bmsr = tc_mdio_read(dev, pid, MII_BMSR); | ||
2498 | if (bmsr & BMSR_LSTATUS) { | ||
2499 | /* | ||
2500 | * Wheee, it's up, display the link mode in use and put | ||
2501 | * the timer to sleep. | ||
2502 | */ | ||
2503 | tc35815_display_link_mode(dev); | ||
2504 | netif_carrier_on(dev); | ||
2505 | #ifdef WORKAROUND_100HALF_PROMISC | ||
2506 | /* delayed promiscuous enabling */ | ||
2507 | if (dev->flags & IFF_PROMISC) | ||
2508 | tc35815_set_multicast_list(dev); | ||
2509 | #endif | ||
2510 | #if 1 | ||
2511 | lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA); | ||
2512 | lp->timer_state = lcheck; | ||
2513 | restart_timer = 1; | ||
2514 | #else | ||
2515 | lp->timer_state = asleep; | ||
2516 | restart_timer = 0; | ||
2517 | #endif | ||
2518 | } else { | ||
2519 | if (lp->timer_ticks >= 10) { | ||
2520 | printk(KERN_NOTICE "%s: Auto negotiation successful, link still " | ||
2521 | "not completely up.\n", dev->name); | ||
2522 | lp->timer_ticks = 0; | ||
2523 | restart_timer = 1; | ||
2524 | } else { | ||
2525 | restart_timer = 1; | ||
2526 | } | ||
2527 | } | ||
2528 | break; | ||
2529 | |||
2530 | case ltrywait: | ||
2531 | /* | ||
2532 | * Making the timeout here too long can make it take | ||
2533 | * annoyingly long to attempt all of the link mode | ||
2534 | * permutations, but then again this is essentially | ||
2535 | * error recovery code for the most part. | ||
2536 | */ | ||
2537 | bmsr = tc_mdio_read(dev, pid, MII_BMSR); | ||
2538 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2539 | if (lp->timer_ticks == 1) { | ||
2540 | /* | ||
2541 | * Re-enable transceiver, we'll re-enable the | ||
2542 | * transceiver next tick, then check link state | ||
2543 | * on the following tick. | ||
2544 | */ | ||
2545 | restart_timer = 1; | ||
2546 | break; | ||
2547 | } | ||
2548 | if (lp->timer_ticks == 2) { | ||
2549 | restart_timer = 1; | ||
2550 | break; | ||
2551 | } | ||
2552 | if (bmsr & BMSR_LSTATUS) { | ||
2553 | /* Force mode selection success. */ | ||
2554 | tc35815_display_forced_link_mode(dev); | ||
2555 | netif_carrier_on(dev); | ||
2556 | tc35815_set_link_modes(dev); | ||
2557 | #ifdef WORKAROUND_100HALF_PROMISC | ||
2558 | /* delayed promiscuous enabling */ | ||
2559 | if (dev->flags & IFF_PROMISC) | ||
2560 | tc35815_set_multicast_list(dev); | ||
2561 | #endif | ||
2562 | #if 1 | ||
2563 | lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA); | ||
2564 | lp->timer_state = lcheck; | ||
2565 | restart_timer = 1; | ||
2566 | #else | ||
2567 | lp->timer_state = asleep; | ||
2568 | restart_timer = 0; | ||
2569 | #endif | ||
2570 | } else { | ||
2571 | if (lp->timer_ticks >= 4) { /* 6 seconds or so... */ | ||
2572 | int ret; | ||
2573 | |||
2574 | ret = tc35815_try_next_permutation(dev); | ||
2575 | if (ret == -1) { | ||
2576 | /* | ||
2577 | * Aieee, tried them all, reset the | ||
2578 | * chip and try all over again. | ||
2579 | */ | ||
2580 | printk(KERN_NOTICE "%s: Link down, " | ||
2581 | "cable problem?\n", | ||
2582 | dev->name); | ||
2583 | |||
2584 | /* Try to restart the adaptor. */ | ||
2585 | tc35815_restart(dev); | ||
2586 | goto out; | ||
2587 | } | ||
2588 | lp->timer_ticks = 0; | ||
2589 | restart_timer = 1; | ||
2590 | } else { | ||
2591 | restart_timer = 1; | ||
2592 | } | ||
2593 | } | ||
2594 | break; | ||
2595 | |||
2596 | case lcheck: | ||
2597 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2598 | lpa = tc_mdio_read(dev, pid, MII_LPA); | ||
2599 | if (bmcr & (BMCR_PDOWN | BMCR_ISOLATE | BMCR_RESET)) { | ||
2600 | printk(KERN_ERR "%s: PHY down? (BMCR %x)\n", dev->name, | ||
2601 | bmcr); | ||
2602 | } else if ((lp->saved_lpa ^ lpa) & | ||
2603 | (LPA_100FULL|LPA_100HALF|LPA_10FULL|LPA_10HALF)) { | ||
2604 | printk(KERN_NOTICE "%s: link status changed" | ||
2605 | " (BMCR %x LPA %x->%x)\n", dev->name, | ||
2606 | bmcr, lp->saved_lpa, lpa); | ||
2607 | } else { | ||
2608 | /* go on */ | ||
2609 | restart_timer = 1; | ||
2610 | break; | ||
2611 | } | ||
2612 | /* Try to restart the adaptor. */ | ||
2613 | tc35815_restart(dev); | ||
2614 | goto out; | ||
2615 | |||
2616 | case asleep: | ||
2617 | default: | ||
2618 | /* Can't happens.... */ | ||
2619 | printk(KERN_ERR "%s: Aieee, link timer is asleep but we got " | ||
2620 | "one anyways!\n", dev->name); | ||
2621 | restart_timer = 0; | ||
2622 | lp->timer_ticks = 0; | ||
2623 | lp->timer_state = asleep; /* foo on you */ | ||
2624 | break; | ||
2625 | } | ||
2626 | |||
2627 | if (restart_timer) { | ||
2628 | lp->timer.expires = jiffies + msecs_to_jiffies(1200); | ||
2629 | add_timer(&lp->timer); | ||
2630 | } | ||
2631 | out: | ||
2632 | spin_unlock_irq(&lp->lock); | ||
2633 | } | ||
2634 | |||
2635 | static void tc35815_start_auto_negotiation(struct net_device *dev, | ||
2636 | struct ethtool_cmd *ep) | ||
2637 | { | ||
2638 | struct tc35815_local *lp = dev->priv; | ||
2639 | int pid = lp->phy_addr; | ||
2640 | unsigned short bmsr, bmcr, advertize; | ||
2641 | int timeout; | ||
2642 | |||
2643 | netif_carrier_off(dev); | ||
2644 | bmsr = tc_mdio_read(dev, pid, MII_BMSR); | ||
2645 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2646 | advertize = tc_mdio_read(dev, pid, MII_ADVERTISE); | ||
2647 | |||
2648 | if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { | ||
2649 | if (options.speed || options.duplex) { | ||
2650 | /* Advertise only specified configuration. */ | ||
2651 | advertize &= ~(ADVERTISE_10HALF | | ||
2652 | ADVERTISE_10FULL | | ||
2653 | ADVERTISE_100HALF | | ||
2654 | ADVERTISE_100FULL); | ||
2655 | if (options.speed != 10) { | ||
2656 | if (options.duplex != 1) | ||
2657 | advertize |= ADVERTISE_100FULL; | ||
2658 | if (options.duplex != 2) | ||
2659 | advertize |= ADVERTISE_100HALF; | ||
2660 | } | ||
2661 | if (options.speed != 100) { | ||
2662 | if (options.duplex != 1) | ||
2663 | advertize |= ADVERTISE_10FULL; | ||
2664 | if (options.duplex != 2) | ||
2665 | advertize |= ADVERTISE_10HALF; | ||
2666 | } | ||
2667 | if (options.speed == 100) | ||
2668 | bmcr |= BMCR_SPEED100; | ||
2669 | else if (options.speed == 10) | ||
2670 | bmcr &= ~BMCR_SPEED100; | ||
2671 | if (options.duplex == 2) | ||
2672 | bmcr |= BMCR_FULLDPLX; | ||
2673 | else if (options.duplex == 1) | ||
2674 | bmcr &= ~BMCR_FULLDPLX; | ||
2675 | } else { | ||
2676 | /* Advertise everything we can support. */ | ||
2677 | if (bmsr & BMSR_10HALF) | ||
2678 | advertize |= ADVERTISE_10HALF; | ||
2679 | else | ||
2680 | advertize &= ~ADVERTISE_10HALF; | ||
2681 | if (bmsr & BMSR_10FULL) | ||
2682 | advertize |= ADVERTISE_10FULL; | ||
2683 | else | ||
2684 | advertize &= ~ADVERTISE_10FULL; | ||
2685 | if (bmsr & BMSR_100HALF) | ||
2686 | advertize |= ADVERTISE_100HALF; | ||
2687 | else | ||
2688 | advertize &= ~ADVERTISE_100HALF; | ||
2689 | if (bmsr & BMSR_100FULL) | ||
2690 | advertize |= ADVERTISE_100FULL; | ||
2691 | else | ||
2692 | advertize &= ~ADVERTISE_100FULL; | ||
2693 | } | ||
2694 | |||
2695 | tc_mdio_write(dev, pid, MII_ADVERTISE, advertize); | ||
2696 | |||
2697 | /* Enable Auto-Negotiation, this is usually on already... */ | ||
2698 | bmcr |= BMCR_ANENABLE; | ||
2699 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2700 | |||
2701 | /* Restart it to make sure it is going. */ | ||
2702 | bmcr |= BMCR_ANRESTART; | ||
2703 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2704 | printk(KERN_DEBUG "%s: ADVERTISE %x BMCR %x\n", dev->name, advertize, bmcr); | ||
2705 | |||
2706 | /* BMCR_ANRESTART self clears when the process has begun. */ | ||
2707 | timeout = 64; /* More than enough. */ | ||
2708 | while (--timeout) { | ||
2709 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2710 | if (!(bmcr & BMCR_ANRESTART)) | ||
2711 | break; /* got it. */ | ||
2712 | udelay(10); | ||
2713 | } | ||
2714 | if (!timeout) { | ||
2715 | printk(KERN_ERR "%s: TC35815 would not start auto " | ||
2716 | "negotiation BMCR=0x%04x\n", | ||
2717 | dev->name, bmcr); | ||
2718 | printk(KERN_NOTICE "%s: Performing force link " | ||
2719 | "detection.\n", dev->name); | ||
2720 | goto force_link; | ||
2721 | } else { | ||
2722 | printk(KERN_DEBUG "%s: auto negotiation started.\n", dev->name); | ||
2723 | lp->timer_state = arbwait; | ||
2724 | } | ||
2725 | } else { | ||
2726 | force_link: | ||
2727 | /* Force the link up, trying first a particular mode. | ||
2728 | * Either we are here at the request of ethtool or | ||
2729 | * because the Happy Meal would not start to autoneg. | ||
2730 | */ | ||
2731 | |||
2732 | /* Disable auto-negotiation in BMCR, enable the duplex and | ||
2733 | * speed setting, init the timer state machine, and fire it off. | ||
2734 | */ | ||
2735 | if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { | ||
2736 | bmcr = BMCR_SPEED100; | ||
2737 | } else { | ||
2738 | if (ep->speed == SPEED_100) | ||
2739 | bmcr = BMCR_SPEED100; | ||
2740 | else | ||
2741 | bmcr = 0; | ||
2742 | if (ep->duplex == DUPLEX_FULL) | ||
2743 | bmcr |= BMCR_FULLDPLX; | ||
2744 | } | ||
2745 | tc_mdio_write(dev, pid, MII_BMCR, bmcr); | ||
2746 | |||
2747 | /* OK, seems we need do disable the transceiver for the first | ||
2748 | * tick to make sure we get an accurate link state at the | ||
2749 | * second tick. | ||
2750 | */ | ||
2751 | lp->timer_state = ltrywait; | ||
2752 | } | ||
2753 | |||
2754 | del_timer(&lp->timer); | ||
2755 | lp->timer_ticks = 0; | ||
2756 | lp->timer.expires = jiffies + msecs_to_jiffies(1200); | ||
2757 | add_timer(&lp->timer); | ||
2758 | } | ||
2759 | |||
2760 | static void tc35815_find_phy(struct net_device *dev) | ||
2761 | { | ||
2762 | struct tc35815_local *lp = dev->priv; | ||
2763 | int pid = lp->phy_addr; | ||
2764 | unsigned short id0; | ||
2765 | |||
2766 | /* find MII phy */ | ||
2767 | for (pid = 31; pid >= 0; pid--) { | ||
2768 | id0 = tc_mdio_read(dev, pid, MII_BMSR); | ||
2769 | if (id0 != 0xffff && id0 != 0x0000 && | ||
2770 | (id0 & BMSR_RESV) != (0xffff & BMSR_RESV) /* paranoia? */ | ||
2771 | ) { | ||
2772 | lp->phy_addr = pid; | ||
2773 | break; | ||
2774 | } | ||
2775 | } | ||
2776 | if (pid < 0) { | ||
2777 | printk(KERN_ERR "%s: No MII Phy found.\n", | ||
2778 | dev->name); | ||
2779 | lp->phy_addr = pid = 0; | ||
2780 | } | ||
2781 | |||
2782 | lp->mii_id[0] = tc_mdio_read(dev, pid, MII_PHYSID1); | ||
2783 | lp->mii_id[1] = tc_mdio_read(dev, pid, MII_PHYSID2); | ||
2784 | if (netif_msg_hw(lp)) | ||
2785 | printk(KERN_INFO "%s: PHY(%02x) ID %04x %04x\n", dev->name, | ||
2786 | pid, lp->mii_id[0], lp->mii_id[1]); | ||
2787 | } | ||
2788 | |||
2789 | static void tc35815_phy_chip_init(struct net_device *dev) | ||
2790 | { | ||
2791 | struct tc35815_local *lp = dev->priv; | ||
2792 | int pid = lp->phy_addr; | ||
2793 | unsigned short bmcr; | ||
2794 | struct ethtool_cmd ecmd, *ep; | ||
2795 | |||
2796 | /* dis-isolate if needed. */ | ||
2797 | bmcr = tc_mdio_read(dev, pid, MII_BMCR); | ||
2798 | if (bmcr & BMCR_ISOLATE) { | ||
2799 | int count = 32; | ||
2800 | printk(KERN_DEBUG "%s: unisolating...", dev->name); | ||
2801 | tc_mdio_write(dev, pid, MII_BMCR, bmcr & ~BMCR_ISOLATE); | ||
2802 | while (--count) { | ||
2803 | if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_ISOLATE)) | ||
2804 | break; | ||
2805 | udelay(20); | ||
2806 | } | ||
2807 | printk(" %s.\n", count ? "done" : "failed"); | ||
2808 | } | ||
2809 | |||
2810 | if (options.speed && options.duplex) { | ||
2811 | ecmd.autoneg = AUTONEG_DISABLE; | ||
2812 | ecmd.speed = options.speed == 10 ? SPEED_10 : SPEED_100; | ||
2813 | ecmd.duplex = options.duplex == 1 ? DUPLEX_HALF : DUPLEX_FULL; | ||
2814 | ep = &ecmd; | ||
2815 | } else { | ||
2816 | ep = NULL; | ||
2817 | } | ||
2818 | tc35815_start_auto_negotiation(dev, ep); | ||
2819 | } | 2325 | } |
2820 | 2326 | ||
2821 | static void tc35815_chip_reset(struct net_device *dev) | 2327 | static void tc35815_chip_reset(struct net_device *dev) |
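The bulk of this removal is the driver's private PHY management: the MD_CA/MD_Data MDIO accessors, the hand-rolled auto-negotiation and link-watch timer borrowed from sunhme, the forced-mode fallbacks, PHY probing, and the MII ioctl glue. Under phylib the register accessors survive only as mii_bus read/write callbacks registered with mdiobus_register() (presumably done in the probe path elsewhere in this patch), negotiation and link monitoring move into the PHY layer, and the ioctl above becomes a call to phy_mii_ioctl(). A sketch of such a bus registration, assuming the mii_bus callback signatures of this kernel generation and leaving the actual register access as comments:

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static int example_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
    {
        /* write MD_CA with Busy|phy_id|regnum, poll Busy clear, read MD_Data */
        return 0;                           /* placeholder register value */
    }

    static int example_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
    {
        /* write MD_Data, write MD_CA with Busy|Wr|phy_id|regnum, poll Busy clear */
        return 0;
    }

    static int example_register_mdio(struct net_device *dev, struct mii_bus *bus)
    {
        bus->name = "example-mdio";
        bus->priv = dev;                    /* handed back to the callbacks */
        bus->read = example_mdio_read;
        bus->write = example_mdio_write;
        /* fill in the remaining mii_bus fields (id, parent device) here */
        return mdiobus_register(bus);
    }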
@@ -2862,13 +2368,11 @@ static void tc35815_chip_reset(struct net_device *dev) | |||
2862 | 2368 | ||
2863 | static void tc35815_chip_init(struct net_device *dev) | 2369 | static void tc35815_chip_init(struct net_device *dev) |
2864 | { | 2370 | { |
2865 | struct tc35815_local *lp = dev->priv; | 2371 | struct tc35815_local *lp = netdev_priv(dev); |
2866 | struct tc35815_regs __iomem *tr = | 2372 | struct tc35815_regs __iomem *tr = |
2867 | (struct tc35815_regs __iomem *)dev->base_addr; | 2373 | (struct tc35815_regs __iomem *)dev->base_addr; |
2868 | unsigned long txctl = TX_CTL_CMD; | 2374 | unsigned long txctl = TX_CTL_CMD; |
2869 | 2375 | ||
2870 | tc35815_phy_chip_init(dev); | ||
2871 | |||
2872 | /* load station address to CAM */ | 2376 | /* load station address to CAM */ |
2873 | tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); | 2377 | tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); |
2874 | 2378 | ||
@@ -2905,12 +2409,11 @@ static void tc35815_chip_init(struct net_device *dev) | |||
2905 | /* start MAC transmitter */ | 2409 | /* start MAC transmitter */ |
2906 | #ifndef NO_CHECK_CARRIER | 2410 | #ifndef NO_CHECK_CARRIER |
2907 | /* TX4939 does not have EnLCarr */ | 2411 | /* TX4939 does not have EnLCarr */ |
2908 | if (lp->boardtype == TC35815_TX4939) | 2412 | if (lp->chiptype == TC35815_TX4939) |
2909 | txctl &= ~Tx_EnLCarr; | 2413 | txctl &= ~Tx_EnLCarr; |
2910 | #ifdef WORKAROUND_LOSTCAR | 2414 | #ifdef WORKAROUND_LOSTCAR |
2911 | /* WORKAROUND: ignore LostCrS in full duplex operation */ | 2415 | /* WORKAROUND: ignore LostCrS in full duplex operation */ |
2912 | if ((lp->timer_state != asleep && lp->timer_state != lcheck) || | 2416 | if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) |
2913 | lp->fullduplex) | ||
2914 | txctl &= ~Tx_EnLCarr; | 2417 | txctl &= ~Tx_EnLCarr; |
2915 | #endif | 2418 | #endif |
2916 | #endif /* !NO_CHECK_CARRIER */ | 2419 | #endif /* !NO_CHECK_CARRIER */ |
@@ -2924,15 +2427,16 @@ static void tc35815_chip_init(struct net_device *dev) | |||
2924 | static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) | 2427 | static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) |
2925 | { | 2428 | { |
2926 | struct net_device *dev = pci_get_drvdata(pdev); | 2429 | struct net_device *dev = pci_get_drvdata(pdev); |
2927 | struct tc35815_local *lp = dev->priv; | 2430 | struct tc35815_local *lp = netdev_priv(dev); |
2928 | unsigned long flags; | 2431 | unsigned long flags; |
2929 | 2432 | ||
2930 | pci_save_state(pdev); | 2433 | pci_save_state(pdev); |
2931 | if (!netif_running(dev)) | 2434 | if (!netif_running(dev)) |
2932 | return 0; | 2435 | return 0; |
2933 | netif_device_detach(dev); | 2436 | netif_device_detach(dev); |
2437 | if (lp->phy_dev) | ||
2438 | phy_stop(lp->phy_dev); | ||
2934 | spin_lock_irqsave(&lp->lock, flags); | 2439 | spin_lock_irqsave(&lp->lock, flags); |
2935 | del_timer(&lp->timer); /* Kill if running */ | ||
2936 | tc35815_chip_reset(dev); | 2440 | tc35815_chip_reset(dev); |
2937 | spin_unlock_irqrestore(&lp->lock, flags); | 2441 | spin_unlock_irqrestore(&lp->lock, flags); |
2938 | pci_set_power_state(pdev, PCI_D3hot); | 2442 | pci_set_power_state(pdev, PCI_D3hot); |
@@ -2942,16 +2446,15 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2942 | static int tc35815_resume(struct pci_dev *pdev) | 2446 | static int tc35815_resume(struct pci_dev *pdev) |
2943 | { | 2447 | { |
2944 | struct net_device *dev = pci_get_drvdata(pdev); | 2448 | struct net_device *dev = pci_get_drvdata(pdev); |
2945 | struct tc35815_local *lp = dev->priv; | 2449 | struct tc35815_local *lp = netdev_priv(dev); |
2946 | unsigned long flags; | ||
2947 | 2450 | ||
2948 | pci_restore_state(pdev); | 2451 | pci_restore_state(pdev); |
2949 | if (!netif_running(dev)) | 2452 | if (!netif_running(dev)) |
2950 | return 0; | 2453 | return 0; |
2951 | pci_set_power_state(pdev, PCI_D0); | 2454 | pci_set_power_state(pdev, PCI_D0); |
2952 | spin_lock_irqsave(&lp->lock, flags); | ||
2953 | tc35815_restart(dev); | 2455 | tc35815_restart(dev); |
2954 | spin_unlock_irqrestore(&lp->lock, flags); | 2456 | if (lp->phy_dev) |
2457 | phy_start(lp->phy_dev); | ||
2955 | netif_device_attach(dev); | 2458 | netif_device_attach(dev); |
2956 | return 0; | 2459 | return 0; |
2957 | } | 2460 | } |
@@ -2972,8 +2475,6 @@ module_param_named(speed, options.speed, int, 0); | |||
2972 | MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); | 2475 | MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); |
2973 | module_param_named(duplex, options.duplex, int, 0); | 2476 | module_param_named(duplex, options.duplex, int, 0); |
2974 | MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); | 2477 | MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); |
2975 | module_param_named(doforce, options.doforce, int, 0); | ||
2976 | MODULE_PARM_DESC(doforce, "try force link mode if auto-negotiation failed"); | ||
2977 | 2478 | ||
2978 | static int __init tc35815_init_module(void) | 2479 | static int __init tc35815_init_module(void) |
2979 | { | 2480 | { |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 3f69f53d7768..908422f2f320 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -268,7 +268,12 @@ enum t21143_csr6_bits { | |||
268 | #define RX_RING_SIZE 128 | 268 | #define RX_RING_SIZE 128 |
269 | #define MEDIA_MASK 31 | 269 | #define MEDIA_MASK 31 |
270 | 270 | ||
271 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ | 271 | /* The receiver on the DC21143 rev 65 can fail to close the last |
272 | * receive descriptor in certain circumstances (see errata) when | ||
273 | * using MWI. This can only occur if the receive buffer ends on | ||
274 | * a cache line boundary, so the "+ 4" below ensures it doesn't. | ||
275 | */ | ||
276 | #define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */ | ||
272 | 277 | ||
273 | #define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ | 278 | #define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ |
274 | 279 | ||
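The comment added to tulip.h above reasons about cache line boundaries: 1536 is a multiple of every common cache line size, so a receive buffer of exactly 1536 bytes whose start happens to be line-aligned ends exactly on a boundary, while 1536 + 4 cannot. The following standalone sketch is hypothetical and not part of the driver; it only walks through that arithmetic for a few typical line sizes, assuming a line-aligned buffer start.

#include <stdio.h>

int main(void)
{
        /* Common cache line sizes in bytes. 1536 divides evenly by all of
         * them, 1540 by none of them, so a 1540-byte buffer that starts on
         * a line boundary can never end on one. */
        const unsigned int lines[] = { 16, 32, 64, 128 };
        unsigned int i;

        for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
                printf("line %3u: 1536 %% line = %2u, 1540 %% line = %2u\n",
                       lines[i], 1536 % lines[i], 1540 % lines[i]);
        return 0;
}

Compiled and run, every row prints a zero remainder for 1536 and a non-zero one for 1540, which is the property the "+ 4" relies on.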
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 82f404b76d81..fa1c1c329a2d 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -1154,18 +1154,13 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev, | |||
1154 | 1154 | ||
1155 | tp->csr0 = csr0 = 0; | 1155 | tp->csr0 = csr0 = 0; |
1156 | 1156 | ||
1157 | /* if we have any cache line size at all, we can do MRM */ | 1157 | /* if we have any cache line size at all, we can do MRM and MWI */ |
1158 | csr0 |= MRM; | 1158 | csr0 |= MRM | MWI; |
1159 | 1159 | ||
1160 | /* ...and barring hardware bugs, MWI */ | 1160 | /* Enable MWI in the standard PCI command bit. |
1161 | if (!(tp->chip_id == DC21143 && tp->revision == 65)) | 1161 | * Check for the case where MWI is desired but not available |
1162 | csr0 |= MWI; | ||
1163 | |||
1164 | /* set or disable MWI in the standard PCI command bit. | ||
1165 | * Check for the case where mwi is desired but not available | ||
1166 | */ | 1162 | */ |
1167 | if (csr0 & MWI) pci_try_set_mwi(pdev); | 1163 | pci_try_set_mwi(pdev); |
1168 | else pci_clear_mwi(pdev); | ||
1169 | 1164 | ||
1170 | /* read result from hardware (in case bit refused to enable) */ | 1165 | /* read result from hardware (in case bit refused to enable) */ |
1171 | pci_read_config_word(pdev, PCI_COMMAND, &pci_command); | 1166 | pci_read_config_word(pdev, PCI_COMMAND, &pci_command); |
@@ -1401,10 +1396,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1401 | #ifdef CONFIG_TULIP_MWI | 1396 | #ifdef CONFIG_TULIP_MWI |
1402 | if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) | 1397 | if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) |
1403 | tulip_mwi_config (pdev, dev); | 1398 | tulip_mwi_config (pdev, dev); |
1404 | #else | ||
1405 | /* MWI is broken for DC21143 rev 65... */ | ||
1406 | if (chip_idx == DC21143 && pdev->revision == 65) | ||
1407 | tp->csr0 &= ~MWI; | ||
1408 | #endif | 1399 | #endif |
1409 | 1400 | ||
1410 | /* Stop the chip's Tx and Rx processes. */ | 1401 | /* Stop the chip's Tx and Rx processes. */ |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 35d0cfcf8c47..50068194c163 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -107,8 +107,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |||
107 | /* Time in jiffies before concluding the transmitter is hung. */ | 107 | /* Time in jiffies before concluding the transmitter is hung. */ |
108 | #define TX_TIMEOUT (2*HZ) | 108 | #define TX_TIMEOUT (2*HZ) |
109 | 109 | ||
110 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | ||
111 | |||
112 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ | 110 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ |
113 | #include <linux/module.h> | 111 | #include <linux/module.h> |
114 | #include <linux/kernel.h> | 112 | #include <linux/kernel.h> |
@@ -137,6 +135,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |||
137 | 135 | ||
138 | #include "tulip.h" | 136 | #include "tulip.h" |
139 | 137 | ||
138 | #undef PKT_BUF_SZ /* tulip.h also defines this */ | ||
139 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | ||
140 | |||
140 | /* These identify the driver base version and may not be removed. */ | 141 | /* These identify the driver base version and may not be removed. */ |
141 | static char version[] = | 142 | static char version[] = |
142 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" | 143 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" |
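The #undef/#define pair added to winbond-840.c above is the usual way for one translation unit to override a macro pulled in from a shared header without touching the header itself. A throwaway sketch of the same pattern (hypothetical, not driver code):

#include <stdio.h>

#define PKT_BUF_SZ (1536 + 4)   /* stands in for the definition from tulip.h */

#undef PKT_BUF_SZ               /* drop the header's value... */
#define PKT_BUF_SZ 1536         /* ...and use the local one from here on */

int main(void)
{
        printf("PKT_BUF_SZ = %d\n", PKT_BUF_SZ);        /* prints 1536 */
        return 0;
}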
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 0ee4c168e4c0..29a4d650e8a8 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3954,7 +3954,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3954 | if (err) | 3954 | if (err) |
3955 | return -1; | 3955 | return -1; |
3956 | 3956 | ||
3957 | ug_info->mdio_bus = res.start; | 3957 | snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start); |
3958 | } | 3958 | } |
3959 | 3959 | ||
3960 | /* get the phy interface type, or default to MII */ | 3960 | /* get the phy interface type, or default to MII */ |
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 4fb95b3af948..9f8b7580a3a4 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
@@ -1156,7 +1156,7 @@ struct ucc_geth_info { | |||
1156 | u16 pausePeriod; | 1156 | u16 pausePeriod; |
1157 | u16 extensionField; | 1157 | u16 extensionField; |
1158 | u8 phy_address; | 1158 | u8 phy_address; |
1159 | u32 mdio_bus; | 1159 | char mdio_bus[MII_BUS_ID_SIZE]; |
1160 | u8 weightfactor[NUM_TX_QUEUES]; | 1160 | u8 weightfactor[NUM_TX_QUEUES]; |
1161 | u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; | 1161 | u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; |
1162 | u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; | 1162 | u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; |
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index c69e654d539f..e4d3f330bac3 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c | |||
@@ -157,7 +157,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma | |||
157 | if (err) | 157 | if (err) |
158 | goto reg_map_fail; | 158 | goto reg_map_fail; |
159 | 159 | ||
160 | new_bus->id = res.start; | 160 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); |
161 | 161 | ||
162 | new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); | 162 | new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); |
163 | 163 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 9485e363ca11..66f4f12503c9 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -34,59 +34,53 @@ | |||
34 | 34 | ||
35 | #include "qeth_core_mpc.h" | 35 | #include "qeth_core_mpc.h" |
36 | 36 | ||
37 | #define KMSG_COMPONENT "qeth" | ||
38 | |||
37 | /** | 39 | /** |
38 | * Debug Facility stuff | 40 | * Debug Facility stuff |
39 | */ | 41 | */ |
40 | #define QETH_DBF_SETUP_NAME "qeth_setup" | 42 | enum qeth_dbf_names { |
41 | #define QETH_DBF_SETUP_LEN 8 | 43 | QETH_DBF_SETUP, |
42 | #define QETH_DBF_SETUP_PAGES 8 | 44 | QETH_DBF_QERR, |
43 | #define QETH_DBF_SETUP_NR_AREAS 1 | 45 | QETH_DBF_TRACE, |
44 | #define QETH_DBF_SETUP_LEVEL 5 | 46 | QETH_DBF_MSG, |
45 | 47 | QETH_DBF_SENSE, | |
46 | #define QETH_DBF_MISC_NAME "qeth_misc" | 48 | QETH_DBF_MISC, |
47 | #define QETH_DBF_MISC_LEN 128 | 49 | QETH_DBF_CTRL, |
48 | #define QETH_DBF_MISC_PAGES 2 | 50 | QETH_DBF_INFOS /* must be last element */ |
49 | #define QETH_DBF_MISC_NR_AREAS 1 | 51 | }; |
50 | #define QETH_DBF_MISC_LEVEL 2 | 52 | |
51 | 53 | struct qeth_dbf_info { | |
52 | #define QETH_DBF_DATA_NAME "qeth_data" | 54 | char name[DEBUG_MAX_NAME_LEN]; |
53 | #define QETH_DBF_DATA_LEN 96 | 55 | int pages; |
54 | #define QETH_DBF_DATA_PAGES 8 | 56 | int areas; |
55 | #define QETH_DBF_DATA_NR_AREAS 1 | 57 | int len; |
56 | #define QETH_DBF_DATA_LEVEL 2 | 58 | int level; |
57 | 59 | struct debug_view *view; | |
58 | #define QETH_DBF_CONTROL_NAME "qeth_control" | 60 | debug_info_t *id; |
59 | #define QETH_DBF_CONTROL_LEN 256 | 61 | }; |
60 | #define QETH_DBF_CONTROL_PAGES 8 | 62 | |
61 | #define QETH_DBF_CONTROL_NR_AREAS 1 | 63 | #define QETH_DBF_CTRL_LEN 256 |
62 | #define QETH_DBF_CONTROL_LEVEL 5 | ||
63 | |||
64 | #define QETH_DBF_TRACE_NAME "qeth_trace" | ||
65 | #define QETH_DBF_TRACE_LEN 8 | ||
66 | #define QETH_DBF_TRACE_PAGES 4 | ||
67 | #define QETH_DBF_TRACE_NR_AREAS 1 | ||
68 | #define QETH_DBF_TRACE_LEVEL 3 | ||
69 | |||
70 | #define QETH_DBF_SENSE_NAME "qeth_sense" | ||
71 | #define QETH_DBF_SENSE_LEN 64 | ||
72 | #define QETH_DBF_SENSE_PAGES 2 | ||
73 | #define QETH_DBF_SENSE_NR_AREAS 1 | ||
74 | #define QETH_DBF_SENSE_LEVEL 2 | ||
75 | |||
76 | #define QETH_DBF_QERR_NAME "qeth_qerr" | ||
77 | #define QETH_DBF_QERR_LEN 8 | ||
78 | #define QETH_DBF_QERR_PAGES 2 | ||
79 | #define QETH_DBF_QERR_NR_AREAS 1 | ||
80 | #define QETH_DBF_QERR_LEVEL 2 | ||
81 | 64 | ||
82 | #define QETH_DBF_TEXT(name, level, text) \ | 65 | #define QETH_DBF_TEXT(name, level, text) \ |
83 | do { \ | 66 | debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text) |
84 | debug_text_event(qeth_dbf_##name, level, text); \ | ||
85 | } while (0) | ||
86 | 67 | ||
87 | #define QETH_DBF_HEX(name, level, addr, len) \ | 68 | #define QETH_DBF_HEX(name, level, addr, len) \ |
69 | debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len) | ||
70 | |||
71 | #define QETH_DBF_MESSAGE(level, text...) \ | ||
72 | debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) | ||
73 | |||
74 | #define QETH_DBF_TEXT_(name, level, text...) \ | ||
88 | do { \ | 75 | do { \ |
89 | debug_event(qeth_dbf_##name, level, (void *)(addr), len); \ | 76 | if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \ |
77 | char *dbf_txt_buf = \ | ||
78 | get_cpu_var(QETH_DBF_TXT_BUF); \ | ||
79 | sprintf(dbf_txt_buf, text); \ | ||
80 | debug_text_event(qeth_dbf[QETH_DBF_##name].id, \ | ||
81 | level, dbf_txt_buf); \ | ||
82 | put_cpu_var(QETH_DBF_TXT_BUF); \ | ||
83 | } \ | ||
90 | } while (0) | 84 | } while (0) |
91 | 85 | ||
92 | /* Allow to sort out low debug levels early to avoid wasted sprints */ | 86 | /* Allow to sort out low debug levels early to avoid wasted sprints */ |
@@ -826,13 +820,8 @@ void qeth_core_remove_osn_attributes(struct device *); | |||
826 | 820 | ||
827 | /* exports for qeth discipline device drivers */ | 821 | /* exports for qeth discipline device drivers */ |
828 | extern struct qeth_card_list_struct qeth_core_card_list; | 822 | extern struct qeth_card_list_struct qeth_core_card_list; |
829 | extern debug_info_t *qeth_dbf_setup; | 823 | |
830 | extern debug_info_t *qeth_dbf_data; | 824 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; |
831 | extern debug_info_t *qeth_dbf_misc; | ||
832 | extern debug_info_t *qeth_dbf_control; | ||
833 | extern debug_info_t *qeth_dbf_trace; | ||
834 | extern debug_info_t *qeth_dbf_sense; | ||
835 | extern debug_info_t *qeth_dbf_qerr; | ||
836 | 825 | ||
837 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); | 826 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); |
838 | int qeth_threads_running(struct qeth_card *, unsigned long); | 827 | int qeth_threads_running(struct qeth_card *, unsigned long); |
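The QETH_DBF_TEXT_ macro moved into qeth_core.h above only formats its message after qeth_dbf_passes() confirms the debug area would accept the given level, so trace points that are dialed down skip the sprintf into the per-CPU buffer entirely. A minimal user-space analogue of that level gate (hypothetical names, assuming the usual s390 debug-facility convention that an event is recorded when its level does not exceed the area's level, and a GNU-style preprocessor for ##__VA_ARGS__) might look like:

#include <stdio.h>

struct dbf_area {
        int level;              /* highest level this area still records */
};

static int dbf_passes(const struct dbf_area *area, int level)
{
        return level <= area->level;
}

#define DBF_TEXT_(area, level, fmt, ...)                                \
        do {                                                            \
                if (dbf_passes(&(area), (level))) {                     \
                        char buf[64];                                   \
                        /* format only when the event is recorded */    \
                        snprintf(buf, sizeof(buf), fmt, ##__VA_ARGS__); \
                        puts(buf);                                      \
                }                                                       \
        } while (0)

int main(void)
{
        struct dbf_area trace = { .level = 3 };

        DBF_TEXT_(trace, 2, "rc%d", -5);        /* recorded: 2 <= 3 */
        DBF_TEXT_(trace, 6, "noirqpnd");        /* skipped: 6 > 3, no snprintf */
        return 0;
}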
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 95c6fcf58953..055f5c3e7b56 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -26,38 +26,35 @@ | |||
26 | #include "qeth_core.h" | 26 | #include "qeth_core.h" |
27 | #include "qeth_core_offl.h" | 27 | #include "qeth_core_offl.h" |
28 | 28 | ||
29 | #define QETH_DBF_TEXT_(name, level, text...) \ | 29 | static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf); |
30 | do { \ | 30 | #define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf |
31 | if (qeth_dbf_passes(qeth_dbf_##name, level)) { \ | 31 | |
32 | char *dbf_txt_buf = \ | 32 | struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { |
33 | get_cpu_var(qeth_core_dbf_txt_buf); \ | 33 | /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ |
34 | sprintf(dbf_txt_buf, text); \ | 34 | /* N P A M L V H */ |
35 | debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \ | 35 | [QETH_DBF_SETUP] = {"qeth_setup", |
36 | put_cpu_var(qeth_core_dbf_txt_buf); \ | 36 | 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, |
37 | } \ | 37 | [QETH_DBF_QERR] = {"qeth_qerr", |
38 | } while (0) | 38 | 2, 1, 8, 2, &debug_hex_ascii_view, NULL}, |
39 | [QETH_DBF_TRACE] = {"qeth_trace", | ||
40 | 4, 1, 8, 3, &debug_hex_ascii_view, NULL}, | ||
41 | [QETH_DBF_MSG] = {"qeth_msg", | ||
42 | 8, 1, 128, 3, &debug_sprintf_view, NULL}, | ||
43 | [QETH_DBF_SENSE] = {"qeth_sense", | ||
44 | 2, 1, 64, 2, &debug_hex_ascii_view, NULL}, | ||
45 | [QETH_DBF_MISC] = {"qeth_misc", | ||
46 | 2, 1, 256, 2, &debug_hex_ascii_view, NULL}, | ||
47 | [QETH_DBF_CTRL] = {"qeth_control", | ||
48 | 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, | ||
49 | }; | ||
50 | EXPORT_SYMBOL_GPL(qeth_dbf); | ||
39 | 51 | ||
40 | struct qeth_card_list_struct qeth_core_card_list; | 52 | struct qeth_card_list_struct qeth_core_card_list; |
41 | EXPORT_SYMBOL_GPL(qeth_core_card_list); | 53 | EXPORT_SYMBOL_GPL(qeth_core_card_list); |
42 | debug_info_t *qeth_dbf_setup; | ||
43 | EXPORT_SYMBOL_GPL(qeth_dbf_setup); | ||
44 | debug_info_t *qeth_dbf_data; | ||
45 | EXPORT_SYMBOL_GPL(qeth_dbf_data); | ||
46 | debug_info_t *qeth_dbf_misc; | ||
47 | EXPORT_SYMBOL_GPL(qeth_dbf_misc); | ||
48 | debug_info_t *qeth_dbf_control; | ||
49 | EXPORT_SYMBOL_GPL(qeth_dbf_control); | ||
50 | debug_info_t *qeth_dbf_trace; | ||
51 | EXPORT_SYMBOL_GPL(qeth_dbf_trace); | ||
52 | debug_info_t *qeth_dbf_sense; | ||
53 | EXPORT_SYMBOL_GPL(qeth_dbf_sense); | ||
54 | debug_info_t *qeth_dbf_qerr; | ||
55 | EXPORT_SYMBOL_GPL(qeth_dbf_qerr); | ||
56 | 54 | ||
57 | static struct device *qeth_core_root_dev; | 55 | static struct device *qeth_core_root_dev; |
58 | static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; | 56 | static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; |
59 | static struct lock_class_key qdio_out_skb_queue_key; | 57 | static struct lock_class_key qdio_out_skb_queue_key; |
60 | static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf); | ||
61 | 58 | ||
62 | static void qeth_send_control_data_cb(struct qeth_channel *, | 59 | static void qeth_send_control_data_cb(struct qeth_channel *, |
63 | struct qeth_cmd_buffer *); | 60 | struct qeth_cmd_buffer *); |
@@ -219,7 +216,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card) | |||
219 | { | 216 | { |
220 | struct qeth_buffer_pool_entry *pool_entry, *tmp; | 217 | struct qeth_buffer_pool_entry *pool_entry, *tmp; |
221 | 218 | ||
222 | QETH_DBF_TEXT(trace, 5, "clwrklst"); | 219 | QETH_DBF_TEXT(TRACE, 5, "clwrklst"); |
223 | list_for_each_entry_safe(pool_entry, tmp, | 220 | list_for_each_entry_safe(pool_entry, tmp, |
224 | &card->qdio.in_buf_pool.entry_list, list){ | 221 | &card->qdio.in_buf_pool.entry_list, list){ |
225 | list_del(&pool_entry->list); | 222 | list_del(&pool_entry->list); |
@@ -233,7 +230,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) | |||
233 | void *ptr; | 230 | void *ptr; |
234 | int i, j; | 231 | int i, j; |
235 | 232 | ||
236 | QETH_DBF_TEXT(trace, 5, "alocpool"); | 233 | QETH_DBF_TEXT(TRACE, 5, "alocpool"); |
237 | for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { | 234 | for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { |
238 | pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); | 235 | pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); |
239 | if (!pool_entry) { | 236 | if (!pool_entry) { |
@@ -241,7 +238,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) | |||
241 | return -ENOMEM; | 238 | return -ENOMEM; |
242 | } | 239 | } |
243 | for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { | 240 | for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { |
244 | ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA); | 241 | ptr = (void *) __get_free_page(GFP_KERNEL); |
245 | if (!ptr) { | 242 | if (!ptr) { |
246 | while (j > 0) | 243 | while (j > 0) |
247 | free_page((unsigned long) | 244 | free_page((unsigned long) |
@@ -260,7 +257,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) | |||
260 | 257 | ||
261 | int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) | 258 | int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) |
262 | { | 259 | { |
263 | QETH_DBF_TEXT(trace, 2, "realcbp"); | 260 | QETH_DBF_TEXT(TRACE, 2, "realcbp"); |
264 | 261 | ||
265 | if ((card->state != CARD_STATE_DOWN) && | 262 | if ((card->state != CARD_STATE_DOWN) && |
266 | (card->state != CARD_STATE_RECOVER)) | 263 | (card->state != CARD_STATE_RECOVER)) |
@@ -321,7 +318,7 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
321 | int rc; | 318 | int rc; |
322 | struct qeth_cmd_buffer *iob; | 319 | struct qeth_cmd_buffer *iob; |
323 | 320 | ||
324 | QETH_DBF_TEXT(trace, 5, "issnxrd"); | 321 | QETH_DBF_TEXT(TRACE, 5, "issnxrd"); |
325 | if (card->read.state != CH_STATE_UP) | 322 | if (card->read.state != CH_STATE_UP) |
326 | return -EIO; | 323 | return -EIO; |
327 | iob = qeth_get_buffer(&card->read); | 324 | iob = qeth_get_buffer(&card->read); |
@@ -330,7 +327,7 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
330 | return -ENOMEM; | 327 | return -ENOMEM; |
331 | } | 328 | } |
332 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 329 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
333 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); | 330 | QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); |
334 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 331 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
335 | (addr_t) iob, 0, 0); | 332 | (addr_t) iob, 0, 0); |
336 | if (rc) { | 333 | if (rc) { |
@@ -368,19 +365,19 @@ static void qeth_put_reply(struct qeth_reply *reply) | |||
368 | kfree(reply); | 365 | kfree(reply); |
369 | } | 366 | } |
370 | 367 | ||
371 | static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, | 368 | static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, |
372 | struct qeth_card *card) | 369 | struct qeth_card *card) |
373 | { | 370 | { |
374 | int rc; | ||
375 | int com; | ||
376 | char *ipa_name; | 371 | char *ipa_name; |
377 | 372 | int com = cmd->hdr.command; | |
378 | com = cmd->hdr.command; | ||
379 | rc = cmd->hdr.return_code; | ||
380 | ipa_name = qeth_get_ipa_cmd_name(com); | 373 | ipa_name = qeth_get_ipa_cmd_name(com); |
381 | 374 | if (rc) | |
382 | PRINT_ERR("%s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com, | 375 | QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", |
383 | QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc)); | 376 | ipa_name, com, QETH_CARD_IFNAME(card), |
377 | rc, qeth_get_ipa_msg(rc)); | ||
378 | else | ||
379 | QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", | ||
380 | ipa_name, com, QETH_CARD_IFNAME(card)); | ||
384 | } | 381 | } |
385 | 382 | ||
386 | static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | 383 | static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, |
@@ -388,14 +385,14 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
388 | { | 385 | { |
389 | struct qeth_ipa_cmd *cmd = NULL; | 386 | struct qeth_ipa_cmd *cmd = NULL; |
390 | 387 | ||
391 | QETH_DBF_TEXT(trace, 5, "chkipad"); | 388 | QETH_DBF_TEXT(TRACE, 5, "chkipad"); |
392 | if (IS_IPA(iob->data)) { | 389 | if (IS_IPA(iob->data)) { |
393 | cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); | 390 | cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); |
394 | if (IS_IPA_REPLY(cmd)) { | 391 | if (IS_IPA_REPLY(cmd)) { |
395 | if (cmd->hdr.return_code && | 392 | if (cmd->hdr.command < IPA_CMD_SETCCID || |
396 | (cmd->hdr.command < IPA_CMD_SETCCID || | 393 | cmd->hdr.command > IPA_CMD_MODCCID) |
397 | cmd->hdr.command > IPA_CMD_MODCCID)) | 394 | qeth_issue_ipa_msg(cmd, |
398 | qeth_issue_ipa_msg(cmd, card); | 395 | cmd->hdr.return_code, card); |
399 | return cmd; | 396 | return cmd; |
400 | } else { | 397 | } else { |
401 | switch (cmd->hdr.command) { | 398 | switch (cmd->hdr.command) { |
@@ -417,15 +414,16 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
417 | QETH_CARD_IFNAME(card), | 414 | QETH_CARD_IFNAME(card), |
418 | card->info.chpid); | 415 | card->info.chpid); |
419 | netif_carrier_on(card->dev); | 416 | netif_carrier_on(card->dev); |
417 | card->lan_online = 1; | ||
420 | qeth_schedule_recovery(card); | 418 | qeth_schedule_recovery(card); |
421 | return NULL; | 419 | return NULL; |
422 | case IPA_CMD_MODCCID: | 420 | case IPA_CMD_MODCCID: |
423 | return cmd; | 421 | return cmd; |
424 | case IPA_CMD_REGISTER_LOCAL_ADDR: | 422 | case IPA_CMD_REGISTER_LOCAL_ADDR: |
425 | QETH_DBF_TEXT(trace, 3, "irla"); | 423 | QETH_DBF_TEXT(TRACE, 3, "irla"); |
426 | break; | 424 | break; |
427 | case IPA_CMD_UNREGISTER_LOCAL_ADDR: | 425 | case IPA_CMD_UNREGISTER_LOCAL_ADDR: |
428 | QETH_DBF_TEXT(trace, 3, "urla"); | 426 | QETH_DBF_TEXT(TRACE, 3, "urla"); |
429 | break; | 427 | break; |
430 | default: | 428 | default: |
431 | PRINT_WARN("Received data is IPA " | 429 | PRINT_WARN("Received data is IPA " |
@@ -442,7 +440,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) | |||
442 | struct qeth_reply *reply, *r; | 440 | struct qeth_reply *reply, *r; |
443 | unsigned long flags; | 441 | unsigned long flags; |
444 | 442 | ||
445 | QETH_DBF_TEXT(trace, 4, "clipalst"); | 443 | QETH_DBF_TEXT(TRACE, 4, "clipalst"); |
446 | 444 | ||
447 | spin_lock_irqsave(&card->lock, flags); | 445 | spin_lock_irqsave(&card->lock, flags); |
448 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { | 446 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { |
@@ -462,16 +460,16 @@ static int qeth_check_idx_response(unsigned char *buffer) | |||
462 | if (!buffer) | 460 | if (!buffer) |
463 | return 0; | 461 | return 0; |
464 | 462 | ||
465 | QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN); | 463 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); |
466 | if ((buffer[2] & 0xc0) == 0xc0) { | 464 | if ((buffer[2] & 0xc0) == 0xc0) { |
467 | PRINT_WARN("received an IDX TERMINATE " | 465 | PRINT_WARN("received an IDX TERMINATE " |
468 | "with cause code 0x%02x%s\n", | 466 | "with cause code 0x%02x%s\n", |
469 | buffer[4], | 467 | buffer[4], |
470 | ((buffer[4] == 0x22) ? | 468 | ((buffer[4] == 0x22) ? |
471 | " -- try another portname" : "")); | 469 | " -- try another portname" : "")); |
472 | QETH_DBF_TEXT(trace, 2, "ckidxres"); | 470 | QETH_DBF_TEXT(TRACE, 2, "ckidxres"); |
473 | QETH_DBF_TEXT(trace, 2, " idxterm"); | 471 | QETH_DBF_TEXT(TRACE, 2, " idxterm"); |
474 | QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); | 472 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); |
475 | return -EIO; | 473 | return -EIO; |
476 | } | 474 | } |
477 | return 0; | 475 | return 0; |
@@ -482,7 +480,7 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, | |||
482 | { | 480 | { |
483 | struct qeth_card *card; | 481 | struct qeth_card *card; |
484 | 482 | ||
485 | QETH_DBF_TEXT(trace, 4, "setupccw"); | 483 | QETH_DBF_TEXT(TRACE, 4, "setupccw"); |
486 | card = CARD_FROM_CDEV(channel->ccwdev); | 484 | card = CARD_FROM_CDEV(channel->ccwdev); |
487 | if (channel == &card->read) | 485 | if (channel == &card->read) |
488 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); | 486 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); |
@@ -496,7 +494,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) | |||
496 | { | 494 | { |
497 | __u8 index; | 495 | __u8 index; |
498 | 496 | ||
499 | QETH_DBF_TEXT(trace, 6, "getbuff"); | 497 | QETH_DBF_TEXT(TRACE, 6, "getbuff"); |
500 | index = channel->io_buf_no; | 498 | index = channel->io_buf_no; |
501 | do { | 499 | do { |
502 | if (channel->iob[index].state == BUF_STATE_FREE) { | 500 | if (channel->iob[index].state == BUF_STATE_FREE) { |
@@ -517,7 +515,7 @@ void qeth_release_buffer(struct qeth_channel *channel, | |||
517 | { | 515 | { |
518 | unsigned long flags; | 516 | unsigned long flags; |
519 | 517 | ||
520 | QETH_DBF_TEXT(trace, 6, "relbuff"); | 518 | QETH_DBF_TEXT(TRACE, 6, "relbuff"); |
521 | spin_lock_irqsave(&channel->iob_lock, flags); | 519 | spin_lock_irqsave(&channel->iob_lock, flags); |
522 | memset(iob->data, 0, QETH_BUFSIZE); | 520 | memset(iob->data, 0, QETH_BUFSIZE); |
523 | iob->state = BUF_STATE_FREE; | 521 | iob->state = BUF_STATE_FREE; |
@@ -567,7 +565,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, | |||
567 | unsigned long flags; | 565 | unsigned long flags; |
568 | int keep_reply; | 566 | int keep_reply; |
569 | 567 | ||
570 | QETH_DBF_TEXT(trace, 4, "sndctlcb"); | 568 | QETH_DBF_TEXT(TRACE, 4, "sndctlcb"); |
571 | 569 | ||
572 | card = CARD_FROM_CDEV(channel->ccwdev); | 570 | card = CARD_FROM_CDEV(channel->ccwdev); |
573 | if (qeth_check_idx_response(iob->data)) { | 571 | if (qeth_check_idx_response(iob->data)) { |
@@ -637,7 +635,7 @@ static int qeth_setup_channel(struct qeth_channel *channel) | |||
637 | { | 635 | { |
638 | int cnt; | 636 | int cnt; |
639 | 637 | ||
640 | QETH_DBF_TEXT(setup, 2, "setupch"); | 638 | QETH_DBF_TEXT(SETUP, 2, "setupch"); |
641 | for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { | 639 | for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { |
642 | channel->iob[cnt].data = (char *) | 640 | channel->iob[cnt].data = (char *) |
643 | kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); | 641 | kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); |
@@ -731,7 +729,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread); | |||
731 | 729 | ||
732 | void qeth_schedule_recovery(struct qeth_card *card) | 730 | void qeth_schedule_recovery(struct qeth_card *card) |
733 | { | 731 | { |
734 | QETH_DBF_TEXT(trace, 2, "startrec"); | 732 | QETH_DBF_TEXT(TRACE, 2, "startrec"); |
735 | if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) | 733 | if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) |
736 | schedule_work(&card->kernel_thread_starter); | 734 | schedule_work(&card->kernel_thread_starter); |
737 | } | 735 | } |
@@ -749,7 +747,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
749 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 747 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
750 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 748 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
751 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { | 749 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { |
752 | QETH_DBF_TEXT(trace, 2, "CGENCHK"); | 750 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); |
753 | PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", | 751 | PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", |
754 | cdev->dev.bus_id, dstat, cstat); | 752 | cdev->dev.bus_id, dstat, cstat); |
755 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, | 753 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, |
@@ -760,23 +758,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
760 | if (dstat & DEV_STAT_UNIT_CHECK) { | 758 | if (dstat & DEV_STAT_UNIT_CHECK) { |
761 | if (sense[SENSE_RESETTING_EVENT_BYTE] & | 759 | if (sense[SENSE_RESETTING_EVENT_BYTE] & |
762 | SENSE_RESETTING_EVENT_FLAG) { | 760 | SENSE_RESETTING_EVENT_FLAG) { |
763 | QETH_DBF_TEXT(trace, 2, "REVIND"); | 761 | QETH_DBF_TEXT(TRACE, 2, "REVIND"); |
764 | return 1; | 762 | return 1; |
765 | } | 763 | } |
766 | if (sense[SENSE_COMMAND_REJECT_BYTE] & | 764 | if (sense[SENSE_COMMAND_REJECT_BYTE] & |
767 | SENSE_COMMAND_REJECT_FLAG) { | 765 | SENSE_COMMAND_REJECT_FLAG) { |
768 | QETH_DBF_TEXT(trace, 2, "CMDREJi"); | 766 | QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); |
769 | return 0; | 767 | return 0; |
770 | } | 768 | } |
771 | if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { | 769 | if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { |
772 | QETH_DBF_TEXT(trace, 2, "AFFE"); | 770 | QETH_DBF_TEXT(TRACE, 2, "AFFE"); |
773 | return 1; | 771 | return 1; |
774 | } | 772 | } |
775 | if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { | 773 | if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { |
776 | QETH_DBF_TEXT(trace, 2, "ZEROSEN"); | 774 | QETH_DBF_TEXT(TRACE, 2, "ZEROSEN"); |
777 | return 0; | 775 | return 0; |
778 | } | 776 | } |
779 | QETH_DBF_TEXT(trace, 2, "DGENCHK"); | 777 | QETH_DBF_TEXT(TRACE, 2, "DGENCHK"); |
780 | return 1; | 778 | return 1; |
781 | } | 779 | } |
782 | return 0; | 780 | return 0; |
@@ -791,13 +789,13 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
791 | switch (PTR_ERR(irb)) { | 789 | switch (PTR_ERR(irb)) { |
792 | case -EIO: | 790 | case -EIO: |
793 | PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); | 791 | PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); |
794 | QETH_DBF_TEXT(trace, 2, "ckirberr"); | 792 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
795 | QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); | 793 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); |
796 | break; | 794 | break; |
797 | case -ETIMEDOUT: | 795 | case -ETIMEDOUT: |
798 | PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); | 796 | PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); |
799 | QETH_DBF_TEXT(trace, 2, "ckirberr"); | 797 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
800 | QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT); | 798 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); |
801 | if (intparm == QETH_RCD_PARM) { | 799 | if (intparm == QETH_RCD_PARM) { |
802 | struct qeth_card *card = CARD_FROM_CDEV(cdev); | 800 | struct qeth_card *card = CARD_FROM_CDEV(cdev); |
803 | 801 | ||
@@ -810,8 +808,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
810 | default: | 808 | default: |
811 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 809 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), |
812 | cdev->dev.bus_id); | 810 | cdev->dev.bus_id); |
813 | QETH_DBF_TEXT(trace, 2, "ckirberr"); | 811 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
814 | QETH_DBF_TEXT(trace, 2, " rc???"); | 812 | QETH_DBF_TEXT(TRACE, 2, " rc???"); |
815 | } | 813 | } |
816 | return PTR_ERR(irb); | 814 | return PTR_ERR(irb); |
817 | } | 815 | } |
@@ -827,7 +825,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
827 | struct qeth_cmd_buffer *iob; | 825 | struct qeth_cmd_buffer *iob; |
828 | __u8 index; | 826 | __u8 index; |
829 | 827 | ||
830 | QETH_DBF_TEXT(trace, 5, "irq"); | 828 | QETH_DBF_TEXT(TRACE, 5, "irq"); |
831 | 829 | ||
832 | if (__qeth_check_irb_error(cdev, intparm, irb)) | 830 | if (__qeth_check_irb_error(cdev, intparm, irb)) |
833 | return; | 831 | return; |
@@ -840,13 +838,13 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
840 | 838 | ||
841 | if (card->read.ccwdev == cdev) { | 839 | if (card->read.ccwdev == cdev) { |
842 | channel = &card->read; | 840 | channel = &card->read; |
843 | QETH_DBF_TEXT(trace, 5, "read"); | 841 | QETH_DBF_TEXT(TRACE, 5, "read"); |
844 | } else if (card->write.ccwdev == cdev) { | 842 | } else if (card->write.ccwdev == cdev) { |
845 | channel = &card->write; | 843 | channel = &card->write; |
846 | QETH_DBF_TEXT(trace, 5, "write"); | 844 | QETH_DBF_TEXT(TRACE, 5, "write"); |
847 | } else { | 845 | } else { |
848 | channel = &card->data; | 846 | channel = &card->data; |
849 | QETH_DBF_TEXT(trace, 5, "data"); | 847 | QETH_DBF_TEXT(TRACE, 5, "data"); |
850 | } | 848 | } |
851 | atomic_set(&channel->irq_pending, 0); | 849 | atomic_set(&channel->irq_pending, 0); |
852 | 850 | ||
@@ -862,12 +860,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
862 | goto out; | 860 | goto out; |
863 | 861 | ||
864 | if (intparm == QETH_CLEAR_CHANNEL_PARM) { | 862 | if (intparm == QETH_CLEAR_CHANNEL_PARM) { |
865 | QETH_DBF_TEXT(trace, 6, "clrchpar"); | 863 | QETH_DBF_TEXT(TRACE, 6, "clrchpar"); |
866 | /* we don't have to handle this further */ | 864 | /* we don't have to handle this further */ |
867 | intparm = 0; | 865 | intparm = 0; |
868 | } | 866 | } |
869 | if (intparm == QETH_HALT_CHANNEL_PARM) { | 867 | if (intparm == QETH_HALT_CHANNEL_PARM) { |
870 | QETH_DBF_TEXT(trace, 6, "hltchpar"); | 868 | QETH_DBF_TEXT(TRACE, 6, "hltchpar"); |
871 | /* we don't have to handle this further */ | 869 | /* we don't have to handle this further */ |
872 | intparm = 0; | 870 | intparm = 0; |
873 | } | 871 | } |
@@ -953,7 +951,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card) | |||
953 | { | 951 | { |
954 | int i, j; | 952 | int i, j; |
955 | 953 | ||
956 | QETH_DBF_TEXT(trace, 2, "clearqdbf"); | 954 | QETH_DBF_TEXT(TRACE, 2, "clearqdbf"); |
957 | /* clear outbound buffers to free skbs */ | 955 | /* clear outbound buffers to free skbs */ |
958 | for (i = 0; i < card->qdio.no_out_queues; ++i) | 956 | for (i = 0; i < card->qdio.no_out_queues; ++i) |
959 | if (card->qdio.out_qs[i]) { | 957 | if (card->qdio.out_qs[i]) { |
@@ -968,7 +966,7 @@ static void qeth_free_buffer_pool(struct qeth_card *card) | |||
968 | { | 966 | { |
969 | struct qeth_buffer_pool_entry *pool_entry, *tmp; | 967 | struct qeth_buffer_pool_entry *pool_entry, *tmp; |
970 | int i = 0; | 968 | int i = 0; |
971 | QETH_DBF_TEXT(trace, 5, "freepool"); | 969 | QETH_DBF_TEXT(TRACE, 5, "freepool"); |
972 | list_for_each_entry_safe(pool_entry, tmp, | 970 | list_for_each_entry_safe(pool_entry, tmp, |
973 | &card->qdio.init_pool.entry_list, init_list){ | 971 | &card->qdio.init_pool.entry_list, init_list){ |
974 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) | 972 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) |
@@ -982,7 +980,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) | |||
982 | { | 980 | { |
983 | int i, j; | 981 | int i, j; |
984 | 982 | ||
985 | QETH_DBF_TEXT(trace, 2, "freeqdbf"); | 983 | QETH_DBF_TEXT(TRACE, 2, "freeqdbf"); |
986 | if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == | 984 | if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == |
987 | QETH_QDIO_UNINITIALIZED) | 985 | QETH_QDIO_UNINITIALIZED) |
988 | return; | 986 | return; |
@@ -1007,7 +1005,7 @@ static void qeth_clean_channel(struct qeth_channel *channel) | |||
1007 | { | 1005 | { |
1008 | int cnt; | 1006 | int cnt; |
1009 | 1007 | ||
1010 | QETH_DBF_TEXT(setup, 2, "freech"); | 1008 | QETH_DBF_TEXT(SETUP, 2, "freech"); |
1011 | for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) | 1009 | for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) |
1012 | kfree(channel->iob[cnt].data); | 1010 | kfree(channel->iob[cnt].data); |
1013 | } | 1011 | } |
@@ -1027,7 +1025,7 @@ static int qeth_is_1920_device(struct qeth_card *card) | |||
1027 | u8 chpp; | 1025 | u8 chpp; |
1028 | } *chp_dsc; | 1026 | } *chp_dsc; |
1029 | 1027 | ||
1030 | QETH_DBF_TEXT(setup, 2, "chk_1920"); | 1028 | QETH_DBF_TEXT(SETUP, 2, "chk_1920"); |
1031 | 1029 | ||
1032 | ccwdev = card->data.ccwdev; | 1030 | ccwdev = card->data.ccwdev; |
1033 | chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); | 1031 | chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); |
@@ -1036,13 +1034,13 @@ static int qeth_is_1920_device(struct qeth_card *card) | |||
1036 | single_queue = ((chp_dsc->chpp & 0x02) == 0x02); | 1034 | single_queue = ((chp_dsc->chpp & 0x02) == 0x02); |
1037 | kfree(chp_dsc); | 1035 | kfree(chp_dsc); |
1038 | } | 1036 | } |
1039 | QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue); | 1037 | QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue); |
1040 | return single_queue; | 1038 | return single_queue; |
1041 | } | 1039 | } |
1042 | 1040 | ||
1043 | static void qeth_init_qdio_info(struct qeth_card *card) | 1041 | static void qeth_init_qdio_info(struct qeth_card *card) |
1044 | { | 1042 | { |
1045 | QETH_DBF_TEXT(setup, 4, "intqdinf"); | 1043 | QETH_DBF_TEXT(SETUP, 4, "intqdinf"); |
1046 | atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); | 1044 | atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); |
1047 | /* inbound */ | 1045 | /* inbound */ |
1048 | card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; | 1046 | card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; |
@@ -1072,7 +1070,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) | |||
1072 | int rc = 0; | 1070 | int rc = 0; |
1073 | 1071 | ||
1074 | spin_lock_irqsave(&card->thread_mask_lock, flags); | 1072 | spin_lock_irqsave(&card->thread_mask_lock, flags); |
1075 | QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x", | 1073 | QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x", |
1076 | (u8) card->thread_start_mask, | 1074 | (u8) card->thread_start_mask, |
1077 | (u8) card->thread_allowed_mask, | 1075 | (u8) card->thread_allowed_mask, |
1078 | (u8) card->thread_running_mask); | 1076 | (u8) card->thread_running_mask); |
@@ -1085,7 +1083,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) | |||
1085 | { | 1083 | { |
1086 | struct qeth_card *card = container_of(work, struct qeth_card, | 1084 | struct qeth_card *card = container_of(work, struct qeth_card, |
1087 | kernel_thread_starter); | 1085 | kernel_thread_starter); |
1088 | QETH_DBF_TEXT(trace , 2, "strthrd"); | 1086 | QETH_DBF_TEXT(TRACE , 2, "strthrd"); |
1089 | 1087 | ||
1090 | if (card->read.state != CH_STATE_UP && | 1088 | if (card->read.state != CH_STATE_UP && |
1091 | card->write.state != CH_STATE_UP) | 1089 | card->write.state != CH_STATE_UP) |
@@ -1098,8 +1096,8 @@ static void qeth_start_kernel_thread(struct work_struct *work) | |||
1098 | static int qeth_setup_card(struct qeth_card *card) | 1096 | static int qeth_setup_card(struct qeth_card *card) |
1099 | { | 1097 | { |
1100 | 1098 | ||
1101 | QETH_DBF_TEXT(setup, 2, "setupcrd"); | 1099 | QETH_DBF_TEXT(SETUP, 2, "setupcrd"); |
1102 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 1100 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
1103 | 1101 | ||
1104 | card->read.state = CH_STATE_DOWN; | 1102 | card->read.state = CH_STATE_DOWN; |
1105 | card->write.state = CH_STATE_DOWN; | 1103 | card->write.state = CH_STATE_DOWN; |
@@ -1121,7 +1119,7 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1121 | INIT_LIST_HEAD(&card->ip_list); | 1119 | INIT_LIST_HEAD(&card->ip_list); |
1122 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); | 1120 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); |
1123 | if (!card->ip_tbd_list) { | 1121 | if (!card->ip_tbd_list) { |
1124 | QETH_DBF_TEXT(setup, 0, "iptbdnom"); | 1122 | QETH_DBF_TEXT(SETUP, 0, "iptbdnom"); |
1125 | return -ENOMEM; | 1123 | return -ENOMEM; |
1126 | } | 1124 | } |
1127 | INIT_LIST_HEAD(card->ip_tbd_list); | 1125 | INIT_LIST_HEAD(card->ip_tbd_list); |
@@ -1143,11 +1141,11 @@ static struct qeth_card *qeth_alloc_card(void) | |||
1143 | { | 1141 | { |
1144 | struct qeth_card *card; | 1142 | struct qeth_card *card; |
1145 | 1143 | ||
1146 | QETH_DBF_TEXT(setup, 2, "alloccrd"); | 1144 | QETH_DBF_TEXT(SETUP, 2, "alloccrd"); |
1147 | card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); | 1145 | card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); |
1148 | if (!card) | 1146 | if (!card) |
1149 | return NULL; | 1147 | return NULL; |
1150 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 1148 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
1151 | if (qeth_setup_channel(&card->read)) { | 1149 | if (qeth_setup_channel(&card->read)) { |
1152 | kfree(card); | 1150 | kfree(card); |
1153 | return NULL; | 1151 | return NULL; |
@@ -1165,7 +1163,7 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1165 | { | 1163 | { |
1166 | int i = 0; | 1164 | int i = 0; |
1167 | 1165 | ||
1168 | QETH_DBF_TEXT(setup, 2, "detcdtyp"); | 1166 | QETH_DBF_TEXT(SETUP, 2, "detcdtyp"); |
1169 | 1167 | ||
1170 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; | 1168 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; |
1171 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; | 1169 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; |
@@ -1196,7 +1194,7 @@ static int qeth_clear_channel(struct qeth_channel *channel) | |||
1196 | struct qeth_card *card; | 1194 | struct qeth_card *card; |
1197 | int rc; | 1195 | int rc; |
1198 | 1196 | ||
1199 | QETH_DBF_TEXT(trace, 3, "clearch"); | 1197 | QETH_DBF_TEXT(TRACE, 3, "clearch"); |
1200 | card = CARD_FROM_CDEV(channel->ccwdev); | 1198 | card = CARD_FROM_CDEV(channel->ccwdev); |
1201 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1199 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1202 | rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); | 1200 | rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); |
@@ -1220,7 +1218,7 @@ static int qeth_halt_channel(struct qeth_channel *channel) | |||
1220 | struct qeth_card *card; | 1218 | struct qeth_card *card; |
1221 | int rc; | 1219 | int rc; |
1222 | 1220 | ||
1223 | QETH_DBF_TEXT(trace, 3, "haltch"); | 1221 | QETH_DBF_TEXT(TRACE, 3, "haltch"); |
1224 | card = CARD_FROM_CDEV(channel->ccwdev); | 1222 | card = CARD_FROM_CDEV(channel->ccwdev); |
1225 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1223 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1226 | rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); | 1224 | rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); |
@@ -1241,7 +1239,7 @@ static int qeth_halt_channels(struct qeth_card *card) | |||
1241 | { | 1239 | { |
1242 | int rc1 = 0, rc2 = 0, rc3 = 0; | 1240 | int rc1 = 0, rc2 = 0, rc3 = 0; |
1243 | 1241 | ||
1244 | QETH_DBF_TEXT(trace, 3, "haltchs"); | 1242 | QETH_DBF_TEXT(TRACE, 3, "haltchs"); |
1245 | rc1 = qeth_halt_channel(&card->read); | 1243 | rc1 = qeth_halt_channel(&card->read); |
1246 | rc2 = qeth_halt_channel(&card->write); | 1244 | rc2 = qeth_halt_channel(&card->write); |
1247 | rc3 = qeth_halt_channel(&card->data); | 1245 | rc3 = qeth_halt_channel(&card->data); |
@@ -1256,7 +1254,7 @@ static int qeth_clear_channels(struct qeth_card *card) | |||
1256 | { | 1254 | { |
1257 | int rc1 = 0, rc2 = 0, rc3 = 0; | 1255 | int rc1 = 0, rc2 = 0, rc3 = 0; |
1258 | 1256 | ||
1259 | QETH_DBF_TEXT(trace, 3, "clearchs"); | 1257 | QETH_DBF_TEXT(TRACE, 3, "clearchs"); |
1260 | rc1 = qeth_clear_channel(&card->read); | 1258 | rc1 = qeth_clear_channel(&card->read); |
1261 | rc2 = qeth_clear_channel(&card->write); | 1259 | rc2 = qeth_clear_channel(&card->write); |
1262 | rc3 = qeth_clear_channel(&card->data); | 1260 | rc3 = qeth_clear_channel(&card->data); |
@@ -1271,8 +1269,8 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt) | |||
1271 | { | 1269 | { |
1272 | int rc = 0; | 1270 | int rc = 0; |
1273 | 1271 | ||
1274 | QETH_DBF_TEXT(trace, 3, "clhacrd"); | 1272 | QETH_DBF_TEXT(TRACE, 3, "clhacrd"); |
1275 | QETH_DBF_HEX(trace, 3, &card, sizeof(void *)); | 1273 | QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *)); |
1276 | 1274 | ||
1277 | if (halt) | 1275 | if (halt) |
1278 | rc = qeth_halt_channels(card); | 1276 | rc = qeth_halt_channels(card); |
@@ -1285,7 +1283,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1285 | { | 1283 | { |
1286 | int rc = 0; | 1284 | int rc = 0; |
1287 | 1285 | ||
1288 | QETH_DBF_TEXT(trace, 3, "qdioclr"); | 1286 | QETH_DBF_TEXT(TRACE, 3, "qdioclr"); |
1289 | switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, | 1287 | switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, |
1290 | QETH_QDIO_CLEANING)) { | 1288 | QETH_QDIO_CLEANING)) { |
1291 | case QETH_QDIO_ESTABLISHED: | 1289 | case QETH_QDIO_ESTABLISHED: |
@@ -1296,7 +1294,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1296 | rc = qdio_cleanup(CARD_DDEV(card), | 1294 | rc = qdio_cleanup(CARD_DDEV(card), |
1297 | QDIO_FLAG_CLEANUP_USING_CLEAR); | 1295 | QDIO_FLAG_CLEANUP_USING_CLEAR); |
1298 | if (rc) | 1296 | if (rc) |
1299 | QETH_DBF_TEXT_(trace, 3, "1err%d", rc); | 1297 | QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc); |
1300 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); | 1298 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); |
1301 | break; | 1299 | break; |
1302 | case QETH_QDIO_CLEANING: | 1300 | case QETH_QDIO_CLEANING: |
@@ -1306,7 +1304,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1306 | } | 1304 | } |
1307 | rc = qeth_clear_halt_card(card, use_halt); | 1305 | rc = qeth_clear_halt_card(card, use_halt); |
1308 | if (rc) | 1306 | if (rc) |
1309 | QETH_DBF_TEXT_(trace, 3, "2err%d", rc); | 1307 | QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc); |
1310 | card->state = CARD_STATE_DOWN; | 1308 | card->state = CARD_STATE_DOWN; |
1311 | return rc; | 1309 | return rc; |
1312 | } | 1310 | } |
@@ -1366,7 +1364,7 @@ static int qeth_get_unitaddr(struct qeth_card *card) | |||
1366 | char *prcd; | 1364 | char *prcd; |
1367 | int rc; | 1365 | int rc; |
1368 | 1366 | ||
1369 | QETH_DBF_TEXT(setup, 2, "getunit"); | 1367 | QETH_DBF_TEXT(SETUP, 2, "getunit"); |
1370 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); | 1368 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); |
1371 | if (rc) { | 1369 | if (rc) { |
1372 | PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", | 1370 | PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", |
@@ -1427,7 +1425,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1427 | int rc; | 1425 | int rc; |
1428 | struct qeth_card *card; | 1426 | struct qeth_card *card; |
1429 | 1427 | ||
1430 | QETH_DBF_TEXT(setup, 2, "idxanswr"); | 1428 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); |
1431 | card = CARD_FROM_CDEV(channel->ccwdev); | 1429 | card = CARD_FROM_CDEV(channel->ccwdev); |
1432 | iob = qeth_get_buffer(channel); | 1430 | iob = qeth_get_buffer(channel); |
1433 | iob->callback = idx_reply_cb; | 1431 | iob->callback = idx_reply_cb; |
@@ -1437,7 +1435,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1437 | 1435 | ||
1438 | wait_event(card->wait_q, | 1436 | wait_event(card->wait_q, |
1439 | atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); | 1437 | atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); |
1440 | QETH_DBF_TEXT(setup, 6, "noirqpnd"); | 1438 | QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); |
1441 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1439 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1442 | rc = ccw_device_start(channel->ccwdev, | 1440 | rc = ccw_device_start(channel->ccwdev, |
1443 | &channel->ccw, (addr_t) iob, 0, 0); | 1441 | &channel->ccw, (addr_t) iob, 0, 0); |
@@ -1445,7 +1443,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1445 | 1443 | ||
1446 | if (rc) { | 1444 | if (rc) { |
1447 | PRINT_ERR("Error2 in activating channel rc=%d\n", rc); | 1445 | PRINT_ERR("Error2 in activating channel rc=%d\n", rc); |
1448 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 1446 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1449 | atomic_set(&channel->irq_pending, 0); | 1447 | atomic_set(&channel->irq_pending, 0); |
1450 | wake_up(&card->wait_q); | 1448 | wake_up(&card->wait_q); |
1451 | return rc; | 1449 | return rc; |
@@ -1456,7 +1454,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1456 | return rc; | 1454 | return rc; |
1457 | if (channel->state != CH_STATE_UP) { | 1455 | if (channel->state != CH_STATE_UP) { |
1458 | rc = -ETIME; | 1456 | rc = -ETIME; |
1459 | QETH_DBF_TEXT_(setup, 2, "3err%d", rc); | 1457 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
1460 | qeth_clear_cmd_buffers(channel); | 1458 | qeth_clear_cmd_buffers(channel); |
1461 | } else | 1459 | } else |
1462 | rc = 0; | 1460 | rc = 0; |
@@ -1476,7 +1474,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1476 | 1474 | ||
1477 | card = CARD_FROM_CDEV(channel->ccwdev); | 1475 | card = CARD_FROM_CDEV(channel->ccwdev); |
1478 | 1476 | ||
1479 | QETH_DBF_TEXT(setup, 2, "idxactch"); | 1477 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); |
1480 | 1478 | ||
1481 | iob = qeth_get_buffer(channel); | 1479 | iob = qeth_get_buffer(channel); |
1482 | iob->callback = idx_reply_cb; | 1480 | iob->callback = idx_reply_cb; |
@@ -1506,7 +1504,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1506 | 1504 | ||
1507 | wait_event(card->wait_q, | 1505 | wait_event(card->wait_q, |
1508 | atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); | 1506 | atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); |
1509 | QETH_DBF_TEXT(setup, 6, "noirqpnd"); | 1507 | QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); |
1510 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1508 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1511 | rc = ccw_device_start(channel->ccwdev, | 1509 | rc = ccw_device_start(channel->ccwdev, |
1512 | &channel->ccw, (addr_t) iob, 0, 0); | 1510 | &channel->ccw, (addr_t) iob, 0, 0); |
@@ -1514,7 +1512,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1514 | 1512 | ||
1515 | if (rc) { | 1513 | if (rc) { |
1516 | PRINT_ERR("Error1 in activating channel. rc=%d\n", rc); | 1514 | PRINT_ERR("Error1 in activating channel. rc=%d\n", rc); |
1517 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 1515 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1518 | atomic_set(&channel->irq_pending, 0); | 1516 | atomic_set(&channel->irq_pending, 0); |
1519 | wake_up(&card->wait_q); | 1517 | wake_up(&card->wait_q); |
1520 | return rc; | 1518 | return rc; |
@@ -1525,7 +1523,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1525 | return rc; | 1523 | return rc; |
1526 | if (channel->state != CH_STATE_ACTIVATING) { | 1524 | if (channel->state != CH_STATE_ACTIVATING) { |
1527 | PRINT_WARN("IDX activate timed out!\n"); | 1525 | PRINT_WARN("IDX activate timed out!\n"); |
1528 | QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME); | 1526 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); |
1529 | qeth_clear_cmd_buffers(channel); | 1527 | qeth_clear_cmd_buffers(channel); |
1530 | return -ETIME; | 1528 | return -ETIME; |
1531 | } | 1529 | } |
@@ -1547,7 +1545,7 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, | |||
1547 | struct qeth_card *card; | 1545 | struct qeth_card *card; |
1548 | __u16 temp; | 1546 | __u16 temp; |
1549 | 1547 | ||
1550 | QETH_DBF_TEXT(setup , 2, "idxwrcb"); | 1548 | QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); |
1551 | 1549 | ||
1552 | if (channel->state == CH_STATE_DOWN) { | 1550 | if (channel->state == CH_STATE_DOWN) { |
1553 | channel->state = CH_STATE_ACTIVATING; | 1551 | channel->state = CH_STATE_ACTIVATING; |
@@ -1584,7 +1582,7 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1584 | struct qeth_card *card; | 1582 | struct qeth_card *card; |
1585 | __u16 temp; | 1583 | __u16 temp; |
1586 | 1584 | ||
1587 | QETH_DBF_TEXT(setup , 2, "idxrdcb"); | 1585 | QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); |
1588 | if (channel->state == CH_STATE_DOWN) { | 1586 | if (channel->state == CH_STATE_DOWN) { |
1589 | channel->state = CH_STATE_ACTIVATING; | 1587 | channel->state = CH_STATE_ACTIVATING; |
1590 | goto out; | 1588 | goto out; |
@@ -1644,7 +1642,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, | |||
1644 | card->seqno.pdu_hdr++; | 1642 | card->seqno.pdu_hdr++; |
1645 | memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), | 1643 | memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), |
1646 | &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); | 1644 | &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); |
1647 | QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); | 1645 | QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); |
1648 | } | 1646 | } |
1649 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); | 1647 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); |
1650 | 1648 | ||
@@ -1659,11 +1657,11 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1659 | struct qeth_reply *reply = NULL; | 1657 | struct qeth_reply *reply = NULL; |
1660 | unsigned long timeout; | 1658 | unsigned long timeout; |
1661 | 1659 | ||
1662 | QETH_DBF_TEXT(trace, 2, "sendctl"); | 1660 | QETH_DBF_TEXT(TRACE, 2, "sendctl"); |
1663 | 1661 | ||
1664 | reply = qeth_alloc_reply(card); | 1662 | reply = qeth_alloc_reply(card); |
1665 | if (!reply) { | 1663 | if (!reply) { |
1666 | PRINT_WARN("Could no alloc qeth_reply!\n"); | 1664 | PRINT_WARN("Could not alloc qeth_reply!\n"); |
1667 | return -ENOMEM; | 1665 | return -ENOMEM; |
1668 | } | 1666 | } |
1669 | reply->callback = reply_cb; | 1667 | reply->callback = reply_cb; |
@@ -1676,7 +1674,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1676 | spin_lock_irqsave(&card->lock, flags); | 1674 | spin_lock_irqsave(&card->lock, flags); |
1677 | list_add_tail(&reply->list, &card->cmd_waiter_list); | 1675 | list_add_tail(&reply->list, &card->cmd_waiter_list); |
1678 | spin_unlock_irqrestore(&card->lock, flags); | 1676 | spin_unlock_irqrestore(&card->lock, flags); |
1679 | QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); | 1677 | QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); |
1680 | 1678 | ||
1681 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; | 1679 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; |
1682 | qeth_prepare_control_data(card, len, iob); | 1680 | qeth_prepare_control_data(card, len, iob); |
@@ -1686,7 +1684,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1686 | else | 1684 | else |
1687 | timeout = jiffies + QETH_TIMEOUT; | 1685 | timeout = jiffies + QETH_TIMEOUT; |
1688 | 1686 | ||
1689 | QETH_DBF_TEXT(trace, 6, "noirqpnd"); | 1687 | QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); |
1690 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); | 1688 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); |
1691 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, | 1689 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, |
1692 | (addr_t) iob, 0, 0); | 1690 | (addr_t) iob, 0, 0); |
@@ -1694,7 +1692,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1694 | if (rc) { | 1692 | if (rc) { |
1695 | PRINT_WARN("qeth_send_control_data: " | 1693 | PRINT_WARN("qeth_send_control_data: " |
1696 | "ccw_device_start rc = %i\n", rc); | 1694 | "ccw_device_start rc = %i\n", rc); |
1697 | QETH_DBF_TEXT_(trace, 2, " err%d", rc); | 1695 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1698 | spin_lock_irqsave(&card->lock, flags); | 1696 | spin_lock_irqsave(&card->lock, flags); |
1699 | list_del_init(&reply->list); | 1697 | list_del_init(&reply->list); |
1700 | qeth_put_reply(reply); | 1698 | qeth_put_reply(reply); |
@@ -1726,13 +1724,13 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1726 | { | 1724 | { |
1727 | struct qeth_cmd_buffer *iob; | 1725 | struct qeth_cmd_buffer *iob; |
1728 | 1726 | ||
1729 | QETH_DBF_TEXT(setup, 2, "cmenblcb"); | 1727 | QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); |
1730 | 1728 | ||
1731 | iob = (struct qeth_cmd_buffer *) data; | 1729 | iob = (struct qeth_cmd_buffer *) data; |
1732 | memcpy(&card->token.cm_filter_r, | 1730 | memcpy(&card->token.cm_filter_r, |
1733 | QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), | 1731 | QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), |
1734 | QETH_MPC_TOKEN_LENGTH); | 1732 | QETH_MPC_TOKEN_LENGTH); |
1735 | QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); | 1733 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1736 | return 0; | 1734 | return 0; |
1737 | } | 1735 | } |
1738 | 1736 | ||
@@ -1741,7 +1739,7 @@ static int qeth_cm_enable(struct qeth_card *card) | |||
1741 | int rc; | 1739 | int rc; |
1742 | struct qeth_cmd_buffer *iob; | 1740 | struct qeth_cmd_buffer *iob; |
1743 | 1741 | ||
1744 | QETH_DBF_TEXT(setup, 2, "cmenable"); | 1742 | QETH_DBF_TEXT(SETUP, 2, "cmenable"); |
1745 | 1743 | ||
1746 | iob = qeth_wait_for_buffer(&card->write); | 1744 | iob = qeth_wait_for_buffer(&card->write); |
1747 | memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); | 1745 | memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); |
@@ -1761,13 +1759,13 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1761 | 1759 | ||
1762 | struct qeth_cmd_buffer *iob; | 1760 | struct qeth_cmd_buffer *iob; |
1763 | 1761 | ||
1764 | QETH_DBF_TEXT(setup, 2, "cmsetpcb"); | 1762 | QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); |
1765 | 1763 | ||
1766 | iob = (struct qeth_cmd_buffer *) data; | 1764 | iob = (struct qeth_cmd_buffer *) data; |
1767 | memcpy(&card->token.cm_connection_r, | 1765 | memcpy(&card->token.cm_connection_r, |
1768 | QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), | 1766 | QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), |
1769 | QETH_MPC_TOKEN_LENGTH); | 1767 | QETH_MPC_TOKEN_LENGTH); |
1770 | QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); | 1768 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1771 | return 0; | 1769 | return 0; |
1772 | } | 1770 | } |
1773 | 1771 | ||
@@ -1776,7 +1774,7 @@ static int qeth_cm_setup(struct qeth_card *card) | |||
1776 | int rc; | 1774 | int rc; |
1777 | struct qeth_cmd_buffer *iob; | 1775 | struct qeth_cmd_buffer *iob; |
1778 | 1776 | ||
1779 | QETH_DBF_TEXT(setup, 2, "cmsetup"); | 1777 | QETH_DBF_TEXT(SETUP, 2, "cmsetup"); |
1780 | 1778 | ||
1781 | iob = qeth_wait_for_buffer(&card->write); | 1779 | iob = qeth_wait_for_buffer(&card->write); |
1782 | memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); | 1780 | memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); |
@@ -1877,7 +1875,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1877 | __u8 link_type; | 1875 | __u8 link_type; |
1878 | struct qeth_cmd_buffer *iob; | 1876 | struct qeth_cmd_buffer *iob; |
1879 | 1877 | ||
1880 | QETH_DBF_TEXT(setup, 2, "ulpenacb"); | 1878 | QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); |
1881 | 1879 | ||
1882 | iob = (struct qeth_cmd_buffer *) data; | 1880 | iob = (struct qeth_cmd_buffer *) data; |
1883 | memcpy(&card->token.ulp_filter_r, | 1881 | memcpy(&card->token.ulp_filter_r, |
@@ -1888,7 +1886,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1888 | mtu = qeth_get_mtu_outof_framesize(framesize); | 1886 | mtu = qeth_get_mtu_outof_framesize(framesize); |
1889 | if (!mtu) { | 1887 | if (!mtu) { |
1890 | iob->rc = -EINVAL; | 1888 | iob->rc = -EINVAL; |
1891 | QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); | 1889 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1892 | return 0; | 1890 | return 0; |
1893 | } | 1891 | } |
1894 | card->info.max_mtu = mtu; | 1892 | card->info.max_mtu = mtu; |
@@ -1907,7 +1905,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1907 | card->info.link_type = link_type; | 1905 | card->info.link_type = link_type; |
1908 | } else | 1906 | } else |
1909 | card->info.link_type = 0; | 1907 | card->info.link_type = 0; |
1910 | QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); | 1908 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1911 | return 0; | 1909 | return 0; |
1912 | } | 1910 | } |
1913 | 1911 | ||
@@ -1918,7 +1916,7 @@ static int qeth_ulp_enable(struct qeth_card *card) | |||
1918 | struct qeth_cmd_buffer *iob; | 1916 | struct qeth_cmd_buffer *iob; |
1919 | 1917 | ||
1920 | /*FIXME: trace view callbacks*/ | 1918 | /*FIXME: trace view callbacks*/ |
1921 | QETH_DBF_TEXT(setup, 2, "ulpenabl"); | 1919 | QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); |
1922 | 1920 | ||
1923 | iob = qeth_wait_for_buffer(&card->write); | 1921 | iob = qeth_wait_for_buffer(&card->write); |
1924 | memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); | 1922 | memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); |
@@ -1951,13 +1949,13 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1951 | { | 1949 | { |
1952 | struct qeth_cmd_buffer *iob; | 1950 | struct qeth_cmd_buffer *iob; |
1953 | 1951 | ||
1954 | QETH_DBF_TEXT(setup, 2, "ulpstpcb"); | 1952 | QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); |
1955 | 1953 | ||
1956 | iob = (struct qeth_cmd_buffer *) data; | 1954 | iob = (struct qeth_cmd_buffer *) data; |
1957 | memcpy(&card->token.ulp_connection_r, | 1955 | memcpy(&card->token.ulp_connection_r, |
1958 | QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), | 1956 | QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), |
1959 | QETH_MPC_TOKEN_LENGTH); | 1957 | QETH_MPC_TOKEN_LENGTH); |
1960 | QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); | 1958 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1961 | return 0; | 1959 | return 0; |
1962 | } | 1960 | } |
1963 | 1961 | ||
@@ -1968,7 +1966,7 @@ static int qeth_ulp_setup(struct qeth_card *card) | |||
1968 | struct qeth_cmd_buffer *iob; | 1966 | struct qeth_cmd_buffer *iob; |
1969 | struct ccw_dev_id dev_id; | 1967 | struct ccw_dev_id dev_id; |
1970 | 1968 | ||
1971 | QETH_DBF_TEXT(setup, 2, "ulpsetup"); | 1969 | QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); |
1972 | 1970 | ||
1973 | iob = qeth_wait_for_buffer(&card->write); | 1971 | iob = qeth_wait_for_buffer(&card->write); |
1974 | memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); | 1972 | memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); |
@@ -1993,18 +1991,18 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
1993 | { | 1991 | { |
1994 | int i, j; | 1992 | int i, j; |
1995 | 1993 | ||
1996 | QETH_DBF_TEXT(setup, 2, "allcqdbf"); | 1994 | QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); |
1997 | 1995 | ||
1998 | if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, | 1996 | if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, |
1999 | QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) | 1997 | QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) |
2000 | return 0; | 1998 | return 0; |
2001 | 1999 | ||
2002 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), | 2000 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), |
2003 | GFP_KERNEL|GFP_DMA); | 2001 | GFP_KERNEL); |
2004 | if (!card->qdio.in_q) | 2002 | if (!card->qdio.in_q) |
2005 | goto out_nomem; | 2003 | goto out_nomem; |
2006 | QETH_DBF_TEXT(setup, 2, "inq"); | 2004 | QETH_DBF_TEXT(SETUP, 2, "inq"); |
2007 | QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *)); | 2005 | QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *)); |
2008 | memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); | 2006 | memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); |
2009 | /* give inbound qeth_qdio_buffers their qdio_buffers */ | 2007 | /* give inbound qeth_qdio_buffers their qdio_buffers */ |
2010 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) | 2008 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) |
@@ -2021,11 +2019,11 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
2021 | goto out_freepool; | 2019 | goto out_freepool; |
2022 | for (i = 0; i < card->qdio.no_out_queues; ++i) { | 2020 | for (i = 0; i < card->qdio.no_out_queues; ++i) { |
2023 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), | 2021 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), |
2024 | GFP_KERNEL|GFP_DMA); | 2022 | GFP_KERNEL); |
2025 | if (!card->qdio.out_qs[i]) | 2023 | if (!card->qdio.out_qs[i]) |
2026 | goto out_freeoutq; | 2024 | goto out_freeoutq; |
2027 | QETH_DBF_TEXT_(setup, 2, "outq %i", i); | 2025 | QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); |
2028 | QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *)); | 2026 | QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); |
2029 | memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); | 2027 | memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); |
2030 | card->qdio.out_qs[i]->queue_no = i; | 2028 | card->qdio.out_qs[i]->queue_no = i; |
2031 | /* give outbound qeth_qdio_buffers their qdio_buffers */ | 2029 | /* give outbound qeth_qdio_buffers their qdio_buffers */ |
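
Both queue structures lose the GFP_DMA flag here; they are ordinary kernel memory and are zeroed explicitly right after allocation. As an aside (not something this patch does), each kmalloc + memset pair could be collapsed into a single kzalloc:

	card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
	if (!card->qdio.in_q)
		goto out_nomem;
	/* no separate memset() needed: kzalloc() returns zeroed memory */
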
@@ -2085,7 +2083,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card, | |||
2085 | 2083 | ||
2086 | static int qeth_qdio_activate(struct qeth_card *card) | 2084 | static int qeth_qdio_activate(struct qeth_card *card) |
2087 | { | 2085 | { |
2088 | QETH_DBF_TEXT(setup, 3, "qdioact"); | 2086 | QETH_DBF_TEXT(SETUP, 3, "qdioact"); |
2089 | return qdio_activate(CARD_DDEV(card), 0); | 2087 | return qdio_activate(CARD_DDEV(card), 0); |
2090 | } | 2088 | } |
2091 | 2089 | ||
@@ -2094,7 +2092,7 @@ static int qeth_dm_act(struct qeth_card *card) | |||
2094 | int rc; | 2092 | int rc; |
2095 | struct qeth_cmd_buffer *iob; | 2093 | struct qeth_cmd_buffer *iob; |
2096 | 2094 | ||
2097 | QETH_DBF_TEXT(setup, 2, "dmact"); | 2095 | QETH_DBF_TEXT(SETUP, 2, "dmact"); |
2098 | 2096 | ||
2099 | iob = qeth_wait_for_buffer(&card->write); | 2097 | iob = qeth_wait_for_buffer(&card->write); |
2100 | memcpy(iob->data, DM_ACT, DM_ACT_SIZE); | 2098 | memcpy(iob->data, DM_ACT, DM_ACT_SIZE); |
@@ -2111,52 +2109,52 @@ static int qeth_mpc_initialize(struct qeth_card *card) | |||
2111 | { | 2109 | { |
2112 | int rc; | 2110 | int rc; |
2113 | 2111 | ||
2114 | QETH_DBF_TEXT(setup, 2, "mpcinit"); | 2112 | QETH_DBF_TEXT(SETUP, 2, "mpcinit"); |
2115 | 2113 | ||
2116 | rc = qeth_issue_next_read(card); | 2114 | rc = qeth_issue_next_read(card); |
2117 | if (rc) { | 2115 | if (rc) { |
2118 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 2116 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
2119 | return rc; | 2117 | return rc; |
2120 | } | 2118 | } |
2121 | rc = qeth_cm_enable(card); | 2119 | rc = qeth_cm_enable(card); |
2122 | if (rc) { | 2120 | if (rc) { |
2123 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 2121 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
2124 | goto out_qdio; | 2122 | goto out_qdio; |
2125 | } | 2123 | } |
2126 | rc = qeth_cm_setup(card); | 2124 | rc = qeth_cm_setup(card); |
2127 | if (rc) { | 2125 | if (rc) { |
2128 | QETH_DBF_TEXT_(setup, 2, "3err%d", rc); | 2126 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
2129 | goto out_qdio; | 2127 | goto out_qdio; |
2130 | } | 2128 | } |
2131 | rc = qeth_ulp_enable(card); | 2129 | rc = qeth_ulp_enable(card); |
2132 | if (rc) { | 2130 | if (rc) { |
2133 | QETH_DBF_TEXT_(setup, 2, "4err%d", rc); | 2131 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); |
2134 | goto out_qdio; | 2132 | goto out_qdio; |
2135 | } | 2133 | } |
2136 | rc = qeth_ulp_setup(card); | 2134 | rc = qeth_ulp_setup(card); |
2137 | if (rc) { | 2135 | if (rc) { |
2138 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); | 2136 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
2139 | goto out_qdio; | 2137 | goto out_qdio; |
2140 | } | 2138 | } |
2141 | rc = qeth_alloc_qdio_buffers(card); | 2139 | rc = qeth_alloc_qdio_buffers(card); |
2142 | if (rc) { | 2140 | if (rc) { |
2143 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); | 2141 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
2144 | goto out_qdio; | 2142 | goto out_qdio; |
2145 | } | 2143 | } |
2146 | rc = qeth_qdio_establish(card); | 2144 | rc = qeth_qdio_establish(card); |
2147 | if (rc) { | 2145 | if (rc) { |
2148 | QETH_DBF_TEXT_(setup, 2, "6err%d", rc); | 2146 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); |
2149 | qeth_free_qdio_buffers(card); | 2147 | qeth_free_qdio_buffers(card); |
2150 | goto out_qdio; | 2148 | goto out_qdio; |
2151 | } | 2149 | } |
2152 | rc = qeth_qdio_activate(card); | 2150 | rc = qeth_qdio_activate(card); |
2153 | if (rc) { | 2151 | if (rc) { |
2154 | QETH_DBF_TEXT_(setup, 2, "7err%d", rc); | 2152 | QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); |
2155 | goto out_qdio; | 2153 | goto out_qdio; |
2156 | } | 2154 | } |
2157 | rc = qeth_dm_act(card); | 2155 | rc = qeth_dm_act(card); |
2158 | if (rc) { | 2156 | if (rc) { |
2159 | QETH_DBF_TEXT_(setup, 2, "8err%d", rc); | 2157 | QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); |
2160 | goto out_qdio; | 2158 | goto out_qdio; |
2161 | } | 2159 | } |
2162 | 2160 | ||
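
qeth_mpc_initialize runs a strict bring-up ladder -- issue_next_read, CM enable/setup, ULP enable/setup, QDIO buffer allocation, establish, activate, DM activate -- and tags every failure with a numbered SETUP entry. Purely as an illustration of that ordering (the driver keeps the explicit if (rc) ladder shown above), the sequence could be expressed as a table of steps; each function takes a struct qeth_card * and returns 0 on success, nonzero on error:

	static int (*const qeth_mpc_steps[])(struct qeth_card *) = {
		qeth_issue_next_read, qeth_cm_enable, qeth_cm_setup,
		qeth_ulp_enable, qeth_ulp_setup, qeth_alloc_qdio_buffers,
		qeth_qdio_establish, qeth_qdio_activate, qeth_dm_act,
	};
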
@@ -2260,7 +2258,7 @@ EXPORT_SYMBOL_GPL(qeth_print_status_message); | |||
2260 | void qeth_put_buffer_pool_entry(struct qeth_card *card, | 2258 | void qeth_put_buffer_pool_entry(struct qeth_card *card, |
2261 | struct qeth_buffer_pool_entry *entry) | 2259 | struct qeth_buffer_pool_entry *entry) |
2262 | { | 2260 | { |
2263 | QETH_DBF_TEXT(trace, 6, "ptbfplen"); | 2261 | QETH_DBF_TEXT(TRACE, 6, "ptbfplen"); |
2264 | list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); | 2262 | list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); |
2265 | } | 2263 | } |
2266 | EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry); | 2264 | EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry); |
@@ -2269,7 +2267,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) | |||
2269 | { | 2267 | { |
2270 | struct qeth_buffer_pool_entry *entry; | 2268 | struct qeth_buffer_pool_entry *entry; |
2271 | 2269 | ||
2272 | QETH_DBF_TEXT(trace, 5, "inwrklst"); | 2270 | QETH_DBF_TEXT(TRACE, 5, "inwrklst"); |
2273 | 2271 | ||
2274 | list_for_each_entry(entry, | 2272 | list_for_each_entry(entry, |
2275 | &card->qdio.init_pool.entry_list, init_list) { | 2273 | &card->qdio.init_pool.entry_list, init_list) { |
@@ -2308,7 +2306,7 @@ static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( | |||
2308 | struct qeth_buffer_pool_entry, list); | 2306 | struct qeth_buffer_pool_entry, list); |
2309 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { | 2307 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { |
2310 | if (page_count(virt_to_page(entry->elements[i])) > 1) { | 2308 | if (page_count(virt_to_page(entry->elements[i])) > 1) { |
2311 | page = alloc_page(GFP_ATOMIC|GFP_DMA); | 2309 | page = alloc_page(GFP_ATOMIC); |
2312 | if (!page) { | 2310 | if (!page) { |
2313 | return NULL; | 2311 | return NULL; |
2314 | } else { | 2312 | } else { |
@@ -2358,7 +2356,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) | |||
2358 | int i, j; | 2356 | int i, j; |
2359 | int rc; | 2357 | int rc; |
2360 | 2358 | ||
2361 | QETH_DBF_TEXT(setup, 2, "initqdqs"); | 2359 | QETH_DBF_TEXT(SETUP, 2, "initqdqs"); |
2362 | 2360 | ||
2363 | /* inbound queue */ | 2361 | /* inbound queue */ |
2364 | memset(card->qdio.in_q->qdio_bufs, 0, | 2362 | memset(card->qdio.in_q->qdio_bufs, 0, |
@@ -2372,12 +2370,12 @@ int qeth_init_qdio_queues(struct qeth_card *card) | |||
2372 | rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, | 2370 | rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, |
2373 | card->qdio.in_buf_pool.buf_count - 1, NULL); | 2371 | card->qdio.in_buf_pool.buf_count - 1, NULL); |
2374 | if (rc) { | 2372 | if (rc) { |
2375 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 2373 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
2376 | return rc; | 2374 | return rc; |
2377 | } | 2375 | } |
2378 | rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0); | 2376 | rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0); |
2379 | if (rc) { | 2377 | if (rc) { |
2380 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 2378 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
2381 | return rc; | 2379 | return rc; |
2382 | } | 2380 | } |
2383 | /* outbound queue */ | 2381 | /* outbound queue */ |
@@ -2461,11 +2459,8 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2461 | { | 2459 | { |
2462 | int rc; | 2460 | int rc; |
2463 | char prot_type; | 2461 | char prot_type; |
2464 | int cmd; | ||
2465 | cmd = ((struct qeth_ipa_cmd *) | ||
2466 | (iob->data+IPA_PDU_HEADER_SIZE))->hdr.command; | ||
2467 | 2462 | ||
2468 | QETH_DBF_TEXT(trace, 4, "sendipa"); | 2463 | QETH_DBF_TEXT(TRACE, 4, "sendipa"); |
2469 | 2464 | ||
2470 | if (card->options.layer2) | 2465 | if (card->options.layer2) |
2471 | if (card->info.type == QETH_CARD_TYPE_OSN) | 2466 | if (card->info.type == QETH_CARD_TYPE_OSN) |
@@ -2475,14 +2470,8 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2475 | else | 2470 | else |
2476 | prot_type = QETH_PROT_TCPIP; | 2471 | prot_type = QETH_PROT_TCPIP; |
2477 | qeth_prepare_ipa_cmd(card, iob, prot_type); | 2472 | qeth_prepare_ipa_cmd(card, iob, prot_type); |
2478 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, | 2473 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, |
2479 | reply_cb, reply_param); | 2474 | iob, reply_cb, reply_param); |
2480 | if (rc != 0) { | ||
2481 | char *ipa_cmd_name; | ||
2482 | ipa_cmd_name = qeth_get_ipa_cmd_name(cmd); | ||
2483 | PRINT_ERR("%s %s(%x) returned %s(%x)\n", __FUNCTION__, | ||
2484 | ipa_cmd_name, cmd, qeth_get_ipa_msg(rc), rc); | ||
2485 | } | ||
2486 | return rc; | 2475 | return rc; |
2487 | } | 2476 | } |
2488 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); | 2477 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); |
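
Besides the TRACE rename, this hunk drops the local cmd lookup and the PRINT_ERR that translated the IPA command and return code into text, so qeth_send_ipa_cmd now simply propagates rc. A caller that still wants a record of a failed IPA command can log it itself; a hedged example using the same DBF macros, not code from the patch:

	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	if (rc)
		QETH_DBF_TEXT_(TRACE, 2, "err%d", rc);
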
@@ -2503,7 +2492,7 @@ int qeth_send_startlan(struct qeth_card *card) | |||
2503 | { | 2492 | { |
2504 | int rc; | 2493 | int rc; |
2505 | 2494 | ||
2506 | QETH_DBF_TEXT(setup, 2, "strtlan"); | 2495 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); |
2507 | 2496 | ||
2508 | rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); | 2497 | rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); |
2509 | return rc; | 2498 | return rc; |
@@ -2519,7 +2508,7 @@ int qeth_send_stoplan(struct qeth_card *card) | |||
2519 | * TCP/IP (we!) never issue a STOPLAN | 2508 | * TCP/IP (we!) never issue a STOPLAN |
2520 | * is this right ?!? | 2509 | * is this right ?!? |
2521 | */ | 2510 | */ |
2522 | QETH_DBF_TEXT(setup, 2, "stoplan"); | 2511 | QETH_DBF_TEXT(SETUP, 2, "stoplan"); |
2523 | 2512 | ||
2524 | rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0); | 2513 | rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0); |
2525 | return rc; | 2514 | return rc; |
@@ -2531,7 +2520,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card, | |||
2531 | { | 2520 | { |
2532 | struct qeth_ipa_cmd *cmd; | 2521 | struct qeth_ipa_cmd *cmd; |
2533 | 2522 | ||
2534 | QETH_DBF_TEXT(trace, 4, "defadpcb"); | 2523 | QETH_DBF_TEXT(TRACE, 4, "defadpcb"); |
2535 | 2524 | ||
2536 | cmd = (struct qeth_ipa_cmd *) data; | 2525 | cmd = (struct qeth_ipa_cmd *) data; |
2537 | if (cmd->hdr.return_code == 0) | 2526 | if (cmd->hdr.return_code == 0) |
@@ -2546,7 +2535,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, | |||
2546 | { | 2535 | { |
2547 | struct qeth_ipa_cmd *cmd; | 2536 | struct qeth_ipa_cmd *cmd; |
2548 | 2537 | ||
2549 | QETH_DBF_TEXT(trace, 3, "quyadpcb"); | 2538 | QETH_DBF_TEXT(TRACE, 3, "quyadpcb"); |
2550 | 2539 | ||
2551 | cmd = (struct qeth_ipa_cmd *) data; | 2540 | cmd = (struct qeth_ipa_cmd *) data; |
2552 | if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) | 2541 | if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) |
@@ -2580,7 +2569,7 @@ int qeth_query_setadapterparms(struct qeth_card *card) | |||
2580 | int rc; | 2569 | int rc; |
2581 | struct qeth_cmd_buffer *iob; | 2570 | struct qeth_cmd_buffer *iob; |
2582 | 2571 | ||
2583 | QETH_DBF_TEXT(trace, 3, "queryadp"); | 2572 | QETH_DBF_TEXT(TRACE, 3, "queryadp"); |
2584 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, | 2573 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, |
2585 | sizeof(struct qeth_ipacmd_setadpparms)); | 2574 | sizeof(struct qeth_ipacmd_setadpparms)); |
2586 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); | 2575 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); |
@@ -2592,14 +2581,14 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, | |||
2592 | unsigned int siga_error, const char *dbftext) | 2581 | unsigned int siga_error, const char *dbftext) |
2593 | { | 2582 | { |
2594 | if (qdio_error || siga_error) { | 2583 | if (qdio_error || siga_error) { |
2595 | QETH_DBF_TEXT(trace, 2, dbftext); | 2584 | QETH_DBF_TEXT(TRACE, 2, dbftext); |
2596 | QETH_DBF_TEXT(qerr, 2, dbftext); | 2585 | QETH_DBF_TEXT(QERR, 2, dbftext); |
2597 | QETH_DBF_TEXT_(qerr, 2, " F15=%02X", | 2586 | QETH_DBF_TEXT_(QERR, 2, " F15=%02X", |
2598 | buf->element[15].flags & 0xff); | 2587 | buf->element[15].flags & 0xff); |
2599 | QETH_DBF_TEXT_(qerr, 2, " F14=%02X", | 2588 | QETH_DBF_TEXT_(QERR, 2, " F14=%02X", |
2600 | buf->element[14].flags & 0xff); | 2589 | buf->element[14].flags & 0xff); |
2601 | QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error); | 2590 | QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); |
2602 | QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error); | 2591 | QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error); |
2603 | return 1; | 2592 | return 1; |
2604 | } | 2593 | } |
2605 | return 0; | 2594 | return 0; |
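
qeth_check_qdio_errors records any QDIO or SIGA error together with SBAL element flags 15 and 14 in both the TRACE area and the dedicated QERR area, and returns 1 when something was wrong. A caller-side sketch (the tag string and the reaction are hypothetical; the outbound path further down passes "qouterr"):

	if (qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qinerr"))
		card->stats.rx_errors++;	/* illustrative reaction only */
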
@@ -2614,7 +2603,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2614 | int rc; | 2603 | int rc; |
2615 | int newcount = 0; | 2604 | int newcount = 0; |
2616 | 2605 | ||
2617 | QETH_DBF_TEXT(trace, 6, "queinbuf"); | 2606 | QETH_DBF_TEXT(TRACE, 6, "queinbuf"); |
2618 | count = (index < queue->next_buf_to_init)? | 2607 | count = (index < queue->next_buf_to_init)? |
2619 | card->qdio.in_buf_pool.buf_count - | 2608 | card->qdio.in_buf_pool.buf_count - |
2620 | (queue->next_buf_to_init - index) : | 2609 | (queue->next_buf_to_init - index) : |
@@ -2670,8 +2659,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2670 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " | 2659 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " |
2671 | "return %i (device %s).\n", | 2660 | "return %i (device %s).\n", |
2672 | rc, CARD_DDEV_ID(card)); | 2661 | rc, CARD_DDEV_ID(card)); |
2673 | QETH_DBF_TEXT(trace, 2, "qinberr"); | 2662 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); |
2674 | QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card)); | 2663 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
2675 | } | 2664 | } |
2676 | queue->next_buf_to_init = (queue->next_buf_to_init + count) % | 2665 | queue->next_buf_to_init = (queue->next_buf_to_init + count) % |
2677 | QDIO_MAX_BUFFERS_PER_Q; | 2666 | QDIO_MAX_BUFFERS_PER_Q; |
@@ -2686,22 +2675,22 @@ static int qeth_handle_send_error(struct qeth_card *card, | |||
2686 | int sbalf15 = buffer->buffer->element[15].flags & 0xff; | 2675 | int sbalf15 = buffer->buffer->element[15].flags & 0xff; |
2687 | int cc = siga_err & 3; | 2676 | int cc = siga_err & 3; |
2688 | 2677 | ||
2689 | QETH_DBF_TEXT(trace, 6, "hdsnderr"); | 2678 | QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); |
2690 | qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); | 2679 | qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); |
2691 | switch (cc) { | 2680 | switch (cc) { |
2692 | case 0: | 2681 | case 0: |
2693 | if (qdio_err) { | 2682 | if (qdio_err) { |
2694 | QETH_DBF_TEXT(trace, 1, "lnkfail"); | 2683 | QETH_DBF_TEXT(TRACE, 1, "lnkfail"); |
2695 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 2684 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
2696 | QETH_DBF_TEXT_(trace, 1, "%04x %02x", | 2685 | QETH_DBF_TEXT_(TRACE, 1, "%04x %02x", |
2697 | (u16)qdio_err, (u8)sbalf15); | 2686 | (u16)qdio_err, (u8)sbalf15); |
2698 | return QETH_SEND_ERROR_LINK_FAILURE; | 2687 | return QETH_SEND_ERROR_LINK_FAILURE; |
2699 | } | 2688 | } |
2700 | return QETH_SEND_ERROR_NONE; | 2689 | return QETH_SEND_ERROR_NONE; |
2701 | case 2: | 2690 | case 2: |
2702 | if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { | 2691 | if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { |
2703 | QETH_DBF_TEXT(trace, 1, "SIGAcc2B"); | 2692 | QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); |
2704 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 2693 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
2705 | return QETH_SEND_ERROR_KICK_IT; | 2694 | return QETH_SEND_ERROR_KICK_IT; |
2706 | } | 2695 | } |
2707 | if ((sbalf15 >= 15) && (sbalf15 <= 31)) | 2696 | if ((sbalf15 >= 15) && (sbalf15 <= 31)) |
@@ -2709,13 +2698,13 @@ static int qeth_handle_send_error(struct qeth_card *card, | |||
2709 | return QETH_SEND_ERROR_LINK_FAILURE; | 2698 | return QETH_SEND_ERROR_LINK_FAILURE; |
2710 | /* look at qdio_error and sbalf 15 */ | 2699 | /* look at qdio_error and sbalf 15 */ |
2711 | case 1: | 2700 | case 1: |
2712 | QETH_DBF_TEXT(trace, 1, "SIGAcc1"); | 2701 | QETH_DBF_TEXT(TRACE, 1, "SIGAcc1"); |
2713 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 2702 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
2714 | return QETH_SEND_ERROR_LINK_FAILURE; | 2703 | return QETH_SEND_ERROR_LINK_FAILURE; |
2715 | case 3: | 2704 | case 3: |
2716 | default: | 2705 | default: |
2717 | QETH_DBF_TEXT(trace, 1, "SIGAcc3"); | 2706 | QETH_DBF_TEXT(TRACE, 1, "SIGAcc3"); |
2718 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 2707 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
2719 | return QETH_SEND_ERROR_KICK_IT; | 2708 | return QETH_SEND_ERROR_KICK_IT; |
2720 | } | 2709 | } |
2721 | } | 2710 | } |
@@ -2730,7 +2719,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | |||
2730 | if (atomic_read(&queue->used_buffers) | 2719 | if (atomic_read(&queue->used_buffers) |
2731 | >= QETH_HIGH_WATERMARK_PACK){ | 2720 | >= QETH_HIGH_WATERMARK_PACK){ |
2732 | /* switch non-PACKING -> PACKING */ | 2721 | /* switch non-PACKING -> PACKING */ |
2733 | QETH_DBF_TEXT(trace, 6, "np->pack"); | 2722 | QETH_DBF_TEXT(TRACE, 6, "np->pack"); |
2734 | if (queue->card->options.performance_stats) | 2723 | if (queue->card->options.performance_stats) |
2735 | queue->card->perf_stats.sc_dp_p++; | 2724 | queue->card->perf_stats.sc_dp_p++; |
2736 | queue->do_pack = 1; | 2725 | queue->do_pack = 1; |
@@ -2753,7 +2742,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2753 | if (atomic_read(&queue->used_buffers) | 2742 | if (atomic_read(&queue->used_buffers) |
2754 | <= QETH_LOW_WATERMARK_PACK) { | 2743 | <= QETH_LOW_WATERMARK_PACK) { |
2755 | /* switch PACKING -> non-PACKING */ | 2744 | /* switch PACKING -> non-PACKING */ |
2756 | QETH_DBF_TEXT(trace, 6, "pack->np"); | 2745 | QETH_DBF_TEXT(TRACE, 6, "pack->np"); |
2757 | if (queue->card->options.performance_stats) | 2746 | if (queue->card->options.performance_stats) |
2758 | queue->card->perf_stats.sc_p_dp++; | 2747 | queue->card->perf_stats.sc_p_dp++; |
2759 | queue->do_pack = 0; | 2748 | queue->do_pack = 0; |
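
These two helpers implement a hysteresis: the queue switches to packing once used_buffers climbs to QETH_HIGH_WATERMARK_PACK and only switches back once it has drained to QETH_LOW_WATERMARK_PACK, which avoids flapping around a single threshold. The logic condensed into one sketch (an illustrative paraphrase, not the driver's code; the real switch-back path additionally flushes a partially packed buffer):

	static void qeth_update_packing(struct qeth_qdio_out_q *queue)
	{
		int used = atomic_read(&queue->used_buffers);

		if (!queue->do_pack && used >= QETH_HIGH_WATERMARK_PACK)
			queue->do_pack = 1;	/* busy: pack several skbs per buffer */
		else if (queue->do_pack && used <= QETH_LOW_WATERMARK_PACK)
			queue->do_pack = 0;	/* drained: one skb per buffer again */
	}
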
@@ -2803,7 +2792,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2803 | int i; | 2792 | int i; |
2804 | unsigned int qdio_flags; | 2793 | unsigned int qdio_flags; |
2805 | 2794 | ||
2806 | QETH_DBF_TEXT(trace, 6, "flushbuf"); | 2795 | QETH_DBF_TEXT(TRACE, 6, "flushbuf"); |
2807 | 2796 | ||
2808 | for (i = index; i < index + count; ++i) { | 2797 | for (i = index; i < index + count; ++i) { |
2809 | buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; | 2798 | buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; |
@@ -2857,9 +2846,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2857 | qeth_get_micros() - | 2846 | qeth_get_micros() - |
2858 | queue->card->perf_stats.outbound_do_qdio_start_time; | 2847 | queue->card->perf_stats.outbound_do_qdio_start_time; |
2859 | if (rc) { | 2848 | if (rc) { |
2860 | QETH_DBF_TEXT(trace, 2, "flushbuf"); | 2849 | QETH_DBF_TEXT(TRACE, 2, "flushbuf"); |
2861 | QETH_DBF_TEXT_(trace, 2, " err%d", rc); | 2850 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
2862 | QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card)); | 2851 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card)); |
2863 | queue->card->stats.tx_errors += count; | 2852 | queue->card->stats.tx_errors += count; |
2864 | /* this must not happen under normal circumstances. if it | 2853 | /* this must not happen under normal circumstances. if it |
2865 | * happens something is really wrong -> recover */ | 2854 | * happens something is really wrong -> recover */ |
@@ -2921,12 +2910,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, | |||
2921 | struct qeth_qdio_out_buffer *buffer; | 2910 | struct qeth_qdio_out_buffer *buffer; |
2922 | int i; | 2911 | int i; |
2923 | 2912 | ||
2924 | QETH_DBF_TEXT(trace, 6, "qdouhdl"); | 2913 | QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); |
2925 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { | 2914 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { |
2926 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { | 2915 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { |
2927 | QETH_DBF_TEXT(trace, 2, "achkcond"); | 2916 | QETH_DBF_TEXT(TRACE, 2, "achkcond"); |
2928 | QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card)); | 2917 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
2929 | QETH_DBF_TEXT_(trace, 2, "%08x", status); | 2918 | QETH_DBF_TEXT_(TRACE, 2, "%08x", status); |
2930 | netif_stop_queue(card->dev); | 2919 | netif_stop_queue(card->dev); |
2931 | qeth_schedule_recovery(card); | 2920 | qeth_schedule_recovery(card); |
2932 | return; | 2921 | return; |
@@ -3074,7 +3063,7 @@ struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | |||
3074 | { | 3063 | { |
3075 | struct sk_buff *new_skb; | 3064 | struct sk_buff *new_skb; |
3076 | 3065 | ||
3077 | QETH_DBF_TEXT(trace, 6, "prepskb"); | 3066 | QETH_DBF_TEXT(TRACE, 6, "prepskb"); |
3078 | 3067 | ||
3079 | new_skb = qeth_realloc_headroom(card, skb, | 3068 | new_skb = qeth_realloc_headroom(card, skb, |
3080 | sizeof(struct qeth_hdr)); | 3069 | sizeof(struct qeth_hdr)); |
@@ -3161,7 +3150,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
3161 | struct qeth_hdr_tso *hdr; | 3150 | struct qeth_hdr_tso *hdr; |
3162 | int flush_cnt = 0, hdr_len, large_send = 0; | 3151 | int flush_cnt = 0, hdr_len, large_send = 0; |
3163 | 3152 | ||
3164 | QETH_DBF_TEXT(trace, 6, "qdfillbf"); | 3153 | QETH_DBF_TEXT(TRACE, 6, "qdfillbf"); |
3165 | 3154 | ||
3166 | buffer = buf->buffer; | 3155 | buffer = buf->buffer; |
3167 | atomic_inc(&skb->users); | 3156 | atomic_inc(&skb->users); |
@@ -3190,12 +3179,12 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
3190 | (int *)&buf->next_element_to_fill); | 3179 | (int *)&buf->next_element_to_fill); |
3191 | 3180 | ||
3192 | if (!queue->do_pack) { | 3181 | if (!queue->do_pack) { |
3193 | QETH_DBF_TEXT(trace, 6, "fillbfnp"); | 3182 | QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); |
3194 | /* set state to PRIMED -> will be flushed */ | 3183 | /* set state to PRIMED -> will be flushed */ |
3195 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); | 3184 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); |
3196 | flush_cnt = 1; | 3185 | flush_cnt = 1; |
3197 | } else { | 3186 | } else { |
3198 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); | 3187 | QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); |
3199 | if (queue->card->options.performance_stats) | 3188 | if (queue->card->options.performance_stats) |
3200 | queue->card->perf_stats.skbs_sent_pack++; | 3189 | queue->card->perf_stats.skbs_sent_pack++; |
3201 | if (buf->next_element_to_fill >= | 3190 | if (buf->next_element_to_fill >= |
@@ -3221,7 +3210,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, | |||
3221 | int flush_cnt = 0; | 3210 | int flush_cnt = 0; |
3222 | int index; | 3211 | int index; |
3223 | 3212 | ||
3224 | QETH_DBF_TEXT(trace, 6, "dosndpfa"); | 3213 | QETH_DBF_TEXT(TRACE, 6, "dosndpfa"); |
3225 | 3214 | ||
3226 | /* spin until we get the queue ... */ | 3215 | /* spin until we get the queue ... */ |
3227 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, | 3216 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, |
@@ -3274,7 +3263,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
3274 | int tmp; | 3263 | int tmp; |
3275 | int rc = 0; | 3264 | int rc = 0; |
3276 | 3265 | ||
3277 | QETH_DBF_TEXT(trace, 6, "dosndpkt"); | 3266 | QETH_DBF_TEXT(TRACE, 6, "dosndpkt"); |
3278 | 3267 | ||
3279 | /* spin until we get the queue ... */ | 3268 | /* spin until we get the queue ... */ |
3280 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, | 3269 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, |
@@ -3381,14 +3370,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, | |||
3381 | struct qeth_ipa_cmd *cmd; | 3370 | struct qeth_ipa_cmd *cmd; |
3382 | struct qeth_ipacmd_setadpparms *setparms; | 3371 | struct qeth_ipacmd_setadpparms *setparms; |
3383 | 3372 | ||
3384 | QETH_DBF_TEXT(trace, 4, "prmadpcb"); | 3373 | QETH_DBF_TEXT(TRACE, 4, "prmadpcb"); |
3385 | 3374 | ||
3386 | cmd = (struct qeth_ipa_cmd *) data; | 3375 | cmd = (struct qeth_ipa_cmd *) data; |
3387 | setparms = &(cmd->data.setadapterparms); | 3376 | setparms = &(cmd->data.setadapterparms); |
3388 | 3377 | ||
3389 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); | 3378 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); |
3390 | if (cmd->hdr.return_code) { | 3379 | if (cmd->hdr.return_code) { |
3391 | QETH_DBF_TEXT_(trace, 4, "prmrc%2.2x", cmd->hdr.return_code); | 3380 | QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code); |
3392 | setparms->data.mode = SET_PROMISC_MODE_OFF; | 3381 | setparms->data.mode = SET_PROMISC_MODE_OFF; |
3393 | } | 3382 | } |
3394 | card->info.promisc_mode = setparms->data.mode; | 3383 | card->info.promisc_mode = setparms->data.mode; |
@@ -3402,7 +3391,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
3402 | struct qeth_cmd_buffer *iob; | 3391 | struct qeth_cmd_buffer *iob; |
3403 | struct qeth_ipa_cmd *cmd; | 3392 | struct qeth_ipa_cmd *cmd; |
3404 | 3393 | ||
3405 | QETH_DBF_TEXT(trace, 4, "setprom"); | 3394 | QETH_DBF_TEXT(TRACE, 4, "setprom"); |
3406 | 3395 | ||
3407 | if (((dev->flags & IFF_PROMISC) && | 3396 | if (((dev->flags & IFF_PROMISC) && |
3408 | (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || | 3397 | (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || |
@@ -3412,7 +3401,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
3412 | mode = SET_PROMISC_MODE_OFF; | 3401 | mode = SET_PROMISC_MODE_OFF; |
3413 | if (dev->flags & IFF_PROMISC) | 3402 | if (dev->flags & IFF_PROMISC) |
3414 | mode = SET_PROMISC_MODE_ON; | 3403 | mode = SET_PROMISC_MODE_ON; |
3415 | QETH_DBF_TEXT_(trace, 4, "mode:%x", mode); | 3404 | QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode); |
3416 | 3405 | ||
3417 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 3406 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
3418 | sizeof(struct qeth_ipacmd_setadpparms)); | 3407 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -3429,9 +3418,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu) | |||
3429 | 3418 | ||
3430 | card = netdev_priv(dev); | 3419 | card = netdev_priv(dev); |
3431 | 3420 | ||
3432 | QETH_DBF_TEXT(trace, 4, "chgmtu"); | 3421 | QETH_DBF_TEXT(TRACE, 4, "chgmtu"); |
3433 | sprintf(dbf_text, "%8x", new_mtu); | 3422 | sprintf(dbf_text, "%8x", new_mtu); |
3434 | QETH_DBF_TEXT(trace, 4, dbf_text); | 3423 | QETH_DBF_TEXT(TRACE, 4, dbf_text); |
3435 | 3424 | ||
3436 | if (new_mtu < 64) | 3425 | if (new_mtu < 64) |
3437 | return -EINVAL; | 3426 | return -EINVAL; |
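
qeth_change_mtu logs the requested MTU as hex by formatting it into a local dbf_text buffer before handing it to QETH_DBF_TEXT, then rejects anything below 64 bytes. As an aside, the formatting variant of the macro used elsewhere in this file would express the same thing in one line (shown only as an assumption about an equivalent form):

	QETH_DBF_TEXT_(TRACE, 4, "%8x", new_mtu);
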
@@ -3451,7 +3440,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev) | |||
3451 | 3440 | ||
3452 | card = netdev_priv(dev); | 3441 | card = netdev_priv(dev); |
3453 | 3442 | ||
3454 | QETH_DBF_TEXT(trace, 5, "getstat"); | 3443 | QETH_DBF_TEXT(TRACE, 5, "getstat"); |
3455 | 3444 | ||
3456 | return &card->stats; | 3445 | return &card->stats; |
3457 | } | 3446 | } |
@@ -3462,7 +3451,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, | |||
3462 | { | 3451 | { |
3463 | struct qeth_ipa_cmd *cmd; | 3452 | struct qeth_ipa_cmd *cmd; |
3464 | 3453 | ||
3465 | QETH_DBF_TEXT(trace, 4, "chgmaccb"); | 3454 | QETH_DBF_TEXT(TRACE, 4, "chgmaccb"); |
3466 | 3455 | ||
3467 | cmd = (struct qeth_ipa_cmd *) data; | 3456 | cmd = (struct qeth_ipa_cmd *) data; |
3468 | if (!card->options.layer2 || | 3457 | if (!card->options.layer2 || |
@@ -3482,7 +3471,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
3482 | struct qeth_cmd_buffer *iob; | 3471 | struct qeth_cmd_buffer *iob; |
3483 | struct qeth_ipa_cmd *cmd; | 3472 | struct qeth_ipa_cmd *cmd; |
3484 | 3473 | ||
3485 | QETH_DBF_TEXT(trace, 4, "chgmac"); | 3474 | QETH_DBF_TEXT(TRACE, 4, "chgmac"); |
3486 | 3475 | ||
3487 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 3476 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
3488 | sizeof(struct qeth_ipacmd_setadpparms)); | 3477 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -3580,7 +3569,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, | |||
3580 | { | 3569 | { |
3581 | u16 s1, s2; | 3570 | u16 s1, s2; |
3582 | 3571 | ||
3583 | QETH_DBF_TEXT(trace, 4, "sendsnmp"); | 3572 | QETH_DBF_TEXT(TRACE, 4, "sendsnmp"); |
3584 | 3573 | ||
3585 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); | 3574 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); |
3586 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), | 3575 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), |
@@ -3605,7 +3594,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3605 | unsigned char *data; | 3594 | unsigned char *data; |
3606 | __u16 data_len; | 3595 | __u16 data_len; |
3607 | 3596 | ||
3608 | QETH_DBF_TEXT(trace, 3, "snpcmdcb"); | 3597 | QETH_DBF_TEXT(TRACE, 3, "snpcmdcb"); |
3609 | 3598 | ||
3610 | cmd = (struct qeth_ipa_cmd *) sdata; | 3599 | cmd = (struct qeth_ipa_cmd *) sdata; |
3611 | data = (unsigned char *)((char *)cmd - reply->offset); | 3600 | data = (unsigned char *)((char *)cmd - reply->offset); |
@@ -3613,13 +3602,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3613 | snmp = &cmd->data.setadapterparms.data.snmp; | 3602 | snmp = &cmd->data.setadapterparms.data.snmp; |
3614 | 3603 | ||
3615 | if (cmd->hdr.return_code) { | 3604 | if (cmd->hdr.return_code) { |
3616 | QETH_DBF_TEXT_(trace, 4, "scer1%i", cmd->hdr.return_code); | 3605 | QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code); |
3617 | return 0; | 3606 | return 0; |
3618 | } | 3607 | } |
3619 | if (cmd->data.setadapterparms.hdr.return_code) { | 3608 | if (cmd->data.setadapterparms.hdr.return_code) { |
3620 | cmd->hdr.return_code = | 3609 | cmd->hdr.return_code = |
3621 | cmd->data.setadapterparms.hdr.return_code; | 3610 | cmd->data.setadapterparms.hdr.return_code; |
3622 | QETH_DBF_TEXT_(trace, 4, "scer2%i", cmd->hdr.return_code); | 3611 | QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code); |
3623 | return 0; | 3612 | return 0; |
3624 | } | 3613 | } |
3625 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); | 3614 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); |
@@ -3630,13 +3619,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3630 | 3619 | ||
3631 | /* check if there is enough room in userspace */ | 3620 | /* check if there is enough room in userspace */ |
3632 | if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { | 3621 | if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { |
3633 | QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM); | 3622 | QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM); |
3634 | cmd->hdr.return_code = -ENOMEM; | 3623 | cmd->hdr.return_code = -ENOMEM; |
3635 | return 0; | 3624 | return 0; |
3636 | } | 3625 | } |
3637 | QETH_DBF_TEXT_(trace, 4, "snore%i", | 3626 | QETH_DBF_TEXT_(TRACE, 4, "snore%i", |
3638 | cmd->data.setadapterparms.hdr.used_total); | 3627 | cmd->data.setadapterparms.hdr.used_total); |
3639 | QETH_DBF_TEXT_(trace, 4, "sseqn%i", | 3628 | QETH_DBF_TEXT_(TRACE, 4, "sseqn%i", |
3640 | cmd->data.setadapterparms.hdr.seq_no); | 3629 | cmd->data.setadapterparms.hdr.seq_no); |
3641 | /*copy entries to user buffer*/ | 3630 | /*copy entries to user buffer*/ |
3642 | if (cmd->data.setadapterparms.hdr.seq_no == 1) { | 3631 | if (cmd->data.setadapterparms.hdr.seq_no == 1) { |
@@ -3650,9 +3639,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3650 | } | 3639 | } |
3651 | qinfo->udata_offset += data_len; | 3640 | qinfo->udata_offset += data_len; |
3652 | /* check if all replies received ... */ | 3641 | /* check if all replies received ... */ |
3653 | QETH_DBF_TEXT_(trace, 4, "srtot%i", | 3642 | QETH_DBF_TEXT_(TRACE, 4, "srtot%i", |
3654 | cmd->data.setadapterparms.hdr.used_total); | 3643 | cmd->data.setadapterparms.hdr.used_total); |
3655 | QETH_DBF_TEXT_(trace, 4, "srseq%i", | 3644 | QETH_DBF_TEXT_(TRACE, 4, "srseq%i", |
3656 | cmd->data.setadapterparms.hdr.seq_no); | 3645 | cmd->data.setadapterparms.hdr.seq_no); |
3657 | if (cmd->data.setadapterparms.hdr.seq_no < | 3646 | if (cmd->data.setadapterparms.hdr.seq_no < |
3658 | cmd->data.setadapterparms.hdr.used_total) | 3647 | cmd->data.setadapterparms.hdr.used_total) |
@@ -3669,7 +3658,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3669 | struct qeth_arp_query_info qinfo = {0, }; | 3658 | struct qeth_arp_query_info qinfo = {0, }; |
3670 | int rc = 0; | 3659 | int rc = 0; |
3671 | 3660 | ||
3672 | QETH_DBF_TEXT(trace, 3, "snmpcmd"); | 3661 | QETH_DBF_TEXT(TRACE, 3, "snmpcmd"); |
3673 | 3662 | ||
3674 | if (card->info.guestlan) | 3663 | if (card->info.guestlan) |
3675 | return -EOPNOTSUPP; | 3664 | return -EOPNOTSUPP; |
@@ -3685,7 +3674,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3685 | return -EFAULT; | 3674 | return -EFAULT; |
3686 | ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); | 3675 | ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); |
3687 | if (!ureq) { | 3676 | if (!ureq) { |
3688 | QETH_DBF_TEXT(trace, 2, "snmpnome"); | 3677 | QETH_DBF_TEXT(TRACE, 2, "snmpnome"); |
3689 | return -ENOMEM; | 3678 | return -ENOMEM; |
3690 | } | 3679 | } |
3691 | if (copy_from_user(ureq, udata, | 3680 | if (copy_from_user(ureq, udata, |
@@ -3740,7 +3729,7 @@ static int qeth_qdio_establish(struct qeth_card *card) | |||
3740 | int i, j, k; | 3729 | int i, j, k; |
3741 | int rc = 0; | 3730 | int rc = 0; |
3742 | 3731 | ||
3743 | QETH_DBF_TEXT(setup, 2, "qdioest"); | 3732 | QETH_DBF_TEXT(SETUP, 2, "qdioest"); |
3744 | 3733 | ||
3745 | qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), | 3734 | qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), |
3746 | GFP_KERNEL); | 3735 | GFP_KERNEL); |
@@ -3809,8 +3798,8 @@ static int qeth_qdio_establish(struct qeth_card *card) | |||
3809 | static void qeth_core_free_card(struct qeth_card *card) | 3798 | static void qeth_core_free_card(struct qeth_card *card) |
3810 | { | 3799 | { |
3811 | 3800 | ||
3812 | QETH_DBF_TEXT(setup, 2, "freecrd"); | 3801 | QETH_DBF_TEXT(SETUP, 2, "freecrd"); |
3813 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 3802 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
3814 | qeth_clean_channel(&card->read); | 3803 | qeth_clean_channel(&card->read); |
3815 | qeth_clean_channel(&card->write); | 3804 | qeth_clean_channel(&card->write); |
3816 | if (card->dev) | 3805 | if (card->dev) |
@@ -3867,7 +3856,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card) | |||
3867 | int mpno; | 3856 | int mpno; |
3868 | int rc; | 3857 | int rc; |
3869 | 3858 | ||
3870 | QETH_DBF_TEXT(setup, 2, "hrdsetup"); | 3859 | QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); |
3871 | atomic_set(&card->force_alloc_skb, 0); | 3860 | atomic_set(&card->force_alloc_skb, 0); |
3872 | retry: | 3861 | retry: |
3873 | if (retries < 3) { | 3862 | if (retries < 3) { |
@@ -3881,10 +3870,10 @@ retry: | |||
3881 | } | 3870 | } |
3882 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | 3871 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); |
3883 | if (rc == -ERESTARTSYS) { | 3872 | if (rc == -ERESTARTSYS) { |
3884 | QETH_DBF_TEXT(setup, 2, "break1"); | 3873 | QETH_DBF_TEXT(SETUP, 2, "break1"); |
3885 | return rc; | 3874 | return rc; |
3886 | } else if (rc) { | 3875 | } else if (rc) { |
3887 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3876 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3888 | if (--retries < 0) | 3877 | if (--retries < 0) |
3889 | goto out; | 3878 | goto out; |
3890 | else | 3879 | else |
@@ -3893,7 +3882,7 @@ retry: | |||
3893 | 3882 | ||
3894 | rc = qeth_get_unitaddr(card); | 3883 | rc = qeth_get_unitaddr(card); |
3895 | if (rc) { | 3884 | if (rc) { |
3896 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 3885 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
3897 | return rc; | 3886 | return rc; |
3898 | } | 3887 | } |
3899 | 3888 | ||
@@ -3908,10 +3897,10 @@ retry: | |||
3908 | qeth_init_func_level(card); | 3897 | qeth_init_func_level(card); |
3909 | rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); | 3898 | rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); |
3910 | if (rc == -ERESTARTSYS) { | 3899 | if (rc == -ERESTARTSYS) { |
3911 | QETH_DBF_TEXT(setup, 2, "break2"); | 3900 | QETH_DBF_TEXT(SETUP, 2, "break2"); |
3912 | return rc; | 3901 | return rc; |
3913 | } else if (rc) { | 3902 | } else if (rc) { |
3914 | QETH_DBF_TEXT_(setup, 2, "3err%d", rc); | 3903 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
3915 | if (--retries < 0) | 3904 | if (--retries < 0) |
3916 | goto out; | 3905 | goto out; |
3917 | else | 3906 | else |
@@ -3919,10 +3908,10 @@ retry: | |||
3919 | } | 3908 | } |
3920 | rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); | 3909 | rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); |
3921 | if (rc == -ERESTARTSYS) { | 3910 | if (rc == -ERESTARTSYS) { |
3922 | QETH_DBF_TEXT(setup, 2, "break3"); | 3911 | QETH_DBF_TEXT(SETUP, 2, "break3"); |
3923 | return rc; | 3912 | return rc; |
3924 | } else if (rc) { | 3913 | } else if (rc) { |
3925 | QETH_DBF_TEXT_(setup, 2, "4err%d", rc); | 3914 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); |
3926 | if (--retries < 0) | 3915 | if (--retries < 0) |
3927 | goto out; | 3916 | goto out; |
3928 | else | 3917 | else |
@@ -3930,7 +3919,7 @@ retry: | |||
3930 | } | 3919 | } |
3931 | rc = qeth_mpc_initialize(card); | 3920 | rc = qeth_mpc_initialize(card); |
3932 | if (rc) { | 3921 | if (rc) { |
3933 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); | 3922 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
3934 | goto out; | 3923 | goto out; |
3935 | } | 3924 | } |
3936 | return 0; | 3925 | return 0; |
@@ -3991,7 +3980,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
3991 | int use_rx_sg = 0; | 3980 | int use_rx_sg = 0; |
3992 | int frag = 0; | 3981 | int frag = 0; |
3993 | 3982 | ||
3994 | QETH_DBF_TEXT(trace, 6, "nextskb"); | 3983 | QETH_DBF_TEXT(TRACE, 6, "nextskb"); |
3995 | /* qeth_hdr must not cross element boundaries */ | 3984 | /* qeth_hdr must not cross element boundaries */ |
3996 | if (element->length < offset + sizeof(struct qeth_hdr)) { | 3985 | if (element->length < offset + sizeof(struct qeth_hdr)) { |
3997 | if (qeth_is_last_sbale(element)) | 3986 | if (qeth_is_last_sbale(element)) |
@@ -4013,7 +4002,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
4013 | } | 4002 | } |
4014 | } else { | 4003 | } else { |
4015 | skb_len = (*hdr)->hdr.l3.length; | 4004 | skb_len = (*hdr)->hdr.l3.length; |
4016 | headroom = max((int)ETH_HLEN, (int)TR_HLEN); | 4005 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || |
4006 | (card->info.link_type == QETH_LINK_TYPE_HSTR)) | ||
4007 | headroom = TR_HLEN; | ||
4008 | else | ||
4009 | headroom = ETH_HLEN; | ||
4017 | } | 4010 | } |
4018 | 4011 | ||
4019 | if (!skb_len) | 4012 | if (!skb_len) |
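
For layer-3 frames the headroom is no longer a blanket max(ETH_HLEN, TR_HLEN): the token-ring link types (LANE_TR, HSTR) reserve TR_HLEN, everything else only ETH_HLEN. The same selection written as a small helper, purely as an illustrative form:

	static inline int qeth_l3_rx_headroom(const struct qeth_card *card)
	{
		if (card->info.link_type == QETH_LINK_TYPE_LANE_TR ||
		    card->info.link_type == QETH_LINK_TYPE_HSTR)
			return TR_HLEN;
		return ETH_HLEN;
	}
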
@@ -4047,13 +4040,13 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
4047 | skb_len -= data_len; | 4040 | skb_len -= data_len; |
4048 | if (skb_len) { | 4041 | if (skb_len) { |
4049 | if (qeth_is_last_sbale(element)) { | 4042 | if (qeth_is_last_sbale(element)) { |
4050 | QETH_DBF_TEXT(trace, 4, "unexeob"); | 4043 | QETH_DBF_TEXT(TRACE, 4, "unexeob"); |
4051 | QETH_DBF_TEXT_(trace, 4, "%s", | 4044 | QETH_DBF_TEXT_(TRACE, 4, "%s", |
4052 | CARD_BUS_ID(card)); | 4045 | CARD_BUS_ID(card)); |
4053 | QETH_DBF_TEXT(qerr, 2, "unexeob"); | 4046 | QETH_DBF_TEXT(QERR, 2, "unexeob"); |
4054 | QETH_DBF_TEXT_(qerr, 2, "%s", | 4047 | QETH_DBF_TEXT_(QERR, 2, "%s", |
4055 | CARD_BUS_ID(card)); | 4048 | CARD_BUS_ID(card)); |
4056 | QETH_DBF_HEX(misc, 4, buffer, sizeof(*buffer)); | 4049 | QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer)); |
4057 | dev_kfree_skb_any(skb); | 4050 | dev_kfree_skb_any(skb); |
4058 | card->stats.rx_errors++; | 4051 | card->stats.rx_errors++; |
4059 | return NULL; | 4052 | return NULL; |
@@ -4076,8 +4069,8 @@ no_mem: | |||
4076 | if (net_ratelimit()) { | 4069 | if (net_ratelimit()) { |
4077 | PRINT_WARN("No memory for packet received on %s.\n", | 4070 | PRINT_WARN("No memory for packet received on %s.\n", |
4078 | QETH_CARD_IFNAME(card)); | 4071 | QETH_CARD_IFNAME(card)); |
4079 | QETH_DBF_TEXT(trace, 2, "noskbmem"); | 4072 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); |
4080 | QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card)); | 4073 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
4081 | } | 4074 | } |
4082 | card->stats.rx_dropped++; | 4075 | card->stats.rx_dropped++; |
4083 | return NULL; | 4076 | return NULL; |
@@ -4086,80 +4079,39 @@ EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); | |||
4086 | 4079 | ||
4087 | static void qeth_unregister_dbf_views(void) | 4080 | static void qeth_unregister_dbf_views(void) |
4088 | { | 4081 | { |
4089 | if (qeth_dbf_setup) | 4082 | int x; |
4090 | debug_unregister(qeth_dbf_setup); | 4083 | for (x = 0; x < QETH_DBF_INFOS; x++) { |
4091 | if (qeth_dbf_qerr) | 4084 | debug_unregister(qeth_dbf[x].id); |
4092 | debug_unregister(qeth_dbf_qerr); | 4085 | qeth_dbf[x].id = NULL; |
4093 | if (qeth_dbf_sense) | 4086 | } |
4094 | debug_unregister(qeth_dbf_sense); | ||
4095 | if (qeth_dbf_misc) | ||
4096 | debug_unregister(qeth_dbf_misc); | ||
4097 | if (qeth_dbf_data) | ||
4098 | debug_unregister(qeth_dbf_data); | ||
4099 | if (qeth_dbf_control) | ||
4100 | debug_unregister(qeth_dbf_control); | ||
4101 | if (qeth_dbf_trace) | ||
4102 | debug_unregister(qeth_dbf_trace); | ||
4103 | } | 4087 | } |
4104 | 4088 | ||
4105 | static int qeth_register_dbf_views(void) | 4089 | static int qeth_register_dbf_views(void) |
4106 | { | 4090 | { |
4107 | qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME, | 4091 | int ret; |
4108 | QETH_DBF_SETUP_PAGES, | 4092 | int x; |
4109 | QETH_DBF_SETUP_NR_AREAS, | 4093 | |
4110 | QETH_DBF_SETUP_LEN); | 4094 | for (x = 0; x < QETH_DBF_INFOS; x++) { |
4111 | qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME, | 4095 | /* register the areas */ |
4112 | QETH_DBF_MISC_PAGES, | 4096 | qeth_dbf[x].id = debug_register(qeth_dbf[x].name, |
4113 | QETH_DBF_MISC_NR_AREAS, | 4097 | qeth_dbf[x].pages, |
4114 | QETH_DBF_MISC_LEN); | 4098 | qeth_dbf[x].areas, |
4115 | qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME, | 4099 | qeth_dbf[x].len); |
4116 | QETH_DBF_DATA_PAGES, | 4100 | if (qeth_dbf[x].id == NULL) { |
4117 | QETH_DBF_DATA_NR_AREAS, | 4101 | qeth_unregister_dbf_views(); |
4118 | QETH_DBF_DATA_LEN); | 4102 | return -ENOMEM; |
4119 | qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME, | 4103 | } |
4120 | QETH_DBF_CONTROL_PAGES, | ||
4121 | QETH_DBF_CONTROL_NR_AREAS, | ||
4122 | QETH_DBF_CONTROL_LEN); | ||
4123 | qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME, | ||
4124 | QETH_DBF_SENSE_PAGES, | ||
4125 | QETH_DBF_SENSE_NR_AREAS, | ||
4126 | QETH_DBF_SENSE_LEN); | ||
4127 | qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME, | ||
4128 | QETH_DBF_QERR_PAGES, | ||
4129 | QETH_DBF_QERR_NR_AREAS, | ||
4130 | QETH_DBF_QERR_LEN); | ||
4131 | qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME, | ||
4132 | QETH_DBF_TRACE_PAGES, | ||
4133 | QETH_DBF_TRACE_NR_AREAS, | ||
4134 | QETH_DBF_TRACE_LEN); | ||
4135 | |||
4136 | if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) || | ||
4137 | (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) || | ||
4138 | (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) || | ||
4139 | (qeth_dbf_trace == NULL)) { | ||
4140 | qeth_unregister_dbf_views(); | ||
4141 | return -ENOMEM; | ||
4142 | } | ||
4143 | debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view); | ||
4144 | debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL); | ||
4145 | |||
4146 | debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view); | ||
4147 | debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL); | ||
4148 | |||
4149 | debug_register_view(qeth_dbf_data, &debug_hex_ascii_view); | ||
4150 | debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL); | ||
4151 | |||
4152 | debug_register_view(qeth_dbf_control, &debug_hex_ascii_view); | ||
4153 | debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL); | ||
4154 | |||
4155 | debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view); | ||
4156 | debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL); | ||
4157 | 4104 | ||
4158 | debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view); | 4105 | /* register a view */ |
4159 | debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL); | 4106 | ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); |
4107 | if (ret) { | ||
4108 | qeth_unregister_dbf_views(); | ||
4109 | return ret; | ||
4110 | } | ||
4160 | 4111 | ||
4161 | debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view); | 4112 | /* set a passing level */ |
4162 | debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL); | 4113 | debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); |
4114 | } | ||
4163 | 4115 | ||
4164 | return 0; | 4116 | return 0; |
4165 | } | 4117 | } |
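
The seven hand-written debug_register/debug_register_view/debug_set_level blocks collapse into one loop over a qeth_dbf[] descriptor array with QETH_DBF_INFOS entries, each carrying a name, page/area/length geometry, a default level and a view, plus the handle filled in at registration time. A sketch of what one such descriptor could look like; the struct and field names are inferred from the accesses in the loop (.id, .name, .pages, .areas, .len, .view, .level) and the numeric values are placeholders:

	struct qeth_dbf_info {
		const char *name;	/* debug feature entry name */
		int pages;		/* pages per area */
		int areas;		/* number of areas */
		int len;		/* maximum entry length */
		int level;		/* initial passing level */
		struct debug_view *view;
		debug_info_t *id;	/* filled in by debug_register() */
	};

	static struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
		[QETH_DBF_SETUP] = { "qeth_setup", 8, 1, 8, 5,
				     &debug_hex_ascii_view, NULL },
		/* ... TRACE, CTRL, QERR, MISC entries follow the same pattern ... */
	};
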
@@ -4204,17 +4156,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4204 | int rc; | 4156 | int rc; |
4205 | unsigned long flags; | 4157 | unsigned long flags; |
4206 | 4158 | ||
4207 | QETH_DBF_TEXT(setup, 2, "probedev"); | 4159 | QETH_DBF_TEXT(SETUP, 2, "probedev"); |
4208 | 4160 | ||
4209 | dev = &gdev->dev; | 4161 | dev = &gdev->dev; |
4210 | if (!get_device(dev)) | 4162 | if (!get_device(dev)) |
4211 | return -ENODEV; | 4163 | return -ENODEV; |
4212 | 4164 | ||
4213 | QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id); | 4165 | QETH_DBF_TEXT_(SETUP, 2, "%s", gdev->dev.bus_id); |
4214 | 4166 | ||
4215 | card = qeth_alloc_card(); | 4167 | card = qeth_alloc_card(); |
4216 | if (!card) { | 4168 | if (!card) { |
4217 | QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM); | 4169 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); |
4218 | rc = -ENOMEM; | 4170 | rc = -ENOMEM; |
4219 | goto err_dev; | 4171 | goto err_dev; |
4220 | } | 4172 | } |
@@ -4230,12 +4182,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4230 | rc = qeth_determine_card_type(card); | 4182 | rc = qeth_determine_card_type(card); |
4231 | if (rc) { | 4183 | if (rc) { |
4232 | PRINT_WARN("%s: not a valid card type\n", __func__); | 4184 | PRINT_WARN("%s: not a valid card type\n", __func__); |
4233 | QETH_DBF_TEXT_(setup, 2, "3err%d", rc); | 4185 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
4234 | goto err_card; | 4186 | goto err_card; |
4235 | } | 4187 | } |
4236 | rc = qeth_setup_card(card); | 4188 | rc = qeth_setup_card(card); |
4237 | if (rc) { | 4189 | if (rc) { |
4238 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 4190 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
4239 | goto err_card; | 4191 | goto err_card; |
4240 | } | 4192 | } |
4241 | 4193 | ||
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 8653b73e5dcf..06f4de1f0507 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c | |||
@@ -195,7 +195,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = { | |||
195 | {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, | 195 | {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, |
196 | {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, | 196 | {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, |
197 | {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, | 197 | {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, |
198 | {IPA_RC_MULTICAST_FULL, "No task available, multicast full"}, | 198 | {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}, |
199 | {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, | 199 | {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, |
200 | {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, | 200 | {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, |
201 | {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, | 201 | {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, |
@@ -230,7 +230,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = { | |||
230 | {IPA_CMD_STARTLAN, "startlan"}, | 230 | {IPA_CMD_STARTLAN, "startlan"}, |
231 | {IPA_CMD_STOPLAN, "stoplan"}, | 231 | {IPA_CMD_STOPLAN, "stoplan"}, |
232 | {IPA_CMD_SETVMAC, "setvmac"}, | 232 | {IPA_CMD_SETVMAC, "setvmac"}, |
233 | {IPA_CMD_DELVMAC, "delvmca"}, | 233 | {IPA_CMD_DELVMAC, "delvmac"}, |
234 | {IPA_CMD_SETGMAC, "setgmac"}, | 234 | {IPA_CMD_SETGMAC, "setgmac"}, |
235 | {IPA_CMD_DELGMAC, "delgmac"}, | 235 | {IPA_CMD_DELGMAC, "delgmac"}, |
236 | {IPA_CMD_SETVLAN, "setvlan"}, | 236 | {IPA_CMD_SETVLAN, "setvlan"}, |
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index de221932f30f..18548822e37c 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h | |||
@@ -182,7 +182,7 @@ enum qeth_ipa_return_codes { | |||
182 | IPA_RC_SETIP_NO_STARTLAN = 0xe008, | 182 | IPA_RC_SETIP_NO_STARTLAN = 0xe008, |
183 | IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009, | 183 | IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009, |
184 | IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a, | 184 | IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a, |
185 | IPA_RC_MULTICAST_FULL = 0xe00b, | 185 | IPA_RC_MC_ADDR_NOT_FOUND = 0xe00b, |
186 | IPA_RC_SETIP_INVALID_VERSION = 0xe00d, | 186 | IPA_RC_SETIP_INVALID_VERSION = 0xe00d, |
187 | IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e, | 187 | IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e, |
188 | IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f, | 188 | IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f, |
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c index 8b407d6a83cf..822df8362856 100644 --- a/drivers/s390/net/qeth_core_offl.c +++ b/drivers/s390/net/qeth_core_offl.c | |||
@@ -31,7 +31,7 @@ int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, | |||
31 | int skbs_in_buffer; | 31 | int skbs_in_buffer; |
32 | int buffers_needed = 0; | 32 | int buffers_needed = 0; |
33 | 33 | ||
34 | QETH_DBF_TEXT(trace, 5, "eddpcbfc"); | 34 | QETH_DBF_TEXT(TRACE, 5, "eddpcbfc"); |
35 | while (elements_needed > 0) { | 35 | while (elements_needed > 0) { |
36 | buffers_needed++; | 36 | buffers_needed++; |
37 | if (atomic_read(&queue->bufs[index].state) != | 37 | if (atomic_read(&queue->bufs[index].state) != |
@@ -51,7 +51,7 @@ static void qeth_eddp_free_context(struct qeth_eddp_context *ctx) | |||
51 | { | 51 | { |
52 | int i; | 52 | int i; |
53 | 53 | ||
54 | QETH_DBF_TEXT(trace, 5, "eddpfctx"); | 54 | QETH_DBF_TEXT(TRACE, 5, "eddpfctx"); |
55 | for (i = 0; i < ctx->num_pages; ++i) | 55 | for (i = 0; i < ctx->num_pages; ++i) |
56 | free_page((unsigned long)ctx->pages[i]); | 56 | free_page((unsigned long)ctx->pages[i]); |
57 | kfree(ctx->pages); | 57 | kfree(ctx->pages); |
@@ -76,7 +76,7 @@ void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) | |||
76 | { | 76 | { |
77 | struct qeth_eddp_context_reference *ref; | 77 | struct qeth_eddp_context_reference *ref; |
78 | 78 | ||
79 | QETH_DBF_TEXT(trace, 6, "eddprctx"); | 79 | QETH_DBF_TEXT(TRACE, 6, "eddprctx"); |
80 | while (!list_empty(&buf->ctx_list)) { | 80 | while (!list_empty(&buf->ctx_list)) { |
81 | ref = list_entry(buf->ctx_list.next, | 81 | ref = list_entry(buf->ctx_list.next, |
82 | struct qeth_eddp_context_reference, list); | 82 | struct qeth_eddp_context_reference, list); |
@@ -91,7 +91,7 @@ static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, | |||
91 | { | 91 | { |
92 | struct qeth_eddp_context_reference *ref; | 92 | struct qeth_eddp_context_reference *ref; |
93 | 93 | ||
94 | QETH_DBF_TEXT(trace, 6, "eddprfcx"); | 94 | QETH_DBF_TEXT(TRACE, 6, "eddprfcx"); |
95 | ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC); | 95 | ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC); |
96 | if (ref == NULL) | 96 | if (ref == NULL) |
97 | return -ENOMEM; | 97 | return -ENOMEM; |
@@ -112,7 +112,7 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, | |||
112 | int must_refcnt = 1; | 112 | int must_refcnt = 1; |
113 | int i; | 113 | int i; |
114 | 114 | ||
115 | QETH_DBF_TEXT(trace, 5, "eddpfibu"); | 115 | QETH_DBF_TEXT(TRACE, 5, "eddpfibu"); |
116 | while (elements > 0) { | 116 | while (elements > 0) { |
117 | buf = &queue->bufs[index]; | 117 | buf = &queue->bufs[index]; |
118 | if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) { | 118 | if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) { |
@@ -166,7 +166,7 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, | |||
166 | } | 166 | } |
167 | out_check: | 167 | out_check: |
168 | if (!queue->do_pack) { | 168 | if (!queue->do_pack) { |
169 | QETH_DBF_TEXT(trace, 6, "fillbfnp"); | 169 | QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); |
170 | /* set state to PRIMED -> will be flushed */ | 170 | /* set state to PRIMED -> will be flushed */ |
171 | if (buf->next_element_to_fill > 0) { | 171 | if (buf->next_element_to_fill > 0) { |
172 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); | 172 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); |
@@ -175,7 +175,7 @@ out_check: | |||
175 | } else { | 175 | } else { |
176 | if (queue->card->options.performance_stats) | 176 | if (queue->card->options.performance_stats) |
177 | queue->card->perf_stats.skbs_sent_pack++; | 177 | queue->card->perf_stats.skbs_sent_pack++; |
178 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); | 178 | QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); |
179 | if (buf->next_element_to_fill >= | 179 | if (buf->next_element_to_fill >= |
180 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { | 180 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { |
181 | /* | 181 | /* |
@@ -199,7 +199,7 @@ static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, | |||
199 | int pkt_len; | 199 | int pkt_len; |
200 | struct qeth_eddp_element *element; | 200 | struct qeth_eddp_element *element; |
201 | 201 | ||
202 | QETH_DBF_TEXT(trace, 5, "eddpcrsh"); | 202 | QETH_DBF_TEXT(TRACE, 5, "eddpcrsh"); |
203 | page = ctx->pages[ctx->offset >> PAGE_SHIFT]; | 203 | page = ctx->pages[ctx->offset >> PAGE_SHIFT]; |
204 | page_offset = ctx->offset % PAGE_SIZE; | 204 | page_offset = ctx->offset % PAGE_SIZE; |
205 | element = &ctx->elements[ctx->num_elements]; | 205 | element = &ctx->elements[ctx->num_elements]; |
@@ -257,7 +257,7 @@ static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, | |||
257 | int copy_len; | 257 | int copy_len; |
258 | u8 *src; | 258 | u8 *src; |
259 | 259 | ||
260 | QETH_DBF_TEXT(trace, 5, "eddpcdtc"); | 260 | QETH_DBF_TEXT(TRACE, 5, "eddpcdtc"); |
261 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { | 261 | if (skb_shinfo(eddp->skb)->nr_frags == 0) { |
262 | skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, | 262 | skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, |
263 | dst, len); | 263 | dst, len); |
@@ -305,7 +305,7 @@ static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, | |||
305 | struct qeth_eddp_element *element; | 305 | struct qeth_eddp_element *element; |
306 | int first_lap = 1; | 306 | int first_lap = 1; |
307 | 307 | ||
308 | QETH_DBF_TEXT(trace, 5, "eddpcsdt"); | 308 | QETH_DBF_TEXT(TRACE, 5, "eddpcsdt"); |
309 | page = ctx->pages[ctx->offset >> PAGE_SHIFT]; | 309 | page = ctx->pages[ctx->offset >> PAGE_SHIFT]; |
310 | page_offset = ctx->offset % PAGE_SIZE; | 310 | page_offset = ctx->offset % PAGE_SIZE; |
311 | element = &ctx->elements[ctx->num_elements]; | 311 | element = &ctx->elements[ctx->num_elements]; |
@@ -346,7 +346,7 @@ static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, | |||
346 | { | 346 | { |
347 | __wsum phcsum; /* pseudo header checksum */ | 347 | __wsum phcsum; /* pseudo header checksum */ |
348 | 348 | ||
349 | QETH_DBF_TEXT(trace, 5, "eddpckt4"); | 349 | QETH_DBF_TEXT(TRACE, 5, "eddpckt4"); |
350 | eddp->th.tcp.h.check = 0; | 350 | eddp->th.tcp.h.check = 0; |
351 | /* compute pseudo header checksum */ | 351 | /* compute pseudo header checksum */ |
352 | phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr, | 352 | phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr, |
@@ -361,7 +361,7 @@ static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, | |||
361 | __be32 proto; | 361 | __be32 proto; |
362 | __wsum phcsum; /* pseudo header checksum */ | 362 | __wsum phcsum; /* pseudo header checksum */ |
363 | 363 | ||
364 | QETH_DBF_TEXT(trace, 5, "eddpckt6"); | 364 | QETH_DBF_TEXT(TRACE, 5, "eddpckt6"); |
365 | eddp->th.tcp.h.check = 0; | 365 | eddp->th.tcp.h.check = 0; |
366 | /* compute pseudo header checksum */ | 366 | /* compute pseudo header checksum */ |
367 | phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr, | 367 | phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr, |
@@ -378,7 +378,7 @@ static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh, | |||
378 | { | 378 | { |
379 | struct qeth_eddp_data *eddp; | 379 | struct qeth_eddp_data *eddp; |
380 | 380 | ||
381 | QETH_DBF_TEXT(trace, 5, "eddpcrda"); | 381 | QETH_DBF_TEXT(TRACE, 5, "eddpcrda"); |
382 | eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); | 382 | eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); |
383 | if (eddp) { | 383 | if (eddp) { |
384 | eddp->nhl = nhl; | 384 | eddp->nhl = nhl; |
@@ -398,7 +398,7 @@ static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
398 | int data_len; | 398 | int data_len; |
399 | __wsum hcsum; | 399 | __wsum hcsum; |
400 | 400 | ||
401 | QETH_DBF_TEXT(trace, 5, "eddpftcp"); | 401 | QETH_DBF_TEXT(TRACE, 5, "eddpftcp"); |
402 | eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; | 402 | eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; |
403 | if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | 403 | if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { |
404 | eddp->skb_offset += sizeof(struct ethhdr); | 404 | eddp->skb_offset += sizeof(struct ethhdr); |
@@ -457,7 +457,7 @@ static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
457 | { | 457 | { |
458 | struct qeth_eddp_data *eddp = NULL; | 458 | struct qeth_eddp_data *eddp = NULL; |
459 | 459 | ||
460 | QETH_DBF_TEXT(trace, 5, "eddpficx"); | 460 | QETH_DBF_TEXT(TRACE, 5, "eddpficx"); |
461 | /* create our segmentation headers and copy original headers */ | 461 | /* create our segmentation headers and copy original headers */ |
462 | if (skb->protocol == htons(ETH_P_IP)) | 462 | if (skb->protocol == htons(ETH_P_IP)) |
463 | eddp = qeth_eddp_create_eddp_data(qhdr, | 463 | eddp = qeth_eddp_create_eddp_data(qhdr, |
@@ -473,7 +473,7 @@ static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
473 | tcp_hdrlen(skb)); | 473 | tcp_hdrlen(skb)); |
474 | 474 | ||
475 | if (eddp == NULL) { | 475 | if (eddp == NULL) { |
476 | QETH_DBF_TEXT(trace, 2, "eddpfcnm"); | 476 | QETH_DBF_TEXT(TRACE, 2, "eddpfcnm"); |
477 | return -ENOMEM; | 477 | return -ENOMEM; |
478 | } | 478 | } |
479 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { | 479 | if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { |
@@ -499,7 +499,7 @@ static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, | |||
499 | { | 499 | { |
500 | int skbs_per_page; | 500 | int skbs_per_page; |
501 | 501 | ||
502 | QETH_DBF_TEXT(trace, 5, "eddpcanp"); | 502 | QETH_DBF_TEXT(TRACE, 5, "eddpcanp"); |
503 | /* can we put multiple skbs in one page? */ | 503 | /* can we put multiple skbs in one page? */ |
504 | skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); | 504 | skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); |
505 | if (skbs_per_page > 1) { | 505 | if (skbs_per_page > 1) { |
@@ -524,30 +524,30 @@ static struct qeth_eddp_context *qeth_eddp_create_context_generic( | |||
524 | u8 *addr; | 524 | u8 *addr; |
525 | int i; | 525 | int i; |
526 | 526 | ||
527 | QETH_DBF_TEXT(trace, 5, "creddpcg"); | 527 | QETH_DBF_TEXT(TRACE, 5, "creddpcg"); |
528 | /* create the context and allocate pages */ | 528 | /* create the context and allocate pages */ |
529 | ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); | 529 | ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); |
530 | if (ctx == NULL) { | 530 | if (ctx == NULL) { |
531 | QETH_DBF_TEXT(trace, 2, "ceddpcn1"); | 531 | QETH_DBF_TEXT(TRACE, 2, "ceddpcn1"); |
532 | return NULL; | 532 | return NULL; |
533 | } | 533 | } |
534 | ctx->type = QETH_LARGE_SEND_EDDP; | 534 | ctx->type = QETH_LARGE_SEND_EDDP; |
535 | qeth_eddp_calc_num_pages(ctx, skb, hdr_len); | 535 | qeth_eddp_calc_num_pages(ctx, skb, hdr_len); |
536 | if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) { | 536 | if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) { |
537 | QETH_DBF_TEXT(trace, 2, "ceddpcis"); | 537 | QETH_DBF_TEXT(TRACE, 2, "ceddpcis"); |
538 | kfree(ctx); | 538 | kfree(ctx); |
539 | return NULL; | 539 | return NULL; |
540 | } | 540 | } |
541 | ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); | 541 | ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); |
542 | if (ctx->pages == NULL) { | 542 | if (ctx->pages == NULL) { |
543 | QETH_DBF_TEXT(trace, 2, "ceddpcn2"); | 543 | QETH_DBF_TEXT(TRACE, 2, "ceddpcn2"); |
544 | kfree(ctx); | 544 | kfree(ctx); |
545 | return NULL; | 545 | return NULL; |
546 | } | 546 | } |
547 | for (i = 0; i < ctx->num_pages; ++i) { | 547 | for (i = 0; i < ctx->num_pages; ++i) { |
548 | addr = (u8 *)get_zeroed_page(GFP_ATOMIC); | 548 | addr = (u8 *)get_zeroed_page(GFP_ATOMIC); |
549 | if (addr == NULL) { | 549 | if (addr == NULL) { |
550 | QETH_DBF_TEXT(trace, 2, "ceddpcn3"); | 550 | QETH_DBF_TEXT(TRACE, 2, "ceddpcn3"); |
551 | ctx->num_pages = i; | 551 | ctx->num_pages = i; |
552 | qeth_eddp_free_context(ctx); | 552 | qeth_eddp_free_context(ctx); |
553 | return NULL; | 553 | return NULL; |
@@ -557,7 +557,7 @@ static struct qeth_eddp_context *qeth_eddp_create_context_generic( | |||
557 | ctx->elements = kcalloc(ctx->num_elements, | 557 | ctx->elements = kcalloc(ctx->num_elements, |
558 | sizeof(struct qeth_eddp_element), GFP_ATOMIC); | 558 | sizeof(struct qeth_eddp_element), GFP_ATOMIC); |
559 | if (ctx->elements == NULL) { | 559 | if (ctx->elements == NULL) { |
560 | QETH_DBF_TEXT(trace, 2, "ceddpcn4"); | 560 | QETH_DBF_TEXT(TRACE, 2, "ceddpcn4"); |
561 | qeth_eddp_free_context(ctx); | 561 | qeth_eddp_free_context(ctx); |
562 | return NULL; | 562 | return NULL; |
563 | } | 563 | } |
@@ -573,7 +573,7 @@ static struct qeth_eddp_context *qeth_eddp_create_context_tcp( | |||
573 | { | 573 | { |
574 | struct qeth_eddp_context *ctx = NULL; | 574 | struct qeth_eddp_context *ctx = NULL; |
575 | 575 | ||
576 | QETH_DBF_TEXT(trace, 5, "creddpct"); | 576 | QETH_DBF_TEXT(TRACE, 5, "creddpct"); |
577 | if (skb->protocol == htons(ETH_P_IP)) | 577 | if (skb->protocol == htons(ETH_P_IP)) |
578 | ctx = qeth_eddp_create_context_generic(card, skb, | 578 | ctx = qeth_eddp_create_context_generic(card, skb, |
579 | (sizeof(struct qeth_hdr) + | 579 | (sizeof(struct qeth_hdr) + |
@@ -584,14 +584,14 @@ static struct qeth_eddp_context *qeth_eddp_create_context_tcp( | |||
584 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + | 584 | sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + |
585 | tcp_hdrlen(skb)); | 585 | tcp_hdrlen(skb)); |
586 | else | 586 | else |
587 | QETH_DBF_TEXT(trace, 2, "cetcpinv"); | 587 | QETH_DBF_TEXT(TRACE, 2, "cetcpinv"); |
588 | 588 | ||
589 | if (ctx == NULL) { | 589 | if (ctx == NULL) { |
590 | QETH_DBF_TEXT(trace, 2, "creddpnl"); | 590 | QETH_DBF_TEXT(TRACE, 2, "creddpnl"); |
591 | return NULL; | 591 | return NULL; |
592 | } | 592 | } |
593 | if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) { | 593 | if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) { |
594 | QETH_DBF_TEXT(trace, 2, "ceddptfe"); | 594 | QETH_DBF_TEXT(TRACE, 2, "ceddptfe"); |
595 | qeth_eddp_free_context(ctx); | 595 | qeth_eddp_free_context(ctx); |
596 | return NULL; | 596 | return NULL; |
597 | } | 597 | } |
@@ -603,12 +603,12 @@ struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card, | |||
603 | struct sk_buff *skb, struct qeth_hdr *qhdr, | 603 | struct sk_buff *skb, struct qeth_hdr *qhdr, |
604 | unsigned char sk_protocol) | 604 | unsigned char sk_protocol) |
605 | { | 605 | { |
606 | QETH_DBF_TEXT(trace, 5, "creddpc"); | 606 | QETH_DBF_TEXT(TRACE, 5, "creddpc"); |
607 | switch (sk_protocol) { | 607 | switch (sk_protocol) { |
608 | case IPPROTO_TCP: | 608 | case IPPROTO_TCP: |
609 | return qeth_eddp_create_context_tcp(card, skb, qhdr); | 609 | return qeth_eddp_create_context_tcp(card, skb, qhdr); |
610 | default: | 610 | default: |
611 | QETH_DBF_TEXT(trace, 2, "eddpinvp"); | 611 | QETH_DBF_TEXT(TRACE, 2, "eddpinvp"); |
612 | } | 612 | } |
613 | return NULL; | 613 | return NULL; |
614 | } | 614 | } |
@@ -622,7 +622,7 @@ void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr, | |||
622 | struct iphdr *iph = ip_hdr(skb); | 622 | struct iphdr *iph = ip_hdr(skb); |
623 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | 623 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
624 | 624 | ||
625 | QETH_DBF_TEXT(trace, 5, "tsofhdr"); | 625 | QETH_DBF_TEXT(TRACE, 5, "tsofhdr"); |
626 | 626 | ||
627 | /*fix header to TSO values ...*/ | 627 | /*fix header to TSO values ...*/ |
628 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; | 628 | hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; |
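Note: every QETH_DBF_TEXT()/QETH_DBF_HEX() call site in this file now names the facility with an uppercase token (TRACE, SETUP, CTRL, ...) instead of a lowercase per-facility debug_info_t variable. The reworked helpers live in qeth_core.h, which is outside this excerpt; a plausible shape, with the enum members and macro bodies treated as assumptions derived from the old registration block and the new call sites, is:

	/* Assumed helpers in qeth_core.h -- not quoted from the header. */
	enum qeth_dbf_names {
		QETH_DBF_SETUP,
		QETH_DBF_MISC,
		QETH_DBF_DATA,
		QETH_DBF_CTRL,
		QETH_DBF_SENSE,
		QETH_DBF_QERR,
		QETH_DBF_TRACE,
		QETH_DBF_INFOS	/* must stay last: number of table entries */
	};

	#define QETH_DBF_TEXT(name, level, text) \
		debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)

	#define QETH_DBF_HEX(name, level, addr, len) \
		debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)

The uppercase token selects an entry in the shared qeth_dbf[] table rather than referencing a separately declared debug area.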
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 4417a3629ae0..3921d1631a78 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -22,16 +22,7 @@ | |||
22 | #include "qeth_core.h" | 22 | #include "qeth_core.h" |
23 | #include "qeth_core_offl.h" | 23 | #include "qeth_core_offl.h" |
24 | 24 | ||
25 | #define QETH_DBF_TEXT_(name, level, text...) \ | 25 | #define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf |
26 | do { \ | ||
27 | if (qeth_dbf_passes(qeth_dbf_##name, level)) { \ | ||
28 | char *dbf_txt_buf = get_cpu_var(qeth_l2_dbf_txt_buf); \ | ||
29 | sprintf(dbf_txt_buf, text); \ | ||
30 | debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \ | ||
31 | put_cpu_var(qeth_l2_dbf_txt_buf); \ | ||
32 | } \ | ||
33 | } while (0) | ||
34 | |||
35 | static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf); | 26 | static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf); |
36 | 27 | ||
37 | static int qeth_l2_set_offline(struct ccwgroup_device *); | 28 | static int qeth_l2_set_offline(struct ccwgroup_device *); |
@@ -87,7 +78,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
87 | rc = -EOPNOTSUPP; | 78 | rc = -EOPNOTSUPP; |
88 | } | 79 | } |
89 | if (rc) | 80 | if (rc) |
90 | QETH_DBF_TEXT_(trace, 2, "ioce%d", rc); | 81 | QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); |
91 | return rc; | 82 | return rc; |
92 | } | 83 | } |
93 | 84 | ||
@@ -141,7 +132,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
141 | struct qeth_ipa_cmd *cmd; | 132 | struct qeth_ipa_cmd *cmd; |
142 | __u8 *mac; | 133 | __u8 *mac; |
143 | 134 | ||
144 | QETH_DBF_TEXT(trace, 2, "L2Sgmacb"); | 135 | QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb"); |
145 | cmd = (struct qeth_ipa_cmd *) data; | 136 | cmd = (struct qeth_ipa_cmd *) data; |
146 | mac = &cmd->data.setdelmac.mac[0]; | 137 | mac = &cmd->data.setdelmac.mac[0]; |
147 | /* MAC already registered, needed in couple/uncouple case */ | 138 | /* MAC already registered, needed in couple/uncouple case */ |
@@ -162,7 +153,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
162 | 153 | ||
163 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) | 154 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) |
164 | { | 155 | { |
165 | QETH_DBF_TEXT(trace, 2, "L2Sgmac"); | 156 | QETH_DBF_TEXT(TRACE, 2, "L2Sgmac"); |
166 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, | 157 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, |
167 | qeth_l2_send_setgroupmac_cb); | 158 | qeth_l2_send_setgroupmac_cb); |
168 | } | 159 | } |
@@ -174,7 +165,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
174 | struct qeth_ipa_cmd *cmd; | 165 | struct qeth_ipa_cmd *cmd; |
175 | __u8 *mac; | 166 | __u8 *mac; |
176 | 167 | ||
177 | QETH_DBF_TEXT(trace, 2, "L2Dgmacb"); | 168 | QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb"); |
178 | cmd = (struct qeth_ipa_cmd *) data; | 169 | cmd = (struct qeth_ipa_cmd *) data; |
179 | mac = &cmd->data.setdelmac.mac[0]; | 170 | mac = &cmd->data.setdelmac.mac[0]; |
180 | if (cmd->hdr.return_code) | 171 | if (cmd->hdr.return_code) |
@@ -187,7 +178,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
187 | 178 | ||
188 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | 179 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) |
189 | { | 180 | { |
190 | QETH_DBF_TEXT(trace, 2, "L2Dgmac"); | 181 | QETH_DBF_TEXT(TRACE, 2, "L2Dgmac"); |
191 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, | 182 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, |
192 | qeth_l2_send_delgroupmac_cb); | 183 | qeth_l2_send_delgroupmac_cb); |
193 | } | 184 | } |
@@ -289,15 +280,15 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, | |||
289 | { | 280 | { |
290 | struct qeth_ipa_cmd *cmd; | 281 | struct qeth_ipa_cmd *cmd; |
291 | 282 | ||
292 | QETH_DBF_TEXT(trace, 2, "L2sdvcb"); | 283 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); |
293 | cmd = (struct qeth_ipa_cmd *) data; | 284 | cmd = (struct qeth_ipa_cmd *) data; |
294 | if (cmd->hdr.return_code) { | 285 | if (cmd->hdr.return_code) { |
295 | PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. " | 286 | PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. " |
296 | "Continuing\n", cmd->data.setdelvlan.vlan_id, | 287 | "Continuing\n", cmd->data.setdelvlan.vlan_id, |
297 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 288 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
298 | QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command); | 289 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); |
299 | QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card)); | 290 | QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card)); |
300 | QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code); | 291 | QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); |
301 | } | 292 | } |
302 | return 0; | 293 | return 0; |
303 | } | 294 | } |
@@ -308,7 +299,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
308 | struct qeth_ipa_cmd *cmd; | 299 | struct qeth_ipa_cmd *cmd; |
309 | struct qeth_cmd_buffer *iob; | 300 | struct qeth_cmd_buffer *iob; |
310 | 301 | ||
311 | QETH_DBF_TEXT_(trace, 4, "L2sdv%x", ipacmd); | 302 | QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd); |
312 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 303 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
313 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 304 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
314 | cmd->data.setdelvlan.vlan_id = i; | 305 | cmd->data.setdelvlan.vlan_id = i; |
@@ -319,7 +310,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
319 | static void qeth_l2_process_vlans(struct qeth_card *card, int clear) | 310 | static void qeth_l2_process_vlans(struct qeth_card *card, int clear) |
320 | { | 311 | { |
321 | struct qeth_vlan_vid *id; | 312 | struct qeth_vlan_vid *id; |
322 | QETH_DBF_TEXT(trace, 3, "L2prcvln"); | 313 | QETH_DBF_TEXT(TRACE, 3, "L2prcvln"); |
323 | spin_lock_bh(&card->vlanlock); | 314 | spin_lock_bh(&card->vlanlock); |
324 | list_for_each_entry(id, &card->vid_list, list) { | 315 | list_for_each_entry(id, &card->vid_list, list) { |
325 | if (clear) | 316 | if (clear) |
@@ -337,7 +328,7 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
337 | struct qeth_card *card = netdev_priv(dev); | 328 | struct qeth_card *card = netdev_priv(dev); |
338 | struct qeth_vlan_vid *id; | 329 | struct qeth_vlan_vid *id; |
339 | 330 | ||
340 | QETH_DBF_TEXT_(trace, 4, "aid:%d", vid); | 331 | QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid); |
341 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); | 332 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); |
342 | if (id) { | 333 | if (id) { |
343 | id->vid = vid; | 334 | id->vid = vid; |
@@ -355,7 +346,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
355 | struct qeth_vlan_vid *id, *tmpid = NULL; | 346 | struct qeth_vlan_vid *id, *tmpid = NULL; |
356 | struct qeth_card *card = netdev_priv(dev); | 347 | struct qeth_card *card = netdev_priv(dev); |
357 | 348 | ||
358 | QETH_DBF_TEXT_(trace, 4, "kid:%d", vid); | 349 | QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); |
359 | spin_lock_bh(&card->vlanlock); | 350 | spin_lock_bh(&card->vlanlock); |
360 | list_for_each_entry(id, &card->vid_list, list) { | 351 | list_for_each_entry(id, &card->vid_list, list) { |
361 | if (id->vid == vid) { | 352 | if (id->vid == vid) { |
@@ -376,8 +367,8 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
376 | { | 367 | { |
377 | int rc = 0; | 368 | int rc = 0; |
378 | 369 | ||
379 | QETH_DBF_TEXT(setup , 2, "stopcard"); | 370 | QETH_DBF_TEXT(SETUP , 2, "stopcard"); |
380 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 371 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
381 | 372 | ||
382 | qeth_set_allowed_threads(card, 0, 1); | 373 | qeth_set_allowed_threads(card, 0, 1); |
383 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) | 374 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) |
@@ -396,7 +387,7 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
396 | if (!card->use_hard_stop) { | 387 | if (!card->use_hard_stop) { |
397 | __u8 *mac = &card->dev->dev_addr[0]; | 388 | __u8 *mac = &card->dev->dev_addr[0]; |
398 | rc = qeth_l2_send_delmac(card, mac); | 389 | rc = qeth_l2_send_delmac(card, mac); |
399 | QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc); | 390 | QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc); |
400 | } | 391 | } |
401 | card->state = CARD_STATE_SOFTSETUP; | 392 | card->state = CARD_STATE_SOFTSETUP; |
402 | } | 393 | } |
@@ -451,7 +442,8 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
451 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 442 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
452 | else | 443 | else |
453 | skb->ip_summed = CHECKSUM_NONE; | 444 | skb->ip_summed = CHECKSUM_NONE; |
454 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | 445 | if (skb->protocol == htons(ETH_P_802_2)) |
446 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | ||
455 | len = skb->len; | 447 | len = skb->len; |
456 | netif_rx(skb); | 448 | netif_rx(skb); |
457 | break; | 449 | break; |
@@ -464,8 +456,8 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
464 | break; | 456 | break; |
465 | default: | 457 | default: |
466 | dev_kfree_skb_any(skb); | 458 | dev_kfree_skb_any(skb); |
467 | QETH_DBF_TEXT(trace, 3, "inbunkno"); | 459 | QETH_DBF_TEXT(TRACE, 3, "inbunkno"); |
468 | QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN); | 460 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
469 | continue; | 461 | continue; |
470 | } | 462 | } |
471 | card->dev->last_rx = jiffies; | 463 | card->dev->last_rx = jiffies; |
@@ -483,7 +475,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | |||
483 | struct qeth_ipa_cmd *cmd; | 475 | struct qeth_ipa_cmd *cmd; |
484 | struct qeth_cmd_buffer *iob; | 476 | struct qeth_cmd_buffer *iob; |
485 | 477 | ||
486 | QETH_DBF_TEXT(trace, 2, "L2sdmac"); | 478 | QETH_DBF_TEXT(TRACE, 2, "L2sdmac"); |
487 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 479 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
488 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 480 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
489 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; | 481 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; |
@@ -497,10 +489,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
497 | { | 489 | { |
498 | struct qeth_ipa_cmd *cmd; | 490 | struct qeth_ipa_cmd *cmd; |
499 | 491 | ||
500 | QETH_DBF_TEXT(trace, 2, "L2Smaccb"); | 492 | QETH_DBF_TEXT(TRACE, 2, "L2Smaccb"); |
501 | cmd = (struct qeth_ipa_cmd *) data; | 493 | cmd = (struct qeth_ipa_cmd *) data; |
502 | if (cmd->hdr.return_code) { | 494 | if (cmd->hdr.return_code) { |
503 | QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code); | 495 | QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code); |
504 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 496 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
505 | cmd->hdr.return_code = -EIO; | 497 | cmd->hdr.return_code = -EIO; |
506 | } else { | 498 | } else { |
@@ -519,7 +511,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
519 | 511 | ||
520 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | 512 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) |
521 | { | 513 | { |
522 | QETH_DBF_TEXT(trace, 2, "L2Setmac"); | 514 | QETH_DBF_TEXT(TRACE, 2, "L2Setmac"); |
523 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | 515 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, |
524 | qeth_l2_send_setmac_cb); | 516 | qeth_l2_send_setmac_cb); |
525 | } | 517 | } |
@@ -530,10 +522,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, | |||
530 | { | 522 | { |
531 | struct qeth_ipa_cmd *cmd; | 523 | struct qeth_ipa_cmd *cmd; |
532 | 524 | ||
533 | QETH_DBF_TEXT(trace, 2, "L2Dmaccb"); | 525 | QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb"); |
534 | cmd = (struct qeth_ipa_cmd *) data; | 526 | cmd = (struct qeth_ipa_cmd *) data; |
535 | if (cmd->hdr.return_code) { | 527 | if (cmd->hdr.return_code) { |
536 | QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code); | 528 | QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); |
537 | cmd->hdr.return_code = -EIO; | 529 | cmd->hdr.return_code = -EIO; |
538 | return 0; | 530 | return 0; |
539 | } | 531 | } |
@@ -544,7 +536,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, | |||
544 | 536 | ||
545 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) | 537 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) |
546 | { | 538 | { |
547 | QETH_DBF_TEXT(trace, 2, "L2Delmac"); | 539 | QETH_DBF_TEXT(TRACE, 2, "L2Delmac"); |
548 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 540 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
549 | return 0; | 541 | return 0; |
550 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, | 542 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, |
@@ -556,8 +548,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
556 | int rc = 0; | 548 | int rc = 0; |
557 | char vendor_pre[] = {0x02, 0x00, 0x00}; | 549 | char vendor_pre[] = {0x02, 0x00, 0x00}; |
558 | 550 | ||
559 | QETH_DBF_TEXT(setup, 2, "doL2init"); | 551 | QETH_DBF_TEXT(SETUP, 2, "doL2init"); |
560 | QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card)); | 552 | QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); |
561 | 553 | ||
562 | rc = qeth_query_setadapterparms(card); | 554 | rc = qeth_query_setadapterparms(card); |
563 | if (rc) { | 555 | if (rc) { |
@@ -571,10 +563,10 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
571 | PRINT_WARN("couldn't get MAC address on " | 563 | PRINT_WARN("couldn't get MAC address on " |
572 | "device %s: x%x\n", | 564 | "device %s: x%x\n", |
573 | CARD_BUS_ID(card), rc); | 565 | CARD_BUS_ID(card), rc); |
574 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 566 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
575 | return rc; | 567 | return rc; |
576 | } | 568 | } |
577 | QETH_DBF_HEX(setup, 2, card->dev->dev_addr, OSA_ADDR_LEN); | 569 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); |
578 | } else { | 570 | } else { |
579 | random_ether_addr(card->dev->dev_addr); | 571 | random_ether_addr(card->dev->dev_addr); |
580 | memcpy(card->dev->dev_addr, vendor_pre, 3); | 572 | memcpy(card->dev->dev_addr, vendor_pre, 3); |
@@ -588,21 +580,21 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
588 | struct qeth_card *card = netdev_priv(dev); | 580 | struct qeth_card *card = netdev_priv(dev); |
589 | int rc = 0; | 581 | int rc = 0; |
590 | 582 | ||
591 | QETH_DBF_TEXT(trace, 3, "setmac"); | 583 | QETH_DBF_TEXT(TRACE, 3, "setmac"); |
592 | 584 | ||
593 | if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { | 585 | if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { |
594 | QETH_DBF_TEXT(trace, 3, "setmcINV"); | 586 | QETH_DBF_TEXT(TRACE, 3, "setmcINV"); |
595 | return -EOPNOTSUPP; | 587 | return -EOPNOTSUPP; |
596 | } | 588 | } |
597 | 589 | ||
598 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 590 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
599 | PRINT_WARN("Setting MAC address on %s is not supported.\n", | 591 | PRINT_WARN("Setting MAC address on %s is not supported.\n", |
600 | dev->name); | 592 | dev->name); |
601 | QETH_DBF_TEXT(trace, 3, "setmcOSN"); | 593 | QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); |
602 | return -EOPNOTSUPP; | 594 | return -EOPNOTSUPP; |
603 | } | 595 | } |
604 | QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card)); | 596 | QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card)); |
605 | QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN); | 597 | QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN); |
606 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 598 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
607 | if (!rc) | 599 | if (!rc) |
608 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 600 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
@@ -617,7 +609,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) | |||
617 | if (card->info.type == QETH_CARD_TYPE_OSN) | 609 | if (card->info.type == QETH_CARD_TYPE_OSN) |
618 | return ; | 610 | return ; |
619 | 611 | ||
620 | QETH_DBF_TEXT(trace, 3, "setmulti"); | 612 | QETH_DBF_TEXT(TRACE, 3, "setmulti"); |
621 | qeth_l2_del_all_mc(card); | 613 | qeth_l2_del_all_mc(card); |
622 | spin_lock_bh(&card->mclock); | 614 | spin_lock_bh(&card->mclock); |
623 | for (dm = dev->mc_list; dm; dm = dm->next) | 615 | for (dm = dev->mc_list; dm; dm = dm->next) |
@@ -643,7 +635,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
643 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; | 635 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; |
644 | struct qeth_eddp_context *ctx = NULL; | 636 | struct qeth_eddp_context *ctx = NULL; |
645 | 637 | ||
646 | QETH_DBF_TEXT(trace, 6, "l2xmit"); | 638 | QETH_DBF_TEXT(TRACE, 6, "l2xmit"); |
647 | 639 | ||
648 | if ((card->state != CARD_STATE_UP) || !card->lan_online) { | 640 | if ((card->state != CARD_STATE_UP) || !card->lan_online) { |
649 | card->stats.tx_carrier_errors++; | 641 | card->stats.tx_carrier_errors++; |
@@ -755,7 +747,7 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, | |||
755 | int index; | 747 | int index; |
756 | int i; | 748 | int i; |
757 | 749 | ||
758 | QETH_DBF_TEXT(trace, 6, "qdinput"); | 750 | QETH_DBF_TEXT(TRACE, 6, "qdinput"); |
759 | card = (struct qeth_card *) card_ptr; | 751 | card = (struct qeth_card *) card_ptr; |
760 | net_dev = card->dev; | 752 | net_dev = card->dev; |
761 | if (card->options.performance_stats) { | 753 | if (card->options.performance_stats) { |
@@ -764,11 +756,11 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, | |||
764 | } | 756 | } |
765 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { | 757 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { |
766 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { | 758 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { |
767 | QETH_DBF_TEXT(trace, 1, "qdinchk"); | 759 | QETH_DBF_TEXT(TRACE, 1, "qdinchk"); |
768 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 760 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
769 | QETH_DBF_TEXT_(trace, 1, "%04X%04X", first_element, | 761 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, |
770 | count); | 762 | count); |
771 | QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status); | 763 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); |
772 | qeth_schedule_recovery(card); | 764 | qeth_schedule_recovery(card); |
773 | return; | 765 | return; |
774 | } | 766 | } |
@@ -793,13 +785,13 @@ static int qeth_l2_open(struct net_device *dev) | |||
793 | { | 785 | { |
794 | struct qeth_card *card = netdev_priv(dev); | 786 | struct qeth_card *card = netdev_priv(dev); |
795 | 787 | ||
796 | QETH_DBF_TEXT(trace, 4, "qethopen"); | 788 | QETH_DBF_TEXT(TRACE, 4, "qethopen"); |
797 | if (card->state != CARD_STATE_SOFTSETUP) | 789 | if (card->state != CARD_STATE_SOFTSETUP) |
798 | return -ENODEV; | 790 | return -ENODEV; |
799 | 791 | ||
800 | if ((card->info.type != QETH_CARD_TYPE_OSN) && | 792 | if ((card->info.type != QETH_CARD_TYPE_OSN) && |
801 | (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { | 793 | (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { |
802 | QETH_DBF_TEXT(trace, 4, "nomacadr"); | 794 | QETH_DBF_TEXT(TRACE, 4, "nomacadr"); |
803 | return -EPERM; | 795 | return -EPERM; |
804 | } | 796 | } |
805 | card->data.state = CH_STATE_UP; | 797 | card->data.state = CH_STATE_UP; |
@@ -817,7 +809,7 @@ static int qeth_l2_stop(struct net_device *dev) | |||
817 | { | 809 | { |
818 | struct qeth_card *card = netdev_priv(dev); | 810 | struct qeth_card *card = netdev_priv(dev); |
819 | 811 | ||
820 | QETH_DBF_TEXT(trace, 4, "qethstop"); | 812 | QETH_DBF_TEXT(TRACE, 4, "qethstop"); |
821 | netif_tx_disable(dev); | 813 | netif_tx_disable(dev); |
822 | card->dev->flags &= ~IFF_UP; | 814 | card->dev->flags &= ~IFF_UP; |
823 | if (card->state == CARD_STATE_UP) | 815 | if (card->state == CARD_STATE_UP) |
@@ -933,8 +925,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
933 | enum qeth_card_states recover_flag; | 925 | enum qeth_card_states recover_flag; |
934 | 926 | ||
935 | BUG_ON(!card); | 927 | BUG_ON(!card); |
936 | QETH_DBF_TEXT(setup, 2, "setonlin"); | 928 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
937 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 929 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
938 | 930 | ||
939 | qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); | 931 | qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); |
940 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) { | 932 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) { |
@@ -946,23 +938,23 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
946 | recover_flag = card->state; | 938 | recover_flag = card->state; |
947 | rc = ccw_device_set_online(CARD_RDEV(card)); | 939 | rc = ccw_device_set_online(CARD_RDEV(card)); |
948 | if (rc) { | 940 | if (rc) { |
949 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 941 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
950 | return -EIO; | 942 | return -EIO; |
951 | } | 943 | } |
952 | rc = ccw_device_set_online(CARD_WDEV(card)); | 944 | rc = ccw_device_set_online(CARD_WDEV(card)); |
953 | if (rc) { | 945 | if (rc) { |
954 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 946 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
955 | return -EIO; | 947 | return -EIO; |
956 | } | 948 | } |
957 | rc = ccw_device_set_online(CARD_DDEV(card)); | 949 | rc = ccw_device_set_online(CARD_DDEV(card)); |
958 | if (rc) { | 950 | if (rc) { |
959 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 951 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
960 | return -EIO; | 952 | return -EIO; |
961 | } | 953 | } |
962 | 954 | ||
963 | rc = qeth_core_hardsetup_card(card); | 955 | rc = qeth_core_hardsetup_card(card); |
964 | if (rc) { | 956 | if (rc) { |
965 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 957 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
966 | goto out_remove; | 958 | goto out_remove; |
967 | } | 959 | } |
968 | 960 | ||
@@ -976,11 +968,11 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
976 | qeth_print_status_message(card); | 968 | qeth_print_status_message(card); |
977 | 969 | ||
978 | /* softsetup */ | 970 | /* softsetup */ |
979 | QETH_DBF_TEXT(setup, 2, "softsetp"); | 971 | QETH_DBF_TEXT(SETUP, 2, "softsetp"); |
980 | 972 | ||
981 | rc = qeth_send_startlan(card); | 973 | rc = qeth_send_startlan(card); |
982 | if (rc) { | 974 | if (rc) { |
983 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 975 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
984 | if (rc == 0xe080) { | 976 | if (rc == 0xe080) { |
985 | PRINT_WARN("LAN on card %s if offline! " | 977 | PRINT_WARN("LAN on card %s if offline! " |
986 | "Waiting for STARTLAN from card.\n", | 978 | "Waiting for STARTLAN from card.\n", |
@@ -1000,7 +992,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1000 | 992 | ||
1001 | rc = qeth_init_qdio_queues(card); | 993 | rc = qeth_init_qdio_queues(card); |
1002 | if (rc) { | 994 | if (rc) { |
1003 | QETH_DBF_TEXT_(setup, 2, "6err%d", rc); | 995 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); |
1004 | goto out_remove; | 996 | goto out_remove; |
1005 | } | 997 | } |
1006 | card->state = CARD_STATE_SOFTSETUP; | 998 | card->state = CARD_STATE_SOFTSETUP; |
@@ -1047,8 +1039,8 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1047 | int rc = 0, rc2 = 0, rc3 = 0; | 1039 | int rc = 0, rc2 = 0, rc3 = 0; |
1048 | enum qeth_card_states recover_flag; | 1040 | enum qeth_card_states recover_flag; |
1049 | 1041 | ||
1050 | QETH_DBF_TEXT(setup, 3, "setoffl"); | 1042 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
1051 | QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); | 1043 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
1052 | 1044 | ||
1053 | if (card->dev && netif_carrier_ok(card->dev)) | 1045 | if (card->dev && netif_carrier_ok(card->dev)) |
1054 | netif_carrier_off(card->dev); | 1046 | netif_carrier_off(card->dev); |
@@ -1064,7 +1056,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1064 | if (!rc) | 1056 | if (!rc) |
1065 | rc = (rc2) ? rc2 : rc3; | 1057 | rc = (rc2) ? rc2 : rc3; |
1066 | if (rc) | 1058 | if (rc) |
1067 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 1059 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1068 | if (recover_flag == CARD_STATE_UP) | 1060 | if (recover_flag == CARD_STATE_UP) |
1069 | card->state = CARD_STATE_RECOVER; | 1061 | card->state = CARD_STATE_RECOVER; |
1070 | /* let user_space know that device is offline */ | 1062 | /* let user_space know that device is offline */ |
@@ -1083,11 +1075,11 @@ static int qeth_l2_recover(void *ptr) | |||
1083 | int rc = 0; | 1075 | int rc = 0; |
1084 | 1076 | ||
1085 | card = (struct qeth_card *) ptr; | 1077 | card = (struct qeth_card *) ptr; |
1086 | QETH_DBF_TEXT(trace, 2, "recover1"); | 1078 | QETH_DBF_TEXT(TRACE, 2, "recover1"); |
1087 | QETH_DBF_HEX(trace, 2, &card, sizeof(void *)); | 1079 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); |
1088 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 1080 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
1089 | return 0; | 1081 | return 0; |
1090 | QETH_DBF_TEXT(trace, 2, "recover2"); | 1082 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
1091 | PRINT_WARN("Recovery of device %s started ...\n", | 1083 | PRINT_WARN("Recovery of device %s started ...\n", |
1092 | CARD_BUS_ID(card)); | 1084 | CARD_BUS_ID(card)); |
1093 | card->use_hard_stop = 1; | 1085 | card->use_hard_stop = 1; |
@@ -1138,12 +1130,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1138 | unsigned long flags; | 1130 | unsigned long flags; |
1139 | int rc = 0; | 1131 | int rc = 0; |
1140 | 1132 | ||
1141 | QETH_DBF_TEXT(trace, 5, "osndctrd"); | 1133 | QETH_DBF_TEXT(TRACE, 5, "osndctrd"); |
1142 | 1134 | ||
1143 | wait_event(card->wait_q, | 1135 | wait_event(card->wait_q, |
1144 | atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); | 1136 | atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); |
1145 | qeth_prepare_control_data(card, len, iob); | 1137 | qeth_prepare_control_data(card, len, iob); |
1146 | QETH_DBF_TEXT(trace, 6, "osnoirqp"); | 1138 | QETH_DBF_TEXT(TRACE, 6, "osnoirqp"); |
1147 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); | 1139 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); |
1148 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, | 1140 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, |
1149 | (addr_t) iob, 0, 0); | 1141 | (addr_t) iob, 0, 0); |
@@ -1151,7 +1143,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1151 | if (rc) { | 1143 | if (rc) { |
1152 | PRINT_WARN("qeth_osn_send_control_data: " | 1144 | PRINT_WARN("qeth_osn_send_control_data: " |
1153 | "ccw_device_start rc = %i\n", rc); | 1145 | "ccw_device_start rc = %i\n", rc); |
1154 | QETH_DBF_TEXT_(trace, 2, " err%d", rc); | 1146 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1155 | qeth_release_buffer(iob->channel, iob); | 1147 | qeth_release_buffer(iob->channel, iob); |
1156 | atomic_set(&card->write.irq_pending, 0); | 1148 | atomic_set(&card->write.irq_pending, 0); |
1157 | wake_up(&card->wait_q); | 1149 | wake_up(&card->wait_q); |
@@ -1164,7 +1156,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card, | |||
1164 | { | 1156 | { |
1165 | u16 s1, s2; | 1157 | u16 s1, s2; |
1166 | 1158 | ||
1167 | QETH_DBF_TEXT(trace, 4, "osndipa"); | 1159 | QETH_DBF_TEXT(TRACE, 4, "osndipa"); |
1168 | 1160 | ||
1169 | qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); | 1161 | qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); |
1170 | s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); | 1162 | s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); |
@@ -1182,7 +1174,7 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) | |||
1182 | struct qeth_card *card; | 1174 | struct qeth_card *card; |
1183 | int rc; | 1175 | int rc; |
1184 | 1176 | ||
1185 | QETH_DBF_TEXT(trace, 2, "osnsdmc"); | 1177 | QETH_DBF_TEXT(TRACE, 2, "osnsdmc"); |
1186 | if (!dev) | 1178 | if (!dev) |
1187 | return -ENODEV; | 1179 | return -ENODEV; |
1188 | card = netdev_priv(dev); | 1180 | card = netdev_priv(dev); |
@@ -1204,7 +1196,7 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, | |||
1204 | { | 1196 | { |
1205 | struct qeth_card *card; | 1197 | struct qeth_card *card; |
1206 | 1198 | ||
1207 | QETH_DBF_TEXT(trace, 2, "osnreg"); | 1199 | QETH_DBF_TEXT(TRACE, 2, "osnreg"); |
1208 | *dev = qeth_l2_netdev_by_devno(read_dev_no); | 1200 | *dev = qeth_l2_netdev_by_devno(read_dev_no); |
1209 | if (*dev == NULL) | 1201 | if (*dev == NULL) |
1210 | return -ENODEV; | 1202 | return -ENODEV; |
@@ -1223,7 +1215,7 @@ void qeth_osn_deregister(struct net_device *dev) | |||
1223 | { | 1215 | { |
1224 | struct qeth_card *card; | 1216 | struct qeth_card *card; |
1225 | 1217 | ||
1226 | QETH_DBF_TEXT(trace, 2, "osndereg"); | 1218 | QETH_DBF_TEXT(TRACE, 2, "osndereg"); |
1227 | if (!dev) | 1219 | if (!dev) |
1228 | return; | 1220 | return; |
1229 | card = netdev_priv(dev); | 1221 | card = netdev_priv(dev); |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index f639cc3af22b..1be353593a59 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -13,16 +13,7 @@ | |||
13 | 13 | ||
14 | #include "qeth_core.h" | 14 | #include "qeth_core.h" |
15 | 15 | ||
16 | #define QETH_DBF_TEXT_(name, level, text...) \ | 16 | #define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf |
17 | do { \ | ||
18 | if (qeth_dbf_passes(qeth_dbf_##name, level)) { \ | ||
19 | char *dbf_txt_buf = get_cpu_var(qeth_l3_dbf_txt_buf); \ | ||
20 | sprintf(dbf_txt_buf, text); \ | ||
21 | debug_text_event(qeth_dbf_##name, level, dbf_txt_buf); \ | ||
22 | put_cpu_var(qeth_l3_dbf_txt_buf); \ | ||
23 | } \ | ||
24 | } while (0) | ||
25 | |||
26 | DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf); | 17 | DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf); |
27 | 18 | ||
28 | struct qeth_ipaddr { | 19 | struct qeth_ipaddr { |
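Note: both qeth_l2_main.c and qeth_l3.h drop their private copies of QETH_DBF_TEXT_() and instead only define QETH_DBF_TXT_BUF to their per-CPU text buffer. A single shared definition in qeth_core.h along the following lines would reproduce the removed macro against the new qeth_dbf table; this is an assumption about the header, not a quotation from it:

	/* Assumed consolidated macro -- mirrors the per-driver versions deleted above. */
	#define QETH_DBF_TEXT_(name, level, text...)				\
	do {									\
		if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) {	\
			char *dbf_txt_buf = get_cpu_var(QETH_DBF_TXT_BUF);	\
			sprintf(dbf_txt_buf, text);				\
			debug_text_event(qeth_dbf[QETH_DBF_##name].id,		\
					 level, dbf_txt_buf);			\
			put_cpu_var(QETH_DBF_TXT_BUF);				\
		}								\
	} while (0)

Each caller keeps its own DEFINE_PER_CPU text buffer, as the retained qeth_l2_dbf_txt_buf and qeth_l3_dbf_txt_buf declarations show.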
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 21c439046b3c..e1bfe56087d6 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -259,7 +259,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card, | |||
259 | addr->users += add ? 1 : -1; | 259 | addr->users += add ? 1 : -1; |
260 | if (add && (addr->type == QETH_IP_TYPE_NORMAL) && | 260 | if (add && (addr->type == QETH_IP_TYPE_NORMAL) && |
261 | qeth_l3_is_addr_covered_by_ipato(card, addr)) { | 261 | qeth_l3_is_addr_covered_by_ipato(card, addr)) { |
262 | QETH_DBF_TEXT(trace, 2, "tkovaddr"); | 262 | QETH_DBF_TEXT(TRACE, 2, "tkovaddr"); |
263 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; | 263 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; |
264 | } | 264 | } |
265 | list_add_tail(&addr->entry, card->ip_tbd_list); | 265 | list_add_tail(&addr->entry, card->ip_tbd_list); |
@@ -273,13 +273,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
273 | unsigned long flags; | 273 | unsigned long flags; |
274 | int rc = 0; | 274 | int rc = 0; |
275 | 275 | ||
276 | QETH_DBF_TEXT(trace, 4, "delip"); | 276 | QETH_DBF_TEXT(TRACE, 4, "delip"); |
277 | 277 | ||
278 | if (addr->proto == QETH_PROT_IPV4) | 278 | if (addr->proto == QETH_PROT_IPV4) |
279 | QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4); | 279 | QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); |
280 | else { | 280 | else { |
281 | QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8); | 281 | QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); |
282 | QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8); | 282 | QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); |
283 | } | 283 | } |
284 | spin_lock_irqsave(&card->ip_lock, flags); | 284 | spin_lock_irqsave(&card->ip_lock, flags); |
285 | rc = __qeth_l3_insert_ip_todo(card, addr, 0); | 285 | rc = __qeth_l3_insert_ip_todo(card, addr, 0); |
@@ -292,12 +292,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
292 | unsigned long flags; | 292 | unsigned long flags; |
293 | int rc = 0; | 293 | int rc = 0; |
294 | 294 | ||
295 | QETH_DBF_TEXT(trace, 4, "addip"); | 295 | QETH_DBF_TEXT(TRACE, 4, "addip"); |
296 | if (addr->proto == QETH_PROT_IPV4) | 296 | if (addr->proto == QETH_PROT_IPV4) |
297 | QETH_DBF_HEX(trace, 4, &addr->u.a4.addr, 4); | 297 | QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); |
298 | else { | 298 | else { |
299 | QETH_DBF_HEX(trace, 4, &addr->u.a6.addr, 8); | 299 | QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); |
300 | QETH_DBF_HEX(trace, 4, ((char *)&addr->u.a6.addr) + 8, 8); | 300 | QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); |
301 | } | 301 | } |
302 | spin_lock_irqsave(&card->ip_lock, flags); | 302 | spin_lock_irqsave(&card->ip_lock, flags); |
303 | rc = __qeth_l3_insert_ip_todo(card, addr, 1); | 303 | rc = __qeth_l3_insert_ip_todo(card, addr, 1); |
@@ -326,10 +326,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card) | |||
326 | struct qeth_ipaddr *iptodo; | 326 | struct qeth_ipaddr *iptodo; |
327 | unsigned long flags; | 327 | unsigned long flags; |
328 | 328 | ||
329 | QETH_DBF_TEXT(trace, 4, "delmc"); | 329 | QETH_DBF_TEXT(TRACE, 4, "delmc"); |
330 | iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 330 | iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
331 | if (!iptodo) { | 331 | if (!iptodo) { |
332 | QETH_DBF_TEXT(trace, 2, "dmcnomem"); | 332 | QETH_DBF_TEXT(TRACE, 2, "dmcnomem"); |
333 | return; | 333 | return; |
334 | } | 334 | } |
335 | iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; | 335 | iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; |
@@ -401,8 +401,11 @@ static int __qeth_l3_ref_ip_on_card(struct qeth_card *card, | |||
401 | static void __qeth_l3_delete_all_mc(struct qeth_card *card, | 401 | static void __qeth_l3_delete_all_mc(struct qeth_card *card, |
402 | unsigned long *flags) | 402 | unsigned long *flags) |
403 | { | 403 | { |
404 | struct list_head fail_list; | ||
404 | struct qeth_ipaddr *addr, *tmp; | 405 | struct qeth_ipaddr *addr, *tmp; |
405 | int rc; | 406 | int rc; |
407 | |||
408 | INIT_LIST_HEAD(&fail_list); | ||
406 | again: | 409 | again: |
407 | list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) { | 410 | list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) { |
408 | if (addr->is_multicast) { | 411 | if (addr->is_multicast) { |
@@ -410,13 +413,14 @@ again: | |||
410 | spin_unlock_irqrestore(&card->ip_lock, *flags); | 413 | spin_unlock_irqrestore(&card->ip_lock, *flags); |
411 | rc = qeth_l3_deregister_addr_entry(card, addr); | 414 | rc = qeth_l3_deregister_addr_entry(card, addr); |
412 | spin_lock_irqsave(&card->ip_lock, *flags); | 415 | spin_lock_irqsave(&card->ip_lock, *flags); |
413 | if (!rc) { | 416 | if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) |
414 | kfree(addr); | 417 | kfree(addr); |
415 | goto again; | 418 | else |
416 | } else | 419 | list_add_tail(&addr->entry, &fail_list); |
417 | list_add(&addr->entry, &card->ip_list); | 420 | goto again; |
418 | } | 421 | } |
419 | } | 422 | } |
423 | list_splice(&fail_list, &card->ip_list); | ||
420 | } | 424 | } |
421 | 425 | ||
422 | static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | 426 | static void qeth_l3_set_ip_addr_list(struct qeth_card *card) |
@@ -426,14 +430,14 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | |||
426 | unsigned long flags; | 430 | unsigned long flags; |
427 | int rc; | 431 | int rc; |
428 | 432 | ||
429 | QETH_DBF_TEXT(trace, 2, "sdiplist"); | 433 | QETH_DBF_TEXT(TRACE, 2, "sdiplist"); |
430 | QETH_DBF_HEX(trace, 2, &card, sizeof(void *)); | 434 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); |
431 | 435 | ||
432 | spin_lock_irqsave(&card->ip_lock, flags); | 436 | spin_lock_irqsave(&card->ip_lock, flags); |
433 | tbd_list = card->ip_tbd_list; | 437 | tbd_list = card->ip_tbd_list; |
434 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); | 438 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); |
435 | if (!card->ip_tbd_list) { | 439 | if (!card->ip_tbd_list) { |
436 | QETH_DBF_TEXT(trace, 0, "silnomem"); | 440 | QETH_DBF_TEXT(TRACE, 0, "silnomem"); |
437 | card->ip_tbd_list = tbd_list; | 441 | card->ip_tbd_list = tbd_list; |
438 | spin_unlock_irqrestore(&card->ip_lock, flags); | 442 | spin_unlock_irqrestore(&card->ip_lock, flags); |
439 | return; | 443 | return; |
@@ -457,7 +461,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | |||
457 | spin_unlock_irqrestore(&card->ip_lock, flags); | 461 | spin_unlock_irqrestore(&card->ip_lock, flags); |
458 | rc = qeth_l3_register_addr_entry(card, todo); | 462 | rc = qeth_l3_register_addr_entry(card, todo); |
459 | spin_lock_irqsave(&card->ip_lock, flags); | 463 | spin_lock_irqsave(&card->ip_lock, flags); |
460 | if (!rc) | 464 | if (!rc || (rc == IPA_RC_LAN_OFFLINE)) |
461 | list_add_tail(&todo->entry, &card->ip_list); | 465 | list_add_tail(&todo->entry, &card->ip_list); |
462 | else | 466 | else |
463 | kfree(todo); | 467 | kfree(todo); |
@@ -467,7 +471,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | |||
467 | spin_unlock_irqrestore(&card->ip_lock, flags); | 471 | spin_unlock_irqrestore(&card->ip_lock, flags); |
468 | rc = qeth_l3_deregister_addr_entry(card, addr); | 472 | rc = qeth_l3_deregister_addr_entry(card, addr); |
469 | spin_lock_irqsave(&card->ip_lock, flags); | 473 | spin_lock_irqsave(&card->ip_lock, flags); |
470 | if (!rc) | 474 | if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED)) |
471 | kfree(addr); | 475 | kfree(addr); |
472 | else | 476 | else |
473 | list_add_tail(&addr->entry, &card->ip_list); | 477 | list_add_tail(&addr->entry, &card->ip_list); |
@@ -484,7 +488,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, | |||
484 | struct qeth_ipaddr *addr, *tmp; | 488 | struct qeth_ipaddr *addr, *tmp; |
485 | unsigned long flags; | 489 | unsigned long flags; |
486 | 490 | ||
487 | QETH_DBF_TEXT(trace, 4, "clearip"); | 491 | QETH_DBF_TEXT(TRACE, 4, "clearip"); |
488 | spin_lock_irqsave(&card->ip_lock, flags); | 492 | spin_lock_irqsave(&card->ip_lock, flags); |
489 | /* clear todo list */ | 493 | /* clear todo list */ |
490 | list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { | 494 | list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { |
@@ -542,7 +546,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, | |||
542 | struct qeth_cmd_buffer *iob; | 546 | struct qeth_cmd_buffer *iob; |
543 | struct qeth_ipa_cmd *cmd; | 547 | struct qeth_ipa_cmd *cmd; |
544 | 548 | ||
545 | QETH_DBF_TEXT(trace, 4, "setdelmc"); | 549 | QETH_DBF_TEXT(TRACE, 4, "setdelmc"); |
546 | 550 | ||
547 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
548 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -580,8 +584,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, | |||
580 | struct qeth_ipa_cmd *cmd; | 584 | struct qeth_ipa_cmd *cmd; |
581 | __u8 netmask[16]; | 585 | __u8 netmask[16]; |
582 | 586 | ||
583 | QETH_DBF_TEXT(trace, 4, "setdelip"); | 587 | QETH_DBF_TEXT(TRACE, 4, "setdelip"); |
584 | QETH_DBF_TEXT_(trace, 4, "flags%02X", flags); | 588 | QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); |
585 | 589 | ||
586 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 590 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
587 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -610,7 +614,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
610 | struct qeth_ipa_cmd *cmd; | 614 | struct qeth_ipa_cmd *cmd; |
611 | struct qeth_cmd_buffer *iob; | 615 | struct qeth_cmd_buffer *iob; |
612 | 616 | ||
613 | QETH_DBF_TEXT(trace, 4, "setroutg"); | 617 | QETH_DBF_TEXT(TRACE, 4, "setroutg"); |
614 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); | 618 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); |
615 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 619 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
616 | cmd->data.setrtg.type = (type); | 620 | cmd->data.setrtg.type = (type); |
@@ -663,7 +667,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
663 | { | 667 | { |
664 | int rc; | 668 | int rc; |
665 | 669 | ||
666 | QETH_DBF_TEXT(trace, 3, "setrtg4"); | 670 | QETH_DBF_TEXT(TRACE, 3, "setrtg4"); |
667 | 671 | ||
668 | qeth_l3_correct_routing_type(card, &card->options.route4.type, | 672 | qeth_l3_correct_routing_type(card, &card->options.route4.type, |
669 | QETH_PROT_IPV4); | 673 | QETH_PROT_IPV4); |
@@ -683,7 +687,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
683 | { | 687 | { |
684 | int rc = 0; | 688 | int rc = 0; |
685 | 689 | ||
686 | QETH_DBF_TEXT(trace, 3, "setrtg6"); | 690 | QETH_DBF_TEXT(TRACE, 3, "setrtg6"); |
687 | #ifdef CONFIG_QETH_IPV6 | 691 | #ifdef CONFIG_QETH_IPV6 |
688 | 692 | ||
689 | if (!qeth_is_supported(card, IPA_IPV6)) | 693 | if (!qeth_is_supported(card, IPA_IPV6)) |
@@ -727,7 +731,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, | |||
727 | unsigned long flags; | 731 | unsigned long flags; |
728 | int rc = 0; | 732 | int rc = 0; |
729 | 733 | ||
730 | QETH_DBF_TEXT(trace, 2, "addipato"); | 734 | QETH_DBF_TEXT(TRACE, 2, "addipato"); |
731 | spin_lock_irqsave(&card->ip_lock, flags); | 735 | spin_lock_irqsave(&card->ip_lock, flags); |
732 | list_for_each_entry(ipatoe, &card->ipato.entries, entry) { | 736 | list_for_each_entry(ipatoe, &card->ipato.entries, entry) { |
733 | if (ipatoe->proto != new->proto) | 737 | if (ipatoe->proto != new->proto) |
@@ -753,7 +757,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, | |||
753 | struct qeth_ipato_entry *ipatoe, *tmp; | 757 | struct qeth_ipato_entry *ipatoe, *tmp; |
754 | unsigned long flags; | 758 | unsigned long flags; |
755 | 759 | ||
756 | QETH_DBF_TEXT(trace, 2, "delipato"); | 760 | QETH_DBF_TEXT(TRACE, 2, "delipato"); |
757 | spin_lock_irqsave(&card->ip_lock, flags); | 761 | spin_lock_irqsave(&card->ip_lock, flags); |
758 | list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { | 762 | list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { |
759 | if (ipatoe->proto != proto) | 763 | if (ipatoe->proto != proto) |
@@ -781,11 +785,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
781 | ipaddr = qeth_l3_get_addr_buffer(proto); | 785 | ipaddr = qeth_l3_get_addr_buffer(proto); |
782 | if (ipaddr) { | 786 | if (ipaddr) { |
783 | if (proto == QETH_PROT_IPV4) { | 787 | if (proto == QETH_PROT_IPV4) { |
784 | QETH_DBF_TEXT(trace, 2, "addvipa4"); | 788 | QETH_DBF_TEXT(TRACE, 2, "addvipa4"); |
785 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 789 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
786 | ipaddr->u.a4.mask = 0; | 790 | ipaddr->u.a4.mask = 0; |
787 | } else if (proto == QETH_PROT_IPV6) { | 791 | } else if (proto == QETH_PROT_IPV6) { |
788 | QETH_DBF_TEXT(trace, 2, "addvipa6"); | 792 | QETH_DBF_TEXT(TRACE, 2, "addvipa6"); |
789 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 793 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
790 | ipaddr->u.a6.pfxlen = 0; | 794 | ipaddr->u.a6.pfxlen = 0; |
791 | } | 795 | } |
@@ -817,11 +821,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
817 | ipaddr = qeth_l3_get_addr_buffer(proto); | 821 | ipaddr = qeth_l3_get_addr_buffer(proto); |
818 | if (ipaddr) { | 822 | if (ipaddr) { |
819 | if (proto == QETH_PROT_IPV4) { | 823 | if (proto == QETH_PROT_IPV4) { |
820 | QETH_DBF_TEXT(trace, 2, "delvipa4"); | 824 | QETH_DBF_TEXT(TRACE, 2, "delvipa4"); |
821 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 825 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
822 | ipaddr->u.a4.mask = 0; | 826 | ipaddr->u.a4.mask = 0; |
823 | } else if (proto == QETH_PROT_IPV6) { | 827 | } else if (proto == QETH_PROT_IPV6) { |
824 | QETH_DBF_TEXT(trace, 2, "delvipa6"); | 828 | QETH_DBF_TEXT(TRACE, 2, "delvipa6"); |
825 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 829 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
826 | ipaddr->u.a6.pfxlen = 0; | 830 | ipaddr->u.a6.pfxlen = 0; |
827 | } | 831 | } |
@@ -846,11 +850,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
846 | ipaddr = qeth_l3_get_addr_buffer(proto); | 850 | ipaddr = qeth_l3_get_addr_buffer(proto); |
847 | if (ipaddr) { | 851 | if (ipaddr) { |
848 | if (proto == QETH_PROT_IPV4) { | 852 | if (proto == QETH_PROT_IPV4) { |
849 | QETH_DBF_TEXT(trace, 2, "addrxip4"); | 853 | QETH_DBF_TEXT(TRACE, 2, "addrxip4"); |
850 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 854 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
851 | ipaddr->u.a4.mask = 0; | 855 | ipaddr->u.a4.mask = 0; |
852 | } else if (proto == QETH_PROT_IPV6) { | 856 | } else if (proto == QETH_PROT_IPV6) { |
853 | QETH_DBF_TEXT(trace, 2, "addrxip6"); | 857 | QETH_DBF_TEXT(TRACE, 2, "addrxip6"); |
854 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 858 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
855 | ipaddr->u.a6.pfxlen = 0; | 859 | ipaddr->u.a6.pfxlen = 0; |
856 | } | 860 | } |
@@ -882,11 +886,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
882 | ipaddr = qeth_l3_get_addr_buffer(proto); | 886 | ipaddr = qeth_l3_get_addr_buffer(proto); |
883 | if (ipaddr) { | 887 | if (ipaddr) { |
884 | if (proto == QETH_PROT_IPV4) { | 888 | if (proto == QETH_PROT_IPV4) { |
885 | QETH_DBF_TEXT(trace, 2, "addrxip4"); | 889 | QETH_DBF_TEXT(TRACE, 2, "addrxip4"); |
886 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 890 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
887 | ipaddr->u.a4.mask = 0; | 891 | ipaddr->u.a4.mask = 0; |
888 | } else if (proto == QETH_PROT_IPV6) { | 892 | } else if (proto == QETH_PROT_IPV6) { |
889 | QETH_DBF_TEXT(trace, 2, "addrxip6"); | 893 | QETH_DBF_TEXT(TRACE, 2, "addrxip6"); |
890 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 894 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
891 | ipaddr->u.a6.pfxlen = 0; | 895 | ipaddr->u.a6.pfxlen = 0; |
892 | } | 896 | } |
@@ -906,15 +910,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
906 | int cnt = 3; | 910 | int cnt = 3; |
907 | 911 | ||
908 | if (addr->proto == QETH_PROT_IPV4) { | 912 | if (addr->proto == QETH_PROT_IPV4) { |
909 | QETH_DBF_TEXT(trace, 2, "setaddr4"); | 913 | QETH_DBF_TEXT(TRACE, 2, "setaddr4"); |
910 | QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int)); | 914 | QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); |
911 | } else if (addr->proto == QETH_PROT_IPV6) { | 915 | } else if (addr->proto == QETH_PROT_IPV6) { |
912 | QETH_DBF_TEXT(trace, 2, "setaddr6"); | 916 | QETH_DBF_TEXT(TRACE, 2, "setaddr6"); |
913 | QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8); | 917 | QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); |
914 | QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8); | 918 | QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); |
915 | } else { | 919 | } else { |
916 | QETH_DBF_TEXT(trace, 2, "setaddr?"); | 920 | QETH_DBF_TEXT(TRACE, 2, "setaddr?"); |
917 | QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr)); | 921 | QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); |
918 | } | 922 | } |
919 | do { | 923 | do { |
920 | if (addr->is_multicast) | 924 | if (addr->is_multicast) |
@@ -923,10 +927,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
923 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, | 927 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, |
924 | addr->set_flags); | 928 | addr->set_flags); |
925 | if (rc) | 929 | if (rc) |
926 | QETH_DBF_TEXT(trace, 2, "failed"); | 930 | QETH_DBF_TEXT(TRACE, 2, "failed"); |
927 | } while ((--cnt > 0) && rc); | 931 | } while ((--cnt > 0) && rc); |
928 | if (rc) { | 932 | if (rc) { |
929 | QETH_DBF_TEXT(trace, 2, "FAILED"); | 933 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); |
930 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 934 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
931 | PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", | 935 | PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", |
932 | buf, rc, rc); | 936 | buf, rc, rc); |
@@ -940,15 +944,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
940 | int rc = 0; | 944 | int rc = 0; |
941 | 945 | ||
942 | if (addr->proto == QETH_PROT_IPV4) { | 946 | if (addr->proto == QETH_PROT_IPV4) { |
943 | QETH_DBF_TEXT(trace, 2, "deladdr4"); | 947 | QETH_DBF_TEXT(TRACE, 2, "deladdr4"); |
944 | QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int)); | 948 | QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); |
945 | } else if (addr->proto == QETH_PROT_IPV6) { | 949 | } else if (addr->proto == QETH_PROT_IPV6) { |
946 | QETH_DBF_TEXT(trace, 2, "deladdr6"); | 950 | QETH_DBF_TEXT(TRACE, 2, "deladdr6"); |
947 | QETH_DBF_HEX(trace, 3, &addr->u.a6.addr, 8); | 951 | QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); |
948 | QETH_DBF_HEX(trace, 3, ((char *)&addr->u.a6.addr) + 8, 8); | 952 | QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); |
949 | } else { | 953 | } else { |
950 | QETH_DBF_TEXT(trace, 2, "deladdr?"); | 954 | QETH_DBF_TEXT(TRACE, 2, "deladdr?"); |
951 | QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr)); | 955 | QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); |
952 | } | 956 | } |
953 | if (addr->is_multicast) | 957 | if (addr->is_multicast) |
954 | rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); | 958 | rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); |
@@ -956,7 +960,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
956 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, | 960 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, |
957 | addr->del_flags); | 961 | addr->del_flags); |
958 | if (rc) { | 962 | if (rc) { |
959 | QETH_DBF_TEXT(trace, 2, "failed"); | 963 | QETH_DBF_TEXT(TRACE, 2, "failed"); |
960 | /* TODO: re-activate this warning as soon as we have a | 964 | /* TODO: re-activate this warning as soon as we have a |
961 | * clean mirco code | 965 | * clean mirco code |
962 | qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 966 | qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
@@ -996,7 +1000,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, | |||
996 | struct qeth_cmd_buffer *iob; | 1000 | struct qeth_cmd_buffer *iob; |
997 | struct qeth_ipa_cmd *cmd; | 1001 | struct qeth_ipa_cmd *cmd; |
998 | 1002 | ||
999 | QETH_DBF_TEXT(trace, 4, "adpmode"); | 1003 | QETH_DBF_TEXT(TRACE, 4, "adpmode"); |
1000 | 1004 | ||
1001 | iob = qeth_get_adapter_cmd(card, command, | 1005 | iob = qeth_get_adapter_cmd(card, command, |
1002 | sizeof(struct qeth_ipacmd_setadpparms)); | 1006 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -1011,7 +1015,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card) | |||
1011 | { | 1015 | { |
1012 | int rc; | 1016 | int rc; |
1013 | 1017 | ||
1014 | QETH_DBF_TEXT(trace, 4, "adphstr"); | 1018 | QETH_DBF_TEXT(TRACE, 4, "adphstr"); |
1015 | 1019 | ||
1016 | if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { | 1020 | if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { |
1017 | rc = qeth_l3_send_setadp_mode(card, | 1021 | rc = qeth_l3_send_setadp_mode(card, |
@@ -1044,13 +1048,13 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) | |||
1044 | { | 1048 | { |
1045 | int rc; | 1049 | int rc; |
1046 | 1050 | ||
1047 | QETH_DBF_TEXT(setup, 2, "setadprm"); | 1051 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); |
1048 | 1052 | ||
1049 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { | 1053 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
1050 | PRINT_WARN("set adapter parameters not supported " | 1054 | PRINT_WARN("set adapter parameters not supported " |
1051 | "on device %s.\n", | 1055 | "on device %s.\n", |
1052 | CARD_BUS_ID(card)); | 1056 | CARD_BUS_ID(card)); |
1053 | QETH_DBF_TEXT(setup, 2, " notsupp"); | 1057 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); |
1054 | return 0; | 1058 | return 0; |
1055 | } | 1059 | } |
1056 | rc = qeth_query_setadapterparms(card); | 1060 | rc = qeth_query_setadapterparms(card); |
@@ -1079,7 +1083,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, | |||
1079 | { | 1083 | { |
1080 | struct qeth_ipa_cmd *cmd; | 1084 | struct qeth_ipa_cmd *cmd; |
1081 | 1085 | ||
1082 | QETH_DBF_TEXT(trace, 4, "defadpcb"); | 1086 | QETH_DBF_TEXT(TRACE, 4, "defadpcb"); |
1083 | 1087 | ||
1084 | cmd = (struct qeth_ipa_cmd *) data; | 1088 | cmd = (struct qeth_ipa_cmd *) data; |
1085 | if (cmd->hdr.return_code == 0) { | 1089 | if (cmd->hdr.return_code == 0) { |
@@ -1092,7 +1096,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, | |||
1092 | if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && | 1096 | if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && |
1093 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { | 1097 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { |
1094 | card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; | 1098 | card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; |
1095 | QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask); | 1099 | QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); |
1096 | } | 1100 | } |
1097 | return 0; | 1101 | return 0; |
1098 | } | 1102 | } |
@@ -1104,7 +1108,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( | |||
1104 | struct qeth_cmd_buffer *iob; | 1108 | struct qeth_cmd_buffer *iob; |
1105 | struct qeth_ipa_cmd *cmd; | 1109 | struct qeth_ipa_cmd *cmd; |
1106 | 1110 | ||
1107 | QETH_DBF_TEXT(trace, 4, "getasscm"); | 1111 | QETH_DBF_TEXT(TRACE, 4, "getasscm"); |
1108 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); | 1112 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); |
1109 | 1113 | ||
1110 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1114 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -1126,7 +1130,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card, | |||
1126 | int rc; | 1130 | int rc; |
1127 | struct qeth_ipa_cmd *cmd; | 1131 | struct qeth_ipa_cmd *cmd; |
1128 | 1132 | ||
1129 | QETH_DBF_TEXT(trace, 4, "sendassp"); | 1133 | QETH_DBF_TEXT(TRACE, 4, "sendassp"); |
1130 | 1134 | ||
1131 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1135 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1132 | if (len <= sizeof(__u32)) | 1136 | if (len <= sizeof(__u32)) |
@@ -1145,7 +1149,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, | |||
1145 | int rc; | 1149 | int rc; |
1146 | struct qeth_cmd_buffer *iob; | 1150 | struct qeth_cmd_buffer *iob; |
1147 | 1151 | ||
1148 | QETH_DBF_TEXT(trace, 4, "simassp6"); | 1152 | QETH_DBF_TEXT(TRACE, 4, "simassp6"); |
1149 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1153 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1150 | 0, QETH_PROT_IPV6); | 1154 | 0, QETH_PROT_IPV6); |
1151 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, | 1155 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, |
@@ -1161,7 +1165,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, | |||
1161 | int length = 0; | 1165 | int length = 0; |
1162 | struct qeth_cmd_buffer *iob; | 1166 | struct qeth_cmd_buffer *iob; |
1163 | 1167 | ||
1164 | QETH_DBF_TEXT(trace, 4, "simassp4"); | 1168 | QETH_DBF_TEXT(TRACE, 4, "simassp4"); |
1165 | if (data) | 1169 | if (data) |
1166 | length = sizeof(__u32); | 1170 | length = sizeof(__u32); |
1167 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1171 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
@@ -1175,7 +1179,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) | |||
1175 | { | 1179 | { |
1176 | int rc; | 1180 | int rc; |
1177 | 1181 | ||
1178 | QETH_DBF_TEXT(trace, 3, "ipaarp"); | 1182 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); |
1179 | 1183 | ||
1180 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 1184 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
1181 | PRINT_WARN("ARP processing not supported " | 1185 | PRINT_WARN("ARP processing not supported " |
@@ -1196,7 +1200,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) | |||
1196 | { | 1200 | { |
1197 | int rc; | 1201 | int rc; |
1198 | 1202 | ||
1199 | QETH_DBF_TEXT(trace, 3, "ipaipfrg"); | 1203 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); |
1200 | 1204 | ||
1201 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { | 1205 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { |
1202 | PRINT_INFO("Hardware IP fragmentation not supported on %s\n", | 1206 | PRINT_INFO("Hardware IP fragmentation not supported on %s\n", |
@@ -1219,7 +1223,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) | |||
1219 | { | 1223 | { |
1220 | int rc; | 1224 | int rc; |
1221 | 1225 | ||
1222 | QETH_DBF_TEXT(trace, 3, "stsrcmac"); | 1226 | QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); |
1223 | 1227 | ||
1224 | if (!card->options.fake_ll) | 1228 | if (!card->options.fake_ll) |
1225 | return -EOPNOTSUPP; | 1229 | return -EOPNOTSUPP; |
@@ -1243,7 +1247,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) | |||
1243 | { | 1247 | { |
1244 | int rc = 0; | 1248 | int rc = 0; |
1245 | 1249 | ||
1246 | QETH_DBF_TEXT(trace, 3, "strtvlan"); | 1250 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); |
1247 | 1251 | ||
1248 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { | 1252 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { |
1249 | PRINT_WARN("VLAN not supported on %s\n", | 1253 | PRINT_WARN("VLAN not supported on %s\n", |
@@ -1267,7 +1271,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) | |||
1267 | { | 1271 | { |
1268 | int rc; | 1272 | int rc; |
1269 | 1273 | ||
1270 | QETH_DBF_TEXT(trace, 3, "stmcast"); | 1274 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); |
1271 | 1275 | ||
1272 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { | 1276 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { |
1273 | PRINT_WARN("Multicast not supported on %s\n", | 1277 | PRINT_WARN("Multicast not supported on %s\n", |
@@ -1293,7 +1297,7 @@ static int qeth_l3_query_ipassists_cb(struct qeth_card *card, | |||
1293 | { | 1297 | { |
1294 | struct qeth_ipa_cmd *cmd; | 1298 | struct qeth_ipa_cmd *cmd; |
1295 | 1299 | ||
1296 | QETH_DBF_TEXT(setup, 2, "qipasscb"); | 1300 | QETH_DBF_TEXT(SETUP, 2, "qipasscb"); |
1297 | 1301 | ||
1298 | cmd = (struct qeth_ipa_cmd *) data; | 1302 | cmd = (struct qeth_ipa_cmd *) data; |
1299 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { | 1303 | if (cmd->hdr.prot_version == QETH_PROT_IPV4) { |
@@ -1303,9 +1307,9 @@ static int qeth_l3_query_ipassists_cb(struct qeth_card *card, | |||
1303 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; | 1307 | card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; |
1304 | card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; | 1308 | card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; |
1305 | } | 1309 | } |
1306 | QETH_DBF_TEXT(setup, 2, "suppenbl"); | 1310 | QETH_DBF_TEXT(SETUP, 2, "suppenbl"); |
1307 | QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_supported); | 1311 | QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported); |
1308 | QETH_DBF_TEXT_(setup, 2, "%x", cmd->hdr.ipa_enabled); | 1312 | QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled); |
1309 | return 0; | 1313 | return 0; |
1310 | } | 1314 | } |
1311 | 1315 | ||
@@ -1315,7 +1319,7 @@ static int qeth_l3_query_ipassists(struct qeth_card *card, | |||
1315 | int rc; | 1319 | int rc; |
1316 | struct qeth_cmd_buffer *iob; | 1320 | struct qeth_cmd_buffer *iob; |
1317 | 1321 | ||
1318 | QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot); | 1322 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); |
1319 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); | 1323 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); |
1320 | rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL); | 1324 | rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL); |
1321 | return rc; | 1325 | return rc; |
@@ -1326,7 +1330,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) | |||
1326 | { | 1330 | { |
1327 | int rc; | 1331 | int rc; |
1328 | 1332 | ||
1329 | QETH_DBF_TEXT(trace, 3, "softipv6"); | 1333 | QETH_DBF_TEXT(TRACE, 3, "softipv6"); |
1330 | 1334 | ||
1331 | if (card->info.type == QETH_CARD_TYPE_IQD) | 1335 | if (card->info.type == QETH_CARD_TYPE_IQD) |
1332 | goto out; | 1336 | goto out; |
@@ -1371,7 +1375,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) | |||
1371 | { | 1375 | { |
1372 | int rc = 0; | 1376 | int rc = 0; |
1373 | 1377 | ||
1374 | QETH_DBF_TEXT(trace, 3, "strtipv6"); | 1378 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); |
1375 | 1379 | ||
1376 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1380 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1377 | PRINT_WARN("IPv6 not supported on %s\n", | 1381 | PRINT_WARN("IPv6 not supported on %s\n", |
@@ -1388,7 +1392,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) | |||
1388 | { | 1392 | { |
1389 | int rc; | 1393 | int rc; |
1390 | 1394 | ||
1391 | QETH_DBF_TEXT(trace, 3, "stbrdcst"); | 1395 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); |
1392 | card->info.broadcast_capable = 0; | 1396 | card->info.broadcast_capable = 0; |
1393 | if (!qeth_is_supported(card, IPA_FILTERING)) { | 1397 | if (!qeth_is_supported(card, IPA_FILTERING)) { |
1394 | PRINT_WARN("Broadcast not supported on %s\n", | 1398 | PRINT_WARN("Broadcast not supported on %s\n", |
@@ -1458,7 +1462,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) | |||
1458 | { | 1462 | { |
1459 | int rc = 0; | 1463 | int rc = 0; |
1460 | 1464 | ||
1461 | QETH_DBF_TEXT(trace, 3, "strtcsum"); | 1465 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); |
1462 | 1466 | ||
1463 | if (card->options.checksum_type == NO_CHECKSUMMING) { | 1467 | if (card->options.checksum_type == NO_CHECKSUMMING) { |
1464 | PRINT_WARN("Using no checksumming on %s.\n", | 1468 | PRINT_WARN("Using no checksumming on %s.\n", |
@@ -1489,7 +1493,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1489 | { | 1493 | { |
1490 | int rc; | 1494 | int rc; |
1491 | 1495 | ||
1492 | QETH_DBF_TEXT(trace, 3, "sttso"); | 1496 | QETH_DBF_TEXT(TRACE, 3, "sttso"); |
1493 | 1497 | ||
1494 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { | 1498 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { |
1495 | PRINT_WARN("Outbound TSO not supported on %s\n", | 1499 | PRINT_WARN("Outbound TSO not supported on %s\n", |
@@ -1514,7 +1518,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1514 | 1518 | ||
1515 | static int qeth_l3_start_ipassists(struct qeth_card *card) | 1519 | static int qeth_l3_start_ipassists(struct qeth_card *card) |
1516 | { | 1520 | { |
1517 | QETH_DBF_TEXT(trace, 3, "strtipas"); | 1521 | QETH_DBF_TEXT(TRACE, 3, "strtipas"); |
1518 | qeth_l3_start_ipa_arp_processing(card); /* go on*/ | 1522 | qeth_l3_start_ipa_arp_processing(card); /* go on*/ |
1519 | qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ | 1523 | qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ |
1520 | qeth_l3_start_ipa_source_mac(card); /* go on*/ | 1524 | qeth_l3_start_ipa_source_mac(card); /* go on*/ |
@@ -1534,7 +1538,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card) | |||
1534 | struct qeth_cmd_buffer *iob; | 1538 | struct qeth_cmd_buffer *iob; |
1535 | struct qeth_ipa_cmd *cmd; | 1539 | struct qeth_ipa_cmd *cmd; |
1536 | 1540 | ||
1537 | QETH_DBF_TEXT(trace, 2, "puniqeid"); | 1541 | QETH_DBF_TEXT(TRACE, 2, "puniqeid"); |
1538 | 1542 | ||
1539 | if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == | 1543 | if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == |
1540 | UNIQUE_ID_NOT_BY_CARD) | 1544 | UNIQUE_ID_NOT_BY_CARD) |
@@ -1571,7 +1575,7 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) | |||
1571 | struct qeth_cmd_buffer *iob; | 1575 | struct qeth_cmd_buffer *iob; |
1572 | struct qeth_ipa_cmd *cmd; | 1576 | struct qeth_ipa_cmd *cmd; |
1573 | 1577 | ||
1574 | QETH_DBF_TEXT(setup, 2, "hsrmac"); | 1578 | QETH_DBF_TEXT(SETUP, 2, "hsrmac"); |
1575 | 1579 | ||
1576 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1580 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
1577 | QETH_PROT_IPV6); | 1581 | QETH_PROT_IPV6); |
@@ -1612,7 +1616,7 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) | |||
1612 | struct qeth_cmd_buffer *iob; | 1616 | struct qeth_cmd_buffer *iob; |
1613 | struct qeth_ipa_cmd *cmd; | 1617 | struct qeth_ipa_cmd *cmd; |
1614 | 1618 | ||
1615 | QETH_DBF_TEXT(setup, 2, "guniqeid"); | 1619 | QETH_DBF_TEXT(SETUP, 2, "guniqeid"); |
1616 | 1620 | ||
1617 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1621 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1618 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | | 1622 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | |
@@ -1645,7 +1649,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) | |||
1645 | struct ip_mc_list *im4; | 1649 | struct ip_mc_list *im4; |
1646 | char buf[MAX_ADDR_LEN]; | 1650 | char buf[MAX_ADDR_LEN]; |
1647 | 1651 | ||
1648 | QETH_DBF_TEXT(trace, 4, "addmc"); | 1652 | QETH_DBF_TEXT(TRACE, 4, "addmc"); |
1649 | for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { | 1653 | for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { |
1650 | qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); | 1654 | qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); |
1651 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 1655 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
@@ -1665,7 +1669,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card) | |||
1665 | struct vlan_group *vg; | 1669 | struct vlan_group *vg; |
1666 | int i; | 1670 | int i; |
1667 | 1671 | ||
1668 | QETH_DBF_TEXT(trace, 4, "addmcvl"); | 1672 | QETH_DBF_TEXT(TRACE, 4, "addmcvl"); |
1669 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) | 1673 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) |
1670 | return; | 1674 | return; |
1671 | 1675 | ||
@@ -1689,7 +1693,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) | |||
1689 | { | 1693 | { |
1690 | struct in_device *in4_dev; | 1694 | struct in_device *in4_dev; |
1691 | 1695 | ||
1692 | QETH_DBF_TEXT(trace, 4, "chkmcv4"); | 1696 | QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); |
1693 | in4_dev = in_dev_get(card->dev); | 1697 | in4_dev = in_dev_get(card->dev); |
1694 | if (in4_dev == NULL) | 1698 | if (in4_dev == NULL) |
1695 | return; | 1699 | return; |
@@ -1707,7 +1711,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) | |||
1707 | struct ifmcaddr6 *im6; | 1711 | struct ifmcaddr6 *im6; |
1708 | char buf[MAX_ADDR_LEN]; | 1712 | char buf[MAX_ADDR_LEN]; |
1709 | 1713 | ||
1710 | QETH_DBF_TEXT(trace, 4, "addmc6"); | 1714 | QETH_DBF_TEXT(TRACE, 4, "addmc6"); |
1711 | for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { | 1715 | for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { |
1712 | ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); | 1716 | ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); |
1713 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); | 1717 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); |
@@ -1728,7 +1732,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) | |||
1728 | struct vlan_group *vg; | 1732 | struct vlan_group *vg; |
1729 | int i; | 1733 | int i; |
1730 | 1734 | ||
1731 | QETH_DBF_TEXT(trace, 4, "admc6vl"); | 1735 | QETH_DBF_TEXT(TRACE, 4, "admc6vl"); |
1732 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) | 1736 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) |
1733 | return; | 1737 | return; |
1734 | 1738 | ||
@@ -1752,7 +1756,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) | |||
1752 | { | 1756 | { |
1753 | struct inet6_dev *in6_dev; | 1757 | struct inet6_dev *in6_dev; |
1754 | 1758 | ||
1755 | QETH_DBF_TEXT(trace, 4, "chkmcv6"); | 1759 | QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); |
1756 | if (!qeth_is_supported(card, IPA_IPV6)) | 1760 | if (!qeth_is_supported(card, IPA_IPV6)) |
1757 | return ; | 1761 | return ; |
1758 | in6_dev = in6_dev_get(card->dev); | 1762 | in6_dev = in6_dev_get(card->dev); |
@@ -1773,7 +1777,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, | |||
1773 | struct in_ifaddr *ifa; | 1777 | struct in_ifaddr *ifa; |
1774 | struct qeth_ipaddr *addr; | 1778 | struct qeth_ipaddr *addr; |
1775 | 1779 | ||
1776 | QETH_DBF_TEXT(trace, 4, "frvaddr4"); | 1780 | QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); |
1777 | 1781 | ||
1778 | in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); | 1782 | in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); |
1779 | if (!in_dev) | 1783 | if (!in_dev) |
@@ -1799,7 +1803,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, | |||
1799 | struct inet6_ifaddr *ifa; | 1803 | struct inet6_ifaddr *ifa; |
1800 | struct qeth_ipaddr *addr; | 1804 | struct qeth_ipaddr *addr; |
1801 | 1805 | ||
1802 | QETH_DBF_TEXT(trace, 4, "frvaddr6"); | 1806 | QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); |
1803 | 1807 | ||
1804 | in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); | 1808 | in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); |
1805 | if (!in6_dev) | 1809 | if (!in6_dev) |
@@ -1834,7 +1838,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev, | |||
1834 | struct qeth_card *card = netdev_priv(dev); | 1838 | struct qeth_card *card = netdev_priv(dev); |
1835 | unsigned long flags; | 1839 | unsigned long flags; |
1836 | 1840 | ||
1837 | QETH_DBF_TEXT(trace, 4, "vlanreg"); | 1841 | QETH_DBF_TEXT(TRACE, 4, "vlanreg"); |
1838 | spin_lock_irqsave(&card->vlanlock, flags); | 1842 | spin_lock_irqsave(&card->vlanlock, flags); |
1839 | card->vlangrp = grp; | 1843 | card->vlangrp = grp; |
1840 | spin_unlock_irqrestore(&card->vlanlock, flags); | 1844 | spin_unlock_irqrestore(&card->vlanlock, flags); |
@@ -1872,7 +1876,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
1872 | struct qeth_card *card = netdev_priv(dev); | 1876 | struct qeth_card *card = netdev_priv(dev); |
1873 | unsigned long flags; | 1877 | unsigned long flags; |
1874 | 1878 | ||
1875 | QETH_DBF_TEXT_(trace, 4, "kid:%d", vid); | 1879 | QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); |
1876 | spin_lock_irqsave(&card->vlanlock, flags); | 1880 | spin_lock_irqsave(&card->vlanlock, flags); |
1877 | /* unregister IP addresses of vlan device */ | 1881 | /* unregister IP addresses of vlan device */ |
1878 | qeth_l3_free_vlan_addresses(card, vid); | 1882 | qeth_l3_free_vlan_addresses(card, vid); |
@@ -2002,8 +2006,8 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card, | |||
2002 | break; | 2006 | break; |
2003 | default: | 2007 | default: |
2004 | dev_kfree_skb_any(skb); | 2008 | dev_kfree_skb_any(skb); |
2005 | QETH_DBF_TEXT(trace, 3, "inbunkno"); | 2009 | QETH_DBF_TEXT(TRACE, 3, "inbunkno"); |
2006 | QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN); | 2010 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
2007 | continue; | 2011 | continue; |
2008 | } | 2012 | } |
2009 | 2013 | ||
@@ -2070,7 +2074,7 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) | |||
2070 | card = netdev_priv(vlan_dev_info(dev)->real_dev); | 2074 | card = netdev_priv(vlan_dev_info(dev)->real_dev); |
2071 | if (card->options.layer2) | 2075 | if (card->options.layer2) |
2072 | card = NULL; | 2076 | card = NULL; |
2073 | QETH_DBF_TEXT_(trace, 4, "%d", rc); | 2077 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); |
2074 | return card ; | 2078 | return card ; |
2075 | } | 2079 | } |
2076 | 2080 | ||
@@ -2078,8 +2082,8 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
2078 | { | 2082 | { |
2079 | int rc = 0; | 2083 | int rc = 0; |
2080 | 2084 | ||
2081 | QETH_DBF_TEXT(setup, 2, "stopcard"); | 2085 | QETH_DBF_TEXT(SETUP, 2, "stopcard"); |
2082 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 2086 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
2083 | 2087 | ||
2084 | qeth_set_allowed_threads(card, 0, 1); | 2088 | qeth_set_allowed_threads(card, 0, 1); |
2085 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) | 2089 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) |
@@ -2092,7 +2096,7 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
2092 | if (!card->use_hard_stop) { | 2096 | if (!card->use_hard_stop) { |
2093 | rc = qeth_send_stoplan(card); | 2097 | rc = qeth_send_stoplan(card); |
2094 | if (rc) | 2098 | if (rc) |
2095 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 2099 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
2096 | } | 2100 | } |
2097 | card->state = CARD_STATE_SOFTSETUP; | 2101 | card->state = CARD_STATE_SOFTSETUP; |
2098 | } | 2102 | } |
@@ -2106,7 +2110,7 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
2106 | (card->info.type != QETH_CARD_TYPE_IQD)) { | 2110 | (card->info.type != QETH_CARD_TYPE_IQD)) { |
2107 | rc = qeth_l3_put_unique_id(card); | 2111 | rc = qeth_l3_put_unique_id(card); |
2108 | if (rc) | 2112 | if (rc) |
2109 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 2113 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
2110 | } | 2114 | } |
2111 | qeth_qdio_clear_card(card, 0); | 2115 | qeth_qdio_clear_card(card, 0); |
2112 | qeth_clear_qdio_buffers(card); | 2116 | qeth_clear_qdio_buffers(card); |
@@ -2125,7 +2129,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev) | |||
2125 | { | 2129 | { |
2126 | struct qeth_card *card = netdev_priv(dev); | 2130 | struct qeth_card *card = netdev_priv(dev); |
2127 | 2131 | ||
2128 | QETH_DBF_TEXT(trace, 3, "setmulti"); | 2132 | QETH_DBF_TEXT(TRACE, 3, "setmulti"); |
2129 | qeth_l3_delete_mc_addresses(card); | 2133 | qeth_l3_delete_mc_addresses(card); |
2130 | qeth_l3_add_multicast_ipv4(card); | 2134 | qeth_l3_add_multicast_ipv4(card); |
2131 | #ifdef CONFIG_QETH_IPV6 | 2135 | #ifdef CONFIG_QETH_IPV6 |
@@ -2165,7 +2169,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2165 | int tmp; | 2169 | int tmp; |
2166 | int rc; | 2170 | int rc; |
2167 | 2171 | ||
2168 | QETH_DBF_TEXT(trace, 3, "arpstnoe"); | 2172 | QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); |
2169 | 2173 | ||
2170 | /* | 2174 | /* |
2171 | * currently GuestLAN only supports the ARP assist function | 2175 | * currently GuestLAN only supports the ARP assist function |
@@ -2219,17 +2223,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2219 | int uentry_size; | 2223 | int uentry_size; |
2220 | int i; | 2224 | int i; |
2221 | 2225 | ||
2222 | QETH_DBF_TEXT(trace, 4, "arpquecb"); | 2226 | QETH_DBF_TEXT(TRACE, 4, "arpquecb"); |
2223 | 2227 | ||
2224 | qinfo = (struct qeth_arp_query_info *) reply->param; | 2228 | qinfo = (struct qeth_arp_query_info *) reply->param; |
2225 | cmd = (struct qeth_ipa_cmd *) data; | 2229 | cmd = (struct qeth_ipa_cmd *) data; |
2226 | if (cmd->hdr.return_code) { | 2230 | if (cmd->hdr.return_code) { |
2227 | QETH_DBF_TEXT_(trace, 4, "qaer1%i", cmd->hdr.return_code); | 2231 | QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); |
2228 | return 0; | 2232 | return 0; |
2229 | } | 2233 | } |
2230 | if (cmd->data.setassparms.hdr.return_code) { | 2234 | if (cmd->data.setassparms.hdr.return_code) { |
2231 | cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; | 2235 | cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; |
2232 | QETH_DBF_TEXT_(trace, 4, "qaer2%i", cmd->hdr.return_code); | 2236 | QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); |
2233 | return 0; | 2237 | return 0; |
2234 | } | 2238 | } |
2235 | qdata = &cmd->data.setassparms.data.query_arp; | 2239 | qdata = &cmd->data.setassparms.data.query_arp; |
@@ -2251,17 +2255,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2251 | /* check if there is enough room in userspace */ | 2255 | /* check if there is enough room in userspace */ |
2252 | if ((qinfo->udata_len - qinfo->udata_offset) < | 2256 | if ((qinfo->udata_len - qinfo->udata_offset) < |
2253 | qdata->no_entries * uentry_size){ | 2257 | qdata->no_entries * uentry_size){ |
2254 | QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM); | 2258 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); |
2255 | cmd->hdr.return_code = -ENOMEM; | 2259 | cmd->hdr.return_code = -ENOMEM; |
2256 | PRINT_WARN("query ARP user space buffer is too small for " | 2260 | PRINT_WARN("query ARP user space buffer is too small for " |
2257 | "the returned number of ARP entries. " | 2261 | "the returned number of ARP entries. " |
2258 | "Aborting query!\n"); | 2262 | "Aborting query!\n"); |
2259 | goto out_error; | 2263 | goto out_error; |
2260 | } | 2264 | } |
2261 | QETH_DBF_TEXT_(trace, 4, "anore%i", | 2265 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", |
2262 | cmd->data.setassparms.hdr.number_of_replies); | 2266 | cmd->data.setassparms.hdr.number_of_replies); |
2263 | QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); | 2267 | QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); |
2264 | QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries); | 2268 | QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); |
2265 | 2269 | ||
2266 | if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { | 2270 | if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { |
2267 | /* strip off "media specific information" */ | 2271 | /* strip off "media specific information" */ |
@@ -2297,7 +2301,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, | |||
2297 | unsigned long), | 2301 | unsigned long), |
2298 | void *reply_param) | 2302 | void *reply_param) |
2299 | { | 2303 | { |
2300 | QETH_DBF_TEXT(trace, 4, "sendarp"); | 2304 | QETH_DBF_TEXT(TRACE, 4, "sendarp"); |
2301 | 2305 | ||
2302 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); | 2306 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); |
2303 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), | 2307 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), |
@@ -2313,7 +2317,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2313 | int tmp; | 2317 | int tmp; |
2314 | int rc; | 2318 | int rc; |
2315 | 2319 | ||
2316 | QETH_DBF_TEXT(trace, 3, "arpquery"); | 2320 | QETH_DBF_TEXT(TRACE, 3, "arpquery"); |
2317 | 2321 | ||
2318 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ | 2322 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ |
2319 | IPA_ARP_PROCESSING)) { | 2323 | IPA_ARP_PROCESSING)) { |
@@ -2358,7 +2362,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2358 | int tmp; | 2362 | int tmp; |
2359 | int rc; | 2363 | int rc; |
2360 | 2364 | ||
2361 | QETH_DBF_TEXT(trace, 3, "arpadent"); | 2365 | QETH_DBF_TEXT(TRACE, 3, "arpadent"); |
2362 | 2366 | ||
2363 | /* | 2367 | /* |
2364 | * currently GuestLAN only supports the ARP assist function | 2368 | * currently GuestLAN only supports the ARP assist function |
@@ -2400,7 +2404,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2400 | int tmp; | 2404 | int tmp; |
2401 | int rc; | 2405 | int rc; |
2402 | 2406 | ||
2403 | QETH_DBF_TEXT(trace, 3, "arprment"); | 2407 | QETH_DBF_TEXT(TRACE, 3, "arprment"); |
2404 | 2408 | ||
2405 | /* | 2409 | /* |
2406 | * currently GuestLAN only supports the ARP assist function | 2410 | * currently GuestLAN only supports the ARP assist function |
@@ -2439,7 +2443,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) | |||
2439 | int rc; | 2443 | int rc; |
2440 | int tmp; | 2444 | int tmp; |
2441 | 2445 | ||
2442 | QETH_DBF_TEXT(trace, 3, "arpflush"); | 2446 | QETH_DBF_TEXT(TRACE, 3, "arpflush"); |
2443 | 2447 | ||
2444 | /* | 2448 | /* |
2445 | * currently GuestLAN only supports the ARP assist function | 2449 | * currently GuestLAN only supports the ARP assist function |
@@ -2548,14 +2552,14 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2548 | rc = -EOPNOTSUPP; | 2552 | rc = -EOPNOTSUPP; |
2549 | } | 2553 | } |
2550 | if (rc) | 2554 | if (rc) |
2551 | QETH_DBF_TEXT_(trace, 2, "ioce%d", rc); | 2555 | QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); |
2552 | return rc; | 2556 | return rc; |
2553 | } | 2557 | } |
2554 | 2558 | ||
2555 | static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | 2559 | static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, |
2556 | struct sk_buff *skb, int ipv, int cast_type) | 2560 | struct sk_buff *skb, int ipv, int cast_type) |
2557 | { | 2561 | { |
2558 | QETH_DBF_TEXT(trace, 6, "fillhdr"); | 2562 | QETH_DBF_TEXT(TRACE, 6, "fillhdr"); |
2559 | 2563 | ||
2560 | memset(hdr, 0, sizeof(struct qeth_hdr)); | 2564 | memset(hdr, 0, sizeof(struct qeth_hdr)); |
2561 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; | 2565 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; |
@@ -2634,7 +2638,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2634 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; | 2638 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; |
2635 | struct qeth_eddp_context *ctx = NULL; | 2639 | struct qeth_eddp_context *ctx = NULL; |
2636 | 2640 | ||
2637 | QETH_DBF_TEXT(trace, 6, "l3xmit"); | 2641 | QETH_DBF_TEXT(TRACE, 6, "l3xmit"); |
2638 | 2642 | ||
2639 | if ((card->info.type == QETH_CARD_TYPE_IQD) && | 2643 | if ((card->info.type == QETH_CARD_TYPE_IQD) && |
2640 | (skb->protocol != htons(ETH_P_IPV6)) && | 2644 | (skb->protocol != htons(ETH_P_IPV6)) && |
@@ -2795,7 +2799,7 @@ static int qeth_l3_open(struct net_device *dev) | |||
2795 | { | 2799 | { |
2796 | struct qeth_card *card = netdev_priv(dev); | 2800 | struct qeth_card *card = netdev_priv(dev); |
2797 | 2801 | ||
2798 | QETH_DBF_TEXT(trace, 4, "qethopen"); | 2802 | QETH_DBF_TEXT(TRACE, 4, "qethopen"); |
2799 | if (card->state != CARD_STATE_SOFTSETUP) | 2803 | if (card->state != CARD_STATE_SOFTSETUP) |
2800 | return -ENODEV; | 2804 | return -ENODEV; |
2801 | card->data.state = CH_STATE_UP; | 2805 | card->data.state = CH_STATE_UP; |
@@ -2812,7 +2816,7 @@ static int qeth_l3_stop(struct net_device *dev) | |||
2812 | { | 2816 | { |
2813 | struct qeth_card *card = netdev_priv(dev); | 2817 | struct qeth_card *card = netdev_priv(dev); |
2814 | 2818 | ||
2815 | QETH_DBF_TEXT(trace, 4, "qethstop"); | 2819 | QETH_DBF_TEXT(TRACE, 4, "qethstop"); |
2816 | netif_tx_disable(dev); | 2820 | netif_tx_disable(dev); |
2817 | card->dev->flags &= ~IFF_UP; | 2821 | card->dev->flags &= ~IFF_UP; |
2818 | if (card->state == CARD_STATE_UP) | 2822 | if (card->state == CARD_STATE_UP) |
@@ -2957,6 +2961,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
2957 | card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid; | 2961 | card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid; |
2958 | card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid; | 2962 | card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid; |
2959 | card->dev->mtu = card->info.initial_mtu; | 2963 | card->dev->mtu = card->info.initial_mtu; |
2964 | card->dev->set_mac_address = NULL; | ||
2960 | SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); | 2965 | SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); |
2961 | card->dev->features |= NETIF_F_HW_VLAN_TX | | 2966 | card->dev->features |= NETIF_F_HW_VLAN_TX | |
2962 | NETIF_F_HW_VLAN_RX | | 2967 | NETIF_F_HW_VLAN_RX | |
@@ -2977,7 +2982,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, | |||
2977 | int index; | 2982 | int index; |
2978 | int i; | 2983 | int i; |
2979 | 2984 | ||
2980 | QETH_DBF_TEXT(trace, 6, "qdinput"); | 2985 | QETH_DBF_TEXT(TRACE, 6, "qdinput"); |
2981 | card = (struct qeth_card *) card_ptr; | 2986 | card = (struct qeth_card *) card_ptr; |
2982 | net_dev = card->dev; | 2987 | net_dev = card->dev; |
2983 | if (card->options.performance_stats) { | 2988 | if (card->options.performance_stats) { |
@@ -2986,11 +2991,11 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, | |||
2986 | } | 2991 | } |
2987 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { | 2992 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { |
2988 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { | 2993 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { |
2989 | QETH_DBF_TEXT(trace, 1, "qdinchk"); | 2994 | QETH_DBF_TEXT(TRACE, 1, "qdinchk"); |
2990 | QETH_DBF_TEXT_(trace, 1, "%s", CARD_BUS_ID(card)); | 2995 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); |
2991 | QETH_DBF_TEXT_(trace, 1, "%04X%04X", | 2996 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", |
2992 | first_element, count); | 2997 | first_element, count); |
2993 | QETH_DBF_TEXT_(trace, 1, "%04X%04X", queue, status); | 2998 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); |
2994 | qeth_schedule_recovery(card); | 2999 | qeth_schedule_recovery(card); |
2995 | return; | 3000 | return; |
2996 | } | 3001 | } |
@@ -3054,8 +3059,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3054 | enum qeth_card_states recover_flag; | 3059 | enum qeth_card_states recover_flag; |
3055 | 3060 | ||
3056 | BUG_ON(!card); | 3061 | BUG_ON(!card); |
3057 | QETH_DBF_TEXT(setup, 2, "setonlin"); | 3062 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
3058 | QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); | 3063 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
3059 | 3064 | ||
3060 | qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); | 3065 | qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); |
3061 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) { | 3066 | if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) { |
@@ -3067,23 +3072,23 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3067 | recover_flag = card->state; | 3072 | recover_flag = card->state; |
3068 | rc = ccw_device_set_online(CARD_RDEV(card)); | 3073 | rc = ccw_device_set_online(CARD_RDEV(card)); |
3069 | if (rc) { | 3074 | if (rc) { |
3070 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3075 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3071 | return -EIO; | 3076 | return -EIO; |
3072 | } | 3077 | } |
3073 | rc = ccw_device_set_online(CARD_WDEV(card)); | 3078 | rc = ccw_device_set_online(CARD_WDEV(card)); |
3074 | if (rc) { | 3079 | if (rc) { |
3075 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3080 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3076 | return -EIO; | 3081 | return -EIO; |
3077 | } | 3082 | } |
3078 | rc = ccw_device_set_online(CARD_DDEV(card)); | 3083 | rc = ccw_device_set_online(CARD_DDEV(card)); |
3079 | if (rc) { | 3084 | if (rc) { |
3080 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3085 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3081 | return -EIO; | 3086 | return -EIO; |
3082 | } | 3087 | } |
3083 | 3088 | ||
3084 | rc = qeth_core_hardsetup_card(card); | 3089 | rc = qeth_core_hardsetup_card(card); |
3085 | if (rc) { | 3090 | if (rc) { |
3086 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 3091 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
3087 | goto out_remove; | 3092 | goto out_remove; |
3088 | } | 3093 | } |
3089 | 3094 | ||
@@ -3096,11 +3101,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3096 | qeth_print_status_message(card); | 3101 | qeth_print_status_message(card); |
3097 | 3102 | ||
3098 | /* softsetup */ | 3103 | /* softsetup */ |
3099 | QETH_DBF_TEXT(setup, 2, "softsetp"); | 3104 | QETH_DBF_TEXT(SETUP, 2, "softsetp"); |
3100 | 3105 | ||
3101 | rc = qeth_send_startlan(card); | 3106 | rc = qeth_send_startlan(card); |
3102 | if (rc) { | 3107 | if (rc) { |
3103 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3108 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3104 | if (rc == 0xe080) { | 3109 | if (rc == 0xe080) { |
3105 | PRINT_WARN("LAN on card %s if offline! " | 3110 | PRINT_WARN("LAN on card %s if offline! " |
3106 | "Waiting for STARTLAN from card.\n", | 3111 | "Waiting for STARTLAN from card.\n", |
@@ -3114,21 +3119,21 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3114 | 3119 | ||
3115 | rc = qeth_l3_setadapter_parms(card); | 3120 | rc = qeth_l3_setadapter_parms(card); |
3116 | if (rc) | 3121 | if (rc) |
3117 | QETH_DBF_TEXT_(setup, 2, "2err%d", rc); | 3122 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
3118 | rc = qeth_l3_start_ipassists(card); | 3123 | rc = qeth_l3_start_ipassists(card); |
3119 | if (rc) | 3124 | if (rc) |
3120 | QETH_DBF_TEXT_(setup, 2, "3err%d", rc); | 3125 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
3121 | rc = qeth_l3_setrouting_v4(card); | 3126 | rc = qeth_l3_setrouting_v4(card); |
3122 | if (rc) | 3127 | if (rc) |
3123 | QETH_DBF_TEXT_(setup, 2, "4err%d", rc); | 3128 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); |
3124 | rc = qeth_l3_setrouting_v6(card); | 3129 | rc = qeth_l3_setrouting_v6(card); |
3125 | if (rc) | 3130 | if (rc) |
3126 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); | 3131 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
3127 | netif_tx_disable(card->dev); | 3132 | netif_tx_disable(card->dev); |
3128 | 3133 | ||
3129 | rc = qeth_init_qdio_queues(card); | 3134 | rc = qeth_init_qdio_queues(card); |
3130 | if (rc) { | 3135 | if (rc) { |
3131 | QETH_DBF_TEXT_(setup, 2, "6err%d", rc); | 3136 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); |
3132 | goto out_remove; | 3137 | goto out_remove; |
3133 | } | 3138 | } |
3134 | card->state = CARD_STATE_SOFTSETUP; | 3139 | card->state = CARD_STATE_SOFTSETUP; |
@@ -3167,8 +3172,8 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3167 | int rc = 0, rc2 = 0, rc3 = 0; | 3172 | int rc = 0, rc2 = 0, rc3 = 0; |
3168 | enum qeth_card_states recover_flag; | 3173 | enum qeth_card_states recover_flag; |
3169 | 3174 | ||
3170 | QETH_DBF_TEXT(setup, 3, "setoffl"); | 3175 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
3171 | QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); | 3176 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
3172 | 3177 | ||
3173 | if (card->dev && netif_carrier_ok(card->dev)) | 3178 | if (card->dev && netif_carrier_ok(card->dev)) |
3174 | netif_carrier_off(card->dev); | 3179 | netif_carrier_off(card->dev); |
@@ -3184,7 +3189,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3184 | if (!rc) | 3189 | if (!rc) |
3185 | rc = (rc2) ? rc2 : rc3; | 3190 | rc = (rc2) ? rc2 : rc3; |
3186 | if (rc) | 3191 | if (rc) |
3187 | QETH_DBF_TEXT_(setup, 2, "1err%d", rc); | 3192 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3188 | if (recover_flag == CARD_STATE_UP) | 3193 | if (recover_flag == CARD_STATE_UP) |
3189 | card->state = CARD_STATE_RECOVER; | 3194 | card->state = CARD_STATE_RECOVER; |
3190 | /* let user_space know that device is offline */ | 3195 | /* let user_space know that device is offline */ |
@@ -3203,11 +3208,11 @@ static int qeth_l3_recover(void *ptr) | |||
3203 | int rc = 0; | 3208 | int rc = 0; |
3204 | 3209 | ||
3205 | card = (struct qeth_card *) ptr; | 3210 | card = (struct qeth_card *) ptr; |
3206 | QETH_DBF_TEXT(trace, 2, "recover1"); | 3211 | QETH_DBF_TEXT(TRACE, 2, "recover1"); |
3207 | QETH_DBF_HEX(trace, 2, &card, sizeof(void *)); | 3212 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); |
3208 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 3213 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
3209 | return 0; | 3214 | return 0; |
3210 | QETH_DBF_TEXT(trace, 2, "recover2"); | 3215 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
3211 | PRINT_WARN("Recovery of device %s started ...\n", | 3216 | PRINT_WARN("Recovery of device %s started ...\n", |
3212 | CARD_BUS_ID(card)); | 3217 | CARD_BUS_ID(card)); |
3213 | card->use_hard_stop = 1; | 3218 | card->use_hard_stop = 1; |
@@ -3253,7 +3258,7 @@ static int qeth_l3_ip_event(struct notifier_block *this, | |||
3253 | if (dev_net(dev) != &init_net) | 3258 | if (dev_net(dev) != &init_net) |
3254 | return NOTIFY_DONE; | 3259 | return NOTIFY_DONE; |
3255 | 3260 | ||
3256 | QETH_DBF_TEXT(trace, 3, "ipevent"); | 3261 | QETH_DBF_TEXT(TRACE, 3, "ipevent"); |
3257 | card = qeth_l3_get_card_from_dev(dev); | 3262 | card = qeth_l3_get_card_from_dev(dev); |
3258 | if (!card) | 3263 | if (!card) |
3259 | return NOTIFY_DONE; | 3264 | return NOTIFY_DONE; |
@@ -3300,7 +3305,7 @@ static int qeth_l3_ip6_event(struct notifier_block *this, | |||
3300 | struct qeth_ipaddr *addr; | 3305 | struct qeth_ipaddr *addr; |
3301 | struct qeth_card *card; | 3306 | struct qeth_card *card; |
3302 | 3307 | ||
3303 | QETH_DBF_TEXT(trace, 3, "ip6event"); | 3308 | QETH_DBF_TEXT(TRACE, 3, "ip6event"); |
3304 | 3309 | ||
3305 | card = qeth_l3_get_card_from_dev(dev); | 3310 | card = qeth_l3_get_card_from_dev(dev); |
3306 | if (!card) | 3311 | if (!card) |
@@ -3343,7 +3348,7 @@ static int qeth_l3_register_notifiers(void) | |||
3343 | { | 3348 | { |
3344 | int rc; | 3349 | int rc; |
3345 | 3350 | ||
3346 | QETH_DBF_TEXT(trace, 5, "regnotif"); | 3351 | QETH_DBF_TEXT(TRACE, 5, "regnotif"); |
3347 | rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); | 3352 | rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); |
3348 | if (rc) | 3353 | if (rc) |
3349 | return rc; | 3354 | return rc; |
@@ -3362,7 +3367,7 @@ static int qeth_l3_register_notifiers(void) | |||
3362 | static void qeth_l3_unregister_notifiers(void) | 3367 | static void qeth_l3_unregister_notifiers(void) |
3363 | { | 3368 | { |
3364 | 3369 | ||
3365 | QETH_DBF_TEXT(trace, 5, "unregnot"); | 3370 | QETH_DBF_TEXT(TRACE, 5, "unregnot"); |
3366 | BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); | 3371 | BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); |
3367 | #ifdef CONFIG_QETH_IPV6 | 3372 | #ifdef CONFIG_QETH_IPV6 |
3368 | BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); | 3373 | BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); |
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 1831b196c70a..2cad5c67397e 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -50,7 +50,7 @@ struct gianfar_platform_data { | |||
50 | u32 device_flags; | 50 | u32 device_flags; |
51 | /* board specific information */ | 51 | /* board specific information */ |
52 | u32 board_flags; | 52 | u32 board_flags; |
53 | u32 bus_id; | 53 | char bus_id[MII_BUS_ID_SIZE]; |
54 | u32 phy_id; | 54 | u32 phy_id; |
55 | u8 mac_addr[6]; | 55 | u8 mac_addr[6]; |
56 | phy_interface_t interface; | 56 | phy_interface_t interface; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 5e43ae751412..779cbcd65f62 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -39,7 +39,8 @@ | |||
39 | SUPPORTED_1000baseT_Half | \ | 39 | SUPPORTED_1000baseT_Half | \ |
40 | SUPPORTED_1000baseT_Full) | 40 | SUPPORTED_1000baseT_Full) |
41 | 41 | ||
42 | /* Set phydev->irq to PHY_POLL if interrupts are not supported, | 42 | /* |
43 | * Set phydev->irq to PHY_POLL if interrupts are not supported, | ||
43 | * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if | 44 | * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if |
44 | * the attached driver handles the interrupt | 45 | * the attached driver handles the interrupt |
45 | */ | 46 | */ |
@@ -63,8 +64,6 @@ typedef enum { | |||
63 | PHY_INTERFACE_MODE_RTBI | 64 | PHY_INTERFACE_MODE_RTBI |
64 | } phy_interface_t; | 65 | } phy_interface_t; |
65 | 66 | ||
66 | #define MII_BUS_MAX 4 | ||
67 | |||
68 | 67 | ||
69 | #define PHY_INIT_TIMEOUT 100000 | 68 | #define PHY_INIT_TIMEOUT 100000 |
70 | #define PHY_STATE_TIME 1 | 69 | #define PHY_STATE_TIME 1 |
@@ -74,20 +73,30 @@ typedef enum { | |||
74 | #define PHY_MAX_ADDR 32 | 73 | #define PHY_MAX_ADDR 32 |
75 | 74 | ||
76 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ | 75 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ |
77 | #define PHY_ID_FMT "%x:%02x" | 76 | #define PHY_ID_FMT "%s:%02x" |
78 | 77 | ||
79 | /* The Bus class for PHYs. Devices which provide access to | 78 | /* |
80 | * PHYs should register using this structure */ | 79 | * Need to be a little smaller than phydev->dev.bus_id to leave room |
80 | * for the ":%02x" | ||
81 | */ | ||
82 | #define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3) | ||
83 | |||
84 | /* | ||
85 | * The Bus class for PHYs. Devices which provide access to | ||
86 | * PHYs should register using this structure | ||
87 | */ | ||
81 | struct mii_bus { | 88 | struct mii_bus { |
82 | const char *name; | 89 | const char *name; |
83 | int id; | 90 | char id[MII_BUS_ID_SIZE]; |
84 | void *priv; | 91 | void *priv; |
85 | int (*read)(struct mii_bus *bus, int phy_id, int regnum); | 92 | int (*read)(struct mii_bus *bus, int phy_id, int regnum); |
86 | int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); | 93 | int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); |
87 | int (*reset)(struct mii_bus *bus); | 94 | int (*reset)(struct mii_bus *bus); |
88 | 95 | ||
89 | /* A lock to ensure that only one thing can read/write | 96 | /* |
90 | * the MDIO bus at a time */ | 97 | * A lock to ensure that only one thing can read/write |
98 | * the MDIO bus at a time | ||
99 | */ | ||
91 | struct mutex mdio_lock; | 100 | struct mutex mdio_lock; |
92 | 101 | ||
93 | struct device *dev; | 102 | struct device *dev; |
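Provider-side sketch for the new string id: an MDIO bus driver now formats mii_bus::id with snprintf() instead of assigning an integer, keeping within MII_BUS_ID_SIZE so the later ":%02x" PHY suffix still fits in dev.bus_id. The function and accessor names below are assumptions, not code from this patch:

	#include <linux/phy.h>
	#include <linux/slab.h>

	/* assumed bus accessors, defined elsewhere; signatures match mii_bus */
	static int example_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
	static int example_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
				      u16 val);

	static int example_mdio_register(struct device *dev, struct resource *res,
					 void *priv)
	{
		struct mii_bus *bus;

		bus = kzalloc(sizeof(*bus), GFP_KERNEL);
		if (!bus)
			return -ENOMEM;

		bus->name = "example-mdio";
		/* the fixed-size id string replaces the old integer id */
		snprintf(bus->id, MII_BUS_ID_SIZE, "%x", (unsigned int)res->start);
		bus->read = example_mdio_read;
		bus->write = example_mdio_write;
		bus->priv = priv;
		bus->dev = dev;

		return mdiobus_register(bus);
	}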
@@ -98,8 +107,10 @@ struct mii_bus { | |||
98 | /* Phy addresses to be ignored when probing */ | 107 | /* Phy addresses to be ignored when probing */ |
99 | u32 phy_mask; | 108 | u32 phy_mask; |
100 | 109 | ||
101 | /* Pointer to an array of interrupts, each PHY's | 110 | /* |
102 | * interrupt at the index matching its address */ | 111 | * Pointer to an array of interrupts, each PHY's |
112 | * interrupt at the index matching its address | ||
113 | */ | ||
103 | int *irq; | 114 | int *irq; |
104 | }; | 115 | }; |
105 | 116 | ||
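Continuing the sketch above: the irq array described by this comment is normally allocated by the bus driver before mdiobus_register(), one slot per possible PHY address, with PHY_POLL marking addresses that have no interrupt line wired up:

	int i;

	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bus->irq)
		return -ENOMEM;
	for (i = 0; i < PHY_MAX_ADDR; i++)
		bus->irq[i] = PHY_POLL;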
@@ -251,7 +262,8 @@ struct phy_device { | |||
251 | /* Bus address of the PHY (0-32) */ | 262 | /* Bus address of the PHY (0-32) */ |
252 | int addr; | 263 | int addr; |
253 | 264 | ||
254 | /* forced speed & duplex (no autoneg) | 265 | /* |
266 | * forced speed & duplex (no autoneg) | ||
255 | * partner speed & duplex & pause (autoneg) | 267 | * partner speed & duplex & pause (autoneg) |
256 | */ | 268 | */ |
257 | int speed; | 269 | int speed; |
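For context, these speed/duplex fields are what a MAC driver's adjust_link() callback consumes once phylib has resolved the link. A hedged sketch; the example_priv layout and names are assumptions:

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	struct example_priv {			/* assumed private data layout */
		struct phy_device *phydev;
	};

	static void example_adjust_link(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		struct phy_device *phydev = priv->phydev;

		if (phydev->link)
			printk(KERN_DEBUG "%s: link up, %d Mb/s, %s duplex\n",
			       dev->name, phydev->speed,
			       phydev->duplex == DUPLEX_FULL ? "full" : "half");
		else
			printk(KERN_DEBUG "%s: link down\n", dev->name);
	}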
@@ -274,8 +286,10 @@ struct phy_device { | |||
274 | 286 | ||
275 | int link_timeout; | 287 | int link_timeout; |
276 | 288 | ||
277 | /* Interrupt number for this PHY | 289 | /* |
278 | * -1 means no interrupt */ | 290 | * Interrupt number for this PHY |
291 | * -1 means no interrupt | ||
292 | */ | ||
279 | int irq; | 293 | int irq; |
280 | 294 | ||
281 | /* private data pointer */ | 295 | /* private data pointer */ |
@@ -325,22 +339,28 @@ struct phy_driver { | |||
325 | u32 features; | 339 | u32 features; |
326 | u32 flags; | 340 | u32 flags; |
327 | 341 | ||
328 | /* Called to initialize the PHY, | 342 | /* |
329 | * including after a reset */ | 343 | * Called to initialize the PHY, |
344 | * including after a reset | ||
345 | */ | ||
330 | int (*config_init)(struct phy_device *phydev); | 346 | int (*config_init)(struct phy_device *phydev); |
331 | 347 | ||
332 | /* Called during discovery. Used to set | 348 | /* |
333 | * up device-specific structures, if any */ | 349 | * Called during discovery. Used to set |
350 | * up device-specific structures, if any | ||
351 | */ | ||
334 | int (*probe)(struct phy_device *phydev); | 352 | int (*probe)(struct phy_device *phydev); |
335 | 353 | ||
336 | /* PHY Power Management */ | 354 | /* PHY Power Management */ |
337 | int (*suspend)(struct phy_device *phydev); | 355 | int (*suspend)(struct phy_device *phydev); |
338 | int (*resume)(struct phy_device *phydev); | 356 | int (*resume)(struct phy_device *phydev); |
339 | 357 | ||
340 | /* Configures the advertisement and resets | 358 | /* |
359 | * Configures the advertisement and resets | ||
341 | * autonegotiation if phydev->autoneg is on, | 360 | * autonegotiation if phydev->autoneg is on, |
342 | * forces the speed to the current settings in phydev | 361 | * forces the speed to the current settings in phydev |
343 | * if phydev->autoneg is off */ | 362 | * if phydev->autoneg is off |
363 | */ | ||
344 | int (*config_aneg)(struct phy_device *phydev); | 364 | int (*config_aneg)(struct phy_device *phydev); |
345 | 365 | ||
346 | /* Determines the negotiated speed and duplex */ | 366 | /* Determines the negotiated speed and duplex */ |
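These callbacks are what a PHY driver fills in; many devices can lean on the generic genphy helpers for config_aneg/read_status. A hedged sketch of a minimal driver, with placeholder IDs and "example" names:

	#include <linux/module.h>
	#include <linux/phy.h>

	static int example_config_init(struct phy_device *phydev)
	{
		/* device-specific setup after reset would go here */
		return 0;
	}

	static struct phy_driver example_phy_driver = {
		.phy_id		= 0x01234560,		/* placeholder OUI/model */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.features	= PHY_BASIC_FEATURES,
		.config_init	= example_config_init,
		.config_aneg	= genphy_config_aneg,	/* advertise or force speed */
		.read_status	= genphy_read_status,	/* fills speed/duplex/link */
		.driver		= { .owner = THIS_MODULE },
	};

Such a driver would then be registered from its module init with phy_driver_register() and removed with phy_driver_unregister().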
@@ -361,6 +381,7 @@ struct phy_driver { | |||
361 | 381 | ||
362 | int phy_read(struct phy_device *phydev, u16 regnum); | 382 | int phy_read(struct phy_device *phydev, u16 regnum); |
363 | int phy_write(struct phy_device *phydev, u16 regnum, u16 val); | 383 | int phy_write(struct phy_device *phydev, u16 regnum, u16 val); |
384 | int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); | ||
364 | struct phy_device* get_phy_device(struct mii_bus *bus, int addr); | 385 | struct phy_device* get_phy_device(struct mii_bus *bus, int addr); |
365 | int phy_clear_interrupt(struct phy_device *phydev); | 386 | int phy_clear_interrupt(struct phy_device *phydev); |
366 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); | 387 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); |
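A hedged usage sketch of the newly exported get_phy_id(): read the 32-bit identifier of whatever sits at an MDIO address without allocating a phy_device. "bus" and "addr" are assumed to come from the caller:

	u32 id;

	if (get_phy_id(bus, addr, &id) == 0)
		printk(KERN_INFO "PHY at " PHY_ID_FMT " has id 0x%08x\n",
		       bus->id, addr, id);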