author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:30:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:30:02 -0500
commit	7c17d86a8502c2e30c2eea777ed1b830aa3b447b (patch)
tree	353f739a33f46f9861b479e64d2a59f9b5c85868 /drivers/net
parent	2485a4b610171f4e1c4ab0d053569747795c1bbe (diff)
parent	91dce7ddab99a29b600e3d792b847dc2cdbf0848 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
pptp: Accept packet with seq zero
RDS: Remove some unused iWARP code
net: fsl: fec: handle 10Mbps speed in RMII mode
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c: add missing iounmap
drivers/net/ethernet/tundra/tsi108_eth.c: add missing iounmap
ksz884x: fix mtu for VLAN
net_sched: sfq: add optional RED on top of SFQ
dp83640: Fix NOHZ local_softirq_pending 08 warning
gianfar: Fix invalid TX frames returned on error queue when time stamping
gianfar: Fix missing sock reference when processing TX time stamps
phylib: introduce mdiobus_alloc_size()
net: decrement memcg jump label when limit, not usage, is changed
net: reintroduce missing rcu_assign_pointer() calls
inet_diag: Rename inet_diag_req_compat into inet_diag_req
inet_diag: Rename inet_diag_req into inet_diag_req_v2
bond_alb: don't disable softirq under bond_alb_xmit
mac80211: fix rx->key NULL pointer dereference in promiscuous mode
nl80211: fix old station flags compatibility
mdio-octeon: use an unique MDIO bus name.
mdio-gpio: use an unique MDIO bus name.
...
Diffstat (limited to 'drivers/net')
42 files changed, 576 insertions, 394 deletions
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 106b88a04738..342626f4bc46 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -99,16 +99,26 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 
 /*********************** tlb specific functions ***************************/
 
-static inline void _lock_tx_hashtbl(struct bonding *bond)
+static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
+static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
 /* Caller must hold tx_hashtbl lock */
 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
 {
@@ -129,14 +139,13 @@ static inline void tlb_init_slave(struct slave *slave)
 	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
 }
 
-/* Caller must hold bond lock for read */
-static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+/* Caller must hold bond lock for read, BH disabled */
+static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			      int save_load)
 {
 	struct tlb_client_info *tx_hash_table;
 	u32 index;
 
-	_lock_tx_hashtbl(bond);
-
 	/* clear slave from tx_hashtbl */
 	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
 
@@ -151,8 +160,15 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
 	}
 
 	tlb_init_slave(slave);
+}
 
-	_unlock_tx_hashtbl(bond);
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			    int save_load)
+{
+	_lock_tx_hashtbl_bh(bond);
+	__tlb_clear_slave(bond, slave, save_load);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 /* Must be called before starting the monitor timer */
@@ -169,7 +185,7 @@ static int tlb_initialize(struct bonding *bond)
 		       bond->dev->name);
 		return -1;
 	}
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	bond_info->tx_hashtbl = new_hashtbl;
 
@@ -177,7 +193,7 @@ static int tlb_initialize(struct bonding *bond)
 		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
 	}
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 
 	return 0;
 }
@@ -187,12 +203,12 @@ static void tlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	kfree(bond_info->tx_hashtbl);
 	bond_info->tx_hashtbl = NULL;
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 static long long compute_gap(struct slave *slave)
@@ -226,15 +242,13 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 	return least_loaded;
 }
 
-/* Caller must hold bond lock for read */
-static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					  u32 skb_len)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct tlb_client_info *hash_table;
 	struct slave *assigned_slave;
 
-	_lock_tx_hashtbl(bond);
-
 	hash_table = bond_info->tx_hashtbl;
 	assigned_slave = hash_table[hash_index].tx_slave;
 	if (!assigned_slave) {
@@ -263,22 +277,46 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
 		hash_table[hash_index].tx_bytes += skb_len;
 	}
 
-	_unlock_tx_hashtbl(bond);
-
 	return assigned_slave;
 }
 
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					u32 skb_len)
+{
+	struct slave *tx_slave;
+	/*
+	 * We don't need to disable softirq here, becase
+	 * tlb_choose_channel() is only called by bond_alb_xmit()
+	 * which already has softirq disabled.
+	 */
+	_lock_tx_hashtbl(bond);
+	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
+	_unlock_tx_hashtbl(bond);
+	return tx_slave;
+}
+
 /*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl(struct bonding *bond)
+static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
+static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
 /* when an ARP REPLY is received from a client update its info
  * in the rx_hashtbl
  */
@@ -288,7 +326,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
 	client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -303,7 +341,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 		bond_info->rx_ntt = 1;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
@@ -401,7 +439,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 	u32 index, next_index;
 
 	/* clear slave from rx_hashtbl */
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	rx_hash_table = bond_info->rx_hashtbl;
 	index = bond_info->rx_hashtbl_head;
@@ -432,7 +470,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 		}
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	write_lock_bh(&bond->curr_slave_lock);
 
@@ -489,7 +527,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -507,7 +545,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	 */
 	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* The slave was assigned a new mac address - update the clients */
@@ -518,7 +556,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 	int ntt = 0;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -538,7 +576,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
 		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* mark all clients using src_ip to be updated */
@@ -709,7 +747,7 @@ static void rlb_rebalance(struct bonding *bond)
 	int ntt;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	ntt = 0;
 	hash_index = bond_info->rx_hashtbl_head;
@@ -727,7 +765,7 @@ static void rlb_rebalance(struct bonding *bond)
 	if (ntt) {
 		bond_info->rx_ntt = 1;
 	}
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* Caller must hold rx_hashtbl lock */
@@ -751,7 +789,7 @@ static int rlb_initialize(struct bonding *bond)
 		       bond->dev->name);
 		return -1;
 	}
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	bond_info->rx_hashtbl = new_hashtbl;
 
@@ -761,7 +799,7 @@ static int rlb_initialize(struct bonding *bond)
 		rlb_init_table_entry(bond_info->rx_hashtbl + i);
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	/* register to receive ARPs */
 	bond->recv_probe = rlb_arp_recv;
@@ -773,13 +811,13 @@ static void rlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	kfree(bond_info->rx_hashtbl);
 	bond_info->rx_hashtbl = NULL;
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -787,7 +825,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	u32 curr_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	curr_index = bond_info->rx_hashtbl_head;
 	while (curr_index != RLB_NULL_INDEX) {
@@ -812,7 +850,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 		curr_index = next_index;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /*********************** tlb/rlb shared functions *********************/
@@ -1320,7 +1358,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
 	} else {
 		if (tx_slave) {
-			tlb_clear_slave(bond, tx_slave, 0);
+			_lock_tx_hashtbl(bond);
+			__tlb_clear_slave(bond, tx_slave, 0);
+			_unlock_tx_hashtbl(bond);
 		}
 	}
 
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 9e8ba4f5636b..0f92e3567f68 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -623,7 +623,8 @@ static int ax_mii_init(struct net_device *dev)
 
 	ax->mii_bus->name = "ax88796_mii_bus";
 	ax->mii_bus->parent = dev->dev.parent;
-	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 
 	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
 	if (!ax->mii_bus->irq) {
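The MDIO bus id changes in this driver and in the similar hunks below all apply the same recipe: the id is built from the platform device's name plus its instance id instead of the bare instance number, so MDIO buses registered by different drivers can no longer end up with identical id strings. A rough user-space sketch of the formatting follows; the device name and the 17-byte id buffer are assumptions used only for illustration, not taken from the patch.

#include <stdio.h>

#define MII_BUS_ID_SIZE 17	/* assumed size of the mii_bus id buffer */

int main(void)
{
	char old_id[MII_BUS_ID_SIZE], new_id[MII_BUS_ID_SIZE];

	/* Old scheme: instance number only - a second driver's bus 0 collides. */
	snprintf(old_id, sizeof(old_id), "%x", 0);

	/* New scheme: "<pdev->name>-<id>", e.g. "ax88796-0" (name is illustrative). */
	snprintf(new_id, sizeof(new_id), "%s-%x", "ax88796", 0);

	printf("old: \"%s\"  new: \"%s\"\n", old_id, new_id);
	return 0;
}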
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index b6d69c91db96..d812a103e032 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1670,7 +1670,8 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 	miibus->name = "bfin_mii_bus";
 	miibus->phy_mask = mii_bus_pd->phy_mask;
 
-	snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+	snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	if (!miibus->irq)
 		goto out_err_irq_alloc;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index cc9262be69c8..8b95dd314253 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1171,7 +1171,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	aup->mii_bus->write = au1000_mdiobus_write;
 	aup->mii_bus->reset = au1000_mdiobus_reset;
 	aup->mii_bus->name = "au1000_eth_mii";
-	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, aup->mac_id);
 	aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	if (aup->mii_bus->irq == NULL)
 		goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d44331eb07fe..986019b2c849 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1727,7 +1727,7 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
 		bus->priv = priv;
 		bus->read = bcm_enet_mdio_read_phylib;
 		bus->write = bcm_enet_mdio_write_phylib;
-		sprintf(bus->id, "%d", priv->mac_id);
+		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
 
 		/* only probe bus where we think the PHY is, because
 		 * the mdio read operation return 0 instead of 0xffff
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 8fa7abc53ec6..084904ceaa30 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2259,7 +2259,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
 	}
 
 	sc->mii_bus->name = sbmac_mdio_string;
-	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pldev->name, idx);
 	sc->mii_bus->priv = sc;
 	sc->mii_bus->read = sbmac_mii_read;
 	sc->mii_bus->write = sbmac_mii_write;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index f3d5c65d99cf..23200680d4c1 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -243,7 +243,8 @@ static int macb_mii_init(struct macb *bp)
 	bp->mii_bus->read = &macb_mdio_read;
 	bp->mii_bus->write = &macb_mdio_write;
 	bp->mii_bus->reset = &macb_mdio_reset;
-	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
 	bp->mii_bus->priv = bp;
 	bp->mii_bus->parent = &bp->dev->dev;
 	pdata = bp->pdev->dev.platform_data;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index ce88c0f399f6..925c9bafc9b9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -325,7 +325,8 @@ static int dnet_mii_init(struct dnet *bp)
 	bp->mii_bus->write = &dnet_mdio_write;
 	bp->mii_bus->reset = &dnet_mdio_reset;
 
-	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
 
 	bp->mii_bus->priv = bp;
 
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ddcbbb34d1b9..7b25e9cf13f6 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -476,6 +476,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	} else {
 #ifdef FEC_MIIGSK_ENR
 		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+			u32 cfgr;
 			/* disable the gasket and wait */
 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -486,9 +487,11 @@ fec_restart(struct net_device *ndev, int duplex)
 			 * RMII, 50 MHz, no loopback, no echo
 			 * MII, 25 MHz, no loopback, no echo
 			 */
-			writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
-				1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
-
+			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
 
 			/* re-enable the gasket */
 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -1077,7 +1080,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	fep->mii_bus->read = fec_enet_mdio_read;
 	fep->mii_bus->write = fec_enet_mdio_write;
 	fep->mii_bus->reset = fec_enet_mdio_reset;
-	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, fep->dev_id + 1);
 	fep->mii_bus->priv = fep;
 	fep->mii_bus->parent = &pdev->dev;
 
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8b2c6d797e6d..8408c627b195 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -47,6 +47,10 @@
 #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
 #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
 
+#define BM_MIIGSK_CFGR_MII 0x00
+#define BM_MIIGSK_CFGR_RMII 0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
+
 #else
 
 #define FEC_ECNTRL 0x000 /* Ethernet control reg */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e01cdaa722a9..39d160d353a4 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1984,7 +1984,8 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
 	return fcb;
 }
 
-static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+				    int fcb_length)
 {
 	u8 flags = 0;
 
@@ -2006,7 +2007,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
 	 * frame (skb->data) and the start of the IP hdr.
 	 * l4os is the distance between the start of the
 	 * l3 hdr and the l4 hdr */
-	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
+	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
 	fcb->l4os = skb_network_header_len(skb);
 
 	fcb->flags = flags;
@@ -2046,7 +2047,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int i, rq = 0, do_tstamp = 0;
 	u32 bufaddr;
 	unsigned long flags;
-	unsigned int nr_frags, nr_txbds, length;
+	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
 
 	/*
 	 * TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2070,22 +2071,28 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* check if time stamp should be generated */
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-		     priv->hwts_tx_en))
+		     priv->hwts_tx_en)) {
 		do_tstamp = 1;
+		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+	}
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
 	     vlan_tx_tag_present(skb) ||
 	     unlikely(do_tstamp)) &&
-	     (skb_headroom(skb) < GMAC_FCB_LEN)) {
+	     (skb_headroom(skb) < fcb_length)) {
 		struct sk_buff *skb_new;
 
-		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+		skb_new = skb_realloc_headroom(skb, fcb_length);
 		if (!skb_new) {
 			dev->stats.tx_errors++;
 			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
+
+		/* Steal sock reference for processing TX time stamps */
+		swap(skb_new->sk, skb->sk);
+		swap(skb_new->destructor, skb->destructor);
 		kfree_skb(skb);
 		skb = skb_new;
 	}
@@ -2154,6 +2161,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lstatus = txbdp_start->lstatus;
 	}
 
+	/* Add TxPAL between FCB and frame if required */
+	if (unlikely(do_tstamp)) {
+		skb_push(skb, GMAC_TXPAL_LEN);
+		memset(skb->data, 0, GMAC_TXPAL_LEN);
+	}
+
 	/* Set up checksumming */
 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
 		fcb = gfar_add_fcb(skb);
@@ -2164,7 +2177,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_checksum_help(skb);
 		} else {
 			lstatus |= BD_LFLAG(TXBD_TOE);
-			gfar_tx_checksum(skb, fcb);
+			gfar_tx_checksum(skb, fcb, fcb_length);
 		}
 	}
 
@@ -2196,9 +2209,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * the full frame length.
 	 */
 	if (unlikely(do_tstamp)) {
-		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
 		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-				(skb_headlen(skb) - GMAC_FCB_LEN);
+				(skb_headlen(skb) - fcb_length);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2490,7 +2503,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			next = next_txbd(bdp, base, tx_ring_size);
-			buflen = next->length + GMAC_FCB_LEN;
+			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 		} else
 			buflen = bdp->length;
 
@@ -2502,6 +2515,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
 			skb_tstamp_tx(skb, &shhwtstamps);
 			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
 			bdp = next;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index fe7ac3a83194..40c33a7554c0 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -63,6 +63,9 @@ struct ethtool_rx_list {
 /* Length for FCB */
 #define GMAC_FCB_LEN 8
 
+/* Length for TxPAL */
+#define GMAC_TXPAL_LEN 16
+
 /* Default padding amount */
 #define DEFAULT_PADDING 2
 
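With the gianfar changes above, a frame that gets a hardware TX timestamp carries a 16-byte TxPAL block between the 8-byte FCB and the frame itself, so gfar_start_xmit() asks for fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN of headroom instead of GMAC_FCB_LEN alone, and gfar_clean_tx_ring() pulls the same amount back off before calling skb_tstamp_tx(). A tiny stand-alone sketch of that length selection, a simplification of the driver logic rather than the driver code itself:

#include <stdbool.h>
#include <stdio.h>

#define GMAC_FCB_LEN	8	/* frame control block */
#define GMAC_TXPAL_LEN	16	/* TxPAL inserted between FCB and frame when time stamping */

static unsigned int tx_header_len(bool do_tstamp)
{
	/* mirrors the fcb_length choice made in gfar_start_xmit() */
	return do_tstamp ? GMAC_FCB_LEN + GMAC_TXPAL_LEN : GMAC_FCB_LEN;
}

int main(void)
{
	printf("headroom without timestamping: %u bytes\n", tx_header_len(false));
	printf("headroom with timestamping:    %u bytes\n", tx_header_len(true));
	return 0;
}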
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 0b3567ab8121..85e2c6cd9708 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -98,6 +98,7 @@ struct ltq_etop_chan {
 
 struct ltq_etop_priv {
 	struct net_device *netdev;
+	struct platform_device *pdev;
 	struct ltq_eth_data *pldata;
 	struct resource *res;
 
@@ -436,7 +437,8 @@ ltq_etop_mdio_init(struct net_device *dev)
 	priv->mii_bus->read = ltq_etop_mdio_rd;
 	priv->mii_bus->write = ltq_etop_mdio_wr;
 	priv->mii_bus->name = "ltq_mii";
-	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		priv->pdev->name, priv->pdev->id);
 	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
 	if (!priv->mii_bus->irq) {
 		err = -ENOMEM;
@@ -734,6 +736,7 @@ ltq_etop_probe(struct platform_device *pdev)
 	dev->ethtool_ops = &ltq_etop_ethtool_ops;
 	priv = netdev_priv(dev);
 	priv->res = res;
+	priv->pdev = pdev;
 	priv->pldata = dev_get_platdata(&pdev->dev);
 	priv->netdev = dev;
 	spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 80aab4e5d695..9c049d2cb97d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2613,7 +2613,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 		msp->smi_bus->name = "mv643xx_eth smi";
 		msp->smi_bus->read = smi_bus_read;
 		msp->smi_bus->write = smi_bus_write,
-		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+			pdev->name, pdev->id);
 		msp->smi_bus->parent = &pdev->dev;
 		msp->smi_bus->phy_mask = 0xffffffff;
 		if (mdiobus_register(msp->smi_bus) < 0)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 5ec409e3da09..953ba5851f7b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1552,7 +1552,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	pep->smi_bus->name = "pxa168_eth smi";
 	pep->smi_bus->read = pxa168_smi_read;
 	pep->smi_bus->write = pxa168_smi_write;
-	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+		pdev->name, pdev->id);
 	pep->smi_bus->parent = &pdev->dev;
 	pep->smi_bus->phy_mask = 0xffffffff;
 	err = mdiobus_register(pep->smi_bus);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6ed09a85f035..e52cd310ae76 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -746,7 +746,7 @@
 #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
 
 #define MAX_ETHERNET_BODY_SIZE 1500
-#define ETHERNET_HEADER_SIZE 14
+#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
 
 #define MAX_ETHERNET_PACKET_SIZE \
 	(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
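The ksz884x change above widens the assumed Ethernet header from 14 bytes to 14 + VLAN_HLEN, so a full 1500-byte payload still fits once an 802.1Q tag is present. The arithmetic can be checked in isolation; the snippet below is only an illustration of the size math, with VLAN_HLEN spelled out as the 4-byte 802.1Q tag:

#include <stdio.h>

#define VLAN_HLEN			4	/* 802.1Q tag */
#define MAX_ETHERNET_BODY_SIZE		1500
#define ETHERNET_HEADER_SIZE		(14 + VLAN_HLEN)
#define MAX_ETHERNET_PACKET_SIZE \
	(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)

int main(void)
{
	/* 1514 with the old 14-byte header, 1518 once the VLAN tag is accounted for */
	printf("max packet size: %d bytes\n", MAX_ETHERNET_PACKET_SIZE);
	return 0;
}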
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index fc9bda9bc36c..6ece4295d78f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1702,7 +1702,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 	/* Hook up MII support for ethtool */
 	mdp->mii_bus->name = "sh_mii";
 	mdp->mii_bus->parent = &ndev->dev;
-	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
+	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		mdp->pdev->name, pdid);
 
 	/* PHY IRQ */
 	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a7ff8ea342b4..22e9c0181ce8 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1004,7 +1004,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
 	mb->write = s6mii_write;
 	mb->reset = s6mii_reset;
 	mb->priv = pd;
-	snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+	snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
 	mb->phy_mask = ~(1 << 0);
 	mb->irq = &pd->mii.irq[0];
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 9d0b8ced0234..24d2df068d71 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1044,7 +1044,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
 	}
 
 	pdata->mii_bus->name = SMSC_MDIONAME;
-	snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+	snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 	pdata->mii_bus->priv = pdata;
 	pdata->mii_bus->read = smsc911x_mii_read;
 	pdata->mii_bus->write = smsc911x_mii_write;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 41e6b33e1b08..c07cfe989f6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include "mmc.h"
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3738b4700548..96fa2da30763 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -307,7 +307,7 @@ static int stmmac_init_phy(struct net_device *dev)
 	priv->speed = 0;
 	priv->oldduplex = -1;
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+	snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
 	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 		 priv->plat->phy_addr);
 	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -772,7 +772,7 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 		dwmac_mmc_ctrl(priv->ioaddr, mode);
 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
 	} else
-		pr_info(" No MAC Management Counters available");
+		pr_info(" No MAC Management Counters available\n");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 51f441233962..da4a1042523a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -158,7 +158,8 @@ int stmmac_mdio_register(struct net_device *ndev)
 	new_bus->read = &stmmac_mdio_read;
 	new_bus->write = &stmmac_mdio_write;
 	new_bus->reset = &stmmac_mdio_reset;
-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		new_bus->name, mdio_bus_data->bus_id);
 	new_bus->priv = ndev;
 	new_bus->irq = irqlist;
 	new_bus->phy_mask = mdio_bus_data->phy_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 7b1594f4944e..1ac83243649a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -62,7 +62,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 	priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
 	if (!priv) {
 		pr_err("%s: main drivr probe failed", __func__);
-		goto out_release_region;
+		goto out_unmap;
 	}
 
 	priv->ioaddr = addr;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index aaac0c7ad111..4d9a28ffd3c3 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1269,7 +1269,7 @@ int __devinit cpmac_init(void)
 	}
 
 	cpmac_mii->phy_mask = ~(mask | 0x80000000);
-	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
+	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
 
 	res = mdiobus_register(cpmac_mii);
 	if (res)
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 7615040df756..ef7c9c17bfff 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -313,7 +313,8 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
 	data->bus->reset = davinci_mdio_reset,
 	data->bus->parent = dev;
 	data->bus->priv = data;
-	snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+	snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 
 	data->clk = clk_get(dev, NULL);
 	if (IS_ERR(data->clk)) {
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a9ce01bafd20..164fb775d7b3 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1604,7 +1604,7 @@ tsi108_init_one(struct platform_device *pdev)
 	data->phyregs = ioremap(einfo->phyregs, 0x400);
 	if (NULL == data->phyregs) {
 		err = -ENOMEM;
-		goto regs_fail;
+		goto phyregs_fail;
 	}
 	/* MII setup */
 	data->mii_if.dev = dev;
@@ -1663,9 +1663,11 @@ tsi108_init_one(struct platform_device *pdev)
 	return 0;
 
 register_fail:
-	iounmap(data->regs);
 	iounmap(data->phyregs);
 
+phyregs_fail:
+	iounmap(data->regs);
+
 regs_fail:
 	free_netdev(dev);
 	return err;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 5c4983b2870a..10b18eb63d25 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -39,10 +39,9 @@
 
 /* A few user-configurable values.
    These may be modified when a driver module is loaded. */
-
-#define DEBUG
-static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
-static int max_interrupt_work = 20;
+static int debug = 0;
+#define RHINE_MSG_DEFAULT \
+        (0x0000)
 
 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
    Setting to > 1518 effectively disables this feature. */
@@ -128,12 +127,10 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
 
-module_param(max_interrupt_work, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
 module_param(avoid_D3, bool, 0);
-MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
@@ -351,16 +348,25 @@ static const int mmio_verify_registers[] = {
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
-	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
-	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
-	IntrPCIErr=0x0040,
-	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
-	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
-	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
-	IntrRxWakeUp=0x8000,
-	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
-	IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
-	IntrTxErrSummary=0x082218,
+	IntrRxDone = 0x0001,
+	IntrTxDone = 0x0002,
+	IntrRxErr = 0x0004,
+	IntrTxError = 0x0008,
+	IntrRxEmpty = 0x0020,
+	IntrPCIErr = 0x0040,
+	IntrStatsMax = 0x0080,
+	IntrRxEarly = 0x0100,
+	IntrTxUnderrun = 0x0210,
+	IntrRxOverflow = 0x0400,
+	IntrRxDropped = 0x0800,
+	IntrRxNoBuf = 0x1000,
+	IntrTxAborted = 0x2000,
+	IntrLinkChange = 0x4000,
+	IntrRxWakeUp = 0x8000,
+	IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
+	IntrNormalSummary = IntrRxDone | IntrTxDone,
+	IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
+			   IntrTxUnderrun,
 };
 
 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
@@ -439,8 +445,13 @@ struct rhine_private {
 	struct net_device *dev;
 	struct napi_struct napi;
 	spinlock_t lock;
+	struct mutex task_lock;
+	bool task_enable;
+	struct work_struct slow_event_task;
 	struct work_struct reset_task;
 
+	u32 msg_enable;
+
 	/* Frequently used values: keep some adjacent for cache effect. */
 	u32 quirks;
 	struct rx_desc *rx_head_desc;
@@ -476,41 +487,50 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_reset_task(struct work_struct *work);
+static void rhine_slow_event_task(struct work_struct *work);
 static void rhine_tx_timeout(struct net_device *dev);
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 				  struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 static void rhine_tx(struct net_device *dev);
 static int rhine_rx(struct net_device *dev, int limit);
-static void rhine_error(struct net_device *dev, int intr_status);
 static void rhine_set_rx_mode(struct net_device *dev);
 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int rhine_close(struct net_device *dev);
-static void rhine_shutdown (struct pci_dev *pdev);
 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
-static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_init_cam_filter(struct net_device *dev);
-static void rhine_update_vcam(struct net_device *dev);
+static void rhine_restart_tx(struct net_device *dev);
+
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+{
+	void __iomem *ioaddr = rp->base;
+	int i;
 
-#define RHINE_WAIT_FOR(condition) \
-do { \
-	int i = 1024; \
-	while (!(condition) && --i) \
-		; \
-	if (debug > 1 && i < 512) \
-		pr_info("%4d cycles used @ %s:%d\n", \
-			1024 - i, __func__, __LINE__); \
-} while (0)
-
-static inline u32 get_intr_status(struct net_device *dev)
+	for (i = 0; i < 1024; i++) {
+		if (high ^ !!(ioread8(ioaddr + reg) & mask))
+			break;
+		udelay(10);
+	}
+	if (i > 64) {
+		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
+			  "count: %04d\n", high ? "high" : "low", reg, mask, i);
+	}
+}
+
+static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
+{
+	rhine_wait_bit(rp, reg, mask, true);
+}
+
+static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
+{
+	rhine_wait_bit(rp, reg, mask, false);
+}
+
+static u32 rhine_get_events(struct rhine_private *rp)
 {
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 intr_status;
 
@@ -521,6 +541,16 @@ static inline u32 get_intr_status(struct net_device *dev) | |||
521 | return intr_status; | 541 | return intr_status; |
522 | } | 542 | } |
523 | 543 | ||
544 | static void rhine_ack_events(struct rhine_private *rp, u32 mask) | ||
545 | { | ||
546 | void __iomem *ioaddr = rp->base; | ||
547 | |||
548 | if (rp->quirks & rqStatusWBRace) | ||
549 | iowrite8(mask >> 16, ioaddr + IntrStatus2); | ||
550 | iowrite16(mask, ioaddr + IntrStatus); | ||
551 | mmiowb(); | ||
552 | } | ||
553 | |||
524 | /* | 554 | /* |
525 | * Get power related registers into sane state. | 555 | * Get power related registers into sane state. |
526 | * Notify user about past WOL event. | 556 | * Notify user about past WOL event. |
@@ -585,6 +615,7 @@ static void rhine_chip_reset(struct net_device *dev) | |||
585 | { | 615 | { |
586 | struct rhine_private *rp = netdev_priv(dev); | 616 | struct rhine_private *rp = netdev_priv(dev); |
587 | void __iomem *ioaddr = rp->base; | 617 | void __iomem *ioaddr = rp->base; |
618 | u8 cmd1; | ||
588 | 619 | ||
589 | iowrite8(Cmd1Reset, ioaddr + ChipCmd1); | 620 | iowrite8(Cmd1Reset, ioaddr + ChipCmd1); |
590 | IOSYNC; | 621 | IOSYNC; |
@@ -597,13 +628,12 @@ static void rhine_chip_reset(struct net_device *dev) | |||
597 | iowrite8(0x40, ioaddr + MiscCmd); | 628 | iowrite8(0x40, ioaddr + MiscCmd); |
598 | 629 | ||
599 | /* Reset can take somewhat longer (rare) */ | 630 | /* Reset can take somewhat longer (rare) */ |
600 | RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset)); | 631 | rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); |
601 | } | 632 | } |
602 | 633 | ||
603 | if (debug > 1) | 634 | cmd1 = ioread8(ioaddr + ChipCmd1); |
604 | netdev_info(dev, "Reset %s\n", | 635 | netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? |
605 | (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ? | 636 | "failed" : "succeeded"); |
606 | "failed" : "succeeded"); | ||
607 | } | 637 | } |
608 | 638 | ||
609 | #ifdef USE_MMIO | 639 | #ifdef USE_MMIO |
@@ -629,9 +659,15 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev) | |||
629 | { | 659 | { |
630 | struct rhine_private *rp = netdev_priv(dev); | 660 | struct rhine_private *rp = netdev_priv(dev); |
631 | void __iomem *ioaddr = rp->base; | 661 | void __iomem *ioaddr = rp->base; |
662 | int i; | ||
632 | 663 | ||
633 | outb(0x20, pioaddr + MACRegEEcsr); | 664 | outb(0x20, pioaddr + MACRegEEcsr); |
634 | RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20)); | 665 | for (i = 0; i < 1024; i++) { |
666 | if (!(inb(pioaddr + MACRegEEcsr) & 0x20)) | ||
667 | break; | ||
668 | } | ||
669 | if (i > 512) | ||
670 | pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); | ||
635 | 671 | ||
636 | #ifdef USE_MMIO | 672 | #ifdef USE_MMIO |
637 | /* | 673 | /* |
@@ -657,23 +693,127 @@ static void rhine_poll(struct net_device *dev) | |||
657 | } | 693 | } |
658 | #endif | 694 | #endif |
659 | 695 | ||
696 | static void rhine_kick_tx_threshold(struct rhine_private *rp) | ||
697 | { | ||
698 | if (rp->tx_thresh < 0xe0) { | ||
699 | void __iomem *ioaddr = rp->base; | ||
700 | |||
701 | rp->tx_thresh += 0x20; | ||
702 | BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); | ||
703 | } | ||
704 | } | ||
705 | |||
706 | static void rhine_tx_err(struct rhine_private *rp, u32 status) | ||
707 | { | ||
708 | struct net_device *dev = rp->dev; | ||
709 | |||
710 | if (status & IntrTxAborted) { | ||
711 | netif_info(rp, tx_err, dev, | ||
712 | "Abort %08x, frame dropped\n", status); | ||
713 | } | ||
714 | |||
715 | if (status & IntrTxUnderrun) { | ||
716 | rhine_kick_tx_threshold(rp); | ||
717 | netif_info(rp, tx_err, dev, "Transmitter underrun, " | ||
718 | "Tx threshold now %02x\n", rp->tx_thresh); | ||
719 | } | ||
720 | |||
721 | if (status & IntrTxDescRace) | ||
722 | netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); | ||
723 | |||
724 | if ((status & IntrTxError) && | ||
725 | (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { | ||
726 | rhine_kick_tx_threshold(rp); | ||
727 | netif_info(rp, tx_err, dev, "Unspecified error. " | ||
728 | "Tx threshold now %02x\n", rp->tx_thresh); | ||
729 | } | ||
730 | |||
731 | rhine_restart_tx(dev); | ||
732 | } | ||
733 | |||
734 | static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) | ||
735 | { | ||
736 | void __iomem *ioaddr = rp->base; | ||
737 | struct net_device_stats *stats = &rp->dev->stats; | ||
738 | |||
739 | stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); | ||
740 | stats->rx_missed_errors += ioread16(ioaddr + RxMissed); | ||
741 | |||
742 | /* | ||
743 | * Clears the "tally counters" for CRC errors and missed frames(?). | ||
744 | * It has been reported that some chips need a write of 0 to clear | ||
745 | * these, for others the counters are set to 1 when written to and | ||
746 | * instead cleared when read. So we clear them both ways ... | ||
747 | */ | ||
748 | iowrite32(0, ioaddr + RxMissed); | ||
749 | ioread16(ioaddr + RxCRCErrs); | ||
750 | ioread16(ioaddr + RxMissed); | ||
751 | } | ||
752 | |||
753 | #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ | ||
754 | IntrRxErr | \ | ||
755 | IntrRxEmpty | \ | ||
756 | IntrRxOverflow | \ | ||
757 | IntrRxDropped | \ | ||
758 | IntrRxNoBuf | \ | ||
759 | IntrRxWakeUp) | ||
760 | |||
761 | #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ | ||
762 | IntrTxAborted | \ | ||
763 | IntrTxUnderrun | \ | ||
764 | IntrTxDescRace) | ||
765 | #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) | ||
766 | |||
767 | #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ | ||
768 | RHINE_EVENT_NAPI_TX | \ | ||
769 | IntrStatsMax) | ||
770 | #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) | ||
771 | #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) | ||
772 | |||
660 | static int rhine_napipoll(struct napi_struct *napi, int budget) | 773 | static int rhine_napipoll(struct napi_struct *napi, int budget) |
661 | { | 774 | { |
662 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); | 775 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); |
663 | struct net_device *dev = rp->dev; | 776 | struct net_device *dev = rp->dev; |
664 | void __iomem *ioaddr = rp->base; | 777 | void __iomem *ioaddr = rp->base; |
665 | int work_done; | 778 | u16 enable_mask = RHINE_EVENT & 0xffff; |
779 | int work_done = 0; | ||
780 | u32 status; | ||
781 | |||
782 | status = rhine_get_events(rp); | ||
783 | rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); | ||
784 | |||
785 | if (status & RHINE_EVENT_NAPI_RX) | ||
786 | work_done += rhine_rx(dev, budget); | ||
787 | |||
788 | if (status & RHINE_EVENT_NAPI_TX) { | ||
789 | if (status & RHINE_EVENT_NAPI_TX_ERR) { | ||
790 | /* Avoid scavenging before Tx engine turned off */ | ||
791 | rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); | ||
792 | if (ioread8(ioaddr + ChipCmd) & CmdTxOn) | ||
793 | netif_warn(rp, tx_err, dev, "Tx still on\n"); | ||
794 | } | ||
666 | 795 | ||
667 | work_done = rhine_rx(dev, budget); | 796 | rhine_tx(dev); |
797 | |||
798 | if (status & RHINE_EVENT_NAPI_TX_ERR) | ||
799 | rhine_tx_err(rp, status); | ||
800 | } | ||
801 | |||
802 | if (status & IntrStatsMax) { | ||
803 | spin_lock(&rp->lock); | ||
804 | rhine_update_rx_crc_and_missed_errord(rp); | ||
805 | spin_unlock(&rp->lock); | ||
806 | } | ||
807 | |||
808 | if (status & RHINE_EVENT_SLOW) { | ||
809 | enable_mask &= ~RHINE_EVENT_SLOW; | ||
810 | schedule_work(&rp->slow_event_task); | ||
811 | } | ||
668 | 812 | ||
669 | if (work_done < budget) { | 813 | if (work_done < budget) { |
670 | napi_complete(napi); | 814 | napi_complete(napi); |
671 | 815 | iowrite16(enable_mask, ioaddr + IntrEnable); | |
672 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | 816 | mmiowb(); |
673 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | ||
674 | IntrTxDone | IntrTxError | IntrTxUnderrun | | ||
675 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | ||
676 | ioaddr + IntrEnable); | ||
677 | } | 817 | } |
678 | return work_done; | 818 | return work_done; |
679 | } | 819 | } |
@@ -797,6 +937,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
797 | rp->quirks = quirks; | 937 | rp->quirks = quirks; |
798 | rp->pioaddr = pioaddr; | 938 | rp->pioaddr = pioaddr; |
799 | rp->pdev = pdev; | 939 | rp->pdev = pdev; |
940 | rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); | ||
800 | 941 | ||
801 | rc = pci_request_regions(pdev, DRV_NAME); | 942 | rc = pci_request_regions(pdev, DRV_NAME); |
802 | if (rc) | 943 | if (rc) |
@@ -856,7 +997,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
856 | dev->irq = pdev->irq; | 997 | dev->irq = pdev->irq; |
857 | 998 | ||
858 | spin_lock_init(&rp->lock); | 999 | spin_lock_init(&rp->lock); |
1000 | mutex_init(&rp->task_lock); | ||
859 | INIT_WORK(&rp->reset_task, rhine_reset_task); | 1001 | INIT_WORK(&rp->reset_task, rhine_reset_task); |
1002 | INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); | ||
860 | 1003 | ||
861 | rp->mii_if.dev = dev; | 1004 | rp->mii_if.dev = dev; |
862 | rp->mii_if.mdio_read = mdio_read; | 1005 | rp->mii_if.mdio_read = mdio_read; |
@@ -916,8 +1059,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
916 | } | 1059 | } |
917 | } | 1060 | } |
918 | rp->mii_if.phy_id = phy_id; | 1061 | rp->mii_if.phy_id = phy_id; |
919 | if (debug > 1 && avoid_D3) | 1062 | if (avoid_D3) |
920 | netdev_info(dev, "No D3 power state at shutdown\n"); | 1063 | netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); |
921 | 1064 | ||
922 | return 0; | 1065 | return 0; |
923 | 1066 | ||
@@ -1093,7 +1236,7 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media) | |||
1093 | struct rhine_private *rp = netdev_priv(dev); | 1236 | struct rhine_private *rp = netdev_priv(dev); |
1094 | void __iomem *ioaddr = rp->base; | 1237 | void __iomem *ioaddr = rp->base; |
1095 | 1238 | ||
1096 | mii_check_media(&rp->mii_if, debug, init_media); | 1239 | mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); |
1097 | 1240 | ||
1098 | if (rp->mii_if.full_duplex) | 1241 | if (rp->mii_if.full_duplex) |
1099 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, | 1242 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, |
@@ -1101,24 +1244,26 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media) | |||
1101 | else | 1244 | else |
1102 | iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, | 1245 | iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, |
1103 | ioaddr + ChipCmd1); | 1246 | ioaddr + ChipCmd1); |
1104 | if (debug > 1) | 1247 | |
1105 | netdev_info(dev, "force_media %d, carrier %d\n", | 1248 | netif_info(rp, link, dev, "force_media %d, carrier %d\n", |
1106 | rp->mii_if.force_media, netif_carrier_ok(dev)); | 1249 | rp->mii_if.force_media, netif_carrier_ok(dev)); |
1107 | } | 1250 | } |
1108 | 1251 | ||
1109 | /* Called after status of force_media possibly changed */ | 1252 | /* Called after status of force_media possibly changed */ |
1110 | static void rhine_set_carrier(struct mii_if_info *mii) | 1253 | static void rhine_set_carrier(struct mii_if_info *mii) |
1111 | { | 1254 | { |
1255 | struct net_device *dev = mii->dev; | ||
1256 | struct rhine_private *rp = netdev_priv(dev); | ||
1257 | |||
1112 | if (mii->force_media) { | 1258 | if (mii->force_media) { |
1113 | /* autoneg is off: Link is always assumed to be up */ | 1259 | /* autoneg is off: Link is always assumed to be up */ |
1114 | if (!netif_carrier_ok(mii->dev)) | 1260 | if (!netif_carrier_ok(dev)) |
1115 | netif_carrier_on(mii->dev); | 1261 | netif_carrier_on(dev); |
1116 | } | 1262 | } else /* Let MII library update carrier status */ |
1117 | else /* Let MMI library update carrier status */ | 1263 | rhine_check_media(dev, 0); |
1118 | rhine_check_media(mii->dev, 0); | 1264 | |
1119 | if (debug > 1) | 1265 | netif_info(rp, link, dev, "force_media %d, carrier %d\n", |
1120 | netdev_info(mii->dev, "force_media %d, carrier %d\n", | 1266 | mii->force_media, netif_carrier_ok(dev)); |
1121 | mii->force_media, netif_carrier_ok(mii->dev)); | ||
1122 | } | 1267 | } |
1123 | 1268 | ||
1124 | /** | 1269 | /** |
@@ -1266,10 +1411,10 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
1266 | { | 1411 | { |
1267 | struct rhine_private *rp = netdev_priv(dev); | 1412 | struct rhine_private *rp = netdev_priv(dev); |
1268 | 1413 | ||
1269 | spin_lock_irq(&rp->lock); | 1414 | spin_lock_bh(&rp->lock); |
1270 | set_bit(vid, rp->active_vlans); | 1415 | set_bit(vid, rp->active_vlans); |
1271 | rhine_update_vcam(dev); | 1416 | rhine_update_vcam(dev); |
1272 | spin_unlock_irq(&rp->lock); | 1417 | spin_unlock_bh(&rp->lock); |
1273 | return 0; | 1418 | return 0; |
1274 | } | 1419 | } |
1275 | 1420 | ||
@@ -1277,10 +1422,10 @@ static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
1277 | { | 1422 | { |
1278 | struct rhine_private *rp = netdev_priv(dev); | 1423 | struct rhine_private *rp = netdev_priv(dev); |
1279 | 1424 | ||
1280 | spin_lock_irq(&rp->lock); | 1425 | spin_lock_bh(&rp->lock); |
1281 | clear_bit(vid, rp->active_vlans); | 1426 | clear_bit(vid, rp->active_vlans); |
1282 | rhine_update_vcam(dev); | 1427 | rhine_update_vcam(dev); |
1283 | spin_unlock_irq(&rp->lock); | 1428 | spin_unlock_bh(&rp->lock); |
1284 | return 0; | 1429 | return 0; |
1285 | } | 1430 | } |
1286 | 1431 | ||
@@ -1310,12 +1455,7 @@ static void init_registers(struct net_device *dev) | |||
1310 | 1455 | ||
1311 | napi_enable(&rp->napi); | 1456 | napi_enable(&rp->napi); |
1312 | 1457 | ||
1313 | /* Enable interrupts by setting the interrupt mask. */ | 1458 | iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); |
1314 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | ||
1315 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | ||
1316 | IntrTxDone | IntrTxError | IntrTxUnderrun | | ||
1317 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | ||
1318 | ioaddr + IntrEnable); | ||
1319 | 1459 | ||
1320 | iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), | 1460 | iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), |
1321 | ioaddr + ChipCmd); | 1461 | ioaddr + ChipCmd); |
@@ -1323,23 +1463,27 @@ static void init_registers(struct net_device *dev) | |||
1323 | } | 1463 | } |
1324 | 1464 | ||
1325 | /* Enable MII link status auto-polling (required for IntrLinkChange) */ | 1465 | /* Enable MII link status auto-polling (required for IntrLinkChange) */ |
1326 | static void rhine_enable_linkmon(void __iomem *ioaddr) | 1466 | static void rhine_enable_linkmon(struct rhine_private *rp) |
1327 | { | 1467 | { |
1468 | void __iomem *ioaddr = rp->base; | ||
1469 | |||
1328 | iowrite8(0, ioaddr + MIICmd); | 1470 | iowrite8(0, ioaddr + MIICmd); |
1329 | iowrite8(MII_BMSR, ioaddr + MIIRegAddr); | 1471 | iowrite8(MII_BMSR, ioaddr + MIIRegAddr); |
1330 | iowrite8(0x80, ioaddr + MIICmd); | 1472 | iowrite8(0x80, ioaddr + MIICmd); |
1331 | 1473 | ||
1332 | RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20)); | 1474 | rhine_wait_bit_high(rp, MIIRegAddr, 0x20); |
1333 | 1475 | ||
1334 | iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); | 1476 | iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); |
1335 | } | 1477 | } |
1336 | 1478 | ||
1337 | /* Disable MII link status auto-polling (required for MDIO access) */ | 1479 | /* Disable MII link status auto-polling (required for MDIO access) */ |
1338 | static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) | 1480 | static void rhine_disable_linkmon(struct rhine_private *rp) |
1339 | { | 1481 | { |
1482 | void __iomem *ioaddr = rp->base; | ||
1483 | |||
1340 | iowrite8(0, ioaddr + MIICmd); | 1484 | iowrite8(0, ioaddr + MIICmd); |
1341 | 1485 | ||
1342 | if (quirks & rqRhineI) { | 1486 | if (rp->quirks & rqRhineI) { |
1343 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR | 1487 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR |
1344 | 1488 | ||
1345 | /* Can be called from ISR. Evil. */ | 1489 | /* Can be called from ISR. Evil. */ |
@@ -1348,13 +1492,13 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) | |||
1348 | /* 0x80 must be set immediately before turning it off */ | 1492 | /* 0x80 must be set immediately before turning it off */ |
1349 | iowrite8(0x80, ioaddr + MIICmd); | 1493 | iowrite8(0x80, ioaddr + MIICmd); |
1350 | 1494 | ||
1351 | RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20); | 1495 | rhine_wait_bit_high(rp, MIIRegAddr, 0x20); |
1352 | 1496 | ||
1353 | /* Heh. Now clear 0x80 again. */ | 1497 | /* Heh. Now clear 0x80 again. */ |
1354 | iowrite8(0, ioaddr + MIICmd); | 1498 | iowrite8(0, ioaddr + MIICmd); |
1355 | } | 1499 | } |
1356 | else | 1500 | else |
1357 | RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80); | 1501 | rhine_wait_bit_high(rp, MIIRegAddr, 0x80); |
1358 | } | 1502 | } |
1359 | 1503 | ||
1360 | /* Read and write over the MII Management Data I/O (MDIO) interface. */ | 1504 | /* Read and write over the MII Management Data I/O (MDIO) interface. */ |
@@ -1365,16 +1509,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum) | |||
1365 | void __iomem *ioaddr = rp->base; | 1509 | void __iomem *ioaddr = rp->base; |
1366 | int result; | 1510 | int result; |
1367 | 1511 | ||
1368 | rhine_disable_linkmon(ioaddr, rp->quirks); | 1512 | rhine_disable_linkmon(rp); |
1369 | 1513 | ||
1370 | /* rhine_disable_linkmon already cleared MIICmd */ | 1514 | /* rhine_disable_linkmon already cleared MIICmd */ |
1371 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | 1515 | iowrite8(phy_id, ioaddr + MIIPhyAddr); |
1372 | iowrite8(regnum, ioaddr + MIIRegAddr); | 1516 | iowrite8(regnum, ioaddr + MIIRegAddr); |
1373 | iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ | 1517 | iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ |
1374 | RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40)); | 1518 | rhine_wait_bit_low(rp, MIICmd, 0x40); |
1375 | result = ioread16(ioaddr + MIIData); | 1519 | result = ioread16(ioaddr + MIIData); |
1376 | 1520 | ||
1377 | rhine_enable_linkmon(ioaddr); | 1521 | rhine_enable_linkmon(rp); |
1378 | return result; | 1522 | return result; |
1379 | } | 1523 | } |
1380 | 1524 | ||
@@ -1383,16 +1527,33 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value | |||
1383 | struct rhine_private *rp = netdev_priv(dev); | 1527 | struct rhine_private *rp = netdev_priv(dev); |
1384 | void __iomem *ioaddr = rp->base; | 1528 | void __iomem *ioaddr = rp->base; |
1385 | 1529 | ||
1386 | rhine_disable_linkmon(ioaddr, rp->quirks); | 1530 | rhine_disable_linkmon(rp); |
1387 | 1531 | ||
1388 | /* rhine_disable_linkmon already cleared MIICmd */ | 1532 | /* rhine_disable_linkmon already cleared MIICmd */ |
1389 | iowrite8(phy_id, ioaddr + MIIPhyAddr); | 1533 | iowrite8(phy_id, ioaddr + MIIPhyAddr); |
1390 | iowrite8(regnum, ioaddr + MIIRegAddr); | 1534 | iowrite8(regnum, ioaddr + MIIRegAddr); |
1391 | iowrite16(value, ioaddr + MIIData); | 1535 | iowrite16(value, ioaddr + MIIData); |
1392 | iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ | 1536 | iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ |
1393 | RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20)); | 1537 | rhine_wait_bit_low(rp, MIICmd, 0x20); |
1394 | 1538 | ||
1395 | rhine_enable_linkmon(ioaddr); | 1539 | rhine_enable_linkmon(rp); |
1540 | } | ||
1541 | |||
1542 | static void rhine_task_disable(struct rhine_private *rp) | ||
1543 | { | ||
1544 | mutex_lock(&rp->task_lock); | ||
1545 | rp->task_enable = false; | ||
1546 | mutex_unlock(&rp->task_lock); | ||
1547 | |||
1548 | cancel_work_sync(&rp->slow_event_task); | ||
1549 | cancel_work_sync(&rp->reset_task); | ||
1550 | } | ||
1551 | |||
1552 | static void rhine_task_enable(struct rhine_private *rp) | ||
1553 | { | ||
1554 | mutex_lock(&rp->task_lock); | ||
1555 | rp->task_enable = true; | ||
1556 | mutex_unlock(&rp->task_lock); | ||
1396 | } | 1557 | } |
1397 | 1558 | ||
1398 | static int rhine_open(struct net_device *dev) | 1559 | static int rhine_open(struct net_device *dev) |
@@ -1406,8 +1567,7 @@ static int rhine_open(struct net_device *dev) | |||
1406 | if (rc) | 1567 | if (rc) |
1407 | return rc; | 1568 | return rc; |
1408 | 1569 | ||
1409 | if (debug > 1) | 1570 | netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq); |
1410 | netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq); | ||
1411 | 1571 | ||
1412 | rc = alloc_ring(dev); | 1572 | rc = alloc_ring(dev); |
1413 | if (rc) { | 1573 | if (rc) { |
@@ -1417,11 +1577,12 @@ static int rhine_open(struct net_device *dev) | |||
1417 | alloc_rbufs(dev); | 1577 | alloc_rbufs(dev); |
1418 | alloc_tbufs(dev); | 1578 | alloc_tbufs(dev); |
1419 | rhine_chip_reset(dev); | 1579 | rhine_chip_reset(dev); |
1580 | rhine_task_enable(rp); | ||
1420 | init_registers(dev); | 1581 | init_registers(dev); |
1421 | if (debug > 2) | 1582 | |
1422 | netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n", | 1583 | netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", |
1423 | __func__, ioread16(ioaddr + ChipCmd), | 1584 | __func__, ioread16(ioaddr + ChipCmd), |
1424 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | 1585 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); |
1425 | 1586 | ||
1426 | netif_start_queue(dev); | 1587 | netif_start_queue(dev); |
1427 | 1588 | ||
@@ -1434,11 +1595,12 @@ static void rhine_reset_task(struct work_struct *work) | |||
1434 | reset_task); | 1595 | reset_task); |
1435 | struct net_device *dev = rp->dev; | 1596 | struct net_device *dev = rp->dev; |
1436 | 1597 | ||
1437 | /* protect against concurrent rx interrupts */ | 1598 | mutex_lock(&rp->task_lock); |
1438 | disable_irq(rp->pdev->irq); | ||
1439 | 1599 | ||
1440 | napi_disable(&rp->napi); | 1600 | if (!rp->task_enable) |
1601 | goto out_unlock; | ||
1441 | 1602 | ||
1603 | napi_disable(&rp->napi); | ||
1442 | spin_lock_bh(&rp->lock); | 1604 | spin_lock_bh(&rp->lock); |
1443 | 1605 | ||
1444 | /* clear all descriptors */ | 1606 | /* clear all descriptors */ |
@@ -1452,11 +1614,13 @@ static void rhine_reset_task(struct work_struct *work) | |||
1452 | init_registers(dev); | 1614 | init_registers(dev); |
1453 | 1615 | ||
1454 | spin_unlock_bh(&rp->lock); | 1616 | spin_unlock_bh(&rp->lock); |
1455 | enable_irq(rp->pdev->irq); | ||
1456 | 1617 | ||
1457 | dev->trans_start = jiffies; /* prevent tx timeout */ | 1618 | dev->trans_start = jiffies; /* prevent tx timeout */ |
1458 | dev->stats.tx_errors++; | 1619 | dev->stats.tx_errors++; |
1459 | netif_wake_queue(dev); | 1620 | netif_wake_queue(dev); |
1621 | |||
1622 | out_unlock: | ||
1623 | mutex_unlock(&rp->task_lock); | ||
1460 | } | 1624 | } |
1461 | 1625 | ||
1462 | static void rhine_tx_timeout(struct net_device *dev) | 1626 | static void rhine_tx_timeout(struct net_device *dev) |
@@ -1477,7 +1641,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1477 | struct rhine_private *rp = netdev_priv(dev); | 1641 | struct rhine_private *rp = netdev_priv(dev); |
1478 | void __iomem *ioaddr = rp->base; | 1642 | void __iomem *ioaddr = rp->base; |
1479 | unsigned entry; | 1643 | unsigned entry; |
1480 | unsigned long flags; | ||
1481 | 1644 | ||
1482 | /* Caution: the write order is important here, set the field | 1645 | /* Caution: the write order is important here, set the field |
1483 | with the "ownership" bits last. */ | 1646 | with the "ownership" bits last. */ |
@@ -1529,7 +1692,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1529 | rp->tx_ring[entry].tx_status = 0; | 1692 | rp->tx_ring[entry].tx_status = 0; |
1530 | 1693 | ||
1531 | /* lock eth irq */ | 1694 | /* lock eth irq */ |
1532 | spin_lock_irqsave(&rp->lock, flags); | ||
1533 | wmb(); | 1695 | wmb(); |
1534 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); | 1696 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); |
1535 | wmb(); | 1697 | wmb(); |
@@ -1550,78 +1712,43 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1550 | if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) | 1712 | if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) |
1551 | netif_stop_queue(dev); | 1713 | netif_stop_queue(dev); |
1552 | 1714 | ||
1553 | spin_unlock_irqrestore(&rp->lock, flags); | 1715 | netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", |
1716 | rp->cur_tx - 1, entry); | ||
1554 | 1717 | ||
1555 | if (debug > 4) { | ||
1556 | netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", | ||
1557 | rp->cur_tx-1, entry); | ||
1558 | } | ||
1559 | return NETDEV_TX_OK; | 1718 | return NETDEV_TX_OK; |
1560 | } | 1719 | } |
1561 | 1720 | ||
1721 | static void rhine_irq_disable(struct rhine_private *rp) | ||
1722 | { | ||
1723 | iowrite16(0x0000, rp->base + IntrEnable); | ||
1724 | mmiowb(); | ||
1725 | } | ||
1726 | |||
1562 | /* The interrupt handler does all of the Rx thread work and cleans up | 1727 | /* The interrupt handler does all of the Rx thread work and cleans up |
1563 | after the Tx thread. */ | 1728 | after the Tx thread. */ |
1564 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance) | 1729 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance) |
1565 | { | 1730 | { |
1566 | struct net_device *dev = dev_instance; | 1731 | struct net_device *dev = dev_instance; |
1567 | struct rhine_private *rp = netdev_priv(dev); | 1732 | struct rhine_private *rp = netdev_priv(dev); |
1568 | void __iomem *ioaddr = rp->base; | 1733 | u32 status; |
1569 | u32 intr_status; | ||
1570 | int boguscnt = max_interrupt_work; | ||
1571 | int handled = 0; | 1734 | int handled = 0; |
1572 | 1735 | ||
1573 | while ((intr_status = get_intr_status(dev))) { | 1736 | status = rhine_get_events(rp); |
1574 | handled = 1; | ||
1575 | |||
1576 | /* Acknowledge all of the current interrupt sources ASAP. */ | ||
1577 | if (intr_status & IntrTxDescRace) | ||
1578 | iowrite8(0x08, ioaddr + IntrStatus2); | ||
1579 | iowrite16(intr_status & 0xffff, ioaddr + IntrStatus); | ||
1580 | IOSYNC; | ||
1581 | 1737 | ||
1582 | if (debug > 4) | 1738 | netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); |
1583 | netdev_dbg(dev, "Interrupt, status %08x\n", | ||
1584 | intr_status); | ||
1585 | |||
1586 | if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | | ||
1587 | IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { | ||
1588 | iowrite16(IntrTxAborted | | ||
1589 | IntrTxDone | IntrTxError | IntrTxUnderrun | | ||
1590 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | ||
1591 | ioaddr + IntrEnable); | ||
1592 | |||
1593 | napi_schedule(&rp->napi); | ||
1594 | } | ||
1595 | 1739 | ||
1596 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { | 1740 | if (status & RHINE_EVENT) { |
1597 | if (intr_status & IntrTxErrSummary) { | 1741 | handled = 1; |
1598 | /* Avoid scavenging before Tx engine turned off */ | ||
1599 | RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn)); | ||
1600 | if (debug > 2 && | ||
1601 | ioread8(ioaddr+ChipCmd) & CmdTxOn) | ||
1602 | netdev_warn(dev, | ||
1603 | "%s: Tx engine still on\n", | ||
1604 | __func__); | ||
1605 | } | ||
1606 | rhine_tx(dev); | ||
1607 | } | ||
1608 | 1742 | ||
1609 | /* Abnormal error summary/uncommon events handlers. */ | 1743 | rhine_irq_disable(rp); |
1610 | if (intr_status & (IntrPCIErr | IntrLinkChange | | 1744 | napi_schedule(&rp->napi); |
1611 | IntrStatsMax | IntrTxError | IntrTxAborted | | 1745 | } |
1612 | IntrTxUnderrun | IntrTxDescRace)) | ||
1613 | rhine_error(dev, intr_status); | ||
1614 | 1746 | ||
1615 | if (--boguscnt < 0) { | 1747 | if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { |
1616 | netdev_warn(dev, "Too much work at interrupt, status=%#08x\n", | 1748 | netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", |
1617 | intr_status); | 1749 | status); |
1618 | break; | ||
1619 | } | ||
1620 | } | 1750 | } |
1621 | 1751 | ||
1622 | if (debug > 3) | ||
1623 | netdev_dbg(dev, "exiting interrupt, status=%08x\n", | ||
1624 | ioread16(ioaddr + IntrStatus)); | ||
1625 | return IRQ_RETVAL(handled); | 1752 | return IRQ_RETVAL(handled); |
1626 | } | 1753 | } |
1627 | 1754 | ||
@@ -1632,20 +1759,16 @@ static void rhine_tx(struct net_device *dev) | |||
1632 | struct rhine_private *rp = netdev_priv(dev); | 1759 | struct rhine_private *rp = netdev_priv(dev); |
1633 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; | 1760 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; |
1634 | 1761 | ||
1635 | spin_lock(&rp->lock); | ||
1636 | |||
1637 | /* find and cleanup dirty tx descriptors */ | 1762 | /* find and cleanup dirty tx descriptors */ |
1638 | while (rp->dirty_tx != rp->cur_tx) { | 1763 | while (rp->dirty_tx != rp->cur_tx) { |
1639 | txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); | 1764 | txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); |
1640 | if (debug > 6) | 1765 | netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", |
1641 | netdev_dbg(dev, "Tx scavenge %d status %08x\n", | 1766 | entry, txstatus); |
1642 | entry, txstatus); | ||
1643 | if (txstatus & DescOwn) | 1767 | if (txstatus & DescOwn) |
1644 | break; | 1768 | break; |
1645 | if (txstatus & 0x8000) { | 1769 | if (txstatus & 0x8000) { |
1646 | if (debug > 1) | 1770 | netif_dbg(rp, tx_done, dev, |
1647 | netdev_dbg(dev, "Transmit error, Tx status %08x\n", | 1771 | "Transmit error, Tx status %08x\n", txstatus); |
1648 | txstatus); | ||
1649 | dev->stats.tx_errors++; | 1772 | dev->stats.tx_errors++; |
1650 | if (txstatus & 0x0400) | 1773 | if (txstatus & 0x0400) |
1651 | dev->stats.tx_carrier_errors++; | 1774 | dev->stats.tx_carrier_errors++; |
@@ -1667,10 +1790,8 @@ static void rhine_tx(struct net_device *dev) | |||
1667 | dev->stats.collisions += (txstatus >> 3) & 0x0F; | 1790 | dev->stats.collisions += (txstatus >> 3) & 0x0F; |
1668 | else | 1791 | else |
1669 | dev->stats.collisions += txstatus & 0x0F; | 1792 | dev->stats.collisions += txstatus & 0x0F; |
1670 | if (debug > 6) | 1793 | netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", |
1671 | netdev_dbg(dev, "collisions: %1.1x:%1.1x\n", | 1794 | (txstatus >> 3) & 0xF, txstatus & 0xF); |
1672 | (txstatus >> 3) & 0xF, | ||
1673 | txstatus & 0xF); | ||
1674 | dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; | 1795 | dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; |
1675 | dev->stats.tx_packets++; | 1796 | dev->stats.tx_packets++; |
1676 | } | 1797 | } |
@@ -1687,8 +1808,6 @@ static void rhine_tx(struct net_device *dev) | |||
1687 | } | 1808 | } |
1688 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) | 1809 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) |
1689 | netif_wake_queue(dev); | 1810 | netif_wake_queue(dev); |
1690 | |||
1691 | spin_unlock(&rp->lock); | ||
1692 | } | 1811 | } |
1693 | 1812 | ||
1694 | /** | 1813 | /** |
@@ -1713,11 +1832,8 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1713 | int count; | 1832 | int count; |
1714 | int entry = rp->cur_rx % RX_RING_SIZE; | 1833 | int entry = rp->cur_rx % RX_RING_SIZE; |
1715 | 1834 | ||
1716 | if (debug > 4) { | 1835 | netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, |
1717 | netdev_dbg(dev, "%s(), entry %d status %08x\n", | 1836 | entry, le32_to_cpu(rp->rx_head_desc->rx_status)); |
1718 | __func__, entry, | ||
1719 | le32_to_cpu(rp->rx_head_desc->rx_status)); | ||
1720 | } | ||
1721 | 1837 | ||
1722 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | 1838 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ |
1723 | for (count = 0; count < limit; ++count) { | 1839 | for (count = 0; count < limit; ++count) { |
@@ -1729,9 +1845,8 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1729 | if (desc_status & DescOwn) | 1845 | if (desc_status & DescOwn) |
1730 | break; | 1846 | break; |
1731 | 1847 | ||
1732 | if (debug > 4) | 1848 | netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, |
1733 | netdev_dbg(dev, "%s() status is %08x\n", | 1849 | desc_status); |
1734 | __func__, desc_status); | ||
1735 | 1850 | ||
1736 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { | 1851 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { |
1737 | if ((desc_status & RxWholePkt) != RxWholePkt) { | 1852 | if ((desc_status & RxWholePkt) != RxWholePkt) { |
@@ -1747,9 +1862,9 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1747 | dev->stats.rx_length_errors++; | 1862 | dev->stats.rx_length_errors++; |
1748 | } else if (desc_status & RxErr) { | 1863 | } else if (desc_status & RxErr) { |
1749 | /* There was an error. */ | 1864 | /* There was an error. */ |
1750 | if (debug > 2) | 1865 | netif_dbg(rp, rx_err, dev, |
1751 | netdev_dbg(dev, "%s() Rx error was %08x\n", | 1866 | "%s() Rx error %08x\n", __func__, |
1752 | __func__, desc_status); | 1867 | desc_status); |
1753 | dev->stats.rx_errors++; | 1868 | dev->stats.rx_errors++; |
1754 | if (desc_status & 0x0030) | 1869 | if (desc_status & 0x0030) |
1755 | dev->stats.rx_length_errors++; | 1870 | dev->stats.rx_length_errors++; |
@@ -1839,19 +1954,6 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1839 | return count; | 1954 | return count; |
1840 | } | 1955 | } |
1841 | 1956 | ||
1842 | /* | ||
1843 | * Clears the "tally counters" for CRC errors and missed frames(?). | ||
1844 | * It has been reported that some chips need a write of 0 to clear | ||
1845 | * these, for others the counters are set to 1 when written to and | ||
1846 | * instead cleared when read. So we clear them both ways ... | ||
1847 | */ | ||
1848 | static inline void clear_tally_counters(void __iomem *ioaddr) | ||
1849 | { | ||
1850 | iowrite32(0, ioaddr + RxMissed); | ||
1851 | ioread16(ioaddr + RxCRCErrs); | ||
1852 | ioread16(ioaddr + RxMissed); | ||
1853 | } | ||
1854 | |||
1855 | static void rhine_restart_tx(struct net_device *dev) { | 1957 | static void rhine_restart_tx(struct net_device *dev) { |
1856 | struct rhine_private *rp = netdev_priv(dev); | 1958 | struct rhine_private *rp = netdev_priv(dev); |
1857 | void __iomem *ioaddr = rp->base; | 1959 | void __iomem *ioaddr = rp->base; |
@@ -1862,7 +1964,7 @@ static void rhine_restart_tx(struct net_device *dev) { | |||
1862 | * If new errors occurred, we need to sort them out before doing Tx. | 1964 | * If new errors occurred, we need to sort them out before doing Tx. |
1863 | * In that case the ISR will be back here RSN anyway. | 1965 | * In that case the ISR will be back here RSN anyway. |
1864 | */ | 1966 | */ |
1865 | intr_status = get_intr_status(dev); | 1967 | intr_status = rhine_get_events(rp); |
1866 | 1968 | ||
1867 | if ((intr_status & IntrTxErrSummary) == 0) { | 1969 | if ((intr_status & IntrTxErrSummary) == 0) { |
1868 | 1970 | ||
@@ -1883,79 +1985,50 @@ static void rhine_restart_tx(struct net_device *dev) { | |||
1883 | } | 1985 | } |
1884 | else { | 1986 | else { |
1885 | /* This should never happen */ | 1987 | /* This should never happen */ |
1886 | if (debug > 1) | 1988 | netif_warn(rp, tx_err, dev, "another error occurred %08x\n", |
1887 | netdev_warn(dev, "%s() Another error occurred %08x\n", | 1989 | intr_status); |
1888 | __func__, intr_status); | ||
1889 | } | 1990 | } |
1890 | 1991 | ||
1891 | } | 1992 | } |
1892 | 1993 | ||
1893 | static void rhine_error(struct net_device *dev, int intr_status) | 1994 | static void rhine_slow_event_task(struct work_struct *work) |
1894 | { | 1995 | { |
1895 | struct rhine_private *rp = netdev_priv(dev); | 1996 | struct rhine_private *rp = |
1896 | void __iomem *ioaddr = rp->base; | 1997 | container_of(work, struct rhine_private, slow_event_task); |
1998 | struct net_device *dev = rp->dev; | ||
1999 | u32 intr_status; | ||
1897 | 2000 | ||
1898 | spin_lock(&rp->lock); | 2001 | mutex_lock(&rp->task_lock); |
2002 | |||
2003 | if (!rp->task_enable) | ||
2004 | goto out_unlock; | ||
2005 | |||
2006 | intr_status = rhine_get_events(rp); | ||
2007 | rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); | ||
1899 | 2008 | ||
1900 | if (intr_status & IntrLinkChange) | 2009 | if (intr_status & IntrLinkChange) |
1901 | rhine_check_media(dev, 0); | 2010 | rhine_check_media(dev, 0); |
1902 | if (intr_status & IntrStatsMax) { | ||
1903 | dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); | ||
1904 | dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); | ||
1905 | clear_tally_counters(ioaddr); | ||
1906 | } | ||
1907 | if (intr_status & IntrTxAborted) { | ||
1908 | if (debug > 1) | ||
1909 | netdev_info(dev, "Abort %08x, frame dropped\n", | ||
1910 | intr_status); | ||
1911 | } | ||
1912 | if (intr_status & IntrTxUnderrun) { | ||
1913 | if (rp->tx_thresh < 0xE0) | ||
1914 | BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); | ||
1915 | if (debug > 1) | ||
1916 | netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n", | ||
1917 | rp->tx_thresh); | ||
1918 | } | ||
1919 | if (intr_status & IntrTxDescRace) { | ||
1920 | if (debug > 2) | ||
1921 | netdev_info(dev, "Tx descriptor write-back race\n"); | ||
1922 | } | ||
1923 | if ((intr_status & IntrTxError) && | ||
1924 | (intr_status & (IntrTxAborted | | ||
1925 | IntrTxUnderrun | IntrTxDescRace)) == 0) { | ||
1926 | if (rp->tx_thresh < 0xE0) { | ||
1927 | BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); | ||
1928 | } | ||
1929 | if (debug > 1) | ||
1930 | netdev_info(dev, "Unspecified error. Tx threshold now %02x\n", | ||
1931 | rp->tx_thresh); | ||
1932 | } | ||
1933 | if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace | | ||
1934 | IntrTxError)) | ||
1935 | rhine_restart_tx(dev); | ||
1936 | |||
1937 | if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun | | ||
1938 | IntrTxError | IntrTxAborted | IntrNormalSummary | | ||
1939 | IntrTxDescRace)) { | ||
1940 | if (debug > 1) | ||
1941 | netdev_err(dev, "Something Wicked happened! %08x\n", | ||
1942 | intr_status); | ||
1943 | } | ||
1944 | 2011 | ||
1945 | spin_unlock(&rp->lock); | 2012 | if (intr_status & IntrPCIErr) |
2013 | netif_warn(rp, hw, dev, "PCI error\n"); | ||
2014 | |||
2015 | napi_disable(&rp->napi); | ||
2016 | rhine_irq_disable(rp); | ||
2017 | /* Slow and safe. Consider __napi_schedule as a replacement ? */ | ||
2018 | napi_enable(&rp->napi); | ||
2019 | napi_schedule(&rp->napi); | ||
2020 | |||
2021 | out_unlock: | ||
2022 | mutex_unlock(&rp->task_lock); | ||
1946 | } | 2023 | } |
1947 | 2024 | ||
1948 | static struct net_device_stats *rhine_get_stats(struct net_device *dev) | 2025 | static struct net_device_stats *rhine_get_stats(struct net_device *dev) |
1949 | { | 2026 | { |
1950 | struct rhine_private *rp = netdev_priv(dev); | 2027 | struct rhine_private *rp = netdev_priv(dev); |
1951 | void __iomem *ioaddr = rp->base; | ||
1952 | unsigned long flags; | ||
1953 | 2028 | ||
1954 | spin_lock_irqsave(&rp->lock, flags); | 2029 | spin_lock_bh(&rp->lock); |
1955 | dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); | 2030 | rhine_update_rx_crc_and_missed_errord(rp); |
1956 | dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); | 2031 | spin_unlock_bh(&rp->lock); |
1957 | clear_tally_counters(ioaddr); | ||
1958 | spin_unlock_irqrestore(&rp->lock, flags); | ||
1959 | 2032 | ||
1960 | return &dev->stats; | 2033 | return &dev->stats; |
1961 | } | 2034 | } |
@@ -2022,9 +2095,9 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
2022 | struct rhine_private *rp = netdev_priv(dev); | 2095 | struct rhine_private *rp = netdev_priv(dev); |
2023 | int rc; | 2096 | int rc; |
2024 | 2097 | ||
2025 | spin_lock_irq(&rp->lock); | 2098 | mutex_lock(&rp->task_lock); |
2026 | rc = mii_ethtool_gset(&rp->mii_if, cmd); | 2099 | rc = mii_ethtool_gset(&rp->mii_if, cmd); |
2027 | spin_unlock_irq(&rp->lock); | 2100 | mutex_unlock(&rp->task_lock); |
2028 | 2101 | ||
2029 | return rc; | 2102 | return rc; |
2030 | } | 2103 | } |
@@ -2034,10 +2107,10 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
2034 | struct rhine_private *rp = netdev_priv(dev); | 2107 | struct rhine_private *rp = netdev_priv(dev); |
2035 | int rc; | 2108 | int rc; |
2036 | 2109 | ||
2037 | spin_lock_irq(&rp->lock); | 2110 | mutex_lock(&rp->task_lock); |
2038 | rc = mii_ethtool_sset(&rp->mii_if, cmd); | 2111 | rc = mii_ethtool_sset(&rp->mii_if, cmd); |
2039 | spin_unlock_irq(&rp->lock); | ||
2040 | rhine_set_carrier(&rp->mii_if); | 2112 | rhine_set_carrier(&rp->mii_if); |
2113 | mutex_unlock(&rp->task_lock); | ||
2041 | 2114 | ||
2042 | return rc; | 2115 | return rc; |
2043 | } | 2116 | } |
@@ -2058,12 +2131,16 @@ static u32 netdev_get_link(struct net_device *dev) | |||
2058 | 2131 | ||
2059 | static u32 netdev_get_msglevel(struct net_device *dev) | 2132 | static u32 netdev_get_msglevel(struct net_device *dev) |
2060 | { | 2133 | { |
2061 | return debug; | 2134 | struct rhine_private *rp = netdev_priv(dev); |
2135 | |||
2136 | return rp->msg_enable; | ||
2062 | } | 2137 | } |
2063 | 2138 | ||
2064 | static void netdev_set_msglevel(struct net_device *dev, u32 value) | 2139 | static void netdev_set_msglevel(struct net_device *dev, u32 value) |
2065 | { | 2140 | { |
2066 | debug = value; | 2141 | struct rhine_private *rp = netdev_priv(dev); |
2142 | |||
2143 | rp->msg_enable = value; | ||
2067 | } | 2144 | } |
2068 | 2145 | ||
2069 | static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 2146 | static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
@@ -2119,10 +2196,10 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2119 | if (!netif_running(dev)) | 2196 | if (!netif_running(dev)) |
2120 | return -EINVAL; | 2197 | return -EINVAL; |
2121 | 2198 | ||
2122 | spin_lock_irq(&rp->lock); | 2199 | mutex_lock(&rp->task_lock); |
2123 | rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); | 2200 | rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); |
2124 | spin_unlock_irq(&rp->lock); | ||
2125 | rhine_set_carrier(&rp->mii_if); | 2201 | rhine_set_carrier(&rp->mii_if); |
2202 | mutex_unlock(&rp->task_lock); | ||
2126 | 2203 | ||
2127 | return rc; | 2204 | return rc; |
2128 | } | 2205 | } |
@@ -2132,27 +2209,21 @@ static int rhine_close(struct net_device *dev) | |||
2132 | struct rhine_private *rp = netdev_priv(dev); | 2209 | struct rhine_private *rp = netdev_priv(dev); |
2133 | void __iomem *ioaddr = rp->base; | 2210 | void __iomem *ioaddr = rp->base; |
2134 | 2211 | ||
2212 | rhine_task_disable(rp); | ||
2135 | napi_disable(&rp->napi); | 2213 | napi_disable(&rp->napi); |
2136 | cancel_work_sync(&rp->reset_task); | ||
2137 | netif_stop_queue(dev); | 2214 | netif_stop_queue(dev); |
2138 | 2215 | ||
2139 | spin_lock_irq(&rp->lock); | 2216 | netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", |
2140 | 2217 | ioread16(ioaddr + ChipCmd)); | |
2141 | if (debug > 1) | ||
2142 | netdev_dbg(dev, "Shutting down ethercard, status was %04x\n", | ||
2143 | ioread16(ioaddr + ChipCmd)); | ||
2144 | 2218 | ||
2145 | /* Switch to loopback mode to avoid hardware races. */ | 2219 | /* Switch to loopback mode to avoid hardware races. */ |
2146 | iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); | 2220 | iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); |
2147 | 2221 | ||
2148 | /* Disable interrupts by clearing the interrupt mask. */ | 2222 | rhine_irq_disable(rp); |
2149 | iowrite16(0x0000, ioaddr + IntrEnable); | ||
2150 | 2223 | ||
2151 | /* Stop the chip's Tx and Rx processes. */ | 2224 | /* Stop the chip's Tx and Rx processes. */ |
2152 | iowrite16(CmdStop, ioaddr + ChipCmd); | 2225 | iowrite16(CmdStop, ioaddr + ChipCmd); |
2153 | 2226 | ||
2154 | spin_unlock_irq(&rp->lock); | ||
2155 | |||
2156 | free_irq(rp->pdev->irq, dev); | 2227 | free_irq(rp->pdev->irq, dev); |
2157 | free_rbufs(dev); | 2228 | free_rbufs(dev); |
2158 | free_tbufs(dev); | 2229 | free_tbufs(dev); |
@@ -2192,6 +2263,8 @@ static void rhine_shutdown (struct pci_dev *pdev) | |||
2192 | if (rp->quirks & rq6patterns) | 2263 | if (rp->quirks & rq6patterns) |
2193 | iowrite8(0x04, ioaddr + WOLcgClr); | 2264 | iowrite8(0x04, ioaddr + WOLcgClr); |
2194 | 2265 | ||
2266 | spin_lock(&rp->lock); | ||
2267 | |||
2195 | if (rp->wolopts & WAKE_MAGIC) { | 2268 | if (rp->wolopts & WAKE_MAGIC) { |
2196 | iowrite8(WOLmagic, ioaddr + WOLcrSet); | 2269 | iowrite8(WOLmagic, ioaddr + WOLcrSet); |
2197 | /* | 2270 | /* |
@@ -2216,58 +2289,46 @@ static void rhine_shutdown (struct pci_dev *pdev) | |||
2216 | iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); | 2289 | iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); |
2217 | } | 2290 | } |
2218 | 2291 | ||
2219 | /* Hit power state D3 (sleep) */ | 2292 | spin_unlock(&rp->lock); |
2220 | if (!avoid_D3) | ||
2221 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); | ||
2222 | 2293 | ||
2223 | /* TODO: Check use of pci_enable_wake() */ | 2294 | if (system_state == SYSTEM_POWER_OFF && !avoid_D3) { |
2295 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); | ||
2224 | 2296 | ||
2297 | pci_wake_from_d3(pdev, true); | ||
2298 | pci_set_power_state(pdev, PCI_D3hot); | ||
2299 | } | ||
2225 | } | 2300 | } |
2226 | 2301 | ||
2227 | #ifdef CONFIG_PM | 2302 | #ifdef CONFIG_PM_SLEEP |
2228 | static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) | 2303 | static int rhine_suspend(struct device *device) |
2229 | { | 2304 | { |
2305 | struct pci_dev *pdev = to_pci_dev(device); | ||
2230 | struct net_device *dev = pci_get_drvdata(pdev); | 2306 | struct net_device *dev = pci_get_drvdata(pdev); |
2231 | struct rhine_private *rp = netdev_priv(dev); | 2307 | struct rhine_private *rp = netdev_priv(dev); |
2232 | unsigned long flags; | ||
2233 | 2308 | ||
2234 | if (!netif_running(dev)) | 2309 | if (!netif_running(dev)) |
2235 | return 0; | 2310 | return 0; |
2236 | 2311 | ||
2312 | rhine_task_disable(rp); | ||
2313 | rhine_irq_disable(rp); | ||
2237 | napi_disable(&rp->napi); | 2314 | napi_disable(&rp->napi); |
2238 | 2315 | ||
2239 | netif_device_detach(dev); | 2316 | netif_device_detach(dev); |
2240 | pci_save_state(pdev); | ||
2241 | 2317 | ||
2242 | spin_lock_irqsave(&rp->lock, flags); | ||
2243 | rhine_shutdown(pdev); | 2318 | rhine_shutdown(pdev); |
2244 | spin_unlock_irqrestore(&rp->lock, flags); | ||
2245 | 2319 | ||
2246 | free_irq(dev->irq, dev); | ||
2247 | return 0; | 2320 | return 0; |
2248 | } | 2321 | } |
2249 | 2322 | ||
2250 | static int rhine_resume(struct pci_dev *pdev) | 2323 | static int rhine_resume(struct device *device) |
2251 | { | 2324 | { |
2325 | struct pci_dev *pdev = to_pci_dev(device); | ||
2252 | struct net_device *dev = pci_get_drvdata(pdev); | 2326 | struct net_device *dev = pci_get_drvdata(pdev); |
2253 | struct rhine_private *rp = netdev_priv(dev); | 2327 | struct rhine_private *rp = netdev_priv(dev); |
2254 | unsigned long flags; | ||
2255 | int ret; | ||
2256 | 2328 | ||
2257 | if (!netif_running(dev)) | 2329 | if (!netif_running(dev)) |
2258 | return 0; | 2330 | return 0; |
2259 | 2331 | ||
2260 | if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) | ||
2261 | netdev_err(dev, "request_irq failed\n"); | ||
2262 | |||
2263 | ret = pci_set_power_state(pdev, PCI_D0); | ||
2264 | if (debug > 1) | ||
2265 | netdev_info(dev, "Entering power state D0 %s (%d)\n", | ||
2266 | ret ? "failed" : "succeeded", ret); | ||
2267 | |||
2268 | pci_restore_state(pdev); | ||
2269 | |||
2270 | spin_lock_irqsave(&rp->lock, flags); | ||
2271 | #ifdef USE_MMIO | 2332 | #ifdef USE_MMIO |
2272 | enable_mmio(rp->pioaddr, rp->quirks); | 2333 | enable_mmio(rp->pioaddr, rp->quirks); |
2273 | #endif | 2334 | #endif |
@@ -2276,25 +2337,32 @@ static int rhine_resume(struct pci_dev *pdev) | |||
2276 | free_rbufs(dev); | 2337 | free_rbufs(dev); |
2277 | alloc_tbufs(dev); | 2338 | alloc_tbufs(dev); |
2278 | alloc_rbufs(dev); | 2339 | alloc_rbufs(dev); |
2340 | rhine_task_enable(rp); | ||
2341 | spin_lock_bh(&rp->lock); | ||
2279 | init_registers(dev); | 2342 | init_registers(dev); |
2280 | spin_unlock_irqrestore(&rp->lock, flags); | 2343 | spin_unlock_bh(&rp->lock); |
2281 | 2344 | ||
2282 | netif_device_attach(dev); | 2345 | netif_device_attach(dev); |
2283 | 2346 | ||
2284 | return 0; | 2347 | return 0; |
2285 | } | 2348 | } |
2286 | #endif /* CONFIG_PM */ | 2349 | |
2350 | static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); | ||
2351 | #define RHINE_PM_OPS (&rhine_pm_ops) | ||
2352 | |||
2353 | #else | ||
2354 | |||
2355 | #define RHINE_PM_OPS NULL | ||
2356 | |||
2357 | #endif /* !CONFIG_PM_SLEEP */ | ||
2287 | 2358 | ||
2288 | static struct pci_driver rhine_driver = { | 2359 | static struct pci_driver rhine_driver = { |
2289 | .name = DRV_NAME, | 2360 | .name = DRV_NAME, |
2290 | .id_table = rhine_pci_tbl, | 2361 | .id_table = rhine_pci_tbl, |
2291 | .probe = rhine_init_one, | 2362 | .probe = rhine_init_one, |
2292 | .remove = __devexit_p(rhine_remove_one), | 2363 | .remove = __devexit_p(rhine_remove_one), |
2293 | #ifdef CONFIG_PM | 2364 | .shutdown = rhine_shutdown, |
2294 | .suspend = rhine_suspend, | 2365 | .driver.pm = RHINE_PM_OPS, |
2295 | .resume = rhine_resume, | ||
2296 | #endif /* CONFIG_PM */ | ||
2297 | .shutdown = rhine_shutdown, | ||
2298 | }; | 2366 | }; |
2299 | 2367 | ||
2300 | static struct dmi_system_id __initdata rhine_dmi_table[] = { | 2368 | static struct dmi_system_id __initdata rhine_dmi_table[] = { |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index f45c85a84261..72a854f05bb8 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -529,7 +529,7 @@ static int ixp4xx_mdio_register(void) | |||
529 | mdio_bus->name = "IXP4xx MII Bus"; | 529 | mdio_bus->name = "IXP4xx MII Bus"; |
530 | mdio_bus->read = &ixp4xx_mdio_read; | 530 | mdio_bus->read = &ixp4xx_mdio_read; |
531 | mdio_bus->write = &ixp4xx_mdio_write; | 531 | mdio_bus->write = &ixp4xx_mdio_write; |
532 | strcpy(mdio_bus->id, "0"); | 532 | snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0"); |
533 | 533 | ||
534 | if ((err = mdiobus_register(mdio_bus))) | 534 | if ((err = mdiobus_register(mdio_bus))) |
535 | mdiobus_free(mdio_bus); | 535 | mdiobus_free(mdio_bus); |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 9663e0ba6003..ba3c59147aa7 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -1159,7 +1159,7 @@ static void rx_timestamp_work(struct work_struct *work) | |||
1159 | } | 1159 | } |
1160 | } | 1160 | } |
1161 | spin_unlock_irqrestore(&dp83640->rx_lock, flags); | 1161 | spin_unlock_irqrestore(&dp83640->rx_lock, flags); |
1162 | netif_rx(skb); | 1162 | netif_rx_ni(skb); |
1163 | } | 1163 | } |
1164 | 1164 | ||
1165 | /* Clear out expired time stamps. */ | 1165 | /* Clear out expired time stamps. */ |
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index 1fa4d73c3cca..633680d0828e 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c | |||
@@ -220,7 +220,7 @@ static int __init fixed_mdio_bus_init(void) | |||
220 | goto err_mdiobus_reg; | 220 | goto err_mdiobus_reg; |
221 | } | 221 | } |
222 | 222 | ||
223 | snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "0"); | 223 | snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); |
224 | fmb->mii_bus->name = "Fixed MDIO Bus"; | 224 | fmb->mii_bus->name = "Fixed MDIO Bus"; |
225 | fmb->mii_bus->priv = fmb; | 225 | fmb->mii_bus->priv = fmb; |
226 | fmb->mii_bus->parent = &pdev->dev; | 226 | fmb->mii_bus->parent = &pdev->dev; |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 89c5a3eccc12..50e8e5e74465 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -116,7 +116,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, | |||
116 | if (!new_bus->irq[i]) | 116 | if (!new_bus->irq[i]) |
117 | new_bus->irq[i] = PHY_POLL; | 117 | new_bus->irq[i] = PHY_POLL; |
118 | 118 | ||
119 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id); | 119 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); |
120 | 120 | ||
121 | if (gpio_request(bitbang->mdc, "mdc")) | 121 | if (gpio_request(bitbang->mdc, "mdc")) |
122 | goto out_free_bus; | 122 | goto out_free_bus; |
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index bd12ba941be5..826d961f39f7 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c | |||
@@ -118,7 +118,8 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev) | |||
118 | bus->mii_bus->priv = bus; | 118 | bus->mii_bus->priv = bus; |
119 | bus->mii_bus->irq = bus->phy_irq; | 119 | bus->mii_bus->irq = bus->phy_irq; |
120 | bus->mii_bus->name = "mdio-octeon"; | 120 | bus->mii_bus->name = "mdio-octeon"; |
121 | snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit); | 121 | snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", |
122 | bus->mii_bus->name, bus->unit); | ||
122 | bus->mii_bus->parent = &pdev->dev; | 123 | bus->mii_bus->parent = &pdev->dev; |
123 | 124 | ||
124 | bus->mii_bus->read = octeon_mdiobus_read; | 125 | bus->mii_bus->read = octeon_mdiobus_read; |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6c58da2b882c..88cc5db9affd 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -37,22 +37,36 @@ | |||
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * mdiobus_alloc - allocate a mii_bus structure | 40 | * mdiobus_alloc_size - allocate a mii_bus structure |
41 | * | 41 | * |
42 | * Description: called by a bus driver to allocate an mii_bus | 42 | * Description: called by a bus driver to allocate an mii_bus |
43 | * structure to fill in. | 43 | * structure to fill in. |
44 | * | ||
45 | * 'size' is an extra amount of memory to allocate for private storage. | ||
46 | * If non-zero, then bus->priv points to that memory. | ||
44 | */ | 47 | */ |
45 | struct mii_bus *mdiobus_alloc(void) | 48 | struct mii_bus *mdiobus_alloc_size(size_t size) |
46 | { | 49 | { |
47 | struct mii_bus *bus; | 50 | struct mii_bus *bus; |
51 | size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN); | ||
52 | size_t alloc_size; | ||
53 | |||
54 | /* If we alloc extra space, it should be aligned */ | ||
55 | if (size) | ||
56 | alloc_size = aligned_size + size; | ||
57 | else | ||
58 | alloc_size = sizeof(*bus); | ||
48 | 59 | ||
49 | bus = kzalloc(sizeof(*bus), GFP_KERNEL); | 60 | bus = kzalloc(alloc_size, GFP_KERNEL); |
50 | if (bus != NULL) | 61 | if (bus) { |
51 | bus->state = MDIOBUS_ALLOCATED; | 62 | bus->state = MDIOBUS_ALLOCATED; |
63 | if (size) | ||
64 | bus->priv = (void *)bus + aligned_size; | ||
65 | } | ||
52 | 66 | ||
53 | return bus; | 67 | return bus; |
54 | } | 68 | } |
55 | EXPORT_SYMBOL(mdiobus_alloc); | 69 | EXPORT_SYMBOL(mdiobus_alloc_size); |
56 | 70 | ||
57 | /** | 71 | /** |
58 | * mdiobus_release - mii_bus device release callback | 72 | * mdiobus_release - mii_bus device release callback |
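The new mdiobus_alloc_size() lets a bus driver allocate its mii_bus and driver-private state in a single kzalloc(): the private area starts at a NETDEV_ALIGN-aligned offset inside the same allocation and bus->priv is pointed at it, so no separate allocation or free is needed. A minimal usage sketch with illustrative structure and function names (not from this patch):

	/* illustrative driver-private state */
	struct my_mdio_priv {
		void __iomem *regs;
		int irq;
	};

	static struct mii_bus *my_mdio_alloc(void)
	{
		struct mii_bus *bus;
		struct my_mdio_priv *priv;

		/* one allocation covers the mii_bus plus the private area */
		bus = mdiobus_alloc_size(sizeof(*priv));
		if (!bus)
			return NULL;

		priv = bus->priv;	/* already points at the aligned tail */
		bus->name = "my-mdio";
		snprintf(bus->id, MII_BUS_ID_SIZE, "my-mdio-%x", 0);
		return bus;
	}

Freeing is unchanged: a single mdiobus_free(bus) releases both the bus and the embedded private data.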
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index c1c9293c2bbf..df884dde2a51 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c | |||
@@ -585,8 +585,8 @@ static int pptp_create(struct net *net, struct socket *sock) | |||
585 | po = pppox_sk(sk); | 585 | po = pppox_sk(sk); |
586 | opt = &po->proto.pptp; | 586 | opt = &po->proto.pptp; |
587 | 587 | ||
588 | opt->seq_sent = 0; opt->seq_recv = 0; | 588 | opt->seq_sent = 0; opt->seq_recv = 0xffffffff; |
589 | opt->ack_recv = 0; opt->ack_sent = 0; | 589 | opt->ack_recv = 0; opt->ack_sent = 0xffffffff; |
590 | 590 | ||
591 | error = 0; | 591 | error = 0; |
592 | out: | 592 | out: |
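Initialising seq_recv and ack_sent to 0xffffffff rather than 0 encodes "nothing received or acknowledged yet" in 32-bit modular sequence space, so the first GRE data packet, which legitimately carries sequence number 0, still compares as newer than the last one seen; with the old initial value of 0 it compared as not-newer and was dropped. An illustrative wrap-aware comparison (the driver's own helper may be written differently):

	/* serial-number style "a is newer than b" test */
	static inline int seq_after(u32 a, u32 b)
	{
		return (s32)(a - b) > 0;
	}

	/* seq_recv == 0xffffffff: seq_after(0, 0xffffffff) -> (s32)1 > 0, accepted
	 * seq_recv == 0:          seq_after(0, 0)          -> 0,          dropped */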
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index d0937c4634c9..8e84f5bdd6ca 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c | |||
@@ -978,6 +978,7 @@ static int ax88772_link_reset(struct usbnet *dev) | |||
978 | 978 | ||
979 | static int ax88772_reset(struct usbnet *dev) | 979 | static int ax88772_reset(struct usbnet *dev) |
980 | { | 980 | { |
981 | struct asix_data *data = (struct asix_data *)&dev->data; | ||
981 | int ret, embd_phy; | 982 | int ret, embd_phy; |
982 | u16 rx_ctl; | 983 | u16 rx_ctl; |
983 | 984 | ||
@@ -1055,6 +1056,13 @@ static int ax88772_reset(struct usbnet *dev) | |||
1055 | goto out; | 1056 | goto out; |
1056 | } | 1057 | } |
1057 | 1058 | ||
1059 | /* Rewrite MAC address */ | ||
1060 | memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN); | ||
1061 | ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN, | ||
1062 | data->mac_addr); | ||
1063 | if (ret < 0) | ||
1064 | goto out; | ||
1065 | |||
1058 | /* Set RX_CTL to default values with 2k buffer, and enable cactus */ | 1066 | /* Set RX_CTL to default values with 2k buffer, and enable cactus */ |
1059 | ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL); | 1067 | ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL); |
1060 | if (ret < 0) | 1068 | if (ret < 0) |
@@ -1320,6 +1328,13 @@ static int ax88178_reset(struct usbnet *dev) | |||
1320 | if (ret < 0) | 1328 | if (ret < 0) |
1321 | return ret; | 1329 | return ret; |
1322 | 1330 | ||
1331 | /* Rewrite MAC address */ | ||
1332 | memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN); | ||
1333 | ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN, | ||
1334 | data->mac_addr); | ||
1335 | if (ret < 0) | ||
1336 | return ret; | ||
1337 | |||
1323 | ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL); | 1338 | ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL); |
1324 | if (ret < 0) | 1339 | if (ret < 0) |
1325 | return ret; | 1340 | return ret; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2589b38b689a..2b0bfb8cca02 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c | |||
@@ -46,7 +46,7 @@ static const int m2ThreshExt_off = 127; | |||
46 | * @chan: | 46 | * @chan: |
47 | * | 47 | * |
48 | * This is the function to change channel on single-chip devices, that is | 48 | * This is the function to change channel on single-chip devices, that is |
49 | * all devices after ar9280. | 49 | * for AR9300 family of chipsets. |
50 | * | 50 | * |
51 | * This function takes the channel value in MHz and sets | 51 | * This function takes the channel value in MHz and sets |
52 | * hardware channel value. Assumes writes have been enabled to analog bus. | 52 | * hardware channel value. Assumes writes have been enabled to analog bus. |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index b30e9fc6433f..171ccf7c972f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -679,7 +679,6 @@ void ath9k_deinit_device(struct ath_softc *sc); | |||
679 | void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); | 679 | void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); |
680 | void ath9k_reload_chainmask_settings(struct ath_softc *sc); | 680 | void ath9k_reload_chainmask_settings(struct ath_softc *sc); |
681 | 681 | ||
682 | void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); | ||
683 | bool ath9k_uses_beacons(int type); | 682 | bool ath9k_uses_beacons(int type); |
684 | 683 | ||
685 | #ifdef CONFIG_ATH9K_PCI | 684 | #ifdef CONFIG_ATH9K_PCI |
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 172e33db7f4c..2f4b48e6fb03 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c | |||
@@ -400,6 +400,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) | |||
400 | ah->noise = ath9k_hw_getchan_noise(ah, chan); | 400 | ah->noise = ath9k_hw_getchan_noise(ah, chan); |
401 | return true; | 401 | return true; |
402 | } | 402 | } |
403 | EXPORT_SYMBOL(ath9k_hw_getnf); | ||
403 | 404 | ||
404 | void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, | 405 | void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, |
405 | struct ath9k_channel *chan) | 406 | struct ath9k_channel *chan) |
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index 05b9dbf81850..3b33996d97df 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include "hw.h" | 20 | #include "hw.h" |
21 | 21 | ||
22 | #define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 | ||
23 | #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 | 22 | #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 |
24 | 23 | ||
25 | #define NUM_NF_READINGS 6 | 24 | #define NUM_NF_READINGS 6 |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e267c92dbfb8..4a00806e2852 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1629,7 +1629,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1629 | 1629 | ||
1630 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { | 1630 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { |
1631 | struct ieee80211_channel *curchan = hw->conf.channel; | 1631 | struct ieee80211_channel *curchan = hw->conf.channel; |
1632 | struct ath9k_channel old_chan; | ||
1633 | int pos = curchan->hw_value; | 1632 | int pos = curchan->hw_value; |
1634 | int old_pos = -1; | 1633 | int old_pos = -1; |
1635 | unsigned long flags; | 1634 | unsigned long flags; |
@@ -1654,11 +1653,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1654 | * Preserve the current channel values, before updating | 1653 | * Preserve the current channel values, before updating |
1655 | * the same channel | 1654 | * the same channel |
1656 | */ | 1655 | */ |
1657 | if (old_pos == pos) { | 1656 | if (ah->curchan && (old_pos == pos)) |
1658 | memcpy(&old_chan, &sc->sc_ah->channels[pos], | 1657 | ath9k_hw_getnf(ah, ah->curchan); |
1659 | sizeof(struct ath9k_channel)); | ||
1660 | ah->curchan = &old_chan; | ||
1661 | } | ||
1662 | 1658 | ||
1663 | ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], | 1659 | ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], |
1664 | curchan, conf->channel_type); | 1660 | curchan, conf->channel_type); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 5a002a21f108..f7eeee1dcdb6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -3119,8 +3119,10 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) | |||
3119 | /* Verify NVRAM bytes */ | 3119 | /* Verify NVRAM bytes */ |
3120 | brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize); | 3120 | brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize); |
3121 | nvram_ularray = kmalloc(varsize, GFP_ATOMIC); | 3121 | nvram_ularray = kmalloc(varsize, GFP_ATOMIC); |
3122 | if (!nvram_ularray) | 3122 | if (!nvram_ularray) { |
3123 | kfree(vbuffer); | ||
3123 | return -ENOMEM; | 3124 | return -ENOMEM; |
3125 | } | ||
3124 | 3126 | ||
3125 | /* Upload image to verify downloaded contents. */ | 3127 | /* Upload image to verify downloaded contents. */ |
3126 | memset(nvram_ularray, 0xaa, varsize); | 3128 | memset(nvram_ularray, 0xaa, varsize); |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c index 6f91a148c222..3fda6b1dcf46 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c | |||
@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, | |||
196 | /* Allocate skb buffer to contain firmware */ | 196 | /* Allocate skb buffer to contain firmware */ |
197 | /* info and tx descriptor info. */ | 197 | /* info and tx descriptor info. */ |
198 | skb = dev_alloc_skb(frag_length); | 198 | skb = dev_alloc_skb(frag_length); |
199 | if (!skb) | ||
200 | return false; | ||
199 | skb_reserve(skb, extra_descoffset); | 201 | skb_reserve(skb, extra_descoffset); |
200 | seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length - | 202 | seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length - |
201 | extra_descoffset)); | 203 | extra_descoffset)); |
@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd, | |||
573 | 575 | ||
574 | len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len); | 576 | len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len); |
575 | skb = dev_alloc_skb(len); | 577 | skb = dev_alloc_skb(len); |
578 | if (!skb) | ||
579 | return false; | ||
576 | cb_desc = (struct rtl_tcb_desc *)(skb->cb); | 580 | cb_desc = (struct rtl_tcb_desc *)(skb->cb); |
577 | cb_desc->queue_index = TXCMD_QUEUE; | 581 | cb_desc->queue_index = TXCMD_QUEUE; |
578 | cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL; | 582 | cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL; |