102 files changed, 2325 insertions, 3200 deletions
diff --git a/Documentation/DocBook/z8530book.tmpl b/Documentation/DocBook/z8530book.tmpl
index 42c75ba71ba2..a42a8a4c7689 100644
--- a/Documentation/DocBook/z8530book.tmpl
+++ b/Documentation/DocBook/z8530book.tmpl
@@ -69,12 +69,6 @@
 device to be used as both a tty interface and as a synchronous
 controller is a project for Linux post the 2.4 release
 </para>
-<para>
-The support code handles most common card configurations and
-supports running both Cisco HDLC and Synchronous PPP. With extra
-glue the frame relay and X.25 protocols can also be used with this
-driver.
-</para>
 </chapter>
 
 <chapter id="Driver_Modes">
@@ -179,35 +173,27 @@
 <para>
 If you wish to use the network interface facilities of the driver,
 then you need to attach a network device to each channel that is
-present and in use. In addition to use the SyncPPP and Cisco HDLC
+present and in use. In addition to use the generic HDLC
 you need to follow some additional plumbing rules. They may seem
 complex but a look at the example hostess_sv11 driver should
 reassure you.
 </para>
 <para>
 The network device used for each channel should be pointed to by
-the netdevice field of each channel. The dev-> priv field of the
+the netdevice field of each channel. The hdlc-> priv field of the
 network device points to your private data - you will need to be
-able to find your ppp device from this. In addition to use the
-sync ppp layer the private data must start with a void * pointer
-to the syncppp structures.
+able to find your private data from this.
 </para>
 <para>
 The way most drivers approach this particular problem is to
 create a structure holding the Z8530 device definition and
-put that and the syncppp pointer into the private field of
-the network device. The network device fields of the channels
-then point back to the network devices. The ppp_device can also
-be put in the private structure conveniently.
+put that into the private field of the network device. The
+network device fields of the channels then point back to the
+network devices.
 </para>
 <para>
-If you wish to use the synchronous ppp then you need to attach
-the syncppp layer to the network device. You should do this before
-you register the network device. The
-<function>sppp_attach</function> requires that the first void *
-pointer in your private data is pointing to an empty struct
-ppp_device. The function fills in the initial data for the
-ppp/hdlc layer.
+If you wish to use the generic HDLC then you need to register
+the HDLC device.
 </para>
 <para>
 Before you register your network device you will also need to
@@ -314,10 +300,10 @@
 buffer in sk_buff format and queues it for transmission. The
 caller must provide the entire packet with the exception of the
 bitstuffing and CRC. This is normally done by the caller via
-the syncppp interface layer. It returns 0 if the buffer has been
+the generic HDLC interface layer. It returns 0 if the buffer has been
 queued and non zero values for queue full. If the function accepts
 the buffer it becomes property of the Z8530 layer and the caller
 should not free it.
 </para>
 <para>
 The function <function>z8530_get_stats</function> returns a pointer
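
The rewritten chapter is terse about what this plumbing looks like in practice. Below is a minimal sketch of the generic HDLC wiring it describes, loosely modeled on the converted hostess_sv11 driver; the my_* names are invented for illustration, while z8530_queue_xmit(), alloc_hdlcdev(), dev_to_hdlc() and register_hdlc_device() are the real helpers the text refers to.

#include <linux/hdlc.h>
#include "z85230.h"

static int my_attach(struct net_device *dev, unsigned short encoding,
                     unsigned short parity)
{
        /* accept only the line discipline the hardware is programmed for */
        if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
                return 0;
        return -EINVAL;
}

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* hdlc->priv was set to the Z8530 channel by alloc_hdlcdev() */
        return z8530_queue_xmit(dev_to_hdlc(dev)->priv, skb);
}

static int my_wire_up_channel(struct z8530_channel *ch)
{
        struct net_device *netdev = alloc_hdlcdev(ch);

        if (!netdev)
                return -ENOMEM;

        dev_to_hdlc(netdev)->attach = my_attach;
        dev_to_hdlc(netdev)->xmit = my_xmit;
        ch->netdevice = netdev;            /* channel points back at its net device */

        return register_hdlc_device(netdev);
}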
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h
new file mode 100644
index 000000000000..bb832584f3c1
--- /dev/null
+++ b/arch/sh/include/asm/sh_eth.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_SH_ETH_H__
+#define __ASM_SH_ETH_H__
+
+enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+
+struct sh_eth_plat_data {
+	int phy;
+	int edmac_endian;
+};
+
+#endif
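
The new header only declares the platform data; a board file is expected to hand it to the driver through a platform device. A hedged example of that hookup is below: the register window, IRQ number and device name are placeholders, not values taken from a real board.

#include <linux/platform_device.h>
#include <asm/sh_eth.h>

static struct sh_eth_plat_data demo_eth_pdata = {
        .phy          = 1,                   /* PHY address on the MDIO bus */
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
};

static struct resource demo_eth_resources[] = {
        {
                .start = 0xfb000000,         /* placeholder register base */
                .end   = 0xfb0003ff,
                .flags = IORESOURCE_MEM,
        }, {
                .start = 57,                 /* placeholder IRQ */
                .flags = IORESOURCE_IRQ,
        },
};

static struct platform_device demo_eth_device = {
        .name          = "sh-eth",           /* assumed driver name */
        .id            = 0,
        .dev           = { .platform_data = &demo_eth_pdata },
        .num_resources = ARRAY_SIZE(demo_eth_resources),
        .resource      = demo_eth_resources,
};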
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d1fceabe3aef..c240562c218b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -232,7 +232,6 @@ typedef struct _mgslpc_info {
 
 /* SPPP/Cisco HDLC device parts */
 int netcount;
-int dosyncppp;
 spinlock_t netlock;
 
 #if SYNCLINK_GENERIC_HDLC
@@ -459,13 +458,11 @@ static int ttymajor=0;
 
 static int debug_level = 0;
 static int maxframe[MAX_DEVICE_COUNT] = {0,};
-static int dosyncppp[MAX_DEVICE_COUNT] = {1,1,1,1};
 
 module_param(break_on_load, bool, 0);
 module_param(ttymajor, int, 0);
 module_param(debug_level, int, 0);
 module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);
 
 MODULE_LICENSE("GPL");
 
@@ -2915,7 +2912,6 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
 if (info->line < MAX_DEVICE_COUNT) {
 if (maxframe[info->line])
 info->max_frame_size = maxframe[info->line];
-info->dosyncppp = dosyncppp[info->line];
 }
 
 mgslpc_device_count++;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index ef6706f09061..500f5176b6ba 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -304,7 +304,6 @@ struct mgsl_struct {
 
 /* generic HDLC device parts */
 int netcount;
-int dosyncppp;
 spinlock_t netlock;
 
 #if SYNCLINK_GENERIC_HDLC
@@ -868,7 +867,6 @@ static int irq[MAX_ISA_DEVICES];
 static int dma[MAX_ISA_DEVICES];
 static int debug_level;
 static int maxframe[MAX_TOTAL_DEVICES];
-static int dosyncppp[MAX_TOTAL_DEVICES];
 static int txdmabufs[MAX_TOTAL_DEVICES];
 static int txholdbufs[MAX_TOTAL_DEVICES];
 
@@ -879,7 +877,6 @@ module_param_array(irq, int, NULL, 0);
 module_param_array(dma, int, NULL, 0);
 module_param(debug_level, int, 0);
 module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);
 module_param_array(txdmabufs, int, NULL, 0);
 module_param_array(txholdbufs, int, NULL, 0);
 
@@ -4258,7 +4255,6 @@ static void mgsl_add_device( struct mgsl_struct *info )
 if (info->line < MAX_TOTAL_DEVICES) {
 if (maxframe[info->line])
 info->max_frame_size = maxframe[info->line];
-info->dosyncppp = dosyncppp[info->line];
 
 if (txdmabufs[info->line]) {
 info->num_tx_dma_buffers = txdmabufs[info->line];
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 3e9058993e41..509c89ac5bd3 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -128,17 +128,14 @@ static int slgt_device_count;
 static int ttymajor;
 static int debug_level;
 static int maxframe[MAX_DEVICES];
-static int dosyncppp[MAX_DEVICES];
 
 module_param(ttymajor, int, 0);
 module_param(debug_level, int, 0);
 module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);
 
 MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
 MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
 MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
-MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable");
 
 /*
 * tty support and callbacks
@@ -349,7 +346,6 @@ struct slgt_info {
 /* SPPP/Cisco HDLC device parts */
 
 int netcount;
-int dosyncppp;
 spinlock_t netlock;
 #if SYNCLINK_GENERIC_HDLC
 struct net_device *netdev;
@@ -3405,7 +3401,6 @@ static void add_device(struct slgt_info *info)
 if (info->line < MAX_DEVICES) {
 if (maxframe[info->line])
 info->max_frame_size = maxframe[info->line];
-info->dosyncppp = dosyncppp[info->line];
 }
 
 slgt_device_count++;
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c0490cbd0db2..6bdb44f7bec2 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -270,7 +270,6 @@ typedef struct _synclinkmp_info {
 
 /* SPPP/Cisco HDLC device parts */
 int netcount;
-int dosyncppp;
 spinlock_t netlock;
 
 #if SYNCLINK_GENERIC_HDLC
@@ -469,13 +468,11 @@ static int ttymajor = 0;
 */
 static int debug_level = 0;
 static int maxframe[MAX_DEVICES] = {0,};
-static int dosyncppp[MAX_DEVICES] = {0,};
 
 module_param(break_on_load, bool, 0);
 module_param(ttymajor, int, 0);
 module_param(debug_level, int, 0);
 module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);
 
 static char *driver_name = "SyncLink MultiPort driver";
 static char *driver_version = "$Revision: 4.38 $";
@@ -3752,7 +3749,6 @@ static void add_device(SLMP_INFO *info)
 if (info->line < MAX_DEVICES) {
 if (maxframe[info->line])
 info->max_frame_size = maxframe[info->line];
-info->dosyncppp = dosyncppp[info->line];
 }
 
 synclinkmp_device_count++;
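
Across all four synclink drivers the per-device dosyncppp switch disappears, leaving maxframe as the remaining per-device array parameter. For reference, the pattern those drivers keep looks roughly like this (names taken from synclink_gt; the helper function is illustrative); at load time the array is filled from a comma-separated list such as maxframe=4096,16384 and indexed by the device's line number.

static int maxframe[MAX_DEVICES];
module_param_array(maxframe, int, NULL, 0);
MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");

static void apply_line_params(struct slgt_info *info)
{
        /* use the entry matching this device, if one was given */
        if (info->line < MAX_DEVICES && maxframe[info->line])
                info->max_frame_size = maxframe[info->line];
}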
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index dc6e474229b1..e2ce41d3828e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -640,10 +640,8 @@ static int init586(struct net_device *dev)
 cfg_cmd->time_low = 0x00;
 cfg_cmd->time_high = 0xf2;
 cfg_cmd->promisc = 0;
-if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) {
+if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
 cfg_cmd->promisc = 1;
-dev->flags |= IFF_PROMISC;
-}
 cfg_cmd->carr_coll = 0x00;
 
 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6aca0c640f13..abc84f765973 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
 struct mc32_local *lp = netdev_priv(dev);
 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
 
-if (dev->flags&IFF_PROMISC)
+if ((dev->flags&IFF_PROMISC) ||
+(dev->flags&IFF_ALLMULTI) ||
+dev->mc_count > 10)
 /* Enable promiscuous mode */
 filt |= 1;
-else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
-{
-dev->flags|=IFF_PROMISC;
-filt |= 1;
-}
 else if(dev->mc_count)
 {
 unsigned char block[62];
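
The 3c523 and 3c527 hunks share a theme: a driver may put its receiver into promiscuous mode, but it must not write IFF_PROMISC back into dev->flags, which belongs to the network core. A sketch of the resulting shape of a set_multicast_list handler is below; the hw_* helpers and the filter-size constant are hypothetical.

static void example_set_rx_mode(struct net_device *dev)
{
        /* decide locally whether the hardware filter can cope */
        bool promisc = (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
                       dev->mc_count > EXAMPLE_HW_MC_SLOTS;

        if (promisc)
                hw_enable_promisc(dev);                              /* hypothetical */
        else
                hw_load_mc_filter(dev, dev->mc_list, dev->mc_count); /* hypothetical */
}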
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8db4e6b89482..491ee16da5c1 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev)
 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
 vp->rx_ring[i].status = 0;	/* Clear complete bit. */
 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
-skb = dev_alloc_skb(PKT_BUF_SZ);
+
+skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
+GFP_KERNEL);
 vp->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;			/* Bad news! */
-skb->dev = dev;		/* Mark as being used by this device. */
-skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+
+skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 }
 if (i != RX_RING_SIZE) {
@@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev)
 struct sk_buff *skb;
 entry = vp->dirty_rx % RX_RING_SIZE;
 if (vp->rx_skbuff[entry] == NULL) {
-skb = dev_alloc_skb(PKT_BUF_SZ);
+skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
 if (skb == NULL) {
 static unsigned long last_jif;
 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev)
 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
 break;			/* Bad news! */
 }
-skb->dev = dev;		/* Mark as being used by this device. */
-skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+
+skb_reserve(skb, NET_IP_ALIGN);
 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 vp->rx_skbuff[entry] = skb;
 }
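
The 3c59x changes switch receive-buffer allocation to the netdev_alloc_skb() helpers and reserve NET_IP_ALIGN bytes so that, after the 14-byte Ethernet header, the IP header starts on an aligned boundary. A minimal sketch of that refill pattern, with an invented function name:

static struct sk_buff *example_rx_refill(struct net_device *dev, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (!skb)
                return NULL;    /* caller retries later, as 3c59x does from a timer */

        /* netdev_alloc_skb() already set skb->dev, so the old assignment is gone */
        skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}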
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index dc5d2584bd0c..f72a2e87d569 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -9,42 +9,39 @@ int ei_open(struct net_device *dev)
 {
 return __ei_open(dev);
 }
+EXPORT_SYMBOL(ei_open);
 
 int ei_close(struct net_device *dev)
 {
 return __ei_close(dev);
 }
+EXPORT_SYMBOL(ei_close);
 
 irqreturn_t ei_interrupt(int irq, void *dev_id)
 {
 return __ei_interrupt(irq, dev_id);
 }
+EXPORT_SYMBOL(ei_interrupt);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
 __ei_poll(dev);
 }
+EXPORT_SYMBOL(ei_poll);
 #endif
 
 struct net_device *__alloc_ei_netdev(int size)
 {
 return ____alloc_ei_netdev(size);
 }
+EXPORT_SYMBOL(__alloc_ei_netdev);
 
 void NS8390_init(struct net_device *dev, int startp)
 {
 __NS8390_init(dev, startp);
 }
-
-EXPORT_SYMBOL(ei_open);
-EXPORT_SYMBOL(ei_close);
-EXPORT_SYMBOL(ei_interrupt);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-EXPORT_SYMBOL(ei_poll);
-#endif
 EXPORT_SYMBOL(NS8390_init);
-EXPORT_SYMBOL(__alloc_ei_netdev);
 
 #if defined(MODULE)
 
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 71f19884c4b1..4c6eea4611a2 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -4,9 +4,9 @@ static const char version[] =
 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
 #define ei_inb(_p) inb(_p)
-#define ei_outb(_v,_p) outb(_v,_p)
+#define ei_outb(_v, _p) outb(_v, _p)
 #define ei_inb_p(_p) inb_p(_p)
-#define ei_outb_p(_v,_p) outb_p(_v,_p)
+#define ei_outb_p(_v, _p) outb_p(_v, _p)
 
 #include "lib8390.c"
 
@@ -14,42 +14,39 @@ int eip_open(struct net_device *dev)
 {
 return __ei_open(dev);
 }
+EXPORT_SYMBOL(eip_open);
 
 int eip_close(struct net_device *dev)
 {
 return __ei_close(dev);
 }
+EXPORT_SYMBOL(eip_close);
 
 irqreturn_t eip_interrupt(int irq, void *dev_id)
 {
 return __ei_interrupt(irq, dev_id);
 }
+EXPORT_SYMBOL(eip_interrupt);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void eip_poll(struct net_device *dev)
 {
 __ei_poll(dev);
 }
+EXPORT_SYMBOL(eip_poll);
 #endif
 
 struct net_device *__alloc_eip_netdev(int size)
 {
 return ____alloc_ei_netdev(size);
 }
+EXPORT_SYMBOL(__alloc_eip_netdev);
 
 void NS8390p_init(struct net_device *dev, int startp)
 {
-return __NS8390_init(dev, startp);
+__NS8390_init(dev, startp);
 }
-
-EXPORT_SYMBOL(eip_open);
-EXPORT_SYMBOL(eip_close);
-EXPORT_SYMBOL(eip_interrupt);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-EXPORT_SYMBOL(eip_poll);
-#endif
 EXPORT_SYMBOL(NS8390p_init);
-EXPORT_SYMBOL(__alloc_eip_netdev);
 
 #if defined(MODULE)
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8a03875ec877..4b4cb2bf4f11 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -510,14 +510,15 @@ config STNIC
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
 	depends on SUPERH && \
-		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763)
+		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
+		 CPU_SUBTYPE_SH7619)
 	select CRC32
 	select MII
 	select MDIO_BITBANG
 	select PHYLIB
 	help
 	  Renesas SuperH Ethernet device driver.
-	  This driver support SH7710, SH7712 and SH7763.
+	  This driver support SH7710, SH7712, SH7763 and SH7619.
 
 config SUNLANCE
 	tristate "Sun LANCE support"
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index f12e3d12474b..e6a7bb79d4df 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 {
 struct pci_dev *pdev = adapter->pdev;
 
+/*
+ * The L1 hardware contains a bug that erroneously sets the
+ * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
+ * fragmented IP packet is received, even though the packet
+ * is perfectly valid and its checksum is correct. There's
+ * no way to distinguish between one of these good packets
+ * and a packet that actually contains a TCP/UDP checksum
+ * error, so all we can do is allow it to be handed up to
+ * the higher layers and let it be sorted out there.
+ */
+
 skb->ip_summed = CHECKSUM_NONE;
 
 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
@@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 return;
 }
 
-/* IPv4, but hardware thinks its checksum is wrong */
-if (netif_msg_rx_err(adapter))
-dev_printk(KERN_DEBUG, &pdev->dev,
-"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
-rrd->pkt_flg, rrd->err_flg);
-skb->ip_summed = CHECKSUM_COMPLETE;
-skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
-adapter->hw_csum_err++;
 return;
 }
 
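
The practical effect is visible in skb->ip_summed: for the frames the L1 hardware may misreport, the driver now leaves CHECKSUM_NONE so the stack verifies the TCP/UDP checksum in software, rather than forwarding a hardware checksum result. A simplified outline of that decision follows; it is not the actual atl1 logic, which also looks at the packet type and the individual error bits, and the function name is invented.

#include <linux/skbuff.h>

static void rx_csum_outline(struct sk_buff *skb, bool hw_flagged_error,
                            bool hw_verified_ok)
{
        skb->ip_summed = CHECKSUM_NONE;    /* default: stack checks it in software */

        if (hw_flagged_error)
                return;    /* possibly the fragmented-IP false positive: hand it up anyway */

        if (hw_verified_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;    /* hardware already validated it */
}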
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 3d4433358a36..c10cd8058e23 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev)
 struct net_local *lp = netdev_priv(dev);
 long ioaddr = dev->base_addr;
 
-if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
-/* We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. - AC
- */
-dev->flags|=IFF_PROMISC;
+if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
 lp->addr_mode = CMR2h_PROMISC;
-} else
+else
 lp->addr_mode = CMR2h_Normal;
 write_reg_high(ioaddr, CMR2, lp->addr_mode);
 }
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ebb539e090c3..6106660a4a44 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 aggregator = __get_first_agg(port);
 ad_agg_selection_logic(aggregator);
 }
+bond_3ad_set_carrier(bond);
 }
 
 // for each port run the state machines
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a641eeaa2a2f..c792138511e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2223 | 2223 | ||
2224 | /*-------------------------------- Monitoring -------------------------------*/ | 2224 | /*-------------------------------- Monitoring -------------------------------*/ |
2225 | 2225 | ||
2226 | /* | ||
2227 | * if !have_locks, return nonzero if a failover is necessary. if | ||
2228 | * have_locks, do whatever failover activities are needed. | ||
2229 | * | ||
2230 | * This is to separate the inspection and failover steps for locking | ||
2231 | * purposes; failover requires rtnl, but acquiring it for every | ||
2232 | * inspection is undesirable, so a wrapper first does inspection, and | ||
2233 | * the acquires the necessary locks and calls again to perform | ||
2234 | * failover if needed. Since all locks are dropped, a complete | ||
2235 | * restart is needed between calls. | ||
2236 | */ | ||
2237 | static int __bond_mii_monitor(struct bonding *bond, int have_locks) | ||
2238 | { | ||
2239 | struct slave *slave, *oldcurrent; | ||
2240 | int do_failover = 0; | ||
2241 | int i; | ||
2242 | |||
2243 | if (bond->slave_cnt == 0) | ||
2244 | goto out; | ||
2245 | 2226 | ||
2246 | /* we will try to read the link status of each of our slaves, and | 2227 | static int bond_miimon_inspect(struct bonding *bond) |
2247 | * set their IFF_RUNNING flag appropriately. For each slave not | 2228 | { |
2248 | * supporting MII status, we won't do anything so that a user-space | 2229 | struct slave *slave; |
2249 | * program could monitor the link itself if needed. | 2230 | int i, link_state, commit = 0; |
2250 | */ | ||
2251 | |||
2252 | read_lock(&bond->curr_slave_lock); | ||
2253 | oldcurrent = bond->curr_active_slave; | ||
2254 | read_unlock(&bond->curr_slave_lock); | ||
2255 | 2231 | ||
2256 | bond_for_each_slave(bond, slave, i) { | 2232 | bond_for_each_slave(bond, slave, i) { |
2257 | struct net_device *slave_dev = slave->dev; | 2233 | slave->new_link = BOND_LINK_NOCHANGE; |
2258 | int link_state; | ||
2259 | u16 old_speed = slave->speed; | ||
2260 | u8 old_duplex = slave->duplex; | ||
2261 | 2234 | ||
2262 | link_state = bond_check_dev_link(bond, slave_dev, 0); | 2235 | link_state = bond_check_dev_link(bond, slave->dev, 0); |
2263 | 2236 | ||
2264 | switch (slave->link) { | 2237 | switch (slave->link) { |
2265 | case BOND_LINK_UP: /* the link was up */ | 2238 | case BOND_LINK_UP: |
2266 | if (link_state == BMSR_LSTATUS) { | 2239 | if (link_state) |
2267 | if (!oldcurrent) { | 2240 | continue; |
2268 | if (!have_locks) | ||
2269 | return 1; | ||
2270 | do_failover = 1; | ||
2271 | } | ||
2272 | break; | ||
2273 | } else { /* link going down */ | ||
2274 | slave->link = BOND_LINK_FAIL; | ||
2275 | slave->delay = bond->params.downdelay; | ||
2276 | |||
2277 | if (slave->link_failure_count < UINT_MAX) { | ||
2278 | slave->link_failure_count++; | ||
2279 | } | ||
2280 | 2241 | ||
2281 | if (bond->params.downdelay) { | 2242 | slave->link = BOND_LINK_FAIL; |
2282 | printk(KERN_INFO DRV_NAME | 2243 | slave->delay = bond->params.downdelay; |
2283 | ": %s: link status down for %s " | 2244 | if (slave->delay) { |
2284 | "interface %s, disabling it in " | 2245 | printk(KERN_INFO DRV_NAME |
2285 | "%d ms.\n", | 2246 | ": %s: link status down for %s" |
2286 | bond->dev->name, | 2247 | "interface %s, disabling it in %d ms.\n", |
2287 | IS_UP(slave_dev) | 2248 | bond->dev->name, |
2288 | ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | 2249 | (bond->params.mode == |
2289 | ? ((slave == oldcurrent) | 2250 | BOND_MODE_ACTIVEBACKUP) ? |
2290 | ? "active " : "backup ") | 2251 | ((slave->state == BOND_STATE_ACTIVE) ? |
2291 | : "") | 2252 | "active " : "backup ") : "", |
2292 | : "idle ", | 2253 | slave->dev->name, |
2293 | slave_dev->name, | 2254 | bond->params.downdelay * bond->params.miimon); |
2294 | bond->params.downdelay * bond->params.miimon); | ||
2295 | } | ||
2296 | } | 2255 | } |
2297 | /* no break ! fall through the BOND_LINK_FAIL test to | 2256 | /*FALLTHRU*/ |
2298 | ensure proper action to be taken | 2257 | case BOND_LINK_FAIL: |
2299 | */ | 2258 | if (link_state) { |
2300 | case BOND_LINK_FAIL: /* the link has just gone down */ | 2259 | /* |
2301 | if (link_state != BMSR_LSTATUS) { | 2260 | * recovered before downdelay expired |
2302 | /* link stays down */ | 2261 | */ |
2303 | if (slave->delay <= 0) { | 2262 | slave->link = BOND_LINK_UP; |
2304 | if (!have_locks) | ||
2305 | return 1; | ||
2306 | |||
2307 | /* link down for too long time */ | ||
2308 | slave->link = BOND_LINK_DOWN; | ||
2309 | |||
2310 | /* in active/backup mode, we must | ||
2311 | * completely disable this interface | ||
2312 | */ | ||
2313 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) || | ||
2314 | (bond->params.mode == BOND_MODE_8023AD)) { | ||
2315 | bond_set_slave_inactive_flags(slave); | ||
2316 | } | ||
2317 | |||
2318 | printk(KERN_INFO DRV_NAME | ||
2319 | ": %s: link status definitely " | ||
2320 | "down for interface %s, " | ||
2321 | "disabling it\n", | ||
2322 | bond->dev->name, | ||
2323 | slave_dev->name); | ||
2324 | |||
2325 | /* notify ad that the link status has changed */ | ||
2326 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
2327 | bond_3ad_handle_link_change(slave, BOND_LINK_DOWN); | ||
2328 | } | ||
2329 | |||
2330 | if ((bond->params.mode == BOND_MODE_TLB) || | ||
2331 | (bond->params.mode == BOND_MODE_ALB)) { | ||
2332 | bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN); | ||
2333 | } | ||
2334 | |||
2335 | if (slave == oldcurrent) { | ||
2336 | do_failover = 1; | ||
2337 | } | ||
2338 | } else { | ||
2339 | slave->delay--; | ||
2340 | } | ||
2341 | } else { | ||
2342 | /* link up again */ | ||
2343 | slave->link = BOND_LINK_UP; | ||
2344 | slave->jiffies = jiffies; | 2263 | slave->jiffies = jiffies; |
2345 | printk(KERN_INFO DRV_NAME | 2264 | printk(KERN_INFO DRV_NAME |
2346 | ": %s: link status up again after %d " | 2265 | ": %s: link status up again after %d " |
2347 | "ms for interface %s.\n", | 2266 | "ms for interface %s.\n", |
2348 | bond->dev->name, | 2267 | bond->dev->name, |
2349 | (bond->params.downdelay - slave->delay) * bond->params.miimon, | 2268 | (bond->params.downdelay - slave->delay) * |
2350 | slave_dev->name); | 2269 | bond->params.miimon, |
2270 | slave->dev->name); | ||
2271 | continue; | ||
2351 | } | 2272 | } |
2352 | break; | ||
2353 | case BOND_LINK_DOWN: /* the link was down */ | ||
2354 | if (link_state != BMSR_LSTATUS) { | ||
2355 | /* the link stays down, nothing more to do */ | ||
2356 | break; | ||
2357 | } else { /* link going up */ | ||
2358 | slave->link = BOND_LINK_BACK; | ||
2359 | slave->delay = bond->params.updelay; | ||
2360 | 2273 | ||
2361 | if (bond->params.updelay) { | 2274 | if (slave->delay <= 0) { |
2362 | /* if updelay == 0, no need to | 2275 | slave->new_link = BOND_LINK_DOWN; |
2363 | advertise about a 0 ms delay */ | 2276 | commit++; |
2364 | printk(KERN_INFO DRV_NAME | 2277 | continue; |
2365 | ": %s: link status up for " | ||
2366 | "interface %s, enabling it " | ||
2367 | "in %d ms.\n", | ||
2368 | bond->dev->name, | ||
2369 | slave_dev->name, | ||
2370 | bond->params.updelay * bond->params.miimon); | ||
2371 | } | ||
2372 | } | 2278 | } |
2373 | /* no break ! fall through the BOND_LINK_BACK state in | ||
2374 | case there's something to do. | ||
2375 | */ | ||
2376 | case BOND_LINK_BACK: /* the link has just come back */ | ||
2377 | if (link_state != BMSR_LSTATUS) { | ||
2378 | /* link down again */ | ||
2379 | slave->link = BOND_LINK_DOWN; | ||
2380 | 2279 | ||
2280 | slave->delay--; | ||
2281 | break; | ||
2282 | |||
2283 | case BOND_LINK_DOWN: | ||
2284 | if (!link_state) | ||
2285 | continue; | ||
2286 | |||
2287 | slave->link = BOND_LINK_BACK; | ||
2288 | slave->delay = bond->params.updelay; | ||
2289 | |||
2290 | if (slave->delay) { | ||
2291 | printk(KERN_INFO DRV_NAME | ||
2292 | ": %s: link status up for " | ||
2293 | "interface %s, enabling it in %d ms.\n", | ||
2294 | bond->dev->name, slave->dev->name, | ||
2295 | bond->params.updelay * | ||
2296 | bond->params.miimon); | ||
2297 | } | ||
2298 | /*FALLTHRU*/ | ||
2299 | case BOND_LINK_BACK: | ||
2300 | if (!link_state) { | ||
2301 | slave->link = BOND_LINK_DOWN; | ||
2381 | printk(KERN_INFO DRV_NAME | 2302 | printk(KERN_INFO DRV_NAME |
2382 | ": %s: link status down again after %d " | 2303 | ": %s: link status down again after %d " |
2383 | "ms for interface %s.\n", | 2304 | "ms for interface %s.\n", |
2384 | bond->dev->name, | 2305 | bond->dev->name, |
2385 | (bond->params.updelay - slave->delay) * bond->params.miimon, | 2306 | (bond->params.updelay - slave->delay) * |
2386 | slave_dev->name); | 2307 | bond->params.miimon, |
2387 | } else { | 2308 | slave->dev->name); |
2388 | /* link stays up */ | ||
2389 | if (slave->delay == 0) { | ||
2390 | if (!have_locks) | ||
2391 | return 1; | ||
2392 | |||
2393 | /* now the link has been up for long time enough */ | ||
2394 | slave->link = BOND_LINK_UP; | ||
2395 | slave->jiffies = jiffies; | ||
2396 | |||
2397 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
2398 | /* prevent it from being the active one */ | ||
2399 | slave->state = BOND_STATE_BACKUP; | ||
2400 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { | ||
2401 | /* make it immediately active */ | ||
2402 | slave->state = BOND_STATE_ACTIVE; | ||
2403 | } else if (slave != bond->primary_slave) { | ||
2404 | /* prevent it from being the active one */ | ||
2405 | slave->state = BOND_STATE_BACKUP; | ||
2406 | } | ||
2407 | 2309 | ||
2408 | printk(KERN_INFO DRV_NAME | 2310 | continue; |
2409 | ": %s: link status definitely " | ||
2410 | "up for interface %s.\n", | ||
2411 | bond->dev->name, | ||
2412 | slave_dev->name); | ||
2413 | |||
2414 | /* notify ad that the link status has changed */ | ||
2415 | if (bond->params.mode == BOND_MODE_8023AD) { | ||
2416 | bond_3ad_handle_link_change(slave, BOND_LINK_UP); | ||
2417 | } | ||
2418 | |||
2419 | if ((bond->params.mode == BOND_MODE_TLB) || | ||
2420 | (bond->params.mode == BOND_MODE_ALB)) { | ||
2421 | bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); | ||
2422 | } | ||
2423 | |||
2424 | if ((!oldcurrent) || | ||
2425 | (slave == bond->primary_slave)) { | ||
2426 | do_failover = 1; | ||
2427 | } | ||
2428 | } else { | ||
2429 | slave->delay--; | ||
2430 | } | ||
2431 | } | 2311 | } |
2312 | |||
2313 | if (slave->delay <= 0) { | ||
2314 | slave->new_link = BOND_LINK_UP; | ||
2315 | commit++; | ||
2316 | continue; | ||
2317 | } | ||
2318 | |||
2319 | slave->delay--; | ||
2432 | break; | 2320 | break; |
2433 | default: | 2321 | } |
2434 | /* Should not happen */ | 2322 | } |
2435 | printk(KERN_ERR DRV_NAME | ||
2436 | ": %s: Error: %s Illegal value (link=%d)\n", | ||
2437 | bond->dev->name, | ||
2438 | slave->dev->name, | ||
2439 | slave->link); | ||
2440 | goto out; | ||
2441 | } /* end of switch (slave->link) */ | ||
2442 | 2323 | ||
2443 | bond_update_speed_duplex(slave); | 2324 | return commit; |
2325 | } | ||
2444 | 2326 | ||
2445 | if (bond->params.mode == BOND_MODE_8023AD) { | 2327 | static void bond_miimon_commit(struct bonding *bond) |
2446 | if (old_speed != slave->speed) { | 2328 | { |
2447 | bond_3ad_adapter_speed_changed(slave); | 2329 | struct slave *slave; |
2448 | } | 2330 | int i; |
2331 | |||
2332 | bond_for_each_slave(bond, slave, i) { | ||
2333 | switch (slave->new_link) { | ||
2334 | case BOND_LINK_NOCHANGE: | ||
2335 | continue; | ||
2336 | |||
2337 | case BOND_LINK_UP: | ||
2338 | slave->link = BOND_LINK_UP; | ||
2339 | slave->jiffies = jiffies; | ||
2449 | 2340 | ||
2450 | if (old_duplex != slave->duplex) { | 2341 | if (bond->params.mode == BOND_MODE_8023AD) { |
2451 | bond_3ad_adapter_duplex_changed(slave); | 2342 | /* prevent it from being the active one */ |
2343 | slave->state = BOND_STATE_BACKUP; | ||
2344 | } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { | ||
2345 | /* make it immediately active */ | ||
2346 | slave->state = BOND_STATE_ACTIVE; | ||
2347 | } else if (slave != bond->primary_slave) { | ||
2348 | /* prevent it from being the active one */ | ||
2349 | slave->state = BOND_STATE_BACKUP; | ||
2452 | } | 2350 | } |
2453 | } | ||
2454 | 2351 | ||
2455 | } /* end of for */ | 2352 | printk(KERN_INFO DRV_NAME |
2353 | ": %s: link status definitely " | ||
2354 | "up for interface %s.\n", | ||
2355 | bond->dev->name, slave->dev->name); | ||
2456 | 2356 | ||
2457 | if (do_failover) { | 2357 | /* notify ad that the link status has changed */ |
2458 | ASSERT_RTNL(); | 2358 | if (bond->params.mode == BOND_MODE_8023AD) |
2359 | bond_3ad_handle_link_change(slave, BOND_LINK_UP); | ||
2459 | 2360 | ||
2460 | write_lock_bh(&bond->curr_slave_lock); | 2361 | if ((bond->params.mode == BOND_MODE_TLB) || |
2362 | (bond->params.mode == BOND_MODE_ALB)) | ||
2363 | bond_alb_handle_link_change(bond, slave, | ||
2364 | BOND_LINK_UP); | ||
2461 | 2365 | ||
2462 | bond_select_active_slave(bond); | 2366 | if (!bond->curr_active_slave || |
2367 | (slave == bond->primary_slave)) | ||
2368 | goto do_failover; | ||
2463 | 2369 | ||
2464 | write_unlock_bh(&bond->curr_slave_lock); | 2370 | continue; |
2465 | 2371 | ||
2466 | } else | 2372 | case BOND_LINK_DOWN: |
2467 | bond_set_carrier(bond); | 2373 | slave->link = BOND_LINK_DOWN; |
2468 | 2374 | ||
2469 | out: | 2375 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || |
2470 | return 0; | 2376 | bond->params.mode == BOND_MODE_8023AD) |
2377 | bond_set_slave_inactive_flags(slave); | ||
2378 | |||
2379 | printk(KERN_INFO DRV_NAME | ||
2380 | ": %s: link status definitely down for " | ||
2381 | "interface %s, disabling it\n", | ||
2382 | bond->dev->name, slave->dev->name); | ||
2383 | |||
2384 | if (bond->params.mode == BOND_MODE_8023AD) | ||
2385 | bond_3ad_handle_link_change(slave, | ||
2386 | BOND_LINK_DOWN); | ||
2387 | |||
2388 | if (bond->params.mode == BOND_MODE_TLB || | ||
2389 | bond->params.mode == BOND_MODE_ALB) | ||
2390 | bond_alb_handle_link_change(bond, slave, | ||
2391 | BOND_LINK_DOWN); | ||
2392 | |||
2393 | if (slave == bond->curr_active_slave) | ||
2394 | goto do_failover; | ||
2395 | |||
2396 | continue; | ||
2397 | |||
2398 | default: | ||
2399 | printk(KERN_ERR DRV_NAME | ||
2400 | ": %s: invalid new link %d on slave %s\n", | ||
2401 | bond->dev->name, slave->new_link, | ||
2402 | slave->dev->name); | ||
2403 | slave->new_link = BOND_LINK_NOCHANGE; | ||
2404 | |||
2405 | continue; | ||
2406 | } | ||
2407 | |||
2408 | do_failover: | ||
2409 | ASSERT_RTNL(); | ||
2410 | write_lock_bh(&bond->curr_slave_lock); | ||
2411 | bond_select_active_slave(bond); | ||
2412 | write_unlock_bh(&bond->curr_slave_lock); | ||
2413 | } | ||
2414 | |||
2415 | bond_set_carrier(bond); | ||
2471 | } | 2416 | } |
2472 | 2417 | ||
2473 | /* | 2418 | /* |
2474 | * bond_mii_monitor | 2419 | * bond_mii_monitor |
2475 | * | 2420 | * |
2476 | * Really a wrapper that splits the mii monitor into two phases: an | 2421 | * Really a wrapper that splits the mii monitor into two phases: an |
2477 | * inspection, then (if inspection indicates something needs to be | 2422 | * inspection, then (if inspection indicates something needs to be done) |
2478 | * done) an acquisition of appropriate locks followed by another pass | 2423 | * an acquisition of appropriate locks followed by a commit phase to |
2479 | * to implement whatever link state changes are indicated. | 2424 | * implement whatever link state changes are indicated. |
2480 | */ | 2425 | */ |
2481 | void bond_mii_monitor(struct work_struct *work) | 2426 | void bond_mii_monitor(struct work_struct *work) |
2482 | { | 2427 | { |
2483 | struct bonding *bond = container_of(work, struct bonding, | 2428 | struct bonding *bond = container_of(work, struct bonding, |
2484 | mii_work.work); | 2429 | mii_work.work); |
2485 | unsigned long delay; | ||
2486 | 2430 | ||
2487 | read_lock(&bond->lock); | 2431 | read_lock(&bond->lock); |
2488 | if (bond->kill_timers) { | 2432 | if (bond->kill_timers) |
2489 | read_unlock(&bond->lock); | 2433 | goto out; |
2490 | return; | 2434 | |
2491 | } | 2435 | if (bond->slave_cnt == 0) |
2436 | goto re_arm; | ||
2492 | 2437 | ||
2493 | if (bond->send_grat_arp) { | 2438 | if (bond->send_grat_arp) { |
2494 | read_lock(&bond->curr_slave_lock); | 2439 | read_lock(&bond->curr_slave_lock); |
@@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work)
 read_unlock(&bond->curr_slave_lock);
 }
 
-if (__bond_mii_monitor(bond, 0)) {
+if (bond_miimon_inspect(bond)) {
 read_unlock(&bond->lock);
 rtnl_lock();
 read_lock(&bond->lock);
-__bond_mii_monitor(bond, 1);
+
+bond_miimon_commit(bond);
+
 read_unlock(&bond->lock);
 rtnl_unlock();	/* might sleep, hold no other locks */
 read_lock(&bond->lock);
 }
 
-delay = msecs_to_jiffies(bond->params.miimon);
+re_arm:
+if (bond->params.miimon)
+queue_delayed_work(bond->wq, &bond->mii_work,
+msecs_to_jiffies(bond->params.miimon));
+out:
 read_unlock(&bond->lock);
-queue_delayed_work(bond->wq, &bond->mii_work, delay);
 }
 
 static __be32 bond_glean_dev_ip(struct net_device *dev)
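
The monitor rewrite above splits the work into an inspection pass, run under the bond read lock and only recording a proposed new_link for each slave, and a commit pass that applies those changes under RTNL. A condensed outline of the control flow (the real bond_mii_monitor() carries extra bookkeeping for gratuitous ARP and timer shutdown; the _outline name is invented):

static void bond_mii_monitor_outline(struct bonding *bond)
{
        read_lock(&bond->lock);

        if (bond_miimon_inspect(bond)) {        /* phase 1: decide, do not commit */
                read_unlock(&bond->lock);

                rtnl_lock();                    /* failover and slave flags need RTNL */
                read_lock(&bond->lock);
                bond_miimon_commit(bond);       /* phase 2: apply recorded changes */
                read_unlock(&bond->lock);
                rtnl_unlock();

                read_lock(&bond->lock);
        }

        if (bond->params.miimon)                /* re-arm the delayed work */
                queue_delayed_work(bond->wq, &bond->mii_work,
                                   msecs_to_jiffies(bond->params.miimon));

        read_unlock(&bond->lock);
}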
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6caac0ffb2f2..3bdb47382521 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d,
 if (dev) {
 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
 bond->dev->name, dev->name);
-if (bond->setup_by_slave)
-res = bond_release_and_destroy(bond->dev, dev);
-else
 res = bond_release(bond->dev, dev);
 if (res) {
 ret = res;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 3f5190c654cf..d454e143483e 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev)
 {
 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
 { /* Enable promiscuous mode */
-/*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. - AC
- */
-dev->flags|=IFF_PROMISC;
-
 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
 }
 else
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0b0f1c407a7e..f42c23f42652 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev)
 for (i = 0; i < 6; i += 2)
 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
 
+if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
+mac_src = "platform data";
+memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+}
+
 if (!is_valid_ether_addr(ndev->dev_addr)) {
 /* try reading from mac */
 
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4a4f62e002b2..cf57050d99d8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -41,24 +41,25 @@
 
 struct e1000_info;
 
-#define ndev_printk(level, netdev, format, arg...) \
-	printk(level "%s: " format, (netdev)->name, ## arg)
+#define e_printk(level, adapter, format, arg...) \
+	printk(level "%s: %s: " format, pci_name(adapter->pdev), \
+	       adapter->netdev->name, ## arg)
 
 #ifdef DEBUG
-#define ndev_dbg(netdev, format, arg...) \
-	ndev_printk(KERN_DEBUG , netdev, format, ## arg)
+#define e_dbg(format, arg...) \
+	e_printk(KERN_DEBUG , adapter, format, ## arg)
 #else
-#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0)
+#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
 #endif
 
-#define ndev_err(netdev, format, arg...) \
-	ndev_printk(KERN_ERR , netdev, format, ## arg)
-#define ndev_info(netdev, format, arg...) \
-	ndev_printk(KERN_INFO , netdev, format, ## arg)
-#define ndev_warn(netdev, format, arg...) \
-	ndev_printk(KERN_WARNING , netdev, format, ## arg)
-#define ndev_notice(netdev, format, arg...) \
-	ndev_printk(KERN_NOTICE , netdev, format, ## arg)
+#define e_err(format, arg...) \
+	e_printk(KERN_ERR, adapter, format, ## arg)
+#define e_info(format, arg...) \
+	e_printk(KERN_INFO, adapter, format, ## arg)
+#define e_warn(format, arg...) \
+	e_printk(KERN_WARNING, adapter, format, ## arg)
+#define e_notice(format, arg...) \
+	e_printk(KERN_NOTICE, adapter, format, ## arg)
 
 
 /* Tx/Rx descriptor defines */
@@ -283,10 +284,6 @@ struct e1000_adapter {
 unsigned long led_status;
 
 unsigned int flags;
-
-/* for ioport free */
-int bars;
-int need_ioport;
 };
 
 struct e1000_info {
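
Because the new macros expand pci_name(adapter->pdev) and adapter->netdev->name themselves, they assume a local variable named adapter (a struct e1000_adapter *) is in scope at every call site, and the call sites shrink accordingly. A small usage sketch with an invented message:

static void example(struct e1000_adapter *adapter)
{
        /* before: ndev_err(adapter->netdev, "link is down\n"); */
        e_err("link is down\n");    /* prints "<pci id>: <ifname>: link is down" */
}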
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 9350564065e7..cf9679f2b7c4 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
189 | /* Fiber NICs only allow 1000 gbps Full duplex */ | 189 | /* Fiber NICs only allow 1000 gbps Full duplex */ |
190 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && | 190 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && |
191 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 191 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
192 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " | 192 | e_err("Unsupported Speed/Duplex configuration\n"); |
193 | "configuration\n"); | ||
194 | return -EINVAL; | 193 | return -EINVAL; |
195 | } | 194 | } |
196 | 195 | ||
@@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
213 | break; | 212 | break; |
214 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | 213 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
215 | default: | 214 | default: |
216 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " | 215 | e_err("Unsupported Speed/Duplex configuration\n"); |
217 | "configuration\n"); | ||
218 | return -EINVAL; | 216 | return -EINVAL; |
219 | } | 217 | } |
220 | return 0; | 218 | return 0; |
@@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
231 | * cannot be changed | 229 | * cannot be changed |
232 | */ | 230 | */ |
233 | if (e1000_check_reset_block(hw)) { | 231 | if (e1000_check_reset_block(hw)) { |
234 | ndev_err(netdev, "Cannot change link " | 232 | e_err("Cannot change link characteristics when SoL/IDER is " |
235 | "characteristics when SoL/IDER is active.\n"); | 233 | "active.\n"); |
236 | return -EINVAL; | 234 | return -EINVAL; |
237 | } | 235 | } |
238 | 236 | ||
@@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data) | |||
380 | netdev->features &= ~NETIF_F_TSO6; | 378 | netdev->features &= ~NETIF_F_TSO6; |
381 | } | 379 | } |
382 | 380 | ||
383 | ndev_info(netdev, "TSO is %s\n", | 381 | e_info("TSO is %s\n", data ? "Enabled" : "Disabled"); |
384 | data ? "Enabled" : "Disabled"); | ||
385 | adapter->flags |= FLAG_TSO_FORCE; | 382 | adapter->flags |= FLAG_TSO_FORCE; |
386 | return 0; | 383 | return 0; |
387 | } | 384 | } |
@@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, | |||
722 | (test[pat] & write)); | 719 | (test[pat] & write)); |
723 | val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); | 720 | val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); |
724 | if (val != (test[pat] & write & mask)) { | 721 | if (val != (test[pat] & write & mask)) { |
725 | ndev_err(adapter->netdev, "pattern test reg %04X " | 722 | e_err("pattern test reg %04X failed: got 0x%08X " |
726 | "failed: got 0x%08X expected 0x%08X\n", | 723 | "expected 0x%08X\n", reg + offset, val, |
727 | reg + offset, | 724 | (test[pat] & write & mask)); |
728 | val, (test[pat] & write & mask)); | ||
729 | *data = reg; | 725 | *data = reg; |
730 | return 1; | 726 | return 1; |
731 | } | 727 | } |
@@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | |||
740 | __ew32(&adapter->hw, reg, write & mask); | 736 | __ew32(&adapter->hw, reg, write & mask); |
741 | val = __er32(&adapter->hw, reg); | 737 | val = __er32(&adapter->hw, reg); |
742 | if ((write & mask) != (val & mask)) { | 738 | if ((write & mask) != (val & mask)) { |
743 | ndev_err(adapter->netdev, "set/check reg %04X test failed: " | 739 | e_err("set/check reg %04X test failed: got 0x%08X " |
744 | "got 0x%08X expected 0x%08X\n", reg, (val & mask), | 740 | "expected 0x%08X\n", reg, (val & mask), (write & mask)); |
745 | (write & mask)); | ||
746 | *data = reg; | 741 | *data = reg; |
747 | return 1; | 742 | return 1; |
748 | } | 743 | } |
@@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
766 | { | 761 | { |
767 | struct e1000_hw *hw = &adapter->hw; | 762 | struct e1000_hw *hw = &adapter->hw; |
768 | struct e1000_mac_info *mac = &adapter->hw.mac; | 763 | struct e1000_mac_info *mac = &adapter->hw.mac; |
769 | struct net_device *netdev = adapter->netdev; | ||
770 | u32 value; | 764 | u32 value; |
771 | u32 before; | 765 | u32 before; |
772 | u32 after; | 766 | u32 after; |
@@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
799 | ew32(STATUS, toggle); | 793 | ew32(STATUS, toggle); |
800 | after = er32(STATUS) & toggle; | 794 | after = er32(STATUS) & toggle; |
801 | if (value != after) { | 795 | if (value != after) { |
802 | ndev_err(netdev, "failed STATUS register test got: " | 796 | e_err("failed STATUS register test got: 0x%08X expected: " |
803 | "0x%08X expected: 0x%08X\n", after, value); | 797 | "0x%08X\n", after, value); |
804 | *data = 1; | 798 | *data = 1; |
805 | return 1; | 799 | return 1; |
806 | } | 800 | } |
@@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
903 | *data = 1; | 897 | *data = 1; |
904 | return -1; | 898 | return -1; |
905 | } | 899 | } |
906 | ndev_info(netdev, "testing %s interrupt\n", | 900 | e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); |
907 | (shared_int ? "shared" : "unshared")); | ||
908 | 901 | ||
909 | /* Disable all the interrupts */ | 902 | /* Disable all the interrupts */ |
910 | ew32(IMC, 0xFFFFFFFF); | 903 | ew32(IMC, 0xFFFFFFFF); |
@@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |||
1526 | * sessions are active | 1519 | * sessions are active |
1527 | */ | 1520 | */ |
1528 | if (e1000_check_reset_block(&adapter->hw)) { | 1521 | if (e1000_check_reset_block(&adapter->hw)) { |
1529 | ndev_err(adapter->netdev, "Cannot do PHY loopback test " | 1522 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); |
1530 | "when SoL/IDER is active.\n"); | ||
1531 | *data = 0; | 1523 | *data = 0; |
1532 | goto out; | 1524 | goto out; |
1533 | } | 1525 | } |
@@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1612 | forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; | 1604 | forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; |
1613 | autoneg = adapter->hw.mac.autoneg; | 1605 | autoneg = adapter->hw.mac.autoneg; |
1614 | 1606 | ||
1615 | ndev_info(netdev, "offline testing starting\n"); | 1607 | e_info("offline testing starting\n"); |
1616 | 1608 | ||
1617 | /* | 1609 | /* |
1618 | * Link test performed before hardware reset so autoneg doesn't | 1610 | * Link test performed before hardware reset so autoneg doesn't |
@@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1658 | if (if_running) | 1650 | if (if_running) |
1659 | dev_open(netdev); | 1651 | dev_open(netdev); |
1660 | } else { | 1652 | } else { |
1661 | ndev_info(netdev, "online testing starting\n"); | 1653 | e_info("online testing starting\n"); |
1662 | /* Online tests */ | 1654 | /* Online tests */ |
1663 | if (e1000_link_test(adapter, &data[4])) | 1655 | if (e1000_link_test(adapter, &data[4])) |
1664 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1656 | eth_test->flags |= ETH_TEST_FL_FAILED; |
@@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev, | |||
1694 | wol->supported &= ~WAKE_UCAST; | 1686 | wol->supported &= ~WAKE_UCAST; |
1695 | 1687 | ||
1696 | if (adapter->wol & E1000_WUFC_EX) | 1688 | if (adapter->wol & E1000_WUFC_EX) |
1697 | ndev_err(netdev, "Interface does not support " | 1689 | e_err("Interface does not support directed (unicast) " |
1698 | "directed (unicast) frame wake-up packets\n"); | 1690 | "frame wake-up packets\n"); |
1699 | } | 1691 | } |
1700 | 1692 | ||
1701 | if (adapter->wol & E1000_WUFC_EX) | 1693 | if (adapter->wol & E1000_WUFC_EX) |
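
The ndev_dbg/ndev_err/ndev_info/ndev_warn calls in these hunks are being replaced by shorter e_dbg/e_err/e_info/e_warn/e_notice wrappers. Their definitions live in e1000.h, which is not part of this excerpt; the macro bodies below are a rough sketch (an assumption, not text copied from the patch) of printk wrappers that pick up the local adapter pointer and prefix the interface name, which is why the netdev argument disappears from every call site:

/* Hypothetical sketch of the e1000.h logging helpers; not the literal patch text.
 * Assumes a local "adapter" pointer is in scope at every call site and that
 * <linux/pci.h> provides pci_name().
 */
#define e_printk(level, adapter, format, arg...) \
	printk(level "%s: %s: " format, \
	       pci_name((adapter)->pdev), (adapter)->netdev->name, ##arg)

#define e_err(format, arg...)    e_printk(KERN_ERR,     adapter, format, ##arg)
#define e_warn(format, arg...)   e_printk(KERN_WARNING, adapter, format, ##arg)
#define e_notice(format, arg...) e_printk(KERN_NOTICE,  adapter, format, ##arg)
#define e_info(format, arg...)   e_printk(KERN_INFO,    adapter, format, ##arg)
#ifdef DEBUG
#define e_dbg(format, arg...)    e_printk(KERN_DEBUG,   adapter, format, ##arg)
#else
#define e_dbg(format, arg...)    do { } while (0)
#endif
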
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d13677899767..05b0b2f9c54b 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
484 | * packet, also make sure the frame isn't just CRC only */ | 484 | * packet, also make sure the frame isn't just CRC only */ |
485 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 485 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { |
486 | /* All receives must fit into a single buffer */ | 486 | /* All receives must fit into a single buffer */ |
487 | ndev_dbg(netdev, "%s: Receive packet consumed " | 487 | e_dbg("%s: Receive packet consumed multiple buffers\n", |
488 | "multiple buffers\n", netdev->name); | 488 | netdev->name); |
489 | /* recycle */ | 489 | /* recycle */ |
490 | buffer_info->skb = skb; | 490 | buffer_info->skb = skb; |
491 | goto next_desc; | 491 | goto next_desc; |
@@ -576,28 +576,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) | |||
576 | unsigned int i = tx_ring->next_to_clean; | 576 | unsigned int i = tx_ring->next_to_clean; |
577 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; | 577 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; |
578 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); | 578 | struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); |
579 | struct net_device *netdev = adapter->netdev; | ||
580 | 579 | ||
581 | /* detected Tx unit hang */ | 580 | /* detected Tx unit hang */ |
582 | ndev_err(netdev, | 581 | e_err("Detected Tx Unit Hang:\n" |
583 | "Detected Tx Unit Hang:\n" | 582 | " TDH <%x>\n" |
584 | " TDH <%x>\n" | 583 | " TDT <%x>\n" |
585 | " TDT <%x>\n" | 584 | " next_to_use <%x>\n" |
586 | " next_to_use <%x>\n" | 585 | " next_to_clean <%x>\n" |
587 | " next_to_clean <%x>\n" | 586 | "buffer_info[next_to_clean]:\n" |
588 | "buffer_info[next_to_clean]:\n" | 587 | " time_stamp <%lx>\n" |
589 | " time_stamp <%lx>\n" | 588 | " next_to_watch <%x>\n" |
590 | " next_to_watch <%x>\n" | 589 | " jiffies <%lx>\n" |
591 | " jiffies <%lx>\n" | 590 | " next_to_watch.status <%x>\n", |
592 | " next_to_watch.status <%x>\n", | 591 | readl(adapter->hw.hw_addr + tx_ring->head), |
593 | readl(adapter->hw.hw_addr + tx_ring->head), | 592 | readl(adapter->hw.hw_addr + tx_ring->tail), |
594 | readl(adapter->hw.hw_addr + tx_ring->tail), | 593 | tx_ring->next_to_use, |
595 | tx_ring->next_to_use, | 594 | tx_ring->next_to_clean, |
596 | tx_ring->next_to_clean, | 595 | tx_ring->buffer_info[eop].time_stamp, |
597 | tx_ring->buffer_info[eop].time_stamp, | 596 | eop, |
598 | eop, | 597 | jiffies, |
599 | jiffies, | 598 | eop_desc->upper.fields.status); |
600 | eop_desc->upper.fields.status); | ||
601 | } | 599 | } |
602 | 600 | ||
603 | /** | 601 | /** |
@@ -747,8 +745,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
747 | buffer_info->dma = 0; | 745 | buffer_info->dma = 0; |
748 | 746 | ||
749 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 747 | if (!(staterr & E1000_RXD_STAT_EOP)) { |
750 | ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " | 748 | e_dbg("%s: Packet Split buffers didn't pick up the " |
751 | "up the full packet\n", netdev->name); | 749 | "full packet\n", netdev->name); |
752 | dev_kfree_skb_irq(skb); | 750 | dev_kfree_skb_irq(skb); |
753 | goto next_desc; | 751 | goto next_desc; |
754 | } | 752 | } |
@@ -761,8 +759,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
761 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 759 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
762 | 760 | ||
763 | if (!length) { | 761 | if (!length) { |
764 | ndev_dbg(netdev, "%s: Last part of the packet spanning" | 762 | e_dbg("%s: Last part of the packet spanning multiple " |
765 | " multiple descriptors\n", netdev->name); | 763 | "descriptors\n", netdev->name); |
766 | dev_kfree_skb_irq(skb); | 764 | dev_kfree_skb_irq(skb); |
767 | goto next_desc; | 765 | goto next_desc; |
768 | } | 766 | } |
@@ -1011,7 +1009,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | |||
1011 | 1009 | ||
1012 | /* eth type trans needs skb->data to point to something */ | 1010 | /* eth type trans needs skb->data to point to something */ |
1013 | if (!pskb_may_pull(skb, ETH_HLEN)) { | 1011 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
1014 | ndev_err(netdev, "pskb_may_pull failed.\n"); | 1012 | e_err("pskb_may_pull failed.\n"); |
1015 | dev_kfree_skb(skb); | 1013 | dev_kfree_skb(skb); |
1016 | goto next_desc; | 1014 | goto next_desc; |
1017 | } | 1015 | } |
@@ -1251,10 +1249,8 @@ static int e1000_request_irq(struct e1000_adapter *adapter) | |||
1251 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, | 1249 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, |
1252 | netdev); | 1250 | netdev); |
1253 | if (err) { | 1251 | if (err) { |
1254 | ndev_err(netdev, | 1252 | e_err("Unable to allocate %s interrupt (return: %d)\n", |
1255 | "Unable to allocate %s interrupt (return: %d)\n", | 1253 | adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err); |
1256 | adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", | ||
1257 | err); | ||
1258 | if (adapter->flags & FLAG_MSI_ENABLED) | 1254 | if (adapter->flags & FLAG_MSI_ENABLED) |
1259 | pci_disable_msi(adapter->pdev); | 1255 | pci_disable_msi(adapter->pdev); |
1260 | } | 1256 | } |
@@ -1395,8 +1391,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter) | |||
1395 | return 0; | 1391 | return 0; |
1396 | err: | 1392 | err: |
1397 | vfree(tx_ring->buffer_info); | 1393 | vfree(tx_ring->buffer_info); |
1398 | ndev_err(adapter->netdev, | 1394 | e_err("Unable to allocate memory for the transmit descriptor ring\n"); |
1399 | "Unable to allocate memory for the transmit descriptor ring\n"); | ||
1400 | return err; | 1395 | return err; |
1401 | } | 1396 | } |
1402 | 1397 | ||
@@ -1450,8 +1445,7 @@ err_pages: | |||
1450 | } | 1445 | } |
1451 | err: | 1446 | err: |
1452 | vfree(rx_ring->buffer_info); | 1447 | vfree(rx_ring->buffer_info); |
1453 | ndev_err(adapter->netdev, | 1448 | e_err("Unable to allocate memory for the receive descriptor ring\n"); |
1454 | "Unable to allocate memory for the receive descriptor ring\n"); | ||
1455 | return err; | 1449 | return err; |
1456 | } | 1450 | } |
1457 | 1451 | ||
@@ -2450,13 +2444,13 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2450 | * For parts with AMT enabled, let the firmware know | 2444 | * For parts with AMT enabled, let the firmware know |
2451 | * that the network interface is in control | 2445 | * that the network interface is in control |
2452 | */ | 2446 | */ |
2453 | if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) | 2447 | if (adapter->flags & FLAG_HAS_AMT) |
2454 | e1000_get_hw_control(adapter); | 2448 | e1000_get_hw_control(adapter); |
2455 | 2449 | ||
2456 | ew32(WUC, 0); | 2450 | ew32(WUC, 0); |
2457 | 2451 | ||
2458 | if (mac->ops.init_hw(hw)) | 2452 | if (mac->ops.init_hw(hw)) |
2459 | ndev_err(adapter->netdev, "Hardware Error\n"); | 2453 | e_err("Hardware Error\n"); |
2460 | 2454 | ||
2461 | e1000_update_mng_vlan(adapter); | 2455 | e1000_update_mng_vlan(adapter); |
2462 | 2456 | ||
@@ -2591,7 +2585,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
2591 | return 0; | 2585 | return 0; |
2592 | 2586 | ||
2593 | err: | 2587 | err: |
2594 | ndev_err(netdev, "Unable to allocate memory for queues\n"); | 2588 | e_err("Unable to allocate memory for queues\n"); |
2595 | kfree(adapter->rx_ring); | 2589 | kfree(adapter->rx_ring); |
2596 | kfree(adapter->tx_ring); | 2590 | kfree(adapter->tx_ring); |
2597 | return -ENOMEM; | 2591 | return -ENOMEM; |
@@ -2640,8 +2634,7 @@ static int e1000_open(struct net_device *netdev) | |||
2640 | * If AMT is enabled, let the firmware know that the network | 2634 | * If AMT is enabled, let the firmware know that the network |
2641 | * interface is now open | 2635 | * interface is now open |
2642 | */ | 2636 | */ |
2643 | if ((adapter->flags & FLAG_HAS_AMT) && | 2637 | if (adapter->flags & FLAG_HAS_AMT) |
2644 | e1000e_check_mng_mode(&adapter->hw)) | ||
2645 | e1000_get_hw_control(adapter); | 2638 | e1000_get_hw_control(adapter); |
2646 | 2639 | ||
2647 | /* | 2640 | /* |
@@ -2719,8 +2712,7 @@ static int e1000_close(struct net_device *netdev) | |||
2719 | * If AMT is enabled, let the firmware know that the network | 2712 | * If AMT is enabled, let the firmware know that the network |
2720 | * interface is now closed | 2713 | * interface is now closed |
2721 | */ | 2714 | */ |
2722 | if ((adapter->flags & FLAG_HAS_AMT) && | 2715 | if (adapter->flags & FLAG_HAS_AMT) |
2723 | e1000e_check_mng_mode(&adapter->hw)) | ||
2724 | e1000_release_hw_control(adapter); | 2716 | e1000_release_hw_control(adapter); |
2725 | 2717 | ||
2726 | return 0; | 2718 | return 0; |
@@ -2917,8 +2909,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
2917 | ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); | 2909 | ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); |
2918 | ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); | 2910 | ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); |
2919 | if (ret_val) | 2911 | if (ret_val) |
2920 | ndev_warn(adapter->netdev, | 2912 | e_warn("Error reading PHY register\n"); |
2921 | "Error reading PHY register\n"); | ||
2922 | } else { | 2913 | } else { |
2923 | /* | 2914 | /* |
2924 | * Do not read PHY registers if link is not up | 2915 | * Do not read PHY registers if link is not up |
@@ -2943,18 +2934,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
2943 | static void e1000_print_link_info(struct e1000_adapter *adapter) | 2934 | static void e1000_print_link_info(struct e1000_adapter *adapter) |
2944 | { | 2935 | { |
2945 | struct e1000_hw *hw = &adapter->hw; | 2936 | struct e1000_hw *hw = &adapter->hw; |
2946 | struct net_device *netdev = adapter->netdev; | ||
2947 | u32 ctrl = er32(CTRL); | 2937 | u32 ctrl = er32(CTRL); |
2948 | 2938 | ||
2949 | ndev_info(netdev, | 2939 | e_info("Link is Up %d Mbps %s, Flow Control: %s\n", |
2950 | "Link is Up %d Mbps %s, Flow Control: %s\n", | 2940 | adapter->link_speed, |
2951 | adapter->link_speed, | 2941 | (adapter->link_duplex == FULL_DUPLEX) ? |
2952 | (adapter->link_duplex == FULL_DUPLEX) ? | 2942 | "Full Duplex" : "Half Duplex", |
2953 | "Full Duplex" : "Half Duplex", | 2943 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? |
2954 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? | 2944 | "RX/TX" : |
2955 | "RX/TX" : | 2945 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : |
2956 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : | 2946 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); |
2957 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); | ||
2958 | } | 2947 | } |
2959 | 2948 | ||
2960 | static bool e1000_has_link(struct e1000_adapter *adapter) | 2949 | static bool e1000_has_link(struct e1000_adapter *adapter) |
@@ -2994,8 +2983,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter) | |||
2994 | if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && | 2983 | if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && |
2995 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | 2984 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { |
2996 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ | 2985 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ |
2997 | ndev_info(adapter->netdev, | 2986 | e_info("Gigabit has been disabled, downgrading speed\n"); |
2998 | "Gigabit has been disabled, downgrading speed\n"); | ||
2999 | } | 2987 | } |
3000 | 2988 | ||
3001 | return link_active; | 2989 | return link_active; |
@@ -3096,8 +3084,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3096 | switch (adapter->link_speed) { | 3084 | switch (adapter->link_speed) { |
3097 | case SPEED_10: | 3085 | case SPEED_10: |
3098 | case SPEED_100: | 3086 | case SPEED_100: |
3099 | ndev_info(netdev, | 3087 | e_info("10/100 speed: disabling TSO\n"); |
3100 | "10/100 speed: disabling TSO\n"); | ||
3101 | netdev->features &= ~NETIF_F_TSO; | 3088 | netdev->features &= ~NETIF_F_TSO; |
3102 | netdev->features &= ~NETIF_F_TSO6; | 3089 | netdev->features &= ~NETIF_F_TSO6; |
3103 | break; | 3090 | break; |
@@ -3130,7 +3117,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3130 | if (netif_carrier_ok(netdev)) { | 3117 | if (netif_carrier_ok(netdev)) { |
3131 | adapter->link_speed = 0; | 3118 | adapter->link_speed = 0; |
3132 | adapter->link_duplex = 0; | 3119 | adapter->link_duplex = 0; |
3133 | ndev_info(netdev, "Link is Down\n"); | 3120 | e_info("Link is Down\n"); |
3134 | netif_carrier_off(netdev); | 3121 | netif_carrier_off(netdev); |
3135 | netif_tx_stop_all_queues(netdev); | 3122 | netif_tx_stop_all_queues(netdev); |
3136 | if (!test_bit(__E1000_DOWN, &adapter->state)) | 3123 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
@@ -3604,8 +3591,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3604 | 3591 | ||
3605 | pull_size = min((unsigned int)4, skb->data_len); | 3592 | pull_size = min((unsigned int)4, skb->data_len); |
3606 | if (!__pskb_pull_tail(skb, pull_size)) { | 3593 | if (!__pskb_pull_tail(skb, pull_size)) { |
3607 | ndev_err(netdev, | 3594 | e_err("__pskb_pull_tail failed.\n"); |
3608 | "__pskb_pull_tail failed.\n"); | ||
3609 | dev_kfree_skb_any(skb); | 3595 | dev_kfree_skb_any(skb); |
3610 | return NETDEV_TX_OK; | 3596 | return NETDEV_TX_OK; |
3611 | } | 3597 | } |
@@ -3737,25 +3723,25 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3737 | 3723 | ||
3738 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || | 3724 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || |
3739 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3725 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
3740 | ndev_err(netdev, "Invalid MTU setting\n"); | 3726 | e_err("Invalid MTU setting\n"); |
3741 | return -EINVAL; | 3727 | return -EINVAL; |
3742 | } | 3728 | } |
3743 | 3729 | ||
3744 | /* Jumbo frame size limits */ | 3730 | /* Jumbo frame size limits */ |
3745 | if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { | 3731 | if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { |
3746 | if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { | 3732 | if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { |
3747 | ndev_err(netdev, "Jumbo Frames not supported.\n"); | 3733 | e_err("Jumbo Frames not supported.\n"); |
3748 | return -EINVAL; | 3734 | return -EINVAL; |
3749 | } | 3735 | } |
3750 | if (adapter->hw.phy.type == e1000_phy_ife) { | 3736 | if (adapter->hw.phy.type == e1000_phy_ife) { |
3751 | ndev_err(netdev, "Jumbo Frames not supported.\n"); | 3737 | e_err("Jumbo Frames not supported.\n"); |
3752 | return -EINVAL; | 3738 | return -EINVAL; |
3753 | } | 3739 | } |
3754 | } | 3740 | } |
3755 | 3741 | ||
3756 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | 3742 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 |
3757 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | 3743 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { |
3758 | ndev_err(netdev, "MTU > 9216 not supported.\n"); | 3744 | e_err("MTU > 9216 not supported.\n"); |
3759 | return -EINVAL; | 3745 | return -EINVAL; |
3760 | } | 3746 | } |
3761 | 3747 | ||
@@ -3792,8 +3778,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3792 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | 3778 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN |
3793 | + ETH_FCS_LEN; | 3779 | + ETH_FCS_LEN; |
3794 | 3780 | ||
3795 | ndev_info(netdev, "changing MTU from %d to %d\n", | 3781 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
3796 | netdev->mtu, new_mtu); | ||
3797 | netdev->mtu = new_mtu; | 3782 | netdev->mtu = new_mtu; |
3798 | 3783 | ||
3799 | if (netif_running(netdev)) | 3784 | if (netif_running(netdev)) |
@@ -4006,10 +3991,7 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4006 | pci_restore_state(pdev); | 3991 | pci_restore_state(pdev); |
4007 | e1000e_disable_l1aspm(pdev); | 3992 | e1000e_disable_l1aspm(pdev); |
4008 | 3993 | ||
4009 | if (adapter->need_ioport) | 3994 | err = pci_enable_device_mem(pdev); |
4010 | err = pci_enable_device(pdev); | ||
4011 | else | ||
4012 | err = pci_enable_device_mem(pdev); | ||
4013 | if (err) { | 3995 | if (err) { |
4014 | dev_err(&pdev->dev, | 3996 | dev_err(&pdev->dev, |
4015 | "Cannot enable PCI device from suspend\n"); | 3997 | "Cannot enable PCI device from suspend\n"); |
@@ -4043,7 +4025,7 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4043 | * is up. For all other cases, let the f/w know that the h/w is now | 4025 | * is up. For all other cases, let the f/w know that the h/w is now |
4044 | * under the control of the driver. | 4026 | * under the control of the driver. |
4045 | */ | 4027 | */ |
4046 | if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) | 4028 | if (!(adapter->flags & FLAG_HAS_AMT)) |
4047 | e1000_get_hw_control(adapter); | 4029 | e1000_get_hw_control(adapter); |
4048 | 4030 | ||
4049 | return 0; | 4031 | return 0; |
@@ -4111,10 +4093,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
4111 | int err; | 4093 | int err; |
4112 | 4094 | ||
4113 | e1000e_disable_l1aspm(pdev); | 4095 | e1000e_disable_l1aspm(pdev); |
4114 | if (adapter->need_ioport) | 4096 | err = pci_enable_device_mem(pdev); |
4115 | err = pci_enable_device(pdev); | ||
4116 | else | ||
4117 | err = pci_enable_device_mem(pdev); | ||
4118 | if (err) { | 4097 | if (err) { |
4119 | dev_err(&pdev->dev, | 4098 | dev_err(&pdev->dev, |
4120 | "Cannot re-enable PCI device after reset.\n"); | 4099 | "Cannot re-enable PCI device after reset.\n"); |
@@ -4162,8 +4141,7 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
4162 | * is up. For all other cases, let the f/w know that the h/w is now | 4141 | * is up. For all other cases, let the f/w know that the h/w is now |
4163 | * under the control of the driver. | 4142 | * under the control of the driver. |
4164 | */ | 4143 | */ |
4165 | if (!(adapter->flags & FLAG_HAS_AMT) || | 4144 | if (!(adapter->flags & FLAG_HAS_AMT)) |
4166 | !e1000e_check_mng_mode(&adapter->hw)) | ||
4167 | e1000_get_hw_control(adapter); | 4145 | e1000_get_hw_control(adapter); |
4168 | 4146 | ||
4169 | } | 4147 | } |
@@ -4175,36 +4153,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) | |||
4175 | u32 pba_num; | 4153 | u32 pba_num; |
4176 | 4154 | ||
4177 | /* print bus type/speed/width info */ | 4155 | /* print bus type/speed/width info */ |
4178 | ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " | 4156 | e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n", |
4179 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | 4157 | /* bus width */ |
4180 | /* bus width */ | 4158 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : |
4181 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | 4159 | "Width x1"), |
4182 | "Width x1"), | 4160 | /* MAC address */ |
4183 | /* MAC address */ | 4161 | netdev->dev_addr[0], netdev->dev_addr[1], |
4184 | netdev->dev_addr[0], netdev->dev_addr[1], | 4162 | netdev->dev_addr[2], netdev->dev_addr[3], |
4185 | netdev->dev_addr[2], netdev->dev_addr[3], | 4163 | netdev->dev_addr[4], netdev->dev_addr[5]); |
4186 | netdev->dev_addr[4], netdev->dev_addr[5]); | 4164 | e_info("Intel(R) PRO/%s Network Connection\n", |
4187 | ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", | 4165 | (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); |
4188 | (hw->phy.type == e1000_phy_ife) | ||
4189 | ? "10/100" : "1000"); | ||
4190 | e1000e_read_pba_num(hw, &pba_num); | 4166 | e1000e_read_pba_num(hw, &pba_num); |
4191 | ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 4167 | e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
4192 | hw->mac.type, hw->phy.type, | 4168 | hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); |
4193 | (pba_num >> 8), (pba_num & 0xff)); | ||
4194 | } | 4169 | } |
4195 | 4170 | ||
4196 | /** | 4171 | static void e1000_eeprom_checks(struct e1000_adapter *adapter) |
4197 | * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not | ||
4198 | * @pdev: PCI device information struct | ||
4199 | * | ||
4200 | * Returns true if an adapter needs ioport resources | ||
4201 | **/ | ||
4202 | static int e1000e_is_need_ioport(struct pci_dev *pdev) | ||
4203 | { | 4172 | { |
4204 | switch (pdev->device) { | 4173 | struct e1000_hw *hw = &adapter->hw; |
4205 | /* Currently there are no adapters that need ioport resources */ | 4174 | int ret_val; |
4206 | default: | 4175 | u16 buf = 0; |
4207 | return false; | 4176 | |
4177 | if (hw->mac.type != e1000_82573) | ||
4178 | return; | ||
4179 | |||
4180 | ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); | ||
4181 | if (!(le16_to_cpu(buf) & (1 << 0))) { | ||
4182 | /* Deep Smart Power Down (DSPD) */ | ||
4183 | e_warn("Warning: detected DSPD enabled in EEPROM\n"); | ||
4184 | } | ||
4185 | |||
4186 | ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); | ||
4187 | if (le16_to_cpu(buf) & (3 << 2)) { | ||
4188 | /* ASPM enable */ | ||
4189 | e_warn("Warning: detected ASPM enabled in EEPROM\n"); | ||
4208 | } | 4190 | } |
4209 | } | 4191 | } |
4210 | 4192 | ||
@@ -4233,19 +4215,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4233 | int i, err, pci_using_dac; | 4215 | int i, err, pci_using_dac; |
4234 | u16 eeprom_data = 0; | 4216 | u16 eeprom_data = 0; |
4235 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 4217 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
4236 | int bars, need_ioport; | ||
4237 | 4218 | ||
4238 | e1000e_disable_l1aspm(pdev); | 4219 | e1000e_disable_l1aspm(pdev); |
4239 | 4220 | ||
4240 | /* do not allocate ioport bars when not needed */ | 4221 | err = pci_enable_device_mem(pdev); |
4241 | need_ioport = e1000e_is_need_ioport(pdev); | ||
4242 | if (need_ioport) { | ||
4243 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); | ||
4244 | err = pci_enable_device(pdev); | ||
4245 | } else { | ||
4246 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
4247 | err = pci_enable_device_mem(pdev); | ||
4248 | } | ||
4249 | if (err) | 4222 | if (err) |
4250 | return err; | 4223 | return err; |
4251 | 4224 | ||
@@ -4268,7 +4241,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4268 | } | 4241 | } |
4269 | } | 4242 | } |
4270 | 4243 | ||
4271 | err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); | 4244 | err = pci_request_selected_regions(pdev, |
4245 | pci_select_bars(pdev, IORESOURCE_MEM), | ||
4246 | e1000e_driver_name); | ||
4272 | if (err) | 4247 | if (err) |
4273 | goto err_pci_reg; | 4248 | goto err_pci_reg; |
4274 | 4249 | ||
@@ -4293,8 +4268,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4293 | adapter->hw.adapter = adapter; | 4268 | adapter->hw.adapter = adapter; |
4294 | adapter->hw.mac.type = ei->mac; | 4269 | adapter->hw.mac.type = ei->mac; |
4295 | adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; | 4270 | adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; |
4296 | adapter->bars = bars; | ||
4297 | adapter->need_ioport = need_ioport; | ||
4298 | 4271 | ||
4299 | mmio_start = pci_resource_start(pdev, 0); | 4272 | mmio_start = pci_resource_start(pdev, 0); |
4300 | mmio_len = pci_resource_len(pdev, 0); | 4273 | mmio_len = pci_resource_len(pdev, 0); |
@@ -4366,8 +4339,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4366 | } | 4339 | } |
4367 | 4340 | ||
4368 | if (e1000_check_reset_block(&adapter->hw)) | 4341 | if (e1000_check_reset_block(&adapter->hw)) |
4369 | ndev_info(netdev, | 4342 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); |
4370 | "PHY reset is blocked due to SOL/IDER session.\n"); | ||
4371 | 4343 | ||
4372 | netdev->features = NETIF_F_SG | | 4344 | netdev->features = NETIF_F_SG | |
4373 | NETIF_F_HW_CSUM | | 4345 | NETIF_F_HW_CSUM | |
@@ -4411,25 +4383,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4411 | if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) | 4383 | if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) |
4412 | break; | 4384 | break; |
4413 | if (i == 2) { | 4385 | if (i == 2) { |
4414 | ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); | 4386 | e_err("The NVM Checksum Is Not Valid\n"); |
4415 | err = -EIO; | 4387 | err = -EIO; |
4416 | goto err_eeprom; | 4388 | goto err_eeprom; |
4417 | } | 4389 | } |
4418 | } | 4390 | } |
4419 | 4391 | ||
4392 | e1000_eeprom_checks(adapter); | ||
4393 | |||
4420 | /* copy the MAC address out of the NVM */ | 4394 | /* copy the MAC address out of the NVM */ |
4421 | if (e1000e_read_mac_addr(&adapter->hw)) | 4395 | if (e1000e_read_mac_addr(&adapter->hw)) |
4422 | ndev_err(netdev, "NVM Read Error while reading MAC address\n"); | 4396 | e_err("NVM Read Error while reading MAC address\n"); |
4423 | 4397 | ||
4424 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); | 4398 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); |
4425 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); | 4399 | memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); |
4426 | 4400 | ||
4427 | if (!is_valid_ether_addr(netdev->perm_addr)) { | 4401 | if (!is_valid_ether_addr(netdev->perm_addr)) { |
4428 | ndev_err(netdev, "Invalid MAC Address: " | 4402 | e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", |
4429 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | 4403 | netdev->perm_addr[0], netdev->perm_addr[1], |
4430 | netdev->perm_addr[0], netdev->perm_addr[1], | 4404 | netdev->perm_addr[2], netdev->perm_addr[3], |
4431 | netdev->perm_addr[2], netdev->perm_addr[3], | 4405 | netdev->perm_addr[4], netdev->perm_addr[5]); |
4432 | netdev->perm_addr[4], netdev->perm_addr[5]); | ||
4433 | err = -EIO; | 4406 | err = -EIO; |
4434 | goto err_eeprom; | 4407 | goto err_eeprom; |
4435 | } | 4408 | } |
@@ -4499,8 +4472,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4499 | * is up. For all other cases, let the f/w know that the h/w is now | 4472 | * is up. For all other cases, let the f/w know that the h/w is now |
4500 | * under the control of the driver. | 4473 | * under the control of the driver. |
4501 | */ | 4474 | */ |
4502 | if (!(adapter->flags & FLAG_HAS_AMT) || | 4475 | if (!(adapter->flags & FLAG_HAS_AMT)) |
4503 | !e1000e_check_mng_mode(&adapter->hw)) | ||
4504 | e1000_get_hw_control(adapter); | 4476 | e1000_get_hw_control(adapter); |
4505 | 4477 | ||
4506 | /* tell the stack to leave us alone until e1000_open() is called */ | 4478 | /* tell the stack to leave us alone until e1000_open() is called */ |
@@ -4517,24 +4489,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4517 | return 0; | 4489 | return 0; |
4518 | 4490 | ||
4519 | err_register: | 4491 | err_register: |
4520 | err_hw_init: | 4492 | if (!(adapter->flags & FLAG_HAS_AMT)) |
4521 | e1000_release_hw_control(adapter); | 4493 | e1000_release_hw_control(adapter); |
4522 | err_eeprom: | 4494 | err_eeprom: |
4523 | if (!e1000_check_reset_block(&adapter->hw)) | 4495 | if (!e1000_check_reset_block(&adapter->hw)) |
4524 | e1000_phy_hw_reset(&adapter->hw); | 4496 | e1000_phy_hw_reset(&adapter->hw); |
4497 | err_hw_init: | ||
4525 | 4498 | ||
4526 | if (adapter->hw.flash_address) | ||
4527 | iounmap(adapter->hw.flash_address); | ||
4528 | |||
4529 | err_flashmap: | ||
4530 | kfree(adapter->tx_ring); | 4499 | kfree(adapter->tx_ring); |
4531 | kfree(adapter->rx_ring); | 4500 | kfree(adapter->rx_ring); |
4532 | err_sw_init: | 4501 | err_sw_init: |
4502 | if (adapter->hw.flash_address) | ||
4503 | iounmap(adapter->hw.flash_address); | ||
4504 | err_flashmap: | ||
4533 | iounmap(adapter->hw.hw_addr); | 4505 | iounmap(adapter->hw.hw_addr); |
4534 | err_ioremap: | 4506 | err_ioremap: |
4535 | free_netdev(netdev); | 4507 | free_netdev(netdev); |
4536 | err_alloc_etherdev: | 4508 | err_alloc_etherdev: |
4537 | pci_release_selected_regions(pdev, bars); | 4509 | pci_release_selected_regions(pdev, |
4510 | pci_select_bars(pdev, IORESOURCE_MEM)); | ||
4538 | err_pci_reg: | 4511 | err_pci_reg: |
4539 | err_dma: | 4512 | err_dma: |
4540 | pci_disable_device(pdev); | 4513 | pci_disable_device(pdev); |
@@ -4582,7 +4555,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
4582 | iounmap(adapter->hw.hw_addr); | 4555 | iounmap(adapter->hw.hw_addr); |
4583 | if (adapter->hw.flash_address) | 4556 | if (adapter->hw.flash_address) |
4584 | iounmap(adapter->hw.flash_address); | 4557 | iounmap(adapter->hw.flash_address); |
4585 | pci_release_selected_regions(pdev, adapter->bars); | 4558 | pci_release_selected_regions(pdev, |
4559 | pci_select_bars(pdev, IORESOURCE_MEM)); | ||
4586 | 4560 | ||
4587 | free_netdev(netdev); | 4561 | free_netdev(netdev); |
4588 | 4562 | ||
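
With need_ioport gone, the driver enables and reserves only memory BARs and recomputes the BAR mask with pci_select_bars() wherever it is needed, instead of caching it in adapter->bars. The pair of functions below is a condensed, hypothetical illustration of the request/release pairing this patch standardises on (example_probe/example_remove are illustrative names, error handling is trimmed):

#include <linux/pci.h>

/* Illustrative only: not functions from the patch. */
static int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device_mem(pdev);	/* enable memory resources only */

	if (err)
		return err;

	/* reserve just the memory BARs under the driver's name */
	err = pci_request_selected_regions(pdev,
	                                   pci_select_bars(pdev, IORESOURCE_MEM),
	                                   "e1000e");
	if (err)
		pci_disable_device(pdev);
	return err;
}

static void example_remove(struct pci_dev *pdev)
{
	/* recompute the BAR mask rather than reading a cached copy */
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
}
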
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a66b92efcf80..8effc3107f9a 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -27,6 +27,7 @@ | |||
27 | *******************************************************************************/ | 27 | *******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
30 | #include <linux/pci.h> | ||
30 | 31 | ||
31 | #include "e1000.h" | 32 | #include "e1000.h" |
32 | 33 | ||
@@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value, | |||
162 | case enable_option: | 163 | case enable_option: |
163 | switch (*value) { | 164 | switch (*value) { |
164 | case OPTION_ENABLED: | 165 | case OPTION_ENABLED: |
165 | ndev_info(adapter->netdev, "%s Enabled\n", opt->name); | 166 | e_info("%s Enabled\n", opt->name); |
166 | return 0; | 167 | return 0; |
167 | case OPTION_DISABLED: | 168 | case OPTION_DISABLED: |
168 | ndev_info(adapter->netdev, "%s Disabled\n", opt->name); | 169 | e_info("%s Disabled\n", opt->name); |
169 | return 0; | 170 | return 0; |
170 | } | 171 | } |
171 | break; | 172 | break; |
172 | case range_option: | 173 | case range_option: |
173 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | 174 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { |
174 | ndev_info(adapter->netdev, | 175 | e_info("%s set to %i\n", opt->name, *value); |
175 | "%s set to %i\n", opt->name, *value); | ||
176 | return 0; | 176 | return 0; |
177 | } | 177 | } |
178 | break; | 178 | break; |
@@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value, | |||
184 | ent = &opt->arg.l.p[i]; | 184 | ent = &opt->arg.l.p[i]; |
185 | if (*value == ent->i) { | 185 | if (*value == ent->i) { |
186 | if (ent->str[0] != '\0') | 186 | if (ent->str[0] != '\0') |
187 | ndev_info(adapter->netdev, "%s\n", | 187 | e_info("%s\n", ent->str); |
188 | ent->str); | ||
189 | return 0; | 188 | return 0; |
190 | } | 189 | } |
191 | } | 190 | } |
@@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value, | |||
195 | BUG(); | 194 | BUG(); |
196 | } | 195 | } |
197 | 196 | ||
198 | ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", | 197 | e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, |
199 | opt->name, *value, opt->err); | 198 | opt->err); |
200 | *value = opt->def; | 199 | *value = opt->def; |
201 | return -1; | 200 | return -1; |
202 | } | 201 | } |
@@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value, | |||
213 | void __devinit e1000e_check_options(struct e1000_adapter *adapter) | 212 | void __devinit e1000e_check_options(struct e1000_adapter *adapter) |
214 | { | 213 | { |
215 | struct e1000_hw *hw = &adapter->hw; | 214 | struct e1000_hw *hw = &adapter->hw; |
216 | struct net_device *netdev = adapter->netdev; | ||
217 | int bd = adapter->bd_number; | 215 | int bd = adapter->bd_number; |
218 | 216 | ||
219 | if (bd >= E1000_MAX_NIC) { | 217 | if (bd >= E1000_MAX_NIC) { |
220 | ndev_notice(netdev, | 218 | e_notice("Warning: no configuration for board #%i\n", bd); |
221 | "Warning: no configuration for board #%i\n", bd); | 219 | e_notice("Using defaults for all values\n"); |
222 | ndev_notice(netdev, "Using defaults for all values\n"); | ||
223 | } | 220 | } |
224 | 221 | ||
225 | { /* Transmit Interrupt Delay */ | 222 | { /* Transmit Interrupt Delay */ |
@@ -313,19 +310,15 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) | |||
313 | adapter->itr = InterruptThrottleRate[bd]; | 310 | adapter->itr = InterruptThrottleRate[bd]; |
314 | switch (adapter->itr) { | 311 | switch (adapter->itr) { |
315 | case 0: | 312 | case 0: |
316 | ndev_info(netdev, "%s turned off\n", | 313 | e_info("%s turned off\n", opt.name); |
317 | opt.name); | ||
318 | break; | 314 | break; |
319 | case 1: | 315 | case 1: |
320 | ndev_info(netdev, | 316 | e_info("%s set to dynamic mode\n", opt.name); |
321 | "%s set to dynamic mode\n", | ||
322 | opt.name); | ||
323 | adapter->itr_setting = adapter->itr; | 317 | adapter->itr_setting = adapter->itr; |
324 | adapter->itr = 20000; | 318 | adapter->itr = 20000; |
325 | break; | 319 | break; |
326 | case 3: | 320 | case 3: |
327 | ndev_info(netdev, | 321 | e_info("%s set to dynamic conservative mode\n", |
328 | "%s set to dynamic conservative mode\n", | ||
329 | opt.name); | 322 | opt.name); |
330 | adapter->itr_setting = adapter->itr; | 323 | adapter->itr_setting = adapter->itr; |
331 | adapter->itr = 20000; | 324 | adapter->itr = 20000; |
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 56f50491a453..1f11350e16cf 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev) | |||
1283 | 1283 | ||
1284 | if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) | 1284 | if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) |
1285 | { | 1285 | { |
1286 | /* | ||
1287 | * We must make the kernel realise we had to move | ||
1288 | * into promisc mode or we start all out war on | ||
1289 | * the cable. If it was a promisc request the | ||
1290 | * flag is already set. If not we assert it. | ||
1291 | */ | ||
1292 | dev->flags|=IFF_PROMISC; | ||
1293 | |||
1294 | eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ | 1286 | eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ |
1295 | mode = inb(ioaddr + REG2); | 1287 | mode = inb(ioaddr + REG2); |
1296 | outb(mode | PRMSC_Mode, ioaddr + REG2); | 1288 | outb(mode | PRMSC_Mode, ioaddr + REG2); |
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index e3dd8b136908..bee8b3fbc565 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev) | |||
1356 | 1356 | ||
1357 | if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) | 1357 | if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) |
1358 | { | 1358 | { |
1359 | dev->flags|=IFF_PROMISC; /* Must do this */ | ||
1360 | outb(3, ioaddr + RECEIVE_MODE_REG); | 1359 | outb(3, ioaddr + RECEIVE_MODE_REG); |
1361 | } else { | 1360 | } else { |
1362 | outb(2, ioaddr + RECEIVE_MODE_REG); | 1361 | outb(2, ioaddr + RECEIVE_MODE_REG); |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 01b38b092c76..053971e5fc94 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -77,26 +77,27 @@ | |||
77 | * Hardware access: | 77 | * Hardware access: |
78 | */ | 78 | */ |
79 | 79 | ||
80 | #define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ | 80 | #define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */ |
81 | #define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ | 81 | #define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */ |
82 | #define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ | 82 | #define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */ |
83 | #define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ | 83 | #define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */ |
84 | #define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ | 84 | #define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */ |
85 | #define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ | 85 | #define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */ |
86 | #define DEV_HAS_MSI 0x00040 /* device supports MSI */ | 86 | #define DEV_HAS_MSI 0x000040 /* device supports MSI */ |
87 | #define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ | 87 | #define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */ |
88 | #define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ | 88 | #define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */ |
89 | #define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ | 89 | #define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */ |
90 | #define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ | 90 | #define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */ |
91 | #define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ | 91 | #define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */ |
92 | #define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ | 92 | #define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */ |
93 | #define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ | 93 | #define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */ |
94 | #define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ | 94 | #define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */ |
95 | #define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ | 95 | #define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */ |
96 | #define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ | 96 | #define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */ |
97 | #define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ | 97 | #define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */ |
98 | #define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ | 98 | #define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */ |
99 | #define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ | 99 | #define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */ |
100 | #define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */ | ||
100 | 101 | ||
101 | enum { | 102 | enum { |
102 | NvRegIrqStatus = 0x000, | 103 | NvRegIrqStatus = 0x000, |
@@ -248,6 +249,8 @@ enum { | |||
248 | #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 | 249 | #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 |
249 | #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 | 250 | #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 |
250 | #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 | 251 | #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 |
252 | NvRegTxPauseFrameLimit = 0x174, | ||
253 | #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 | ||
251 | NvRegMIIStatus = 0x180, | 254 | NvRegMIIStatus = 0x180, |
252 | #define NVREG_MIISTAT_ERROR 0x0001 | 255 | #define NVREG_MIISTAT_ERROR 0x0001 |
253 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 256 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
@@ -270,6 +273,9 @@ enum { | |||
270 | #define NVREG_MIICTL_WRITE 0x00400 | 273 | #define NVREG_MIICTL_WRITE 0x00400 |
271 | #define NVREG_MIICTL_ADDRSHIFT 5 | 274 | #define NVREG_MIICTL_ADDRSHIFT 5 |
272 | NvRegMIIData = 0x194, | 275 | NvRegMIIData = 0x194, |
276 | NvRegTxUnicast = 0x1a0, | ||
277 | NvRegTxMulticast = 0x1a4, | ||
278 | NvRegTxBroadcast = 0x1a8, | ||
273 | NvRegWakeUpFlags = 0x200, | 279 | NvRegWakeUpFlags = 0x200, |
274 | #define NVREG_WAKEUPFLAGS_VAL 0x7770 | 280 | #define NVREG_WAKEUPFLAGS_VAL 0x7770 |
275 | #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 | 281 | #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 |
@@ -402,6 +408,7 @@ union ring_type { | |||
402 | #define NV_RX_FRAMINGERR (1<<29) | 408 | #define NV_RX_FRAMINGERR (1<<29) |
403 | #define NV_RX_ERROR (1<<30) | 409 | #define NV_RX_ERROR (1<<30) |
404 | #define NV_RX_AVAIL (1<<31) | 410 | #define NV_RX_AVAIL (1<<31) |
411 | #define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR) | ||
405 | 412 | ||
406 | #define NV_RX2_CHECKSUMMASK (0x1C000000) | 413 | #define NV_RX2_CHECKSUMMASK (0x1C000000) |
407 | #define NV_RX2_CHECKSUM_IP (0x10000000) | 414 | #define NV_RX2_CHECKSUM_IP (0x10000000) |
@@ -419,6 +426,7 @@ union ring_type { | |||
419 | /* error and avail are the same for both */ | 426 | /* error and avail are the same for both */ |
420 | #define NV_RX2_ERROR (1<<30) | 427 | #define NV_RX2_ERROR (1<<30) |
421 | #define NV_RX2_AVAIL (1<<31) | 428 | #define NV_RX2_AVAIL (1<<31) |
429 | #define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR) | ||
422 | 430 | ||
423 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) | 431 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) |
424 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) | 432 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) |
@@ -616,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = { | |||
616 | { "rx_bytes" }, | 624 | { "rx_bytes" }, |
617 | { "tx_pause" }, | 625 | { "tx_pause" }, |
618 | { "rx_pause" }, | 626 | { "rx_pause" }, |
619 | { "rx_drop_frame" } | 627 | { "rx_drop_frame" }, |
628 | |||
629 | /* version 3 stats */ | ||
630 | { "tx_unicast" }, | ||
631 | { "tx_multicast" }, | ||
632 | { "tx_broadcast" } | ||
620 | }; | 633 | }; |
621 | 634 | ||
622 | struct nv_ethtool_stats { | 635 | struct nv_ethtool_stats { |
@@ -652,9 +665,15 @@ struct nv_ethtool_stats { | |||
652 | u64 tx_pause; | 665 | u64 tx_pause; |
653 | u64 rx_pause; | 666 | u64 rx_pause; |
654 | u64 rx_drop_frame; | 667 | u64 rx_drop_frame; |
668 | |||
669 | /* version 3 stats */ | ||
670 | u64 tx_unicast; | ||
671 | u64 tx_multicast; | ||
672 | u64 tx_broadcast; | ||
655 | }; | 673 | }; |
656 | 674 | ||
657 | #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) | 675 | #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) |
676 | #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3) | ||
658 | #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) | 677 | #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) |
659 | 678 | ||
660 | /* diagnostics */ | 679 | /* diagnostics */ |
@@ -1628,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev) | |||
1628 | np->estats.rx_pause += readl(base + NvRegRxPause); | 1647 | np->estats.rx_pause += readl(base + NvRegRxPause); |
1629 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | 1648 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); |
1630 | } | 1649 | } |
1650 | |||
1651 | if (np->driver_data & DEV_HAS_STATISTICS_V3) { | ||
1652 | np->estats.tx_unicast += readl(base + NvRegTxUnicast); | ||
1653 | np->estats.tx_multicast += readl(base + NvRegTxMulticast); | ||
1654 | np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); | ||
1655 | } | ||
1631 | } | 1656 | } |
1632 | 1657 | ||
1633 | /* | 1658 | /* |
@@ -1641,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) | |||
1641 | struct fe_priv *np = netdev_priv(dev); | 1666 | struct fe_priv *np = netdev_priv(dev); |
1642 | 1667 | ||
1643 | /* If the nic supports hw counters then retrieve latest values */ | 1668 | /* If the nic supports hw counters then retrieve latest values */ |
1644 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { | 1669 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { |
1645 | nv_get_hw_stats(dev); | 1670 | nv_get_hw_stats(dev); |
1646 | 1671 | ||
1647 | /* copy to net_device stats */ | 1672 | /* copy to net_device stats */ |
@@ -2632,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2632 | if (likely(flags & NV_RX_DESCRIPTORVALID)) { | 2657 | if (likely(flags & NV_RX_DESCRIPTORVALID)) { |
2633 | len = flags & LEN_MASK_V1; | 2658 | len = flags & LEN_MASK_V1; |
2634 | if (unlikely(flags & NV_RX_ERROR)) { | 2659 | if (unlikely(flags & NV_RX_ERROR)) { |
2635 | if (flags & NV_RX_ERROR4) { | 2660 | if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { |
2636 | len = nv_getlen(dev, skb->data, len); | 2661 | len = nv_getlen(dev, skb->data, len); |
2637 | if (len < 0) { | 2662 | if (len < 0) { |
2638 | dev->stats.rx_errors++; | 2663 | dev->stats.rx_errors++; |
@@ -2641,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2641 | } | 2666 | } |
2642 | } | 2667 | } |
2643 | /* framing errors are soft errors */ | 2668 | /* framing errors are soft errors */ |
2644 | else if (flags & NV_RX_FRAMINGERR) { | 2669 | else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { |
2645 | if (flags & NV_RX_SUBSTRACT1) { | 2670 | if (flags & NV_RX_SUBSTRACT1) { |
2646 | len--; | 2671 | len--; |
2647 | } | 2672 | } |
@@ -2667,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2667 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { | 2692 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { |
2668 | len = flags & LEN_MASK_V2; | 2693 | len = flags & LEN_MASK_V2; |
2669 | if (unlikely(flags & NV_RX2_ERROR)) { | 2694 | if (unlikely(flags & NV_RX2_ERROR)) { |
2670 | if (flags & NV_RX2_ERROR4) { | 2695 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { |
2671 | len = nv_getlen(dev, skb->data, len); | 2696 | len = nv_getlen(dev, skb->data, len); |
2672 | if (len < 0) { | 2697 | if (len < 0) { |
2673 | dev->stats.rx_errors++; | 2698 | dev->stats.rx_errors++; |
@@ -2676,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2676 | } | 2701 | } |
2677 | } | 2702 | } |
2678 | /* framing errors are soft errors */ | 2703 | /* framing errors are soft errors */ |
2679 | else if (flags & NV_RX2_FRAMINGERR) { | 2704 | else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { |
2680 | if (flags & NV_RX2_SUBSTRACT1) { | 2705 | if (flags & NV_RX2_SUBSTRACT1) { |
2681 | len--; | 2706 | len--; |
2682 | } | 2707 | } |
@@ -2766,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2766 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { | 2791 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { |
2767 | len = flags & LEN_MASK_V2; | 2792 | len = flags & LEN_MASK_V2; |
2768 | if (unlikely(flags & NV_RX2_ERROR)) { | 2793 | if (unlikely(flags & NV_RX2_ERROR)) { |
2769 | if (flags & NV_RX2_ERROR4) { | 2794 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { |
2770 | len = nv_getlen(dev, skb->data, len); | 2795 | len = nv_getlen(dev, skb->data, len); |
2771 | if (len < 0) { | 2796 | if (len < 0) { |
2772 | dev_kfree_skb(skb); | 2797 | dev_kfree_skb(skb); |
@@ -2774,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2774 | } | 2799 | } |
2775 | } | 2800 | } |
2776 | /* framing errors are soft errors */ | 2801 | /* framing errors are soft errors */ |
2777 | else if (flags & NV_RX2_FRAMINGERR) { | 2802 | else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { |
2778 | if (flags & NV_RX2_SUBSTRACT1) { | 2803 | if (flags & NV_RX2_SUBSTRACT1) { |
2779 | len--; | 2804 | len--; |
2780 | } | 2805 | } |
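
The receive error handling above changes from testing a single bit to comparing against the full error mask, so ERROR4 (length error) and FRAMINGERR are treated as recoverable only when no other error bit is set alongside them. The stand-alone snippet below illustrates that difference with made-up bit values rather than the real NV_RX2_* defines:

#include <stdio.h>

/* Illustrative bit values only; the real NV_RX2_* masks live in forcedeth.c. */
#define ERR_FRAMING   (1u << 0)
#define ERR_OVERFLOW  (1u << 1)
#define ERR_CRC       (1u << 2)
#define ERR_MASK      (ERR_FRAMING | ERR_OVERFLOW | ERR_CRC)

int main(void)
{
	unsigned int flags = ERR_FRAMING | ERR_CRC;	/* framing error plus a CRC error */

	/* old-style test: still treats the frame as a soft framing error */
	printf("old check: %d\n", !!(flags & ERR_FRAMING));		/* prints 1 */

	/* new-style test: framing error counts as soft only when it is the sole error */
	printf("new check: %d\n", (flags & ERR_MASK) == ERR_FRAMING);	/* prints 0 */

	return 0;
}
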
@@ -3053,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags) | |||
3053 | u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; | 3078 | u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; |
3054 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) | 3079 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) |
3055 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; | 3080 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; |
3056 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) | 3081 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { |
3057 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; | 3082 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; |
3083 | /* limit the number of tx pause frames to a default of 8 */ | ||
3084 | writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); | ||
3085 | } | ||
3058 | writel(pause_enable, base + NvRegTxPauseFrame); | 3086 | writel(pause_enable, base + NvRegTxPauseFrame); |
3059 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | 3087 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); |
3060 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | 3088 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
@@ -4740,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset) | |||
4740 | return NV_DEV_STATISTICS_V1_COUNT; | 4768 | return NV_DEV_STATISTICS_V1_COUNT; |
4741 | else if (np->driver_data & DEV_HAS_STATISTICS_V2) | 4769 | else if (np->driver_data & DEV_HAS_STATISTICS_V2) |
4742 | return NV_DEV_STATISTICS_V2_COUNT; | 4770 | return NV_DEV_STATISTICS_V2_COUNT; |
4771 | else if (np->driver_data & DEV_HAS_STATISTICS_V3) | ||
4772 | return NV_DEV_STATISTICS_V3_COUNT; | ||
4743 | else | 4773 | else |
4744 | return 0; | 4774 | return 0; |
4745 | default: | 4775 | default: |
@@ -5324,7 +5354,7 @@ static int nv_open(struct net_device *dev) | |||
5324 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 5354 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
5325 | 5355 | ||
5326 | /* start statistics timer */ | 5356 | /* start statistics timer */ |
5327 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) | 5357 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) |
5328 | mod_timer(&np->stats_poll, | 5358 | mod_timer(&np->stats_poll, |
5329 | round_jiffies(jiffies + STATS_INTERVAL)); | 5359 | round_jiffies(jiffies + STATS_INTERVAL)); |
5330 | 5360 | ||
@@ -5428,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5428 | if (err < 0) | 5458 | if (err < 0) |
5429 | goto out_disable; | 5459 | goto out_disable; |
5430 | 5460 | ||
5431 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) | 5461 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) |
5432 | np->register_size = NV_PCI_REGSZ_VER3; | 5462 | np->register_size = NV_PCI_REGSZ_VER3; |
5433 | else if (id->driver_data & DEV_HAS_STATISTICS_V1) | 5463 | else if (id->driver_data & DEV_HAS_STATISTICS_V1) |
5434 | np->register_size = NV_PCI_REGSZ_VER2; | 5464 | np->register_size = NV_PCI_REGSZ_VER2; |
@@ -6083,35 +6113,35 @@ static struct pci_device_id pci_tbl[] = { | |||
6083 | }, | 6113 | }, |
6084 | { /* MCP77 Ethernet Controller */ | 6114 | { /* MCP77 Ethernet Controller */ |
6085 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), | 6115 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), |
6086 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6116 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6087 | }, | 6117 | }, |
6088 | { /* MCP77 Ethernet Controller */ | 6118 | { /* MCP77 Ethernet Controller */ |
6089 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), | 6119 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), |
6090 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6120 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6091 | }, | 6121 | }, |
6092 | { /* MCP77 Ethernet Controller */ | 6122 | { /* MCP77 Ethernet Controller */ |
6093 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), | 6123 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), |
6094 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6124 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6095 | }, | 6125 | }, |
6096 | { /* MCP77 Ethernet Controller */ | 6126 | { /* MCP77 Ethernet Controller */ |
6097 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), | 6127 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), |
6098 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6128 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6099 | }, | 6129 | }, |
6100 | { /* MCP79 Ethernet Controller */ | 6130 | { /* MCP79 Ethernet Controller */ |
6101 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), | 6131 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), |
6102 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6132 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6103 | }, | 6133 | }, |
6104 | { /* MCP79 Ethernet Controller */ | 6134 | { /* MCP79 Ethernet Controller */ |
6105 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), | 6135 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), |
6106 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6136 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6107 | }, | 6137 | }, |
6108 | { /* MCP79 Ethernet Controller */ | 6138 | { /* MCP79 Ethernet Controller */ |
6109 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), | 6139 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), |
6110 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6140 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6111 | }, | 6141 | }, |
6112 | { /* MCP79 Ethernet Controller */ | 6142 | { /* MCP79 Ethernet Controller */ |
6113 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), | 6143 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), |
6114 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, | 6144 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, |
6115 | }, | 6145 | }, |
6116 | {0,}, | 6146 | {0,}, |
6117 | }; | 6147 | }; |
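Note on the forcedeth hunks above: the probe path picks the PCI register window size purely from the driver_data feature bits, so adding DEV_HAS_STATISTICS_V3 to the mask keeps the newer boards on the larger VER3 window even when they no longer advertise V2 statistics. A minimal sketch of that selection, using the flag and size names from the hunk (the helper wrapper and the NV_PCI_REGSZ_VER1 default are assumptions about the surrounding driver, not part of this patch):

        /* Sketch: derive the register window size from the pci_device_id
         * feature bitmask, as nv_probe() does after this change. */
        static void nv_pick_register_size(struct fe_priv *np, unsigned long driver_data)
        {
                if (driver_data & (DEV_HAS_VLAN | DEV_HAS_MSI_X | DEV_HAS_POWER_CNTRL |
                                   DEV_HAS_STATISTICS_V2 | DEV_HAS_STATISTICS_V3))
                        np->register_size = NV_PCI_REGSZ_VER3;
                else if (driver_data & DEV_HAS_STATISTICS_V1)
                        np->register_size = NV_PCI_REGSZ_VER2;
                else
                        np->register_size = NV_PCI_REGSZ_VER1;
        }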
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 0a97fc2d97ec..1c7ef812a8e3 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c | |||
@@ -126,7 +126,7 @@ out: | |||
126 | #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) | 126 | #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) |
127 | #define FCC_RX_EVENT (FCC_ENET_RXF) | 127 | #define FCC_RX_EVENT (FCC_ENET_RXF) |
128 | #define FCC_TX_EVENT (FCC_ENET_TXB) | 128 | #define FCC_TX_EVENT (FCC_ENET_TXB) |
129 | #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) | 129 | #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) |
130 | 130 | ||
131 | static int setup_data(struct net_device *dev) | 131 | static int setup_data(struct net_device *dev) |
132 | { | 132 | { |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b8394cf134e8..ca6cf6ecb37b 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state) | |||
414 | spin_unlock(&priv->rxlock); | 414 | spin_unlock(&priv->rxlock); |
415 | spin_unlock_irqrestore(&priv->txlock, flags); | 415 | spin_unlock_irqrestore(&priv->txlock, flags); |
416 | 416 | ||
417 | #ifdef CONFIG_GFAR_NAPI | ||
418 | napi_disable(&priv->napi); | 417 | napi_disable(&priv->napi); |
419 | #endif | ||
420 | 418 | ||
421 | if (magic_packet) { | 419 | if (magic_packet) { |
422 | /* Enable interrupt on Magic Packet */ | 420 | /* Enable interrupt on Magic Packet */ |
@@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev) | |||
469 | 467 | ||
470 | netif_device_attach(dev); | 468 | netif_device_attach(dev); |
471 | 469 | ||
472 | #ifdef CONFIG_GFAR_NAPI | ||
473 | napi_enable(&priv->napi); | 470 | napi_enable(&priv->napi); |
474 | #endif | ||
475 | 471 | ||
476 | return 0; | 472 | return 0; |
477 | } | 473 | } |
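With the CONFIG_GFAR_NAPI conditionals gone, suspend and resume always quiesce and re-arm NAPI. A hedged fragment of the resulting ordering in the PM path; napi_disable() can sleep while a poll is still in flight, which is why it sits outside the rx/tx spinlock section in the hunk above:

        /* Sketch of the now-unconditional NAPI handling around suspend/resume.
         * priv->napi is the driver's single NAPI context, as in the hunks. */
        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        napi_disable(&priv->napi);      /* may sleep; must not hold the locks */
        /* ... device powered down, then brought back up on resume ... */
        napi_enable(&priv->napi);       /* re-arm polling after the device is attached */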
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 3249df5e0f17..b8e25c4624d2 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
@@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev) | |||
548 | } | 548 | } |
549 | 549 | ||
550 | printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, | 550 | printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, |
551 | (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? | 551 | (tty_chars_in_buffer(ax->tty) || ax->xleft) ? |
552 | "bad line quality" : "driver error"); | 552 | "bad line quality" : "driver error"); |
553 | 553 | ||
554 | ax->xleft = 0; | 554 | ax->xleft = 0; |
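The open-coded ax->tty->ops->chars_in_buffer() call is replaced by the tty core helper. The value assumed here is that the helper tolerates a tty driver without a ->chars_in_buffer() method; a sketch of the semantics relied on (not the exact core implementation):

        /* Assumed tty_chars_in_buffer() behaviour: report the driver's count
         * when the hook exists, otherwise treat the buffer as empty instead
         * of dereferencing a NULL method. */
        static inline int chars_in_buffer_sketch(struct tty_struct *tty)
        {
                if (tty->ops->chars_in_buffer)
                        return tty->ops->chars_in_buffer(tty);
                return 0;
        }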
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index e098f234770f..bb823acc7443 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
@@ -850,7 +850,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw, | |||
850 | for (; mc_addr_count > 0; mc_addr_count--) { | 850 | for (; mc_addr_count > 0; mc_addr_count--) { |
851 | hash_value = igb_hash_mc_addr(hw, mc_addr_list); | 851 | hash_value = igb_hash_mc_addr(hw, mc_addr_list); |
852 | hw_dbg("Hash value = 0x%03X\n", hash_value); | 852 | hw_dbg("Hash value = 0x%03X\n", hash_value); |
853 | hw->mac.ops.mta_set(hw, hash_value); | 853 | igb_mta_set(hw, hash_value); |
854 | mc_addr_list += ETH_ALEN; | 854 | mc_addr_list += ETH_ALEN; |
855 | } | 855 | } |
856 | } | 856 | } |
@@ -1136,6 +1136,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) | |||
1136 | E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ | 1136 | E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ |
1137 | hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); | 1137 | hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); |
1138 | } | 1138 | } |
1139 | |||
1140 | if (hw->mac.type == e1000_82576) { | ||
1141 | reg |= E1000_PCS_LCTL_FORCE_FCTRL; | ||
1142 | igb_force_mac_fc(hw); | ||
1143 | } | ||
1144 | |||
1139 | wr32(E1000_PCS_LCTL, reg); | 1145 | wr32(E1000_PCS_LCTL, reg); |
1140 | 1146 | ||
1141 | return 0; | 1147 | return 0; |
@@ -1232,70 +1238,6 @@ out: | |||
1232 | } | 1238 | } |
1233 | 1239 | ||
1234 | /** | 1240 | /** |
1235 | * igb_translate_register_82576 - Translate the proper register offset | ||
1236 | * @reg: e1000 register to be read | ||
1237 | * | ||
1238 | * Registers in 82576 are located in different offsets than other adapters | ||
1239 | * even though they function in the same manner. This function takes in | ||
1240 | * the name of the register to read and returns the correct offset for | ||
1241 | * 82576 silicon. | ||
1242 | **/ | ||
1243 | u32 igb_translate_register_82576(u32 reg) | ||
1244 | { | ||
1245 | /* | ||
1246 | * Some of the Kawela registers are located at different | ||
1247 | * offsets than they are in older adapters. | ||
1248 | * Despite the difference in location, the registers | ||
1249 | * function in the same manner. | ||
1250 | */ | ||
1251 | switch (reg) { | ||
1252 | case E1000_TDBAL(0): | ||
1253 | reg = 0x0E000; | ||
1254 | break; | ||
1255 | case E1000_TDBAH(0): | ||
1256 | reg = 0x0E004; | ||
1257 | break; | ||
1258 | case E1000_TDLEN(0): | ||
1259 | reg = 0x0E008; | ||
1260 | break; | ||
1261 | case E1000_TDH(0): | ||
1262 | reg = 0x0E010; | ||
1263 | break; | ||
1264 | case E1000_TDT(0): | ||
1265 | reg = 0x0E018; | ||
1266 | break; | ||
1267 | case E1000_TXDCTL(0): | ||
1268 | reg = 0x0E028; | ||
1269 | break; | ||
1270 | case E1000_RDBAL(0): | ||
1271 | reg = 0x0C000; | ||
1272 | break; | ||
1273 | case E1000_RDBAH(0): | ||
1274 | reg = 0x0C004; | ||
1275 | break; | ||
1276 | case E1000_RDLEN(0): | ||
1277 | reg = 0x0C008; | ||
1278 | break; | ||
1279 | case E1000_RDH(0): | ||
1280 | reg = 0x0C010; | ||
1281 | break; | ||
1282 | case E1000_RDT(0): | ||
1283 | reg = 0x0C018; | ||
1284 | break; | ||
1285 | case E1000_RXDCTL(0): | ||
1286 | reg = 0x0C028; | ||
1287 | break; | ||
1288 | case E1000_SRRCTL(0): | ||
1289 | reg = 0x0C00C; | ||
1290 | break; | ||
1291 | default: | ||
1292 | break; | ||
1293 | } | ||
1294 | |||
1295 | return reg; | ||
1296 | } | ||
1297 | |||
1298 | /** | ||
1299 | * igb_reset_init_script_82575 - Inits HW defaults after reset | 1241 | * igb_reset_init_script_82575 - Inits HW defaults after reset |
1300 | * @hw: pointer to the HW structure | 1242 | * @hw: pointer to the HW structure |
1301 | * | 1243 | * |
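The block added to igb_setup_fiber_serdes_link_82575() forces flow control for 82576 serdes links in two places: the PCS layer, via the E1000_PCS_LCTL_FORCE_FCTRL bit introduced in e1000_defines.h below, and the MAC, via the existing igb_force_mac_fc() helper. Condensed for readability, the forced-link path now ends like this (a restatement of the hunk, not new behaviour):

        /* 82576 only: force flow control consistently at the PCS and the MAC
         * before writing the final PCS link control value. */
        if (hw->mac.type == e1000_82576) {
                reg |= E1000_PCS_LCTL_FORCE_FCTRL;      /* PCS: force flow control */
                igb_force_mac_fc(hw);                   /* MAC: apply the same policy */
        }

        wr32(E1000_PCS_LCTL, reg);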
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index 2f848e578a24..c1928b5efe1f 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h | |||
@@ -28,7 +28,6 @@ | |||
28 | #ifndef _E1000_82575_H_ | 28 | #ifndef _E1000_82575_H_ |
29 | #define _E1000_82575_H_ | 29 | #define _E1000_82575_H_ |
30 | 30 | ||
31 | u32 igb_translate_register_82576(u32 reg); | ||
32 | void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); | 31 | void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); |
33 | extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); | 32 | extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); |
34 | extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); | 33 | extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); |
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index afdba3c9073c..ce700689fb57 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h | |||
@@ -257,6 +257,7 @@ | |||
257 | #define E1000_PCS_LCTL_FDV_FULL 8 | 257 | #define E1000_PCS_LCTL_FDV_FULL 8 |
258 | #define E1000_PCS_LCTL_FSD 0x10 | 258 | #define E1000_PCS_LCTL_FSD 0x10 |
259 | #define E1000_PCS_LCTL_FORCE_LINK 0x20 | 259 | #define E1000_PCS_LCTL_FORCE_LINK 0x20 |
260 | #define E1000_PCS_LCTL_FORCE_FCTRL 0x80 | ||
260 | #define E1000_PCS_LCTL_AN_ENABLE 0x10000 | 261 | #define E1000_PCS_LCTL_AN_ENABLE 0x10000 |
261 | #define E1000_PCS_LCTL_AN_RESTART 0x20000 | 262 | #define E1000_PCS_LCTL_AN_RESTART 0x20000 |
262 | #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 | 263 | #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 |
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 19fa4ee96f2e..a65ccc3095c3 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h | |||
@@ -420,7 +420,6 @@ struct e1000_mac_operations { | |||
420 | void (*rar_set)(struct e1000_hw *, u8 *, u32); | 420 | void (*rar_set)(struct e1000_hw *, u8 *, u32); |
421 | s32 (*read_mac_addr)(struct e1000_hw *); | 421 | s32 (*read_mac_addr)(struct e1000_hw *); |
422 | s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); | 422 | s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); |
423 | void (*mta_set)(struct e1000_hw *, u32); | ||
424 | }; | 423 | }; |
425 | 424 | ||
426 | struct e1000_phy_operations { | 425 | struct e1000_phy_operations { |
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 20408aa1f916..e18747c70bec 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c | |||
@@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
147 | * igb_init_rx_addrs - Initialize receive address's | ||
148 | * @hw: pointer to the HW structure | ||
149 | * @rar_count: receive address registers | ||
150 | * | ||
151 | * Setups the receive address registers by setting the base receive address | ||
152 | * register to the devices MAC address and clearing all the other receive | ||
153 | * address registers to 0. | ||
154 | **/ | ||
155 | void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | ||
156 | { | ||
157 | u32 i; | ||
158 | |||
159 | /* Setup the receive address */ | ||
160 | hw_dbg("Programming MAC Address into RAR[0]\n"); | ||
161 | |||
162 | hw->mac.ops.rar_set(hw, hw->mac.addr, 0); | ||
163 | |||
164 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | ||
165 | hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); | ||
166 | for (i = 1; i < rar_count; i++) { | ||
167 | array_wr32(E1000_RA, (i << 1), 0); | ||
168 | wrfl(); | ||
169 | array_wr32(E1000_RA, ((i << 1) + 1), 0); | ||
170 | wrfl(); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * igb_check_alt_mac_addr - Check for alternate MAC addr | 147 | * igb_check_alt_mac_addr - Check for alternate MAC addr |
176 | * @hw: pointer to the HW structure | 148 | * @hw: pointer to the HW structure |
177 | * | 149 | * |
@@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
271 | * current value is read, the new bit is OR'd in and the new value is | 243 | * current value is read, the new bit is OR'd in and the new value is |
272 | * written back into the register. | 244 | * written back into the register. |
273 | **/ | 245 | **/ |
274 | static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) | 246 | void igb_mta_set(struct e1000_hw *hw, u32 hash_value) |
275 | { | 247 | { |
276 | u32 hash_bit, hash_reg, mta; | 248 | u32 hash_bit, hash_reg, mta; |
277 | 249 | ||
@@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) | |||
297 | } | 269 | } |
298 | 270 | ||
299 | /** | 271 | /** |
300 | * igb_update_mc_addr_list - Update Multicast addresses | ||
301 | * @hw: pointer to the HW structure | ||
302 | * @mc_addr_list: array of multicast addresses to program | ||
303 | * @mc_addr_count: number of multicast addresses to program | ||
304 | * @rar_used_count: the first RAR register free to program | ||
305 | * @rar_count: total number of supported Receive Address Registers | ||
306 | * | ||
307 | * Updates the Receive Address Registers and Multicast Table Array. | ||
308 | * The caller must have a packed mc_addr_list of multicast addresses. | ||
309 | * The parameter rar_count will usually be hw->mac.rar_entry_count | ||
310 | * unless there are workarounds that change this. | ||
311 | **/ | ||
312 | void igb_update_mc_addr_list(struct e1000_hw *hw, | ||
313 | u8 *mc_addr_list, u32 mc_addr_count, | ||
314 | u32 rar_used_count, u32 rar_count) | ||
315 | { | ||
316 | u32 hash_value; | ||
317 | u32 i; | ||
318 | |||
319 | /* | ||
320 | * Load the first set of multicast addresses into the exact | ||
321 | * filters (RAR). If there are not enough to fill the RAR | ||
322 | * array, clear the filters. | ||
323 | */ | ||
324 | for (i = rar_used_count; i < rar_count; i++) { | ||
325 | if (mc_addr_count) { | ||
326 | hw->mac.ops.rar_set(hw, mc_addr_list, i); | ||
327 | mc_addr_count--; | ||
328 | mc_addr_list += ETH_ALEN; | ||
329 | } else { | ||
330 | array_wr32(E1000_RA, i << 1, 0); | ||
331 | wrfl(); | ||
332 | array_wr32(E1000_RA, (i << 1) + 1, 0); | ||
333 | wrfl(); | ||
334 | } | ||
335 | } | ||
336 | |||
337 | /* Clear the old settings from the MTA */ | ||
338 | hw_dbg("Clearing MTA\n"); | ||
339 | for (i = 0; i < hw->mac.mta_reg_count; i++) { | ||
340 | array_wr32(E1000_MTA, i, 0); | ||
341 | wrfl(); | ||
342 | } | ||
343 | |||
344 | /* Load any remaining multicast addresses into the hash table. */ | ||
345 | for (; mc_addr_count > 0; mc_addr_count--) { | ||
346 | hash_value = igb_hash_mc_addr(hw, mc_addr_list); | ||
347 | hw_dbg("Hash value = 0x%03X\n", hash_value); | ||
348 | igb_mta_set(hw, hash_value); | ||
349 | mc_addr_list += ETH_ALEN; | ||
350 | } | ||
351 | } | ||
352 | |||
353 | /** | ||
354 | * igb_hash_mc_addr - Generate a multicast hash value | 272 | * igb_hash_mc_addr - Generate a multicast hash value |
355 | * @hw: pointer to the HW structure | 273 | * @hw: pointer to the HW structure |
356 | * @mc_addr: pointer to a multicast address | 274 | * @mc_addr: pointer to a multicast address |
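With the mta_set function pointer dropped from e1000_mac_operations, callers use igb_mta_set() directly (it is exported via e1000_mac.h in the next hunk). The comment retained above describes the read-modify-write it performs; a sketch of that mapping, with the 32-bits-per-MTA-entry split assumed from the usual e1000 multicast table layout rather than taken from this diff:

        /* Sketch: the multicast hash selects one MTA register (upper bits)
         * and one bit within it (lower 5 bits); the register is read, the
         * bit OR'd in, and the value written back and flushed. */
        static void mta_set_sketch(struct e1000_hw *hw, u32 hash_value)
        {
                u32 hash_reg, hash_bit, mta;

                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;

                mta = array_rd32(E1000_MTA, hash_reg);
                mta |= (1 << hash_bit);

                array_wr32(E1000_MTA, hash_reg, mta);
                wrfl();
        }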
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index dc2f8cce15e7..cbee6af7d912 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h | |||
@@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, | |||
51 | u16 *duplex); | 51 | u16 *duplex); |
52 | s32 igb_id_led_init(struct e1000_hw *hw); | 52 | s32 igb_id_led_init(struct e1000_hw *hw); |
53 | s32 igb_led_off(struct e1000_hw *hw); | 53 | s32 igb_led_off(struct e1000_hw *hw); |
54 | void igb_update_mc_addr_list(struct e1000_hw *hw, | ||
55 | u8 *mc_addr_list, u32 mc_addr_count, | ||
56 | u32 rar_used_count, u32 rar_count); | ||
57 | s32 igb_setup_link(struct e1000_hw *hw); | 54 | s32 igb_setup_link(struct e1000_hw *hw); |
58 | s32 igb_validate_mdi_setting(struct e1000_hw *hw); | 55 | s32 igb_validate_mdi_setting(struct e1000_hw *hw); |
59 | s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, | 56 | s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, |
@@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, | |||
62 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw); | 59 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw); |
63 | void igb_clear_vfta(struct e1000_hw *hw); | 60 | void igb_clear_vfta(struct e1000_hw *hw); |
64 | void igb_config_collision_dist(struct e1000_hw *hw); | 61 | void igb_config_collision_dist(struct e1000_hw *hw); |
65 | void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); | 62 | void igb_mta_set(struct e1000_hw *hw, u32 hash_value); |
66 | void igb_put_hw_semaphore(struct e1000_hw *hw); | 63 | void igb_put_hw_semaphore(struct e1000_hw *hw); |
67 | void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | 64 | void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); |
68 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); | 65 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); |
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index b95093d24c09..95523af26056 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h | |||
@@ -262,9 +262,6 @@ | |||
262 | #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) | 262 | #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) |
263 | #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ | 263 | #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ |
264 | 264 | ||
265 | #define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \ | ||
266 | ? reg : e1000_translate_register_82576(reg)) | ||
267 | |||
268 | #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) | 265 | #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) |
269 | #define rd32(reg) (readl(hw->hw_addr + reg)) | 266 | #define rd32(reg) (readl(hw->hw_addr + reg)) |
270 | #define wrfl() ((void)rd32(E1000_STATUS)) | 267 | #define wrfl() ((void)rd32(E1000_STATUS)) |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index b602c4dd0d14..8f66e15ec8d6 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -311,7 +311,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | |||
311 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); | 311 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); |
312 | break; | 312 | break; |
313 | case e1000_82576: | 313 | case e1000_82576: |
314 | /* Kawela uses a table-based method for assigning vectors. | 314 | /* The 82576 uses a table-based method for assigning vectors. |
315 | Each queue has a single entry in the table to which we write | 315 | Each queue has a single entry in the table to which we write |
316 | a vector number along with a "valid" bit. Sadly, the layout | 316 | a vector number along with a "valid" bit. Sadly, the layout |
317 | of the table is somewhat counterintuitive. */ | 317 | of the table is somewhat counterintuitive. */ |
@@ -720,28 +720,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter) | |||
720 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | 720 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
721 | } | 721 | } |
722 | 722 | ||
723 | static void igb_init_manageability(struct igb_adapter *adapter) | ||
724 | { | ||
725 | struct e1000_hw *hw = &adapter->hw; | ||
726 | |||
727 | if (adapter->en_mng_pt) { | ||
728 | u32 manc2h = rd32(E1000_MANC2H); | ||
729 | u32 manc = rd32(E1000_MANC); | ||
730 | |||
731 | /* enable receiving management packets to the host */ | ||
732 | /* this will probably generate destination unreachable messages | ||
733 | * from the host OS, but the packets will be handled on SMBUS */ | ||
734 | manc |= E1000_MANC_EN_MNG2HOST; | ||
735 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | ||
736 | #define E1000_MNG2HOST_PORT_664 (1 << 6) | ||
737 | manc2h |= E1000_MNG2HOST_PORT_623; | ||
738 | manc2h |= E1000_MNG2HOST_PORT_664; | ||
739 | wr32(E1000_MANC2H, manc2h); | ||
740 | |||
741 | wr32(E1000_MANC, manc); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | /** | 723 | /** |
746 | * igb_configure - configure the hardware for RX and TX | 724 | * igb_configure - configure the hardware for RX and TX |
747 | * @adapter: private board structure | 725 | * @adapter: private board structure |
@@ -755,7 +733,6 @@ static void igb_configure(struct igb_adapter *adapter) | |||
755 | igb_set_multi(netdev); | 733 | igb_set_multi(netdev); |
756 | 734 | ||
757 | igb_restore_vlan(adapter); | 735 | igb_restore_vlan(adapter); |
758 | igb_init_manageability(adapter); | ||
759 | 736 | ||
760 | igb_configure_tx(adapter); | 737 | igb_configure_tx(adapter); |
761 | igb_setup_rctl(adapter); | 738 | igb_setup_rctl(adapter); |
@@ -1372,7 +1349,8 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1372 | 1349 | ||
1373 | unregister_netdev(netdev); | 1350 | unregister_netdev(netdev); |
1374 | 1351 | ||
1375 | if (!igb_check_reset_block(&adapter->hw)) | 1352 | if (adapter->hw.phy.ops.reset_phy && |
1353 | !igb_check_reset_block(&adapter->hw)) | ||
1376 | adapter->hw.phy.ops.reset_phy(&adapter->hw); | 1354 | adapter->hw.phy.ops.reset_phy(&adapter->hw); |
1377 | 1355 | ||
1378 | igb_remove_device(&adapter->hw); | 1356 | igb_remove_device(&adapter->hw); |
@@ -4523,8 +4501,6 @@ static void igb_io_resume(struct pci_dev *pdev) | |||
4523 | struct net_device *netdev = pci_get_drvdata(pdev); | 4501 | struct net_device *netdev = pci_get_drvdata(pdev); |
4524 | struct igb_adapter *adapter = netdev_priv(netdev); | 4502 | struct igb_adapter *adapter = netdev_priv(netdev); |
4525 | 4503 | ||
4526 | igb_init_manageability(adapter); | ||
4527 | |||
4528 | if (netif_running(netdev)) { | 4504 | if (netif_running(netdev)) { |
4529 | if (igb_up(adapter)) { | 4505 | if (igb_up(adapter)) { |
4530 | dev_err(&pdev->dev, "igb_up failed after reset\n"); | 4506 | dev_err(&pdev->dev, "igb_up failed after reset\n"); |
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 591a7e4220c7..83fa9d82a004 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c | |||
@@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) { | |||
1272 | return; | 1272 | return; |
1273 | } | 1273 | } |
1274 | if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { | 1274 | if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { |
1275 | if (dev->flags & IFF_ALLMULTI) | ||
1276 | dev->flags |= IFF_PROMISC; | ||
1277 | lp->i596_config[8] &= ~0x01; | 1275 | lp->i596_config[8] &= ~0x01; |
1278 | } else { | 1276 | } else { |
1279 | lp->i596_config[8] |= 0x01; | 1277 | lp->i596_config[8] |= 0x01; |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 4cb364e67dc6..0a97c26df6ab 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c | |||
@@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev) | |||
100 | DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); | 100 | DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); |
101 | macaddr = 0; | 101 | macaddr = 0; |
102 | for (i = 0; i < 6; i++) | 102 | for (i = 0; i < 6; i++) |
103 | macaddr |= dev->dev_addr[i] << ((5 - i) * 8); | 103 | macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); |
104 | 104 | ||
105 | mace->eth.mac_addr = macaddr; | 105 | mace->eth.mac_addr = macaddr; |
106 | } | 106 | } |
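The added (u64) cast matters because dev_addr[i] promotes to int, so shifting it left by 32 or 40 bits is undefined and silently drops the high MAC bytes before they reach the 64-bit register value. A short illustration of the corrected packing:

        /* Pack a 6-byte MAC address into the low 48 bits of a 64-bit word,
         * first byte most significant.  The cast widens the operand before
         * the shift so no bits are lost. */
        u64 macaddr = 0;
        int i;

        for (i = 0; i < 6; i++)
                macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);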
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 3ab0e5289f7a..f1de38f8b742 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -3699,6 +3699,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3699 | dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); | 3699 | dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); |
3700 | goto abort_with_netdev; | 3700 | goto abort_with_netdev; |
3701 | } | 3701 | } |
3702 | (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
3702 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), | 3703 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), |
3703 | &mgp->cmd_bus, GFP_KERNEL); | 3704 | &mgp->cmd_bus, GFP_KERNEL); |
3704 | if (mgp->cmd == NULL) | 3705 | if (mgp->cmd == NULL) |
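The one-line addition sets the coherent (consistent) DMA mask alongside the streaming mask so the dma_alloc_coherent() call just below can hand back 64-bit bus addresses; the return value is cast to void because the streaming mask was already validated. A compressed sketch of the surrounding probe sequence (the driver's 64-to-32-bit fallback handling is omitted here):

        /* Streaming mask first (checked), then the matching coherent mask
         * for the command block allocated right after. */
        status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (status != 0) {
                dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
                goto abort_with_netdev;
        }
        (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

        mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
                                      &mgp->cmd_bus, GFP_KERNEL);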
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index fdbeeee07372..993721090777 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h | |||
@@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv { | |||
101 | #define MXGEFW_ETH_SEND_3 0x2c0000 | 101 | #define MXGEFW_ETH_SEND_3 0x2c0000 |
102 | #define MXGEFW_ETH_RECV_SMALL 0x300000 | 102 | #define MXGEFW_ETH_RECV_SMALL 0x300000 |
103 | #define MXGEFW_ETH_RECV_BIG 0x340000 | 103 | #define MXGEFW_ETH_RECV_BIG 0x340000 |
104 | #define MXGEFW_ETH_SEND_GO 0x380000 | ||
105 | #define MXGEFW_ETH_SEND_STOP 0x3C0000 | ||
104 | 106 | ||
105 | #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) | 107 | #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) |
106 | #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) | 108 | #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) |
@@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type { | |||
120 | * MXGEFW_CMD_RESET is issued */ | 122 | * MXGEFW_CMD_RESET is issued */ |
121 | 123 | ||
122 | MXGEFW_CMD_SET_INTRQ_DMA, | 124 | MXGEFW_CMD_SET_INTRQ_DMA, |
125 | /* data0 = LSW of the host address | ||
126 | * data1 = MSW of the host address | ||
127 | * data2 = slice number if multiple slices are used | ||
128 | */ | ||
129 | |||
123 | MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ | 130 | MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ |
124 | MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ | 131 | MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ |
125 | 132 | ||
@@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type { | |||
129 | MXGEFW_CMD_GET_SEND_OFFSET, | 136 | MXGEFW_CMD_GET_SEND_OFFSET, |
130 | MXGEFW_CMD_GET_SMALL_RX_OFFSET, | 137 | MXGEFW_CMD_GET_SMALL_RX_OFFSET, |
131 | MXGEFW_CMD_GET_BIG_RX_OFFSET, | 138 | MXGEFW_CMD_GET_BIG_RX_OFFSET, |
139 | /* data0 = slice number if multiple slices are used */ | ||
140 | |||
132 | MXGEFW_CMD_GET_IRQ_ACK_OFFSET, | 141 | MXGEFW_CMD_GET_IRQ_ACK_OFFSET, |
133 | MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, | 142 | MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, |
134 | 143 | ||
@@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type { | |||
200 | MXGEFW_CMD_SET_STATS_DMA_V2, | 209 | MXGEFW_CMD_SET_STATS_DMA_V2, |
201 | /* data0, data1 = bus addr, | 210 | /* data0, data1 = bus addr, |
202 | * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows | 211 | * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows |
203 | * adding new stuff to mcp_irq_data without changing the ABI */ | 212 | * adding new stuff to mcp_irq_data without changing the ABI |
213 | * | ||
214 | * If multiple slices are used, data2 contains both the size of the | ||
215 | * structure (in the lower 16 bits) and the slice number | ||
216 | * (in the upper 16 bits). | ||
217 | */ | ||
204 | 218 | ||
205 | MXGEFW_CMD_UNALIGNED_TEST, | 219 | MXGEFW_CMD_UNALIGNED_TEST, |
206 | /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned | 220 | /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned |
206 | /* same as DMA_TEST (same args) but abort with UNALIGNED on unaligned | 220
@@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type { | |||
222 | MXGEFW_CMD_GET_MAX_RSS_QUEUES, | 236 | MXGEFW_CMD_GET_MAX_RSS_QUEUES, |
223 | MXGEFW_CMD_ENABLE_RSS_QUEUES, | 237 | MXGEFW_CMD_ENABLE_RSS_QUEUES, |
224 | /* data0 = number of slices n (0, 1, ..., n-1) to enable | 238 | /* data0 = number of slices n (0, 1, ..., n-1) to enable |
225 | * data1 = interrupt mode. | 239 | * data1 = interrupt mode | use of multiple transmit queues. |
226 | * 0=share one INTx/MSI, 1=use one MSI-X per queue. | 240 | * 0=share one INTx/MSI. |
241 | * 1=use one MSI-X per queue. | ||
227 | * If all queues share one interrupt, the driver must have set | 242 | * If all queues share one interrupt, the driver must have set |
228 | * RSS_SHARED_INTERRUPT_DMA before enabling queues. | 243 | * RSS_SHARED_INTERRUPT_DMA before enabling queues. |
244 | * 2=enable both receive and send queues. | ||
245 | * Without this bit set, only one send queue (slice 0's send queue) | ||
246 | * is enabled. The receive queues are always enabled. | ||
229 | */ | 247 | */ |
230 | #define MXGEFW_SLICE_INTR_MODE_SHARED 0 | 248 | #define MXGEFW_SLICE_INTR_MODE_SHARED 0x0 |
231 | #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 | 249 | #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 |
250 | #define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 | ||
232 | 251 | ||
233 | MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, | 252 | MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, |
234 | MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, | 253 | MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, |
@@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type { | |||
250 | * 2: TCP_IPV4 (required by RSS) | 269 | * 2: TCP_IPV4 (required by RSS) |
251 | * 3: IPV4 | TCP_IPV4 (required by RSS) | 270 | * 3: IPV4 | TCP_IPV4 (required by RSS) |
252 | * 4: source port | 271 | * 4: source port |
272 | * 5: source port + destination port | ||
253 | */ | 273 | */ |
254 | #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 | 274 | #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 |
255 | #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 | 275 | #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 |
256 | #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 | 276 | #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 |
277 | #define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 | ||
278 | #define MXGEFW_RSS_HASH_TYPE_MAX 0x5 | ||
257 | 279 | ||
258 | MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, | 280 | MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, |
259 | /* Return data = the max. size of the entire headers of a IPv6 TSO packet. | 281 | /* Return data = the max. size of the entire headers of a IPv6 TSO packet. |
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type { | |||
329 | 351 | ||
330 | MXGEFW_CMD_GET_DCA_OFFSET, | 352 | MXGEFW_CMD_GET_DCA_OFFSET, |
331 | /* offset of dca control for WDMAs */ | 353 | /* offset of dca control for WDMAs */ |
354 | |||
355 | /* VMWare NetQueue commands */ | ||
355 | /* VMware NetQueue commands */ | ||
356 | MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE, | ||
357 | MXGEFW_CMD_NETQ_ADD_FILTER, | ||
358 | /* data0 = filter_id << 16 | queue << 8 | type */ | ||
359 | /* data1 = MS4 of MAC Addr */ | ||
360 | /* data2 = LS2_MAC << 16 | VLAN_tag */ | ||
361 | MXGEFW_CMD_NETQ_DEL_FILTER, | ||
362 | /* data0 = filter_id */ | ||
363 | MXGEFW_CMD_NETQ_QUERY1, | ||
364 | MXGEFW_CMD_NETQ_QUERY2, | ||
365 | MXGEFW_CMD_NETQ_QUERY3, | ||
366 | MXGEFW_CMD_NETQ_QUERY4, | ||
367 | |||
332 | }; | 368 | }; |
333 | 369 | ||
334 | enum myri10ge_mcp_cmd_status { | 370 | enum myri10ge_mcp_cmd_status { |
@@ -381,4 +417,10 @@ struct mcp_irq_data { | |||
381 | u8 valid; | 417 | u8 valid; |
382 | }; | 418 | }; |
383 | 419 | ||
420 | /* definitions for NETQ filter type */ | ||
421 | #define MXGEFW_NETQ_FILTERTYPE_NONE 0 | ||
422 | #define MXGEFW_NETQ_FILTERTYPE_MACADDR 1 | ||
423 | #define MXGEFW_NETQ_FILTERTYPE_VLAN 2 | ||
424 | #define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3 | ||
425 | |||
384 | #endif /* __MYRI10GE_MCP_H__ */ | 426 | #endif /* __MYRI10GE_MCP_H__ */ |
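The extended MXGEFW_CMD_ENABLE_RSS_QUEUES comment above turns data1 into a small bitmask: bit 0 selects one MSI-X vector per slice, and the new bit 1 asks the firmware to enable a send queue per slice as well. A hedged sketch of how the driver side would compose the command (myri10ge_send_cmd(), mgp and num_slices are assumptions about the driver; only the constants come from this header):

        struct myri10ge_cmd cmd;
        int status;

        /* data0: number of slices to enable; data1: interrupt / TX-queue mode. */
        cmd.data0 = num_slices;
        cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE |
                    MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
        status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd, 0);
        if (status)
                num_slices = 1; /* fall back to a single slice */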
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h index 07d65c2cbb24..a8662ea8079a 100644 --- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h | |||
@@ -35,7 +35,7 @@ struct mcp_gen_header { | |||
35 | unsigned char mcp_index; | 35 | unsigned char mcp_index; |
36 | unsigned char disable_rabbit; | 36 | unsigned char disable_rabbit; |
37 | unsigned char unaligned_tlp; | 37 | unsigned char unaligned_tlp; |
38 | unsigned char pad1; | 38 | unsigned char pcie_link_algo; |
39 | unsigned counters_addr; | 39 | unsigned counters_addr; |
40 | unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ | 40 | unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ |
41 | unsigned short handoff_id_major; /* must be equal */ | 41 | unsigned short handoff_id_major; /* must be equal */ |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 8e736614407d..93a7b9b668d5 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -508,6 +508,8 @@ typedef enum { | |||
508 | NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, | 508 | NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, |
509 | NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, | 509 | NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, |
510 | NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, | 510 | NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, |
511 | NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a, | ||
512 | NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b, | ||
511 | NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, | 513 | NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, |
512 | NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 | 514 | NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 |
513 | 515 | ||
@@ -1170,6 +1172,36 @@ typedef struct { | |||
1170 | nx_nic_intr_coalesce_data_t irq; | 1172 | nx_nic_intr_coalesce_data_t irq; |
1171 | } nx_nic_intr_coalesce_t; | 1173 | } nx_nic_intr_coalesce_t; |
1172 | 1174 | ||
1175 | #define NX_HOST_REQUEST 0x13 | ||
1176 | #define NX_NIC_REQUEST 0x14 | ||
1177 | |||
1178 | #define NX_MAC_EVENT 0x1 | ||
1179 | |||
1180 | enum { | ||
1181 | NX_NIC_H2C_OPCODE_START = 0, | ||
1182 | NX_NIC_H2C_OPCODE_CONFIG_RSS, | ||
1183 | NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL, | ||
1184 | NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE, | ||
1185 | NX_NIC_H2C_OPCODE_CONFIG_LED, | ||
1186 | NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS, | ||
1187 | NX_NIC_H2C_OPCODE_CONFIG_L2_MAC, | ||
1188 | NX_NIC_H2C_OPCODE_LRO_REQUEST, | ||
1189 | NX_NIC_H2C_OPCODE_GET_SNMP_STATS, | ||
1190 | NX_NIC_H2C_OPCODE_PROXY_START_REQUEST, | ||
1191 | NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST, | ||
1192 | NX_NIC_H2C_OPCODE_PROXY_SET_MTU, | ||
1193 | NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE, | ||
1194 | NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST, | ||
1195 | NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST, | ||
1196 | NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST, | ||
1197 | NX_NIC_H2C_OPCODE_GET_NET_STATS, | ||
1198 | NX_NIC_H2C_OPCODE_LAST | ||
1199 | }; | ||
1200 | |||
1201 | #define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ | ||
1202 | #define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ | ||
1203 | #define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ | ||
1204 | |||
1173 | typedef struct { | 1205 | typedef struct { |
1174 | u64 qhdr; | 1206 | u64 qhdr; |
1175 | u64 req_hdr; | 1207 | u64 req_hdr; |
@@ -1288,7 +1320,7 @@ struct netxen_adapter { | |||
1288 | int (*disable_phy_interrupts) (struct netxen_adapter *); | 1320 | int (*disable_phy_interrupts) (struct netxen_adapter *); |
1289 | int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); | 1321 | int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); |
1290 | int (*set_mtu) (struct netxen_adapter *, int); | 1322 | int (*set_mtu) (struct netxen_adapter *, int); |
1291 | int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); | 1323 | int (*set_promisc) (struct netxen_adapter *, u32); |
1292 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); | 1324 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); |
1293 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); | 1325 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); |
1294 | int (*init_port) (struct netxen_adapter *, int); | 1326 | int (*init_port) (struct netxen_adapter *, int); |
@@ -1465,9 +1497,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter); | |||
1465 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); | 1497 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); |
1466 | void netxen_p2_nic_set_multi(struct net_device *netdev); | 1498 | void netxen_p2_nic_set_multi(struct net_device *netdev); |
1467 | void netxen_p3_nic_set_multi(struct net_device *netdev); | 1499 | void netxen_p3_nic_set_multi(struct net_device *netdev); |
1500 | int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); | ||
1468 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter); | 1501 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter); |
1469 | 1502 | ||
1470 | u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); | 1503 | int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); |
1471 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); | 1504 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); |
1472 | 1505 | ||
1473 | int netxen_nic_set_mac(struct net_device *netdev, void *p); | 1506 | int netxen_nic_set_mac(struct net_device *netdev, void *p); |
@@ -1502,7 +1535,9 @@ static const struct netxen_brdinfo netxen_boards[] = { | |||
1502 | {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, | 1535 | {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, |
1503 | {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, | 1536 | {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, |
1504 | {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, | 1537 | {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, |
1505 | {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, | 1538 | {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, |
1539 | {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, | ||
1540 | {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, | ||
1506 | {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, | 1541 | {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, |
1507 | {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} | 1542 | {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} |
1508 | }; | 1543 | }; |
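The new NX_HOST_REQUEST/NX_NIC_REQUEST codes, H2C opcodes and VPORT miss modes added above are consumed when building firmware request descriptors; the helper that does so appears in the netxen_nic_hw.c hunk further below. A condensed sketch of the header packing it uses (field layout taken directly from that hunk):

        /* Compose a host-to-card request: request class in the queue header,
         * opcode and originating port in the request header, arguments in
         * the payload words. */
        nx_nic_req_t req;

        memset(&req, 0, sizeof(nx_nic_req_t));
        req.qhdr |= (NX_HOST_REQUEST << 23);
        req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
        req.req_hdr |= ((u64)adapter->portnum << 16);
        req.words[0] = cpu_to_le64(VPORT_MISS_MODE_ACCEPT_MULTI);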
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 64babc59e699..64b51643c626 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c | |||
@@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter, | |||
145 | return rcode; | 145 | return rcode; |
146 | } | 146 | } |
147 | 147 | ||
148 | u32 | 148 | int |
149 | nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) | 149 | nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) |
150 | { | 150 | { |
151 | u32 rcode = NX_RCODE_SUCCESS; | 151 | u32 rcode = NX_RCODE_SUCCESS; |
152 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; | 152 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; |
@@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) | |||
160 | 0, | 160 | 0, |
161 | NX_CDRP_CMD_SET_MTU); | 161 | NX_CDRP_CMD_SET_MTU); |
162 | 162 | ||
163 | return rcode; | 163 | if (rcode != NX_RCODE_SUCCESS) |
164 | return -EIO; | ||
165 | |||
166 | return 0; | ||
164 | } | 167 | } |
165 | 168 | ||
166 | static int | 169 | static int |
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 48ee06b6f4e9..4ad3e0844b99 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -140,18 +140,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
140 | if (netif_running(dev)) { | 140 | if (netif_running(dev)) { |
141 | ecmd->speed = adapter->link_speed; | 141 | ecmd->speed = adapter->link_speed; |
142 | ecmd->duplex = adapter->link_duplex; | 142 | ecmd->duplex = adapter->link_duplex; |
143 | } else | 143 | ecmd->autoneg = adapter->link_autoneg; |
144 | return -EIO; /* link absent */ | 144 | } |
145 | |||
145 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | 146 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { |
146 | ecmd->supported = (SUPPORTED_TP | | 147 | u32 val; |
147 | SUPPORTED_1000baseT_Full | | 148 | |
148 | SUPPORTED_10000baseT_Full); | 149 | adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4); |
149 | ecmd->advertising = (ADVERTISED_TP | | 150 | if (val == NETXEN_PORT_MODE_802_3_AP) { |
150 | ADVERTISED_1000baseT_Full | | 151 | ecmd->supported = SUPPORTED_1000baseT_Full; |
151 | ADVERTISED_10000baseT_Full); | 152 | ecmd->advertising = ADVERTISED_1000baseT_Full; |
153 | } else { | ||
154 | ecmd->supported = SUPPORTED_10000baseT_Full; | ||
155 | ecmd->advertising = ADVERTISED_10000baseT_Full; | ||
156 | } | ||
157 | |||
152 | ecmd->port = PORT_TP; | 158 | ecmd->port = PORT_TP; |
153 | 159 | ||
154 | ecmd->speed = SPEED_10000; | 160 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { |
161 | u16 pcifn = adapter->ahw.pci_func; | ||
162 | |||
163 | adapter->hw_read_wx(adapter, | ||
164 | P3_LINK_SPEED_REG(pcifn), &val, 4); | ||
165 | ecmd->speed = P3_LINK_SPEED_MHZ * | ||
166 | P3_LINK_SPEED_VAL(pcifn, val); | ||
167 | } else | ||
168 | ecmd->speed = SPEED_10000; | ||
169 | |||
155 | ecmd->duplex = DUPLEX_FULL; | 170 | ecmd->duplex = DUPLEX_FULL; |
156 | ecmd->autoneg = AUTONEG_DISABLE; | 171 | ecmd->autoneg = AUTONEG_DISABLE; |
157 | } else | 172 | } else |
@@ -192,6 +207,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
192 | break; | 207 | break; |
193 | case NETXEN_BRDTYPE_P2_SB31_10G: | 208 | case NETXEN_BRDTYPE_P2_SB31_10G: |
194 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | 209 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: |
210 | case NETXEN_BRDTYPE_P3_10G_SFP_CT: | ||
211 | case NETXEN_BRDTYPE_P3_10G_SFP_QT: | ||
195 | case NETXEN_BRDTYPE_P3_10G_XFP: | 212 | case NETXEN_BRDTYPE_P3_10G_XFP: |
196 | ecmd->supported |= SUPPORTED_FIBRE; | 213 | ecmd->supported |= SUPPORTED_FIBRE; |
197 | ecmd->advertising |= ADVERTISED_FIBRE; | 214 | ecmd->advertising |= ADVERTISED_FIBRE; |
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 3ce13e451aac..e8e8d73f6ed7 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h | |||
@@ -724,6 +724,13 @@ enum { | |||
724 | #define XG_LINK_STATE_P3(pcifn,val) \ | 724 | #define XG_LINK_STATE_P3(pcifn,val) \ |
725 | (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) | 725 | (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) |
726 | 726 | ||
727 | #define P3_LINK_SPEED_MHZ 100 | ||
728 | #define P3_LINK_SPEED_MASK 0xff | ||
729 | #define P3_LINK_SPEED_REG(pcifn) \ | ||
730 | (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) | ||
731 | #define P3_LINK_SPEED_VAL(pcifn, reg) \ | ||
732 | (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) | ||
733 | |||
727 | #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) | 734 | #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) |
728 | #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) | 735 | #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) |
729 | #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) | 736 | #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) |
@@ -836,9 +843,11 @@ enum { | |||
836 | 843 | ||
837 | #define PCIE_SETUP_FUNCTION (0x12040) | 844 | #define PCIE_SETUP_FUNCTION (0x12040) |
838 | #define PCIE_SETUP_FUNCTION2 (0x12048) | 845 | #define PCIE_SETUP_FUNCTION2 (0x12048) |
846 | #define PCIE_MISCCFG_RC (0x1206c) | ||
839 | #define PCIE_TGT_SPLIT_CHICKEN (0x12080) | 847 | #define PCIE_TGT_SPLIT_CHICKEN (0x12080) |
840 | #define PCIE_CHICKEN3 (0x120c8) | 848 | #define PCIE_CHICKEN3 (0x120c8) |
841 | 849 | ||
850 | #define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) | ||
842 | #define PCIE_MAX_MASTER_SPLIT (0x14048) | 851 | #define PCIE_MAX_MASTER_SPLIT (0x14048) |
843 | 852 | ||
844 | #define NETXEN_PORT_MODE_NONE 0 | 853 | #define NETXEN_PORT_MODE_NONE 0 |
@@ -854,6 +863,7 @@ enum { | |||
854 | #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) | 863 | #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) |
855 | 864 | ||
856 | #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) | 865 | #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) |
866 | #define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) | ||
857 | 867 | ||
858 | /* | 868 | /* |
859 | * PCI Interrupt Vector Values. | 869 | * PCI Interrupt Vector Values. |
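The new P3_LINK_SPEED_* macros treat each 32-bit speed register as four packed 8-bit per-function values in units of 100 Mbit/s; the ethtool hunk above decodes them like this (restated as a compact sketch):

        /* Per-function link speed on P3: pick the register for this PCI
         * function, extract its byte, and scale to the ethtool speed value. */
        u32 val;
        u16 pcifn = adapter->ahw.pci_func;

        adapter->hw_read_wx(adapter, P3_LINK_SPEED_REG(pcifn), &val, 4);
        ecmd->speed = P3_LINK_SPEED_MHZ * P3_LINK_SPEED_VAL(pcifn, val);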
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 96a3bc6426e2..9aa20f961618 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] = | |||
285 | #define ADDR_IN_RANGE(addr, low, high) \ | 285 | #define ADDR_IN_RANGE(addr, low, high) \ |
286 | (((addr) <= (high)) && ((addr) >= (low))) | 286 | (((addr) <= (high)) && ((addr) >= (low))) |
287 | 287 | ||
288 | #define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE | ||
289 | #define NETXEN_MIN_MTU 64 | ||
290 | #define NETXEN_ETH_FCS_SIZE 4 | ||
291 | #define NETXEN_ENET_HEADER_SIZE 14 | ||
292 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ | 288 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ |
293 | #define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) | ||
294 | #define NETXEN_NIU_HDRSIZE (0x1 << 6) | ||
295 | #define NETXEN_NIU_TLRSIZE (0x1 << 5) | ||
296 | 289 | ||
297 | #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL | 290 | #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL |
298 | #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL | 291 | #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL |
@@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, | |||
541 | return 0; | 534 | return 0; |
542 | } | 535 | } |
543 | 536 | ||
544 | #define NIC_REQUEST 0x14 | ||
545 | #define NETXEN_MAC_EVENT 0x1 | ||
546 | |||
547 | static int nx_p3_sre_macaddr_change(struct net_device *dev, | 537 | static int nx_p3_sre_macaddr_change(struct net_device *dev, |
548 | u8 *addr, unsigned op) | 538 | u8 *addr, unsigned op) |
549 | { | 539 | { |
@@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev, | |||
553 | int rv; | 543 | int rv; |
554 | 544 | ||
555 | memset(&req, 0, sizeof(nx_nic_req_t)); | 545 | memset(&req, 0, sizeof(nx_nic_req_t)); |
556 | req.qhdr |= (NIC_REQUEST << 23); | 546 | req.qhdr |= (NX_NIC_REQUEST << 23); |
557 | req.req_hdr |= NETXEN_MAC_EVENT; | 547 | req.req_hdr |= NX_MAC_EVENT; |
558 | req.req_hdr |= ((u64)adapter->portnum << 16); | 548 | req.req_hdr |= ((u64)adapter->portnum << 16); |
559 | mac_req.op = op; | 549 | mac_req.op = op; |
560 | memcpy(&mac_req.mac_addr, addr, 6); | 550 | memcpy(&mac_req.mac_addr, addr, 6); |
@@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) | |||
575 | nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; | 565 | nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; |
576 | struct dev_mc_list *mc_ptr; | 566 | struct dev_mc_list *mc_ptr; |
577 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | 567 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
578 | 568 | u32 mode = VPORT_MISS_MODE_DROP; | |
579 | adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); | ||
580 | |||
581 | /* | ||
582 | * Programming mac addresses will automaticly enabling L2 filtering. | ||
583 | * HW will replace timestamp with L2 conid when L2 filtering is | ||
584 | * enabled. This causes problem for LSA. Do not enabling L2 filtering | ||
585 | * until that problem is fixed. | ||
586 | */ | ||
587 | if ((netdev->flags & IFF_PROMISC) || | ||
588 | (netdev->mc_count > adapter->max_mc_count)) | ||
589 | return; | ||
590 | 569 | ||
591 | del_list = adapter->mac_list; | 570 | del_list = adapter->mac_list; |
592 | adapter->mac_list = NULL; | 571 | adapter->mac_list = NULL; |
593 | 572 | ||
594 | nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); | 573 | nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); |
574 | nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); | ||
575 | |||
576 | if (netdev->flags & IFF_PROMISC) { | ||
577 | mode = VPORT_MISS_MODE_ACCEPT_ALL; | ||
578 | goto send_fw_cmd; | ||
579 | } | ||
580 | |||
581 | if ((netdev->flags & IFF_ALLMULTI) || | ||
582 | (netdev->mc_count > adapter->max_mc_count)) { | ||
583 | mode = VPORT_MISS_MODE_ACCEPT_MULTI; | ||
584 | goto send_fw_cmd; | ||
585 | } | ||
586 | |||
595 | if (netdev->mc_count > 0) { | 587 | if (netdev->mc_count > 0) { |
596 | nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); | ||
597 | for (mc_ptr = netdev->mc_list; mc_ptr; | 588 | for (mc_ptr = netdev->mc_list; mc_ptr; |
598 | mc_ptr = mc_ptr->next) { | 589 | mc_ptr = mc_ptr->next) { |
599 | nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, | 590 | nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, |
600 | &add_list, &del_list); | 591 | &add_list, &del_list); |
601 | } | 592 | } |
602 | } | 593 | } |
594 | |||
595 | send_fw_cmd: | ||
596 | adapter->set_promisc(adapter, mode); | ||
603 | for (cur = del_list; cur;) { | 597 | for (cur = del_list; cur;) { |
604 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); | 598 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); |
605 | next = cur->next; | 599 | next = cur->next; |
@@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) | |||
615 | } | 609 | } |
616 | } | 610 | } |
617 | 611 | ||
612 | int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) | ||
613 | { | ||
614 | nx_nic_req_t req; | ||
615 | |||
616 | memset(&req, 0, sizeof(nx_nic_req_t)); | ||
617 | |||
618 | req.qhdr |= (NX_HOST_REQUEST << 23); | ||
619 | req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE; | ||
620 | req.req_hdr |= ((u64)adapter->portnum << 16); | ||
621 | req.words[0] = cpu_to_le64(mode); | ||
622 | |||
623 | return netxen_send_cmd_descs(adapter, | ||
624 | (struct cmd_desc_type0 *)&req, 1); | ||
625 | } | ||
626 | |||
618 | #define NETXEN_CONFIG_INTR_COALESCE 3 | 627 | #define NETXEN_CONFIG_INTR_COALESCE 3 |
619 | 628 | ||
620 | /* | 629 | /* |
@@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter) | |||
627 | 636 | ||
628 | memset(&req, 0, sizeof(nx_nic_req_t)); | 637 | memset(&req, 0, sizeof(nx_nic_req_t)); |
629 | 638 | ||
630 | req.qhdr |= (NIC_REQUEST << 23); | 639 | req.qhdr |= (NX_NIC_REQUEST << 23); |
631 | req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; | 640 | req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; |
632 | req.req_hdr |= ((u64)adapter->portnum << 16); | 641 | req.req_hdr |= ((u64)adapter->portnum << 16); |
633 | 642 | ||
@@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) | |||
653 | { | 662 | { |
654 | struct netxen_adapter *adapter = netdev_priv(netdev); | 663 | struct netxen_adapter *adapter = netdev_priv(netdev); |
655 | int max_mtu; | 664 | int max_mtu; |
665 | int rc = 0; | ||
656 | 666 | ||
657 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | 667 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) |
658 | max_mtu = P3_MAX_MTU; | 668 | max_mtu = P3_MAX_MTU; |
@@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) | |||
666 | } | 676 | } |
667 | 677 | ||
668 | if (adapter->set_mtu) | 678 | if (adapter->set_mtu) |
669 | adapter->set_mtu(adapter, mtu); | 679 | rc = adapter->set_mtu(adapter, mtu); |
670 | netdev->mtu = mtu; | ||
671 | 680 | ||
672 | mtu += MTU_FUDGE_FACTOR; | 681 | if (!rc) |
673 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | 682 | netdev->mtu = mtu; |
674 | nx_fw_cmd_set_mtu(adapter, mtu); | ||
675 | else if (adapter->set_mtu) | ||
676 | adapter->set_mtu(adapter, mtu); | ||
677 | 683 | ||
678 | return 0; | 684 | return rc; |
679 | } | 685 | } |
680 | 686 | ||
681 | int netxen_is_flash_supported(struct netxen_adapter *adapter) | 687 | int netxen_is_flash_supported(struct netxen_adapter *adapter) |
@@ -1411,7 +1417,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter, | |||
1411 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { | 1417 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { |
1412 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | 1418 | write_unlock_irqrestore(&adapter->adapter_lock, flags); |
1413 | printk(KERN_ERR "%s out of bound pci memory access. " | 1419 | printk(KERN_ERR "%s out of bound pci memory access. " |
1414 | "offset is 0x%llx\n", netxen_nic_driver_name, off); | 1420 | "offset is 0x%llx\n", netxen_nic_driver_name, |
1421 | (unsigned long long)off); | ||
1415 | return -1; | 1422 | return -1; |
1416 | } | 1423 | } |
1417 | 1424 | ||
@@ -1484,7 +1491,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off, | |||
1484 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { | 1491 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { |
1485 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | 1492 | write_unlock_irqrestore(&adapter->adapter_lock, flags); |
1486 | printk(KERN_ERR "%s out of bound pci memory access. " | 1493 | printk(KERN_ERR "%s out of bound pci memory access. " |
1487 | "offset is 0x%llx\n", netxen_nic_driver_name, off); | 1494 | "offset is 0x%llx\n", netxen_nic_driver_name, |
1495 | (unsigned long long)off); | ||
1488 | return -1; | 1496 | return -1; |
1489 | } | 1497 | } |
1490 | 1498 | ||
@@ -2016,6 +2024,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) | |||
2016 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: | 2024 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: |
2017 | case NETXEN_BRDTYPE_P3_IMEZ: | 2025 | case NETXEN_BRDTYPE_P3_IMEZ: |
2018 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | 2026 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: |
2027 | case NETXEN_BRDTYPE_P3_10G_SFP_CT: | ||
2028 | case NETXEN_BRDTYPE_P3_10G_SFP_QT: | ||
2019 | case NETXEN_BRDTYPE_P3_10G_XFP: | 2029 | case NETXEN_BRDTYPE_P3_10G_XFP: |
2020 | case NETXEN_BRDTYPE_P3_10000_BASE_T: | 2030 | case NETXEN_BRDTYPE_P3_10000_BASE_T: |
2021 | 2031 | ||
@@ -2034,6 +2044,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) | |||
2034 | default: | 2044 | default: |
2035 | printk("%s: Unknown(%x)\n", netxen_nic_driver_name, | 2045 | printk("%s: Unknown(%x)\n", netxen_nic_driver_name, |
2036 | boardinfo->board_type); | 2046 | boardinfo->board_type); |
2047 | rv = -ENODEV; | ||
2037 | break; | 2048 | break; |
2038 | } | 2049 | } |
2039 | 2050 | ||
@@ -2044,6 +2055,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) | |||
2044 | 2055 | ||
2045 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) | 2056 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) |
2046 | { | 2057 | { |
2058 | new_mtu += MTU_FUDGE_FACTOR; | ||
2047 | netxen_nic_write_w0(adapter, | 2059 | netxen_nic_write_w0(adapter, |
2048 | NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), | 2060 | NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), |
2049 | new_mtu); | 2061 | new_mtu); |
@@ -2052,7 +2064,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) | |||
2052 | 2064 | ||
2053 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) | 2065 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) |
2054 | { | 2066 | { |
2055 | new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; | 2067 | new_mtu += MTU_FUDGE_FACTOR; |
2056 | if (adapter->physical_port == 0) | 2068 | if (adapter->physical_port == 0) |
2057 | netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, | 2069 | netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, |
2058 | new_mtu); | 2070 | new_mtu); |
@@ -2074,12 +2086,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) | |||
2074 | __u32 status; | 2086 | __u32 status; |
2075 | __u32 autoneg; | 2087 | __u32 autoneg; |
2076 | __u32 mode; | 2088 | __u32 mode; |
2089 | __u32 port_mode; | ||
2077 | 2090 | ||
2078 | netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); | 2091 | netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); |
2079 | if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ | 2092 | if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ |
2093 | |||
2094 | adapter->hw_read_wx(adapter, | ||
2095 | NETXEN_PORT_MODE_ADDR, &port_mode, 4); | ||
2096 | if (port_mode == NETXEN_PORT_MODE_802_3_AP) { | ||
2097 | adapter->link_speed = SPEED_1000; | ||
2098 | adapter->link_duplex = DUPLEX_FULL; | ||
2099 | adapter->link_autoneg = AUTONEG_DISABLE; | ||
2100 | return; | ||
2101 | } | ||
2102 | |||
2080 | if (adapter->phy_read | 2103 | if (adapter->phy_read |
2081 | && adapter-> | 2104 | && adapter->phy_read(adapter, |
2082 | phy_read(adapter, | ||
2083 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | 2105 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, |
2084 | &status) == 0) { | 2106 | &status) == 0) { |
2085 | if (netxen_get_phy_link(status)) { | 2107 | if (netxen_get_phy_link(status)) { |
@@ -2109,8 +2131,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) | |||
2109 | break; | 2131 | break; |
2110 | } | 2132 | } |
2111 | if (adapter->phy_read | 2133 | if (adapter->phy_read |
2112 | && adapter-> | 2134 | && adapter->phy_read(adapter, |
2113 | phy_read(adapter, | ||
2114 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, | 2135 | NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, |
2115 | &autoneg) != 0) | 2136 | &autoneg) != 0) |
2116 | adapter->link_autoneg = autoneg; | 2137 | adapter->link_autoneg = autoneg; |
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h index b8e0030f03d7..aae737dc77a8 100644 --- a/drivers/net/netxen/netxen_nic_hw.h +++ b/drivers/net/netxen/netxen_nic_hw.h | |||
@@ -419,12 +419,9 @@ typedef enum { | |||
419 | #define netxen_get_niu_enable_ge(config_word) \ | 419 | #define netxen_get_niu_enable_ge(config_word) \ |
420 | _netxen_crb_get_bit(config_word, 1) | 420 | _netxen_crb_get_bit(config_word, 1) |
421 | 421 | ||
422 | /* Promiscous mode options (GbE mode only) */ | 422 | #define NETXEN_NIU_NON_PROMISC_MODE 0 |
423 | typedef enum { | 423 | #define NETXEN_NIU_PROMISC_MODE 1 |
424 | NETXEN_NIU_PROMISC_MODE = 0, | 424 | #define NETXEN_NIU_ALLMULTI_MODE 2 |
425 | NETXEN_NIU_NON_PROMISC_MODE, | ||
426 | NETXEN_NIU_ALLMULTI_MODE | ||
427 | } netxen_niu_prom_mode_t; | ||
428 | 425 | ||
429 | /* | 426 | /* |
430 | * NIU GB Drop CRC Register | 427 | * NIU GB Drop CRC Register |
@@ -471,9 +468,9 @@ typedef enum { | |||
471 | 468 | ||
472 | /* Set promiscuous mode for a GbE interface */ | 469 | /* Set promiscuous mode for a GbE interface */ |
473 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | 470 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, |
474 | netxen_niu_prom_mode_t mode); | 471 | u32 mode); |
475 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | 472 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, |
476 | netxen_niu_prom_mode_t mode); | 473 | u32 mode); |
477 | 474 | ||
478 | /* set the MAC address for a given MAC */ | 475 | /* set the MAC address for a given MAC */ |
479 | int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | 476 | int netxen_niu_macaddr_set(struct netxen_adapter *adapter, |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 01ab31b34a85..519fc860e17e 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -364,6 +364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | |||
364 | default: | 364 | default: |
365 | break; | 365 | break; |
366 | } | 366 | } |
367 | |||
368 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
369 | adapter->set_mtu = nx_fw_cmd_set_mtu; | ||
370 | adapter->set_promisc = netxen_p3_nic_set_promisc; | ||
371 | } | ||
367 | } | 372 | } |
368 | 373 | ||
369 | /* | 374 | /* |
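[Annotation] The netxen_nic_init.c hunk routes P3 parts to the firmware-command MTU and promiscuous handlers by overriding the function pointers chosen earlier in netxen_initialize_adapter_ops. A hedged sketch of that pick-defaults-then-override-by-revision dispatch, with made-up names:

#include <stdio.h>

struct ops_adapter {
	int revision;
	void (*set_mtu)(struct ops_adapter *adapter, int mtu);
};

static void legacy_set_mtu(struct ops_adapter *adapter, int mtu)
{
	printf("legacy register write, mtu=%d\n", mtu);
}

static void fw_cmd_set_mtu(struct ops_adapter *adapter, int mtu)
{
	printf("firmware command, mtu=%d\n", mtu);
}

/* Pick defaults first, then override for the newer revision, much as the
 * driver does for NX_IS_REVISION_P3 parts. */
static void init_adapter_ops(struct ops_adapter *adapter)
{
	adapter->set_mtu = legacy_set_mtu;

	if (adapter->revision >= 3)
		adapter->set_mtu = fw_cmd_set_mtu;
}

int main(void)
{
	struct ops_adapter p2 = { .revision = 2 };
	struct ops_adapter p3 = { .revision = 3 };

	init_adapter_ops(&p2);
	init_adapter_ops(&p3);
	p2.set_mtu(&p2, 1500);
	p3.set_mtu(&p3, 9000);
	return 0;
}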
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 91d209a8f6cb..7615c715e66e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -166,7 +166,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) | |||
166 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { | 166 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { |
167 | do { | 167 | do { |
168 | adapter->pci_write_immediate(adapter, | 168 | adapter->pci_write_immediate(adapter, |
169 | ISR_INT_TARGET_STATUS, 0xffffffff); | 169 | adapter->legacy_intr.tgt_status_reg, |
170 | 0xffffffff); | ||
170 | mask = adapter->pci_read_immediate(adapter, | 171 | mask = adapter->pci_read_immediate(adapter, |
171 | ISR_INT_VECTOR); | 172 | ISR_INT_VECTOR); |
172 | if (!(mask & 0x80)) | 173 | if (!(mask & 0x80)) |
@@ -175,7 +176,7 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) | |||
175 | } while (--retries); | 176 | } while (--retries); |
176 | 177 | ||
177 | if (!retries) { | 178 | if (!retries) { |
178 | printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n", | 179 | printk(KERN_NOTICE "%s: Failed to disable interrupt\n", |
179 | netxen_nic_driver_name); | 180 | netxen_nic_driver_name); |
180 | } | 181 | } |
181 | } else { | 182 | } else { |
@@ -190,8 +191,6 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter) | |||
190 | { | 191 | { |
191 | u32 mask; | 192 | u32 mask; |
192 | 193 | ||
193 | DPRINTK(1, INFO, "Entered ISR Enable \n"); | ||
194 | |||
195 | if (adapter->intr_scheme != -1 && | 194 | if (adapter->intr_scheme != -1 && |
196 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { | 195 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { |
197 | switch (adapter->ahw.board_type) { | 196 | switch (adapter->ahw.board_type) { |
@@ -213,16 +212,13 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter) | |||
213 | 212 | ||
214 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { | 213 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { |
215 | mask = 0xbff; | 214 | mask = 0xbff; |
216 | if (adapter->intr_scheme != -1 && | 215 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) |
217 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { | 216 | adapter->pci_write_immediate(adapter, |
217 | adapter->legacy_intr.tgt_mask_reg, mask); | ||
218 | else | ||
218 | adapter->pci_write_normalize(adapter, | 219 | adapter->pci_write_normalize(adapter, |
219 | CRB_INT_VECTOR, 0); | 220 | CRB_INT_VECTOR, 0); |
220 | } | ||
221 | adapter->pci_write_immediate(adapter, | ||
222 | ISR_INT_TARGET_MASK, mask); | ||
223 | } | 221 | } |
224 | |||
225 | DPRINTK(1, INFO, "Done with enable Int\n"); | ||
226 | } | 222 | } |
227 | 223 | ||
228 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | 224 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) |
@@ -284,6 +280,8 @@ static void netxen_check_options(struct netxen_adapter *adapter) | |||
284 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: | 280 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: |
285 | case NETXEN_BRDTYPE_P3_IMEZ: | 281 | case NETXEN_BRDTYPE_P3_IMEZ: |
286 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | 282 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: |
283 | case NETXEN_BRDTYPE_P3_10G_SFP_QT: | ||
284 | case NETXEN_BRDTYPE_P3_10G_SFP_CT: | ||
287 | case NETXEN_BRDTYPE_P3_10G_XFP: | 285 | case NETXEN_BRDTYPE_P3_10G_XFP: |
288 | case NETXEN_BRDTYPE_P3_10000_BASE_T: | 286 | case NETXEN_BRDTYPE_P3_10000_BASE_T: |
289 | adapter->msix_supported = !!use_msi_x; | 287 | adapter->msix_supported = !!use_msi_x; |
@@ -301,6 +299,10 @@ static void netxen_check_options(struct netxen_adapter *adapter) | |||
301 | case NETXEN_BRDTYPE_P3_REF_QG: | 299 | case NETXEN_BRDTYPE_P3_REF_QG: |
302 | case NETXEN_BRDTYPE_P3_4_GB: | 300 | case NETXEN_BRDTYPE_P3_4_GB: |
303 | case NETXEN_BRDTYPE_P3_4_GB_MM: | 301 | case NETXEN_BRDTYPE_P3_4_GB_MM: |
302 | adapter->msix_supported = 0; | ||
303 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; | ||
304 | break; | ||
305 | |||
304 | case NETXEN_BRDTYPE_P2_SB35_4G: | 306 | case NETXEN_BRDTYPE_P2_SB35_4G: |
305 | case NETXEN_BRDTYPE_P2_SB31_2G: | 307 | case NETXEN_BRDTYPE_P2_SB31_2G: |
306 | adapter->msix_supported = 0; | 308 | adapter->msix_supported = 0; |
@@ -700,13 +702,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
700 | adapter->status &= ~NETXEN_NETDEV_STATUS; | 702 | adapter->status &= ~NETXEN_NETDEV_STATUS; |
701 | adapter->rx_csum = 1; | 703 | adapter->rx_csum = 1; |
702 | adapter->mc_enabled = 0; | 704 | adapter->mc_enabled = 0; |
703 | if (NX_IS_REVISION_P3(revision_id)) { | 705 | if (NX_IS_REVISION_P3(revision_id)) |
704 | adapter->max_mc_count = 38; | 706 | adapter->max_mc_count = 38; |
705 | adapter->max_rds_rings = 2; | 707 | else |
706 | } else { | ||
707 | adapter->max_mc_count = 16; | 708 | adapter->max_mc_count = 16; |
708 | adapter->max_rds_rings = 3; | ||
709 | } | ||
710 | 709 | ||
711 | netdev->open = netxen_nic_open; | 710 | netdev->open = netxen_nic_open; |
712 | netdev->stop = netxen_nic_close; | 711 | netdev->stop = netxen_nic_close; |
@@ -779,10 +778,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
779 | if (adapter->portnum == 0) | 778 | if (adapter->portnum == 0) |
780 | first_driver = 1; | 779 | first_driver = 1; |
781 | } | 780 | } |
782 | adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum]; | ||
783 | adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum]; | ||
784 | netxen_nic_update_cmd_producer(adapter, 0); | ||
785 | netxen_nic_update_cmd_consumer(adapter, 0); | ||
786 | 781 | ||
787 | if (first_driver) { | 782 | if (first_driver) { |
788 | first_boot = adapter->pci_read_normalize(adapter, | 783 | first_boot = adapter->pci_read_normalize(adapter, |
@@ -1053,6 +1048,11 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1053 | return -EIO; | 1048 | return -EIO; |
1054 | } | 1049 | } |
1055 | 1050 | ||
1051 | if (adapter->fw_major < 4) | ||
1052 | adapter->max_rds_rings = 3; | ||
1053 | else | ||
1054 | adapter->max_rds_rings = 2; | ||
1055 | |||
1056 | err = netxen_alloc_sw_resources(adapter); | 1056 | err = netxen_alloc_sw_resources(adapter); |
1057 | if (err) { | 1057 | if (err) { |
1058 | printk(KERN_ERR "%s: Error in setting sw resources\n", | 1058 | printk(KERN_ERR "%s: Error in setting sw resources\n", |
@@ -1074,10 +1074,10 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1074 | crb_cmd_producer[adapter->portnum]; | 1074 | crb_cmd_producer[adapter->portnum]; |
1075 | adapter->crb_addr_cmd_consumer = | 1075 | adapter->crb_addr_cmd_consumer = |
1076 | crb_cmd_consumer[adapter->portnum]; | 1076 | crb_cmd_consumer[adapter->portnum]; |
1077 | } | ||
1078 | 1077 | ||
1079 | netxen_nic_update_cmd_producer(adapter, 0); | 1078 | netxen_nic_update_cmd_producer(adapter, 0); |
1080 | netxen_nic_update_cmd_consumer(adapter, 0); | 1079 | netxen_nic_update_cmd_consumer(adapter, 0); |
1080 | } | ||
1081 | 1081 | ||
1082 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | 1082 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { |
1083 | for (ring = 0; ring < adapter->max_rds_rings; ring++) | 1083 | for (ring = 0; ring < adapter->max_rds_rings; ring++) |
@@ -1113,9 +1113,7 @@ static int netxen_nic_open(struct net_device *netdev) | |||
1113 | netxen_nic_set_link_parameters(adapter); | 1113 | netxen_nic_set_link_parameters(adapter); |
1114 | 1114 | ||
1115 | netdev->set_multicast_list(netdev); | 1115 | netdev->set_multicast_list(netdev); |
1116 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | 1116 | if (adapter->set_mtu) |
1117 | nx_fw_cmd_set_mtu(adapter, netdev->mtu); | ||
1118 | else | ||
1119 | adapter->set_mtu(adapter, netdev->mtu); | 1117 | adapter->set_mtu(adapter, netdev->mtu); |
1120 | 1118 | ||
1121 | mod_timer(&adapter->watchdog_timer, jiffies); | 1119 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -1410,20 +1408,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) | |||
1410 | 1408 | ||
1411 | port = adapter->physical_port; | 1409 | port = adapter->physical_port; |
1412 | 1410 | ||
1413 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | 1411 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { |
1414 | val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); | 1412 | val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3); |
1415 | linkup = (val >> port) & 1; | 1413 | val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); |
1414 | linkup = (val == XG_LINK_UP_P3); | ||
1416 | } else { | 1415 | } else { |
1417 | if (adapter->fw_major < 4) { | 1416 | val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); |
1418 | val = adapter->pci_read_normalize(adapter, | 1417 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) |
1419 | CRB_XG_STATE); | 1418 | linkup = (val >> port) & 1; |
1419 | else { | ||
1420 | val = (val >> port*8) & 0xff; | 1420 | val = (val >> port*8) & 0xff; |
1421 | linkup = (val == XG_LINK_UP); | 1421 | linkup = (val == XG_LINK_UP); |
1422 | } else { | ||
1423 | val = adapter->pci_read_normalize(adapter, | ||
1424 | CRB_XG_STATE_P3); | ||
1425 | val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); | ||
1426 | linkup = (val == XG_LINK_UP_P3); | ||
1427 | } | 1422 | } |
1428 | } | 1423 | } |
1429 | 1424 | ||
@@ -1535,15 +1530,33 @@ static irqreturn_t netxen_intr(int irq, void *data) | |||
1535 | struct netxen_adapter *adapter = data; | 1530 | struct netxen_adapter *adapter = data; |
1536 | u32 our_int = 0; | 1531 | u32 our_int = 0; |
1537 | 1532 | ||
1538 | our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); | 1533 | u32 status = 0; |
1539 | /* not our interrupt */ | 1534 | |
1540 | if ((our_int & (0x80 << adapter->portnum)) == 0) | 1535 | status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); |
1536 | |||
1537 | if (!(status & adapter->legacy_intr.int_vec_bit)) | ||
1541 | return IRQ_NONE; | 1538 | return IRQ_NONE; |
1542 | 1539 | ||
1543 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { | 1540 | if (adapter->ahw.revision_id >= NX_P3_B1) { |
1544 | /* claim interrupt */ | 1541 | /* check interrupt state machine, to be sure */ |
1545 | adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, | 1542 | status = adapter->pci_read_immediate(adapter, |
1543 | ISR_INT_STATE_REG); | ||
1544 | if (!ISR_LEGACY_INT_TRIGGERED(status)) | ||
1545 | return IRQ_NONE; | ||
1546 | |||
1547 | } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
1548 | |||
1549 | our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); | ||
1550 | /* not our interrupt */ | ||
1551 | if ((our_int & (0x80 << adapter->portnum)) == 0) | ||
1552 | return IRQ_NONE; | ||
1553 | |||
1554 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { | ||
1555 | /* claim interrupt */ | ||
1556 | adapter->pci_write_normalize(adapter, | ||
1557 | CRB_INT_VECTOR, | ||
1546 | our_int & ~((u32)(0x80 << adapter->portnum))); | 1558 | our_int & ~((u32)(0x80 << adapter->portnum))); |
1559 | } | ||
1547 | } | 1560 | } |
1548 | 1561 | ||
1549 | netxen_handle_int(adapter); | 1562 | netxen_handle_int(adapter); |
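[Annotation] The netxen_intr rework first checks this function's bit in the shared legacy interrupt vector, then on older parts re-checks and claims the per-port CRB vector, returning IRQ_NONE whenever the interrupt is not ours. A rough user-space model of that claim-or-bail flow; register names and the bit layout below are invented for illustration only:

#include <stdio.h>

enum irq_return { FAKE_IRQ_NONE, FAKE_IRQ_HANDLED };

struct irq_adapter {
	int portnum;
	unsigned int isr_vector;   /* models the shared ISR vector register */
	unsigned int crb_vector;   /* models the per-port claim register */
	unsigned int int_vec_bit;  /* this function's bit in the ISR vector */
};

static enum irq_return fake_intr(struct irq_adapter *adapter)
{
	/* Quick check: is our bit set in the shared interrupt vector? */
	if (!(adapter->isr_vector & adapter->int_vec_bit))
		return FAKE_IRQ_NONE;

	/* Older parts also expose a per-port bit that must be cleared
	 * ("claimed") before the interrupt is serviced. */
	if (!(adapter->crb_vector & (0x80u << adapter->portnum)))
		return FAKE_IRQ_NONE;

	adapter->crb_vector &= ~(0x80u << adapter->portnum);

	/* ... service rings here ... */
	return FAKE_IRQ_HANDLED;
}

int main(void)
{
	struct irq_adapter a = {
		.portnum = 1,
		.isr_vector = 0x0400,
		.crb_vector = 0x80u << 1,
		.int_vec_bit = 0x0400,
	};

	printf("first pass:  %d\n", fake_intr(&a));  /* handled */
	printf("second pass: %d\n", fake_intr(&a));  /* not ours any more */
	return 0;
}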
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 4cb8f4a1cf4b..27f07f6a45b1 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c | |||
@@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | |||
610 | int i; | 610 | int i; |
611 | DECLARE_MAC_BUF(mac); | 611 | DECLARE_MAC_BUF(mac); |
612 | 612 | ||
613 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
614 | return 0; | ||
615 | |||
613 | for (i = 0; i < 10; i++) { | 616 | for (i = 0; i < 10; i++) { |
614 | temp[0] = temp[1] = 0; | 617 | temp[0] = temp[1] = 0; |
615 | memcpy(temp + 2, addr, 2); | 618 | memcpy(temp + 2, addr, 2); |
@@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) | |||
727 | __u32 mac_cfg0; | 730 | __u32 mac_cfg0; |
728 | u32 port = adapter->physical_port; | 731 | u32 port = adapter->physical_port; |
729 | 732 | ||
733 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
734 | return 0; | ||
735 | |||
730 | if (port > NETXEN_NIU_MAX_GBE_PORTS) | 736 | if (port > NETXEN_NIU_MAX_GBE_PORTS) |
731 | return -EINVAL; | 737 | return -EINVAL; |
732 | mac_cfg0 = 0; | 738 | mac_cfg0 = 0; |
@@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) | |||
743 | __u32 mac_cfg; | 749 | __u32 mac_cfg; |
744 | u32 port = adapter->physical_port; | 750 | u32 port = adapter->physical_port; |
745 | 751 | ||
752 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
753 | return 0; | ||
754 | |||
746 | if (port > NETXEN_NIU_MAX_XG_PORTS) | 755 | if (port > NETXEN_NIU_MAX_XG_PORTS) |
747 | return -EINVAL; | 756 | return -EINVAL; |
748 | 757 | ||
@@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) | |||
755 | 764 | ||
756 | /* Set promiscuous mode for a GbE interface */ | 765 | /* Set promiscuous mode for a GbE interface */ |
757 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | 766 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, |
758 | netxen_niu_prom_mode_t mode) | 767 | u32 mode) |
759 | { | 768 | { |
760 | __u32 reg; | 769 | __u32 reg; |
761 | u32 port = adapter->physical_port; | 770 | u32 port = adapter->physical_port; |
@@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | |||
819 | u8 temp[4]; | 828 | u8 temp[4]; |
820 | u32 val; | 829 | u32 val; |
821 | 830 | ||
831 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
832 | return 0; | ||
833 | |||
822 | if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) | 834 | if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) |
823 | return -EIO; | 835 | return -EIO; |
824 | 836 | ||
@@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, | |||
894 | #endif /* 0 */ | 906 | #endif /* 0 */ |
895 | 907 | ||
896 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | 908 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, |
897 | netxen_niu_prom_mode_t mode) | 909 | u32 mode) |
898 | { | 910 | { |
899 | __u32 reg; | 911 | __u32 reg; |
900 | u32 port = adapter->physical_port; | 912 | u32 port = adapter->physical_port; |
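[Annotation] Several NIU helpers above gain an early "return 0" when the part is P3, since the legacy MAC programming they do is handled by firmware on the newer silicon. The shape of that guard, as a hedged sketch with placeholder names and a made-up revision threshold:

#include <stdio.h>

#define FAKE_REVISION_P3 0x40

struct niu_adapter {
	unsigned char revision_id;
};

static int is_p3(const struct niu_adapter *adapter)
{
	return adapter->revision_id >= FAKE_REVISION_P3;
}

/* Legacy register programming is skipped entirely on P3-class parts;
 * returning 0 keeps the callers' error handling unchanged. */
static int niu_macaddr_set(struct niu_adapter *adapter, const unsigned char *addr)
{
	if (is_p3(adapter))
		return 0;

	printf("programming MAC starting %02x:%02x\n", addr[0], addr[1]);
	return 0;
}

int main(void)
{
	struct niu_adapter p2_part = { .revision_id = 0x20 };
	struct niu_adapter p3_part = { .revision_id = 0x42 };
	unsigned char mac[6] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };

	niu_macaddr_set(&p2_part, mac);
	niu_macaddr_set(&p3_part, mac);
	return 0;
}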
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h index 3bfa51b62a4f..83e5ee57bfef 100644 --- a/drivers/net/netxen/netxen_nic_phan_reg.h +++ b/drivers/net/netxen/netxen_nic_phan_reg.h | |||
@@ -95,8 +95,8 @@ | |||
95 | #define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) | 95 | #define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) |
96 | #define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) | 96 | #define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) |
97 | #define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) | 97 | #define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) |
98 | #define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) | 98 | #define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8) |
99 | #define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) | 99 | #define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec) |
100 | #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) | 100 | #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) |
101 | #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) | 101 | #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) |
102 | #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) | 102 | #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) |
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index a20005c09e07..8e0ca9f4e404 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev) | |||
648 | PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); | 648 | PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); |
649 | 649 | ||
650 | if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { | 650 | if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { |
651 | dev->flags |= IFF_PROMISC; | ||
652 | outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ | 651 | outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ |
653 | PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); | 652 | PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); |
654 | } else { | 653 | } else { |
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index a316dcc8a06d..b9a882d362da 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c | |||
@@ -621,7 +621,7 @@ static int init586(struct net_device *dev) | |||
621 | if (num_addrs > len) { | 621 | if (num_addrs > len) { |
622 | printk(KERN_ERR "%s: switching to promisc. mode\n", | 622 | printk(KERN_ERR "%s: switching to promisc. mode\n", |
623 | dev->name); | 623 | dev->name); |
624 | dev->flags |= IFF_PROMISC; | 624 | writeb(0x01, &cfg_cmd->promisc); |
625 | } | 625 | } |
626 | } | 626 | } |
627 | if (dev->flags & IFF_PROMISC) | 627 | if (dev->flags & IFF_PROMISC) |
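[Annotation] In the ni52 hunk (and the similar ni5010 and sun3_82586 ones), the driver stops forcing IFF_PROMISC into dev->flags and instead writes the promiscuous bit straight into the card's configure block when the multicast table overflows; the flags word belongs to the network core, not the driver. A small sketch of the distinction, using invented structures:

#include <stdio.h>

#define IFF_PROMISC_FLAG 0x100  /* stand-in for the real IFF_PROMISC */

struct card_cfg {
	unsigned char promisc;  /* hardware configure-block bit */
};

struct fake_dev {
	unsigned int flags;     /* owned by the network core, not the driver */
	struct card_cfg cfg;
};

static void setup_rx_filter(struct fake_dev *dev, int mc_overflow)
{
	/* If the multicast list does not fit, fall back to hardware
	 * promiscuous reception, but leave dev->flags alone. */
	if (mc_overflow || (dev->flags & IFF_PROMISC_FLAG))
		dev->cfg.promisc = 1;
	else
		dev->cfg.promisc = 0;
}

int main(void)
{
	struct fake_dev dev = { .flags = 0 };

	setup_rx_filter(&dev, 1);
	printf("promisc bit: %u, flags unchanged: 0x%x\n", dev.cfg.promisc, dev.flags);
	return 0;
}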
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index e82b37bbd6c3..3cdd07c45b6d 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #define DRV_NAME "qla3xxx" | 39 | #define DRV_NAME "qla3xxx" |
40 | #define DRV_STRING "QLogic ISP3XXX Network Driver" | 40 | #define DRV_STRING "QLogic ISP3XXX Network Driver" |
41 | #define DRV_VERSION "v2.03.00-k4" | 41 | #define DRV_VERSION "v2.03.00-k5" |
42 | #define PFX DRV_NAME " " | 42 | #define PFX DRV_NAME " " |
43 | 43 | ||
44 | static const char ql3xxx_driver_name[] = DRV_NAME; | 44 | static const char ql3xxx_driver_name[] = DRV_NAME; |
@@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3495 | case ISP_CONTROL_FN0_NET: | 3495 | case ISP_CONTROL_FN0_NET: |
3496 | qdev->mac_index = 0; | 3496 | qdev->mac_index = 0; |
3497 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | 3497 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; |
3498 | qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; | ||
3499 | qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; | ||
3500 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; | 3498 | qdev->mb_bit_mask = FN0_MA_BITS_MASK; |
3501 | qdev->PHYAddr = PORT0_PHY_ADDRESS; | 3499 | qdev->PHYAddr = PORT0_PHY_ADDRESS; |
3502 | if (port_status & PORT_STATUS_SM0) | 3500 | if (port_status & PORT_STATUS_SM0) |
@@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) | |||
3508 | case ISP_CONTROL_FN1_NET: | 3506 | case ISP_CONTROL_FN1_NET: |
3509 | qdev->mac_index = 1; | 3507 | qdev->mac_index = 1; |
3510 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; | 3508 | qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; |
3511 | qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; | ||
3512 | qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; | ||
3513 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; | 3509 | qdev->mb_bit_mask = FN1_MA_BITS_MASK; |
3514 | qdev->PHYAddr = PORT1_PHY_ADDRESS; | 3510 | qdev->PHYAddr = PORT1_PHY_ADDRESS; |
3515 | if (port_status & PORT_STATUS_SM1) | 3511 | if (port_status & PORT_STATUS_SM1) |
@@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev) | |||
3730 | return (ql_adapter_up(qdev)); | 3726 | return (ql_adapter_up(qdev)); |
3731 | } | 3727 | } |
3732 | 3728 | ||
3733 | static void ql3xxx_set_multicast_list(struct net_device *ndev) | ||
3734 | { | ||
3735 | /* | ||
3736 | * We are manually parsing the list in the net_device structure. | ||
3737 | */ | ||
3738 | return; | ||
3739 | } | ||
3740 | |||
3741 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) | 3729 | static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) |
3742 | { | 3730 | { |
3743 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); | 3731 | struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); |
@@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4007 | ndev->open = ql3xxx_open; | 3995 | ndev->open = ql3xxx_open; |
4008 | ndev->hard_start_xmit = ql3xxx_send; | 3996 | ndev->hard_start_xmit = ql3xxx_send; |
4009 | ndev->stop = ql3xxx_close; | 3997 | ndev->stop = ql3xxx_close; |
4010 | ndev->set_multicast_list = ql3xxx_set_multicast_list; | 3998 | /* ndev->set_multicast_list |
3999 | * This device is one side of a two-function adapter | ||
4000 | * (NIC and iSCSI). Promiscuous mode setting/clearing is | ||
4001 | * not allowed from the NIC side. | ||
4002 | */ | ||
4011 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); | 4003 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); |
4012 | ndev->set_mac_address = ql3xxx_set_mac_address; | 4004 | ndev->set_mac_address = ql3xxx_set_mac_address; |
4013 | ndev->tx_timeout = ql3xxx_tx_timeout; | 4005 | ndev->tx_timeout = ql3xxx_tx_timeout; |
@@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4040 | 4032 | ||
4041 | ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; | 4033 | ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; |
4042 | 4034 | ||
4043 | /* Turn off support for multicasting */ | ||
4044 | ndev->flags &= ~IFF_MULTICAST; | ||
4045 | |||
4046 | /* Record PCI bus information. */ | 4035 | /* Record PCI bus information. */ |
4047 | ql_get_board_info(qdev); | 4036 | ql_get_board_info(qdev); |
4048 | 4037 | ||
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 58a086fddec6..7113e71b15a1 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h | |||
@@ -14,24 +14,14 @@ | |||
14 | 14 | ||
15 | #define OPCODE_OB_MAC_IOCB_FN0 0x01 | 15 | #define OPCODE_OB_MAC_IOCB_FN0 0x01 |
16 | #define OPCODE_OB_MAC_IOCB_FN2 0x21 | 16 | #define OPCODE_OB_MAC_IOCB_FN2 0x21 |
17 | #define OPCODE_OB_TCP_IOCB_FN0 0x03 | ||
18 | #define OPCODE_OB_TCP_IOCB_FN2 0x23 | ||
19 | #define OPCODE_UPDATE_NCB_IOCB_FN0 0x00 | ||
20 | #define OPCODE_UPDATE_NCB_IOCB_FN2 0x20 | ||
21 | 17 | ||
22 | #define OPCODE_UPDATE_NCB_IOCB 0xF0 | ||
23 | #define OPCODE_IB_MAC_IOCB 0xF9 | 18 | #define OPCODE_IB_MAC_IOCB 0xF9 |
24 | #define OPCODE_IB_3032_MAC_IOCB 0x09 | 19 | #define OPCODE_IB_3032_MAC_IOCB 0x09 |
25 | #define OPCODE_IB_IP_IOCB 0xFA | 20 | #define OPCODE_IB_IP_IOCB 0xFA |
26 | #define OPCODE_IB_3032_IP_IOCB 0x0A | 21 | #define OPCODE_IB_3032_IP_IOCB 0x0A |
27 | #define OPCODE_IB_TCP_IOCB 0xFB | ||
28 | #define OPCODE_DUMP_PROTO_IOCB 0xFE | ||
29 | #define OPCODE_BUFFER_ALERT_IOCB 0xFB | ||
30 | 22 | ||
31 | #define OPCODE_FUNC_ID_MASK 0x30 | 23 | #define OPCODE_FUNC_ID_MASK 0x30 |
32 | #define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ | 24 | #define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ |
33 | #define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */ | ||
34 | #define UPDATE_NCB_IOCB 0x00 /* plus function bits */ | ||
35 | 25 | ||
36 | #define FN0_MA_BITS_MASK 0x00 | 26 | #define FN0_MA_BITS_MASK 0x00 |
37 | #define FN1_MA_BITS_MASK 0x80 | 27 | #define FN1_MA_BITS_MASK 0x80 |
@@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp { | |||
159 | __le32 reserved2; | 149 | __le32 reserved2; |
160 | }; | 150 | }; |
161 | 151 | ||
162 | struct ob_tcp_iocb_req { | ||
163 | u8 opcode; | ||
164 | |||
165 | u8 flags0; | ||
166 | #define OB_TCP_IOCB_REQ_P 0x80 | ||
167 | #define OB_TCP_IOCB_REQ_CI 0x20 | ||
168 | #define OB_TCP_IOCB_REQ_H 0x10 | ||
169 | #define OB_TCP_IOCB_REQ_LN 0x08 | ||
170 | #define OB_TCP_IOCB_REQ_K 0x04 | ||
171 | #define OB_TCP_IOCB_REQ_D 0x02 | ||
172 | #define OB_TCP_IOCB_REQ_I 0x01 | ||
173 | |||
174 | u8 flags1; | ||
175 | #define OB_TCP_IOCB_REQ_OSM 0x40 | ||
176 | #define OB_TCP_IOCB_REQ_URG 0x20 | ||
177 | #define OB_TCP_IOCB_REQ_ACK 0x10 | ||
178 | #define OB_TCP_IOCB_REQ_PSH 0x08 | ||
179 | #define OB_TCP_IOCB_REQ_RST 0x04 | ||
180 | #define OB_TCP_IOCB_REQ_SYN 0x02 | ||
181 | #define OB_TCP_IOCB_REQ_FIN 0x01 | ||
182 | |||
183 | u8 options_len; | ||
184 | #define OB_TCP_IOCB_REQ_OMASK 0xF0 | ||
185 | #define OB_TCP_IOCB_REQ_SHIFT 4 | ||
186 | |||
187 | __le32 transaction_id; | ||
188 | __le32 data_len; | ||
189 | __le32 hncb_ptr_low; | ||
190 | __le32 hncb_ptr_high; | ||
191 | __le32 buf_addr0_low; | ||
192 | __le32 buf_addr0_high; | ||
193 | __le32 buf_0_len; | ||
194 | __le32 buf_addr1_low; | ||
195 | __le32 buf_addr1_high; | ||
196 | __le32 buf_1_len; | ||
197 | __le32 buf_addr2_low; | ||
198 | __le32 buf_addr2_high; | ||
199 | __le32 buf_2_len; | ||
200 | __le32 time_stamp; | ||
201 | __le32 reserved1; | ||
202 | }; | ||
203 | |||
204 | struct ob_tcp_iocb_rsp { | ||
205 | u8 opcode; | ||
206 | |||
207 | u8 flags0; | ||
208 | #define OB_TCP_IOCB_RSP_C 0x20 | ||
209 | #define OB_TCP_IOCB_RSP_H 0x10 | ||
210 | #define OB_TCP_IOCB_RSP_LN 0x08 | ||
211 | #define OB_TCP_IOCB_RSP_K 0x04 | ||
212 | #define OB_TCP_IOCB_RSP_D 0x02 | ||
213 | #define OB_TCP_IOCB_RSP_I 0x01 | ||
214 | |||
215 | u8 flags1; | ||
216 | #define OB_TCP_IOCB_RSP_E 0x10 | ||
217 | #define OB_TCP_IOCB_RSP_W 0x08 | ||
218 | #define OB_TCP_IOCB_RSP_P 0x04 | ||
219 | #define OB_TCP_IOCB_RSP_T 0x02 | ||
220 | #define OB_TCP_IOCB_RSP_F 0x01 | ||
221 | |||
222 | u8 state; | ||
223 | #define OB_TCP_IOCB_RSP_SMASK 0xF0 | ||
224 | #define OB_TCP_IOCB_RSP_SHIFT 4 | ||
225 | |||
226 | __le32 transaction_id; | ||
227 | __le32 local_ncb_ptr; | ||
228 | __le32 reserved0; | ||
229 | }; | ||
230 | |||
231 | struct ib_ip_iocb_rsp { | 152 | struct ib_ip_iocb_rsp { |
232 | u8 opcode; | 153 | u8 opcode; |
233 | #define IB_IP_IOCB_RSP_3032_V 0x80 | 154 | #define IB_IP_IOCB_RSP_3032_V 0x80 |
@@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp { | |||
256 | __le32 ial_high; | 177 | __le32 ial_high; |
257 | }; | 178 | }; |
258 | 179 | ||
259 | struct ib_tcp_iocb_rsp { | ||
260 | u8 opcode; | ||
261 | u8 flags; | ||
262 | #define IB_TCP_IOCB_RSP_P 0x80 | ||
263 | #define IB_TCP_IOCB_RSP_T 0x40 | ||
264 | #define IB_TCP_IOCB_RSP_D 0x20 | ||
265 | #define IB_TCP_IOCB_RSP_N 0x10 | ||
266 | #define IB_TCP_IOCB_RSP_IP 0x03 | ||
267 | #define IB_TCP_FLAG_MASK 0xf0 | ||
268 | #define IB_TCP_FLAG_IOCB_SYN 0x00 | ||
269 | |||
270 | #define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK) | ||
271 | |||
272 | __le16 length; | ||
273 | __le32 hncb_ref_num; | ||
274 | __le32 ial_low; | ||
275 | __le32 ial_high; | ||
276 | }; | ||
277 | |||
278 | struct net_rsp_iocb { | 180 | struct net_rsp_iocb { |
279 | u8 opcode; | 181 | u8 opcode; |
280 | u8 flags; | 182 | u8 flags; |
@@ -1266,20 +1168,13 @@ struct ql3_adapter { | |||
1266 | u32 small_buf_release_cnt; | 1168 | u32 small_buf_release_cnt; |
1267 | u32 small_buf_total_size; | 1169 | u32 small_buf_total_size; |
1268 | 1170 | ||
1269 | /* ISR related, saves status for DPC. */ | ||
1270 | u32 control_status; | ||
1271 | |||
1272 | struct eeprom_data nvram_data; | 1171 | struct eeprom_data nvram_data; |
1273 | struct timer_list ioctl_timer; | ||
1274 | u32 port_link_state; | 1172 | u32 port_link_state; |
1275 | u32 last_rsp_offset; | ||
1276 | 1173 | ||
1277 | /* 4022 specific */ | 1174 | /* 4022 specific */ |
1278 | u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ | 1175 | u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ |
1279 | u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ | 1176 | u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ |
1280 | u32 mac_ob_opcode; /* Opcode to use on mac transmission */ | 1177 | u32 mac_ob_opcode; /* Opcode to use on mac transmission */ |
1281 | u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */ | ||
1282 | u32 update_ob_opcode; /* Opcode to use for updating NCB */ | ||
1283 | u32 mb_bit_mask; /* MA Bits mask to use on transmission */ | 1178 | u32 mb_bit_mask; /* MA Bits mask to use on transmission */ |
1284 | u32 numPorts; | 1179 | u32 numPorts; |
1285 | struct workqueue_struct *workqueue; | 1180 | struct workqueue_struct *workqueue; |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 6a06b9503e4f..25e62cf58d3a 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -34,6 +34,29 @@ | |||
34 | 34 | ||
35 | #include "sh_eth.h" | 35 | #include "sh_eth.h" |
36 | 36 | ||
37 | /* CPU <-> EDMAC endian convert */ | ||
38 | static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) | ||
39 | { | ||
40 | switch (mdp->edmac_endian) { | ||
41 | case EDMAC_LITTLE_ENDIAN: | ||
42 | return cpu_to_le32(x); | ||
43 | case EDMAC_BIG_ENDIAN: | ||
44 | return cpu_to_be32(x); | ||
45 | } | ||
46 | return x; | ||
47 | } | ||
48 | |||
49 | static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) | ||
50 | { | ||
51 | switch (mdp->edmac_endian) { | ||
52 | case EDMAC_LITTLE_ENDIAN: | ||
53 | return le32_to_cpu(x); | ||
54 | case EDMAC_BIG_ENDIAN: | ||
55 | return be32_to_cpu(x); | ||
56 | } | ||
57 | return x; | ||
58 | } | ||
59 | |||
37 | /* | 60 | /* |
38 | * Program the hardware MAC address from dev->dev_addr. | 61 | * Program the hardware MAC address from dev->dev_addr. |
39 | */ | 62 | */ |
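[Annotation] The new cpu_to_edmac/edmac_to_cpu helpers above select the byte order of descriptor words at run time from mdp->edmac_endian instead of hard-coding cpu_to_le32/le32_to_cpu. A self-contained sketch of the same idea using the glibc <endian.h> macros; the descriptor flag value below is invented:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

enum edmac_endian { EDMAC_LITTLE, EDMAC_BIG };

struct fake_priv {
	enum edmac_endian edmac_endian;
};

/* Mirror of the driver's helpers, using glibc byte-order macros. */
static uint32_t to_edmac(const struct fake_priv *p, uint32_t x)
{
	return (p->edmac_endian == EDMAC_BIG) ? htobe32(x) : htole32(x);
}

static uint32_t from_edmac(const struct fake_priv *p, uint32_t x)
{
	return (p->edmac_endian == EDMAC_BIG) ? be32toh(x) : le32toh(x);
}

#define RD_ACTIVE 0x80000000u  /* invented flag value */

int main(void)
{
	struct fake_priv le = { .edmac_endian = EDMAC_LITTLE };
	struct fake_priv be = { .edmac_endian = EDMAC_BIG };
	uint32_t desc_status;

	desc_status = to_edmac(&le, RD_ACTIVE);
	printf("LE descriptor word: 0x%08x, active=%d\n",
	       desc_status, !!(from_edmac(&le, desc_status) & RD_ACTIVE));

	desc_status = to_edmac(&be, RD_ACTIVE);
	printf("BE descriptor word: 0x%08x, active=%d\n",
	       desc_status, !!(from_edmac(&be, desc_status) & RD_ACTIVE));
	return 0;
}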
@@ -240,7 +263,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
240 | /* RX descriptor */ | 263 | /* RX descriptor */ |
241 | rxdesc = &mdp->rx_ring[i]; | 264 | rxdesc = &mdp->rx_ring[i]; |
242 | rxdesc->addr = (u32)skb->data & ~0x3UL; | 265 | rxdesc->addr = (u32)skb->data & ~0x3UL; |
243 | rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); | 266 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
244 | 267 | ||
245 | /* The size of the buffer is 16 byte boundary. */ | 268 | /* The size of the buffer is 16 byte boundary. */ |
246 | rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; | 269 | rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; |
@@ -262,7 +285,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
262 | mdp->dirty_rx = (u32) (i - RX_RING_SIZE); | 285 | mdp->dirty_rx = (u32) (i - RX_RING_SIZE); |
263 | 286 | ||
264 | /* Mark the last entry as wrapping the ring. */ | 287 | /* Mark the last entry as wrapping the ring. */ |
265 | rxdesc->status |= cpu_to_le32(RD_RDEL); | 288 | rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); |
266 | 289 | ||
267 | memset(mdp->tx_ring, 0, tx_ringsize); | 290 | memset(mdp->tx_ring, 0, tx_ringsize); |
268 | 291 | ||
@@ -270,10 +293,10 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
270 | for (i = 0; i < TX_RING_SIZE; i++) { | 293 | for (i = 0; i < TX_RING_SIZE; i++) { |
271 | mdp->tx_skbuff[i] = NULL; | 294 | mdp->tx_skbuff[i] = NULL; |
272 | txdesc = &mdp->tx_ring[i]; | 295 | txdesc = &mdp->tx_ring[i]; |
273 | txdesc->status = cpu_to_le32(TD_TFP); | 296 | txdesc->status = cpu_to_edmac(mdp, TD_TFP); |
274 | txdesc->buffer_length = 0; | 297 | txdesc->buffer_length = 0; |
275 | if (i == 0) { | 298 | if (i == 0) { |
276 | /* Rx descriptor address set */ | 299 | /* Tx descriptor address set */ |
277 | ctrl_outl((u32)txdesc, ioaddr + TDLAR); | 300 | ctrl_outl((u32)txdesc, ioaddr + TDLAR); |
278 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 301 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
279 | ctrl_outl((u32)txdesc, ioaddr + TDFAR); | 302 | ctrl_outl((u32)txdesc, ioaddr + TDFAR); |
@@ -281,13 +304,13 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
281 | } | 304 | } |
282 | } | 305 | } |
283 | 306 | ||
284 | /* Rx descriptor address set */ | 307 | /* Tx descriptor address set */ |
285 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 308 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
286 | ctrl_outl((u32)txdesc, ioaddr + TDFXR); | 309 | ctrl_outl((u32)txdesc, ioaddr + TDFXR); |
287 | ctrl_outl(0x1, ioaddr + TDFFR); | 310 | ctrl_outl(0x1, ioaddr + TDFFR); |
288 | #endif | 311 | #endif |
289 | 312 | ||
290 | txdesc->status |= cpu_to_le32(TD_TDLE); | 313 | txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); |
291 | } | 314 | } |
292 | 315 | ||
293 | /* Get skb and descriptor buffer */ | 316 | /* Get skb and descriptor buffer */ |
@@ -455,7 +478,7 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
455 | for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { | 478 | for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { |
456 | entry = mdp->dirty_tx % TX_RING_SIZE; | 479 | entry = mdp->dirty_tx % TX_RING_SIZE; |
457 | txdesc = &mdp->tx_ring[entry]; | 480 | txdesc = &mdp->tx_ring[entry]; |
458 | if (txdesc->status & cpu_to_le32(TD_TACT)) | 481 | if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) |
459 | break; | 482 | break; |
460 | /* Free the original skb. */ | 483 | /* Free the original skb. */ |
461 | if (mdp->tx_skbuff[entry]) { | 484 | if (mdp->tx_skbuff[entry]) { |
@@ -463,9 +486,9 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
463 | mdp->tx_skbuff[entry] = NULL; | 486 | mdp->tx_skbuff[entry] = NULL; |
464 | freeNum++; | 487 | freeNum++; |
465 | } | 488 | } |
466 | txdesc->status = cpu_to_le32(TD_TFP); | 489 | txdesc->status = cpu_to_edmac(mdp, TD_TFP); |
467 | if (entry >= TX_RING_SIZE - 1) | 490 | if (entry >= TX_RING_SIZE - 1) |
468 | txdesc->status |= cpu_to_le32(TD_TDLE); | 491 | txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); |
469 | 492 | ||
470 | mdp->stats.tx_packets++; | 493 | mdp->stats.tx_packets++; |
471 | mdp->stats.tx_bytes += txdesc->buffer_length; | 494 | mdp->stats.tx_bytes += txdesc->buffer_length; |
@@ -486,8 +509,8 @@ static int sh_eth_rx(struct net_device *ndev) | |||
486 | u32 desc_status, reserve = 0; | 509 | u32 desc_status, reserve = 0; |
487 | 510 | ||
488 | rxdesc = &mdp->rx_ring[entry]; | 511 | rxdesc = &mdp->rx_ring[entry]; |
489 | while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { | 512 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { |
490 | desc_status = le32_to_cpu(rxdesc->status); | 513 | desc_status = edmac_to_cpu(mdp, rxdesc->status); |
491 | pkt_len = rxdesc->frame_length; | 514 | pkt_len = rxdesc->frame_length; |
492 | 515 | ||
493 | if (--boguscnt < 0) | 516 | if (--boguscnt < 0) |
@@ -522,7 +545,7 @@ static int sh_eth_rx(struct net_device *ndev) | |||
522 | mdp->stats.rx_packets++; | 545 | mdp->stats.rx_packets++; |
523 | mdp->stats.rx_bytes += pkt_len; | 546 | mdp->stats.rx_bytes += pkt_len; |
524 | } | 547 | } |
525 | rxdesc->status |= cpu_to_le32(RD_RACT); | 548 | rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); |
526 | entry = (++mdp->cur_rx) % RX_RING_SIZE; | 549 | entry = (++mdp->cur_rx) % RX_RING_SIZE; |
527 | } | 550 | } |
528 | 551 | ||
@@ -552,10 +575,10 @@ static int sh_eth_rx(struct net_device *ndev) | |||
552 | } | 575 | } |
553 | if (entry >= RX_RING_SIZE - 1) | 576 | if (entry >= RX_RING_SIZE - 1) |
554 | rxdesc->status |= | 577 | rxdesc->status |= |
555 | cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL); | 578 | cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); |
556 | else | 579 | else |
557 | rxdesc->status |= | 580 | rxdesc->status |= |
558 | cpu_to_le32(RD_RACT | RD_RFP); | 581 | cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
559 | } | 582 | } |
560 | 583 | ||
561 | /* Restart Rx engine if stopped. */ | 584 | /* Restart Rx engine if stopped. */ |
@@ -931,9 +954,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
931 | txdesc->buffer_length = skb->len; | 954 | txdesc->buffer_length = skb->len; |
932 | 955 | ||
933 | if (entry >= TX_RING_SIZE - 1) | 956 | if (entry >= TX_RING_SIZE - 1) |
934 | txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); | 957 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
935 | else | 958 | else |
936 | txdesc->status |= cpu_to_le32(TD_TACT); | 959 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT); |
937 | 960 | ||
938 | mdp->cur_tx++; | 961 | mdp->cur_tx++; |
939 | 962 | ||
@@ -1159,6 +1182,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1159 | struct resource *res; | 1182 | struct resource *res; |
1160 | struct net_device *ndev = NULL; | 1183 | struct net_device *ndev = NULL; |
1161 | struct sh_eth_private *mdp; | 1184 | struct sh_eth_private *mdp; |
1185 | struct sh_eth_plat_data *pd; | ||
1162 | 1186 | ||
1163 | /* get base addr */ | 1187 | /* get base addr */ |
1164 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1188 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1196,8 +1220,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1196 | mdp = netdev_priv(ndev); | 1220 | mdp = netdev_priv(ndev); |
1197 | spin_lock_init(&mdp->lock); | 1221 | spin_lock_init(&mdp->lock); |
1198 | 1222 | ||
1223 | pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); | ||
1199 | /* get PHY ID */ | 1224 | /* get PHY ID */ |
1200 | mdp->phy_id = (int)pdev->dev.platform_data; | 1225 | mdp->phy_id = pd->phy; |
1226 | /* EDMAC endian */ | ||
1227 | mdp->edmac_endian = pd->edmac_endian; | ||
1201 | 1228 | ||
1202 | /* set function */ | 1229 | /* set function */ |
1203 | ndev->open = sh_eth_open; | 1230 | ndev->open = sh_eth_open; |
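[Annotation] The probe change above stops treating platform_data as a bare PHY id smuggled through a cast and instead reads both the PHY address and the EDMAC endianness from a struct sh_eth_plat_data (the diff only shows the phy and edmac_endian members being read; the full definition lives in asm/sh_eth.h). A hedged user-space sketch of the "opaque pointer carries a structure, not an integer" pattern, with invented names:

#include <stdio.h>

enum fake_edmac_endian { FAKE_EDMAC_LITTLE, FAKE_EDMAC_BIG };

struct fake_plat_data {
	int phy;
	enum fake_edmac_endian edmac_endian;
};

struct fake_device {
	void *platform_data;
};

/* The probe path now interprets platform_data as a structure, so new
 * board knowledge (here the endianness) travels with the PHY id. */
static void probe(struct fake_device *dev)
{
	const struct fake_plat_data *pd = dev->platform_data;

	printf("phy=%d, edmac big endian=%d\n",
	       pd->phy, pd->edmac_endian == FAKE_EDMAC_BIG);
}

int main(void)
{
	struct fake_plat_data board = { .phy = 1, .edmac_endian = FAKE_EDMAC_BIG };
	struct fake_device dev = { .platform_data = &board };

	probe(&dev);
	return 0;
}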
@@ -1217,12 +1244,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1217 | 1244 | ||
1218 | /* First device only init */ | 1245 | /* First device only init */ |
1219 | if (!devno) { | 1246 | if (!devno) { |
1247 | #if defined(ARSTR) | ||
1220 | /* reset device */ | 1248 | /* reset device */ |
1221 | ctrl_outl(ARSTR_ARSTR, ARSTR); | 1249 | ctrl_outl(ARSTR_ARSTR, ARSTR); |
1222 | mdelay(1); | 1250 | mdelay(1); |
1251 | #endif | ||
1223 | 1252 | ||
1253 | #if defined(SH_TSU_ADDR) | ||
1224 | /* TSU init (Init only)*/ | 1254 | /* TSU init (Init only)*/ |
1225 | sh_eth_tsu_init(SH_TSU_ADDR); | 1255 | sh_eth_tsu_init(SH_TSU_ADDR); |
1256 | #endif | ||
1226 | } | 1257 | } |
1227 | 1258 | ||
1228 | /* network device register */ | 1259 | /* network device register */ |
@@ -1240,8 +1271,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1240 | ndev->name, CARDNAME, (u32) ndev->base_addr); | 1271 | ndev->name, CARDNAME, (u32) ndev->base_addr); |
1241 | 1272 | ||
1242 | for (i = 0; i < 5; i++) | 1273 | for (i = 0; i < 5; i++) |
1243 | printk(KERN_INFO "%02X:", ndev->dev_addr[i]); | 1274 | printk("%02X:", ndev->dev_addr[i]); |
1244 | printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); | 1275 | printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); |
1245 | 1276 | ||
1246 | platform_set_drvdata(pdev, ndev); | 1277 | platform_set_drvdata(pdev, ndev); |
1247 | 1278 | ||
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h index 45ad1b09ca5a..73bc7181cc18 100644 --- a/drivers/net/sh_eth.h +++ b/drivers/net/sh_eth.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | 32 | ||
33 | #include <asm/sh_eth.h> | ||
34 | |||
33 | #define CARDNAME "sh-eth" | 35 | #define CARDNAME "sh-eth" |
34 | #define TX_TIMEOUT (5*HZ) | 36 | #define TX_TIMEOUT (5*HZ) |
35 | #define TX_RING_SIZE 64 /* Tx ring size */ | 37 | #define TX_RING_SIZE 64 /* Tx ring size */ |
@@ -143,10 +145,11 @@ | |||
143 | 145 | ||
144 | #else /* CONFIG_CPU_SUBTYPE_SH7763 */ | 146 | #else /* CONFIG_CPU_SUBTYPE_SH7763 */ |
145 | # define RX_OFFSET 2 /* skb offset */ | 147 | # define RX_OFFSET 2 /* skb offset */ |
148 | #ifndef CONFIG_CPU_SUBTYPE_SH7619 | ||
146 | /* Chip base address */ | 149 | /* Chip base address */ |
147 | # define SH_TSU_ADDR 0xA7000804 | 150 | # define SH_TSU_ADDR 0xA7000804 |
148 | # define ARSTR 0xA7000800 | 151 | # define ARSTR 0xA7000800 |
149 | 152 | #endif | |
150 | /* Chip Registers */ | 153 | /* Chip Registers */ |
151 | /* E-DMAC */ | 154 | /* E-DMAC */ |
152 | # define EDMR 0x0000 | 155 | # define EDMR 0x0000 |
@@ -384,7 +387,11 @@ enum FCFTR_BIT { | |||
384 | FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, | 387 | FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, |
385 | }; | 388 | }; |
386 | #define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) | 389 | #define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) |
390 | #ifndef CONFIG_CPU_SUBTYPE_SH7619 | ||
387 | #define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) | 391 | #define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) |
392 | #else | ||
393 | #define FIFO_F_D_RFD (FCFTR_RFD0) | ||
394 | #endif | ||
388 | 395 | ||
389 | /* Transfer descriptor bit */ | 396 | /* Transfer descriptor bit */ |
390 | enum TD_STS_BIT { | 397 | enum TD_STS_BIT { |
@@ -414,8 +421,10 @@ enum FELIC_MODE_BIT { | |||
414 | #ifdef CONFIG_CPU_SUBTYPE_SH7763 | 421 | #ifdef CONFIG_CPU_SUBTYPE_SH7763 |
415 | #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ | 422 | #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ |
416 | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) | 423 | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) |
424 | #elif CONFIG_CPU_SUBTYPE_SH7619 | ||
425 | #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF) | ||
417 | #else | 426 | #else |
418 | #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR ECMR_RXF | ECMR_TXF | ECMR_MCT) | 427 | #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) |
419 | #endif | 428 | #endif |
420 | 429 | ||
421 | /* ECSR */ | 430 | /* ECSR */ |
@@ -485,7 +494,11 @@ enum RPADIR_BIT { | |||
485 | 494 | ||
486 | /* FDR */ | 495 | /* FDR */ |
487 | enum FIFO_SIZE_BIT { | 496 | enum FIFO_SIZE_BIT { |
497 | #ifndef CONFIG_CPU_SUBTYPE_SH7619 | ||
488 | FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, | 498 | FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, |
499 | #else | ||
500 | FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001, | ||
501 | #endif | ||
489 | }; | 502 | }; |
490 | enum phy_offsets { | 503 | enum phy_offsets { |
491 | PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, | 504 | PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, |
@@ -601,7 +614,7 @@ struct sh_eth_txdesc { | |||
601 | #endif | 614 | #endif |
602 | u32 addr; /* TD2 */ | 615 | u32 addr; /* TD2 */ |
603 | u32 pad1; /* padding data */ | 616 | u32 pad1; /* padding data */ |
604 | }; | 617 | } __attribute__((aligned(2), packed)); |
605 | 618 | ||
606 | /* | 619 | /* |
607 | * The sh ether Rx buffer descriptors. | 620 | * The sh ether Rx buffer descriptors. |
@@ -618,7 +631,7 @@ struct sh_eth_rxdesc { | |||
618 | #endif | 631 | #endif |
619 | u32 addr; /* RD2 */ | 632 | u32 addr; /* RD2 */ |
620 | u32 pad0; /* padding data */ | 633 | u32 pad0; /* padding data */ |
621 | }; | 634 | } __attribute__((aligned(2), packed)); |
622 | 635 | ||
623 | struct sh_eth_private { | 636 | struct sh_eth_private { |
624 | dma_addr_t rx_desc_dma; | 637 | dma_addr_t rx_desc_dma; |
@@ -633,6 +646,7 @@ struct sh_eth_private { | |||
633 | u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ | 646 | u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ |
634 | u32 cur_tx, dirty_tx; | 647 | u32 cur_tx, dirty_tx; |
635 | u32 rx_buf_sz; /* Based on MTU+slack. */ | 648 | u32 rx_buf_sz; /* Based on MTU+slack. */ |
649 | int edmac_endian; | ||
636 | /* MII transceiver section. */ | 650 | /* MII transceiver section. */ |
637 | u32 phy_id; /* PHY ID */ | 651 | u32 phy_id; /* PHY ID */ |
638 | struct mii_bus *mii_bus; /* MDIO bus control */ | 652 | struct mii_bus *mii_bus; /* MDIO bus control */ |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 5257cf464f1a..7d29edcd40b4 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -275,86 +275,6 @@ static void sky2_power_aux(struct sky2_hw *hw) | |||
275 | PC_VAUX_ON | PC_VCC_OFF)); | 275 | PC_VAUX_ON | PC_VCC_OFF)); |
276 | } | 276 | } |
277 | 277 | ||
278 | static void sky2_power_state(struct sky2_hw *hw, pci_power_t state) | ||
279 | { | ||
280 | u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL); | ||
281 | int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP); | ||
282 | u32 reg; | ||
283 | |||
284 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
285 | |||
286 | switch (state) { | ||
287 | case PCI_D0: | ||
288 | break; | ||
289 | |||
290 | case PCI_D1: | ||
291 | power_control |= 1; | ||
292 | break; | ||
293 | |||
294 | case PCI_D2: | ||
295 | power_control |= 2; | ||
296 | break; | ||
297 | |||
298 | case PCI_D3hot: | ||
299 | case PCI_D3cold: | ||
300 | power_control |= 3; | ||
301 | if (hw->flags & SKY2_HW_ADV_POWER_CTL) { | ||
302 | /* additional power saving measurements */ | ||
303 | reg = sky2_pci_read32(hw, PCI_DEV_REG4); | ||
304 | |||
305 | /* set gating core clock for LTSSM in L1 state */ | ||
306 | reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) | | ||
307 | /* auto clock gated scheme controlled by CLKREQ */ | ||
308 | P_ASPM_A1_MODE_SELECT | | ||
309 | /* enable Gate Root Core Clock */ | ||
310 | P_CLK_GATE_ROOT_COR_ENA; | ||
311 | |||
312 | if (pex && (hw->flags & SKY2_HW_CLK_POWER)) { | ||
313 | /* enable Clock Power Management (CLKREQ) */ | ||
314 | u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL); | ||
315 | |||
316 | ctrl |= PCI_EXP_DEVCTL_AUX_PME; | ||
317 | sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl); | ||
318 | } else | ||
319 | /* force CLKREQ Enable in Our4 (A1b only) */ | ||
320 | reg |= P_ASPM_FORCE_CLKREQ_ENA; | ||
321 | |||
322 | /* set Mask Register for Release/Gate Clock */ | ||
323 | sky2_pci_write32(hw, PCI_DEV_REG5, | ||
324 | P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST | | ||
325 | P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE | | ||
326 | P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN); | ||
327 | } else | ||
328 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT); | ||
329 | |||
330 | /* put CPU into reset state */ | ||
331 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET); | ||
332 | if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0) | ||
333 | /* put CPU into halt state */ | ||
334 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED); | ||
335 | |||
336 | if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) { | ||
337 | reg = sky2_pci_read32(hw, PCI_DEV_REG1); | ||
338 | /* force to PCIe L1 */ | ||
339 | reg |= PCI_FORCE_PEX_L1; | ||
340 | sky2_pci_write32(hw, PCI_DEV_REG1, reg); | ||
341 | } | ||
342 | break; | ||
343 | |||
344 | default: | ||
345 | dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ", | ||
346 | state); | ||
347 | return; | ||
348 | } | ||
349 | |||
350 | power_control |= PCI_PM_CTRL_PME_ENABLE; | ||
351 | /* Finally, set the new power state. */ | ||
352 | sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control); | ||
353 | |||
354 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
355 | sky2_pci_read32(hw, B0_CTST); | ||
356 | } | ||
357 | |||
358 | static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) | 278 | static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) |
359 | { | 279 | { |
360 | u16 reg; | 280 | u16 reg; |
@@ -709,6 +629,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) | |||
709 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 629 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
710 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | 630 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
711 | sky2_pci_read32(hw, PCI_DEV_REG1); | 631 | sky2_pci_read32(hw, PCI_DEV_REG1); |
632 | |||
633 | if (hw->chip_id == CHIP_ID_YUKON_FE) | ||
634 | gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); | ||
635 | else if (hw->flags & SKY2_HW_ADV_POWER_CTL) | ||
636 | sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); | ||
712 | } | 637 | } |
713 | 638 | ||
714 | static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) | 639 | static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) |
@@ -2855,10 +2780,6 @@ static int __devinit sky2_init(struct sky2_hw *hw) | |||
2855 | hw->flags = SKY2_HW_GIGABIT | 2780 | hw->flags = SKY2_HW_GIGABIT |
2856 | | SKY2_HW_NEWER_PHY | 2781 | | SKY2_HW_NEWER_PHY |
2857 | | SKY2_HW_ADV_POWER_CTL; | 2782 | | SKY2_HW_ADV_POWER_CTL; |
2858 | |||
2859 | /* check for Rev. A1 dev 4200 */ | ||
2860 | if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0) | ||
2861 | hw->flags |= SKY2_HW_CLK_POWER; | ||
2862 | break; | 2783 | break; |
2863 | 2784 | ||
2864 | case CHIP_ID_YUKON_EX: | 2785 | case CHIP_ID_YUKON_EX: |
@@ -2914,12 +2835,6 @@ static int __devinit sky2_init(struct sky2_hw *hw) | |||
2914 | if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') | 2835 | if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') |
2915 | hw->flags |= SKY2_HW_FIBRE_PHY; | 2836 | hw->flags |= SKY2_HW_FIBRE_PHY; |
2916 | 2837 | ||
2917 | hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM); | ||
2918 | if (hw->pm_cap == 0) { | ||
2919 | dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n"); | ||
2920 | return -EIO; | ||
2921 | } | ||
2922 | |||
2923 | hw->ports = 1; | 2838 | hw->ports = 1; |
2924 | t8 = sky2_read8(hw, B2_Y2_HW_RES); | 2839 | t8 = sky2_read8(hw, B2_Y2_HW_RES); |
2925 | if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { | 2840 | if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { |
@@ -4512,7 +4427,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4512 | 4427 | ||
4513 | pci_save_state(pdev); | 4428 | pci_save_state(pdev); |
4514 | pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); | 4429 | pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); |
4515 | sky2_power_state(hw, pci_choose_state(pdev, state)); | 4430 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
4516 | 4431 | ||
4517 | return 0; | 4432 | return 0; |
4518 | } | 4433 | } |
@@ -4525,7 +4440,9 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4525 | if (!hw) | 4440 | if (!hw) |
4526 | return 0; | 4441 | return 0; |
4527 | 4442 | ||
4528 | sky2_power_state(hw, PCI_D0); | 4443 | err = pci_set_power_state(pdev, PCI_D0); |
4444 | if (err) | ||
4445 | goto out; | ||
4529 | 4446 | ||
4530 | err = pci_restore_state(pdev); | 4447 | err = pci_restore_state(pdev); |
4531 | if (err) | 4448 | if (err) |
@@ -4595,7 +4512,7 @@ static void sky2_shutdown(struct pci_dev *pdev) | |||
4595 | pci_enable_wake(pdev, PCI_D3cold, wol); | 4512 | pci_enable_wake(pdev, PCI_D3cold, wol); |
4596 | 4513 | ||
4597 | pci_disable_device(pdev); | 4514 | pci_disable_device(pdev); |
4598 | sky2_power_state(hw, PCI_D3hot); | 4515 | pci_set_power_state(pdev, PCI_D3hot); |
4599 | } | 4516 | } |
4600 | 4517 | ||
4601 | static struct pci_driver sky2_driver = { | 4518 | static struct pci_driver sky2_driver = { |
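[Annotation] The sky2 suspend/resume hunks drop the driver's hand-rolled sky2_power_state() sequencing in favour of pci_set_power_state(), and the resume path now checks its return value before restoring config space. A tiny sketch of that check-then-restore ordering, with invented stand-ins for the PCI calls:

#include <stdio.h>

/* Stand-ins for pci_set_power_state() and pci_restore_state(). */
static int fake_set_power_state(int state)
{
	return (state == 0) ? 0 : -1;  /* only D0 succeeds in this model */
}

static int fake_restore_state(void)
{
	return 0;
}

static int fake_resume(void)
{
	int err;

	/* Bring the device to D0 first; nothing else is safe before that. */
	err = fake_set_power_state(0);
	if (err)
		goto out;

	err = fake_restore_state();
	if (err)
		goto out;

	printf("device resumed\n");
out:
	return err;
}

int main(void)
{
	return fake_resume();
}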
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 4d9c4a19bb85..92fb24b27d45 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2072,9 +2072,7 @@ struct sky2_hw { | |||
2072 | #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ | 2072 | #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ |
2073 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ | 2073 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ |
2074 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ | 2074 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ |
2075 | #define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */ | ||
2076 | 2075 | ||
2077 | int pm_cap; | ||
2078 | u8 chip_id; | 2076 | u8 chip_id; |
2079 | u8 chip_rev; | 2077 | u8 chip_rev; |
2080 | u8 pmd_type; | 2078 | u8 pmd_type; |
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 9b2a7f7bb258..e531302d95f5 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c | |||
@@ -425,14 +425,11 @@ static int init586(struct net_device *dev) | |||
425 | int len = ((char *) p->iscp - (char *) ptr - 8) / 6; | 425 | int len = ((char *) p->iscp - (char *) ptr - 8) / 6; |
426 | if(num_addrs > len) { | 426 | if(num_addrs > len) { |
427 | printk("%s: switching to promisc. mode\n",dev->name); | 427 | printk("%s: switching to promisc. mode\n",dev->name); |
428 | dev->flags|=IFF_PROMISC; | 428 | cfg_cmd->promisc = 1; |
429 | } | 429 | } |
430 | } | 430 | } |
431 | if(dev->flags&IFF_PROMISC) | 431 | if(dev->flags&IFF_PROMISC) |
432 | { | 432 | cfg_cmd->promisc = 1; |
433 | cfg_cmd->promisc=1; | ||
434 | dev->flags|=IFF_PROMISC; | ||
435 | } | ||
436 | cfg_cmd->carr_coll = 0x00; | 433 | cfg_cmd->carr_coll = 0x00; |
437 | 434 | ||
438 | p->scb->cbl_offset = make16(cfg_cmd); | 435 | p->scb->cbl_offset = make16(cfg_cmd); |
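The sun3_82586 hunk above stops the driver from forcing IFF_PROMISC into dev->flags and instead only mirrors the stack-owned flag into the chip's configure command. A driver-neutral sketch of the same idea (hw_cfg is a stand-in type, not the 82586 structure):

	#include <linux/netdevice.h>

	struct hw_cfg { unsigned int promisc:1; };	/* stand-in for the chip's config block */

	static void example_sync_promisc(struct net_device *dev, struct hw_cfg *cfg)
	{
		/* read the flag the network stack owns; never write dev->flags here */
		cfg->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
	}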
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index b588c890ea70..a84ba487c713 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -1285,6 +1285,21 @@ static void check_carrier(struct work_struct *work) | |||
1285 | } | 1285 | } |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | static int pegasus_blacklisted(struct usb_device *udev) | ||
1289 | { | ||
1290 | struct usb_device_descriptor *udd = &udev->descriptor; | ||
1291 | |||
1292 | /* Special quirk to keep the driver from handling the Belkin Bluetooth | ||
1293 | * dongle which happens to have the same ID. | ||
1294 | */ | ||
1295 | if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && | ||
1296 | (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && | ||
1297 | (udd->bDeviceProtocol == 1)) | ||
1298 | return 1; | ||
1299 | |||
1300 | return 0; | ||
1301 | } | ||
1302 | |||
1288 | static int pegasus_probe(struct usb_interface *intf, | 1303 | static int pegasus_probe(struct usb_interface *intf, |
1289 | const struct usb_device_id *id) | 1304 | const struct usb_device_id *id) |
1290 | { | 1305 | { |
@@ -1296,6 +1311,12 @@ static int pegasus_probe(struct usb_interface *intf, | |||
1296 | DECLARE_MAC_BUF(mac); | 1311 | DECLARE_MAC_BUF(mac); |
1297 | 1312 | ||
1298 | usb_get_dev(dev); | 1313 | usb_get_dev(dev); |
1314 | |||
1315 | if (pegasus_blacklisted(dev)) { | ||
1316 | res = -ENODEV; | ||
1317 | goto out; | ||
1318 | } | ||
1319 | |||
1299 | net = alloc_etherdev(sizeof(struct pegasus)); | 1320 | net = alloc_etherdev(sizeof(struct pegasus)); |
1300 | if (!net) { | 1321 | if (!net) { |
1301 | dev_err(&intf->dev, "can't allocate %s\n", "device"); | 1322 | dev_err(&intf->dev, "can't allocate %s\n", "device"); |
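The pegasus hunks above add a descriptor-based quirk check so probe bails out before allocating anything when it sees the colliding Belkin Bluetooth dongle. The same technique in sketch form, with made-up vendor/product IDs and mydrv_* placeholders:

	#include <linux/usb.h>

	static int mydrv_is_blacklisted(struct usb_device *udev)
	{
		struct usb_device_descriptor *udd = &udev->descriptor;

		/* hypothetical device that shares our vendor/product ID but is not ours */
		return le16_to_cpu(udd->idVendor) == 0x1234 &&
		       le16_to_cpu(udd->idProduct) == 0x5678 &&
		       udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER;
	}

	static int mydrv_probe(struct usb_interface *intf,
			       const struct usb_device_id *id)
	{
		struct usb_device *udev = interface_to_usbdev(intf);

		if (mydrv_is_blacklisted(udev))
			return -ENODEV;		/* leave the interface for another driver */

		/* ... normal allocation and registration would follow here ... */
		return 0;
	}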
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 370ce30f2f45..007c12970065 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid | |||
662 | spin_unlock_irq(&vptr->lock); | 662 | spin_unlock_irq(&vptr->lock); |
663 | } | 663 | } |
664 | 664 | ||
665 | static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) | ||
666 | { | ||
667 | vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; | ||
668 | } | ||
665 | 669 | ||
666 | /** | 670 | /** |
667 | * velocity_rx_reset - handle a receive reset | 671 | * velocity_rx_reset - handle a receive reset |
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr) | |||
677 | struct mac_regs __iomem * regs = vptr->mac_regs; | 681 | struct mac_regs __iomem * regs = vptr->mac_regs; |
678 | int i; | 682 | int i; |
679 | 683 | ||
680 | vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; | 684 | velocity_init_rx_ring_indexes(vptr); |
681 | 685 | ||
682 | /* | 686 | /* |
683 | * Init state, all RD entries belong to the NIC | 687 | * Init state, all RD entries belong to the NIC |
684 | */ | 688 | */ |
685 | for (i = 0; i < vptr->options.numrx; ++i) | 689 | for (i = 0; i < vptr->options.numrx; ++i) |
686 | vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; | 690 | vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; |
687 | 691 | ||
688 | writew(vptr->options.numrx, ®s->RBRDU); | 692 | writew(vptr->options.numrx, ®s->RBRDU); |
689 | writel(vptr->rd_pool_dma, ®s->RDBaseLo); | 693 | writel(vptr->rx.pool_dma, ®s->RDBaseLo); |
690 | writew(0, ®s->RDIdx); | 694 | writew(0, ®s->RDIdx); |
691 | writew(vptr->options.numrx - 1, ®s->RDCSize); | 695 | writew(vptr->options.numrx - 1, ®s->RDCSize); |
692 | } | 696 | } |
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr, | |||
779 | 783 | ||
780 | vptr->int_mask = INT_MASK_DEF; | 784 | vptr->int_mask = INT_MASK_DEF; |
781 | 785 | ||
782 | writel(vptr->rd_pool_dma, ®s->RDBaseLo); | 786 | writel(vptr->rx.pool_dma, ®s->RDBaseLo); |
783 | writew(vptr->options.numrx - 1, ®s->RDCSize); | 787 | writew(vptr->options.numrx - 1, ®s->RDCSize); |
784 | mac_rx_queue_run(regs); | 788 | mac_rx_queue_run(regs); |
785 | mac_rx_queue_wake(regs); | 789 | mac_rx_queue_wake(regs); |
786 | 790 | ||
787 | writew(vptr->options.numtx - 1, ®s->TDCSize); | 791 | writew(vptr->options.numtx - 1, ®s->TDCSize); |
788 | 792 | ||
789 | for (i = 0; i < vptr->num_txq; i++) { | 793 | for (i = 0; i < vptr->tx.numq; i++) { |
790 | writel(vptr->td_pool_dma[i], ®s->TDBaseLo[i]); | 794 | writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]); |
791 | mac_tx_queue_run(regs, i); | 795 | mac_tx_queue_run(regs, i); |
792 | } | 796 | } |
793 | 797 | ||
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, | |||
1047 | 1051 | ||
1048 | vptr->pdev = pdev; | 1052 | vptr->pdev = pdev; |
1049 | vptr->chip_id = info->chip_id; | 1053 | vptr->chip_id = info->chip_id; |
1050 | vptr->num_txq = info->txqueue; | 1054 | vptr->tx.numq = info->txqueue; |
1051 | vptr->multicast_limit = MCAM_SIZE; | 1055 | vptr->multicast_limit = MCAM_SIZE; |
1052 | spin_lock_init(&vptr->lock); | 1056 | spin_lock_init(&vptr->lock); |
1053 | INIT_LIST_HEAD(&vptr->list); | 1057 | INIT_LIST_HEAD(&vptr->list); |
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc | |||
1093 | } | 1097 | } |
1094 | 1098 | ||
1095 | /** | 1099 | /** |
1096 | * velocity_init_rings - set up DMA rings | 1100 | * velocity_init_dma_rings - set up DMA rings |
1097 | * @vptr: Velocity to set up | 1101 | * @vptr: Velocity to set up |
1098 | * | 1102 | * |
1099 | * Allocate PCI mapped DMA rings for the receive and transmit layer | 1103 | * Allocate PCI mapped DMA rings for the receive and transmit layer |
1100 | * to use. | 1104 | * to use. |
1101 | */ | 1105 | */ |
1102 | 1106 | ||
1103 | static int velocity_init_rings(struct velocity_info *vptr) | 1107 | static int velocity_init_dma_rings(struct velocity_info *vptr) |
1104 | { | 1108 | { |
1105 | struct velocity_opt *opt = &vptr->options; | 1109 | struct velocity_opt *opt = &vptr->options; |
1106 | const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); | 1110 | const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); |
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr) | |||
1116 | * pci_alloc_consistent() fulfills the requirement for 64 bytes | 1120 | * pci_alloc_consistent() fulfills the requirement for 64 bytes |
1117 | * alignment | 1121 | * alignment |
1118 | */ | 1122 | */ |
1119 | pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + | 1123 | pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + |
1120 | rx_ring_size, &pool_dma); | 1124 | rx_ring_size, &pool_dma); |
1121 | if (!pool) { | 1125 | if (!pool) { |
1122 | dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", | 1126 | dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", |
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr) | |||
1124 | return -ENOMEM; | 1128 | return -ENOMEM; |
1125 | } | 1129 | } |
1126 | 1130 | ||
1127 | vptr->rd_ring = pool; | 1131 | vptr->rx.ring = pool; |
1128 | vptr->rd_pool_dma = pool_dma; | 1132 | vptr->rx.pool_dma = pool_dma; |
1129 | 1133 | ||
1130 | pool += rx_ring_size; | 1134 | pool += rx_ring_size; |
1131 | pool_dma += rx_ring_size; | 1135 | pool_dma += rx_ring_size; |
1132 | 1136 | ||
1133 | for (i = 0; i < vptr->num_txq; i++) { | 1137 | for (i = 0; i < vptr->tx.numq; i++) { |
1134 | vptr->td_rings[i] = pool; | 1138 | vptr->tx.rings[i] = pool; |
1135 | vptr->td_pool_dma[i] = pool_dma; | 1139 | vptr->tx.pool_dma[i] = pool_dma; |
1136 | pool += tx_ring_size; | 1140 | pool += tx_ring_size; |
1137 | pool_dma += tx_ring_size; | 1141 | pool_dma += tx_ring_size; |
1138 | } | 1142 | } |
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr) | |||
1141 | } | 1145 | } |
1142 | 1146 | ||
1143 | /** | 1147 | /** |
1144 | * velocity_free_rings - free PCI ring pointers | 1148 | * velocity_free_dma_rings - free PCI ring pointers |
1145 | * @vptr: Velocity to free from | 1149 | * @vptr: Velocity to free from |
1146 | * | 1150 | * |
1147 | * Clean up the PCI ring buffers allocated to this velocity. | 1151 | * Clean up the PCI ring buffers allocated to this velocity. |
1148 | */ | 1152 | */ |
1149 | 1153 | ||
1150 | static void velocity_free_rings(struct velocity_info *vptr) | 1154 | static void velocity_free_dma_rings(struct velocity_info *vptr) |
1151 | { | 1155 | { |
1152 | const int size = vptr->options.numrx * sizeof(struct rx_desc) + | 1156 | const int size = vptr->options.numrx * sizeof(struct rx_desc) + |
1153 | vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; | 1157 | vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; |
1154 | 1158 | ||
1155 | pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); | 1159 | pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); |
1156 | } | 1160 | } |
1157 | 1161 | ||
1158 | static void velocity_give_many_rx_descs(struct velocity_info *vptr) | 1162 | static void velocity_give_many_rx_descs(struct velocity_info *vptr) |
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr) | |||
1164 | * RD number must be equal to 4X per hardware spec | 1168 | * RD number must be equal to 4X per hardware spec |
1165 | * (programming guide rev 1.20, p.13) | 1169 | * (programming guide rev 1.20, p.13) |
1166 | */ | 1170 | */ |
1167 | if (vptr->rd_filled < 4) | 1171 | if (vptr->rx.filled < 4) |
1168 | return; | 1172 | return; |
1169 | 1173 | ||
1170 | wmb(); | 1174 | wmb(); |
1171 | 1175 | ||
1172 | unusable = vptr->rd_filled & 0x0003; | 1176 | unusable = vptr->rx.filled & 0x0003; |
1173 | dirty = vptr->rd_dirty - unusable; | 1177 | dirty = vptr->rx.dirty - unusable; |
1174 | for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { | 1178 | for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { |
1175 | dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; | 1179 | dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; |
1176 | vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; | 1180 | vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; |
1177 | } | 1181 | } |
1178 | 1182 | ||
1179 | writew(vptr->rd_filled & 0xfffc, ®s->RBRDU); | 1183 | writew(vptr->rx.filled & 0xfffc, ®s->RBRDU); |
1180 | vptr->rd_filled = unusable; | 1184 | vptr->rx.filled = unusable; |
1181 | } | 1185 | } |
1182 | 1186 | ||
1183 | static int velocity_rx_refill(struct velocity_info *vptr) | 1187 | static int velocity_rx_refill(struct velocity_info *vptr) |
1184 | { | 1188 | { |
1185 | int dirty = vptr->rd_dirty, done = 0; | 1189 | int dirty = vptr->rx.dirty, done = 0; |
1186 | 1190 | ||
1187 | do { | 1191 | do { |
1188 | struct rx_desc *rd = vptr->rd_ring + dirty; | 1192 | struct rx_desc *rd = vptr->rx.ring + dirty; |
1189 | 1193 | ||
1190 | /* Fine for an all zero Rx desc at init time as well */ | 1194 | /* Fine for an all zero Rx desc at init time as well */ |
1191 | if (rd->rdesc0.len & OWNED_BY_NIC) | 1195 | if (rd->rdesc0.len & OWNED_BY_NIC) |
1192 | break; | 1196 | break; |
1193 | 1197 | ||
1194 | if (!vptr->rd_info[dirty].skb) { | 1198 | if (!vptr->rx.info[dirty].skb) { |
1195 | if (velocity_alloc_rx_buf(vptr, dirty) < 0) | 1199 | if (velocity_alloc_rx_buf(vptr, dirty) < 0) |
1196 | break; | 1200 | break; |
1197 | } | 1201 | } |
1198 | done++; | 1202 | done++; |
1199 | dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; | 1203 | dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; |
1200 | } while (dirty != vptr->rd_curr); | 1204 | } while (dirty != vptr->rx.curr); |
1201 | 1205 | ||
1202 | if (done) { | 1206 | if (done) { |
1203 | vptr->rd_dirty = dirty; | 1207 | vptr->rx.dirty = dirty; |
1204 | vptr->rd_filled += done; | 1208 | vptr->rx.filled += done; |
1205 | } | 1209 | } |
1206 | 1210 | ||
1207 | return done; | 1211 | return done; |
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr) | |||
1209 | 1213 | ||
1210 | static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) | 1214 | static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) |
1211 | { | 1215 | { |
1212 | vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; | 1216 | vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; |
1213 | } | 1217 | } |
1214 | 1218 | ||
1215 | /** | 1219 | /** |
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) | |||
1224 | { | 1228 | { |
1225 | int ret = -ENOMEM; | 1229 | int ret = -ENOMEM; |
1226 | 1230 | ||
1227 | vptr->rd_info = kcalloc(vptr->options.numrx, | 1231 | vptr->rx.info = kcalloc(vptr->options.numrx, |
1228 | sizeof(struct velocity_rd_info), GFP_KERNEL); | 1232 | sizeof(struct velocity_rd_info), GFP_KERNEL); |
1229 | if (!vptr->rd_info) | 1233 | if (!vptr->rx.info) |
1230 | goto out; | 1234 | goto out; |
1231 | 1235 | ||
1232 | vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; | 1236 | velocity_init_rx_ring_indexes(vptr); |
1233 | 1237 | ||
1234 | if (velocity_rx_refill(vptr) != vptr->options.numrx) { | 1238 | if (velocity_rx_refill(vptr) != vptr->options.numrx) { |
1235 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR | 1239 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR |
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) | |||
1255 | { | 1259 | { |
1256 | int i; | 1260 | int i; |
1257 | 1261 | ||
1258 | if (vptr->rd_info == NULL) | 1262 | if (vptr->rx.info == NULL) |
1259 | return; | 1263 | return; |
1260 | 1264 | ||
1261 | for (i = 0; i < vptr->options.numrx; i++) { | 1265 | for (i = 0; i < vptr->options.numrx; i++) { |
1262 | struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); | 1266 | struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); |
1263 | struct rx_desc *rd = vptr->rd_ring + i; | 1267 | struct rx_desc *rd = vptr->rx.ring + i; |
1264 | 1268 | ||
1265 | memset(rd, 0, sizeof(*rd)); | 1269 | memset(rd, 0, sizeof(*rd)); |
1266 | 1270 | ||
1267 | if (!rd_info->skb) | 1271 | if (!rd_info->skb) |
1268 | continue; | 1272 | continue; |
1269 | pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, | 1273 | pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, |
1270 | PCI_DMA_FROMDEVICE); | 1274 | PCI_DMA_FROMDEVICE); |
1271 | rd_info->skb_dma = (dma_addr_t) NULL; | 1275 | rd_info->skb_dma = (dma_addr_t) NULL; |
1272 | 1276 | ||
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) | |||
1274 | rd_info->skb = NULL; | 1278 | rd_info->skb = NULL; |
1275 | } | 1279 | } |
1276 | 1280 | ||
1277 | kfree(vptr->rd_info); | 1281 | kfree(vptr->rx.info); |
1278 | vptr->rd_info = NULL; | 1282 | vptr->rx.info = NULL; |
1279 | } | 1283 | } |
1280 | 1284 | ||
1281 | /** | 1285 | /** |
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr) | |||
1293 | unsigned int j; | 1297 | unsigned int j; |
1294 | 1298 | ||
1295 | /* Init the TD ring entries */ | 1299 | /* Init the TD ring entries */ |
1296 | for (j = 0; j < vptr->num_txq; j++) { | 1300 | for (j = 0; j < vptr->tx.numq; j++) { |
1297 | curr = vptr->td_pool_dma[j]; | 1301 | curr = vptr->tx.pool_dma[j]; |
1298 | 1302 | ||
1299 | vptr->td_infos[j] = kcalloc(vptr->options.numtx, | 1303 | vptr->tx.infos[j] = kcalloc(vptr->options.numtx, |
1300 | sizeof(struct velocity_td_info), | 1304 | sizeof(struct velocity_td_info), |
1301 | GFP_KERNEL); | 1305 | GFP_KERNEL); |
1302 | if (!vptr->td_infos[j]) { | 1306 | if (!vptr->tx.infos[j]) { |
1303 | while(--j >= 0) | 1307 | while(--j >= 0) |
1304 | kfree(vptr->td_infos[j]); | 1308 | kfree(vptr->tx.infos[j]); |
1305 | return -ENOMEM; | 1309 | return -ENOMEM; |
1306 | } | 1310 | } |
1307 | 1311 | ||
1308 | vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; | 1312 | vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; |
1309 | } | 1313 | } |
1310 | return 0; | 1314 | return 0; |
1311 | } | 1315 | } |
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr) | |||
1317 | static void velocity_free_td_ring_entry(struct velocity_info *vptr, | 1321 | static void velocity_free_td_ring_entry(struct velocity_info *vptr, |
1318 | int q, int n) | 1322 | int q, int n) |
1319 | { | 1323 | { |
1320 | struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); | 1324 | struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]); |
1321 | int i; | 1325 | int i; |
1322 | 1326 | ||
1323 | if (td_info == NULL) | 1327 | if (td_info == NULL) |
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr) | |||
1349 | { | 1353 | { |
1350 | int i, j; | 1354 | int i, j; |
1351 | 1355 | ||
1352 | for (j = 0; j < vptr->num_txq; j++) { | 1356 | for (j = 0; j < vptr->tx.numq; j++) { |
1353 | if (vptr->td_infos[j] == NULL) | 1357 | if (vptr->tx.infos[j] == NULL) |
1354 | continue; | 1358 | continue; |
1355 | for (i = 0; i < vptr->options.numtx; i++) { | 1359 | for (i = 0; i < vptr->options.numtx; i++) { |
1356 | velocity_free_td_ring_entry(vptr, j, i); | 1360 | velocity_free_td_ring_entry(vptr, j, i); |
1357 | 1361 | ||
1358 | } | 1362 | } |
1359 | kfree(vptr->td_infos[j]); | 1363 | kfree(vptr->tx.infos[j]); |
1360 | vptr->td_infos[j] = NULL; | 1364 | vptr->tx.infos[j] = NULL; |
1361 | } | 1365 | } |
1362 | } | 1366 | } |
1363 | 1367 | ||
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr) | |||
1374 | static int velocity_rx_srv(struct velocity_info *vptr, int status) | 1378 | static int velocity_rx_srv(struct velocity_info *vptr, int status) |
1375 | { | 1379 | { |
1376 | struct net_device_stats *stats = &vptr->stats; | 1380 | struct net_device_stats *stats = &vptr->stats; |
1377 | int rd_curr = vptr->rd_curr; | 1381 | int rd_curr = vptr->rx.curr; |
1378 | int works = 0; | 1382 | int works = 0; |
1379 | 1383 | ||
1380 | do { | 1384 | do { |
1381 | struct rx_desc *rd = vptr->rd_ring + rd_curr; | 1385 | struct rx_desc *rd = vptr->rx.ring + rd_curr; |
1382 | 1386 | ||
1383 | if (!vptr->rd_info[rd_curr].skb) | 1387 | if (!vptr->rx.info[rd_curr].skb) |
1384 | break; | 1388 | break; |
1385 | 1389 | ||
1386 | if (rd->rdesc0.len & OWNED_BY_NIC) | 1390 | if (rd->rdesc0.len & OWNED_BY_NIC) |
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) | |||
1412 | rd_curr = 0; | 1416 | rd_curr = 0; |
1413 | } while (++works <= 15); | 1417 | } while (++works <= 15); |
1414 | 1418 | ||
1415 | vptr->rd_curr = rd_curr; | 1419 | vptr->rx.curr = rd_curr; |
1416 | 1420 | ||
1417 | if ((works > 0) && (velocity_rx_refill(vptr) > 0)) | 1421 | if ((works > 0) && (velocity_rx_refill(vptr) > 0)) |
1418 | velocity_give_many_rx_descs(vptr); | 1422 | velocity_give_many_rx_descs(vptr); |
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1510 | { | 1514 | { |
1511 | void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); | 1515 | void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); |
1512 | struct net_device_stats *stats = &vptr->stats; | 1516 | struct net_device_stats *stats = &vptr->stats; |
1513 | struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); | 1517 | struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); |
1514 | struct rx_desc *rd = &(vptr->rd_ring[idx]); | 1518 | struct rx_desc *rd = &(vptr->rx.ring[idx]); |
1515 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; | 1519 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; |
1516 | struct sk_buff *skb; | 1520 | struct sk_buff *skb; |
1517 | 1521 | ||
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1527 | skb = rd_info->skb; | 1531 | skb = rd_info->skb; |
1528 | 1532 | ||
1529 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, | 1533 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, |
1530 | vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1534 | vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); |
1531 | 1535 | ||
1532 | /* | 1536 | /* |
1533 | * Drop frame not meeting IEEE 802.3 | 1537 | * Drop frame not meeting IEEE 802.3 |
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1550 | rd_info->skb = NULL; | 1554 | rd_info->skb = NULL; |
1551 | } | 1555 | } |
1552 | 1556 | ||
1553 | pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, | 1557 | pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, |
1554 | PCI_DMA_FROMDEVICE); | 1558 | PCI_DMA_FROMDEVICE); |
1555 | 1559 | ||
1556 | skb_put(skb, pkt_len - 4); | 1560 | skb_put(skb, pkt_len - 4); |
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
1580 | 1584 | ||
1581 | static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) | 1585 | static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) |
1582 | { | 1586 | { |
1583 | struct rx_desc *rd = &(vptr->rd_ring[idx]); | 1587 | struct rx_desc *rd = &(vptr->rx.ring[idx]); |
1584 | struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); | 1588 | struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); |
1585 | 1589 | ||
1586 | rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); | 1590 | rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64); |
1587 | if (rd_info->skb == NULL) | 1591 | if (rd_info->skb == NULL) |
1588 | return -ENOMEM; | 1592 | return -ENOMEM; |
1589 | 1593 | ||
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) | |||
1592 | * 64byte alignment. | 1596 | * 64byte alignment. |
1593 | */ | 1597 | */ |
1594 | skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); | 1598 | skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); |
1595 | rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1599 | rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, |
1600 | vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); | ||
1596 | 1601 | ||
1597 | /* | 1602 | /* |
1598 | * Fill in the descriptor to match | 1603 | * Fill in the descriptor to match |
1599 | */ | 1604 | */ |
1600 | 1605 | ||
1601 | *((u32 *) & (rd->rdesc0)) = 0; | 1606 | *((u32 *) & (rd->rdesc0)) = 0; |
1602 | rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; | 1607 | rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; |
1603 | rd->pa_low = cpu_to_le32(rd_info->skb_dma); | 1608 | rd->pa_low = cpu_to_le32(rd_info->skb_dma); |
1604 | rd->pa_high = 0; | 1609 | rd->pa_high = 0; |
1605 | return 0; | 1610 | return 0; |
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | |||
1625 | struct velocity_td_info *tdinfo; | 1630 | struct velocity_td_info *tdinfo; |
1626 | struct net_device_stats *stats = &vptr->stats; | 1631 | struct net_device_stats *stats = &vptr->stats; |
1627 | 1632 | ||
1628 | for (qnum = 0; qnum < vptr->num_txq; qnum++) { | 1633 | for (qnum = 0; qnum < vptr->tx.numq; qnum++) { |
1629 | for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; | 1634 | for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; |
1630 | idx = (idx + 1) % vptr->options.numtx) { | 1635 | idx = (idx + 1) % vptr->options.numtx) { |
1631 | 1636 | ||
1632 | /* | 1637 | /* |
1633 | * Get Tx Descriptor | 1638 | * Get Tx Descriptor |
1634 | */ | 1639 | */ |
1635 | td = &(vptr->td_rings[qnum][idx]); | 1640 | td = &(vptr->tx.rings[qnum][idx]); |
1636 | tdinfo = &(vptr->td_infos[qnum][idx]); | 1641 | tdinfo = &(vptr->tx.infos[qnum][idx]); |
1637 | 1642 | ||
1638 | if (td->tdesc0.len & OWNED_BY_NIC) | 1643 | if (td->tdesc0.len & OWNED_BY_NIC) |
1639 | break; | 1644 | break; |
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | |||
1657 | stats->tx_bytes += tdinfo->skb->len; | 1662 | stats->tx_bytes += tdinfo->skb->len; |
1658 | } | 1663 | } |
1659 | velocity_free_tx_buf(vptr, tdinfo); | 1664 | velocity_free_tx_buf(vptr, tdinfo); |
1660 | vptr->td_used[qnum]--; | 1665 | vptr->tx.used[qnum]--; |
1661 | } | 1666 | } |
1662 | vptr->td_tail[qnum] = idx; | 1667 | vptr->tx.tail[qnum] = idx; |
1663 | 1668 | ||
1664 | if (AVAIL_TD(vptr, qnum) < 1) { | 1669 | if (AVAIL_TD(vptr, qnum) < 1) { |
1665 | full = 1; | 1670 | full = 1; |
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ | |||
1846 | tdinfo->skb = NULL; | 1851 | tdinfo->skb = NULL; |
1847 | } | 1852 | } |
1848 | 1853 | ||
1854 | static int velocity_init_rings(struct velocity_info *vptr, int mtu) | ||
1855 | { | ||
1856 | int ret; | ||
1857 | |||
1858 | velocity_set_rxbufsize(vptr, mtu); | ||
1859 | |||
1860 | ret = velocity_init_dma_rings(vptr); | ||
1861 | if (ret < 0) | ||
1862 | goto out; | ||
1863 | |||
1864 | ret = velocity_init_rd_ring(vptr); | ||
1865 | if (ret < 0) | ||
1866 | goto err_free_dma_rings_0; | ||
1867 | |||
1868 | ret = velocity_init_td_ring(vptr); | ||
1869 | if (ret < 0) | ||
1870 | goto err_free_rd_ring_1; | ||
1871 | out: | ||
1872 | return ret; | ||
1873 | |||
1874 | err_free_rd_ring_1: | ||
1875 | velocity_free_rd_ring(vptr); | ||
1876 | err_free_dma_rings_0: | ||
1877 | velocity_free_dma_rings(vptr); | ||
1878 | goto out; | ||
1879 | } | ||
1880 | |||
1881 | static void velocity_free_rings(struct velocity_info *vptr) | ||
1882 | { | ||
1883 | velocity_free_td_ring(vptr); | ||
1884 | velocity_free_rd_ring(vptr); | ||
1885 | velocity_free_dma_rings(vptr); | ||
1886 | } | ||
1887 | |||
1849 | /** | 1888 | /** |
1850 | * velocity_open - interface activation callback | 1889 | * velocity_open - interface activation callback |
1851 | * @dev: network layer device to open | 1890 | * @dev: network layer device to open |
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev) | |||
1862 | struct velocity_info *vptr = netdev_priv(dev); | 1901 | struct velocity_info *vptr = netdev_priv(dev); |
1863 | int ret; | 1902 | int ret; |
1864 | 1903 | ||
1865 | velocity_set_rxbufsize(vptr, dev->mtu); | 1904 | ret = velocity_init_rings(vptr, dev->mtu); |
1866 | |||
1867 | ret = velocity_init_rings(vptr); | ||
1868 | if (ret < 0) | 1905 | if (ret < 0) |
1869 | goto out; | 1906 | goto out; |
1870 | 1907 | ||
1871 | ret = velocity_init_rd_ring(vptr); | ||
1872 | if (ret < 0) | ||
1873 | goto err_free_desc_rings; | ||
1874 | |||
1875 | ret = velocity_init_td_ring(vptr); | ||
1876 | if (ret < 0) | ||
1877 | goto err_free_rd_ring; | ||
1878 | |||
1879 | /* Ensure chip is running */ | 1908 | /* Ensure chip is running */ |
1880 | pci_set_power_state(vptr->pdev, PCI_D0); | 1909 | pci_set_power_state(vptr->pdev, PCI_D0); |
1881 | 1910 | ||
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev) | |||
1888 | if (ret < 0) { | 1917 | if (ret < 0) { |
1889 | /* Power down the chip */ | 1918 | /* Power down the chip */ |
1890 | pci_set_power_state(vptr->pdev, PCI_D3hot); | 1919 | pci_set_power_state(vptr->pdev, PCI_D3hot); |
1891 | goto err_free_td_ring; | 1920 | velocity_free_rings(vptr); |
1921 | goto out; | ||
1892 | } | 1922 | } |
1893 | 1923 | ||
1894 | mac_enable_int(vptr->mac_regs); | 1924 | mac_enable_int(vptr->mac_regs); |
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev) | |||
1896 | vptr->flags |= VELOCITY_FLAGS_OPENED; | 1926 | vptr->flags |= VELOCITY_FLAGS_OPENED; |
1897 | out: | 1927 | out: |
1898 | return ret; | 1928 | return ret; |
1899 | |||
1900 | err_free_td_ring: | ||
1901 | velocity_free_td_ring(vptr); | ||
1902 | err_free_rd_ring: | ||
1903 | velocity_free_rd_ring(vptr); | ||
1904 | err_free_desc_rings: | ||
1905 | velocity_free_rings(vptr); | ||
1906 | goto out; | ||
1907 | } | 1929 | } |
1908 | 1930 | ||
1909 | /** | 1931 | /** |
@@ -1919,50 +1941,72 @@ err_free_desc_rings: | |||
1919 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) | 1941 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) |
1920 | { | 1942 | { |
1921 | struct velocity_info *vptr = netdev_priv(dev); | 1943 | struct velocity_info *vptr = netdev_priv(dev); |
1922 | unsigned long flags; | ||
1923 | int oldmtu = dev->mtu; | ||
1924 | int ret = 0; | 1944 | int ret = 0; |
1925 | 1945 | ||
1926 | if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { | 1946 | if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { |
1927 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", | 1947 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", |
1928 | vptr->dev->name); | 1948 | vptr->dev->name); |
1929 | return -EINVAL; | 1949 | ret = -EINVAL; |
1950 | goto out_0; | ||
1930 | } | 1951 | } |
1931 | 1952 | ||
1932 | if (!netif_running(dev)) { | 1953 | if (!netif_running(dev)) { |
1933 | dev->mtu = new_mtu; | 1954 | dev->mtu = new_mtu; |
1934 | return 0; | 1955 | goto out_0; |
1935 | } | 1956 | } |
1936 | 1957 | ||
1937 | if (new_mtu != oldmtu) { | 1958 | if (dev->mtu != new_mtu) { |
1959 | struct velocity_info *tmp_vptr; | ||
1960 | unsigned long flags; | ||
1961 | struct rx_info rx; | ||
1962 | struct tx_info tx; | ||
1963 | |||
1964 | tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); | ||
1965 | if (!tmp_vptr) { | ||
1966 | ret = -ENOMEM; | ||
1967 | goto out_0; | ||
1968 | } | ||
1969 | |||
1970 | tmp_vptr->dev = dev; | ||
1971 | tmp_vptr->pdev = vptr->pdev; | ||
1972 | tmp_vptr->options = vptr->options; | ||
1973 | tmp_vptr->tx.numq = vptr->tx.numq; | ||
1974 | |||
1975 | ret = velocity_init_rings(tmp_vptr, new_mtu); | ||
1976 | if (ret < 0) | ||
1977 | goto out_free_tmp_vptr_1; | ||
1978 | |||
1938 | spin_lock_irqsave(&vptr->lock, flags); | 1979 | spin_lock_irqsave(&vptr->lock, flags); |
1939 | 1980 | ||
1940 | netif_stop_queue(dev); | 1981 | netif_stop_queue(dev); |
1941 | velocity_shutdown(vptr); | 1982 | velocity_shutdown(vptr); |
1942 | 1983 | ||
1943 | velocity_free_td_ring(vptr); | 1984 | rx = vptr->rx; |
1944 | velocity_free_rd_ring(vptr); | 1985 | tx = vptr->tx; |
1945 | 1986 | ||
1946 | dev->mtu = new_mtu; | 1987 | vptr->rx = tmp_vptr->rx; |
1988 | vptr->tx = tmp_vptr->tx; | ||
1947 | 1989 | ||
1948 | velocity_set_rxbufsize(vptr, new_mtu); | 1990 | tmp_vptr->rx = rx; |
1991 | tmp_vptr->tx = tx; | ||
1949 | 1992 | ||
1950 | ret = velocity_init_rd_ring(vptr); | 1993 | dev->mtu = new_mtu; |
1951 | if (ret < 0) | ||
1952 | goto out_unlock; | ||
1953 | 1994 | ||
1954 | ret = velocity_init_td_ring(vptr); | 1995 | velocity_give_many_rx_descs(vptr); |
1955 | if (ret < 0) | ||
1956 | goto out_unlock; | ||
1957 | 1996 | ||
1958 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); | 1997 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); |
1959 | 1998 | ||
1960 | mac_enable_int(vptr->mac_regs); | 1999 | mac_enable_int(vptr->mac_regs); |
1961 | netif_start_queue(dev); | 2000 | netif_start_queue(dev); |
1962 | out_unlock: | 2001 | |
1963 | spin_unlock_irqrestore(&vptr->lock, flags); | 2002 | spin_unlock_irqrestore(&vptr->lock, flags); |
1964 | } | ||
1965 | 2003 | ||
2004 | velocity_free_rings(tmp_vptr); | ||
2005 | |||
2006 | out_free_tmp_vptr_1: | ||
2007 | kfree(tmp_vptr); | ||
2008 | } | ||
2009 | out_0: | ||
1966 | return ret; | 2010 | return ret; |
1967 | } | 2011 | } |
1968 | 2012 | ||
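The reworked velocity_change_mtu() above builds the new descriptor rings in a throw-away velocity_info and swaps the rx/tx state into the live device under the lock only once allocation has succeeded, so a failed resize leaves the running rings untouched. The same idea in a stripped-down, driver-neutral sketch (all my_* types and helpers are placeholders, not velocity code):

	#include <linux/spinlock.h>

	struct rings { void *rx; void *tx; };	/* stand-in for the rx/tx sub-structures */
	struct my_nic { spinlock_t lock; int mtu; struct rings rings; };

	int  my_alloc_rings(struct rings *r, int mtu);	/* placeholder ring/hw helpers */
	void my_free_rings(struct rings *r);
	void my_stop_hw(struct my_nic *nic);
	void my_restart_hw(struct my_nic *nic);

	static int change_mtu_by_swap(struct my_nic *nic, int new_mtu)
	{
		struct rings fresh, old;
		int err;

		/* build the new rings off to the side; failure has no side effects */
		err = my_alloc_rings(&fresh, new_mtu);
		if (err)
			return err;

		spin_lock_irq(&nic->lock);
		my_stop_hw(nic);

		old = nic->rings;	/* keep the live rings ...                */
		nic->rings = fresh;	/* ... and install the freshly built ones */
		nic->mtu = new_mtu;

		my_restart_hw(nic);
		spin_unlock_irq(&nic->lock);

		my_free_rings(&old);	/* tear the old rings down outside the lock */
		return 0;
	}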
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev) | |||
2008 | /* Power down the chip */ | 2052 | /* Power down the chip */ |
2009 | pci_set_power_state(vptr->pdev, PCI_D3hot); | 2053 | pci_set_power_state(vptr->pdev, PCI_D3hot); |
2010 | 2054 | ||
2011 | /* Free the resources */ | ||
2012 | velocity_free_td_ring(vptr); | ||
2013 | velocity_free_rd_ring(vptr); | ||
2014 | velocity_free_rings(vptr); | 2055 | velocity_free_rings(vptr); |
2015 | 2056 | ||
2016 | vptr->flags &= (~VELOCITY_FLAGS_OPENED); | 2057 | vptr->flags &= (~VELOCITY_FLAGS_OPENED); |
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2056 | 2097 | ||
2057 | spin_lock_irqsave(&vptr->lock, flags); | 2098 | spin_lock_irqsave(&vptr->lock, flags); |
2058 | 2099 | ||
2059 | index = vptr->td_curr[qnum]; | 2100 | index = vptr->tx.curr[qnum]; |
2060 | td_ptr = &(vptr->td_rings[qnum][index]); | 2101 | td_ptr = &(vptr->tx.rings[qnum][index]); |
2061 | tdinfo = &(vptr->td_infos[qnum][index]); | 2102 | tdinfo = &(vptr->tx.infos[qnum][index]); |
2062 | 2103 | ||
2063 | td_ptr->tdesc1.TCR = TCR0_TIC; | 2104 | td_ptr->tdesc1.TCR = TCR0_TIC; |
2064 | td_ptr->td_buf[0].size &= ~TD_QUEUE; | 2105 | td_ptr->td_buf[0].size &= ~TD_QUEUE; |
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2071 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); | 2112 | skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); |
2072 | tdinfo->skb_dma[0] = tdinfo->buf_dma; | 2113 | tdinfo->skb_dma[0] = tdinfo->buf_dma; |
2073 | td_ptr->tdesc0.len = len; | 2114 | td_ptr->tdesc0.len = len; |
2074 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | 2115 | td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
2075 | td_ptr->td_buf[0].pa_high = 0; | 2116 | td_ptr->tx.buf[0].pa_high = 0; |
2076 | td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ | 2117 | td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */ |
2077 | tdinfo->nskb_dma = 1; | 2118 | tdinfo->nskb_dma = 1; |
2078 | } else { | 2119 | } else { |
2079 | int i = 0; | 2120 | int i = 0; |
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2084 | td_ptr->tdesc0.len = len; | 2125 | td_ptr->tdesc0.len = len; |
2085 | 2126 | ||
2086 | /* FIXME: support 48bit DMA later */ | 2127 | /* FIXME: support 48bit DMA later */ |
2087 | td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); | 2128 | td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); |
2088 | td_ptr->td_buf[i].pa_high = 0; | 2129 | td_ptr->tx.buf[i].pa_high = 0; |
2089 | td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); | 2130 | td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb)); |
2090 | 2131 | ||
2091 | for (i = 0; i < nfrags; i++) { | 2132 | for (i = 0; i < nfrags; i++) { |
2092 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2133 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2094 | 2135 | ||
2095 | tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); | 2136 | tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); |
2096 | 2137 | ||
2097 | td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); | 2138 | td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); |
2098 | td_ptr->td_buf[i + 1].pa_high = 0; | 2139 | td_ptr->tx.buf[i + 1].pa_high = 0; |
2099 | td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); | 2140 | td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size); |
2100 | } | 2141 | } |
2101 | tdinfo->nskb_dma = i - 1; | 2142 | tdinfo->nskb_dma = i - 1; |
2102 | } | 2143 | } |
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2142 | if (prev < 0) | 2183 | if (prev < 0) |
2143 | prev = vptr->options.numtx - 1; | 2184 | prev = vptr->options.numtx - 1; |
2144 | td_ptr->tdesc0.len |= OWNED_BY_NIC; | 2185 | td_ptr->tdesc0.len |= OWNED_BY_NIC; |
2145 | vptr->td_used[qnum]++; | 2186 | vptr->tx.used[qnum]++; |
2146 | vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; | 2187 | vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; |
2147 | 2188 | ||
2148 | if (AVAIL_TD(vptr, qnum) < 1) | 2189 | if (AVAIL_TD(vptr, qnum) < 1) |
2149 | netif_stop_queue(dev); | 2190 | netif_stop_queue(dev); |
2150 | 2191 | ||
2151 | td_ptr = &(vptr->td_rings[qnum][prev]); | 2192 | td_ptr = &(vptr->tx.rings[qnum][prev]); |
2152 | td_ptr->td_buf[0].size |= TD_QUEUE; | 2193 | td_ptr->td_buf[0].size |= TD_QUEUE; |
2153 | mac_tx_queue_wake(vptr->mac_regs, qnum); | 2194 | mac_tx_queue_wake(vptr->mac_regs, qnum); |
2154 | } | 2195 | } |
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev) | |||
3405 | 3446 | ||
3406 | velocity_tx_srv(vptr, 0); | 3447 | velocity_tx_srv(vptr, 0); |
3407 | 3448 | ||
3408 | for (i = 0; i < vptr->num_txq; i++) { | 3449 | for (i = 0; i < vptr->tx.numq; i++) { |
3409 | if (vptr->td_used[i]) { | 3450 | if (vptr->tx.used[i]) { |
3410 | mac_tx_queue_wake(vptr->mac_regs, i); | 3451 | mac_tx_queue_wake(vptr->mac_regs, i); |
3411 | } | 3452 | } |
3412 | } | 3453 | } |
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index 86446147284c..1b95b04c9257 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -1494,6 +1494,10 @@ struct velocity_opt { | |||
1494 | u32 flags; | 1494 | u32 flags; |
1495 | }; | 1495 | }; |
1496 | 1496 | ||
1497 | #define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)])) | ||
1498 | |||
1499 | #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) | ||
1500 | |||
1497 | struct velocity_info { | 1501 | struct velocity_info { |
1498 | struct list_head list; | 1502 | struct list_head list; |
1499 | 1503 | ||
@@ -1501,9 +1505,6 @@ struct velocity_info { | |||
1501 | struct net_device *dev; | 1505 | struct net_device *dev; |
1502 | struct net_device_stats stats; | 1506 | struct net_device_stats stats; |
1503 | 1507 | ||
1504 | dma_addr_t rd_pool_dma; | ||
1505 | dma_addr_t td_pool_dma[TX_QUEUE_NO]; | ||
1506 | |||
1507 | struct vlan_group *vlgrp; | 1508 | struct vlan_group *vlgrp; |
1508 | u8 ip_addr[4]; | 1509 | u8 ip_addr[4]; |
1509 | enum chip_type chip_id; | 1510 | enum chip_type chip_id; |
@@ -1512,25 +1513,29 @@ struct velocity_info { | |||
1512 | unsigned long memaddr; | 1513 | unsigned long memaddr; |
1513 | unsigned long ioaddr; | 1514 | unsigned long ioaddr; |
1514 | 1515 | ||
1515 | u8 rev_id; | 1516 | struct tx_info { |
1516 | 1517 | int numq; | |
1517 | #define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) | 1518 | |
1519 | /* FIXME: the locality of the data seems rather poor. */ | ||
1520 | int used[TX_QUEUE_NO]; | ||
1521 | int curr[TX_QUEUE_NO]; | ||
1522 | int tail[TX_QUEUE_NO]; | ||
1523 | struct tx_desc *rings[TX_QUEUE_NO]; | ||
1524 | struct velocity_td_info *infos[TX_QUEUE_NO]; | ||
1525 | dma_addr_t pool_dma[TX_QUEUE_NO]; | ||
1526 | } tx; | ||
1527 | |||
1528 | struct rx_info { | ||
1529 | int buf_sz; | ||
1530 | |||
1531 | int dirty; | ||
1532 | int curr; | ||
1533 | u32 filled; | ||
1534 | struct rx_desc *ring; | ||
1535 | struct velocity_rd_info *info; /* It's an array */ | ||
1536 | dma_addr_t pool_dma; | ||
1537 | } rx; | ||
1518 | 1538 | ||
1519 | int num_txq; | ||
1520 | |||
1521 | volatile int td_used[TX_QUEUE_NO]; | ||
1522 | int td_curr[TX_QUEUE_NO]; | ||
1523 | int td_tail[TX_QUEUE_NO]; | ||
1524 | struct tx_desc *td_rings[TX_QUEUE_NO]; | ||
1525 | struct velocity_td_info *td_infos[TX_QUEUE_NO]; | ||
1526 | |||
1527 | int rd_curr; | ||
1528 | int rd_dirty; | ||
1529 | u32 rd_filled; | ||
1530 | struct rx_desc *rd_ring; | ||
1531 | struct velocity_rd_info *rd_info; /* It's an array */ | ||
1532 | |||
1533 | #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) | ||
1534 | u32 mib_counter[MAX_HW_MIB_COUNTER]; | 1539 | u32 mib_counter[MAX_HW_MIB_COUNTER]; |
1535 | struct velocity_opt options; | 1540 | struct velocity_opt options; |
1536 | 1541 | ||
@@ -1538,7 +1543,6 @@ struct velocity_info { | |||
1538 | 1543 | ||
1539 | u32 flags; | 1544 | u32 flags; |
1540 | 1545 | ||
1541 | int rx_buf_sz; | ||
1542 | u32 mii_status; | 1546 | u32 mii_status; |
1543 | u32 phy_id; | 1547 | u32 phy_id; |
1544 | int multicast_limit; | 1548 | int multicast_limit; |
@@ -1554,8 +1558,8 @@ struct velocity_info { | |||
1554 | struct velocity_context context; | 1558 | struct velocity_context context; |
1555 | 1559 | ||
1556 | u32 ticks; | 1560 | u32 ticks; |
1557 | u32 rx_bytes; | ||
1558 | 1561 | ||
1562 | u8 rev_id; | ||
1559 | }; | 1563 | }; |
1560 | 1564 | ||
1561 | /** | 1565 | /** |
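With the per-direction state now grouped into vptr->tx and vptr->rx, and AVAIL_TD redefined on top of tx.used, per-queue code reads straight off the sub-structures. A small illustrative helper assuming the via-velocity.h declarations above; it is not taken from the patch:

	/* Illustrative only: wake every TX queue that still has pending work and room. */
	static void example_kick_tx_queues(struct velocity_info *vptr)
	{
		int q;

		for (q = 0; q < vptr->tx.numq; q++)
			if (vptr->tx.used[q] && AVAIL_TD(vptr, q) > 0)
				mac_tx_queue_wake(vptr->mac_regs, q);
	}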
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 846be60e7821..2ae2ec40015d 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -25,7 +25,7 @@ if WAN | |||
25 | # There is no way to detect a comtrol sv11 - force it modular for now. | 25 | # There is no way to detect a comtrol sv11 - force it modular for now. |
26 | config HOSTESS_SV11 | 26 | config HOSTESS_SV11 |
27 | tristate "Comtrol Hostess SV-11 support" | 27 | tristate "Comtrol Hostess SV-11 support" |
28 | depends on ISA && m && ISA_DMA_API && INET | 28 | depends on ISA && m && ISA_DMA_API && INET && HDLC |
29 | help | 29 | help |
30 | Driver for Comtrol Hostess SV-11 network card which | 30 | Driver for Comtrol Hostess SV-11 network card which |
31 | operates on low speed synchronous serial links at up to | 31 | operates on low speed synchronous serial links at up to |
@@ -37,7 +37,7 @@ config HOSTESS_SV11 | |||
37 | # The COSA/SRP driver has not been tested as non-modular yet. | 37 | # The COSA/SRP driver has not been tested as non-modular yet. |
38 | config COSA | 38 | config COSA |
39 | tristate "COSA/SRP sync serial boards support" | 39 | tristate "COSA/SRP sync serial boards support" |
40 | depends on ISA && m && ISA_DMA_API | 40 | depends on ISA && m && ISA_DMA_API && HDLC |
41 | ---help--- | 41 | ---help--- |
42 | Driver for COSA and SRP synchronous serial boards. | 42 | Driver for COSA and SRP synchronous serial boards. |
43 | 43 | ||
@@ -61,7 +61,7 @@ config COSA | |||
61 | # | 61 | # |
62 | config LANMEDIA | 62 | config LANMEDIA |
63 | tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" | 63 | tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" |
64 | depends on PCI && VIRT_TO_BUS | 64 | depends on PCI && VIRT_TO_BUS && HDLC |
65 | ---help--- | 65 | ---help--- |
66 | Driver for the following Lan Media family of serial boards: | 66 | Driver for the following Lan Media family of serial boards: |
67 | 67 | ||
@@ -78,9 +78,8 @@ config LANMEDIA | |||
78 | - LMC 5245 board connects directly to a T3 circuit saving the | 78 | - LMC 5245 board connects directly to a T3 circuit saving the |
79 | additional external hardware. | 79 | additional external hardware. |
80 | 80 | ||
81 | To change setting such as syncPPP vs Cisco HDLC or clock source you | 81 | To change setting such as clock source you will need lmcctl. |
82 | will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> | 82 | It is available at <ftp://ftp.lanmedia.com/> (broken link). |
83 | (broken link). | ||
84 | 83 | ||
85 | To compile this driver as a module, choose M here: the | 84 | To compile this driver as a module, choose M here: the |
86 | module will be called lmc. | 85 | module will be called lmc. |
@@ -88,7 +87,7 @@ config LANMEDIA | |||
88 | # There is no way to detect a Sealevel board. Force it modular | 87 | # There is no way to detect a Sealevel board. Force it modular |
89 | config SEALEVEL_4021 | 88 | config SEALEVEL_4021 |
90 | tristate "Sealevel Systems 4021 support" | 89 | tristate "Sealevel Systems 4021 support" |
91 | depends on ISA && m && ISA_DMA_API && INET | 90 | depends on ISA && m && ISA_DMA_API && INET && HDLC |
92 | help | 91 | help |
93 | This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. | 92 | This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. |
94 | 93 | ||
@@ -154,8 +153,6 @@ config HDLC_PPP | |||
154 | help | 153 | help |
155 | Generic HDLC driver supporting PPP over WAN connections. | 154 | Generic HDLC driver supporting PPP over WAN connections. |
156 | 155 | ||
157 | It will be replaced by new PPP implementation in Linux 2.6.26. | ||
158 | |||
159 | If unsure, say N. | 156 | If unsure, say N. |
160 | 157 | ||
161 | config HDLC_X25 | 158 | config HDLC_X25 |
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index d61fef36afc9..102549605d09 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile | |||
@@ -21,12 +21,11 @@ pc300-y := pc300_drv.o | |||
21 | pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o | 21 | pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o |
22 | pc300-objs := $(pc300-y) | 22 | pc300-objs := $(pc300-y) |
23 | 23 | ||
24 | obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o | 24 | obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o |
25 | obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o | 25 | obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o |
26 | obj-$(CONFIG_COSA) += syncppp.o cosa.o | 26 | obj-$(CONFIG_COSA) += cosa.o |
27 | obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o | 27 | obj-$(CONFIG_FARSYNC) += farsync.o |
28 | obj-$(CONFIG_DSCC4) += dscc4.o | 28 | obj-$(CONFIG_DSCC4) += dscc4.o |
29 | obj-$(CONFIG_LANMEDIA) += syncppp.o | ||
30 | obj-$(CONFIG_X25_ASY) += x25_asy.o | 29 | obj-$(CONFIG_X25_ASY) += x25_asy.o |
31 | 30 | ||
32 | obj-$(CONFIG_LANMEDIA) += lmc/ | 31 | obj-$(CONFIG_LANMEDIA) += lmc/ |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index f7d3349dc3ec..f14051556c87 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
@@ -2,6 +2,7 @@ | |||
2 | 2 | ||
3 | /* | 3 | /* |
4 | * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> | 4 | * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> |
5 | * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -54,7 +55,7 @@ | |||
54 | * | 55 | * |
55 | * The Linux driver (unlike the present *BSD drivers :-) can work even | 56 | * The Linux driver (unlike the present *BSD drivers :-) can work even |
56 | * for the COSA and SRP in one computer and allows each channel to work | 57 | * for the COSA and SRP in one computer and allows each channel to work |
57 | * in one of the three modes (character device, Cisco HDLC, Sync PPP). | 58 | * in one of the two modes (character or network device). |
58 | * | 59 | * |
59 | * AUTHOR | 60 | * AUTHOR |
60 | * | 61 | * |
@@ -72,12 +73,6 @@ | |||
72 | * The Comtrol Hostess SV11 driver by Alan Cox | 73 | * The Comtrol Hostess SV11 driver by Alan Cox |
73 | * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox | 74 | * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox |
74 | */ | 75 | */ |
75 | /* | ||
76 | * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br> | ||
77 | * fixed a deadlock in cosa_sppp_open | ||
78 | */ | ||
79 | |||
80 | /* ---------- Headers, macros, data structures ---------- */ | ||
81 | 76 | ||
82 | #include <linux/module.h> | 77 | #include <linux/module.h> |
83 | #include <linux/kernel.h> | 78 | #include <linux/kernel.h> |
@@ -86,6 +81,7 @@ | |||
86 | #include <linux/fs.h> | 81 | #include <linux/fs.h> |
87 | #include <linux/interrupt.h> | 82 | #include <linux/interrupt.h> |
88 | #include <linux/delay.h> | 83 | #include <linux/delay.h> |
84 | #include <linux/hdlc.h> | ||
89 | #include <linux/errno.h> | 85 | #include <linux/errno.h> |
90 | #include <linux/ioport.h> | 86 | #include <linux/ioport.h> |
91 | #include <linux/netdevice.h> | 87 | #include <linux/netdevice.h> |
@@ -93,14 +89,12 @@ | |||
93 | #include <linux/mutex.h> | 89 | #include <linux/mutex.h> |
94 | #include <linux/device.h> | 90 | #include <linux/device.h> |
95 | #include <linux/smp_lock.h> | 91 | #include <linux/smp_lock.h> |
96 | |||
97 | #undef COSA_SLOW_IO /* for testing purposes only */ | ||
98 | |||
99 | #include <asm/io.h> | 92 | #include <asm/io.h> |
100 | #include <asm/dma.h> | 93 | #include <asm/dma.h> |
101 | #include <asm/byteorder.h> | 94 | #include <asm/byteorder.h> |
102 | 95 | ||
103 | #include <net/syncppp.h> | 96 | #undef COSA_SLOW_IO /* for testing purposes only */ |
97 | |||
104 | #include "cosa.h" | 98 | #include "cosa.h" |
105 | 99 | ||
106 | /* Maximum length of the identification string. */ | 100 | /* Maximum length of the identification string. */ |
@@ -112,7 +106,6 @@ | |||
112 | /* Per-channel data structure */ | 106 | /* Per-channel data structure */ |
113 | 107 | ||
114 | struct channel_data { | 108 | struct channel_data { |
115 | void *if_ptr; /* General purpose pointer (used by SPPP) */ | ||
116 | int usage; /* Usage count; >0 for chrdev, -1 for netdev */ | 109 | int usage; /* Usage count; >0 for chrdev, -1 for netdev */ |
117 | int num; /* Number of the channel */ | 110 | int num; /* Number of the channel */ |
118 | struct cosa_data *cosa; /* Pointer to the per-card structure */ | 111 | struct cosa_data *cosa; /* Pointer to the per-card structure */ |
@@ -136,10 +129,9 @@ struct channel_data { | |||
136 | wait_queue_head_t txwaitq, rxwaitq; | 129 | wait_queue_head_t txwaitq, rxwaitq; |
137 | int tx_status, rx_status; | 130 | int tx_status, rx_status; |
138 | 131 | ||
139 | /* SPPP/HDLC device parts */ | 132 | /* generic HDLC device parts */ |
140 | struct ppp_device pppdev; | 133 | struct net_device *netdev; |
141 | struct sk_buff *rx_skb, *tx_skb; | 134 | struct sk_buff *rx_skb, *tx_skb; |
142 | struct net_device_stats stats; | ||
143 | }; | 135 | }; |
144 | 136 | ||
145 | /* cosa->firmware_status bits */ | 137 | /* cosa->firmware_status bits */ |
@@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size); | |||
281 | static void cosa_kick(struct cosa_data *cosa); | 273 | static void cosa_kick(struct cosa_data *cosa); |
282 | static int cosa_dma_able(struct channel_data *chan, char *buf, int data); | 274 | static int cosa_dma_able(struct channel_data *chan, char *buf, int data); |
283 | 275 | ||
284 | /* SPPP/HDLC stuff */ | 276 | /* Network device stuff */ |
285 | static void sppp_channel_init(struct channel_data *chan); | 277 | static int cosa_net_attach(struct net_device *dev, unsigned short encoding, |
286 | static void sppp_channel_delete(struct channel_data *chan); | 278 | unsigned short parity); |
287 | static int cosa_sppp_open(struct net_device *d); | 279 | static int cosa_net_open(struct net_device *d); |
288 | static int cosa_sppp_close(struct net_device *d); | 280 | static int cosa_net_close(struct net_device *d); |
289 | static void cosa_sppp_timeout(struct net_device *d); | 281 | static void cosa_net_timeout(struct net_device *d); |
290 | static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); | 282 | static int cosa_net_tx(struct sk_buff *skb, struct net_device *d); |
291 | static char *sppp_setup_rx(struct channel_data *channel, int size); | 283 | static char *cosa_net_setup_rx(struct channel_data *channel, int size); |
292 | static int sppp_rx_done(struct channel_data *channel); | 284 | static int cosa_net_rx_done(struct channel_data *channel); |
293 | static int sppp_tx_done(struct channel_data *channel, int size); | 285 | static int cosa_net_tx_done(struct channel_data *channel, int size); |
294 | static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 286 | static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
295 | static struct net_device_stats *cosa_net_stats(struct net_device *dev); | ||
296 | 287 | ||
297 | /* Character device */ | 288 | /* Character device */ |
298 | static void chardev_channel_init(struct channel_data *chan); | ||
299 | static char *chrdev_setup_rx(struct channel_data *channel, int size); | 289 | static char *chrdev_setup_rx(struct channel_data *channel, int size); |
300 | static int chrdev_rx_done(struct channel_data *channel); | 290 | static int chrdev_rx_done(struct channel_data *channel); |
301 | static int chrdev_tx_done(struct channel_data *channel, int size); | 291 | static int chrdev_tx_done(struct channel_data *channel, int size); |
@@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status); | |||
357 | static void debug_status_out(struct cosa_data *cosa, int status); | 347 | static void debug_status_out(struct cosa_data *cosa, int status); |
358 | #endif | 348 | #endif |
359 | 349 | ||
360 | 350 | static inline struct channel_data* dev_to_chan(struct net_device *dev) | |
351 | { | ||
352 | return (struct channel_data *)dev_to_hdlc(dev)->priv; | ||
353 | } | ||
354 | |||
361 | /* ---------- Initialization stuff ---------- */ | 355 | /* ---------- Initialization stuff ---------- */ |
362 | 356 | ||
363 | static int __init cosa_init(void) | 357 | static int __init cosa_init(void) |
364 | { | 358 | { |
365 | int i, err = 0; | 359 | int i, err = 0; |
366 | 360 | ||
367 | printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n"); | ||
368 | #ifdef CONFIG_SMP | ||
369 | printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n"); | ||
370 | #endif | ||
371 | if (cosa_major > 0) { | 361 | if (cosa_major > 0) { |
372 | if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { | 362 | if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { |
373 | printk(KERN_WARNING "cosa: unable to get major %d\n", | 363 | printk(KERN_WARNING "cosa: unable to get major %d\n", |
@@ -402,7 +392,7 @@ static int __init cosa_init(void) | |||
402 | NULL, "cosa%d", i); | 392 | NULL, "cosa%d", i); |
403 | err = 0; | 393 | err = 0; |
404 | goto out; | 394 | goto out; |
405 | 395 | ||
406 | out_chrdev: | 396 | out_chrdev: |
407 | unregister_chrdev(cosa_major, "cosa"); | 397 | unregister_chrdev(cosa_major, "cosa"); |
408 | out: | 398 | out: |
@@ -414,43 +404,29 @@ static void __exit cosa_exit(void) | |||
414 | { | 404 | { |
415 | struct cosa_data *cosa; | 405 | struct cosa_data *cosa; |
416 | int i; | 406 | int i; |
417 | printk(KERN_INFO "Unloading the cosa module\n"); | ||
418 | 407 | ||
419 | for (i=0; i<nr_cards; i++) | 408 | for (i = 0; i < nr_cards; i++) |
420 | device_destroy(cosa_class, MKDEV(cosa_major, i)); | 409 | device_destroy(cosa_class, MKDEV(cosa_major, i)); |
421 | class_destroy(cosa_class); | 410 | class_destroy(cosa_class); |
422 | for (cosa=cosa_cards; nr_cards--; cosa++) { | 411 | |
412 | for (cosa = cosa_cards; nr_cards--; cosa++) { | ||
423 | /* Clean up the per-channel data */ | 413 | /* Clean up the per-channel data */ |
424 | for (i=0; i<cosa->nchannels; i++) { | 414 | for (i = 0; i < cosa->nchannels; i++) { |
425 | /* Chardev driver has no alloc'd per-channel data */ | 415 | /* Chardev driver has no alloc'd per-channel data */ |
426 | sppp_channel_delete(cosa->chan+i); | 416 | unregister_hdlc_device(cosa->chan[i].netdev); |
417 | free_netdev(cosa->chan[i].netdev); | ||
427 | } | 418 | } |
428 | /* Clean up the per-card data */ | 419 | /* Clean up the per-card data */ |
429 | kfree(cosa->chan); | 420 | kfree(cosa->chan); |
430 | kfree(cosa->bouncebuf); | 421 | kfree(cosa->bouncebuf); |
431 | free_irq(cosa->irq, cosa); | 422 | free_irq(cosa->irq, cosa); |
432 | free_dma(cosa->dma); | 423 | free_dma(cosa->dma); |
433 | release_region(cosa->datareg,is_8bit(cosa)?2:4); | 424 | release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4); |
434 | } | 425 | } |
435 | unregister_chrdev(cosa_major, "cosa"); | 426 | unregister_chrdev(cosa_major, "cosa"); |
436 | } | 427 | } |
437 | module_exit(cosa_exit); | 428 | module_exit(cosa_exit); |
438 | 429 | ||
439 | /* | ||
440 | * This function should register all the net devices needed for the | ||
441 | * single channel. | ||
442 | */ | ||
443 | static __inline__ void channel_init(struct channel_data *chan) | ||
444 | { | ||
445 | sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num); | ||
446 | |||
447 | /* Initialize the chardev data structures */ | ||
448 | chardev_channel_init(chan); | ||
449 | |||
450 | /* Register the sppp interface */ | ||
451 | sppp_channel_init(chan); | ||
452 | } | ||
453 | |||
454 | static int cosa_probe(int base, int irq, int dma) | 430 | static int cosa_probe(int base, int irq, int dma) |
455 | { | 431 | { |
456 | struct cosa_data *cosa = cosa_cards+nr_cards; | 432 | struct cosa_data *cosa = cosa_cards+nr_cards; |
@@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma) | |||
576 | /* Initialize the per-channel data */ | 552 | /* Initialize the per-channel data */ |
577 | cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); | 553 | cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); |
578 | if (!cosa->chan) { | 554 | if (!cosa->chan) { |
579 | err = -ENOMEM; | 555 | err = -ENOMEM; |
580 | goto err_out3; | 556 | goto err_out3; |
581 | } | 557 | } |
582 | for (i=0; i<cosa->nchannels; i++) { | 558 | |
583 | cosa->chan[i].cosa = cosa; | 559 | for (i = 0; i < cosa->nchannels; i++) { |
584 | cosa->chan[i].num = i; | 560 | struct channel_data *chan = &cosa->chan[i]; |
585 | channel_init(cosa->chan+i); | 561 | |
562 | chan->cosa = cosa; | ||
563 | chan->num = i; | ||
564 | sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i); | ||
565 | |||
566 | /* Initialize the chardev data structures */ | ||
567 | mutex_init(&chan->rlock); | ||
568 | init_MUTEX(&chan->wsem); | ||
569 | |||
570 | /* Register the network interface */ | ||
571 | if (!(chan->netdev = alloc_hdlcdev(chan))) { | ||
572 | printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n", | ||
573 | chan->name); | ||
574 | goto err_hdlcdev; | ||
575 | } | ||
576 | dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; | ||
577 | dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; | ||
578 | chan->netdev->open = cosa_net_open; | ||
579 | chan->netdev->stop = cosa_net_close; | ||
580 | chan->netdev->do_ioctl = cosa_net_ioctl; | ||
581 | chan->netdev->tx_timeout = cosa_net_timeout; | ||
582 | chan->netdev->watchdog_timeo = TX_TIMEOUT; | ||
583 | chan->netdev->base_addr = chan->cosa->datareg; | ||
584 | chan->netdev->irq = chan->cosa->irq; | ||
585 | chan->netdev->dma = chan->cosa->dma; | ||
586 | if (register_hdlc_device(chan->netdev)) { | ||
587 | printk(KERN_WARNING "%s: register_hdlc_device()" | ||
588 | " failed.\n", chan->netdev->name); | ||
589 | free_netdev(chan->netdev); | ||
590 | goto err_hdlcdev; | ||
591 | } | ||
586 | } | 592 | } |
587 | 593 | ||
588 | printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", | 594 | printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", |
@@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma) | |||
590 | cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); | 596 | cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); |
591 | 597 | ||
592 | return nr_cards++; | 598 | return nr_cards++; |
599 | |||
600 | err_hdlcdev: | ||
601 | while (i-- > 0) { | ||
602 | unregister_hdlc_device(cosa->chan[i].netdev); | ||
603 | free_netdev(cosa->chan[i].netdev); | ||
604 | } | ||
605 | kfree(cosa->chan); | ||
593 | err_out3: | 606 | err_out3: |
594 | kfree(cosa->bouncebuf); | 607 | kfree(cosa->bouncebuf); |
595 | err_out2: | 608 | err_out2: |
596 | free_dma(cosa->dma); | 609 | free_dma(cosa->dma); |
597 | err_out1: | 610 | err_out1: |
598 | free_irq(cosa->irq, cosa); | 611 | free_irq(cosa->irq, cosa); |
599 | err_out: | 612 | err_out: |
600 | release_region(cosa->datareg,is_8bit(cosa)?2:4); | 613 | release_region(cosa->datareg,is_8bit(cosa)?2:4); |
601 | printk(KERN_NOTICE "cosa%d: allocating resources failed\n", | 614 | printk(KERN_NOTICE "cosa%d: allocating resources failed\n", |
602 | cosa->num); | 615 | cosa->num); |
@@ -604,54 +617,19 @@ err_out: | |||
604 | } | 617 | } |
605 | 618 | ||
606 | 619 | ||
607 | /*---------- SPPP/HDLC netdevice ---------- */ | 620 | /*---------- network device ---------- */ |
608 | 621 | ||
609 | static void cosa_setup(struct net_device *d) | 622 | static int cosa_net_attach(struct net_device *dev, unsigned short encoding, |
623 | unsigned short parity) | ||
610 | { | 624 | { |
611 | d->open = cosa_sppp_open; | 625 | if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) |
612 | d->stop = cosa_sppp_close; | 626 | return 0; |
613 | d->hard_start_xmit = cosa_sppp_tx; | 627 | return -EINVAL; |
614 | d->do_ioctl = cosa_sppp_ioctl; | ||
615 | d->get_stats = cosa_net_stats; | ||
616 | d->tx_timeout = cosa_sppp_timeout; | ||
617 | d->watchdog_timeo = TX_TIMEOUT; | ||
618 | } | ||
619 | |||
620 | static void sppp_channel_init(struct channel_data *chan) | ||
621 | { | ||
622 | struct net_device *d; | ||
623 | chan->if_ptr = &chan->pppdev; | ||
624 | d = alloc_netdev(0, chan->name, cosa_setup); | ||
625 | if (!d) { | ||
626 | printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name); | ||
627 | return; | ||
628 | } | ||
629 | chan->pppdev.dev = d; | ||
630 | d->base_addr = chan->cosa->datareg; | ||
631 | d->irq = chan->cosa->irq; | ||
632 | d->dma = chan->cosa->dma; | ||
633 | d->ml_priv = chan; | ||
634 | sppp_attach(&chan->pppdev); | ||
635 | if (register_netdev(d)) { | ||
636 | printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); | ||
637 | sppp_detach(d); | ||
638 | free_netdev(d); | ||
639 | chan->pppdev.dev = NULL; | ||
640 | return; | ||
641 | } | ||
642 | } | ||
643 | |||
644 | static void sppp_channel_delete(struct channel_data *chan) | ||
645 | { | ||
646 | unregister_netdev(chan->pppdev.dev); | ||
647 | sppp_detach(chan->pppdev.dev); | ||
648 | free_netdev(chan->pppdev.dev); | ||
649 | chan->pppdev.dev = NULL; | ||
650 | } | 628 | } |
651 | 629 | ||
652 | static int cosa_sppp_open(struct net_device *d) | 630 | static int cosa_net_open(struct net_device *dev) |
653 | { | 631 | { |
654 | struct channel_data *chan = d->ml_priv; | 632 | struct channel_data *chan = dev_to_chan(dev); |
655 | int err; | 633 | int err; |
656 | unsigned long flags; | 634 | unsigned long flags; |
657 | 635 | ||
@@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d) | |||
662 | } | 640 | } |
663 | spin_lock_irqsave(&chan->cosa->lock, flags); | 641 | spin_lock_irqsave(&chan->cosa->lock, flags); |
664 | if (chan->usage != 0) { | 642 | if (chan->usage != 0) { |
665 | printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", | 643 | printk(KERN_WARNING "%s: cosa_net_open called with usage count" |
666 | chan->name, chan->usage); | 644 | " %d\n", chan->name, chan->usage); |
667 | spin_unlock_irqrestore(&chan->cosa->lock, flags); | 645 | spin_unlock_irqrestore(&chan->cosa->lock, flags); |
668 | return -EBUSY; | 646 | return -EBUSY; |
669 | } | 647 | } |
670 | chan->setup_rx = sppp_setup_rx; | 648 | chan->setup_rx = cosa_net_setup_rx; |
671 | chan->tx_done = sppp_tx_done; | 649 | chan->tx_done = cosa_net_tx_done; |
672 | chan->rx_done = sppp_rx_done; | 650 | chan->rx_done = cosa_net_rx_done; |
673 | chan->usage=-1; | 651 | chan->usage = -1; |
674 | chan->cosa->usage++; | 652 | chan->cosa->usage++; |
675 | spin_unlock_irqrestore(&chan->cosa->lock, flags); | 653 | spin_unlock_irqrestore(&chan->cosa->lock, flags); |
676 | 654 | ||
677 | err = sppp_open(d); | 655 | err = hdlc_open(dev); |
678 | if (err) { | 656 | if (err) { |
679 | spin_lock_irqsave(&chan->cosa->lock, flags); | 657 | spin_lock_irqsave(&chan->cosa->lock, flags); |
680 | chan->usage=0; | 658 | chan->usage = 0; |
681 | chan->cosa->usage--; | 659 | chan->cosa->usage--; |
682 | |||
683 | spin_unlock_irqrestore(&chan->cosa->lock, flags); | 660 | spin_unlock_irqrestore(&chan->cosa->lock, flags); |
684 | return err; | 661 | return err; |
685 | } | 662 | } |
686 | 663 | ||
687 | netif_start_queue(d); | 664 | netif_start_queue(dev); |
688 | cosa_enable_rx(chan); | 665 | cosa_enable_rx(chan); |
689 | return 0; | 666 | return 0; |
690 | } | 667 | } |
691 | 668 | ||
692 | static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) | 669 | static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev) |
693 | { | 670 | { |
694 | struct channel_data *chan = dev->ml_priv; | 671 | struct channel_data *chan = dev_to_chan(dev); |
695 | 672 | ||
696 | netif_stop_queue(dev); | 673 | netif_stop_queue(dev); |
697 | 674 | ||
@@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) | |||
700 | return 0; | 677 | return 0; |
701 | } | 678 | } |
702 | 679 | ||
703 | static void cosa_sppp_timeout(struct net_device *dev) | 680 | static void cosa_net_timeout(struct net_device *dev) |
704 | { | 681 | { |
705 | struct channel_data *chan = dev->ml_priv; | 682 | struct channel_data *chan = dev_to_chan(dev); |
706 | 683 | ||
707 | if (test_bit(RXBIT, &chan->cosa->rxtx)) { | 684 | if (test_bit(RXBIT, &chan->cosa->rxtx)) { |
708 | chan->stats.rx_errors++; | 685 | chan->netdev->stats.rx_errors++; |
709 | chan->stats.rx_missed_errors++; | 686 | chan->netdev->stats.rx_missed_errors++; |
710 | } else { | 687 | } else { |
711 | chan->stats.tx_errors++; | 688 | chan->netdev->stats.tx_errors++; |
712 | chan->stats.tx_aborted_errors++; | 689 | chan->netdev->stats.tx_aborted_errors++; |
713 | } | 690 | } |
714 | cosa_kick(chan->cosa); | 691 | cosa_kick(chan->cosa); |
715 | if (chan->tx_skb) { | 692 | if (chan->tx_skb) { |
@@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev) | |||
719 | netif_wake_queue(dev); | 696 | netif_wake_queue(dev); |
720 | } | 697 | } |
721 | 698 | ||
722 | static int cosa_sppp_close(struct net_device *d) | 699 | static int cosa_net_close(struct net_device *dev) |
723 | { | 700 | { |
724 | struct channel_data *chan = d->ml_priv; | 701 | struct channel_data *chan = dev_to_chan(dev); |
725 | unsigned long flags; | 702 | unsigned long flags; |
726 | 703 | ||
727 | netif_stop_queue(d); | 704 | netif_stop_queue(dev); |
728 | sppp_close(d); | 705 | hdlc_close(dev); |
729 | cosa_disable_rx(chan); | 706 | cosa_disable_rx(chan); |
730 | spin_lock_irqsave(&chan->cosa->lock, flags); | 707 | spin_lock_irqsave(&chan->cosa->lock, flags); |
731 | if (chan->rx_skb) { | 708 | if (chan->rx_skb) { |
@@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d) | |||
736 | kfree_skb(chan->tx_skb); | 713 | kfree_skb(chan->tx_skb); |
737 | chan->tx_skb = NULL; | 714 | chan->tx_skb = NULL; |
738 | } | 715 | } |
739 | chan->usage=0; | 716 | chan->usage = 0; |
740 | chan->cosa->usage--; | 717 | chan->cosa->usage--; |
741 | spin_unlock_irqrestore(&chan->cosa->lock, flags); | 718 | spin_unlock_irqrestore(&chan->cosa->lock, flags); |
742 | return 0; | 719 | return 0; |
743 | } | 720 | } |
744 | 721 | ||
745 | static char *sppp_setup_rx(struct channel_data *chan, int size) | 722 | static char *cosa_net_setup_rx(struct channel_data *chan, int size) |
746 | { | 723 | { |
747 | /* | 724 | /* |
748 | * We can safely fall back to non-dma-able memory, because we have | 725 | * We can safely fall back to non-dma-able memory, because we have |
@@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size) | |||
754 | if (chan->rx_skb == NULL) { | 731 | if (chan->rx_skb == NULL) { |
755 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", | 732 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", |
756 | chan->name); | 733 | chan->name); |
757 | chan->stats.rx_dropped++; | 734 | chan->netdev->stats.rx_dropped++; |
758 | return NULL; | 735 | return NULL; |
759 | } | 736 | } |
760 | chan->pppdev.dev->trans_start = jiffies; | 737 | chan->netdev->trans_start = jiffies; |
761 | return skb_put(chan->rx_skb, size); | 738 | return skb_put(chan->rx_skb, size); |
762 | } | 739 | } |
763 | 740 | ||
764 | static int sppp_rx_done(struct channel_data *chan) | 741 | static int cosa_net_rx_done(struct channel_data *chan) |
765 | { | 742 | { |
766 | if (!chan->rx_skb) { | 743 | if (!chan->rx_skb) { |
767 | printk(KERN_WARNING "%s: rx_done with empty skb!\n", | 744 | printk(KERN_WARNING "%s: rx_done with empty skb!\n", |
768 | chan->name); | 745 | chan->name); |
769 | chan->stats.rx_errors++; | 746 | chan->netdev->stats.rx_errors++; |
770 | chan->stats.rx_frame_errors++; | 747 | chan->netdev->stats.rx_frame_errors++; |
771 | return 0; | 748 | return 0; |
772 | } | 749 | } |
773 | chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); | 750 | chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev); |
774 | chan->rx_skb->dev = chan->pppdev.dev; | 751 | chan->rx_skb->dev = chan->netdev; |
775 | skb_reset_mac_header(chan->rx_skb); | 752 | skb_reset_mac_header(chan->rx_skb); |
776 | chan->stats.rx_packets++; | 753 | chan->netdev->stats.rx_packets++; |
777 | chan->stats.rx_bytes += chan->cosa->rxsize; | 754 | chan->netdev->stats.rx_bytes += chan->cosa->rxsize; |
778 | netif_rx(chan->rx_skb); | 755 | netif_rx(chan->rx_skb); |
779 | chan->rx_skb = NULL; | 756 | chan->rx_skb = NULL; |
780 | chan->pppdev.dev->last_rx = jiffies; | 757 | chan->netdev->last_rx = jiffies; |
781 | return 0; | 758 | return 0; |
782 | } | 759 | } |
783 | 760 | ||
784 | /* ARGSUSED */ | 761 | /* ARGSUSED */ |
785 | static int sppp_tx_done(struct channel_data *chan, int size) | 762 | static int cosa_net_tx_done(struct channel_data *chan, int size) |
786 | { | 763 | { |
787 | if (!chan->tx_skb) { | 764 | if (!chan->tx_skb) { |
788 | printk(KERN_WARNING "%s: tx_done with empty skb!\n", | 765 | printk(KERN_WARNING "%s: tx_done with empty skb!\n", |
789 | chan->name); | 766 | chan->name); |
790 | chan->stats.tx_errors++; | 767 | chan->netdev->stats.tx_errors++; |
791 | chan->stats.tx_aborted_errors++; | 768 | chan->netdev->stats.tx_aborted_errors++; |
792 | return 1; | 769 | return 1; |
793 | } | 770 | } |
794 | dev_kfree_skb_irq(chan->tx_skb); | 771 | dev_kfree_skb_irq(chan->tx_skb); |
795 | chan->tx_skb = NULL; | 772 | chan->tx_skb = NULL; |
796 | chan->stats.tx_packets++; | 773 | chan->netdev->stats.tx_packets++; |
797 | chan->stats.tx_bytes += size; | 774 | chan->netdev->stats.tx_bytes += size; |
798 | netif_wake_queue(chan->pppdev.dev); | 775 | netif_wake_queue(chan->netdev); |
799 | return 1; | 776 | return 1; |
800 | } | 777 | } |
801 | 778 | ||
802 | static struct net_device_stats *cosa_net_stats(struct net_device *dev) | ||
803 | { | ||
804 | struct channel_data *chan = dev->ml_priv; | ||
805 | return &chan->stats; | ||
806 | } | ||
807 | |||
808 | |||
809 | /*---------- Character device ---------- */ | 779 | /*---------- Character device ---------- */ |
810 | 780 | ||
811 | static void chardev_channel_init(struct channel_data *chan) | ||
812 | { | ||
813 | mutex_init(&chan->rlock); | ||
814 | init_MUTEX(&chan->wsem); | ||
815 | } | ||
816 | |||
817 | static ssize_t cosa_read(struct file *file, | 781 | static ssize_t cosa_read(struct file *file, |
818 | char __user *buf, size_t count, loff_t *ppos) | 782 | char __user *buf, size_t count, loff_t *ppos) |
819 | { | 783 | { |
@@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa, | |||
1223 | return -ENOIOCTLCMD; | 1187 | return -ENOIOCTLCMD; |
1224 | } | 1188 | } |
1225 | 1189 | ||
1226 | static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, | 1190 | static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
1227 | int cmd) | ||
1228 | { | 1191 | { |
1229 | int rv; | 1192 | int rv; |
1230 | struct channel_data *chan = dev->ml_priv; | 1193 | struct channel_data *chan = dev_to_chan(dev); |
1231 | rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); | 1194 | rv = cosa_ioctl_common(chan->cosa, chan, cmd, |
1232 | if (rv == -ENOIOCTLCMD) { | 1195 | (unsigned long)ifr->ifr_data); |
1233 | return sppp_do_ioctl(dev, ifr, cmd); | 1196 | if (rv != -ENOIOCTLCMD) |
1234 | } | 1197 | return rv; |
1235 | return rv; | 1198 | return hdlc_ioctl(dev, ifr, cmd); |
1236 | } | 1199 | } |
1237 | 1200 | ||
1238 | static int cosa_chardev_ioctl(struct inode *inode, struct file *file, | 1201 | static int cosa_chardev_ioctl(struct inode *inode, struct file *file, |
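The per-channel setup in the cosa probe path above follows the usual generic-HDLC registration sequence: allocate the net_device with alloc_hdlcdev(), fill in the attach/xmit hooks through dev_to_hdlc(), then register_hdlc_device(), unwinding with free_netdev() on failure. Below is a minimal sketch of that sequence only; the my_channel structure and the my_attach()/my_xmit() names are hypothetical stand-ins for illustration, not part of the cosa driver.

/* Sketch of the alloc_hdlcdev() + register_hdlc_device() pattern
 * (assumed names: my_channel, my_attach, my_xmit). */
#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct my_channel {
	struct net_device *netdev;
	/* ... driver-private state ... */
};

static int my_attach(struct net_device *dev, unsigned short encoding,
		     unsigned short parity)
{
	/* Accept only what the hardware can do; HDLC protocols call this. */
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A real driver hands the frame to the hardware here. */
	dev_kfree_skb(skb);
	return 0;
}

static int my_register_channel(struct my_channel *chan)
{
	struct net_device *dev = alloc_hdlcdev(chan);

	if (!dev)
		return -ENOMEM;
	dev_to_hdlc(dev)->attach = my_attach;
	dev_to_hdlc(dev)->xmit = my_xmit;
	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -EIO;
	}
	chan->netdev = dev;
	return 0;
}

Teardown mirrors this: unregister_hdlc_device() followed by free_netdev(), exactly as the error path and cosa_exit() do above.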
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 50ef5b4efd6d..f5d55ad02267 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -103,7 +103,6 @@ | |||
103 | #include <linux/netdevice.h> | 103 | #include <linux/netdevice.h> |
104 | #include <linux/skbuff.h> | 104 | #include <linux/skbuff.h> |
105 | #include <linux/delay.h> | 105 | #include <linux/delay.h> |
106 | #include <net/syncppp.h> | ||
107 | #include <linux/hdlc.h> | 106 | #include <linux/hdlc.h> |
108 | #include <linux/mutex.h> | 107 | #include <linux/mutex.h> |
109 | 108 | ||
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 754f00809e3e..9557ad078ab8 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c | |||
@@ -47,10 +47,7 @@ MODULE_LICENSE("GPL"); | |||
47 | /* Default parameters for the link | 47 | /* Default parameters for the link |
48 | */ | 48 | */ |
49 | #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is | 49 | #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is |
50 | * useful, the syncppp module forces | 50 | * useful */ |
51 | * this down assuming a slower line I | ||
52 | * guess. | ||
53 | */ | ||
54 | #define FST_TXQ_DEPTH 16 /* This one is for the buffering | 51 | #define FST_TXQ_DEPTH 16 /* This one is for the buffering |
55 | * of frames on the way down to the card | 52 | * of frames on the way down to the card |
56 | * so that we can keep the card busy | 53 | * so that we can keep the card busy |
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h index d871dafa87a1..6b27e7c3d449 100644 --- a/drivers/net/wan/farsync.h +++ b/drivers/net/wan/farsync.h | |||
@@ -54,9 +54,6 @@ | |||
54 | 54 | ||
55 | 55 | ||
56 | /* Ioctl call command values | 56 | /* Ioctl call command values |
57 | * | ||
58 | * The first three private ioctls are used by the sync-PPP module, | ||
59 | * allowing a little room for expansion we start our numbering at 10. | ||
60 | */ | 57 | */ |
61 | #define FSTWRITE (SIOCDEVPRIVATE+10) | 58 | #define FSTWRITE (SIOCDEVPRIVATE+10) |
62 | #define FSTCPURESET (SIOCDEVPRIVATE+11) | 59 | #define FSTCPURESET (SIOCDEVPRIVATE+11) |
@@ -202,9 +199,6 @@ struct fstioc_info { | |||
202 | #define J1 7 | 199 | #define J1 7 |
203 | 200 | ||
204 | /* "proto" */ | 201 | /* "proto" */ |
205 | #define FST_HDLC 1 /* Cisco compatible HDLC */ | ||
206 | #define FST_PPP 2 /* Sync PPP */ | ||
207 | #define FST_MONITOR 3 /* Monitor only (raw packet reception) */ | ||
208 | #define FST_RAW 4 /* Two way raw packets */ | 202 | #define FST_RAW 4 /* Two way raw packets */ |
209 | #define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ | 203 | #define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ |
210 | 204 | ||
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index e3a536477c7e..1f2a140c9f7c 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
@@ -22,20 +22,19 @@ | |||
22 | * - proto->start() and stop() are called with spin_lock_irq held. | 22 | * - proto->start() and stop() are called with spin_lock_irq held. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/poll.h> | ||
29 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/hdlc.h> | ||
30 | #include <linux/if_arp.h> | 27 | #include <linux/if_arp.h> |
28 | #include <linux/inetdevice.h> | ||
31 | #include <linux/init.h> | 29 | #include <linux/init.h> |
32 | #include <linux/skbuff.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | ||
32 | #include <linux/notifier.h> | ||
33 | #include <linux/pkt_sched.h> | 33 | #include <linux/pkt_sched.h> |
34 | #include <linux/inetdevice.h> | 34 | #include <linux/poll.h> |
35 | #include <linux/lapb.h> | ||
36 | #include <linux/rtnetlink.h> | 35 | #include <linux/rtnetlink.h> |
37 | #include <linux/notifier.h> | 36 | #include <linux/skbuff.h> |
38 | #include <linux/hdlc.h> | 37 | #include <linux/slab.h> |
39 | #include <net/net_namespace.h> | 38 | #include <net/net_namespace.h> |
40 | 39 | ||
41 | 40 | ||
@@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, | |||
109 | 108 | ||
110 | if (dev->get_stats != hdlc_get_stats) | 109 | if (dev->get_stats != hdlc_get_stats) |
111 | return NOTIFY_DONE; /* not an HDLC device */ | 110 | return NOTIFY_DONE; /* not an HDLC device */ |
112 | 111 | ||
113 | if (event != NETDEV_CHANGE) | 112 | if (event != NETDEV_CHANGE) |
114 | return NOTIFY_DONE; /* Only interrested in carrier changes */ | 113 | return NOTIFY_DONE; /* Only interrested in carrier changes */ |
115 | 114 | ||
@@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = { | |||
357 | 356 | ||
358 | 357 | ||
359 | static struct notifier_block hdlc_notifier = { | 358 | static struct notifier_block hdlc_notifier = { |
360 | .notifier_call = hdlc_device_event, | 359 | .notifier_call = hdlc_device_event, |
361 | }; | 360 | }; |
362 | 361 | ||
363 | 362 | ||
@@ -367,8 +366,8 @@ static int __init hdlc_module_init(void) | |||
367 | 366 | ||
368 | printk(KERN_INFO "%s\n", version); | 367 | printk(KERN_INFO "%s\n", version); |
369 | if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) | 368 | if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) |
370 | return result; | 369 | return result; |
371 | dev_add_pack(&hdlc_packet_type); | 370 | dev_add_pack(&hdlc_packet_type); |
372 | return 0; | 371 | return 0; |
373 | } | 372 | } |
374 | 373 | ||
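hdlc.c recognises HDLC interfaces by comparing dev->get_stats against its own handler and learns of carrier changes through a netdevice notifier registered at module init. The following is a bare-bones sketch of that notifier pattern with hypothetical names; it is not the hdlc.c implementation itself.

/* Hypothetical example of the register_netdevice_notifier() pattern. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_device_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;	/* only carrier changes matter here */
	printk(KERN_DEBUG "%s: carrier change\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_notifier = {
	.notifier_call = my_device_event,
};

static int __init my_init(void)
{
	return register_netdevice_notifier(&my_notifier);
}

static void __exit my_exit(void)
{
	unregister_netdevice_notifier(&my_notifier);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");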
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 849819c2552d..44e64b15dbd1 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -9,19 +9,18 @@ | |||
9 | * as published by the Free Software Foundation. | 9 | * as published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/hdlc.h> | ||
17 | #include <linux/if_arp.h> | 14 | #include <linux/if_arp.h> |
15 | #include <linux/inetdevice.h> | ||
18 | #include <linux/init.h> | 16 | #include <linux/init.h> |
19 | #include <linux/skbuff.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | ||
20 | #include <linux/pkt_sched.h> | 19 | #include <linux/pkt_sched.h> |
21 | #include <linux/inetdevice.h> | 20 | #include <linux/poll.h> |
22 | #include <linux/lapb.h> | ||
23 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
24 | #include <linux/hdlc.h> | 22 | #include <linux/skbuff.h> |
23 | #include <linux/slab.h> | ||
25 | 24 | ||
26 | #undef DEBUG_HARD_HEADER | 25 | #undef DEBUG_HARD_HEADER |
27 | 26 | ||
@@ -68,9 +67,9 @@ struct cisco_state { | |||
68 | static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); | 67 | static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); |
69 | 68 | ||
70 | 69 | ||
71 | static inline struct cisco_state * state(hdlc_device *hdlc) | 70 | static inline struct cisco_state* state(hdlc_device *hdlc) |
72 | { | 71 | { |
73 | return(struct cisco_state *)(hdlc->state); | 72 | return (struct cisco_state *)hdlc->state; |
74 | } | 73 | } |
75 | 74 | ||
76 | 75 | ||
@@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
172 | data->address != CISCO_UNICAST) | 171 | data->address != CISCO_UNICAST) |
173 | goto rx_error; | 172 | goto rx_error; |
174 | 173 | ||
175 | switch(ntohs(data->protocol)) { | 174 | switch (ntohs(data->protocol)) { |
176 | case CISCO_SYS_INFO: | 175 | case CISCO_SYS_INFO: |
177 | /* Packet is not needed, drop it. */ | 176 | /* Packet is not needed, drop it. */ |
178 | dev_kfree_skb_any(skb); | 177 | dev_kfree_skb_any(skb); |
@@ -336,7 +335,7 @@ static struct hdlc_proto proto = { | |||
336 | static const struct header_ops cisco_header_ops = { | 335 | static const struct header_ops cisco_header_ops = { |
337 | .create = cisco_hard_header, | 336 | .create = cisco_hard_header, |
338 | }; | 337 | }; |
339 | 338 | ||
340 | static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | 339 | static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) |
341 | { | 340 | { |
342 | cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; | 341 | cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; |
@@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
359 | return 0; | 358 | return 0; |
360 | 359 | ||
361 | case IF_PROTO_CISCO: | 360 | case IF_PROTO_CISCO: |
362 | if(!capable(CAP_NET_ADMIN)) | 361 | if (!capable(CAP_NET_ADMIN)) |
363 | return -EPERM; | 362 | return -EPERM; |
364 | 363 | ||
365 | if(dev->flags & IFF_UP) | 364 | if (dev->flags & IFF_UP) |
366 | return -EBUSY; | 365 | return -EBUSY; |
367 | 366 | ||
368 | if (copy_from_user(&new_settings, cisco_s, size)) | 367 | if (copy_from_user(&new_settings, cisco_s, size)) |
@@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
372 | new_settings.timeout < 2) | 371 | new_settings.timeout < 2) |
373 | return -EINVAL; | 372 | return -EINVAL; |
374 | 373 | ||
375 | result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); | 374 | result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); |
376 | if (result) | 375 | if (result) |
377 | return result; | 376 | return result; |
378 | 377 | ||
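From the protocol side, the IF_PROTO_CISCO ioctl branch above ends by asking the low-level driver whether it can run NRZ encoding with CCITT CRC16 parity, via the attach hook reached through dev_to_hdlc(). A hedged sketch of that call, assuming an already registered HDLC netdevice and a hypothetical helper name:

/* Sketch only: a protocol requesting the line coding it needs,
 * mirroring the hdlc->attach() call in the ioctl handler above. */
#include <linux/hdlc.h>
#include <linux/netdevice.h>

static int my_request_nrz_crc16(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
	if (result)
		return result;	/* hardware cannot provide this framing */
	return 0;
}

Hardware drivers such as cosa and hostess_sv11 answer this request in their attach callbacks, returning 0 for supported combinations and -EINVAL otherwise.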
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 62e93dac6b13..d3d5055741ad 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -33,20 +33,19 @@ | |||
33 | 33 | ||
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/poll.h> | ||
40 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/hdlc.h> | ||
41 | #include <linux/if_arp.h> | 39 | #include <linux/if_arp.h> |
40 | #include <linux/inetdevice.h> | ||
42 | #include <linux/init.h> | 41 | #include <linux/init.h> |
43 | #include <linux/skbuff.h> | 42 | #include <linux/kernel.h> |
43 | #include <linux/module.h> | ||
44 | #include <linux/pkt_sched.h> | 44 | #include <linux/pkt_sched.h> |
45 | #include <linux/inetdevice.h> | 45 | #include <linux/poll.h> |
46 | #include <linux/lapb.h> | ||
47 | #include <linux/rtnetlink.h> | 46 | #include <linux/rtnetlink.h> |
48 | #include <linux/etherdevice.h> | 47 | #include <linux/skbuff.h> |
49 | #include <linux/hdlc.h> | 48 | #include <linux/slab.h> |
50 | 49 | ||
51 | #undef DEBUG_PKT | 50 | #undef DEBUG_PKT |
52 | #undef DEBUG_ECN | 51 | #undef DEBUG_ECN |
@@ -96,7 +95,7 @@ typedef struct { | |||
96 | unsigned ea1: 1; | 95 | unsigned ea1: 1; |
97 | unsigned cr: 1; | 96 | unsigned cr: 1; |
98 | unsigned dlcih: 6; | 97 | unsigned dlcih: 6; |
99 | 98 | ||
100 | unsigned ea2: 1; | 99 | unsigned ea2: 1; |
101 | unsigned de: 1; | 100 | unsigned de: 1; |
102 | unsigned becn: 1; | 101 | unsigned becn: 1; |
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 00308337928e..4efe9e6d32d5 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -9,19 +9,18 @@ | |||
9 | * as published by the Free Software Foundation. | 9 | * as published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/hdlc.h> | ||
17 | #include <linux/if_arp.h> | 14 | #include <linux/if_arp.h> |
15 | #include <linux/inetdevice.h> | ||
18 | #include <linux/init.h> | 16 | #include <linux/init.h> |
19 | #include <linux/skbuff.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | ||
20 | #include <linux/pkt_sched.h> | 19 | #include <linux/pkt_sched.h> |
21 | #include <linux/inetdevice.h> | 20 | #include <linux/poll.h> |
22 | #include <linux/lapb.h> | ||
23 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
24 | #include <linux/hdlc.h> | 22 | #include <linux/skbuff.h> |
23 | #include <linux/slab.h> | ||
25 | #include <net/syncppp.h> | 24 | #include <net/syncppp.h> |
26 | 25 | ||
27 | struct ppp_state { | 26 | struct ppp_state { |
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index bbbb819d764c..8612311748f4 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c | |||
@@ -9,19 +9,18 @@ | |||
9 | * as published by the Free Software Foundation. | 9 | * as published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/hdlc.h> | ||
17 | #include <linux/if_arp.h> | 14 | #include <linux/if_arp.h> |
15 | #include <linux/inetdevice.h> | ||
18 | #include <linux/init.h> | 16 | #include <linux/init.h> |
19 | #include <linux/skbuff.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | ||
20 | #include <linux/pkt_sched.h> | 19 | #include <linux/pkt_sched.h> |
21 | #include <linux/inetdevice.h> | 20 | #include <linux/poll.h> |
22 | #include <linux/lapb.h> | ||
23 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
24 | #include <linux/hdlc.h> | 22 | #include <linux/skbuff.h> |
23 | #include <linux/slab.h> | ||
25 | 24 | ||
26 | 25 | ||
27 | static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); | 26 | static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); |
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 26dee600506f..a13fc3207520 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c | |||
@@ -9,20 +9,19 @@ | |||
9 | * as published by the Free Software Foundation. | 9 | * as published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/etherdevice.h> | ||
14 | #include <linux/hdlc.h> | ||
17 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/inetdevice.h> | ||
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
19 | #include <linux/skbuff.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/pkt_sched.h> | 20 | #include <linux/pkt_sched.h> |
21 | #include <linux/inetdevice.h> | 21 | #include <linux/poll.h> |
22 | #include <linux/lapb.h> | ||
23 | #include <linux/rtnetlink.h> | 22 | #include <linux/rtnetlink.h> |
24 | #include <linux/etherdevice.h> | 23 | #include <linux/skbuff.h> |
25 | #include <linux/hdlc.h> | 24 | #include <linux/slab.h> |
26 | 25 | ||
27 | static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); | 26 | static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); |
28 | 27 | ||
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index e808720030ef..8b7e5d2e2ac9 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c | |||
@@ -9,20 +9,19 @@ | |||
9 | * as published by the Free Software Foundation. | 9 | * as published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/hdlc.h> | ||
17 | #include <linux/if_arp.h> | 14 | #include <linux/if_arp.h> |
18 | #include <linux/init.h> | ||
19 | #include <linux/skbuff.h> | ||
20 | #include <linux/pkt_sched.h> | ||
21 | #include <linux/inetdevice.h> | 15 | #include <linux/inetdevice.h> |
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
22 | #include <linux/lapb.h> | 18 | #include <linux/lapb.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/pkt_sched.h> | ||
21 | #include <linux/poll.h> | ||
23 | #include <linux/rtnetlink.h> | 22 | #include <linux/rtnetlink.h> |
24 | #include <linux/hdlc.h> | 23 | #include <linux/skbuff.h> |
25 | 24 | #include <linux/slab.h> | |
26 | #include <net/x25device.h> | 25 | #include <net/x25device.h> |
27 | 26 | ||
28 | static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); | 27 | static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); |
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index f3065d3473fd..e299313f828a 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c | |||
@@ -16,6 +16,8 @@ | |||
16 | * touching control registers. | 16 | * touching control registers. |
17 | * | 17 | * |
18 | * Port B isnt wired (why - beats me) | 18 | * Port B isnt wired (why - beats me) |
19 | * | ||
20 | * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> | ||
19 | */ | 21 | */ |
20 | 22 | ||
21 | #include <linux/module.h> | 23 | #include <linux/module.h> |
@@ -26,6 +28,7 @@ | |||
26 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
27 | #include <linux/if_arp.h> | 29 | #include <linux/if_arp.h> |
28 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/hdlc.h> | ||
29 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
30 | #include <net/arp.h> | 33 | #include <net/arp.h> |
31 | 34 | ||
@@ -33,34 +36,31 @@ | |||
33 | #include <asm/io.h> | 36 | #include <asm/io.h> |
34 | #include <asm/dma.h> | 37 | #include <asm/dma.h> |
35 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
36 | #include <net/syncppp.h> | ||
37 | #include "z85230.h" | 39 | #include "z85230.h" |
38 | 40 | ||
39 | static int dma; | 41 | static int dma; |
40 | 42 | ||
41 | struct sv11_device | ||
42 | { | ||
43 | void *if_ptr; /* General purpose pointer (used by SPPP) */ | ||
44 | struct z8530_dev sync; | ||
45 | struct ppp_device netdev; | ||
46 | }; | ||
47 | |||
48 | /* | 43 | /* |
49 | * Network driver support routines | 44 | * Network driver support routines |
50 | */ | 45 | */ |
51 | 46 | ||
47 | static inline struct z8530_dev* dev_to_sv(struct net_device *dev) | ||
48 | { | ||
49 | return (struct z8530_dev *)dev_to_hdlc(dev)->priv; | ||
50 | } | ||
51 | |||
52 | /* | 52 | /* |
53 | * Frame receive. Simple for our card as we do sync ppp and there | 53 | * Frame receive. Simple for our card as we do HDLC and there |
54 | * is no funny garbage involved | 54 | * is no funny garbage involved |
55 | */ | 55 | */ |
56 | 56 | ||
57 | static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) | 57 | static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) |
58 | { | 58 | { |
59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 59 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
60 | skb_trim(skb, skb->len-2); | 60 | skb_trim(skb, skb->len - 2); |
61 | skb->protocol=__constant_htons(ETH_P_WAN_PPP); | 61 | skb->protocol = hdlc_type_trans(skb, c->netdevice); |
62 | skb_reset_mac_header(skb); | 62 | skb_reset_mac_header(skb); |
63 | skb->dev=c->netdevice; | 63 | skb->dev = c->netdevice; |
64 | /* | 64 | /* |
65 | * Send it to the PPP layer. We don't have time to process | 65 | * Send it to the PPP layer. We don't have time to process |
66 | * it right now. | 66 | * it right now. |
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) | |||
68 | netif_rx(skb); | 68 | netif_rx(skb); |
69 | c->netdevice->last_rx = jiffies; | 69 | c->netdevice->last_rx = jiffies; |
70 | } | 70 | } |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * We've been placed in the UP state | 73 | * We've been placed in the UP state |
74 | */ | 74 | */ |
75 | 75 | ||
76 | static int hostess_open(struct net_device *d) | 76 | static int hostess_open(struct net_device *d) |
77 | { | 77 | { |
78 | struct sv11_device *sv11=d->ml_priv; | 78 | struct z8530_dev *sv11 = dev_to_sv(d); |
79 | int err = -1; | 79 | int err = -1; |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Link layer up | 82 | * Link layer up |
83 | */ | 83 | */ |
84 | switch(dma) | 84 | switch (dma) { |
85 | { | ||
86 | case 0: | 85 | case 0: |
87 | err=z8530_sync_open(d, &sv11->sync.chanA); | 86 | err = z8530_sync_open(d, &sv11->chanA); |
88 | break; | 87 | break; |
89 | case 1: | 88 | case 1: |
90 | err=z8530_sync_dma_open(d, &sv11->sync.chanA); | 89 | err = z8530_sync_dma_open(d, &sv11->chanA); |
91 | break; | 90 | break; |
92 | case 2: | 91 | case 2: |
93 | err=z8530_sync_txdma_open(d, &sv11->sync.chanA); | 92 | err = z8530_sync_txdma_open(d, &sv11->chanA); |
94 | break; | 93 | break; |
95 | } | 94 | } |
96 | 95 | ||
97 | if(err) | 96 | if (err) |
98 | return err; | 97 | return err; |
99 | /* | 98 | |
100 | * Begin PPP | 99 | err = hdlc_open(d); |
101 | */ | 100 | if (err) { |
102 | err=sppp_open(d); | 101 | switch (dma) { |
103 | if(err) | ||
104 | { | ||
105 | switch(dma) | ||
106 | { | ||
107 | case 0: | 102 | case 0: |
108 | z8530_sync_close(d, &sv11->sync.chanA); | 103 | z8530_sync_close(d, &sv11->chanA); |
109 | break; | 104 | break; |
110 | case 1: | 105 | case 1: |
111 | z8530_sync_dma_close(d, &sv11->sync.chanA); | 106 | z8530_sync_dma_close(d, &sv11->chanA); |
112 | break; | 107 | break; |
113 | case 2: | 108 | case 2: |
114 | z8530_sync_txdma_close(d, &sv11->sync.chanA); | 109 | z8530_sync_txdma_close(d, &sv11->chanA); |
115 | break; | 110 | break; |
116 | } | 111 | } |
117 | return err; | 112 | return err; |
118 | } | 113 | } |
119 | sv11->sync.chanA.rx_function=hostess_input; | 114 | sv11->chanA.rx_function = hostess_input; |
120 | 115 | ||
121 | /* | 116 | /* |
122 | * Go go go | 117 | * Go go go |
123 | */ | 118 | */ |
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d) | |||
128 | 123 | ||
129 | static int hostess_close(struct net_device *d) | 124 | static int hostess_close(struct net_device *d) |
130 | { | 125 | { |
131 | struct sv11_device *sv11=d->ml_priv; | 126 | struct z8530_dev *sv11 = dev_to_sv(d); |
132 | /* | 127 | /* |
133 | * Discard new frames | 128 | * Discard new frames |
134 | */ | 129 | */ |
135 | sv11->sync.chanA.rx_function=z8530_null_rx; | 130 | sv11->chanA.rx_function = z8530_null_rx; |
136 | /* | 131 | |
137 | * PPP off | 132 | hdlc_close(d); |
138 | */ | ||
139 | sppp_close(d); | ||
140 | /* | ||
141 | * Link layer down | ||
142 | */ | ||
143 | netif_stop_queue(d); | 133 | netif_stop_queue(d); |
144 | 134 | ||
145 | switch(dma) | 135 | switch (dma) { |
146 | { | ||
147 | case 0: | 136 | case 0: |
148 | z8530_sync_close(d, &sv11->sync.chanA); | 137 | z8530_sync_close(d, &sv11->chanA); |
149 | break; | 138 | break; |
150 | case 1: | 139 | case 1: |
151 | z8530_sync_dma_close(d, &sv11->sync.chanA); | 140 | z8530_sync_dma_close(d, &sv11->chanA); |
152 | break; | 141 | break; |
153 | case 2: | 142 | case 2: |
154 | z8530_sync_txdma_close(d, &sv11->sync.chanA); | 143 | z8530_sync_txdma_close(d, &sv11->chanA); |
155 | break; | 144 | break; |
156 | } | 145 | } |
157 | return 0; | 146 | return 0; |
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d) | |||
159 | 148 | ||
160 | static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) | 149 | static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) |
161 | { | 150 | { |
162 | /* struct sv11_device *sv11=d->ml_priv; | 151 | /* struct z8530_dev *sv11=dev_to_sv(d); |
163 | z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ | 152 | z8530_ioctl(d,&sv11->chanA,ifr,cmd) */ |
164 | return sppp_do_ioctl(d, ifr,cmd); | 153 | return hdlc_ioctl(d, ifr, cmd); |
165 | } | ||
166 | |||
167 | static struct net_device_stats *hostess_get_stats(struct net_device *d) | ||
168 | { | ||
169 | struct sv11_device *sv11=d->ml_priv; | ||
170 | if(sv11) | ||
171 | return z8530_get_stats(&sv11->sync.chanA); | ||
172 | else | ||
173 | return NULL; | ||
174 | } | 154 | } |
175 | 155 | ||
176 | /* | 156 | /* |
177 | * Passed PPP frames, fire them downwind. | 157 | * Passed network frames, fire them downwind. |
178 | */ | 158 | */ |
179 | 159 | ||
180 | static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) | 160 | static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) |
181 | { | 161 | { |
182 | struct sv11_device *sv11=d->ml_priv; | 162 | return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); |
183 | return z8530_queue_xmit(&sv11->sync.chanA, skb); | ||
184 | } | 163 | } |
185 | 164 | ||
186 | static int hostess_neigh_setup(struct neighbour *n) | 165 | static int hostess_attach(struct net_device *dev, unsigned short encoding, |
166 | unsigned short parity) | ||
187 | { | 167 | { |
188 | if (n->nud_state == NUD_NONE) { | 168 | if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) |
189 | n->ops = &arp_broken_ops; | 169 | return 0; |
190 | n->output = n->ops->output; | 170 | return -EINVAL; |
191 | } | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) | ||
196 | { | ||
197 | if (p->tbl->family == AF_INET) { | ||
198 | p->neigh_setup = hostess_neigh_setup; | ||
199 | p->ucast_probes = 0; | ||
200 | p->mcast_probes = 0; | ||
201 | } | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static void sv11_setup(struct net_device *dev) | ||
206 | { | ||
207 | dev->open = hostess_open; | ||
208 | dev->stop = hostess_close; | ||
209 | dev->hard_start_xmit = hostess_queue_xmit; | ||
210 | dev->get_stats = hostess_get_stats; | ||
211 | dev->do_ioctl = hostess_ioctl; | ||
212 | dev->neigh_setup = hostess_neigh_setup_dev; | ||
213 | } | 171 | } |
214 | 172 | ||
215 | /* | 173 | /* |
216 | * Description block for a Comtrol Hostess SV11 card | 174 | * Description block for a Comtrol Hostess SV11 card |
217 | */ | 175 | */ |
218 | 176 | ||
219 | static struct sv11_device *sv11_init(int iobase, int irq) | 177 | static struct z8530_dev *sv11_init(int iobase, int irq) |
220 | { | 178 | { |
221 | struct z8530_dev *dev; | 179 | struct z8530_dev *sv; |
222 | struct sv11_device *sv; | 180 | struct net_device *netdev; |
223 | |||
224 | /* | 181 | /* |
225 | * Get the needed I/O space | 182 | * Get the needed I/O space |
226 | */ | 183 | */ |
227 | 184 | ||
228 | if(!request_region(iobase, 8, "Comtrol SV11")) | 185 | if (!request_region(iobase, 8, "Comtrol SV11")) { |
229 | { | 186 | printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", |
230 | printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); | 187 | iobase); |
231 | return NULL; | 188 | return NULL; |
232 | } | 189 | } |
233 | 190 | ||
234 | sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); | 191 | sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL); |
235 | if(!sv) | 192 | if (!sv) |
236 | goto fail3; | 193 | goto err_kzalloc; |
237 | 194 | ||
238 | sv->if_ptr=&sv->netdev; | ||
239 | |||
240 | sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup); | ||
241 | if(!sv->netdev.dev) | ||
242 | goto fail2; | ||
243 | |||
244 | dev=&sv->sync; | ||
245 | |||
246 | /* | 195 | /* |
247 | * Stuff in the I/O addressing | 196 | * Stuff in the I/O addressing |
248 | */ | 197 | */ |
249 | 198 | ||
250 | dev->active = 0; | 199 | sv->active = 0; |
251 | 200 | ||
252 | dev->chanA.ctrlio=iobase+1; | 201 | sv->chanA.ctrlio = iobase + 1; |
253 | dev->chanA.dataio=iobase+3; | 202 | sv->chanA.dataio = iobase + 3; |
254 | dev->chanB.ctrlio=-1; | 203 | sv->chanB.ctrlio = -1; |
255 | dev->chanB.dataio=-1; | 204 | sv->chanB.dataio = -1; |
256 | dev->chanA.irqs=&z8530_nop; | 205 | sv->chanA.irqs = &z8530_nop; |
257 | dev->chanB.irqs=&z8530_nop; | 206 | sv->chanB.irqs = &z8530_nop; |
258 | 207 | ||
259 | outb(0, iobase+4); /* DMA off */ | 208 | outb(0, iobase + 4); /* DMA off */ |
260 | 209 | ||
261 | /* We want a fast IRQ for this device. Actually we'd like an even faster | 210 | /* We want a fast IRQ for this device. Actually we'd like an even faster |
262 | IRQ ;) - This is one driver RtLinux is made for */ | 211 | IRQ ;) - This is one driver RtLinux is made for */ |
263 | 212 | ||
264 | if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) | 213 | if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, |
265 | { | 214 | "Hostess SV11", sv) < 0) { |
266 | printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); | 215 | printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); |
267 | goto fail1; | 216 | goto err_irq; |
268 | } | 217 | } |
269 | 218 | ||
270 | dev->irq=irq; | 219 | sv->irq = irq; |
271 | dev->chanA.private=sv; | 220 | sv->chanA.private = sv; |
272 | dev->chanA.netdevice=sv->netdev.dev; | 221 | sv->chanA.dev = sv; |
273 | dev->chanA.dev=dev; | 222 | sv->chanB.dev = sv; |
274 | dev->chanB.dev=dev; | 223 | |
275 | 224 | if (dma) { | |
276 | if(dma) | ||
277 | { | ||
278 | /* | 225 | /* |
279 | * You can have DMA off or 1 and 3 thats the lot | 226 | * You can have DMA off or 1 and 3 thats the lot |
280 | * on the Comtrol. | 227 | * on the Comtrol. |
281 | */ | 228 | */ |
282 | dev->chanA.txdma=3; | 229 | sv->chanA.txdma = 3; |
283 | dev->chanA.rxdma=1; | 230 | sv->chanA.rxdma = 1; |
284 | outb(0x03|0x08, iobase+4); /* DMA on */ | 231 | outb(0x03 | 0x08, iobase + 4); /* DMA on */ |
285 | if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) | 232 | if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)")) |
286 | goto fail; | 233 | goto err_txdma; |
287 | 234 | ||
288 | if(dma==1) | 235 | if (dma == 1) |
289 | { | 236 | if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)")) |
290 | if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) | 237 | goto err_rxdma; |
291 | goto dmafail; | ||
292 | } | ||
293 | } | 238 | } |
294 | 239 | ||
295 | /* Kill our private IRQ line the hostess can end up chattering | 240 | /* Kill our private IRQ line the hostess can end up chattering |
296 | until the configuration is set */ | 241 | until the configuration is set */ |
297 | disable_irq(irq); | 242 | disable_irq(irq); |
298 | 243 | ||
299 | /* | 244 | /* |
300 | * Begin normal initialise | 245 | * Begin normal initialise |
301 | */ | 246 | */ |
302 | 247 | ||
303 | if(z8530_init(dev)!=0) | 248 | if (z8530_init(sv)) { |
304 | { | ||
305 | printk(KERN_ERR "Z8530 series device not found.\n"); | 249 | printk(KERN_ERR "Z8530 series device not found.\n"); |
306 | enable_irq(irq); | 250 | enable_irq(irq); |
307 | goto dmafail2; | 251 | goto free_dma; |
308 | } | 252 | } |
309 | z8530_channel_load(&dev->chanB, z8530_dead_port); | 253 | z8530_channel_load(&sv->chanB, z8530_dead_port); |
310 | if(dev->type==Z85C30) | 254 | if (sv->type == Z85C30) |
311 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); | 255 | z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream); |
312 | else | 256 | else |
313 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); | 257 | z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230); |
314 | 258 | ||
315 | enable_irq(irq); | 259 | enable_irq(irq); |
316 | |||
317 | 260 | ||
318 | /* | 261 | /* |
319 | * Now we can take the IRQ | 262 | * Now we can take the IRQ |
320 | */ | 263 | */ |
321 | if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0) | ||
322 | { | ||
323 | struct net_device *d=dev->chanA.netdevice; | ||
324 | 264 | ||
325 | /* | 265 | sv->chanA.netdevice = netdev = alloc_hdlcdev(sv); |
326 | * Initialise the PPP components | 266 | if (!netdev) |
327 | */ | 267 | goto free_dma; |
328 | d->ml_priv = sv; | ||
329 | sppp_attach(&sv->netdev); | ||
330 | |||
331 | /* | ||
332 | * Local fields | ||
333 | */ | ||
334 | |||
335 | d->base_addr = iobase; | ||
336 | d->irq = irq; | ||
337 | |||
338 | if(register_netdev(d)) | ||
339 | { | ||
340 | printk(KERN_ERR "%s: unable to register device.\n", | ||
341 | d->name); | ||
342 | sppp_detach(d); | ||
343 | goto dmafail2; | ||
344 | } | ||
345 | 268 | ||
346 | z8530_describe(dev, "I/O", iobase); | 269 | dev_to_hdlc(netdev)->attach = hostess_attach; |
347 | dev->active=1; | 270 | dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; |
348 | return sv; | 271 | netdev->open = hostess_open; |
272 | netdev->stop = hostess_close; | ||
273 | netdev->do_ioctl = hostess_ioctl; | ||
274 | netdev->base_addr = iobase; | ||
275 | netdev->irq = irq; | ||
276 | |||
277 | if (register_hdlc_device(netdev)) { | ||
278 | printk(KERN_ERR "hostess: unable to register HDLC device.\n"); | ||
279 | free_netdev(netdev); | ||
280 | goto free_dma; | ||
349 | } | 281 | } |
350 | dmafail2: | 282 | |
351 | if(dma==1) | 283 | z8530_describe(sv, "I/O", iobase); |
352 | free_dma(dev->chanA.rxdma); | 284 | sv->active = 1; |
353 | dmafail: | 285 | return sv; |
354 | if(dma) | 286 | |
355 | free_dma(dev->chanA.txdma); | 287 | free_dma: |
356 | fail: | 288 | if (dma == 1) |
357 | free_irq(irq, dev); | 289 | free_dma(sv->chanA.rxdma); |
358 | fail1: | 290 | err_rxdma: |
359 | free_netdev(sv->netdev.dev); | 291 | if (dma) |
360 | fail2: | 292 | free_dma(sv->chanA.txdma); |
293 | err_txdma: | ||
294 | free_irq(irq, sv); | ||
295 | err_irq: | ||
361 | kfree(sv); | 296 | kfree(sv); |
362 | fail3: | 297 | err_kzalloc: |
363 | release_region(iobase,8); | 298 | release_region(iobase, 8); |
364 | return NULL; | 299 | return NULL; |
365 | } | 300 | } |
366 | 301 | ||
367 | static void sv11_shutdown(struct sv11_device *dev) | 302 | static void sv11_shutdown(struct z8530_dev *dev) |
368 | { | 303 | { |
369 | sppp_detach(dev->netdev.dev); | 304 | unregister_hdlc_device(dev->chanA.netdevice); |
370 | unregister_netdev(dev->netdev.dev); | 305 | z8530_shutdown(dev); |
371 | z8530_shutdown(&dev->sync); | 306 | free_irq(dev->irq, dev); |
372 | free_irq(dev->sync.irq, dev); | 307 | if (dma) { |
373 | if(dma) | 308 | if (dma == 1) |
374 | { | 309 | free_dma(dev->chanA.rxdma); |
375 | if(dma==1) | 310 | free_dma(dev->chanA.txdma); |
376 | free_dma(dev->sync.chanA.rxdma); | ||
377 | free_dma(dev->sync.chanA.txdma); | ||
378 | } | 311 | } |
379 | release_region(dev->sync.chanA.ctrlio-1, 8); | 312 | release_region(dev->chanA.ctrlio - 1, 8); |
380 | free_netdev(dev->netdev.dev); | 313 | free_netdev(dev->chanA.netdevice); |
381 | kfree(dev); | 314 | kfree(dev); |
382 | } | 315 | } |
383 | 316 | ||
384 | #ifdef MODULE | 317 | static int io = 0x200; |
385 | 318 | static int irq = 9; | |
386 | static int io=0x200; | ||
387 | static int irq=9; | ||
388 | 319 | ||
389 | module_param(io, int, 0); | 320 | module_param(io, int, 0); |
390 | MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); | 321 | MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); |
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox"); | |||
397 | MODULE_LICENSE("GPL"); | 328 | MODULE_LICENSE("GPL"); |
398 | MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); | 329 | MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); |
399 | 330 | ||
400 | static struct sv11_device *sv11_unit; | 331 | static struct z8530_dev *sv11_unit; |
401 | 332 | ||
402 | int init_module(void) | 333 | int init_module(void) |
403 | { | 334 | { |
404 | printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); | 335 | if ((sv11_unit = sv11_init(io, irq)) == NULL) |
405 | printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n"); | ||
406 | if((sv11_unit=sv11_init(io,irq))==NULL) | ||
407 | return -ENODEV; | 336 | return -ENODEV; |
408 | return 0; | 337 | return 0; |
409 | } | 338 | } |
410 | 339 | ||
411 | void cleanup_module(void) | 340 | void cleanup_module(void) |
412 | { | 341 | { |
413 | if(sv11_unit) | 342 | if (sv11_unit) |
414 | sv11_shutdown(sv11_unit); | 343 | sv11_shutdown(sv11_unit); |
415 | } | 344 | } |
416 | |||
417 | #endif | ||
418 | |||
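The hostess_sv11 receive and open paths above illustrate the two runtime calls a hardware driver makes into the generic HDLC layer: hdlc_type_trans() to stamp an incoming frame before netif_rx(), and hdlc_open()/hdlc_close() bracketing link bring-up and tear-down. A minimal sketch under those assumptions, using hypothetical my_* names rather than the driver's own:

/* Sketch of the runtime interaction with the generic HDLC layer. */
#include <linux/hdlc.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb_trim(skb, skb->len - 2);		/* drop the hardware CRC */
	skb->protocol = hdlc_type_trans(skb, dev);
	skb_reset_mac_header(skb);
	skb->dev = dev;
	netif_rx(skb);				/* defer to softirq processing */
}

static int my_open(struct net_device *dev)
{
	int err = hdlc_open(dev);		/* let the protocol start up */

	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int my_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	hdlc_close(dev);
	return 0;
}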
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h index 882e58c1bfd7..4ced7ac16c2c 100644 --- a/drivers/net/wan/lmc/lmc.h +++ b/drivers/net/wan/lmc/lmc.h | |||
@@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned | |||
11 | devaddr, unsigned regno); | 11 | devaddr, unsigned regno); |
12 | void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, | 12 | void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, |
13 | unsigned regno, unsigned data); | 13 | unsigned regno, unsigned data); |
14 | void lmc_led_on(lmc_softc_t * const, u_int32_t); | 14 | void lmc_led_on(lmc_softc_t * const, u32); |
15 | void lmc_led_off(lmc_softc_t * const, u_int32_t); | 15 | void lmc_led_off(lmc_softc_t * const, u32); |
16 | unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); | 16 | unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); |
17 | void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); | 17 | void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); |
18 | void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); | 18 | void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); |
19 | void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); | 19 | void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); |
20 | 20 | ||
21 | int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 21 | int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
22 | 22 | ||
@@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media; | |||
26 | extern lmc_media_t lmc_hssi_media; | 26 | extern lmc_media_t lmc_hssi_media; |
27 | 27 | ||
28 | #ifdef _DBG_EVENTLOG | 28 | #ifdef _DBG_EVENTLOG |
29 | static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); | 29 | static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #endif | 32 | #endif |
33 | |||
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c index 3b94352b0d03..15049d711f47 100644 --- a/drivers/net/wan/lmc/lmc_debug.c +++ b/drivers/net/wan/lmc/lmc_debug.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | #include <linux/types.h> | 1 | #include <linux/types.h> |
3 | #include <linux/netdevice.h> | 2 | #include <linux/netdevice.h> |
4 | #include <linux/interrupt.h> | 3 | #include <linux/interrupt.h> |
@@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) | |||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | #ifdef DEBUG | 49 | #ifdef DEBUG |
51 | u_int32_t lmcEventLogIndex = 0; | 50 | u32 lmcEventLogIndex; |
52 | u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; | 51 | u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; |
53 | 52 | ||
54 | void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) | 53 | void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) |
55 | { | 54 | { |
56 | lmcEventLogBuf[lmcEventLogIndex++] = EventNum; | 55 | lmcEventLogBuf[lmcEventLogIndex++] = EventNum; |
57 | lmcEventLogBuf[lmcEventLogIndex++] = arg2; | 56 | lmcEventLogBuf[lmcEventLogIndex++] = arg2; |
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h index cf3563859bf3..2d46f121549f 100644 --- a/drivers/net/wan/lmc/lmc_debug.h +++ b/drivers/net/wan/lmc/lmc_debug.h | |||
@@ -38,15 +38,15 @@ | |||
38 | 38 | ||
39 | 39 | ||
40 | #ifdef DEBUG | 40 | #ifdef DEBUG |
41 | extern u_int32_t lmcEventLogIndex; | 41 | extern u32 lmcEventLogIndex; |
42 | extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; | 42 | extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; |
43 | #define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) | 43 | #define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) |
44 | #else | 44 | #else |
45 | #define LMC_EVENT_LOG(x,y,z) | 45 | #define LMC_EVENT_LOG(x,y,z) |
46 | #endif /* end ifdef _DBG_EVENTLOG */ | 46 | #endif /* end ifdef _DBG_EVENTLOG */ |
47 | 47 | ||
48 | void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); | 48 | void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); |
49 | void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); | 49 | void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); |
50 | void lmc_trace(struct net_device *dev, char *msg); | 50 | void lmc_trace(struct net_device *dev, char *msg); |
51 | 51 | ||
52 | #endif | 52 | #endif |
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h index 57dd861cd3db..72fb113a44ca 100644 --- a/drivers/net/wan/lmc/lmc_ioctl.h +++ b/drivers/net/wan/lmc/lmc_ioctl.h | |||
@@ -61,7 +61,7 @@ | |||
61 | /* | 61 | /* |
62 | * IFTYPE defines | 62 | * IFTYPE defines |
63 | */ | 63 | */ |
64 | #define LMC_PPP 1 /* use sppp interface */ | 64 | #define LMC_PPP 1 /* use generic HDLC interface */ |
65 | #define LMC_NET 2 /* use direct net interface */ | 65 | #define LMC_NET 2 /* use direct net interface */ |
66 | #define LMC_RAW 3 /* use direct net interface */ | 66 | #define LMC_RAW 3 /* use direct net interface */ |
67 | 67 | ||
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 62133cee446a..f80640f5a744 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 1997-2000 LAN Media Corporation (LMC) | 2 | * Copyright (c) 1997-2000 LAN Media Corporation (LMC) |
3 | * All rights reserved. www.lanmedia.com | 3 | * All rights reserved. www.lanmedia.com |
4 | * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> | ||
4 | * | 5 | * |
5 | * This code is written by: | 6 | * This code is written by: |
6 | * Andrew Stanley-Jones (asj@cban.com) | 7 | * Andrew Stanley-Jones (asj@cban.com) |
@@ -36,8 +37,6 @@ | |||
36 | * | 37 | * |
37 | */ | 38 | */ |
38 | 39 | ||
39 | /* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */ | ||
40 | |||
41 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
42 | #include <linux/module.h> | 41 | #include <linux/module.h> |
43 | #include <linux/string.h> | 42 | #include <linux/string.h> |
@@ -49,6 +48,7 @@ | |||
49 | #include <linux/interrupt.h> | 48 | #include <linux/interrupt.h> |
50 | #include <linux/pci.h> | 49 | #include <linux/pci.h> |
51 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
51 | #include <linux/hdlc.h> | ||
52 | #include <linux/init.h> | 52 | #include <linux/init.h> |
53 | #include <linux/in.h> | 53 | #include <linux/in.h> |
54 | #include <linux/if_arp.h> | 54 | #include <linux/if_arp.h> |
@@ -57,9 +57,6 @@ | |||
57 | #include <linux/skbuff.h> | 57 | #include <linux/skbuff.h> |
58 | #include <linux/inet.h> | 58 | #include <linux/inet.h> |
59 | #include <linux/bitops.h> | 59 | #include <linux/bitops.h> |
60 | |||
61 | #include <net/syncppp.h> | ||
62 | |||
63 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 60 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
64 | #include <asm/io.h> | 61 | #include <asm/io.h> |
65 | #include <asm/dma.h> | 62 | #include <asm/dma.h> |
@@ -78,8 +75,6 @@ | |||
78 | #include "lmc_debug.h" | 75 | #include "lmc_debug.h" |
79 | #include "lmc_proto.h" | 76 | #include "lmc_proto.h" |
80 | 77 | ||
81 | static int lmc_first_load = 0; | ||
82 | |||
83 | static int LMC_PKT_BUF_SZ = 1542; | 78 | static int LMC_PKT_BUF_SZ = 1542; |
84 | 79 | ||
85 | static struct pci_device_id lmc_pci_tbl[] = { | 80 | static struct pci_device_id lmc_pci_tbl[] = { |
@@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = { | |||
91 | }; | 86 | }; |
92 | 87 | ||
93 | MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); | 88 | MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); |
94 | MODULE_LICENSE("GPL"); | 89 | MODULE_LICENSE("GPL v2"); |
95 | 90 | ||
96 | 91 | ||
97 | static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); | 92 | static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); |
98 | static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
99 | static int lmc_rx (struct net_device *dev); | 93 | static int lmc_rx (struct net_device *dev); |
100 | static int lmc_open(struct net_device *dev); | 94 | static int lmc_open(struct net_device *dev); |
101 | static int lmc_close(struct net_device *dev); | 95 | static int lmc_close(struct net_device *dev); |
@@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev); | |||
114 | * linux reserves 16 device specific IOCTLs. We call them | 108 | * linux reserves 16 device specific IOCTLs. We call them |
115 | * LMCIOC* to control various bits of our world. | 109 | * LMCIOC* to control various bits of our world. |
116 | */ | 110 | */ |
117 | int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | 111 | int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ |
118 | { | 112 | { |
119 | lmc_softc_t *sc; | 113 | lmc_softc_t *sc = dev_to_sc(dev); |
120 | lmc_ctl_t ctl; | 114 | lmc_ctl_t ctl; |
121 | int ret; | 115 | int ret = -EOPNOTSUPP; |
122 | u_int16_t regVal; | 116 | u16 regVal; |
123 | unsigned long flags; | 117 | unsigned long flags; |
124 | 118 | ||
125 | struct sppp *sp; | ||
126 | |||
127 | ret = -EOPNOTSUPP; | ||
128 | |||
129 | sc = dev->priv; | ||
130 | |||
131 | lmc_trace(dev, "lmc_ioctl in"); | 119 | lmc_trace(dev, "lmc_ioctl in"); |
132 | 120 | ||
133 | /* | 121 | /* |
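Every old "sc = dev->priv" lookup in this file becomes dev_to_sc(dev). The helper itself is not part of this hunk; because alloc_hdlcdev() stores the driver's private pointer in the hdlc_device rather than in the net_device directly, it presumably reduces to something like this (a sketch, not the literal definition from the driver headers):

    #include <linux/hdlc.h>
    #include <linux/netdevice.h>

    /* Assumed shape of the lookup helper used throughout lmc_main.c. */
    static inline lmc_softc_t *dev_to_sc(struct net_device *dev)
    {
            return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
    }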
@@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
149 | break; | 137 | break; |
150 | 138 | ||
151 | case LMCIOCSINFO: /*fold01*/ | 139 | case LMCIOCSINFO: /*fold01*/ |
152 | sp = &((struct ppp_device *) dev)->sppp; | ||
153 | if (!capable(CAP_NET_ADMIN)) { | 140 | if (!capable(CAP_NET_ADMIN)) { |
154 | ret = -EPERM; | 141 | ret = -EPERM; |
155 | break; | 142 | break; |
@@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
175 | sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; | 162 | sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; |
176 | } | 163 | } |
177 | 164 | ||
178 | if (ctl.keepalive_onoff == LMC_CTL_OFF) | ||
179 | sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */ | ||
180 | else | ||
181 | sp->pp_flags |= PP_KEEPALIVE; /* Turn on */ | ||
182 | |||
183 | ret = 0; | 165 | ret = 0; |
184 | break; | 166 | break; |
185 | 167 | ||
186 | case LMCIOCIFTYPE: /*fold01*/ | 168 | case LMCIOCIFTYPE: /*fold01*/ |
187 | { | 169 | { |
188 | u_int16_t old_type = sc->if_type; | 170 | u16 old_type = sc->if_type; |
189 | u_int16_t new_type; | 171 | u16 new_type; |
190 | 172 | ||
191 | if (!capable(CAP_NET_ADMIN)) { | 173 | if (!capable(CAP_NET_ADMIN)) { |
192 | ret = -EPERM; | 174 | ret = -EPERM; |
193 | break; | 175 | break; |
194 | } | 176 | } |
195 | 177 | ||
196 | if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { | 178 | if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) { |
197 | ret = -EFAULT; | 179 | ret = -EFAULT; |
198 | break; | 180 | break; |
199 | } | 181 | } |
@@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
206 | } | 188 | } |
207 | 189 | ||
208 | lmc_proto_close(sc); | 190 | lmc_proto_close(sc); |
209 | lmc_proto_detach(sc); | ||
210 | 191 | ||
211 | sc->if_type = new_type; | 192 | sc->if_type = new_type; |
212 | // lmc_proto_init(sc); | ||
213 | lmc_proto_attach(sc); | 193 | lmc_proto_attach(sc); |
214 | lmc_proto_open(sc); | 194 | ret = lmc_proto_open(sc); |
215 | 195 | break; | |
216 | ret = 0 ; | ||
217 | break ; | ||
218 | } | 196 | } |
219 | 197 | ||
220 | case LMCIOCGETXINFO: /*fold01*/ | 198 | case LMCIOCGETXINFO: /*fold01*/ |
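The LMCIOCIFTYPE handler now reads a u16 from ifr_data, closes the old protocol, attaches the new one and propagates lmc_proto_open()'s return value instead of always reporting success. From userspace the call is a device-private ioctl, roughly like this (a sketch: LMCIOCIFTYPE and LMC_PPP come from lmc_ioctl.h, the interface name is an assumption, and CAP_NET_ADMIN is required):

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include "lmc_ioctl.h"                  /* LMCIOCIFTYPE, LMC_PPP */

    static int lmc_select_hdlc(const char *ifname)
    {
            struct ifreq ifr;
            uint16_t type = LMC_PPP;        /* now means: generic HDLC stack */
            int fd, ret;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&type;   /* kernel copies a u16 from here */
            ret = ioctl(fd, LMCIOCIFTYPE, &ifr);
            close(fd);
            return ret;
    }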
@@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
241 | 219 | ||
242 | break; | 220 | break; |
243 | 221 | ||
244 | case LMCIOCGETLMCSTATS: /*fold01*/ | 222 | case LMCIOCGETLMCSTATS: |
245 | if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ | 223 | if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { |
246 | lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); | 224 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); |
247 | sc->stats.framingBitErrorCount += | 225 | sc->extra_stats.framingBitErrorCount += |
248 | lmc_mii_readreg (sc, 0, 18) & 0xff; | 226 | lmc_mii_readreg(sc, 0, 18) & 0xff; |
249 | lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); | 227 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); |
250 | sc->stats.framingBitErrorCount += | 228 | sc->extra_stats.framingBitErrorCount += |
251 | (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; | 229 | (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; |
252 | lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); | 230 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); |
253 | sc->stats.lineCodeViolationCount += | 231 | sc->extra_stats.lineCodeViolationCount += |
254 | lmc_mii_readreg (sc, 0, 18) & 0xff; | 232 | lmc_mii_readreg(sc, 0, 18) & 0xff; |
255 | lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); | 233 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); |
256 | sc->stats.lineCodeViolationCount += | 234 | sc->extra_stats.lineCodeViolationCount += |
257 | (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; | 235 | (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; |
258 | lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); | 236 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); |
259 | regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; | 237 | regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; |
260 | 238 | ||
261 | sc->stats.lossOfFrameCount += | 239 | sc->extra_stats.lossOfFrameCount += |
262 | (regVal & T1FRAMER_LOF_MASK) >> 4; | 240 | (regVal & T1FRAMER_LOF_MASK) >> 4; |
263 | sc->stats.changeOfFrameAlignmentCount += | 241 | sc->extra_stats.changeOfFrameAlignmentCount += |
264 | (regVal & T1FRAMER_COFA_MASK) >> 2; | 242 | (regVal & T1FRAMER_COFA_MASK) >> 2; |
265 | sc->stats.severelyErroredFrameCount += | 243 | sc->extra_stats.severelyErroredFrameCount += |
266 | regVal & T1FRAMER_SEF_MASK; | 244 | regVal & T1FRAMER_SEF_MASK; |
267 | } | 245 | } |
268 | 246 | if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats, | |
269 | if (copy_to_user(ifr->ifr_data, &sc->stats, | 247 | sizeof(sc->lmc_device->stats)) || |
270 | sizeof (struct lmc_statistics))) | 248 | copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats), |
271 | ret = -EFAULT; | 249 | &sc->extra_stats, sizeof(sc->extra_stats))) |
272 | else | 250 | ret = -EFAULT; |
273 | ret = 0; | 251 | else |
274 | break; | 252 | ret = 0; |
253 | break; | ||
275 | 254 | ||
276 | case LMCIOCCLEARLMCSTATS: /*fold01*/ | 255 | case LMCIOCCLEARLMCSTATS: |
277 | if (!capable(CAP_NET_ADMIN)){ | 256 | if (!capable(CAP_NET_ADMIN)) { |
278 | ret = -EPERM; | 257 | ret = -EPERM; |
279 | break; | 258 | break; |
280 | } | 259 | } |
281 | 260 | ||
282 | memset (&sc->stats, 0, sizeof (struct lmc_statistics)); | 261 | memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); |
283 | sc->stats.check = STATCHECK; | 262 | memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); |
284 | sc->stats.version_size = (DRIVER_VERSION << 16) + | 263 | sc->extra_stats.check = STATCHECK; |
285 | sizeof (struct lmc_statistics); | 264 | sc->extra_stats.version_size = (DRIVER_VERSION << 16) + |
286 | sc->stats.lmc_cardtype = sc->lmc_cardtype; | 265 | sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); |
287 | ret = 0; | 266 | sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; |
288 | break; | 267 | ret = 0; |
268 | break; | ||
289 | 269 | ||
290 | case LMCIOCSETCIRCUIT: /*fold01*/ | 270 | case LMCIOCSETCIRCUIT: /*fold01*/ |
291 | if (!capable(CAP_NET_ADMIN)){ | 271 | if (!capable(CAP_NET_ADMIN)){ |
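The old private struct lmc_statistics is split up: generic counters now live in the net_device's own stats and only the LMC-specific ones remain in sc->extra_stats, while LMCIOCGETLMCSTATS keeps its single-buffer interface by copying the two blocks back to back. The copy pattern in isolation (a hypothetical helper, not code from this driver):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Preserve a legacy one-buffer ioctl layout: hand userspace the generic
     * stats block immediately followed by the driver-specific block. */
    static int copy_stats_pair_to_user(char __user *dst,
                                       const void *devstats, size_t devlen,
                                       const void *extra, size_t extralen)
    {
            if (copy_to_user(dst, devstats, devlen) ||
                copy_to_user(dst + devlen, extra, extralen))
                    return -EFAULT;
            return 0;
    }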
@@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
330 | ret = -EFAULT; | 310 | ret = -EFAULT; |
331 | break; | 311 | break; |
332 | } | 312 | } |
333 | if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) | 313 | if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, |
314 | sizeof(lmcEventLogBuf))) | ||
334 | ret = -EFAULT; | 315 | ret = -EFAULT; |
335 | else | 316 | else |
336 | ret = 0; | 317 | ret = 0; |
@@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ | |||
641 | /* the watchdog process that cruises around */ | 622 | /* the watchdog process that cruises around */ |
642 | static void lmc_watchdog (unsigned long data) /*fold00*/ | 623 | static void lmc_watchdog (unsigned long data) /*fold00*/ |
643 | { | 624 | { |
644 | struct net_device *dev = (struct net_device *) data; | 625 | struct net_device *dev = (struct net_device *)data; |
645 | lmc_softc_t *sc; | 626 | lmc_softc_t *sc = dev_to_sc(dev); |
646 | int link_status; | 627 | int link_status; |
647 | u_int32_t ticks; | 628 | u32 ticks; |
648 | unsigned long flags; | 629 | unsigned long flags; |
649 | 630 | ||
650 | sc = dev->priv; | ||
651 | |||
652 | lmc_trace(dev, "lmc_watchdog in"); | 631 | lmc_trace(dev, "lmc_watchdog in"); |
653 | 632 | ||
654 | spin_lock_irqsave(&sc->lmc_lock, flags); | 633 | spin_lock_irqsave(&sc->lmc_lock, flags); |
@@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ | |||
677 | * check for a transmit interrupt timeout | 656 | * check for a transmit interrupt timeout |
678 | * Has the packet xmt vs xmt serviced threshold been exceeded */ | 657 | * Has the packet xmt vs xmt serviced threshold been exceeded */ |
679 | if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && | 658 | if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && |
680 | sc->stats.tx_packets > sc->lasttx_packets && | 659 | sc->lmc_device->stats.tx_packets > sc->lasttx_packets && |
681 | sc->tx_TimeoutInd == 0) | 660 | sc->tx_TimeoutInd == 0) |
682 | { | 661 | { |
683 | 662 | ||
684 | /* wait for the watchdog to come around again */ | 663 | /* wait for the watchdog to come around again */ |
685 | sc->tx_TimeoutInd = 1; | 664 | sc->tx_TimeoutInd = 1; |
686 | } | 665 | } |
687 | else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && | 666 | else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && |
688 | sc->stats.tx_packets > sc->lasttx_packets && | 667 | sc->lmc_device->stats.tx_packets > sc->lasttx_packets && |
689 | sc->tx_TimeoutInd) | 668 | sc->tx_TimeoutInd) |
690 | { | 669 | { |
691 | 670 | ||
692 | LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); | 671 | LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); |
693 | 672 | ||
694 | sc->tx_TimeoutDisplay = 1; | 673 | sc->tx_TimeoutDisplay = 1; |
695 | sc->stats.tx_TimeoutCnt++; | 674 | sc->extra_stats.tx_TimeoutCnt++; |
696 | 675 | ||
697 | /* DEC chip is stuck, hit it with a RESET!!!! */ | 676 | /* DEC chip is stuck, hit it with a RESET!!!! */ |
698 | lmc_running_reset (dev); | 677 | lmc_running_reset (dev); |
@@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ | |||
712 | /* reset the transmit timeout detection flag */ | 691 | /* reset the transmit timeout detection flag */ |
713 | sc->tx_TimeoutInd = 0; | 692 | sc->tx_TimeoutInd = 0; |
714 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; | 693 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; |
715 | sc->lasttx_packets = sc->stats.tx_packets; | 694 | sc->lasttx_packets = sc->lmc_device->stats.tx_packets; |
716 | } | 695 | } else { |
717 | else | ||
718 | { | ||
719 | sc->tx_TimeoutInd = 0; | 696 | sc->tx_TimeoutInd = 0; |
720 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; | 697 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; |
721 | sc->lasttx_packets = sc->stats.tx_packets; | 698 | sc->lasttx_packets = sc->lmc_device->stats.tx_packets; |
722 | } | 699 | } |
723 | 700 | ||
724 | /* --- end time out check ----------------------------------- */ | 701 | /* --- end time out check ----------------------------------- */ |
@@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ | |||
748 | sc->last_link_status = 1; | 725 | sc->last_link_status = 1; |
749 | /* lmc_reset (sc); Again why reset??? */ | 726 | /* lmc_reset (sc); Again why reset??? */ |
750 | 727 | ||
751 | /* Inform the world that link protocol is back up. */ | ||
752 | netif_carrier_on(dev); | 728 | netif_carrier_on(dev); |
753 | |||
754 | /* Now we have to tell the syncppp that we had an outage | ||
755 | * and that it should deal. Calling sppp_reopen here | ||
756 | * should do the trick, but we may have to call sppp_close | ||
757 | * when the link goes down, and call sppp_open here. | ||
758 | * Subject to more testing. | ||
759 | * --bbraun | ||
760 | */ | ||
761 | |||
762 | lmc_proto_reopen(sc); | ||
763 | |||
764 | } | 729 | } |
765 | 730 | ||
766 | /* Call media specific watchdog functions */ | 731 | /* Call media specific watchdog functions */ |
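With syncppp gone there is no protocol re-open to perform after an outage: the carrier notifications are the whole interface between the hardware driver and the generic HDLC layer, so the watchdog now only toggles the carrier. The pattern in isolation (a sketch):

    #include <linux/netdevice.h>

    /* Report link transitions to the stack; hdlc and the attached protocol
     * react to the carrier change, no protocol-specific reopen is needed. */
    static void report_link(struct net_device *dev, int link_up)
    {
            if (link_up && !netif_carrier_ok(dev))
                    netif_carrier_on(dev);
            else if (!link_up && netif_carrier_ok(dev))
                    netif_carrier_off(dev);
    }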
@@ -816,114 +781,93 @@ kick_timer: | |||
816 | 781 | ||
817 | } | 782 | } |
818 | 783 | ||
819 | static void lmc_setup(struct net_device * const dev) /*fold00*/ | 784 | static int lmc_attach(struct net_device *dev, unsigned short encoding, |
785 | unsigned short parity) | ||
820 | { | 786 | { |
821 | lmc_trace(dev, "lmc_setup in"); | 787 | if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) |
822 | 788 | return 0; | |
823 | dev->type = ARPHRD_HDLC; | 789 | return -EINVAL; |
824 | dev->hard_start_xmit = lmc_start_xmit; | ||
825 | dev->open = lmc_open; | ||
826 | dev->stop = lmc_close; | ||
827 | dev->get_stats = lmc_get_stats; | ||
828 | dev->do_ioctl = lmc_ioctl; | ||
829 | dev->tx_timeout = lmc_driver_timeout; | ||
830 | dev->watchdog_timeo = (HZ); /* 1 second */ | ||
831 | |||
832 | lmc_trace(dev, "lmc_setup out"); | ||
833 | } | 790 | } |
834 | 791 | ||
835 | |||
836 | static int __devinit lmc_init_one(struct pci_dev *pdev, | 792 | static int __devinit lmc_init_one(struct pci_dev *pdev, |
837 | const struct pci_device_id *ent) | 793 | const struct pci_device_id *ent) |
838 | { | 794 | { |
839 | struct net_device *dev; | 795 | lmc_softc_t *sc; |
840 | lmc_softc_t *sc; | 796 | struct net_device *dev; |
841 | u16 subdevice; | 797 | u16 subdevice; |
842 | u_int16_t AdapModelNum; | 798 | u16 AdapModelNum; |
843 | int err = -ENOMEM; | 799 | int err; |
844 | static int cards_found; | 800 | static int cards_found; |
845 | #ifndef GCOM | 801 | |
846 | /* We name by type not by vendor */ | 802 | /* lmc_trace(dev, "lmc_init_one in"); */ |
847 | static const char lmcname[] = "hdlc%d"; | 803 | |
848 | #else | 804 | err = pci_enable_device(pdev); |
849 | /* | 805 | if (err) { |
850 | * GCOM uses LMC vendor name so that clients can know which card | 806 | printk(KERN_ERR "lmc: pci enable failed: %d\n", err); |
851 | * to attach to. | 807 | return err; |
852 | */ | 808 | } |
853 | static const char lmcname[] = "lmc%d"; | ||
854 | #endif | ||
855 | |||
856 | |||
857 | /* | ||
858 | * Allocate our own device structure | ||
859 | */ | ||
860 | dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup); | ||
861 | if (!dev) { | ||
862 | printk (KERN_ERR "lmc:alloc_netdev for device failed\n"); | ||
863 | goto out1; | ||
864 | } | ||
865 | |||
866 | lmc_trace(dev, "lmc_init_one in"); | ||
867 | |||
868 | err = pci_enable_device(pdev); | ||
869 | if (err) { | ||
870 | printk(KERN_ERR "lmc: pci enable failed:%d\n", err); | ||
871 | goto out2; | ||
872 | } | ||
873 | |||
874 | if (pci_request_regions(pdev, "lmc")) { | ||
875 | printk(KERN_ERR "lmc: pci_request_region failed\n"); | ||
876 | err = -EIO; | ||
877 | goto out3; | ||
878 | } | ||
879 | |||
880 | pci_set_drvdata(pdev, dev); | ||
881 | |||
882 | if(lmc_first_load == 0){ | ||
883 | printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n", | ||
884 | DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION); | ||
885 | lmc_first_load = 1; | ||
886 | } | ||
887 | |||
888 | sc = dev->priv; | ||
889 | sc->lmc_device = dev; | ||
890 | sc->name = dev->name; | ||
891 | |||
892 | /* Initialize the sppp layer */ | ||
893 | /* An ioctl can cause a subsequent detach for raw frame interface */ | ||
894 | dev->ml_priv = sc; | ||
895 | sc->if_type = LMC_PPP; | ||
896 | sc->check = 0xBEAFCAFE; | ||
897 | dev->base_addr = pci_resource_start(pdev, 0); | ||
898 | dev->irq = pdev->irq; | ||
899 | |||
900 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
901 | |||
902 | /* | ||
903 | * This will get the protocol layer ready and do any 1 time init's | ||
904 | * Must have a valid sc and dev structure | ||
905 | */ | ||
906 | lmc_proto_init(sc); | ||
907 | |||
908 | lmc_proto_attach(sc); | ||
909 | 809 | ||
910 | /* | 810 | err = pci_request_regions(pdev, "lmc"); |
911 | * Why were we changing this??? | 811 | if (err) { |
912 | dev->tx_queue_len = 100; | 812 | printk(KERN_ERR "lmc: pci_request_region failed\n"); |
913 | */ | 813 | goto err_req_io; |
814 | } | ||
914 | 815 | ||
915 | /* Init the spin lock so we can call it later */ | 816 | /* |
817 | * Allocate our own device structure | ||
818 | */ | ||
819 | sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL); | ||
820 | if (!sc) { | ||
821 | err = -ENOMEM; | ||
822 | goto err_kzalloc; | ||
823 | } | ||
916 | 824 | ||
917 | spin_lock_init(&sc->lmc_lock); | 825 | dev = alloc_hdlcdev(sc); |
918 | pci_set_master(pdev); | 826 | if (!dev) { |
827 | printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); | ||
828 | goto err_hdlcdev; | ||
829 | } | ||
919 | 830 | ||
920 | printk ("%s: detected at %lx, irq %d\n", dev->name, | ||
921 | dev->base_addr, dev->irq); | ||
922 | 831 | ||
923 | if (register_netdev (dev) != 0) { | 832 | dev->type = ARPHRD_HDLC; |
924 | printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); | 833 | dev_to_hdlc(dev)->xmit = lmc_start_xmit; |
925 | goto out4; | 834 | dev_to_hdlc(dev)->attach = lmc_attach; |
926 | } | 835 | dev->open = lmc_open; |
836 | dev->stop = lmc_close; | ||
837 | dev->get_stats = lmc_get_stats; | ||
838 | dev->do_ioctl = lmc_ioctl; | ||
839 | dev->tx_timeout = lmc_driver_timeout; | ||
840 | dev->watchdog_timeo = HZ; /* 1 second */ | ||
841 | dev->tx_queue_len = 100; | ||
842 | sc->lmc_device = dev; | ||
843 | sc->name = dev->name; | ||
844 | sc->if_type = LMC_PPP; | ||
845 | sc->check = 0xBEAFCAFE; | ||
846 | dev->base_addr = pci_resource_start(pdev, 0); | ||
847 | dev->irq = pdev->irq; | ||
848 | pci_set_drvdata(pdev, dev); | ||
849 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
850 | |||
851 | /* | ||
852 | * This will get the protocol layer ready and do any 1 time init's | ||
853 | * Must have a valid sc and dev structure | ||
854 | */ | ||
855 | lmc_proto_attach(sc); | ||
856 | |||
857 | /* Init the spin lock so we can call it later */ |||
858 | |||
859 | spin_lock_init(&sc->lmc_lock); | ||
860 | pci_set_master(pdev); | ||
861 | |||
862 | printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name, | ||
863 | dev->base_addr, dev->irq); | ||
864 | |||
865 | err = register_hdlc_device(dev); | ||
866 | if (err) { | ||
867 | printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); | ||
868 | free_netdev(dev); | ||
869 | goto err_hdlcdev; | ||
870 | } | ||
927 | 871 | ||
928 | sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; | 872 | sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; |
929 | sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; | 873 | sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; |
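This hunk is the core of the conversion: the private lmc_setup()/alloc_netdev()/register_netdev() path becomes the standard generic-HDLC probe sequence, and lmc_attach() only tells the HDLC core which line encodings the hardware supports. Reduced to a skeleton for a hypothetical driver (names are invented, error handling abbreviated):

    #include <linux/hdlc.h>
    #include <linux/if_arp.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct foo_priv {
            struct net_device *dev;
            /* hardware state ... */
    };

    static int foo_xmit(struct sk_buff *skb, struct net_device *dev);

    static int foo_attach(struct net_device *dev, unsigned short encoding,
                          unsigned short parity)
    {
            /* Accept only what the hardware can actually do. */
            if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
                    return 0;
            return -EINVAL;
    }

    static int foo_probe_one(void)
    {
            struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            struct net_device *dev;
            int err;

            if (!priv)
                    return -ENOMEM;
            dev = alloc_hdlcdev(priv);              /* hdlc_device->priv = priv */
            if (!dev) {
                    kfree(priv);
                    return -ENOMEM;
            }
            dev->type = ARPHRD_HDLC;
            dev_to_hdlc(dev)->xmit = foo_xmit;      /* hardware tx hook for the HDLC core */
            dev_to_hdlc(dev)->attach = foo_attach;
            priv->dev = dev;

            err = register_hdlc_device(dev);        /* registers the netdev as well */
            if (err) {
                    free_netdev(dev);
                    kfree(priv);
            }
            return err;
    }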
@@ -939,27 +883,27 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, | |||
939 | 883 | ||
940 | switch (subdevice) { | 884 | switch (subdevice) { |
941 | case PCI_DEVICE_ID_LMC_HSSI: | 885 | case PCI_DEVICE_ID_LMC_HSSI: |
942 | printk ("%s: LMC HSSI\n", dev->name); | 886 | printk(KERN_INFO "%s: LMC HSSI\n", dev->name); |
943 | sc->lmc_cardtype = LMC_CARDTYPE_HSSI; | 887 | sc->lmc_cardtype = LMC_CARDTYPE_HSSI; |
944 | sc->lmc_media = &lmc_hssi_media; | 888 | sc->lmc_media = &lmc_hssi_media; |
945 | break; | 889 | break; |
946 | case PCI_DEVICE_ID_LMC_DS3: | 890 | case PCI_DEVICE_ID_LMC_DS3: |
947 | printk ("%s: LMC DS3\n", dev->name); | 891 | printk(KERN_INFO "%s: LMC DS3\n", dev->name); |
948 | sc->lmc_cardtype = LMC_CARDTYPE_DS3; | 892 | sc->lmc_cardtype = LMC_CARDTYPE_DS3; |
949 | sc->lmc_media = &lmc_ds3_media; | 893 | sc->lmc_media = &lmc_ds3_media; |
950 | break; | 894 | break; |
951 | case PCI_DEVICE_ID_LMC_SSI: | 895 | case PCI_DEVICE_ID_LMC_SSI: |
952 | printk ("%s: LMC SSI\n", dev->name); | 896 | printk(KERN_INFO "%s: LMC SSI\n", dev->name); |
953 | sc->lmc_cardtype = LMC_CARDTYPE_SSI; | 897 | sc->lmc_cardtype = LMC_CARDTYPE_SSI; |
954 | sc->lmc_media = &lmc_ssi_media; | 898 | sc->lmc_media = &lmc_ssi_media; |
955 | break; | 899 | break; |
956 | case PCI_DEVICE_ID_LMC_T1: | 900 | case PCI_DEVICE_ID_LMC_T1: |
957 | printk ("%s: LMC T1\n", dev->name); | 901 | printk(KERN_INFO "%s: LMC T1\n", dev->name); |
958 | sc->lmc_cardtype = LMC_CARDTYPE_T1; | 902 | sc->lmc_cardtype = LMC_CARDTYPE_T1; |
959 | sc->lmc_media = &lmc_t1_media; | 903 | sc->lmc_media = &lmc_t1_media; |
960 | break; | 904 | break; |
961 | default: | 905 | default: |
962 | printk (KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); | 906 | printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); |
963 | break; | 907 | break; |
964 | } | 908 | } |
965 | 909 | ||
@@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, | |||
977 | */ | 921 | */ |
978 | AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; | 922 | AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; |
979 | 923 | ||
980 | if ((AdapModelNum == LMC_ADAP_T1 | 924 | if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */ |
981 | && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ | 925 | subdevice != PCI_DEVICE_ID_LMC_T1) && |
982 | (AdapModelNum == LMC_ADAP_SSI | 926 | (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */ |
983 | && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ | 927 | subdevice != PCI_DEVICE_ID_LMC_SSI) && |
984 | (AdapModelNum == LMC_ADAP_DS3 | 928 | (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */ |
985 | && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ | 929 | subdevice != PCI_DEVICE_ID_LMC_DS3) && |
986 | (AdapModelNum == LMC_ADAP_HSSI | 930 | (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */ |
987 | && subdevice == PCI_DEVICE_ID_LMC_HSSI)) | 931 | subdevice != PCI_DEVICE_ID_LMC_HSSI)) |
988 | { /* detect LMC5200 */ | 932 | printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI" |
933 | " Subsystem ID = 0x%04x\n", | ||
934 | dev->name, AdapModelNum, subdevice); | ||
989 | 935 | ||
990 | } | ||
991 | else { | ||
992 | printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n", | ||
993 | dev->name, AdapModelNum, subdevice); | ||
994 | // return (NULL); | ||
995 | } | ||
996 | /* | 936 | /* |
997 | * reset clock | 937 | * reset clock |
998 | */ | 938 | */ |
999 | LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); | 939 | LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); |
1000 | 940 | ||
1001 | sc->board_idx = cards_found++; | 941 | sc->board_idx = cards_found++; |
1002 | sc->stats.check = STATCHECK; | 942 | sc->extra_stats.check = STATCHECK; |
1003 | sc->stats.version_size = (DRIVER_VERSION << 16) + | 943 | sc->extra_stats.version_size = (DRIVER_VERSION << 16) + |
1004 | sizeof (struct lmc_statistics); | 944 | sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); |
1005 | sc->stats.lmc_cardtype = sc->lmc_cardtype; | 945 | sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; |
1006 | 946 | ||
1007 | sc->lmc_ok = 0; | 947 | sc->lmc_ok = 0; |
1008 | sc->last_link_status = 0; | 948 | sc->last_link_status = 0; |
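The adapter model-number check is inverted: instead of enumerating the four valid (AdapModelNum, PCI subdevice) pairs and leaving the warning in the empty-bodied if/else, the code warns directly when no pair matches. The two forms are equivalent by De Morgan's laws; a throwaway exhaustive check in plain userspace C:

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
            /* A..D: model matches; a..d: subdevice matches, one pair per card type. */
            for (int bits = 0; bits < 256; bits++) {
                    bool A = bits & 1,  a = bits & 2,  B = bits & 4,  b = bits & 8;
                    bool C = bits & 16, c = bits & 32, D = bits & 64, d = bits & 128;
                    bool old_ok   = (A && a) || (B && b) || (C && c) || (D && d);
                    bool new_warn = (!A || !a) && (!B || !b) && (!C || !c) && (!D || !d);
                    assert(new_warn == !old_ok);    /* warn exactly when nothing matched */
            }
            return 0;
    }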
@@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, | |||
1010 | lmc_trace(dev, "lmc_init_one out"); | 950 | lmc_trace(dev, "lmc_init_one out"); |
1011 | return 0; | 951 | return 0; |
1012 | 952 | ||
1013 | out4: | 953 | err_hdlcdev: |
1014 | lmc_proto_detach(sc); | 954 | pci_set_drvdata(pdev, NULL); |
1015 | out3: | 955 | kfree(sc); |
1016 | if (pdev) { | 956 | err_kzalloc: |
1017 | pci_release_regions(pdev); | 957 | pci_release_regions(pdev); |
1018 | pci_set_drvdata(pdev, NULL); | 958 | err_req_io: |
1019 | } | 959 | pci_disable_device(pdev); |
1020 | out2: | 960 | return err; |
1021 | free_netdev(dev); | ||
1022 | out1: | ||
1023 | return err; | ||
1024 | } | 961 | } |
1025 | 962 | ||
1026 | /* | 963 | /* |
1027 | * Called from pci when removing module. | 964 | * Called from pci when removing module. |
1028 | */ | 965 | */ |
1029 | static void __devexit lmc_remove_one (struct pci_dev *pdev) | 966 | static void __devexit lmc_remove_one(struct pci_dev *pdev) |
1030 | { | 967 | { |
1031 | struct net_device *dev = pci_get_drvdata(pdev); | 968 | struct net_device *dev = pci_get_drvdata(pdev); |
1032 | 969 | ||
1033 | if (dev) { | 970 | if (dev) { |
1034 | lmc_softc_t *sc = dev->priv; | 971 | printk(KERN_DEBUG "%s: removing...\n", dev->name); |
1035 | 972 | unregister_hdlc_device(dev); | |
1036 | printk("%s: removing...\n", dev->name); | 973 | free_netdev(dev); |
1037 | lmc_proto_detach(sc); | 974 | pci_release_regions(pdev); |
1038 | unregister_netdev(dev); | 975 | pci_disable_device(pdev); |
1039 | free_netdev(dev); | 976 | pci_set_drvdata(pdev, NULL); |
1040 | pci_release_regions(pdev); | 977 | } |
1041 | pci_disable_device(pdev); | ||
1042 | pci_set_drvdata(pdev, NULL); | ||
1043 | } | ||
1044 | } | 978 | } |
1045 | 979 | ||
1046 | /* After this is called, packets can be sent. | 980 | /* After this is called, packets can be sent. |
1047 | * Does not initialize the addresses | 981 | * Does not initialize the addresses |
1048 | */ | 982 | */ |
1049 | static int lmc_open (struct net_device *dev) /*fold00*/ | 983 | static int lmc_open(struct net_device *dev) |
1050 | { | 984 | { |
1051 | lmc_softc_t *sc = dev->priv; | 985 | lmc_softc_t *sc = dev_to_sc(dev); |
986 | int err; | ||
1052 | 987 | ||
1053 | lmc_trace(dev, "lmc_open in"); | 988 | lmc_trace(dev, "lmc_open in"); |
1054 | 989 | ||
1055 | lmc_led_on(sc, LMC_DS3_LED0); | 990 | lmc_led_on(sc, LMC_DS3_LED0); |
1056 | 991 | ||
1057 | lmc_dec_reset (sc); | 992 | lmc_dec_reset(sc); |
1058 | lmc_reset (sc); | 993 | lmc_reset(sc); |
1059 | |||
1060 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); | ||
1061 | LMC_EVENT_LOG(LMC_EVENT_RESET2, | ||
1062 | lmc_mii_readreg (sc, 0, 16), | ||
1063 | lmc_mii_readreg (sc, 0, 17)); | ||
1064 | 994 | ||
995 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0); | ||
996 | LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), | ||
997 | lmc_mii_readreg(sc, 0, 17)); | ||
1065 | 998 | ||
1066 | if (sc->lmc_ok){ | 999 | if (sc->lmc_ok){ |
1067 | lmc_trace(dev, "lmc_open lmc_ok out"); | 1000 | lmc_trace(dev, "lmc_open lmc_ok out"); |
@@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/ | |||
1106 | 1039 | ||
1107 | /* dev->flags |= IFF_UP; */ | 1040 | /* dev->flags |= IFF_UP; */ |
1108 | 1041 | ||
1109 | lmc_proto_open(sc); | 1042 | if ((err = lmc_proto_open(sc)) != 0) |
1043 | return err; | ||
1110 | 1044 | ||
1111 | dev->do_ioctl = lmc_ioctl; | 1045 | dev->do_ioctl = lmc_ioctl; |
1112 | 1046 | ||
1113 | 1047 | ||
1114 | netif_start_queue(dev); | 1048 | netif_start_queue(dev); |
1115 | 1049 | sc->extra_stats.tx_tbusy0++; | |
1116 | sc->stats.tx_tbusy0++ ; | ||
1117 | 1050 | ||
1118 | /* | 1051 | /* |
1119 | * select what interrupts we want to get | 1052 | * select what interrupts we want to get |
@@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/ | |||
1165 | 1098 | ||
1166 | static void lmc_running_reset (struct net_device *dev) /*fold00*/ | 1099 | static void lmc_running_reset (struct net_device *dev) /*fold00*/ |
1167 | { | 1100 | { |
1168 | 1101 | lmc_softc_t *sc = dev_to_sc(dev); | |
1169 | lmc_softc_t *sc = (lmc_softc_t *) dev->priv; | ||
1170 | 1102 | ||
1171 | lmc_trace(dev, "lmc_running_reset in"); | 1103 | lmc_trace(dev, "lmc_running_reset in"); |
1172 | 1104 | ||
@@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ | |||
1184 | netif_wake_queue(dev); | 1116 | netif_wake_queue(dev); |
1185 | 1117 | ||
1186 | sc->lmc_txfull = 0; | 1118 | sc->lmc_txfull = 0; |
1187 | sc->stats.tx_tbusy0++ ; | 1119 | sc->extra_stats.tx_tbusy0++; |
1188 | 1120 | ||
1189 | sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; | 1121 | sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; |
1190 | LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); | 1122 | LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); |
@@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ | |||
1200 | * This disables the timer for the watchdog and keepalives, | 1132 | * This disables the timer for the watchdog and keepalives, |
1201 | * and disables the irq for dev. | 1133 | * and disables the irq for dev. |
1202 | */ | 1134 | */ |
1203 | static int lmc_close (struct net_device *dev) /*fold00*/ | 1135 | static int lmc_close(struct net_device *dev) |
1204 | { | 1136 | { |
1205 | /* not calling release_region() as we should */ | 1137 | /* not calling release_region() as we should */ |
1206 | lmc_softc_t *sc; | 1138 | lmc_softc_t *sc = dev_to_sc(dev); |
1207 | 1139 | ||
1208 | lmc_trace(dev, "lmc_close in"); | 1140 | lmc_trace(dev, "lmc_close in"); |
1209 | 1141 | ||
1210 | sc = dev->priv; | ||
1211 | sc->lmc_ok = 0; | 1142 | sc->lmc_ok = 0; |
1212 | sc->lmc_media->set_link_status (sc, 0); | 1143 | sc->lmc_media->set_link_status (sc, 0); |
1213 | del_timer (&sc->timer); | 1144 | del_timer (&sc->timer); |
@@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/ | |||
1215 | lmc_ifdown (dev); | 1146 | lmc_ifdown (dev); |
1216 | 1147 | ||
1217 | lmc_trace(dev, "lmc_close out"); | 1148 | lmc_trace(dev, "lmc_close out"); |
1218 | 1149 | ||
1219 | return 0; | 1150 | return 0; |
1220 | } | 1151 | } |
1221 | 1152 | ||
@@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/ | |||
1223 | /* When the interface goes down, this is called */ | 1154 | /* When the interface goes down, this is called */ |
1224 | static int lmc_ifdown (struct net_device *dev) /*fold00*/ | 1155 | static int lmc_ifdown (struct net_device *dev) /*fold00*/ |
1225 | { | 1156 | { |
1226 | lmc_softc_t *sc = dev->priv; | 1157 | lmc_softc_t *sc = dev_to_sc(dev); |
1227 | u32 csr6; | 1158 | u32 csr6; |
1228 | int i; | 1159 | int i; |
1229 | 1160 | ||
1230 | lmc_trace(dev, "lmc_ifdown in"); | 1161 | lmc_trace(dev, "lmc_ifdown in"); |
1231 | 1162 | ||
1232 | /* Don't let anything else go on right now */ | 1163 | /* Don't let anything else go on right now */ |
1233 | // dev->start = 0; | 1164 | // dev->start = 0; |
1234 | netif_stop_queue(dev); | 1165 | netif_stop_queue(dev); |
1235 | sc->stats.tx_tbusy1++ ; | 1166 | sc->extra_stats.tx_tbusy1++; |
1236 | 1167 | ||
1237 | /* stop interrupts */ | 1168 | /* stop interrupts */ |
1238 | /* Clear the interrupt mask */ | 1169 | /* Clear the interrupt mask */ |
@@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ | |||
1244 | csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ | 1175 | csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ |
1245 | LMC_CSR_WRITE (sc, csr_command, csr6); | 1176 | LMC_CSR_WRITE (sc, csr_command, csr6); |
1246 | 1177 | ||
1247 | sc->stats.rx_missed_errors += | 1178 | sc->lmc_device->stats.rx_missed_errors += |
1248 | LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; | 1179 | LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; |
1249 | 1180 | ||
1250 | /* release the interrupt */ | 1181 | /* release the interrupt */ |
1251 | if(sc->got_irq == 1){ | 1182 | if(sc->got_irq == 1){ |
@@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ | |||
1276 | lmc_led_off (sc, LMC_MII16_LED_ALL); | 1207 | lmc_led_off (sc, LMC_MII16_LED_ALL); |
1277 | 1208 | ||
1278 | netif_wake_queue(dev); | 1209 | netif_wake_queue(dev); |
1279 | sc->stats.tx_tbusy0++ ; | 1210 | sc->extra_stats.tx_tbusy0++; |
1280 | 1211 | ||
1281 | lmc_trace(dev, "lmc_ifdown out"); | 1212 | lmc_trace(dev, "lmc_ifdown out"); |
1282 | 1213 | ||
@@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ | |||
1289 | static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ | 1220 | static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ |
1290 | { | 1221 | { |
1291 | struct net_device *dev = (struct net_device *) dev_instance; | 1222 | struct net_device *dev = (struct net_device *) dev_instance; |
1292 | lmc_softc_t *sc; | 1223 | lmc_softc_t *sc = dev_to_sc(dev); |
1293 | u32 csr; | 1224 | u32 csr; |
1294 | int i; | 1225 | int i; |
1295 | s32 stat; | 1226 | s32 stat; |
@@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ | |||
1300 | 1231 | ||
1301 | lmc_trace(dev, "lmc_interrupt in"); | 1232 | lmc_trace(dev, "lmc_interrupt in"); |
1302 | 1233 | ||
1303 | sc = dev->priv; | ||
1304 | |||
1305 | spin_lock(&sc->lmc_lock); | 1234 | spin_lock(&sc->lmc_lock); |
1306 | 1235 | ||
1307 | /* | 1236 | /* |
@@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ | |||
1354 | 1283 | ||
1355 | int n_compl = 0 ; | 1284 | int n_compl = 0 ; |
1356 | /* reset the transmit timeout detection flag -baz */ | 1285 | /* reset the transmit timeout detection flag -baz */ |
1357 | sc->stats.tx_NoCompleteCnt = 0; | 1286 | sc->extra_stats.tx_NoCompleteCnt = 0; |
1358 | 1287 | ||
1359 | badtx = sc->lmc_taint_tx; | 1288 | badtx = sc->lmc_taint_tx; |
1360 | i = badtx % LMC_TXDESCS; | 1289 | i = badtx % LMC_TXDESCS; |
@@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ | |||
1378 | if (sc->lmc_txq[i] == NULL) | 1307 | if (sc->lmc_txq[i] == NULL) |
1379 | continue; | 1308 | continue; |
1380 | 1309 | ||
1381 | /* | 1310 | /* |
1382 | * Check the total error summary to look for any errors | 1311 | * Check the total error summary to look for any errors |
1383 | */ | 1312 | */ |
1384 | if (stat & 0x8000) { | 1313 | if (stat & 0x8000) { |
1385 | sc->stats.tx_errors++; | 1314 | sc->lmc_device->stats.tx_errors++; |
1386 | if (stat & 0x4104) | 1315 | if (stat & 0x4104) |
1387 | sc->stats.tx_aborted_errors++; | 1316 | sc->lmc_device->stats.tx_aborted_errors++; |
1388 | if (stat & 0x0C00) | 1317 | if (stat & 0x0C00) |
1389 | sc->stats.tx_carrier_errors++; | 1318 | sc->lmc_device->stats.tx_carrier_errors++; |
1390 | if (stat & 0x0200) | 1319 | if (stat & 0x0200) |
1391 | sc->stats.tx_window_errors++; | 1320 | sc->lmc_device->stats.tx_window_errors++; |
1392 | if (stat & 0x0002) | 1321 | if (stat & 0x0002) |
1393 | sc->stats.tx_fifo_errors++; | 1322 | sc->lmc_device->stats.tx_fifo_errors++; |
1394 | } | 1323 | } else { |
1395 | else { | 1324 | sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; |
1396 | 1325 | ||
1397 | sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; | 1326 | sc->lmc_device->stats.tx_packets++; |
1398 | |||
1399 | sc->stats.tx_packets++; | ||
1400 | } | 1327 | } |
1401 | 1328 | ||
1402 | // dev_kfree_skb(sc->lmc_txq[i]); | 1329 | // dev_kfree_skb(sc->lmc_txq[i]); |
1403 | dev_kfree_skb_irq(sc->lmc_txq[i]); | 1330 | dev_kfree_skb_irq(sc->lmc_txq[i]); |
1404 | sc->lmc_txq[i] = NULL; | 1331 | sc->lmc_txq[i] = NULL; |
@@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ | |||
1415 | LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); | 1342 | LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); |
1416 | sc->lmc_txfull = 0; | 1343 | sc->lmc_txfull = 0; |
1417 | netif_wake_queue(dev); | 1344 | netif_wake_queue(dev); |
1418 | sc->stats.tx_tbusy0++ ; | 1345 | sc->extra_stats.tx_tbusy0++; |
1419 | 1346 | ||
1420 | 1347 | ||
1421 | #ifdef DEBUG | 1348 | #ifdef DEBUG |
1422 | sc->stats.dirtyTx = badtx; | 1349 | sc->extra_stats.dirtyTx = badtx; |
1423 | sc->stats.lmc_next_tx = sc->lmc_next_tx; | 1350 | sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; |
1424 | sc->stats.lmc_txfull = sc->lmc_txfull; | 1351 | sc->extra_stats.lmc_txfull = sc->lmc_txfull; |
1425 | #endif | 1352 | #endif |
1426 | sc->lmc_taint_tx = badtx; | 1353 | sc->lmc_taint_tx = badtx; |
1427 | 1354 | ||
@@ -1476,9 +1403,9 @@ lmc_int_fail_out: | |||
1476 | return IRQ_RETVAL(handled); | 1403 | return IRQ_RETVAL(handled); |
1477 | } | 1404 | } |
1478 | 1405 | ||
1479 | static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ | 1406 | static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1480 | { | 1407 | { |
1481 | lmc_softc_t *sc; | 1408 | lmc_softc_t *sc = dev_to_sc(dev); |
1482 | u32 flag; | 1409 | u32 flag; |
1483 | int entry; | 1410 | int entry; |
1484 | int ret = 0; | 1411 | int ret = 0; |
@@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 | |||
1486 | 1413 | ||
1487 | lmc_trace(dev, "lmc_start_xmit in"); | 1414 | lmc_trace(dev, "lmc_start_xmit in"); |
1488 | 1415 | ||
1489 | sc = dev->priv; | ||
1490 | |||
1491 | spin_lock_irqsave(&sc->lmc_lock, flags); | 1416 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1492 | 1417 | ||
1493 | /* normal path, tbusy known to be zero */ | 1418 | /* normal path, tbusy known to be zero */ |
@@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 | |||
1532 | if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) | 1457 | if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) |
1533 | { /* ring full, go busy */ | 1458 | { /* ring full, go busy */ |
1534 | sc->lmc_txfull = 1; | 1459 | sc->lmc_txfull = 1; |
1535 | netif_stop_queue(dev); | 1460 | netif_stop_queue(dev); |
1536 | sc->stats.tx_tbusy1++ ; | 1461 | sc->extra_stats.tx_tbusy1++; |
1537 | LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); | 1462 | LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); |
1538 | } | 1463 | } |
1539 | #endif | 1464 | #endif |
@@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 | |||
1550 | * the watchdog timer handler. -baz | 1475 | * the watchdog timer handler. -baz |
1551 | */ | 1476 | */ |
1552 | 1477 | ||
1553 | sc->stats.tx_NoCompleteCnt++; | 1478 | sc->extra_stats.tx_NoCompleteCnt++; |
1554 | sc->lmc_next_tx++; | 1479 | sc->lmc_next_tx++; |
1555 | 1480 | ||
1556 | /* give ownership to the chip */ | 1481 | /* give ownership to the chip */ |
@@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 | |||
1569 | } | 1494 | } |
1570 | 1495 | ||
1571 | 1496 | ||
1572 | static int lmc_rx (struct net_device *dev) /*fold00*/ | 1497 | static int lmc_rx(struct net_device *dev) |
1573 | { | 1498 | { |
1574 | lmc_softc_t *sc; | 1499 | lmc_softc_t *sc = dev_to_sc(dev); |
1575 | int i; | 1500 | int i; |
1576 | int rx_work_limit = LMC_RXDESCS; | 1501 | int rx_work_limit = LMC_RXDESCS; |
1577 | unsigned int next_rx; | 1502 | unsigned int next_rx; |
@@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1583 | 1508 | ||
1584 | lmc_trace(dev, "lmc_rx in"); | 1509 | lmc_trace(dev, "lmc_rx in"); |
1585 | 1510 | ||
1586 | sc = dev->priv; | ||
1587 | |||
1588 | lmc_led_on(sc, LMC_DS3_LED3); | 1511 | lmc_led_on(sc, LMC_DS3_LED3); |
1589 | 1512 | ||
1590 | rxIntLoopCnt = 0; /* debug -baz */ | 1513 | rxIntLoopCnt = 0; /* debug -baz */ |
@@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1597 | rxIntLoopCnt++; /* debug -baz */ | 1520 | rxIntLoopCnt++; /* debug -baz */ |
1598 | len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); | 1521 | len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); |
1599 | if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ | 1522 | if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ |
1600 | if ((stat & 0x0000ffff) != 0x7fff) { | 1523 | if ((stat & 0x0000ffff) != 0x7fff) { |
1601 | /* Oversized frame */ | 1524 | /* Oversized frame */ |
1602 | sc->stats.rx_length_errors++; | 1525 | sc->lmc_device->stats.rx_length_errors++; |
1603 | goto skip_packet; | 1526 | goto skip_packet; |
1604 | } | 1527 | } |
1605 | } | 1528 | } |
1606 | |||
1607 | if(stat & 0x00000008){ /* Catch a dribbling bit error */ | ||
1608 | sc->stats.rx_errors++; | ||
1609 | sc->stats.rx_frame_errors++; | ||
1610 | goto skip_packet; | ||
1611 | } | ||
1612 | 1529 | ||
1530 | if (stat & 0x00000008) { /* Catch a dribbling bit error */ | ||
1531 | sc->lmc_device->stats.rx_errors++; | ||
1532 | sc->lmc_device->stats.rx_frame_errors++; | ||
1533 | goto skip_packet; | ||
1534 | } | ||
1613 | 1535 | ||
1614 | if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */ | ||
1615 | sc->stats.rx_errors++; | ||
1616 | sc->stats.rx_crc_errors++; | ||
1617 | goto skip_packet; | ||
1618 | } | ||
1619 | 1536 | ||
1537 | if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ | ||
1538 | sc->lmc_device->stats.rx_errors++; | ||
1539 | sc->lmc_device->stats.rx_crc_errors++; | ||
1540 | goto skip_packet; | ||
1541 | } | ||
1620 | 1542 | ||
1621 | if (len > LMC_PKT_BUF_SZ){ | 1543 | if (len > LMC_PKT_BUF_SZ) { |
1622 | sc->stats.rx_length_errors++; | 1544 | sc->lmc_device->stats.rx_length_errors++; |
1623 | localLengthErrCnt++; | 1545 | localLengthErrCnt++; |
1624 | goto skip_packet; | 1546 | goto skip_packet; |
1625 | } | 1547 | } |
1626 | 1548 | ||
1627 | if (len < sc->lmc_crcSize + 2) { | 1549 | if (len < sc->lmc_crcSize + 2) { |
1628 | sc->stats.rx_length_errors++; | 1550 | sc->lmc_device->stats.rx_length_errors++; |
1629 | sc->stats.rx_SmallPktCnt++; | 1551 | sc->extra_stats.rx_SmallPktCnt++; |
1630 | localLengthErrCnt++; | 1552 | localLengthErrCnt++; |
1631 | goto skip_packet; | 1553 | goto skip_packet; |
1632 | } | 1554 | } |
1633 | 1555 | ||
1634 | if(stat & 0x00004000){ | 1556 | if(stat & 0x00004000){ |
1635 | printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); | 1557 | printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); |
@@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1656 | } | 1578 | } |
1657 | 1579 | ||
1658 | dev->last_rx = jiffies; | 1580 | dev->last_rx = jiffies; |
1659 | sc->stats.rx_packets++; | 1581 | sc->lmc_device->stats.rx_packets++; |
1660 | sc->stats.rx_bytes += len; | 1582 | sc->lmc_device->stats.rx_bytes += len; |
1661 | 1583 | ||
1662 | LMC_CONSOLE_LOG("recv", skb->data, len); | 1584 | LMC_CONSOLE_LOG("recv", skb->data, len); |
1663 | 1585 | ||
@@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1679 | 1601 | ||
1680 | skb_put (skb, len); | 1602 | skb_put (skb, len); |
1681 | skb->protocol = lmc_proto_type(sc, skb); | 1603 | skb->protocol = lmc_proto_type(sc, skb); |
1682 | skb->protocol = htons(ETH_P_WAN_PPP); | ||
1683 | skb_reset_mac_header(skb); | 1604 | skb_reset_mac_header(skb); |
1684 | /* skb_reset_network_header(skb); */ | 1605 | /* skb_reset_network_header(skb); */ |
1685 | skb->dev = dev; | 1606 | skb->dev = dev; |
@@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1704 | * in which case we'll try to allocate the buffer | 1625 | * in which case we'll try to allocate the buffer |
1705 | * again. (once a second) | 1626 | * again. (once a second) |
1706 | */ | 1627 | */ |
1707 | sc->stats.rx_BuffAllocErr++; | 1628 | sc->extra_stats.rx_BuffAllocErr++; |
1708 | LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); | 1629 | LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); |
1709 | sc->failed_recv_alloc = 1; | 1630 | sc->failed_recv_alloc = 1; |
1710 | goto skip_out_of_mem; | 1631 | goto skip_out_of_mem; |
@@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ | |||
1739 | * descriptors with bogus packets | 1660 | * descriptors with bogus packets |
1740 | * | 1661 | * |
1741 | if (localLengthErrCnt > LMC_RXDESCS - 3) { | 1662 | if (localLengthErrCnt > LMC_RXDESCS - 3) { |
1742 | sc->stats.rx_BadPktSurgeCnt++; | 1663 | sc->extra_stats.rx_BadPktSurgeCnt++; |
1743 | LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, | 1664 | LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, |
1744 | localLengthErrCnt, | 1665 | sc->extra_stats.rx_BadPktSurgeCnt); |
1745 | sc->stats.rx_BadPktSurgeCnt); | ||
1746 | } */ | 1666 | } */ |
1747 | 1667 | ||
1748 | /* save max count of receive descriptors serviced */ | 1668 | /* save max count of receive descriptors serviced */ |
1749 | if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { | 1669 | if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) |
1750 | sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ | 1670 | sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ |
1751 | } | ||
1752 | 1671 | ||
1753 | #ifdef DEBUG | 1672 | #ifdef DEBUG |
1754 | if (rxIntLoopCnt == 0) | 1673 | if (rxIntLoopCnt == 0) |
@@ -1775,23 +1694,22 @@ skip_out_of_mem: | |||
1775 | return 0; | 1694 | return 0; |
1776 | } | 1695 | } |
1777 | 1696 | ||
1778 | static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ | 1697 | static struct net_device_stats *lmc_get_stats(struct net_device *dev) |
1779 | { | 1698 | { |
1780 | lmc_softc_t *sc = dev->priv; | 1699 | lmc_softc_t *sc = dev_to_sc(dev); |
1781 | unsigned long flags; | 1700 | unsigned long flags; |
1782 | 1701 | ||
1783 | lmc_trace(dev, "lmc_get_stats in"); | 1702 | lmc_trace(dev, "lmc_get_stats in"); |
1784 | 1703 | ||
1785 | |||
1786 | spin_lock_irqsave(&sc->lmc_lock, flags); | 1704 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1787 | 1705 | ||
1788 | sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; | 1706 | sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; |
1789 | 1707 | ||
1790 | spin_unlock_irqrestore(&sc->lmc_lock, flags); | 1708 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1791 | 1709 | ||
1792 | lmc_trace(dev, "lmc_get_stats out"); | 1710 | lmc_trace(dev, "lmc_get_stats out"); |
1793 | 1711 | ||
1794 | return (struct net_device_stats *) &sc->stats; | 1712 | return &sc->lmc_device->stats; |
1795 | } | 1713 | } |
1796 | 1714 | ||
1797 | static struct pci_driver lmc_driver = { | 1715 | static struct pci_driver lmc_driver = { |
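lmc_get_stats() no longer returns the driver's private copy: under the lock it folds the DEC chip's missed-frames counter into the stats embedded in the net_device and returns those. The general .get_stats pattern (a sketch; the private-data lookup and the CSR read are placeholders):

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct foo_priv { spinlock_t lock; /* ... */ };

    static struct foo_priv *foo_priv_of(struct net_device *dev);   /* placeholder */
    static u32 foo_read_missed_frames(struct foo_priv *priv);      /* placeholder */

    static struct net_device_stats *foo_get_stats(struct net_device *dev)
    {
            struct foo_priv *priv = foo_priv_of(dev);
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);
            /* The hardware keeps a missed-frame count; fold it into the
             * counters that live in the net_device itself. */
            dev->stats.rx_missed_errors += foo_read_missed_frames(priv) & 0xffff;
            spin_unlock_irqrestore(&priv->lock, flags);

            return &dev->stats;
    }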
@@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ | |||
1970 | { | 1888 | { |
1971 | if (sc->lmc_txq[i] != NULL){ /* have buffer */ | 1889 | if (sc->lmc_txq[i] != NULL){ /* have buffer */ |
1972 | dev_kfree_skb(sc->lmc_txq[i]); /* free it */ | 1890 | dev_kfree_skb(sc->lmc_txq[i]); /* free it */ |
1973 | sc->stats.tx_dropped++; /* We just dropped a packet */ | 1891 | sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ |
1974 | } | 1892 | } |
1975 | sc->lmc_txq[i] = NULL; | 1893 | sc->lmc_txq[i] = NULL; |
1976 | sc->lmc_txring[i].status = 0x00000000; | 1894 | sc->lmc_txring[i].status = 0x00000000; |
@@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ | |||
1982 | lmc_trace(sc->lmc_device, "lmc_softreset out"); | 1900 | lmc_trace(sc->lmc_device, "lmc_softreset out"); |
1983 | } | 1901 | } |
1984 | 1902 | ||
1985 | void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ | 1903 | void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ |
1986 | { | 1904 | { |
1987 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); | 1905 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); |
1988 | sc->lmc_gpio_io &= ~bits; | 1906 | sc->lmc_gpio_io &= ~bits; |
@@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ | |||
1990 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); | 1908 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); |
1991 | } | 1909 | } |
1992 | 1910 | ||
1993 | void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ | 1911 | void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ |
1994 | { | 1912 | { |
1995 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); | 1913 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); |
1996 | sc->lmc_gpio_io |= bits; | 1914 | sc->lmc_gpio_io |= bits; |
@@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ | |||
1998 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); | 1916 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); |
1999 | } | 1917 | } |
2000 | 1918 | ||
2001 | void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ | 1919 | void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ |
2002 | { | 1920 | { |
2003 | lmc_trace(sc->lmc_device, "lmc_led_on in"); | 1921 | lmc_trace(sc->lmc_device, "lmc_led_on in"); |
2004 | if((~sc->lmc_miireg16) & led){ /* Already on! */ | 1922 | if((~sc->lmc_miireg16) & led){ /* Already on! */ |
@@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ | |||
2011 | lmc_trace(sc->lmc_device, "lmc_led_on out"); | 1929 | lmc_trace(sc->lmc_device, "lmc_led_on out"); |
2012 | } | 1930 | } |
2013 | 1931 | ||
2014 | void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ | 1932 | void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ |
2015 | { | 1933 | { |
2016 | lmc_trace(sc->lmc_device, "lmc_led_off in"); | 1934 | lmc_trace(sc->lmc_device, "lmc_led_off in"); |
2017 | if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ | 1935 | if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ |
@@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/ | |||
2061 | */ | 1979 | */ |
2062 | sc->lmc_media->init(sc); | 1980 | sc->lmc_media->init(sc); |
2063 | 1981 | ||
2064 | sc->stats.resetCount++; | 1982 | sc->extra_stats.resetCount++; |
2065 | lmc_trace(sc->lmc_device, "lmc_reset out"); | 1983 | lmc_trace(sc->lmc_device, "lmc_reset out"); |
2066 | } | 1984 | } |
2067 | 1985 | ||
2068 | static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ | 1986 | static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ |
2069 | { | 1987 | { |
2070 | u_int32_t val; | 1988 | u32 val; |
2071 | lmc_trace(sc->lmc_device, "lmc_dec_reset in"); | 1989 | lmc_trace(sc->lmc_device, "lmc_dec_reset in"); |
2072 | 1990 | ||
2073 | /* | 1991 | /* |
@@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00 | |||
2151 | lmc_trace(sc->lmc_device, "lmc_initcsrs out"); | 2069 | lmc_trace(sc->lmc_device, "lmc_initcsrs out"); |
2152 | } | 2070 | } |
2153 | 2071 | ||
2154 | static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ | 2072 | static void lmc_driver_timeout(struct net_device *dev) |
2155 | lmc_softc_t *sc; | 2073 | { |
2074 | lmc_softc_t *sc = dev_to_sc(dev); | ||
2156 | u32 csr6; | 2075 | u32 csr6; |
2157 | unsigned long flags; | 2076 | unsigned long flags; |
2158 | 2077 | ||
2159 | lmc_trace(dev, "lmc_driver_timeout in"); | 2078 | lmc_trace(dev, "lmc_driver_timeout in"); |
2160 | 2079 | ||
2161 | sc = dev->priv; | ||
2162 | |||
2163 | spin_lock_irqsave(&sc->lmc_lock, flags); | 2080 | spin_lock_irqsave(&sc->lmc_lock, flags); |
2164 | 2081 | ||
2165 | printk("%s: Xmitter busy|\n", dev->name); | 2082 | printk("%s: Xmitter busy|\n", dev->name); |
2166 | 2083 | ||
2167 | sc->stats.tx_tbusy_calls++ ; | 2084 | sc->extra_stats.tx_tbusy_calls++; |
2168 | if (jiffies - dev->trans_start < TX_TIMEOUT) { | 2085 | if (jiffies - dev->trans_start < TX_TIMEOUT) |
2169 | goto bug_out; | 2086 | goto bug_out; |
2170 | } | ||
2171 | 2087 | ||
2172 | /* | 2088 | /* |
2173 | * Chip seems to have locked up | 2089 | * Chip seems to have locked up |
@@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ | |||
2178 | 2094 | ||
2179 | LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, | 2095 | LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, |
2180 | LMC_CSR_READ (sc, csr_status), | 2096 | LMC_CSR_READ (sc, csr_status), |
2181 | sc->stats.tx_ProcTimeout); | 2097 | sc->extra_stats.tx_ProcTimeout); |
2182 | 2098 | ||
2183 | lmc_running_reset (dev); | 2099 | lmc_running_reset (dev); |
2184 | 2100 | ||
@@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ | |||
2195 | /* immediate transmit */ | 2111 | /* immediate transmit */ |
2196 | LMC_CSR_WRITE (sc, csr_txpoll, 0); | 2112 | LMC_CSR_WRITE (sc, csr_txpoll, 0); |
2197 | 2113 | ||
2198 | sc->stats.tx_errors++; | 2114 | sc->lmc_device->stats.tx_errors++; |
2199 | sc->stats.tx_ProcTimeout++; /* -baz */ | 2115 | sc->extra_stats.tx_ProcTimeout++; /* -baz */ |
2200 | 2116 | ||
2201 | dev->trans_start = jiffies; | 2117 | dev->trans_start = jiffies; |
2202 | 2118 | ||
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c index 8aa461c941ce..f327674fc93a 100644 --- a/drivers/net/wan/lmc/lmc_media.c +++ b/drivers/net/wan/lmc/lmc_media.c | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <linux/inet.h> | 16 | #include <linux/inet.h> |
17 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
18 | 18 | ||
19 | #include <net/syncppp.h> | ||
20 | |||
21 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 19 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
22 | #include <asm/io.h> | 20 | #include <asm/io.h> |
23 | #include <asm/dma.h> | 21 | #include <asm/dma.h> |
@@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int); | |||
95 | static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); | 93 | static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); |
96 | 94 | ||
97 | static inline void write_av9110_bit (lmc_softc_t *, int); | 95 | static inline void write_av9110_bit (lmc_softc_t *, int); |
98 | static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, | 96 | static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); |
99 | u_int32_t, u_int32_t); | ||
100 | 97 | ||
101 | lmc_media_t lmc_ds3_media = { | 98 | lmc_media_t lmc_ds3_media = { |
102 | lmc_ds3_init, /* special media init stuff */ | 99 | lmc_ds3_init, /* special media init stuff */ |
@@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie) | |||
427 | static int | 424 | static int |
428 | lmc_ds3_get_link_status (lmc_softc_t * const sc) | 425 | lmc_ds3_get_link_status (lmc_softc_t * const sc) |
429 | { | 426 | { |
430 | u_int16_t link_status, link_status_11; | 427 | u16 link_status, link_status_11; |
431 | int ret = 1; | 428 | int ret = 1; |
432 | 429 | ||
433 | lmc_mii_writereg (sc, 0, 17, 7); | 430 | lmc_mii_writereg (sc, 0, 17, 7); |
@@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc) | |||
449 | (link_status & LMC_FRAMER_REG0_OOFS)){ | 446 | (link_status & LMC_FRAMER_REG0_OOFS)){ |
450 | ret = 0; | 447 | ret = 0; |
451 | if(sc->last_led_err[3] != 1){ | 448 | if(sc->last_led_err[3] != 1){ |
452 | u16 r1; | 449 | u16 r1; |
453 | lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ | 450 | lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ |
454 | r1 = lmc_mii_readreg (sc, 0, 18); | 451 | r1 = lmc_mii_readreg (sc, 0, 18); |
455 | r1 &= 0xfe; | 452 | r1 &= 0xfe; |
@@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc) | |||
462 | else { | 459 | else { |
463 | lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ | 460 | lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ |
464 | if(sc->last_led_err[3] == 1){ | 461 | if(sc->last_led_err[3] == 1){ |
465 | u16 r1; | 462 | u16 r1; |
466 | lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ | 463 | lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ |
467 | r1 = lmc_mii_readreg (sc, 0, 18); | 464 | r1 = lmc_mii_readreg (sc, 0, 18); |
468 | r1 |= 0x01; | 465 | r1 |= 0x01; |
@@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc) | |||
540 | * SSI methods | 537 | * SSI methods |
541 | */ | 538 | */ |
542 | 539 | ||
543 | static void | 540 | static void lmc_ssi_init(lmc_softc_t * const sc) |
544 | lmc_ssi_init (lmc_softc_t * const sc) | ||
545 | { | 541 | { |
546 | u_int16_t mii17; | 542 | u16 mii17; |
547 | int cable; | 543 | int cable; |
548 | 544 | ||
549 | sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; | 545 | sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; |
550 | 546 | ||
551 | mii17 = lmc_mii_readreg (sc, 0, 17); | 547 | mii17 = lmc_mii_readreg(sc, 0, 17); |
552 | 548 | ||
553 | cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; | 549 | cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; |
554 | sc->ictl.cable_type = cable; | 550 | sc->ictl.cable_type = cable; |
555 | 551 | ||
556 | lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); | 552 | lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); |
557 | } | 553 | } |
558 | 554 | ||
559 | static void | 555 | static void |
@@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) | |||
681 | static int | 677 | static int |
682 | lmc_ssi_get_link_status (lmc_softc_t * const sc) | 678 | lmc_ssi_get_link_status (lmc_softc_t * const sc) |
683 | { | 679 | { |
684 | u_int16_t link_status; | 680 | u16 link_status; |
685 | u_int32_t ticks; | 681 | u32 ticks; |
686 | int ret = 1; | 682 | int ret = 1; |
687 | int hw_hdsk = 1; | 683 | int hw_hdsk = 1; |
688 | 684 | ||
689 | /* | 685 | /* |
690 | * missing CTS? Hmm. If we require CTS on, we may never get the | 686 | * missing CTS? Hmm. If we require CTS on, we may never get the |
691 | * link to come up, so omit it in this test. | 687 | * link to come up, so omit it in this test. |
@@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc) | |||
720 | } | 716 | } |
721 | else if (ticks == 0 ) { /* no clock found ? */ | 717 | else if (ticks == 0 ) { /* no clock found ? */ |
722 | ret = 0; | 718 | ret = 0; |
723 | if(sc->last_led_err[3] != 1){ | 719 | if (sc->last_led_err[3] != 1) { |
724 | sc->stats.tx_lossOfClockCnt++; | 720 | sc->extra_stats.tx_lossOfClockCnt++; |
725 | printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); | 721 | printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); |
726 | } | 722 | } |
727 | sc->last_led_err[3] = 1; | 723 | sc->last_led_err[3] = 1; |
728 | lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ | 724 | lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ |
@@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c) | |||
838 | LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); | 834 | LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); |
839 | } | 835 | } |
840 | 836 | ||
841 | static void | 837 | static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) |
842 | write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v, | ||
843 | u_int32_t x, u_int32_t r) | ||
844 | { | 838 | { |
845 | int i; | 839 | int i; |
846 | 840 | ||
@@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v, | |||
887 | | LMC_GEP_SSI_GENERATOR)); | 881 | | LMC_GEP_SSI_GENERATOR)); |
888 | } | 882 | } |
889 | 883 | ||
890 | static void | 884 | static void lmc_ssi_watchdog(lmc_softc_t * const sc) |
891 | lmc_ssi_watchdog (lmc_softc_t * const sc) | ||
892 | { | 885 | { |
893 | u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); | 886 | u16 mii17 = lmc_mii_readreg(sc, 0, 17); |
894 | if (((mii17 >> 3) & 7) == 7) | 887 | if (((mii17 >> 3) & 7) == 7) |
895 | { | 888 | lmc_led_off(sc, LMC_MII16_LED2); |
896 | lmc_led_off (sc, LMC_MII16_LED2); | 889 | else |
897 | } | 890 | lmc_led_on(sc, LMC_MII16_LED2); |
898 | else | ||
899 | { | ||
900 | lmc_led_on (sc, LMC_MII16_LED2); | ||
901 | } | ||
902 | |||
903 | } | 891 | } |
904 | 892 | ||
905 | /* | 893 | /* |
@@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a) | |||
929 | static void | 917 | static void |
930 | lmc_t1_init (lmc_softc_t * const sc) | 918 | lmc_t1_init (lmc_softc_t * const sc) |
931 | { | 919 | { |
932 | u_int16_t mii16; | 920 | u16 mii16; |
933 | int i; | 921 | int i; |
934 | 922 | ||
935 | sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; | 923 | sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; |
@@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) | |||
1028 | */ static int | 1016 | */ static int |
1029 | lmc_t1_get_link_status (lmc_softc_t * const sc) | 1017 | lmc_t1_get_link_status (lmc_softc_t * const sc) |
1030 | { | 1018 | { |
1031 | u_int16_t link_status; | 1019 | u16 link_status; |
1032 | int ret = 1; | 1020 | int ret = 1; |
1033 | 1021 | ||
1034 | /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions | 1022 | /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions |
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 85315758198d..be9877ff551e 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c | |||
@@ -36,9 +36,6 @@ | |||
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/bitops.h> | 38 | #include <linux/bitops.h> |
39 | |||
40 | #include <net/syncppp.h> | ||
41 | |||
42 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 39 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
43 | #include <asm/io.h> | 40 | #include <asm/io.h> |
44 | #include <asm/dma.h> | 41 | #include <asm/dma.h> |
@@ -50,48 +47,6 @@ | |||
50 | #include "lmc_ioctl.h" | 47 | #include "lmc_ioctl.h" |
51 | #include "lmc_proto.h" | 48 | #include "lmc_proto.h" |
52 | 49 | ||
53 | /* | ||
54 | * The compile-time variable SPPPSTUP causes the module to be | ||
55 | * compiled without referencing any of the sync ppp routines. | ||
56 | */ | ||
57 | #ifdef SPPPSTUB | ||
58 | #define SPPP_detach(d) (void)0 | ||
59 | #define SPPP_open(d) 0 | ||
60 | #define SPPP_reopen(d) (void)0 | ||
61 | #define SPPP_close(d) (void)0 | ||
62 | #define SPPP_attach(d) (void)0 | ||
63 | #define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP | ||
64 | #else | ||
65 | #define SPPP_attach(x) sppp_attach((x)->pd) | ||
66 | #define SPPP_detach(x) sppp_detach((x)->pd->dev) | ||
67 | #define SPPP_open(x) sppp_open((x)->pd->dev) | ||
68 | #define SPPP_reopen(x) sppp_reopen((x)->pd->dev) | ||
69 | #define SPPP_close(x) sppp_close((x)->pd->dev) | ||
70 | #define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z)) | ||
71 | #endif | ||
72 | |||
73 | // init | ||
74 | void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/ | ||
75 | { | ||
76 | lmc_trace(sc->lmc_device, "lmc_proto_init in"); | ||
77 | switch(sc->if_type){ | ||
78 | case LMC_PPP: | ||
79 | sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); | ||
80 | if (!sc->pd) { | ||
81 | printk("lmc_proto_init(): kmalloc failure!\n"); | ||
82 | return; | ||
83 | } | ||
84 | sc->pd->dev = sc->lmc_device; | ||
85 | sc->if_ptr = sc->pd; | ||
86 | break; | ||
87 | case LMC_RAW: | ||
88 | break; | ||
89 | default: | ||
90 | break; | ||
91 | } | ||
92 | lmc_trace(sc->lmc_device, "lmc_proto_init out"); | ||
93 | } | ||
94 | |||
95 | // attach | 50 | // attach |
96 | void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ | 51 | void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ |
97 | { | 52 | { |
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ | |||
100 | case LMC_PPP: | 55 | case LMC_PPP: |
101 | { | 56 | { |
102 | struct net_device *dev = sc->lmc_device; | 57 | struct net_device *dev = sc->lmc_device; |
103 | SPPP_attach(sc); | ||
104 | dev->do_ioctl = lmc_ioctl; | 58 | dev->do_ioctl = lmc_ioctl; |
105 | } | 59 | } |
106 | break; | 60 | break; |
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ | |||
108 | { | 62 | { |
109 | struct net_device *dev = sc->lmc_device; | 63 | struct net_device *dev = sc->lmc_device; |
110 | /* | 64 | /* |
111 | * They set a few basics because they don't use sync_ppp | 65 | * They set a few basics because they don't use HDLC |
112 | */ | 66 | */ |
113 | dev->flags |= IFF_POINTOPOINT; | 67 | dev->flags |= IFF_POINTOPOINT; |
114 | 68 | ||
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ | |||
124 | lmc_trace(sc->lmc_device, "lmc_proto_attach out"); | 78 | lmc_trace(sc->lmc_device, "lmc_proto_attach out"); |
125 | } | 79 | } |
126 | 80 | ||
127 | // detach | 81 | int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) |
128 | void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/ | ||
129 | { | 82 | { |
130 | switch(sc->if_type){ | 83 | lmc_trace(sc->lmc_device, "lmc_proto_ioctl"); |
131 | case LMC_PPP: | 84 | if (sc->if_type == LMC_PPP) |
132 | SPPP_detach(sc); | 85 | return hdlc_ioctl(sc->lmc_device, ifr, cmd); |
133 | break; | 86 | return -EOPNOTSUPP; |
134 | case LMC_RAW: /* Tell someone we're detaching? */ | ||
135 | break; | ||
136 | default: | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | } | 87 | } |
141 | 88 | ||
142 | // reopen | 89 | int lmc_proto_open(lmc_softc_t *sc) |
143 | void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/ | ||
144 | { | 90 | { |
145 | lmc_trace(sc->lmc_device, "lmc_proto_reopen in"); | 91 | int ret = 0; |
146 | switch(sc->if_type){ | ||
147 | case LMC_PPP: | ||
148 | SPPP_reopen(sc); | ||
149 | break; | ||
150 | case LMC_RAW: /* Reset the interface after being down, prepare to receive packets again */ | ||
151 | break; | ||
152 | default: | ||
153 | break; | ||
154 | } | ||
155 | lmc_trace(sc->lmc_device, "lmc_proto_reopen out"); | ||
156 | } | ||
157 | 92 | ||
93 | lmc_trace(sc->lmc_device, "lmc_proto_open in"); | ||
158 | 94 | ||
159 | // ioctl | 95 | if (sc->if_type == LMC_PPP) { |
160 | int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/ | 96 | ret = hdlc_open(sc->lmc_device); |
161 | { | 97 | if (ret < 0) |
162 | lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); | 98 | printk(KERN_WARNING "%s: HDLC open failed: %d\n", |
163 | switch(sc->if_type){ | 99 | sc->name, ret); |
164 | case LMC_PPP: | 100 | } |
165 | return SPPP_do_ioctl (sc, ifr, cmd); | 101 | |
166 | break; | 102 | lmc_trace(sc->lmc_device, "lmc_proto_open out"); |
167 | default: | 103 | return ret; |
168 | return -EOPNOTSUPP; | ||
169 | break; | ||
170 | } | ||
171 | lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); | ||
172 | } | 104 | } |
173 | 105 | ||
174 | // open | 106 | void lmc_proto_close(lmc_softc_t *sc) |
175 | void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/ | ||
176 | { | 107 | { |
177 | int ret; | 108 | lmc_trace(sc->lmc_device, "lmc_proto_close in"); |
178 | 109 | ||
179 | lmc_trace(sc->lmc_device, "lmc_proto_open in"); | 110 | if (sc->if_type == LMC_PPP) |
180 | switch(sc->if_type){ | 111 | hdlc_close(sc->lmc_device); |
181 | case LMC_PPP: | ||
182 | ret = SPPP_open(sc); | ||
183 | if(ret < 0) | ||
184 | printk("%s: syncPPP open failed: %d\n", sc->name, ret); | ||
185 | break; | ||
186 | case LMC_RAW: /* We're about to start getting packets! */ | ||
187 | break; | ||
188 | default: | ||
189 | break; | ||
190 | } | ||
191 | lmc_trace(sc->lmc_device, "lmc_proto_open out"); | ||
192 | } | ||
193 | |||
194 | // close | ||
195 | 112 | ||
196 | void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/ | 113 | lmc_trace(sc->lmc_device, "lmc_proto_close out"); |
197 | { | ||
198 | lmc_trace(sc->lmc_device, "lmc_proto_close in"); | ||
199 | switch(sc->if_type){ | ||
200 | case LMC_PPP: | ||
201 | SPPP_close(sc); | ||
202 | break; | ||
203 | case LMC_RAW: /* Interface going down */ | ||
204 | break; | ||
205 | default: | ||
206 | break; | ||
207 | } | ||
208 | lmc_trace(sc->lmc_device, "lmc_proto_close out"); | ||
209 | } | 114 | } |
210 | 115 | ||
211 | __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ | 116 | __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ |
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ | |||
213 | lmc_trace(sc->lmc_device, "lmc_proto_type in"); | 118 | lmc_trace(sc->lmc_device, "lmc_proto_type in"); |
214 | switch(sc->if_type){ | 119 | switch(sc->if_type){ |
215 | case LMC_PPP: | 120 | case LMC_PPP: |
216 | return htons(ETH_P_WAN_PPP); | 121 | return hdlc_type_trans(skb, sc->lmc_device); |
217 | break; | 122 | break; |
218 | case LMC_NET: | 123 | case LMC_NET: |
219 | return htons(ETH_P_802_2); | 124 | return htons(ETH_P_802_2); |
220 | break; | 125 | break; |
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ | |||
245 | } | 150 | } |
246 | lmc_trace(sc->lmc_device, "lmc_proto_netif out"); | 151 | lmc_trace(sc->lmc_device, "lmc_proto_netif out"); |
247 | } | 152 | } |
248 | |||
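With the SPPP_* wrappers gone, lmc_proto.c is reduced to thin calls into the generic HDLC core: hdlc_open()/hdlc_close() bracket the interface, hdlc_ioctl() handles the SIOCWANDEV family, and hdlc_type_trans() classifies received frames. The device registration itself is not part of this hunk (it lives in lmc_main.c earlier in the patch), so the sketch below only shows the general shape of hooking a WAN driver to generic HDLC; my_attach, my_xmit and example_hdlc_setup are hypothetical names.

    /* Sketch under the assumptions above -- not the driver's actual probe code. */
    #include <linux/hdlc.h>

    static int my_attach(struct net_device *dev, unsigned short encoding,
                         unsigned short parity)
    {
            return 0;               /* hypothetical: accept any line coding */
    }

    static int my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_kfree_skb(skb);     /* hypothetical placeholder transmit */
            return 0;
    }

    static struct net_device *example_hdlc_setup(void *priv)
    {
            struct net_device *dev = alloc_hdlcdev(priv);  /* dev_to_hdlc(dev)->priv = priv */

            if (!dev)
                    return NULL;

            dev_to_hdlc(dev)->attach = my_attach;  /* called when a protocol (e.g. hdlc_ppp) binds */
            dev_to_hdlc(dev)->xmit = my_xmit;      /* hardware transmit hook used by the protocols */

            if (register_hdlc_device(dev)) {       /* creates the hdlcN interface */
                    free_netdev(dev);
                    return NULL;
            }
            return dev;
    }

Once that plumbing exists, lmc_proto_open() and lmc_proto_close() above only need to forward to hdlc_open() and hdlc_close(), which is exactly what the new code does.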
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h index ccaa69e8b3c7..662148c54644 100644 --- a/drivers/net/wan/lmc/lmc_proto.h +++ b/drivers/net/wan/lmc/lmc_proto.h | |||
@@ -1,16 +1,18 @@ | |||
1 | #ifndef _LMC_PROTO_H_ | 1 | #ifndef _LMC_PROTO_H_ |
2 | #define _LMC_PROTO_H_ | 2 | #define _LMC_PROTO_H_ |
3 | 3 | ||
4 | void lmc_proto_init(lmc_softc_t *sc); | 4 | #include <linux/hdlc.h> |
5 | |||
5 | void lmc_proto_attach(lmc_softc_t *sc); | 6 | void lmc_proto_attach(lmc_softc_t *sc); |
6 | void lmc_proto_detach(lmc_softc_t *sc); | ||
7 | void lmc_proto_reopen(lmc_softc_t *sc); | ||
8 | int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); | 7 | int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); |
9 | void lmc_proto_open(lmc_softc_t *sc); | 8 | int lmc_proto_open(lmc_softc_t *sc); |
10 | void lmc_proto_close(lmc_softc_t *sc); | 9 | void lmc_proto_close(lmc_softc_t *sc); |
11 | __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); | 10 | __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); |
12 | void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); | 11 | void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); |
13 | int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused); | ||
14 | 12 | ||
15 | #endif | 13 | static inline lmc_softc_t* dev_to_sc(struct net_device *dev) |
14 | { | ||
15 | return (lmc_softc_t *)dev_to_hdlc(dev)->priv; | ||
16 | } | ||
16 | 17 | ||
18 | #endif | ||
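dev_to_sc() gives every callback a one-line way back from the struct net_device handed in by the stack to the driver's softc, replacing the old dev->priv / if_ptr plumbing. A short usage sketch; the function name is hypothetical and simply mirrors lmc_proto_ioctl() above.

    /* Sketch: entering the driver from a net_device callback via dev_to_sc(). */
    static int lmc_example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    {
            lmc_softc_t *sc = dev_to_sc(dev);      /* dev_to_hdlc(dev)->priv */

            if (sc->if_type == LMC_PPP)            /* protocol ioctls go to generic HDLC */
                    return hdlc_ioctl(dev, ifr, cmd);
            return -EOPNOTSUPP;
    }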
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h index 6d003a39bfad..65d01978e784 100644 --- a/drivers/net/wan/lmc/lmc_var.h +++ b/drivers/net/wan/lmc/lmc_var.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _LMC_VAR_H_ | 1 | #ifndef _LMC_VAR_H_ |
2 | #define _LMC_VAR_H_ | 2 | #define _LMC_VAR_H_ |
3 | 3 | ||
4 | /* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */ | ||
5 | |||
6 | /* | 4 | /* |
7 | * Copyright (c) 1997-2000 LAN Media Corporation (LMC) | 5 | * Copyright (c) 1997-2000 LAN Media Corporation (LMC) |
8 | * All rights reserved. www.lanmedia.com | 6 | * All rights reserved. www.lanmedia.com |
@@ -19,23 +17,6 @@ | |||
19 | 17 | ||
20 | #include <linux/timer.h> | 18 | #include <linux/timer.h> |
21 | 19 | ||
22 | #ifndef __KERNEL__ | ||
23 | typedef signed char s8; | ||
24 | typedef unsigned char u8; | ||
25 | |||
26 | typedef signed short s16; | ||
27 | typedef unsigned short u16; | ||
28 | |||
29 | typedef signed int s32; | ||
30 | typedef unsigned int u32; | ||
31 | |||
32 | typedef signed long long s64; | ||
33 | typedef unsigned long long u64; | ||
34 | |||
35 | #define BITS_PER_LONG 32 | ||
36 | |||
37 | #endif | ||
38 | |||
39 | /* | 20 | /* |
40 | * basic definitions used in lmc include files | 21 | * basic definitions used in lmc include files |
41 | */ | 22 | */ |
@@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t; | |||
45 | typedef struct lmc___ctl lmc_ctl_t; | 26 | typedef struct lmc___ctl lmc_ctl_t; |
46 | 27 | ||
47 | #define lmc_csrptr_t unsigned long | 28 | #define lmc_csrptr_t unsigned long |
48 | #define u_int16_t u16 | ||
49 | #define u_int8_t u8 | ||
50 | #define tulip_uint32_t u32 | ||
51 | 29 | ||
52 | #define LMC_REG_RANGE 0x80 | 30 | #define LMC_REG_RANGE 0x80 |
53 | 31 | ||
@@ -122,45 +100,45 @@ struct lmc_regfile_t { | |||
122 | * used to define bits in the second tulip_desc_t field (length) | 100 | * used to define bits in the second tulip_desc_t field (length) |
123 | * for the transmit descriptor -baz */ | 101 | * for the transmit descriptor -baz */ |
124 | 102 | ||
125 | #define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) | 103 | #define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF)) |
126 | #define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) | 104 | #define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800)) |
127 | #define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) | 105 | #define LMC_TDES_HASH_FILTERING ((u32)(0x00400000)) |
128 | #define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) | 106 | #define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000)) |
129 | #define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) | 107 | #define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000)) |
130 | #define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) | 108 | #define LMC_TDES_END_OF_RING ((u32)(0x02000000)) |
131 | #define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) | 109 | #define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000)) |
132 | #define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) | 110 | #define LMC_TDES_SETUP_PACKET ((u32)(0x08000000)) |
133 | #define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) | 111 | #define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000)) |
134 | #define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) | 112 | #define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000)) |
135 | #define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) | 113 | #define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000)) |
136 | #define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) | 114 | #define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000)) |
137 | 115 | ||
138 | #define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 | 116 | #define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 |
139 | #define TDES_COLLISION_COUNT_BIT_NUMBER 3 | 117 | #define TDES_COLLISION_COUNT_BIT_NUMBER 3 |
140 | 118 | ||
141 | /* Constants for the RCV descriptor RDES */ | 119 | /* Constants for the RCV descriptor RDES */ |
142 | 120 | ||
143 | #define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) | 121 | #define LMC_RDES_OVERFLOW ((u32)(0x00000001)) |
144 | #define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) | 122 | #define LMC_RDES_CRC_ERROR ((u32)(0x00000002)) |
145 | #define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) | 123 | #define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004)) |
146 | #define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) | 124 | #define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008)) |
147 | #define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) | 125 | #define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010)) |
148 | #define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) | 126 | #define LMC_RDES_FRAME_TYPE ((u32)(0x00000020)) |
149 | #define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) | 127 | #define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040)) |
150 | #define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) | 128 | #define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080)) |
151 | #define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) | 129 | #define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100)) |
152 | #define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) | 130 | #define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200)) |
153 | #define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) | 131 | #define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400)) |
154 | #define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) | 132 | #define LMC_RDES_RUNT_FRAME ((u32)(0x00000800)) |
155 | #define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) | 133 | #define LMC_RDES_DATA_TYPE ((u32)(0x00003000)) |
156 | #define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) | 134 | #define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000)) |
157 | #define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) | 135 | #define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000)) |
158 | #define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) | 136 | #define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000)) |
159 | #define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) | 137 | #define LMC_RDES_OWN_BIT ((u32)(0x80000000)) |
160 | 138 | ||
161 | #define RDES_FRAME_LENGTH_BIT_NUMBER 16 | 139 | #define RDES_FRAME_LENGTH_BIT_NUMBER 16 |
162 | 140 | ||
163 | #define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ | 141 | #define LMC_RDES_ERROR_MASK ( (u32)( \ |
164 | LMC_RDES_OVERFLOW \ | 142 | LMC_RDES_OVERFLOW \ |
165 | | LMC_RDES_DRIBBLING_BIT \ | 143 | | LMC_RDES_DRIBBLING_BIT \ |
166 | | LMC_RDES_REPORT_ON_MII_ERR \ | 144 | | LMC_RDES_REPORT_ON_MII_ERR \ |
@@ -172,32 +150,32 @@ struct lmc_regfile_t { | |||
172 | */ | 150 | */ |
173 | 151 | ||
174 | typedef struct { | 152 | typedef struct { |
175 | u_int32_t n; | 153 | u32 n; |
176 | u_int32_t m; | 154 | u32 m; |
177 | u_int32_t v; | 155 | u32 v; |
178 | u_int32_t x; | 156 | u32 x; |
179 | u_int32_t r; | 157 | u32 r; |
180 | u_int32_t f; | 158 | u32 f; |
181 | u_int32_t exact; | 159 | u32 exact; |
182 | } lmc_av9110_t; | 160 | } lmc_av9110_t; |
183 | 161 | ||
184 | /* | 162 | /* |
185 | * Common structure passed to the ioctl code. | 163 | * Common structure passed to the ioctl code. |
186 | */ | 164 | */ |
187 | struct lmc___ctl { | 165 | struct lmc___ctl { |
188 | u_int32_t cardtype; | 166 | u32 cardtype; |
189 | u_int32_t clock_source; /* HSSI, T1 */ | 167 | u32 clock_source; /* HSSI, T1 */ |
190 | u_int32_t clock_rate; /* T1 */ | 168 | u32 clock_rate; /* T1 */ |
191 | u_int32_t crc_length; | 169 | u32 crc_length; |
192 | u_int32_t cable_length; /* DS3 */ | 170 | u32 cable_length; /* DS3 */ |
193 | u_int32_t scrambler_onoff; /* DS3 */ | 171 | u32 scrambler_onoff; /* DS3 */ |
194 | u_int32_t cable_type; /* T1 */ | 172 | u32 cable_type; /* T1 */ |
195 | u_int32_t keepalive_onoff; /* protocol */ | 173 | u32 keepalive_onoff; /* protocol */ |
196 | u_int32_t ticks; /* ticks/sec */ | 174 | u32 ticks; /* ticks/sec */ |
197 | union { | 175 | union { |
198 | lmc_av9110_t ssi; | 176 | lmc_av9110_t ssi; |
199 | } cardspec; | 177 | } cardspec; |
200 | u_int32_t circuit_type; /* T1 or E1 */ | 178 | u32 circuit_type; /* T1 or E1 */ |
201 | }; | 179 | }; |
202 | 180 | ||
203 | 181 | ||
@@ -244,108 +222,69 @@ struct lmc___media { | |||
244 | 222 | ||
245 | #define STATCHECK 0xBEEFCAFE | 223 | #define STATCHECK 0xBEEFCAFE |
246 | 224 | ||
247 | /* Included in this structure are first | 225 | struct lmc_extra_statistics |
248 | * - standard net_device_stats | ||
249 | * - some other counters used for debug and driver performance | ||
250 | * evaluation -baz | ||
251 | */ | ||
252 | struct lmc_statistics | ||
253 | { | 226 | { |
254 | unsigned long rx_packets; /* total packets received */ | 227 | u32 version_size; |
255 | unsigned long tx_packets; /* total packets transmitted */ | 228 | u32 lmc_cardtype; |
256 | unsigned long rx_bytes; | 229 | |
257 | unsigned long tx_bytes; | 230 | u32 tx_ProcTimeout; |
258 | 231 | u32 tx_IntTimeout; | |
259 | unsigned long rx_errors; /* bad packets received */ | 232 | u32 tx_NoCompleteCnt; |
260 | unsigned long tx_errors; /* packet transmit problems */ | 233 | u32 tx_MaxXmtsB4Int; |
261 | unsigned long rx_dropped; /* no space in linux buffers */ | 234 | u32 tx_TimeoutCnt; |
262 | unsigned long tx_dropped; /* no space available in linux */ | 235 | u32 tx_OutOfSyncPtr; |
263 | unsigned long multicast; /* multicast packets received */ | 236 | u32 tx_tbusy0; |
264 | unsigned long collisions; | 237 | u32 tx_tbusy1; |
265 | 238 | u32 tx_tbusy_calls; | |
266 | /* detailed rx_errors: */ | 239 | u32 resetCount; |
267 | unsigned long rx_length_errors; | 240 | u32 lmc_txfull; |
268 | unsigned long rx_over_errors; /* receiver ring buff overflow */ | 241 | u32 tbusy; |
269 | unsigned long rx_crc_errors; /* recved pkt with crc error */ | 242 | u32 dirtyTx; |
270 | unsigned long rx_frame_errors; /* recv'd frame alignment error */ | 243 | u32 lmc_next_tx; |
271 | unsigned long rx_fifo_errors; /* recv'r fifo overrun */ | 244 | u32 otherTypeCnt; |
272 | unsigned long rx_missed_errors; /* receiver missed packet */ | 245 | u32 lastType; |
273 | 246 | u32 lastTypeOK; | |
274 | /* detailed tx_errors */ | 247 | u32 txLoopCnt; |
275 | unsigned long tx_aborted_errors; | 248 | u32 usedXmtDescripCnt; |
276 | unsigned long tx_carrier_errors; | 249 | u32 txIndexCnt; |
277 | unsigned long tx_fifo_errors; | 250 | u32 rxIntLoopCnt; |
278 | unsigned long tx_heartbeat_errors; | 251 | |
279 | unsigned long tx_window_errors; | 252 | u32 rx_SmallPktCnt; |
280 | 253 | u32 rx_BadPktSurgeCnt; | |
281 | /* for cslip etc */ | 254 | u32 rx_BuffAllocErr; |
282 | unsigned long rx_compressed; | 255 | u32 tx_lossOfClockCnt; |
283 | unsigned long tx_compressed; | 256 | |
284 | 257 | /* T1 error counters */ | |
285 | /* ------------------------------------- | 258 | u32 framingBitErrorCount; |
286 | * Custom stats & counters follow -baz */ | 259 | u32 lineCodeViolationCount; |
287 | u_int32_t version_size; | 260 | |
288 | u_int32_t lmc_cardtype; | 261 | u32 lossOfFrameCount; |
289 | 262 | u32 changeOfFrameAlignmentCount; | |
290 | u_int32_t tx_ProcTimeout; | 263 | u32 severelyErroredFrameCount; |
291 | u_int32_t tx_IntTimeout; | 264 | |
292 | u_int32_t tx_NoCompleteCnt; | 265 | u32 check; |
293 | u_int32_t tx_MaxXmtsB4Int; | ||
294 | u_int32_t tx_TimeoutCnt; | ||
295 | u_int32_t tx_OutOfSyncPtr; | ||
296 | u_int32_t tx_tbusy0; | ||
297 | u_int32_t tx_tbusy1; | ||
298 | u_int32_t tx_tbusy_calls; | ||
299 | u_int32_t resetCount; | ||
300 | u_int32_t lmc_txfull; | ||
301 | u_int32_t tbusy; | ||
302 | u_int32_t dirtyTx; | ||
303 | u_int32_t lmc_next_tx; | ||
304 | u_int32_t otherTypeCnt; | ||
305 | u_int32_t lastType; | ||
306 | u_int32_t lastTypeOK; | ||
307 | u_int32_t txLoopCnt; | ||
308 | u_int32_t usedXmtDescripCnt; | ||
309 | u_int32_t txIndexCnt; | ||
310 | u_int32_t rxIntLoopCnt; | ||
311 | |||
312 | u_int32_t rx_SmallPktCnt; | ||
313 | u_int32_t rx_BadPktSurgeCnt; | ||
314 | u_int32_t rx_BuffAllocErr; | ||
315 | u_int32_t tx_lossOfClockCnt; | ||
316 | |||
317 | /* T1 error counters */ | ||
318 | u_int32_t framingBitErrorCount; | ||
319 | u_int32_t lineCodeViolationCount; | ||
320 | |||
321 | u_int32_t lossOfFrameCount; | ||
322 | u_int32_t changeOfFrameAlignmentCount; | ||
323 | u_int32_t severelyErroredFrameCount; | ||
324 | |||
325 | u_int32_t check; | ||
326 | }; | 266 | }; |
327 | 267 | ||
328 | |||
329 | typedef struct lmc_xinfo { | 268 | typedef struct lmc_xinfo { |
330 | u_int32_t Magic0; /* BEEFCAFE */ | 269 | u32 Magic0; /* BEEFCAFE */ |
331 | 270 | ||
332 | u_int32_t PciCardType; | 271 | u32 PciCardType; |
333 | u_int32_t PciSlotNumber; /* PCI slot number */ | 272 | u32 PciSlotNumber; /* PCI slot number */ |
334 | 273 | ||
335 | u_int16_t DriverMajorVersion; | 274 | u16 DriverMajorVersion; |
336 | u_int16_t DriverMinorVersion; | 275 | u16 DriverMinorVersion; |
337 | u_int16_t DriverSubVersion; | 276 | u16 DriverSubVersion; |
338 | 277 | ||
339 | u_int16_t XilinxRevisionNumber; | 278 | u16 XilinxRevisionNumber; |
340 | u_int16_t MaxFrameSize; | 279 | u16 MaxFrameSize; |
341 | 280 | ||
342 | u_int16_t t1_alarm1_status; | 281 | u16 t1_alarm1_status; |
343 | u_int16_t t1_alarm2_status; | 282 | u16 t1_alarm2_status; |
344 | 283 | ||
345 | int link_status; | 284 | int link_status; |
346 | u_int32_t mii_reg16; | 285 | u32 mii_reg16; |
347 | 286 | ||
348 | u_int32_t Magic1; /* DEADBEEF */ | 287 | u32 Magic1; /* DEADBEEF */ |
349 | } LMC_XINFO; | 288 | } LMC_XINFO; |
350 | 289 | ||
351 | 290 | ||
@@ -353,23 +292,22 @@ typedef struct lmc_xinfo { | |||
353 | * forward decl | 292 | * forward decl |
354 | */ | 293 | */ |
355 | struct lmc___softc { | 294 | struct lmc___softc { |
356 | void *if_ptr; /* General purpose pointer (used by SPPP) */ | ||
357 | char *name; | 295 | char *name; |
358 | u8 board_idx; | 296 | u8 board_idx; |
359 | struct lmc_statistics stats; | 297 | struct lmc_extra_statistics extra_stats; |
360 | struct net_device *lmc_device; | 298 | struct net_device *lmc_device; |
361 | 299 | ||
362 | int hang, rxdesc, bad_packet, some_counter; | 300 | int hang, rxdesc, bad_packet, some_counter; |
363 | u_int32_t txgo; | 301 | u32 txgo; |
364 | struct lmc_regfile_t lmc_csrs; | 302 | struct lmc_regfile_t lmc_csrs; |
365 | volatile u_int32_t lmc_txtick; | 303 | volatile u32 lmc_txtick; |
366 | volatile u_int32_t lmc_rxtick; | 304 | volatile u32 lmc_rxtick; |
367 | u_int32_t lmc_flags; | 305 | u32 lmc_flags; |
368 | u_int32_t lmc_intrmask; /* our copy of csr_intr */ | 306 | u32 lmc_intrmask; /* our copy of csr_intr */ |
369 | u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ | 307 | u32 lmc_cmdmode; /* our copy of csr_cmdmode */ |
370 | u_int32_t lmc_busmode; /* our copy of csr_busmode */ | 308 | u32 lmc_busmode; /* our copy of csr_busmode */ |
371 | u_int32_t lmc_gpio_io; /* state of in/out settings */ | 309 | u32 lmc_gpio_io; /* state of in/out settings */ |
372 | u_int32_t lmc_gpio; /* state of outputs */ | 310 | u32 lmc_gpio; /* state of outputs */ |
373 | struct sk_buff* lmc_txq[LMC_TXDESCS]; | 311 | struct sk_buff* lmc_txq[LMC_TXDESCS]; |
374 | struct sk_buff* lmc_rxq[LMC_RXDESCS]; | 312 | struct sk_buff* lmc_rxq[LMC_RXDESCS]; |
375 | volatile | 313 | volatile |
@@ -381,42 +319,41 @@ struct lmc___softc { | |||
381 | unsigned int lmc_taint_tx, lmc_taint_rx; | 319 | unsigned int lmc_taint_tx, lmc_taint_rx; |
382 | int lmc_tx_start, lmc_txfull; | 320 | int lmc_tx_start, lmc_txfull; |
383 | int lmc_txbusy; | 321 | int lmc_txbusy; |
384 | u_int16_t lmc_miireg16; | 322 | u16 lmc_miireg16; |
385 | int lmc_ok; | 323 | int lmc_ok; |
386 | int last_link_status; | 324 | int last_link_status; |
387 | int lmc_cardtype; | 325 | int lmc_cardtype; |
388 | u_int32_t last_frameerr; | 326 | u32 last_frameerr; |
389 | lmc_media_t *lmc_media; | 327 | lmc_media_t *lmc_media; |
390 | struct timer_list timer; | 328 | struct timer_list timer; |
391 | lmc_ctl_t ictl; | 329 | lmc_ctl_t ictl; |
392 | u_int32_t TxDescriptControlInit; | 330 | u32 TxDescriptControlInit; |
393 | 331 | ||
394 | int tx_TimeoutInd; /* additional driver state */ | 332 | int tx_TimeoutInd; /* additional driver state */ |
395 | int tx_TimeoutDisplay; | 333 | int tx_TimeoutDisplay; |
396 | unsigned int lastlmc_taint_tx; | 334 | unsigned int lastlmc_taint_tx; |
397 | int lasttx_packets; | 335 | int lasttx_packets; |
398 | u_int32_t tx_clockState; | 336 | u32 tx_clockState; |
399 | u_int32_t lmc_crcSize; | 337 | u32 lmc_crcSize; |
400 | LMC_XINFO lmc_xinfo; | 338 | LMC_XINFO lmc_xinfo; |
401 | char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ | 339 | char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ |
402 | char lmc_timing; /* for HSSI and SSI */ | 340 | char lmc_timing; /* for HSSI and SSI */ |
403 | int got_irq; | 341 | int got_irq; |
404 | 342 | ||
405 | char last_led_err[4]; | 343 | char last_led_err[4]; |
406 | 344 | ||
407 | u32 last_int; | 345 | u32 last_int; |
408 | u32 num_int; | 346 | u32 num_int; |
409 | 347 | ||
410 | spinlock_t lmc_lock; | 348 | spinlock_t lmc_lock; |
411 | u_int16_t if_type; /* PPP or NET */ | 349 | u16 if_type; /* HDLC/PPP or NET */ |
412 | struct ppp_device *pd; | ||
413 | 350 | ||
414 | /* Failure cases */ | 351 | /* Failure cases */ |
415 | u8 failed_ring; | 352 | u8 failed_ring; |
416 | u8 failed_recv_alloc; | 353 | u8 failed_recv_alloc; |
417 | 354 | ||
418 | /* Structure check */ | 355 | /* Structure check */ |
419 | u32 check; | 356 | u32 check; |
420 | }; | 357 | }; |
421 | 358 | ||
422 | #define LMC_PCI_TIME 1 | 359 | #define LMC_PCI_TIME 1 |
@@ -512,8 +449,8 @@ struct lmc___softc { | |||
512 | | TULIP_STS_TXUNDERFLOW\ | 449 | | TULIP_STS_TXUNDERFLOW\ |
513 | | TULIP_STS_RXSTOPPED ) | 450 | | TULIP_STS_RXSTOPPED ) |
514 | 451 | ||
515 | #define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) | 452 | #define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000)) |
516 | #define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) | 453 | #define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000)) |
517 | 454 | ||
518 | #ifndef TULIP_CMD_RECEIVEALL | 455 | #ifndef TULIP_CMD_RECEIVEALL |
519 | #define TULIP_CMD_RECEIVEALL 0x40000000L | 456 | #define TULIP_CMD_RECEIVEALL 0x40000000L |
@@ -525,46 +462,9 @@ struct lmc___softc { | |||
525 | #define LMC_ADAP_SSI 4 | 462 | #define LMC_ADAP_SSI 4 |
526 | #define LMC_ADAP_T1 5 | 463 | #define LMC_ADAP_T1 5 |
527 | 464 | ||
528 | #define HDLC_HDR_LEN 4 | ||
529 | #define HDLC_ADDR_LEN 1 | ||
530 | #define HDLC_SLARP 0x8035 | ||
531 | #define LMC_MTU 1500 | 465 | #define LMC_MTU 1500 |
532 | #define SLARP_LINECHECK 2 | ||
533 | 466 | ||
534 | #define LMC_CRC_LEN_16 2 /* 16-bit CRC */ | 467 | #define LMC_CRC_LEN_16 2 /* 16-bit CRC */ |
535 | #define LMC_CRC_LEN_32 4 | 468 | #define LMC_CRC_LEN_32 4 |
536 | 469 | ||
537 | #ifdef LMC_HDLC | ||
538 | /* definition of an hdlc header. */ | ||
539 | struct hdlc_hdr | ||
540 | { | ||
541 | u8 address; | ||
542 | u8 control; | ||
543 | u16 type; | ||
544 | }; | ||
545 | |||
546 | /* definition of a slarp header. */ | ||
547 | struct slarp | ||
548 | { | ||
549 | long code; | ||
550 | union sl | ||
551 | { | ||
552 | struct | ||
553 | { | ||
554 | ulong address; | ||
555 | ulong mask; | ||
556 | ushort unused; | ||
557 | } add; | ||
558 | struct | ||
559 | { | ||
560 | ulong mysequence; | ||
561 | ulong yoursequence; | ||
562 | ushort reliability; | ||
563 | ulong time; | ||
564 | } chk; | ||
565 | } t; | ||
566 | }; | ||
567 | #endif /* LMC_HDLC */ | ||
568 | |||
569 | |||
570 | #endif /* _LMC_VAR_H_ */ | 470 | #endif /* _LMC_VAR_H_ */ |
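lmc_extra_statistics keeps the old guard fields: version_size and lmc_cardtype at the front and check (STATCHECK, 0xBEEFCAFE) at the end, so a userspace consumer of the stats ioctl can verify it is reading the layout it expects. One plausible way those fields get filled is sketched below; the version constant and helper name are assumptions, not taken from this patch.

    /* Sketch under stated assumptions: EXAMPLE_DRIVER_VERSION and the helper
     * are hypothetical; the three fields and STATCHECK are from lmc_var.h. */
    #define EXAMPLE_DRIVER_VERSION 0x0102

    static void lmc_example_init_extra_stats(lmc_softc_t *sc)
    {
            sc->extra_stats.version_size =
                    (EXAMPLE_DRIVER_VERSION << 16) + sizeof(sc->extra_stats);
            sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
            sc->extra_stats.check = STATCHECK;     /* 0xBEEFCAFE */
    }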
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h index 63e9fcf31fb8..2e4f84f6cad4 100644 --- a/drivers/net/wan/pc300.h +++ b/drivers/net/wan/pc300.h | |||
@@ -100,31 +100,14 @@ | |||
100 | #define _PC300_H | 100 | #define _PC300_H |
101 | 101 | ||
102 | #include <linux/hdlc.h> | 102 | #include <linux/hdlc.h> |
103 | #include <net/syncppp.h> | ||
104 | #include "hd64572.h" | 103 | #include "hd64572.h" |
105 | #include "pc300-falc-lh.h" | 104 | #include "pc300-falc-lh.h" |
106 | 105 | ||
107 | #ifndef CY_TYPES | 106 | #define PC300_PROTO_MLPPP 1 |
108 | #define CY_TYPES | ||
109 | typedef __u64 ucdouble; /* 64 bits, unsigned */ | ||
110 | typedef __u32 uclong; /* 32 bits, unsigned */ | ||
111 | typedef __u16 ucshort; /* 16 bits, unsigned */ | ||
112 | typedef __u8 ucchar; /* 8 bits, unsigned */ | ||
113 | #endif /* CY_TYPES */ | ||
114 | 107 | ||
115 | #define PC300_PROTO_MLPPP 1 | ||
116 | |||
117 | #define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */ | ||
118 | |||
119 | #define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */ | ||
120 | #define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */ | ||
121 | |||
122 | #define PC300_MAXCARDS 4 /* Max number of cards per system */ | ||
123 | #define PC300_MAXCHAN 2 /* Number of channels per card */ | 108 | #define PC300_MAXCHAN 2 /* Number of channels per card */ |
124 | 109 | ||
125 | #define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */ | ||
126 | #define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ | 110 | #define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ |
127 | #define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */ | ||
128 | #define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ | 111 | #define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ |
129 | 112 | ||
130 | #define PC300_OSC_CLOCK 24576000 | 113 | #define PC300_OSC_CLOCK 24576000 |
@@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */ | |||
160 | * Memory access functions/macros * | 143 | * Memory access functions/macros * |
161 | * (required to support Alpha systems) * | 144 | * (required to support Alpha systems) * |
162 | ***************************************/ | 145 | ***************************************/ |
163 | #ifdef __KERNEL__ | 146 | #define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();} |
164 | #define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();} | ||
165 | #define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} | 147 | #define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} |
166 | #define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} | 148 | #define cpc_writel(port,val) {writel((u32)(val),(port)); mb();} |
167 | 149 | ||
168 | #define cpc_readb(port) readb(port) | 150 | #define cpc_readb(port) readb(port) |
169 | #define cpc_readw(port) readw(port) | 151 | #define cpc_readw(port) readw(port) |
170 | #define cpc_readl(port) readl(port) | 152 | #define cpc_readl(port) readl(port) |
171 | 153 | ||
172 | #else /* __KERNEL__ */ | ||
173 | #define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val)) | ||
174 | #define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val)) | ||
175 | #define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val)) | ||
176 | |||
177 | #define cpc_readb(port) (*(volatile ucchar *)(port)) | ||
178 | #define cpc_readw(port) (*(volatile ucshort *)(port)) | ||
179 | #define cpc_readl(port) (*(volatile uclong *)(port)) | ||
180 | |||
181 | #endif /* __KERNEL__ */ | ||
182 | |||
183 | /****** Data Structures *****************************************************/ | 154 | /****** Data Structures *****************************************************/ |
184 | 155 | ||
185 | /* | 156 | /* |
@@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */ | |||
188 | * (memory mapped). | 159 | * (memory mapped). |
189 | */ | 160 | */ |
190 | struct RUNTIME_9050 { | 161 | struct RUNTIME_9050 { |
191 | uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ | 162 | u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ |
192 | uclong loc_rom_range; /* 10h : Local ROM Range */ | 163 | u32 loc_rom_range; /* 10h : Local ROM Range */ |
193 | uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ | 164 | u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ |
194 | uclong loc_rom_base; /* 24h : Local ROM Base */ | 165 | u32 loc_rom_base; /* 24h : Local ROM Base */ |
195 | uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ | 166 | u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ |
196 | uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ | 167 | u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */ |
197 | uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ | 168 | u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ |
198 | uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ | 169 | u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ |
199 | uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ | 170 | u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ |
200 | }; | 171 | }; |
201 | 172 | ||
202 | #define PLX_9050_LINT1_ENABLE 0x01 | 173 | #define PLX_9050_LINT1_ENABLE 0x01 |
@@ -240,66 +211,66 @@ struct RUNTIME_9050 { | |||
240 | #define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ | 211 | #define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ |
241 | 212 | ||
242 | typedef struct falc { | 213 | typedef struct falc { |
243 | ucchar sync; /* If true FALC is synchronized */ | 214 | u8 sync; /* If true FALC is synchronized */ |
244 | ucchar active; /* if TRUE then already active */ | 215 | u8 active; /* if TRUE then already active */ |
245 | ucchar loop_active; /* if TRUE a line loopback UP was received */ | 216 | u8 loop_active; /* if TRUE a line loopback UP was received */ |
246 | ucchar loop_gen; /* if TRUE a line loopback UP was issued */ | 217 | u8 loop_gen; /* if TRUE a line loopback UP was issued */ |
247 | 218 | ||
248 | ucchar num_channels; | 219 | u8 num_channels; |
249 | ucchar offset; /* 1 for T1, 0 for E1 */ | 220 | u8 offset; /* 1 for T1, 0 for E1 */ |
250 | ucchar full_bandwidth; | 221 | u8 full_bandwidth; |
251 | 222 | ||
252 | ucchar xmb_cause; | 223 | u8 xmb_cause; |
253 | ucchar multiframe_mode; | 224 | u8 multiframe_mode; |
254 | 225 | ||
255 | /* Statistics */ | 226 | /* Statistics */ |
256 | ucshort pden; /* Pulse Density violation count */ | 227 | u16 pden; /* Pulse Density violation count */ |
257 | ucshort los; /* Loss of Signal count */ | 228 | u16 los; /* Loss of Signal count */ |
258 | ucshort losr; /* Loss of Signal recovery count */ | 229 | u16 losr; /* Loss of Signal recovery count */ |
259 | ucshort lfa; /* Loss of frame alignment count */ | 230 | u16 lfa; /* Loss of frame alignment count */ |
260 | ucshort farec; /* Frame Alignment Recovery count */ | 231 | u16 farec; /* Frame Alignment Recovery count */ |
261 | ucshort lmfa; /* Loss of multiframe alignment count */ | 232 | u16 lmfa; /* Loss of multiframe alignment count */ |
262 | ucshort ais; /* Remote Alarm indication Signal count */ | 233 | u16 ais; /* Remote Alarm indication Signal count */ |
263 | ucshort sec; /* One-second timer */ | 234 | u16 sec; /* One-second timer */ |
264 | ucshort es; /* Errored second */ | 235 | u16 es; /* Errored second */ |
265 | ucshort rai; /* remote alarm received */ | 236 | u16 rai; /* remote alarm received */ |
266 | ucshort bec; | 237 | u16 bec; |
267 | ucshort fec; | 238 | u16 fec; |
268 | ucshort cvc; | 239 | u16 cvc; |
269 | ucshort cec; | 240 | u16 cec; |
270 | ucshort ebc; | 241 | u16 ebc; |
271 | 242 | ||
272 | /* Status */ | 243 | /* Status */ |
273 | ucchar red_alarm; | 244 | u8 red_alarm; |
274 | ucchar blue_alarm; | 245 | u8 blue_alarm; |
275 | ucchar loss_fa; | 246 | u8 loss_fa; |
276 | ucchar yellow_alarm; | 247 | u8 yellow_alarm; |
277 | ucchar loss_mfa; | 248 | u8 loss_mfa; |
278 | ucchar prbs; | 249 | u8 prbs; |
279 | } falc_t; | 250 | } falc_t; |
280 | 251 | ||
281 | typedef struct falc_status { | 252 | typedef struct falc_status { |
282 | ucchar sync; /* If true FALC is synchronized */ | 253 | u8 sync; /* If true FALC is synchronized */ |
283 | ucchar red_alarm; | 254 | u8 red_alarm; |
284 | ucchar blue_alarm; | 255 | u8 blue_alarm; |
285 | ucchar loss_fa; | 256 | u8 loss_fa; |
286 | ucchar yellow_alarm; | 257 | u8 yellow_alarm; |
287 | ucchar loss_mfa; | 258 | u8 loss_mfa; |
288 | ucchar prbs; | 259 | u8 prbs; |
289 | } falc_status_t; | 260 | } falc_status_t; |
290 | 261 | ||
291 | typedef struct rsv_x21_status { | 262 | typedef struct rsv_x21_status { |
292 | ucchar dcd; | 263 | u8 dcd; |
293 | ucchar dsr; | 264 | u8 dsr; |
294 | ucchar cts; | 265 | u8 cts; |
295 | ucchar rts; | 266 | u8 rts; |
296 | ucchar dtr; | 267 | u8 dtr; |
297 | } rsv_x21_status_t; | 268 | } rsv_x21_status_t; |
298 | 269 | ||
299 | typedef struct pc300stats { | 270 | typedef struct pc300stats { |
300 | int hw_type; | 271 | int hw_type; |
301 | uclong line_on; | 272 | u32 line_on; |
302 | uclong line_off; | 273 | u32 line_off; |
303 | struct net_device_stats gen_stats; | 274 | struct net_device_stats gen_stats; |
304 | falc_t te_stats; | 275 | falc_t te_stats; |
305 | } pc300stats_t; | 276 | } pc300stats_t; |
@@ -317,28 +288,19 @@ typedef struct pc300loopback { | |||
317 | 288 | ||
318 | typedef struct pc300patterntst { | 289 | typedef struct pc300patterntst { |
319 | char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ | 290 | char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ |
320 | ucshort num_errors; | 291 | u16 num_errors; |
321 | } pc300patterntst_t; | 292 | } pc300patterntst_t; |
322 | 293 | ||
323 | typedef struct pc300dev { | 294 | typedef struct pc300dev { |
324 | void *if_ptr; /* General purpose pointer */ | ||
325 | struct pc300ch *chan; | 295 | struct pc300ch *chan; |
326 | ucchar trace_on; | 296 | u8 trace_on; |
327 | uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ | 297 | u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ |
328 | uclong line_off; | 298 | u32 line_off; |
329 | #ifdef __KERNEL__ | ||
330 | char name[16]; | 299 | char name[16]; |
331 | struct net_device *dev; | 300 | struct net_device *dev; |
332 | |||
333 | void *private; | ||
334 | struct sk_buff *tx_skb; | ||
335 | union { /* This union has all the protocol-specific structures */ | ||
336 | struct ppp_device pppdev; | ||
337 | }ifu; | ||
338 | #ifdef CONFIG_PC300_MLPPP | 301 | #ifdef CONFIG_PC300_MLPPP |
339 | void *cpc_tty; /* information to PC300 TTY driver */ | 302 | void *cpc_tty; /* information to PC300 TTY driver */ |
340 | #endif | 303 | #endif |
341 | #endif /* __KERNEL__ */ | ||
342 | }pc300dev_t; | 304 | }pc300dev_t; |
343 | 305 | ||
344 | typedef struct pc300hw { | 306 | typedef struct pc300hw { |
@@ -346,43 +308,42 @@ typedef struct pc300hw { | |||
346 | int bus; /* Bus (PCI, PMC, etc.) */ | 308 | int bus; /* Bus (PCI, PMC, etc.) */ |
347 | int nchan; /* number of channels */ | 309 | int nchan; /* number of channels */ |
348 | int irq; /* interrupt request level */ | 310 | int irq; /* interrupt request level */ |
349 | uclong clock; /* Board clock */ | 311 | u32 clock; /* Board clock */ |
350 | ucchar cpld_id; /* CPLD ID (TE only) */ | 312 | u8 cpld_id; /* CPLD ID (TE only) */ |
351 | ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ | 313 | u16 cpld_reg1; /* CPLD reg 1 (TE only) */ |
352 | ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ | 314 | u16 cpld_reg2; /* CPLD reg 2 (TE only) */ |
353 | ucshort gpioc_reg; /* PLX GPIOC reg */ | 315 | u16 gpioc_reg; /* PLX GPIOC reg */ |
354 | ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ | 316 | u16 intctl_reg; /* PLX Int Ctrl/Status reg */ |
355 | uclong iophys; /* PLX registers I/O base */ | 317 | u32 iophys; /* PLX registers I/O base */ |
356 | uclong iosize; /* PLX registers I/O size */ | 318 | u32 iosize; /* PLX registers I/O size */ |
357 | uclong plxphys; /* PLX registers MMIO base (physical) */ | 319 | u32 plxphys; /* PLX registers MMIO base (physical) */ |
358 | void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ | 320 | void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ |
359 | uclong plxsize; /* PLX registers MMIO size */ | 321 | u32 plxsize; /* PLX registers MMIO size */ |
360 | uclong scaphys; /* SCA registers MMIO base (physical) */ | 322 | u32 scaphys; /* SCA registers MMIO base (physical) */ |
361 | void __iomem * scabase; /* SCA registers MMIO base (virtual) */ | 323 | void __iomem * scabase; /* SCA registers MMIO base (virtual) */ |
362 | uclong scasize; /* SCA registers MMIO size */ | 324 | u32 scasize; /* SCA registers MMIO size */ |
363 | uclong ramphys; /* On-board RAM MMIO base (physical) */ | 325 | u32 ramphys; /* On-board RAM MMIO base (physical) */ |
364 | void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ | 326 | void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ |
365 | uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ | 327 | u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ |
366 | uclong ramsize; /* On-board RAM MMIO size */ | 328 | u32 ramsize; /* On-board RAM MMIO size */ |
367 | uclong falcphys; /* FALC registers MMIO base (physical) */ | 329 | u32 falcphys; /* FALC registers MMIO base (physical) */ |
368 | void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ | 330 | void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ |
369 | uclong falcsize; /* FALC registers MMIO size */ | 331 | u32 falcsize; /* FALC registers MMIO size */ |
370 | } pc300hw_t; | 332 | } pc300hw_t; |
371 | 333 | ||
372 | typedef struct pc300chconf { | 334 | typedef struct pc300chconf { |
373 | sync_serial_settings phys_settings; /* Clock type/rate (in bps), | 335 | sync_serial_settings phys_settings; /* Clock type/rate (in bps), |
374 | loopback mode */ | 336 | loopback mode */ |
375 | raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ | 337 | raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ |
376 | uclong media; /* HW media (RS232, V.35, etc.) */ | 338 | u32 media; /* HW media (RS232, V.35, etc.) */ |
377 | uclong proto; /* Protocol (PPP, X.25, etc.) */ | 339 | u32 proto; /* Protocol (PPP, X.25, etc.) */ |
378 | ucchar monitor; /* Monitor mode (0 = off, !0 = on) */ | ||
379 | 340 | ||
380 | /* TE-specific parameters */ | 341 | /* TE-specific parameters */ |
381 | ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ | 342 | u8 lcode; /* Line Code (AMI, B8ZS, etc.) */ |
382 | ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ | 343 | u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */ |
383 | ucchar lbo; /* Line Build Out */ | 344 | u8 lbo; /* Line Build Out */ |
384 | ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ | 345 | u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */ |
385 | uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ | 346 | u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ |
386 | } pc300chconf_t; | 347 | } pc300chconf_t; |
387 | 348 | ||
388 | typedef struct pc300ch { | 349 | typedef struct pc300ch { |
@@ -390,20 +351,18 @@ typedef struct pc300ch { | |||
390 | int channel; | 351 | int channel; |
391 | pc300dev_t d; | 352 | pc300dev_t d; |
392 | pc300chconf_t conf; | 353 | pc300chconf_t conf; |
393 | ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ | 354 | u8 tx_first_bd; /* First TX DMA block descr. w/ data */ |
394 | ucchar tx_next_bd; /* Next free TX DMA block descriptor */ | 355 | u8 tx_next_bd; /* Next free TX DMA block descriptor */ |
395 | ucchar rx_first_bd; /* First free RX DMA block descriptor */ | 356 | u8 rx_first_bd; /* First free RX DMA block descriptor */ |
396 | ucchar rx_last_bd; /* Last free RX DMA block descriptor */ | 357 | u8 rx_last_bd; /* Last free RX DMA block descriptor */ |
397 | ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ | 358 | u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */ |
398 | falc_t falc; /* FALC structure (TE only) */ | 359 | falc_t falc; /* FALC structure (TE only) */ |
399 | } pc300ch_t; | 360 | } pc300ch_t; |
400 | 361 | ||
401 | typedef struct pc300 { | 362 | typedef struct pc300 { |
402 | pc300hw_t hw; /* hardware config. */ | 363 | pc300hw_t hw; /* hardware config. */ |
403 | pc300ch_t chan[PC300_MAXCHAN]; | 364 | pc300ch_t chan[PC300_MAXCHAN]; |
404 | #ifdef __KERNEL__ | ||
405 | spinlock_t card_lock; | 365 | spinlock_t card_lock; |
406 | #endif /* __KERNEL__ */ | ||
407 | } pc300_t; | 366 | } pc300_t; |
408 | 367 | ||
409 | typedef struct pc300conf { | 368 | typedef struct pc300conf { |
@@ -471,12 +430,7 @@ enum pc300_loopback_cmds { | |||
471 | #define PC300_TX_QUEUE_LEN 100 | 430 | #define PC300_TX_QUEUE_LEN 100 |
472 | #define PC300_DEF_MTU 1600 | 431 | #define PC300_DEF_MTU 1600 |
473 | 432 | ||
474 | #ifdef __KERNEL__ | ||
475 | /* Function Prototypes */ | 433 | /* Function Prototypes */ |
476 | void tx_dma_start(pc300_t *, int); | ||
477 | int cpc_open(struct net_device *dev); | 434 | int cpc_open(struct net_device *dev); |
478 | int cpc_set_media(hdlc_device *, int); | ||
479 | #endif /* __KERNEL__ */ | ||
480 | 435 | ||
481 | #endif /* _PC300_H */ | 436 | #endif /* _PC300_H */ |
482 | |||
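The cpc_write*() macros now cast to the kernel's fixed-width types, but they keep the mb() after every MMIO store: the surrounding comment notes this is what makes the accessors safe on weakly ordered machines such as Alpha, where back-to-back writes could otherwise be reordered. A small illustration follows; the function is hypothetical, the register and bit layout follow tx_dma_stop() in pc300_drv.c further down.

    /* Sketch: the barrier folded into cpc_writeb() guarantees the device
     * sees the disable before the reset.  Illustrative function only. */
    static void example_tx_dma_stop(pc300_t *card, int ch)
    {
            void __iomem *scabase = card->hw.scabase;

            cpc_writeb(scabase + DRR, 1 << (5 + 2 * ch));  /* disable the channel's DMA */
            cpc_writeb(scabase + DRR, 1 << (1 + 2 * ch));  /* then reset it */
    }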
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 334170527755..d0a8d1e352ac 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c | |||
@@ -227,8 +227,6 @@ static char rcsid[] = | |||
227 | #include <linux/netdevice.h> | 227 | #include <linux/netdevice.h> |
228 | #include <linux/spinlock.h> | 228 | #include <linux/spinlock.h> |
229 | #include <linux/if.h> | 229 | #include <linux/if.h> |
230 | |||
231 | #include <net/syncppp.h> | ||
232 | #include <net/arp.h> | 230 | #include <net/arp.h> |
233 | 231 | ||
234 | #include <asm/io.h> | 232 | #include <asm/io.h> |
@@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int); | |||
285 | static void tx_dma_buf_check(pc300_t *, int); | 283 | static void tx_dma_buf_check(pc300_t *, int); |
286 | static void rx_dma_buf_check(pc300_t *, int); | 284 | static void rx_dma_buf_check(pc300_t *, int); |
287 | static irqreturn_t cpc_intr(int, void *); | 285 | static irqreturn_t cpc_intr(int, void *); |
288 | static int clock_rate_calc(uclong, uclong, int *); | 286 | static int clock_rate_calc(u32, u32, int *); |
289 | static uclong detect_ram(pc300_t *); | 287 | static u32 detect_ram(pc300_t *); |
290 | static void plx_init(pc300_t *); | 288 | static void plx_init(pc300_t *); |
291 | static void cpc_trace(struct net_device *, struct sk_buff *, char); | 289 | static void cpc_trace(struct net_device *, struct sk_buff *, char); |
292 | static int cpc_attach(struct net_device *, unsigned short, unsigned short); | 290 | static int cpc_attach(struct net_device *, unsigned short, unsigned short); |
@@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch) | |||
311 | + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); | 309 | + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); |
312 | 310 | ||
313 | for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { | 311 | for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { |
314 | cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + | 312 | cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE + |
315 | (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); | 313 | (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); |
316 | cpc_writel(&ptdescr->ptbuf, | 314 | cpc_writel(&ptdescr->ptbuf, |
317 | (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); | 315 | (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); |
318 | } | 316 | } |
319 | } | 317 | } |
320 | 318 | ||
@@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch) | |||
341 | + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); | 339 | + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); |
342 | 340 | ||
343 | for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { | 341 | for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { |
344 | cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + | 342 | cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE + |
345 | (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); | 343 | (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); |
346 | cpc_writel(&ptdescr->ptbuf, | 344 | cpc_writel(&ptdescr->ptbuf, |
347 | (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); | 345 | (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); |
348 | } | 346 | } |
349 | } | 347 | } |
350 | 348 | ||
@@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch) | |||
367 | { | 365 | { |
368 | volatile pcsca_bd_t __iomem *ptdescr; | 366 | volatile pcsca_bd_t __iomem *ptdescr; |
369 | int i; | 367 | int i; |
370 | ucshort first_bd = card->chan[ch].tx_first_bd; | 368 | u16 first_bd = card->chan[ch].tx_first_bd; |
371 | ucshort next_bd = card->chan[ch].tx_next_bd; | 369 | u16 next_bd = card->chan[ch].tx_next_bd; |
372 | 370 | ||
373 | printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, | 371 | printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, |
374 | first_bd, TX_BD_ADDR(ch, first_bd), | 372 | first_bd, TX_BD_ADDR(ch, first_bd), |
@@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch) | |||
392 | { | 390 | { |
393 | volatile pcsca_bd_t __iomem *ptdescr; | 391 | volatile pcsca_bd_t __iomem *ptdescr; |
394 | int i; | 392 | int i; |
395 | ucshort first_bd = card->chan[ch].tx_first_bd; | 393 | u16 first_bd = card->chan[ch].tx_first_bd; |
396 | ucshort next_bd = card->chan[ch].tx_next_bd; | 394 | u16 next_bd = card->chan[ch].tx_next_bd; |
397 | uclong scabase = card->hw.scabase; | 395 | u32 scabase = card->hw.scabase; |
398 | 396 | ||
399 | printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); | 397 | printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); |
400 | printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, | 398 | printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, |
@@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch) | |||
413 | printk("\n"); | 411 | printk("\n"); |
414 | } | 412 | } |
415 | #endif | 413 | #endif |
416 | 414 | ||
417 | static void rx_dma_buf_check(pc300_t * card, int ch) | 415 | static void rx_dma_buf_check(pc300_t * card, int ch) |
418 | { | 416 | { |
419 | volatile pcsca_bd_t __iomem *ptdescr; | 417 | volatile pcsca_bd_t __iomem *ptdescr; |
420 | int i; | 418 | int i; |
421 | ucshort first_bd = card->chan[ch].rx_first_bd; | 419 | u16 first_bd = card->chan[ch].rx_first_bd; |
422 | ucshort last_bd = card->chan[ch].rx_last_bd; | 420 | u16 last_bd = card->chan[ch].rx_last_bd; |
423 | int ch_factor; | 421 | int ch_factor; |
424 | 422 | ||
425 | ch_factor = ch * N_DMA_RX_BUF; | 423 | ch_factor = ch * N_DMA_RX_BUF; |
@@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch) | |||
440 | static int dma_get_rx_frame_size(pc300_t * card, int ch) | 438 | static int dma_get_rx_frame_size(pc300_t * card, int ch) |
441 | { | 439 | { |
442 | volatile pcsca_bd_t __iomem *ptdescr; | 440 | volatile pcsca_bd_t __iomem *ptdescr; |
443 | ucshort first_bd = card->chan[ch].rx_first_bd; | 441 | u16 first_bd = card->chan[ch].rx_first_bd; |
444 | int rcvd = 0; | 442 | int rcvd = 0; |
445 | volatile ucchar status; | 443 | volatile u8 status; |
446 | 444 | ||
447 | ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); | 445 | ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); |
448 | while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { | 446 | while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { |
@@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch) | |||
462 | * dma_buf_write: writes a frame to the Tx DMA buffers | 460 | * dma_buf_write: writes a frame to the Tx DMA buffers |
463 | * NOTE: this function writes one frame at a time. | 461 | * NOTE: this function writes one frame at a time. |
464 | */ | 462 | */ |
465 | static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) | 463 | static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len) |
466 | { | 464 | { |
467 | int i, nchar; | 465 | int i, nchar; |
468 | volatile pcsca_bd_t __iomem *ptdescr; | 466 | volatile pcsca_bd_t __iomem *ptdescr; |
469 | int tosend = len; | 467 | int tosend = len; |
470 | ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; | 468 | u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1; |
471 | 469 | ||
472 | if (nbuf >= card->chan[ch].nfree_tx_bd) { | 470 | if (nbuf >= card->chan[ch].nfree_tx_bd) { |
473 | return -ENOMEM; | 471 | return -ENOMEM; |
@@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) | |||
509 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 507 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
510 | volatile pcsca_bd_t __iomem *ptdescr; | 508 | volatile pcsca_bd_t __iomem *ptdescr; |
511 | int rcvd = 0; | 509 | int rcvd = 0; |
512 | volatile ucchar status; | 510 | volatile u8 status; |
513 | 511 | ||
514 | ptdescr = (card->hw.rambase + | 512 | ptdescr = (card->hw.rambase + |
515 | RX_BD_ADDR(ch, chan->rx_first_bd)); | 513 | RX_BD_ADDR(ch, chan->rx_first_bd)); |
@@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) | |||
563 | static void tx_dma_stop(pc300_t * card, int ch) | 561 | static void tx_dma_stop(pc300_t * card, int ch) |
564 | { | 562 | { |
565 | void __iomem *scabase = card->hw.scabase; | 563 | void __iomem *scabase = card->hw.scabase; |
566 | ucchar drr_ena_bit = 1 << (5 + 2 * ch); | 564 | u8 drr_ena_bit = 1 << (5 + 2 * ch); |
567 | ucchar drr_rst_bit = 1 << (1 + 2 * ch); | 565 | u8 drr_rst_bit = 1 << (1 + 2 * ch); |
568 | 566 | ||
569 | /* Disable DMA */ | 567 | /* Disable DMA */ |
570 | cpc_writeb(scabase + DRR, drr_ena_bit); | 568 | cpc_writeb(scabase + DRR, drr_ena_bit); |
@@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch) | |||
574 | static void rx_dma_stop(pc300_t * card, int ch) | 572 | static void rx_dma_stop(pc300_t * card, int ch) |
575 | { | 573 | { |
576 | void __iomem *scabase = card->hw.scabase; | 574 | void __iomem *scabase = card->hw.scabase; |
577 | ucchar drr_ena_bit = 1 << (4 + 2 * ch); | 575 | u8 drr_ena_bit = 1 << (4 + 2 * ch); |
578 | ucchar drr_rst_bit = 1 << (2 * ch); | 576 | u8 drr_rst_bit = 1 << (2 * ch); |
579 | 577 | ||
580 | /* Disable DMA */ | 578 | /* Disable DMA */ |
581 | cpc_writeb(scabase + DRR, drr_ena_bit); | 579 | cpc_writeb(scabase + DRR, drr_ena_bit); |
@@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch) | |||
607 | /*************************/ | 605 | /*************************/ |
608 | /*** FALC Routines ***/ | 606 | /*** FALC Routines ***/ |
609 | /*************************/ | 607 | /*************************/ |
610 | static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) | 608 | static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd) |
611 | { | 609 | { |
612 | void __iomem *falcbase = card->hw.falcbase; | 610 | void __iomem *falcbase = card->hw.falcbase; |
613 | unsigned long i = 0; | 611 | unsigned long i = 0; |
@@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch) | |||
675 | static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) | 673 | static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) |
676 | { | 674 | { |
677 | void __iomem *falcbase = card->hw.falcbase; | 675 | void __iomem *falcbase = card->hw.falcbase; |
678 | ucchar tshf = card->chan[ch].falc.offset; | 676 | u8 tshf = card->chan[ch].falc.offset; |
679 | 677 | ||
680 | cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), | 678 | cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), |
681 | cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & | 679 | cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & |
@@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) | |||
691 | static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) | 689 | static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) |
692 | { | 690 | { |
693 | void __iomem *falcbase = card->hw.falcbase; | 691 | void __iomem *falcbase = card->hw.falcbase; |
694 | ucchar tshf = card->chan[ch].falc.offset; | 692 | u8 tshf = card->chan[ch].falc.offset; |
695 | 693 | ||
696 | cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), | 694 | cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), |
697 | cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | | 695 | cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | |
@@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch) | |||
812 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; | 810 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; |
813 | falc_t *pfalc = (falc_t *) & chan->falc; | 811 | falc_t *pfalc = (falc_t *) & chan->falc; |
814 | void __iomem *falcbase = card->hw.falcbase; | 812 | void __iomem *falcbase = card->hw.falcbase; |
815 | ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); | 813 | u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); |
816 | 814 | ||
817 | /* Switch to T1 mode (PCM 24) */ | 815 | /* Switch to T1 mode (PCM 24) */ |
818 | cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); | 816 | cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); |
@@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch) | |||
981 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; | 979 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; |
982 | falc_t *pfalc = (falc_t *) & chan->falc; | 980 | falc_t *pfalc = (falc_t *) & chan->falc; |
983 | void __iomem *falcbase = card->hw.falcbase; | 981 | void __iomem *falcbase = card->hw.falcbase; |
984 | ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); | 982 | u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); |
985 | 983 | ||
986 | /* Switch to E1 mode (PCM 30) */ | 984 | /* Switch to E1 mode (PCM 30) */ |
987 | cpc_writeb(falcbase + F_REG(FMR1, ch), | 985 | cpc_writeb(falcbase + F_REG(FMR1, ch), |
@@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch) | |||
1187 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; | 1185 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; |
1188 | falc_t *pfalc = (falc_t *) & chan->falc; | 1186 | falc_t *pfalc = (falc_t *) & chan->falc; |
1189 | void __iomem *falcbase = card->hw.falcbase; | 1187 | void __iomem *falcbase = card->hw.falcbase; |
1190 | ucchar dummy; | 1188 | u8 dummy; |
1191 | unsigned long flags; | 1189 | unsigned long flags; |
1192 | 1190 | ||
1193 | memset(pfalc, 0, sizeof(falc_t)); | 1191 | memset(pfalc, 0, sizeof(falc_t)); |
@@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch) | |||
1403 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; | 1401 | pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; |
1404 | falc_t *pfalc = (falc_t *) & chan->falc; | 1402 | falc_t *pfalc = (falc_t *) & chan->falc; |
1405 | void __iomem *falcbase = card->hw.falcbase; | 1403 | void __iomem *falcbase = card->hw.falcbase; |
1406 | ucshort counter; | 1404 | u16 counter; |
1407 | 1405 | ||
1408 | counter = cpc_readb(falcbase + F_REG(FECL, ch)); | 1406 | counter = cpc_readb(falcbase + F_REG(FECL, ch)); |
1409 | counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; | 1407 | counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; |
@@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) | |||
1729 | * Description: This routine returns the bit error counter value | 1727 | * Description: This routine returns the bit error counter value |
1730 | *---------------------------------------------------------------------------- | 1728 | *---------------------------------------------------------------------------- |
1731 | */ | 1729 | */ |
1732 | static ucshort falc_pattern_test_error(pc300_t * card, int ch) | 1730 | static u16 falc_pattern_test_error(pc300_t * card, int ch) |
1733 | { | 1731 | { |
1734 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 1732 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
1735 | falc_t *pfalc = (falc_t *) & chan->falc; | 1733 | falc_t *pfalc = (falc_t *) & chan->falc; |
@@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev) | |||
1776 | pc300_t *card = (pc300_t *) chan->card; | 1774 | pc300_t *card = (pc300_t *) chan->card; |
1777 | int ch = chan->channel; | 1775 | int ch = chan->channel; |
1778 | unsigned long flags; | 1776 | unsigned long flags; |
1779 | ucchar ilar; | 1777 | u8 ilar; |
1780 | 1778 | ||
1781 | dev->stats.tx_errors++; | 1779 | dev->stats.tx_errors++; |
1782 | dev->stats.tx_aborted_errors++; | 1780 | dev->stats.tx_aborted_errors++; |
@@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1807 | int i; | 1805 | int i; |
1808 | #endif | 1806 | #endif |
1809 | 1807 | ||
1810 | if (chan->conf.monitor) { | 1808 | if (!netif_carrier_ok(dev)) { |
1811 | /* In monitor mode no Tx is done: ignore packet */ | ||
1812 | dev_kfree_skb(skb); | ||
1813 | return 0; | ||
1814 | } else if (!netif_carrier_ok(dev)) { | ||
1815 | /* DCD must be OFF: drop packet */ | 1809 | /* DCD must be OFF: drop packet */ |
1816 | dev_kfree_skb(skb); | 1810 | dev_kfree_skb(skb); |
1817 | dev->stats.tx_errors++; | 1811 | dev->stats.tx_errors++; |
@@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1836 | } | 1830 | } |
1837 | 1831 | ||
1838 | /* Write buffer to DMA buffers */ | 1832 | /* Write buffer to DMA buffers */ |
1839 | if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { | 1833 | if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) { |
1840 | // printk("%s: write error. Dropping TX packet.\n", dev->name); | 1834 | // printk("%s: write error. Dropping TX packet.\n", dev->name); |
1841 | netif_stop_queue(dev); | 1835 | netif_stop_queue(dev); |
1842 | dev_kfree_skb(skb); | 1836 | dev_kfree_skb(skb); |
@@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev) | |||
2001 | static void sca_intr(pc300_t * card) | 1995 | static void sca_intr(pc300_t * card) |
2002 | { | 1996 | { |
2003 | void __iomem *scabase = card->hw.scabase; | 1997 | void __iomem *scabase = card->hw.scabase; |
2004 | volatile uclong status; | 1998 | volatile u32 status; |
2005 | int ch; | 1999 | int ch; |
2006 | int intr_count = 0; | 2000 | int intr_count = 0; |
2007 | unsigned char dsr_rx; | 2001 | unsigned char dsr_rx; |
@@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card) | |||
2016 | 2010 | ||
2017 | /**** Reception ****/ | 2011 | /**** Reception ****/ |
2018 | if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { | 2012 | if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { |
2019 | ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); | 2013 | u8 drx_stat = cpc_readb(scabase + DSR_RX(ch)); |
2020 | 2014 | ||
2021 | /* Clear RX interrupts */ | 2015 | /* Clear RX interrupts */ |
2022 | cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); | 2016 | cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); |
@@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card) | |||
2090 | 2084 | ||
2091 | /**** Transmission ****/ | 2085 | /**** Transmission ****/ |
2092 | if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { | 2086 | if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { |
2093 | ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); | 2087 | u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch)); |
2094 | 2088 | ||
2095 | /* Clear TX interrupts */ | 2089 | /* Clear TX interrupts */ |
2096 | cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); | 2090 | cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); |
@@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card) | |||
2134 | 2128 | ||
2135 | /**** MSCI ****/ | 2129 | /**** MSCI ****/ |
2136 | if (status & IR0_M(IR0_RXINTA, ch)) { | 2130 | if (status & IR0_M(IR0_RXINTA, ch)) { |
2137 | ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); | 2131 | u8 st1 = cpc_readb(scabase + M_REG(ST1, ch)); |
2138 | 2132 | ||
2139 | /* Clear MSCI interrupts */ | 2133 | /* Clear MSCI interrupts */ |
2140 | cpc_writeb(scabase + M_REG(ST1, ch), st1); | 2134 | cpc_writeb(scabase + M_REG(ST1, ch), st1); |
@@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card) | |||
2176 | } | 2170 | } |
2177 | } | 2171 | } |
2178 | 2172 | ||
2179 | static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) | 2173 | static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1) |
2180 | { | 2174 | { |
2181 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 2175 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
2182 | falc_t *pfalc = (falc_t *) & chan->falc; | 2176 | falc_t *pfalc = (falc_t *) & chan->falc; |
@@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) | |||
2201 | } | 2195 | } |
2202 | } | 2196 | } |
2203 | 2197 | ||
2204 | static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) | 2198 | static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp) |
2205 | { | 2199 | { |
2206 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 2200 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
2207 | falc_t *pfalc = (falc_t *) & chan->falc; | 2201 | falc_t *pfalc = (falc_t *) & chan->falc; |
@@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch) | |||
2231 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 2225 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
2232 | falc_t *pfalc = (falc_t *) & chan->falc; | 2226 | falc_t *pfalc = (falc_t *) & chan->falc; |
2233 | void __iomem *falcbase = card->hw.falcbase; | 2227 | void __iomem *falcbase = card->hw.falcbase; |
2234 | ucchar isr0, isr3, gis; | 2228 | u8 isr0, isr3, gis; |
2235 | ucchar dummy; | 2229 | u8 dummy; |
2236 | 2230 | ||
2237 | while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { | 2231 | while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { |
2238 | if (gis & GIS_ISR0) { | 2232 | if (gis & GIS_ISR0) { |
@@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch) | |||
2278 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; | 2272 | pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; |
2279 | falc_t *pfalc = (falc_t *) & chan->falc; | 2273 | falc_t *pfalc = (falc_t *) & chan->falc; |
2280 | void __iomem *falcbase = card->hw.falcbase; | 2274 | void __iomem *falcbase = card->hw.falcbase; |
2281 | ucchar isr1, isr2, isr3, gis, rsp; | 2275 | u8 isr1, isr2, isr3, gis, rsp; |
2282 | ucchar dummy; | 2276 | u8 dummy; |
2283 | 2277 | ||
2284 | while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { | 2278 | while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { |
2285 | rsp = cpc_readb(falcbase + F_REG(RSP, ch)); | 2279 | rsp = cpc_readb(falcbase + F_REG(RSP, ch)); |
@@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card) | |||
2361 | static irqreturn_t cpc_intr(int irq, void *dev_id) | 2355 | static irqreturn_t cpc_intr(int irq, void *dev_id) |
2362 | { | 2356 | { |
2363 | pc300_t *card = dev_id; | 2357 | pc300_t *card = dev_id; |
2364 | volatile ucchar plx_status; | 2358 | volatile u8 plx_status; |
2365 | 2359 | ||
2366 | if (!card) { | 2360 | if (!card) { |
2367 | #ifdef PC300_DEBUG_INTR | 2361 | #ifdef PC300_DEBUG_INTR |
@@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id) | |||
2400 | 2394 | ||
2401 | static void cpc_sca_status(pc300_t * card, int ch) | 2395 | static void cpc_sca_status(pc300_t * card, int ch) |
2402 | { | 2396 | { |
2403 | ucchar ilar; | 2397 | u8 ilar; |
2404 | void __iomem *scabase = card->hw.scabase; | 2398 | void __iomem *scabase = card->hw.scabase; |
2405 | unsigned long flags; | 2399 | unsigned long flags; |
2406 | 2400 | ||
@@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2818 | } | 2812 | } |
2819 | } | 2813 | } |
2820 | 2814 | ||
2821 | static int clock_rate_calc(uclong rate, uclong clock, int *br_io) | 2815 | static int clock_rate_calc(u32 rate, u32 clock, int *br_io) |
2822 | { | 2816 | { |
2823 | int br, tc; | 2817 | int br, tc; |
2824 | int br_pwr, error; | 2818 | int br_pwr, error; |
@@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d) | |||
2855 | void __iomem *scabase = card->hw.scabase; | 2849 | void __iomem *scabase = card->hw.scabase; |
2856 | void __iomem *plxbase = card->hw.plxbase; | 2850 | void __iomem *plxbase = card->hw.plxbase; |
2857 | int ch = chan->channel; | 2851 | int ch = chan->channel; |
2858 | uclong clkrate = chan->conf.phys_settings.clock_rate; | 2852 | u32 clkrate = chan->conf.phys_settings.clock_rate; |
2859 | uclong clktype = chan->conf.phys_settings.clock_type; | 2853 | u32 clktype = chan->conf.phys_settings.clock_type; |
2860 | ucshort encoding = chan->conf.proto_settings.encoding; | 2854 | u16 encoding = chan->conf.proto_settings.encoding; |
2861 | ucshort parity = chan->conf.proto_settings.parity; | 2855 | u16 parity = chan->conf.proto_settings.parity; |
2862 | ucchar md0, md2; | 2856 | u8 md0, md2; |
2863 | 2857 | ||
2864 | /* Reset the channel */ | 2858 | /* Reset the channel */ |
2865 | cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); | 2859 | cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); |
2866 | 2860 | ||
@@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev) | |||
3152 | printk("pc300: cpc_open"); | 3146 | printk("pc300: cpc_open"); |
3153 | #endif | 3147 | #endif |
3154 | 3148 | ||
3155 | #ifdef FIXME | ||
3156 | if (hdlc->proto.id == IF_PROTO_PPP) { | ||
3157 | d->if_ptr = &hdlc->state.ppp.pppdev; | ||
3158 | } | ||
3159 | #endif | ||
3160 | |||
3161 | result = hdlc_open(dev); | 3149 | result = hdlc_open(dev); |
3162 | if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { | 3150 | |
3163 | dev->priv = d; | 3151 | if (result) |
3164 | } | ||
3165 | if (result) { | ||
3166 | return result; | 3152 | return result; |
3167 | } | ||
3168 | 3153 | ||
3169 | sprintf(ifr.ifr_name, "%s", dev->name); | 3154 | sprintf(ifr.ifr_name, "%s", dev->name); |
3170 | result = cpc_opench(d); | 3155 | result = cpc_opench(d); |
@@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev) | |||
3197 | CPC_UNLOCK(card, flags); | 3182 | CPC_UNLOCK(card, flags); |
3198 | 3183 | ||
3199 | hdlc_close(dev); | 3184 | hdlc_close(dev); |
3200 | if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { | 3185 | |
3201 | d->if_ptr = NULL; | ||
3202 | } | ||
3203 | #ifdef CONFIG_PC300_MLPPP | 3186 | #ifdef CONFIG_PC300_MLPPP |
3204 | if (chan->conf.proto == PC300_PROTO_MLPPP) { | 3187 | if (chan->conf.proto == PC300_PROTO_MLPPP) { |
3205 | cpc_tty_unregister_service(d); | 3188 | cpc_tty_unregister_service(d); |
@@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev) | |||
3210 | return 0; | 3193 | return 0; |
3211 | } | 3194 | } |
3212 | 3195 | ||
3213 | static uclong detect_ram(pc300_t * card) | 3196 | static u32 detect_ram(pc300_t * card) |
3214 | { | 3197 | { |
3215 | uclong i; | 3198 | u32 i; |
3216 | ucchar data; | 3199 | u8 data; |
3217 | void __iomem *rambase = card->hw.rambase; | 3200 | void __iomem *rambase = card->hw.rambase; |
3218 | 3201 | ||
3219 | card->hw.ramsize = PC300_RAMSIZE; | 3202 | card->hw.ramsize = PC300_RAMSIZE; |
3220 | /* Let's find out how much RAM is present on this board */ | 3203 | /* Let's find out how much RAM is present on this board */ |
3221 | for (i = 0; i < card->hw.ramsize; i++) { | 3204 | for (i = 0; i < card->hw.ramsize; i++) { |
3222 | data = (ucchar) (i & 0xff); | 3205 | data = (u8)(i & 0xff); |
3223 | cpc_writeb(rambase + i, data); | 3206 | cpc_writeb(rambase + i, data); |
3224 | if (cpc_readb(rambase + i) != data) { | 3207 | if (cpc_readb(rambase + i) != data) { |
3225 | break; | 3208 | break; |
@@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card) | |||
3296 | cpc_writeb(card->hw.scabase + DMER, 0x80); | 3279 | cpc_writeb(card->hw.scabase + DMER, 0x80); |
3297 | 3280 | ||
3298 | if (card->hw.type == PC300_TE) { | 3281 | if (card->hw.type == PC300_TE) { |
3299 | ucchar reg1; | 3282 | u8 reg1; |
3300 | 3283 | ||
3301 | /* Check CPLD version */ | 3284 | /* Check CPLD version */ |
3302 | reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); | 3285 | reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); |
@@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card) | |||
3360 | chan->nfree_tx_bd = N_DMA_TX_BUF; | 3343 | chan->nfree_tx_bd = N_DMA_TX_BUF; |
3361 | 3344 | ||
3362 | d->chan = chan; | 3345 | d->chan = chan; |
3363 | d->tx_skb = NULL; | ||
3364 | d->trace_on = 0; | 3346 | d->trace_on = 0; |
3365 | d->line_on = 0; | 3347 | d->line_on = 0; |
3366 | d->line_off = 0; | 3348 | d->line_off = 0; |
@@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3431 | { | 3413 | { |
3432 | static int first_time = 1; | 3414 | static int first_time = 1; |
3433 | int err, eeprom_outdated = 0; | 3415 | int err, eeprom_outdated = 0; |
3434 | ucshort device_id; | 3416 | u16 device_id; |
3435 | pc300_t *card; | 3417 | pc300_t *card; |
3436 | 3418 | ||
3437 | if (first_time) { | 3419 | if (first_time) { |
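The pc300 hunks above are a mechanical type cleanup: the driver's private ucchar, ucshort and uclong aliases are replaced throughout by the kernel's fixed-width u8, u16 and u32 types. A minimal sketch of the assumed equivalence (the original typedef definitions live in the pc300 headers and are not shown here, so treat these as illustrative):

	#include <linux/types.h>

	/* Assumed pc300-private aliases; the rename above treats them as
	 * exact synonyms for the standard fixed-width kernel types. */
	typedef u8  ucchar;
	typedef u16 ucshort;
	typedef u32 uclong;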
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 44a89df1b8bf..c0235844a4d5 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * | 8 | * |
9 | * (c) Copyright 1999, 2001 Alan Cox | 9 | * (c) Copyright 1999, 2001 Alan Cox |
10 | * (c) Copyright 2001 Red Hat Inc. | 10 | * (c) Copyright 2001 Red Hat Inc. |
11 | * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> | ||
11 | * | 12 | * |
12 | */ | 13 | */ |
13 | 14 | ||
@@ -19,6 +20,7 @@ | |||
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
20 | #include <linux/if_arp.h> | 21 | #include <linux/if_arp.h> |
21 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/hdlc.h> | ||
22 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
23 | #include <linux/init.h> | 25 | #include <linux/init.h> |
24 | #include <net/arp.h> | 26 | #include <net/arp.h> |
@@ -27,22 +29,19 @@ | |||
27 | #include <asm/io.h> | 29 | #include <asm/io.h> |
28 | #include <asm/dma.h> | 30 | #include <asm/dma.h> |
29 | #include <asm/byteorder.h> | 31 | #include <asm/byteorder.h> |
30 | #include <net/syncppp.h> | ||
31 | #include "z85230.h" | 32 | #include "z85230.h" |
32 | 33 | ||
33 | 34 | ||
34 | struct slvl_device | 35 | struct slvl_device |
35 | { | 36 | { |
36 | void *if_ptr; /* General purpose pointer (used by SPPP) */ | ||
37 | struct z8530_channel *chan; | 37 | struct z8530_channel *chan; |
38 | struct ppp_device pppdev; | ||
39 | int channel; | 38 | int channel; |
40 | }; | 39 | }; |
41 | 40 | ||
42 | 41 | ||
43 | struct slvl_board | 42 | struct slvl_board |
44 | { | 43 | { |
45 | struct slvl_device *dev[2]; | 44 | struct slvl_device dev[2]; |
46 | struct z8530_dev board; | 45 | struct z8530_dev board; |
47 | int iobase; | 46 | int iobase; |
48 | }; | 47 | }; |
@@ -51,72 +50,69 @@ struct slvl_board | |||
51 | * Network driver support routines | 50 | * Network driver support routines |
52 | */ | 51 | */ |
53 | 52 | ||
53 | static inline struct slvl_device* dev_to_chan(struct net_device *dev) | ||
54 | { | ||
55 | return (struct slvl_device *)dev_to_hdlc(dev)->priv; | ||
56 | } | ||
57 | |||
54 | /* | 58 | /* |
55 | * Frame receive. Simple for our card as we do sync ppp and there | 59 | * Frame receive. Simple for our card as we do HDLC and there |
56 | * is no funny garbage involved | 60 | * is no funny garbage involved |
57 | */ | 61 | */ |
58 | 62 | ||
59 | static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) | 63 | static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) |
60 | { | 64 | { |
61 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ | 65 | /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ |
62 | skb_trim(skb, skb->len-2); | 66 | skb_trim(skb, skb->len - 2); |
63 | skb->protocol=htons(ETH_P_WAN_PPP); | 67 | skb->protocol = hdlc_type_trans(skb, c->netdevice); |
64 | skb_reset_mac_header(skb); | 68 | skb_reset_mac_header(skb); |
65 | skb->dev=c->netdevice; | 69 | skb->dev = c->netdevice; |
66 | /* | ||
67 | * Send it to the PPP layer. We don't have time to process | ||
68 | * it right now. | ||
69 | */ | ||
70 | netif_rx(skb); | 70 | netif_rx(skb); |
71 | c->netdevice->last_rx = jiffies; | 71 | c->netdevice->last_rx = jiffies; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * We've been placed in the UP state | 75 | * We've been placed in the UP state |
76 | */ | 76 | */ |
77 | 77 | ||
78 | static int sealevel_open(struct net_device *d) | 78 | static int sealevel_open(struct net_device *d) |
79 | { | 79 | { |
80 | struct slvl_device *slvl=d->priv; | 80 | struct slvl_device *slvl = dev_to_chan(d); |
81 | int err = -1; | 81 | int err = -1; |
82 | int unit = slvl->channel; | 82 | int unit = slvl->channel; |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Link layer up. | 85 | * Link layer up. |
86 | */ | 86 | */ |
87 | 87 | ||
88 | switch(unit) | 88 | switch (unit) |
89 | { | 89 | { |
90 | case 0: | 90 | case 0: |
91 | err=z8530_sync_dma_open(d, slvl->chan); | 91 | err = z8530_sync_dma_open(d, slvl->chan); |
92 | break; | 92 | break; |
93 | case 1: | 93 | case 1: |
94 | err=z8530_sync_open(d, slvl->chan); | 94 | err = z8530_sync_open(d, slvl->chan); |
95 | break; | 95 | break; |
96 | } | 96 | } |
97 | 97 | ||
98 | if(err) | 98 | if (err) |
99 | return err; | 99 | return err; |
100 | /* | 100 | |
101 | * Begin PPP | 101 | err = hdlc_open(d); |
102 | */ | 102 | if (err) { |
103 | err=sppp_open(d); | 103 | switch (unit) { |
104 | if(err) | ||
105 | { | ||
106 | switch(unit) | ||
107 | { | ||
108 | case 0: | 104 | case 0: |
109 | z8530_sync_dma_close(d, slvl->chan); | 105 | z8530_sync_dma_close(d, slvl->chan); |
110 | break; | 106 | break; |
111 | case 1: | 107 | case 1: |
112 | z8530_sync_close(d, slvl->chan); | 108 | z8530_sync_close(d, slvl->chan); |
113 | break; | 109 | break; |
114 | } | 110 | } |
115 | return err; | 111 | return err; |
116 | } | 112 | } |
117 | 113 | ||
118 | slvl->chan->rx_function=sealevel_input; | 114 | slvl->chan->rx_function = sealevel_input; |
119 | 115 | ||
120 | /* | 116 | /* |
121 | * Go go go | 117 | * Go go go |
122 | */ | 118 | */ |
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d) | |||
126 | 122 | ||
127 | static int sealevel_close(struct net_device *d) | 123 | static int sealevel_close(struct net_device *d) |
128 | { | 124 | { |
129 | struct slvl_device *slvl=d->priv; | 125 | struct slvl_device *slvl = dev_to_chan(d); |
130 | int unit = slvl->channel; | 126 | int unit = slvl->channel; |
131 | 127 | ||
132 | /* | 128 | /* |
133 | * Discard new frames | 129 | * Discard new frames |
134 | */ | 130 | */ |
135 | |||
136 | slvl->chan->rx_function=z8530_null_rx; | ||
137 | |||
138 | /* | ||
139 | * PPP off | ||
140 | */ | ||
141 | sppp_close(d); | ||
142 | /* | ||
143 | * Link layer down | ||
144 | */ | ||
145 | 131 | ||
132 | slvl->chan->rx_function = z8530_null_rx; | ||
133 | |||
134 | hdlc_close(d); | ||
146 | netif_stop_queue(d); | 135 | netif_stop_queue(d); |
147 | 136 | ||
148 | switch(unit) | 137 | switch (unit) |
149 | { | 138 | { |
150 | case 0: | 139 | case 0: |
151 | z8530_sync_dma_close(d, slvl->chan); | 140 | z8530_sync_dma_close(d, slvl->chan); |
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d) | |||
159 | 148 | ||
160 | static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) | 149 | static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) |
161 | { | 150 | { |
162 | /* struct slvl_device *slvl=d->priv; | 151 | /* struct slvl_device *slvl=dev_to_chan(d); |
163 | z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ | 152 | z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ |
164 | return sppp_do_ioctl(d, ifr,cmd); | 153 | return hdlc_ioctl(d, ifr, cmd); |
165 | } | ||
166 | |||
167 | static struct net_device_stats *sealevel_get_stats(struct net_device *d) | ||
168 | { | ||
169 | struct slvl_device *slvl=d->priv; | ||
170 | if(slvl) | ||
171 | return z8530_get_stats(slvl->chan); | ||
172 | else | ||
173 | return NULL; | ||
174 | } | 154 | } |
175 | 155 | ||
176 | /* | 156 | /* |
177 | * Passed PPP frames, fire them downwind. | 157 | * Passed network frames, fire them downwind. |
178 | */ | 158 | */ |
179 | 159 | ||
180 | static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) | 160 | static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) |
181 | { | 161 | { |
182 | struct slvl_device *slvl=d->priv; | 162 | return z8530_queue_xmit(dev_to_chan(d)->chan, skb); |
183 | return z8530_queue_xmit(slvl->chan, skb); | ||
184 | } | 163 | } |
185 | 164 | ||
186 | static int sealevel_neigh_setup(struct neighbour *n) | 165 | static int sealevel_attach(struct net_device *dev, unsigned short encoding, |
166 | unsigned short parity) | ||
187 | { | 167 | { |
188 | if (n->nud_state == NUD_NONE) { | 168 | if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) |
189 | n->ops = &arp_broken_ops; | 169 | return 0; |
190 | n->output = n->ops->output; | 170 | return -EINVAL; |
191 | } | ||
192 | return 0; | ||
193 | } | 171 | } |
194 | 172 | ||
195 | static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) | 173 | static int slvl_setup(struct slvl_device *sv, int iobase, int irq) |
196 | { | 174 | { |
197 | if (p->tbl->family == AF_INET) { | 175 | struct net_device *dev = alloc_hdlcdev(sv); |
198 | p->neigh_setup = sealevel_neigh_setup; | 176 | if (!dev) |
199 | p->ucast_probes = 0; | 177 | return -1; |
200 | p->mcast_probes = 0; | 178 | |
179 | dev_to_hdlc(dev)->attach = sealevel_attach; | ||
180 | dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; | ||
181 | dev->open = sealevel_open; | ||
182 | dev->stop = sealevel_close; | ||
183 | dev->do_ioctl = sealevel_ioctl; | ||
184 | dev->base_addr = iobase; | ||
185 | dev->irq = irq; | ||
186 | |||
187 | if (register_hdlc_device(dev)) { | ||
188 | printk(KERN_ERR "sealevel: unable to register HDLC device\n"); | ||
189 | free_netdev(dev); | ||
190 | return -1; | ||
201 | } | 191 | } |
202 | return 0; | ||
203 | } | ||
204 | 192 | ||
205 | static int sealevel_attach(struct net_device *dev) | 193 | sv->chan->netdevice = dev; |
206 | { | ||
207 | struct slvl_device *sv = dev->priv; | ||
208 | sppp_attach(&sv->pppdev); | ||
209 | return 0; | 194 | return 0; |
210 | } | 195 | } |
211 | 196 | ||
212 | static void sealevel_detach(struct net_device *dev) | ||
213 | { | ||
214 | sppp_detach(dev); | ||
215 | } | ||
216 | |||
217 | static void slvl_setup(struct net_device *d) | ||
218 | { | ||
219 | d->open = sealevel_open; | ||
220 | d->stop = sealevel_close; | ||
221 | d->init = sealevel_attach; | ||
222 | d->uninit = sealevel_detach; | ||
223 | d->hard_start_xmit = sealevel_queue_xmit; | ||
224 | d->get_stats = sealevel_get_stats; | ||
225 | d->set_multicast_list = NULL; | ||
226 | d->do_ioctl = sealevel_ioctl; | ||
227 | d->neigh_setup = sealevel_neigh_setup_dev; | ||
228 | d->set_mac_address = NULL; | ||
229 | |||
230 | } | ||
231 | |||
232 | static inline struct slvl_device *slvl_alloc(int iobase, int irq) | ||
233 | { | ||
234 | struct net_device *d; | ||
235 | struct slvl_device *sv; | ||
236 | |||
237 | d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d", | ||
238 | slvl_setup); | ||
239 | |||
240 | if (!d) | ||
241 | return NULL; | ||
242 | |||
243 | sv = d->priv; | ||
244 | d->ml_priv = sv; | ||
245 | sv->if_ptr = &sv->pppdev; | ||
246 | sv->pppdev.dev = d; | ||
247 | d->base_addr = iobase; | ||
248 | d->irq = irq; | ||
249 | |||
250 | return sv; | ||
251 | } | ||
252 | |||
253 | 197 | ||
254 | /* | 198 | /* |
255 | * Allocate and setup Sealevel board. | 199 | * Allocate and setup Sealevel board. |
256 | */ | 200 | */ |
257 | 201 | ||
258 | static __init struct slvl_board *slvl_init(int iobase, int irq, | 202 | static __init struct slvl_board *slvl_init(int iobase, int irq, |
259 | int txdma, int rxdma, int slow) | 203 | int txdma, int rxdma, int slow) |
260 | { | 204 | { |
261 | struct z8530_dev *dev; | 205 | struct z8530_dev *dev; |
262 | struct slvl_board *b; | 206 | struct slvl_board *b; |
263 | 207 | ||
264 | /* | 208 | /* |
265 | * Get the needed I/O space | 209 | * Get the needed I/O space |
266 | */ | 210 | */ |
267 | 211 | ||
268 | if(!request_region(iobase, 8, "Sealevel 4021")) | 212 | if (!request_region(iobase, 8, "Sealevel 4021")) { |
269 | { | 213 | printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", |
270 | printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); | 214 | iobase); |
271 | return NULL; | 215 | return NULL; |
272 | } | 216 | } |
273 | |||
274 | b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); | ||
275 | if(!b) | ||
276 | goto fail3; | ||
277 | 217 | ||
278 | if (!(b->dev[0]= slvl_alloc(iobase, irq))) | 218 | b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); |
279 | goto fail2; | 219 | if (!b) |
220 | goto err_kzalloc; | ||
280 | 221 | ||
281 | b->dev[0]->chan = &b->board.chanA; | 222 | b->dev[0].chan = &b->board.chanA; |
282 | b->dev[0]->channel = 0; | 223 | b->dev[0].channel = 0; |
283 | |||
284 | if (!(b->dev[1] = slvl_alloc(iobase, irq))) | ||
285 | goto fail1_0; | ||
286 | 224 | ||
287 | b->dev[1]->chan = &b->board.chanB; | 225 | b->dev[1].chan = &b->board.chanB; |
288 | b->dev[1]->channel = 1; | 226 | b->dev[1].channel = 1; |
289 | 227 | ||
290 | dev = &b->board; | 228 | dev = &b->board; |
291 | 229 | ||
292 | /* | 230 | /* |
293 | * Stuff in the I/O addressing | 231 | * Stuff in the I/O addressing |
294 | */ | 232 | */ |
295 | 233 | ||
296 | dev->active = 0; | 234 | dev->active = 0; |
297 | 235 | ||
298 | b->iobase = iobase; | 236 | b->iobase = iobase; |
299 | 237 | ||
300 | /* | 238 | /* |
301 | * Select 8530 delays for the old board | 239 | * Select 8530 delays for the old board |
302 | */ | 240 | */ |
303 | 241 | ||
304 | if(slow) | 242 | if (slow) |
305 | iobase |= Z8530_PORT_SLEEP; | 243 | iobase |= Z8530_PORT_SLEEP; |
306 | 244 | ||
307 | dev->chanA.ctrlio=iobase+1; | 245 | dev->chanA.ctrlio = iobase + 1; |
308 | dev->chanA.dataio=iobase; | 246 | dev->chanA.dataio = iobase; |
309 | dev->chanB.ctrlio=iobase+3; | 247 | dev->chanB.ctrlio = iobase + 3; |
310 | dev->chanB.dataio=iobase+2; | 248 | dev->chanB.dataio = iobase + 2; |
311 | 249 | ||
312 | dev->chanA.irqs=&z8530_nop; | 250 | dev->chanA.irqs = &z8530_nop; |
313 | dev->chanB.irqs=&z8530_nop; | 251 | dev->chanB.irqs = &z8530_nop; |
314 | 252 | ||
315 | /* | 253 | /* |
316 | * Assert DTR enable DMA | 254 | * Assert DTR enable DMA |
317 | */ | 255 | */ |
318 | 256 | ||
319 | outb(3|(1<<7), b->iobase+4); | 257 | outb(3 | (1 << 7), b->iobase + 4); |
320 | 258 | ||
321 | 259 | ||
322 | /* We want a fast IRQ for this device. Actually we'd like an even faster | 260 | /* We want a fast IRQ for this device. Actually we'd like an even faster |
323 | IRQ ;) - This is one driver RtLinux is made for */ | 261 | IRQ ;) - This is one driver RtLinux is made for */ |
324 | 262 | ||
325 | if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) | 263 | if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, |
326 | { | 264 | "SeaLevel", dev) < 0) { |
327 | printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); | 265 | printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); |
328 | goto fail1_1; | 266 | goto err_request_irq; |
329 | } | 267 | } |
330 | 268 | ||
331 | dev->irq=irq; | 269 | dev->irq = irq; |
332 | dev->chanA.private=&b->dev[0]; | 270 | dev->chanA.private = &b->dev[0]; |
333 | dev->chanB.private=&b->dev[1]; | 271 | dev->chanB.private = &b->dev[1]; |
334 | dev->chanA.netdevice=b->dev[0]->pppdev.dev; | 272 | dev->chanA.dev = dev; |
335 | dev->chanB.netdevice=b->dev[1]->pppdev.dev; | 273 | dev->chanB.dev = dev; |
336 | dev->chanA.dev=dev; | 274 | |
337 | dev->chanB.dev=dev; | 275 | dev->chanA.txdma = 3; |
338 | 276 | dev->chanA.rxdma = 1; | |
339 | dev->chanA.txdma=3; | 277 | if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) |
340 | dev->chanA.rxdma=1; | 278 | goto err_dma_tx; |
341 | if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) | 279 | |
342 | goto fail; | 280 | if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) |
343 | 281 | goto err_dma_rx; | |
344 | if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) | 282 | |
345 | goto dmafail; | ||
346 | |||
347 | disable_irq(irq); | 283 | disable_irq(irq); |
348 | 284 | ||
349 | /* | 285 | /* |
350 | * Begin normal initialise | 286 | * Begin normal initialise |
351 | */ | 287 | */ |
352 | 288 | ||
353 | if(z8530_init(dev)!=0) | 289 | if (z8530_init(dev) != 0) { |
354 | { | ||
355 | printk(KERN_ERR "Z8530 series device not found.\n"); | 290 | printk(KERN_ERR "Z8530 series device not found.\n"); |
356 | enable_irq(irq); | 291 | enable_irq(irq); |
357 | goto dmafail2; | 292 | goto free_hw; |
358 | } | 293 | } |
359 | if(dev->type==Z85C30) | 294 | if (dev->type == Z85C30) { |
360 | { | ||
361 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); | 295 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); |
362 | z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); | 296 | z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); |
363 | } | 297 | } else { |
364 | else | ||
365 | { | ||
366 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); | 298 | z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); |
367 | z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); | 299 | z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); |
368 | } | 300 | } |
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq, | |||
370 | /* | 302 | /* |
371 | * Now we can take the IRQ | 303 | * Now we can take the IRQ |
372 | */ | 304 | */ |
373 | 305 | ||
374 | enable_irq(irq); | 306 | enable_irq(irq); |
375 | 307 | ||
376 | if (register_netdev(b->dev[0]->pppdev.dev)) | 308 | if (slvl_setup(&b->dev[0], iobase, irq)) |
377 | goto dmafail2; | 309 | goto free_hw; |
378 | 310 | if (slvl_setup(&b->dev[1], iobase, irq)) | |
379 | if (register_netdev(b->dev[1]->pppdev.dev)) | 311 | goto free_netdev0; |
380 | goto fail_unit; | ||
381 | 312 | ||
382 | z8530_describe(dev, "I/O", iobase); | 313 | z8530_describe(dev, "I/O", iobase); |
383 | dev->active=1; | 314 | dev->active = 1; |
384 | return b; | 315 | return b; |
385 | 316 | ||
386 | fail_unit: | 317 | free_netdev0: |
387 | unregister_netdev(b->dev[0]->pppdev.dev); | 318 | unregister_hdlc_device(b->dev[0].chan->netdevice); |
388 | 319 | free_netdev(b->dev[0].chan->netdevice); | |
389 | dmafail2: | 320 | free_hw: |
390 | free_dma(dev->chanA.rxdma); | 321 | free_dma(dev->chanA.rxdma); |
391 | dmafail: | 322 | err_dma_rx: |
392 | free_dma(dev->chanA.txdma); | 323 | free_dma(dev->chanA.txdma); |
393 | fail: | 324 | err_dma_tx: |
394 | free_irq(irq, dev); | 325 | free_irq(irq, dev); |
395 | fail1_1: | 326 | err_request_irq: |
396 | free_netdev(b->dev[1]->pppdev.dev); | ||
397 | fail1_0: | ||
398 | free_netdev(b->dev[0]->pppdev.dev); | ||
399 | fail2: | ||
400 | kfree(b); | 327 | kfree(b); |
401 | fail3: | 328 | err_kzalloc: |
402 | release_region(iobase,8); | 329 | release_region(iobase, 8); |
403 | return NULL; | 330 | return NULL; |
404 | } | 331 | } |
405 | 332 | ||
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b) | |||
408 | int u; | 335 | int u; |
409 | 336 | ||
410 | z8530_shutdown(&b->board); | 337 | z8530_shutdown(&b->board); |
411 | 338 | ||
412 | for(u=0; u<2; u++) | 339 | for (u = 0; u < 2; u++) |
413 | { | 340 | { |
414 | struct net_device *d = b->dev[u]->pppdev.dev; | 341 | struct net_device *d = b->dev[u].chan->netdevice; |
415 | unregister_netdev(d); | 342 | unregister_hdlc_device(d); |
416 | free_netdev(d); | 343 | free_netdev(d); |
417 | } | 344 | } |
418 | 345 | ||
419 | free_irq(b->board.irq, &b->board); | 346 | free_irq(b->board.irq, &b->board); |
420 | free_dma(b->board.chanA.rxdma); | 347 | free_dma(b->board.chanA.rxdma); |
421 | free_dma(b->board.chanA.txdma); | 348 | free_dma(b->board.chanA.txdma); |
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit; | |||
451 | 378 | ||
452 | static int __init slvl_init_module(void) | 379 | static int __init slvl_init_module(void) |
453 | { | 380 | { |
454 | #ifdef MODULE | ||
455 | printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n"); | ||
456 | printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n"); | ||
457 | #endif | ||
458 | slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); | 381 | slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); |
459 | 382 | ||
460 | return slvl_unit ? 0 : -ENODEV; | 383 | return slvl_unit ? 0 : -ENODEV; |
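The sealevel.c conversion above drops the embedded syncppp ppp_device and gives each channel a net_device obtained from alloc_hdlcdev(): the HDLC callbacks are wired through dev_to_hdlc(), the device is registered with register_hdlc_device(), and dev_to_chan() recovers the slvl_device from the hdlc private pointer. A condensed sketch of that attach pattern, using the same callbacks as the patch but omitting the z8530 plumbing (the function name is illustrative only, not part of the driver):

	/* Sketch of the generic-HDLC attach pattern used by slvl_setup(). */
	static int example_hdlc_setup(struct slvl_device *sv, int iobase, int irq)
	{
		struct net_device *dev = alloc_hdlcdev(sv);	/* sv becomes hdlc->priv */

		if (!dev)
			return -1;

		dev_to_hdlc(dev)->attach = sealevel_attach;	/* validate encoding/parity */
		dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;	/* hand frames to the Z8530 */
		dev->open = sealevel_open;		/* calls hdlc_open() after the sync open */
		dev->stop = sealevel_close;		/* calls hdlc_close() before the sync close */
		dev->do_ioctl = sealevel_ioctl;		/* forwards to hdlc_ioctl() */
		dev->base_addr = iobase;
		dev->irq = irq;

		if (register_hdlc_device(dev)) {
			free_netdev(dev);
			return -1;
		}

		sv->chan->netdevice = dev;		/* so the channel can find its netdev */
		return 0;
	}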
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 29b4b94e4947..327d58589e12 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c | |||
@@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) | |||
230 | skb->dev=dev; | 230 | skb->dev=dev; |
231 | skb_reset_mac_header(skb); | 231 | skb_reset_mac_header(skb); |
232 | 232 | ||
233 | if (dev->flags & IFF_RUNNING) | ||
234 | { | ||
235 | /* Count received bytes, add FCS and one flag */ | ||
236 | sp->ibytes+= skb->len + 3; | ||
237 | sp->ipkts++; | ||
238 | } | ||
239 | |||
240 | if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { | 233 | if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { |
241 | /* Too small packet, drop it. */ | 234 | /* Too small packet, drop it. */ |
242 | if (sp->pp_flags & PP_DEBUG) | 235 | if (sp->pp_flags & PP_DEBUG) |
@@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type, | |||
832 | sppp_print_bytes ((u8*) (lh+1), len); | 825 | sppp_print_bytes ((u8*) (lh+1), len); |
833 | printk (">\n"); | 826 | printk (">\n"); |
834 | } | 827 | } |
835 | sp->obytes += skb->len; | ||
836 | /* Control is high priority so it doesn't get queued behind data */ | 828 | /* Control is high priority so it doesn't get queued behind data */ |
837 | skb->priority=TC_PRIO_CONTROL; | 829 | skb->priority=TC_PRIO_CONTROL; |
838 | skb->dev = dev; | 830 | skb->dev = dev; |
@@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2) | |||
875 | printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", | 867 | printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", |
876 | dev->name, ntohl (ch->type), ch->par1, | 868 | dev->name, ntohl (ch->type), ch->par1, |
877 | ch->par2, ch->rel, ch->time0, ch->time1); | 869 | ch->par2, ch->rel, ch->time0, ch->time1); |
878 | sp->obytes += skb->len; | ||
879 | skb->priority=TC_PRIO_CONTROL; | 870 | skb->priority=TC_PRIO_CONTROL; |
880 | skb->dev = dev; | 871 | skb->dev = dev; |
881 | skb_queue_tail(&tx_queue, skb); | 872 | skb_queue_tail(&tx_queue, skb); |
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 98ef400908b8..243bd8d918fe 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/netdevice.h> | 43 | #include <linux/netdevice.h> |
44 | #include <linux/if_arp.h> | 44 | #include <linux/if_arp.h> |
45 | #include <linux/delay.h> | 45 | #include <linux/delay.h> |
46 | #include <linux/hdlc.h> | ||
46 | #include <linux/ioport.h> | 47 | #include <linux/ioport.h> |
47 | #include <linux/init.h> | 48 | #include <linux/init.h> |
48 | #include <asm/dma.h> | 49 | #include <asm/dma.h> |
@@ -51,7 +52,6 @@ | |||
51 | #define RT_UNLOCK | 52 | #define RT_UNLOCK |
52 | #include <linux/spinlock.h> | 53 | #include <linux/spinlock.h> |
53 | 54 | ||
54 | #include <net/syncppp.h> | ||
55 | #include "z85230.h" | 55 | #include "z85230.h" |
56 | 56 | ||
57 | 57 | ||
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c) | |||
440 | * A status event occurred in PIO synchronous mode. There are several | 440 | * A status event occurred in PIO synchronous mode. There are several |
441 | * reasons the chip will bother us here. A transmit underrun means we | 441 | * reasons the chip will bother us here. A transmit underrun means we |
442 | * failed to feed the chip fast enough and just broke a packet. A DCD | 442 | * failed to feed the chip fast enough and just broke a packet. A DCD |
443 | * change is a line up or down. We communicate that back to the protocol | 443 | * change is a line up or down. |
444 | * layer for synchronous PPP to renegotiate. | ||
445 | */ | 444 | */ |
446 | 445 | ||
447 | static void z8530_status(struct z8530_channel *chan) | 446 | static void z8530_status(struct z8530_channel *chan) |
448 | { | 447 | { |
449 | u8 status, altered; | 448 | u8 status, altered; |
450 | 449 | ||
451 | status=read_zsreg(chan, R0); | 450 | status = read_zsreg(chan, R0); |
452 | altered=chan->status^status; | 451 | altered = chan->status ^ status; |
453 | 452 | ||
454 | chan->status=status; | 453 | chan->status = status; |
455 | 454 | ||
456 | if(status&TxEOM) | 455 | if (status & TxEOM) { |
457 | { | ||
458 | /* printk("%s: Tx underrun.\n", chan->dev->name); */ | 456 | /* printk("%s: Tx underrun.\n", chan->dev->name); */ |
459 | chan->stats.tx_fifo_errors++; | 457 | chan->netdevice->stats.tx_fifo_errors++; |
460 | write_zsctrl(chan, ERR_RES); | 458 | write_zsctrl(chan, ERR_RES); |
461 | z8530_tx_done(chan); | 459 | z8530_tx_done(chan); |
462 | } | 460 | } |
463 | 461 | ||
464 | if(altered&chan->dcdcheck) | 462 | if (altered & chan->dcdcheck) |
465 | { | 463 | { |
466 | if(status&chan->dcdcheck) | 464 | if (status & chan->dcdcheck) { |
467 | { | ||
468 | printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); | 465 | printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); |
469 | write_zsreg(chan, R3, chan->regs[3]|RxENABLE); | 466 | write_zsreg(chan, R3, chan->regs[3] | RxENABLE); |
470 | if(chan->netdevice && | 467 | if (chan->netdevice) |
471 | ((chan->netdevice->type == ARPHRD_HDLC) || | 468 | netif_carrier_on(chan->netdevice); |
472 | (chan->netdevice->type == ARPHRD_PPP))) | 469 | } else { |
473 | sppp_reopen(chan->netdevice); | ||
474 | } | ||
475 | else | ||
476 | { | ||
477 | printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); | 470 | printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); |
478 | write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); | 471 | write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); |
479 | z8530_flush_fifo(chan); | 472 | z8530_flush_fifo(chan); |
473 | if (chan->netdevice) | ||
474 | netif_carrier_off(chan->netdevice); | ||
480 | } | 475 | } |
481 | 476 | ||
482 | } | 477 | } |
483 | write_zsctrl(chan, RES_EXT_INT); | 478 | write_zsctrl(chan, RES_EXT_INT); |
484 | write_zsctrl(chan, RES_H_IUS); | 479 | write_zsctrl(chan, RES_H_IUS); |
485 | } | 480 | } |
486 | 481 | ||
487 | struct z8530_irqhandler z8530_sync= | 482 | struct z8530_irqhandler z8530_sync = |
488 | { | 483 | { |
489 | z8530_rx, | 484 | z8530_rx, |
490 | z8530_tx, | 485 | z8530_tx, |
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan) | |||
556 | * | 551 | * |
557 | * A status event occurred on the Z8530. We receive these for two reasons | 552 | * A status event occurred on the Z8530. We receive these for two reasons |
558 | * when in DMA mode. Firstly if we finished a packet transfer we get one | 553 | * when in DMA mode. Firstly if we finished a packet transfer we get one |
559 | * and kick the next packet out. Secondly we may see a DCD change and | 554 | * and kick the next packet out. Secondly we may see a DCD change. |
560 | * have to poke the protocol layer. | ||
561 | * | 555 | * |
562 | */ | 556 | */ |
563 | 557 | ||
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan) | |||
586 | } | 580 | } |
587 | } | 581 | } |
588 | 582 | ||
589 | if(altered&chan->dcdcheck) | 583 | if (altered & chan->dcdcheck) |
590 | { | 584 | { |
591 | if(status&chan->dcdcheck) | 585 | if (status & chan->dcdcheck) { |
592 | { | ||
593 | printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); | 586 | printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); |
594 | write_zsreg(chan, R3, chan->regs[3]|RxENABLE); | 587 | write_zsreg(chan, R3, chan->regs[3] | RxENABLE); |
595 | if(chan->netdevice && | 588 | if (chan->netdevice) |
596 | ((chan->netdevice->type == ARPHRD_HDLC) || | 589 | netif_carrier_on(chan->netdevice); |
597 | (chan->netdevice->type == ARPHRD_PPP))) | 590 | } else { |
598 | sppp_reopen(chan->netdevice); | ||
599 | } | ||
600 | else | ||
601 | { | ||
602 | printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); | 591 | printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); |
603 | write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); | 592 | write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); |
604 | z8530_flush_fifo(chan); | 593 | z8530_flush_fifo(chan); |
594 | if (chan->netdevice) | ||
595 | netif_carrier_off(chan->netdevice); | ||
605 | } | 596 | } |
606 | } | 597 | } |
607 | 598 | ||
608 | write_zsctrl(chan, RES_EXT_INT); | 599 | write_zsctrl(chan, RES_EXT_INT); |
609 | write_zsctrl(chan, RES_H_IUS); | 600 | write_zsctrl(chan, RES_H_IUS); |
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c) | |||
1459 | /* | 1450 | /* |
1460 | * Check if we crapped out. | 1451 | * Check if we crapped out. |
1461 | */ | 1452 | */ |
1462 | if(get_dma_residue(c->txdma)) | 1453 | if (get_dma_residue(c->txdma)) |
1463 | { | 1454 | { |
1464 | c->stats.tx_dropped++; | 1455 | c->netdevice->stats.tx_dropped++; |
1465 | c->stats.tx_fifo_errors++; | 1456 | c->netdevice->stats.tx_fifo_errors++; |
1466 | } | 1457 | } |
1467 | release_dma_lock(flags); | 1458 | release_dma_lock(flags); |
1468 | } | 1459 | } |
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c) | |||
1534 | * packet. This code is fairly timing sensitive. | 1525 | * packet. This code is fairly timing sensitive. |
1535 | * | 1526 | * |
1536 | * Called with the register lock held. | 1527 | * Called with the register lock held. |
1537 | */ | 1528 | */ |
1538 | 1529 | ||
1539 | static void z8530_tx_done(struct z8530_channel *c) | 1530 | static void z8530_tx_done(struct z8530_channel *c) |
1540 | { | 1531 | { |
1541 | struct sk_buff *skb; | 1532 | struct sk_buff *skb; |
1542 | 1533 | ||
1543 | /* Actually this can happen.*/ | 1534 | /* Actually this can happen.*/ |
1544 | if(c->tx_skb==NULL) | 1535 | if (c->tx_skb == NULL) |
1545 | return; | 1536 | return; |
1546 | 1537 | ||
1547 | skb=c->tx_skb; | 1538 | skb = c->tx_skb; |
1548 | c->tx_skb=NULL; | 1539 | c->tx_skb = NULL; |
1549 | z8530_tx_begin(c); | 1540 | z8530_tx_begin(c); |
1550 | c->stats.tx_packets++; | 1541 | c->netdevice->stats.tx_packets++; |
1551 | c->stats.tx_bytes+=skb->len; | 1542 | c->netdevice->stats.tx_bytes += skb->len; |
1552 | dev_kfree_skb_irq(skb); | 1543 | dev_kfree_skb_irq(skb); |
1553 | } | 1544 | } |
1554 | 1545 | ||
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c) | |||
1558 | * @skb: The buffer | 1549 | * @skb: The buffer |
1559 | * | 1550 | * |
1560 | * We point the receive handler at this function when idle. Instead | 1551 | * We point the receive handler at this function when idle. Instead |
1561 | * of syncppp processing the frames we get to throw them away. | 1552 | * of processing the frames we get to throw them away. |
1562 | */ | 1553 | */ |
1563 | 1554 | ||
1564 | void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) | 1555 | void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) |
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1635 | else | 1626 | else |
1636 | /* Can't occur as we dont reenable the DMA irq until | 1627 | /* Can't occur as we dont reenable the DMA irq until |
1637 | after the flip is done */ | 1628 | after the flip is done */ |
1638 | printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); | 1629 | printk(KERN_WARNING "%s: DMA flip overrun!\n", |
1639 | 1630 | c->netdevice->name); | |
1631 | |||
1640 | release_dma_lock(flags); | 1632 | release_dma_lock(flags); |
1641 | 1633 | ||
1642 | /* | 1634 | /* |
1643 | * Shove the old buffer into an sk_buff. We can't DMA | 1635 | * Shove the old buffer into an sk_buff. We can't DMA |
1644 | * directly into one on a PC - it might be above the 16Mb | 1636 | * directly into one on a PC - it might be above the 16Mb |
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1646 | * can avoid the copy. Optimisation 2 - make the memcpy | 1638 | * can avoid the copy. Optimisation 2 - make the memcpy |
1647 | * a copychecksum. | 1639 | * a copychecksum. |
1648 | */ | 1640 | */ |
1649 | 1641 | ||
1650 | skb=dev_alloc_skb(ct); | 1642 | skb = dev_alloc_skb(ct); |
1651 | if(skb==NULL) | 1643 | if (skb == NULL) { |
1652 | { | 1644 | c->netdevice->stats.rx_dropped++; |
1653 | c->stats.rx_dropped++; | 1645 | printk(KERN_WARNING "%s: Memory squeeze.\n", |
1654 | printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); | 1646 | c->netdevice->name); |
1655 | } | 1647 | } else { |
1656 | else | ||
1657 | { | ||
1658 | skb_put(skb, ct); | 1648 | skb_put(skb, ct); |
1659 | skb_copy_to_linear_data(skb, rxb, ct); | 1649 | skb_copy_to_linear_data(skb, rxb, ct); |
1660 | c->stats.rx_packets++; | 1650 | c->netdevice->stats.rx_packets++; |
1661 | c->stats.rx_bytes+=ct; | 1651 | c->netdevice->stats.rx_bytes += ct; |
1662 | } | 1652 | } |
1663 | c->dma_ready=1; | 1653 | c->dma_ready = 1; |
1664 | } | 1654 | } else { |
1665 | else | 1655 | RT_LOCK; |
1666 | { | 1656 | skb = c->skb; |
1667 | RT_LOCK; | 1657 | |
1668 | skb=c->skb; | ||
1669 | |||
1670 | /* | 1658 | /* |
1671 | * The game we play for non DMA is similar. We want to | 1659 | * The game we play for non DMA is similar. We want to |
1672 | * get the controller set up for the next packet as fast | 1660 | * get the controller set up for the next packet as fast |
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1677 | * if you build a system where the sync irq isnt blocked | 1665 | * if you build a system where the sync irq isnt blocked |
1678 | * by the kernel IRQ disable then you need only block the | 1666 | * by the kernel IRQ disable then you need only block the |
1679 | * sync IRQ for the RT_LOCK area. | 1667 | * sync IRQ for the RT_LOCK area. |
1680 | * | 1668 | * |
1681 | */ | 1669 | */ |
1682 | ct=c->count; | 1670 | ct=c->count; |
1683 | 1671 | ||
1684 | c->skb = c->skb2; | 1672 | c->skb = c->skb2; |
1685 | c->count = 0; | 1673 | c->count = 0; |
1686 | c->max = c->mtu; | 1674 | c->max = c->mtu; |
1687 | if(c->skb) | 1675 | if (c->skb) { |
1688 | { | ||
1689 | c->dptr = c->skb->data; | 1676 | c->dptr = c->skb->data; |
1690 | c->max = c->mtu; | 1677 | c->max = c->mtu; |
1691 | } | 1678 | } else { |
1692 | else | 1679 | c->count = 0; |
1693 | { | ||
1694 | c->count= 0; | ||
1695 | c->max = 0; | 1680 | c->max = 0; |
1696 | } | 1681 | } |
1697 | RT_UNLOCK; | 1682 | RT_UNLOCK; |
1698 | 1683 | ||
1699 | c->skb2 = dev_alloc_skb(c->mtu); | 1684 | c->skb2 = dev_alloc_skb(c->mtu); |
1700 | if(c->skb2==NULL) | 1685 | if (c->skb2 == NULL) |
1701 | printk(KERN_WARNING "%s: memory squeeze.\n", | 1686 | printk(KERN_WARNING "%s: memory squeeze.\n", |
1702 | c->netdevice->name); | 1687 | c->netdevice->name); |
1703 | else | 1688 | else |
1704 | { | 1689 | skb_put(c->skb2, c->mtu); |
1705 | skb_put(c->skb2,c->mtu); | 1690 | c->netdevice->stats.rx_packets++; |
1706 | } | 1691 | c->netdevice->stats.rx_bytes += ct; |
1707 | c->stats.rx_packets++; | ||
1708 | c->stats.rx_bytes+=ct; | ||
1709 | |||
1710 | } | 1692 | } |
1711 | /* | 1693 | /* |
1712 | * If we received a frame we must now process it. | 1694 | * If we received a frame we must now process it. |
1713 | */ | 1695 | */ |
1714 | if(skb) | 1696 | if (skb) { |
1715 | { | ||
1716 | skb_trim(skb, ct); | 1697 | skb_trim(skb, ct); |
1717 | c->rx_function(c,skb); | 1698 | c->rx_function(c, skb); |
1718 | } | 1699 | } else { |
1719 | else | 1700 | c->netdevice->stats.rx_dropped++; |
1720 | { | ||
1721 | c->stats.rx_dropped++; | ||
1722 | printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); | 1701 | printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); |
1723 | } | 1702 | } |
1724 | } | 1703 | } |
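
The z8530_rx_done() hunk above moves every counter from the channel's private stats block to the generic one hanging off the network device. A minimal sketch of that accounting idiom, using a hypothetical example_rx_account() helper that is not part of this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper, for illustration only: counters now live in the
 * generic netdev->stats block rather than in struct z8530_channel. */
static void example_rx_account(struct net_device *dev, struct sk_buff *skb)
{
	if (!skb) {
		dev->stats.rx_dropped++;	/* allocation failed or frame lost */
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}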
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c) | |||
1730 | * Returns true if the buffer cross a DMA boundary on a PC. The poor | 1709 | * Returns true if the buffer cross a DMA boundary on a PC. The poor |
1731 | * thing can only DMA within a 64K block not across the edges of it. | 1710 | * thing can only DMA within a 64K block not across the edges of it. |
1732 | */ | 1711 | */ |
1733 | 1712 | ||
1734 | static inline int spans_boundary(struct sk_buff *skb) | 1713 | static inline int spans_boundary(struct sk_buff *skb) |
1735 | { | 1714 | { |
1736 | unsigned long a=(unsigned long)skb->data; | 1715 | unsigned long a=(unsigned long)skb->data; |
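
The comment quoted in this hunk refers to the ISA DMA restriction that a single transfer may not cross a 64K boundary. A generic restatement of that test, not the driver's own helper, might look like:

/* Illustrative only: nonzero if the first and last byte of the buffer
 * fall in different 64K blocks, i.e. the transfer would cross an ISA
 * DMA boundary and the data must be bounced instead. */
static inline int crosses_64k(unsigned long start, unsigned long len)
{
	return ((start ^ (start + len - 1)) >> 16) != 0;
}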
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) | |||
1799 | 1778 | ||
1800 | EXPORT_SYMBOL(z8530_queue_xmit); | 1779 | EXPORT_SYMBOL(z8530_queue_xmit); |
1801 | 1780 | ||
1802 | /** | ||
1803 | * z8530_get_stats - Get network statistics | ||
1804 | * @c: The channel to use | ||
1805 | * | ||
1806 | * Get the statistics block. We keep the statistics in software as | ||
1807 | * the chip doesn't do it for us. | ||
1808 | * | ||
1809 | * Locking is ignored here - we could lock for a copy but its | ||
1810 | * not likely to be that big an issue | ||
1811 | */ | ||
1812 | |||
1813 | struct net_device_stats *z8530_get_stats(struct z8530_channel *c) | ||
1814 | { | ||
1815 | return &c->stats; | ||
1816 | } | ||
1817 | |||
1818 | EXPORT_SYMBOL(z8530_get_stats); | ||
1819 | |||
1820 | /* | 1781 | /* |
1821 | * Module support | 1782 | * Module support |
1822 | */ | 1783 | */ |
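
With the counters kept in the network device, the exported z8530_get_stats() wrapper deleted above has nothing useful left to return. If a caller still wanted such a hook, it would now amount to the generic per-device block; a hypothetical stand-in, not part of this patch:

#include <linux/netdevice.h>

/* Hypothetical stand-in for the removed wrapper: the statistics it used
 * to expose are simply the per-device block after this change. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	return &dev->stats;
}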
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h index 158aea7b8eac..4f372396c512 100644 --- a/drivers/net/wan/z85230.h +++ b/drivers/net/wan/z85230.h | |||
@@ -325,7 +325,6 @@ struct z8530_channel | |||
325 | 325 | ||
326 | void *private; /* For our owner */ | 326 | void *private; /* For our owner */ |
327 | struct net_device *netdevice; /* Network layer device */ | 327 | struct net_device *netdevice; /* Network layer device */ |
328 | struct net_device_stats stats; /* Network layer statistics */ | ||
329 | 328 | ||
330 | /* | 329 | /* |
331 | * Async features | 330 | * Async features |
@@ -366,13 +365,13 @@ struct z8530_channel | |||
366 | unsigned char tx_active; /* character is being xmitted */ | 365 | unsigned char tx_active; /* character is being xmitted */ |
367 | unsigned char tx_stopped; /* output is suspended */ | 366 | unsigned char tx_stopped; /* output is suspended */ |
368 | 367 | ||
369 | spinlock_t *lock; /* Devicr lock */ | 368 | spinlock_t *lock; /* Device lock */ |
370 | }; | 369 | }; |
371 | 370 | ||
372 | /* | 371 | /* |
373 | * Each Z853x0 device. | 372 | * Each Z853x0 device. |
374 | */ | 373 | */ |
375 | 374 | ||
376 | struct z8530_dev | 375 | struct z8530_dev |
377 | { | 376 | { |
378 | char *name; /* Device instance name */ | 377 | char *name; /* Device instance name */ |
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); | |||
408 | extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); | 407 | extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); |
409 | extern int z8530_channel_load(struct z8530_channel *, u8 *); | 408 | extern int z8530_channel_load(struct z8530_channel *, u8 *); |
410 | extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); | 409 | extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); |
411 | extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c); | ||
412 | extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); | 410 | extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); |
413 | 411 | ||
414 | 412 | ||
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index b047306bf386..1ebcafe7ca5f 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -1998,13 +1998,6 @@ __orinoco_set_multicast_list(struct net_device *dev) | |||
1998 | else | 1998 | else |
1999 | priv->mc_count = mc_count; | 1999 | priv->mc_count = mc_count; |
2000 | } | 2000 | } |
2001 | |||
2002 | /* Since we can set the promiscuous flag when it wasn't asked | ||
2003 | for, make sure the net_device knows about it. */ | ||
2004 | if (priv->promiscuous) | ||
2005 | dev->flags |= IFF_PROMISC; | ||
2006 | else | ||
2007 | dev->flags &= ~IFF_PROMISC; | ||
2008 | } | 2001 | } |
2009 | 2002 | ||
2010 | /* This must be called from user context, without locks held - use | 2003 | /* This must be called from user context, without locks held - use |
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 49ae97003952..136220b5ca81 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c | |||
@@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev) | |||
1409 | lp->mc_count = 0; | 1409 | lp->mc_count = 0; |
1410 | 1410 | ||
1411 | wv_82586_reconfig(dev); | 1411 | wv_82586_reconfig(dev); |
1412 | |||
1413 | /* Tell the kernel that we are doing a really bad job. */ | ||
1414 | dev->flags |= IFF_PROMISC; | ||
1415 | } | 1412 | } |
1416 | } else | 1413 | } else |
1417 | /* Are there multicast addresses to send? */ | 1414 | /* Are there multicast addresses to send? */ |
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index b584c0ecc62d..00a3559e5aa4 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev) | |||
1412 | lp->mc_count = 0; | 1412 | lp->mc_count = 0; |
1413 | 1413 | ||
1414 | wv_82593_reconfig(dev); | 1414 | wv_82593_reconfig(dev); |
1415 | |||
1416 | /* Tell the kernel that we are doing a really bad job... */ | ||
1417 | dev->flags |= IFF_PROMISC; | ||
1418 | } | 1415 | } |
1419 | } | 1416 | } |
1420 | else | 1417 | else |
@@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev) | |||
1433 | lp->mc_count = 0; | 1430 | lp->mc_count = 0; |
1434 | 1431 | ||
1435 | wv_82593_reconfig(dev); | 1432 | wv_82593_reconfig(dev); |
1436 | |||
1437 | /* Tell the kernel that we are doing a really bad job... */ | ||
1438 | dev->flags |= IFF_ALLMULTI; | ||
1439 | } | 1433 | } |
1440 | } | 1434 | } |
1441 | else | 1435 | else |
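
The deletions in orinoco, wavelan and wavelan_cs above all enforce the same rule: a set_multicast_list handler may read IFF_PROMISC and IFF_ALLMULTI to programme the hardware, but it must not write dev->flags back to tell the core what it actually did. A sketch of that shape, with hypothetical hardware bits:

#include <linux/netdevice.h>

#define EX_RX_PROMISC	0x01	/* hypothetical receiver-mode bits */
#define EX_RX_ALLMULTI	0x02

static void example_set_multicast_list(struct net_device *dev)
{
	unsigned int rx_mode = 0;

	if (dev->flags & IFF_PROMISC)
		rx_mode |= EX_RX_PROMISC;
	else if (dev->flags & IFF_ALLMULTI)
		rx_mode |= EX_RX_ALLMULTI;

	/* ...load rx_mode and the multicast list into the chip's filter;
	 * dev->flags itself is left untouched. */
}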
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 902bbe788215..c749bdba214c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev) | |||
329 | } | 329 | } |
330 | spin_unlock_bh(&np->rx_lock); | 330 | spin_unlock_bh(&np->rx_lock); |
331 | 331 | ||
332 | xennet_maybe_wake_tx(dev); | 332 | netif_start_queue(dev); |
333 | 333 | ||
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446b6425..c30879cf93bc 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | struct dm9000_plat_data { | 28 | struct dm9000_plat_data { |
29 | unsigned int flags; | 29 | unsigned int flags; |
30 | unsigned char dev_addr[6]; | ||
30 | 31 | ||
31 | /* allow replacement IO routines */ | 32 | /* allow replacement IO routines */ |
32 | 33 | ||
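
The new dev_addr field lets a board without an attached EEPROM hand the DM9000 driver a fixed MAC address through its platform data. A hypothetical board-file fragment (the address is an example, not from this patch):

#include <linux/dm9000.h>

static struct dm9000_plat_data example_dm9000_pdata = {
	.flags    = 0,
	.dev_addr = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 },	/* example MAC */
};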
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 8bb5e87df365..b4b038b89ee6 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -27,9 +27,24 @@ struct ethtool_cmd { | |||
27 | __u8 autoneg; /* Enable or disable autonegotiation */ | 27 | __u8 autoneg; /* Enable or disable autonegotiation */ |
28 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ | 28 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ |
29 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ | 29 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ |
30 | __u32 reserved[4]; | 30 | __u16 speed_hi; |
31 | __u16 reserved2; | ||
32 | __u32 reserved[3]; | ||
31 | }; | 33 | }; |
32 | 34 | ||
35 | static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, | ||
36 | __u32 speed) | ||
37 | { | ||
38 | |||
39 | ep->speed = (__u16)speed; | ||
40 | ep->speed_hi = (__u16)(speed >> 16); | ||
41 | } | ||
42 | |||
43 | static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) | ||
44 | { | ||
45 | return (ep->speed_hi << 16) | ep->speed; | ||
46 | } | ||
47 | |||
33 | #define ETHTOOL_BUSINFO_LEN 32 | 48 | #define ETHTOOL_BUSINFO_LEN 32 |
34 | /* these strings are set to whatever the driver author decides... */ | 49 | /* these strings are set to whatever the driver author decides... */ |
35 | struct ethtool_drvinfo { | 50 | struct ethtool_drvinfo { |
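
struct ethtool_cmd gains a speed_hi word, and the two inline helpers above split and reassemble a 32-bit speed across speed and speed_hi so values above 65535 Mb/s can be reported. A short usage sketch (the 100000 Mb/s figure is purely illustrative):

#include <linux/ethtool.h>

static void example_report_speed(struct ethtool_cmd *cmd)
{
	ethtool_cmd_speed_set(cmd, 100000);	/* does not fit in 16 bits */
	/* ethtool_cmd_speed(cmd) now yields 100000 again; readers should
	 * use the accessor instead of looking at cmd->speed directly. */
}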
diff --git a/include/net/syncppp.h b/include/net/syncppp.h index e43f4070d892..9e306f7f579a 100644 --- a/include/net/syncppp.h +++ b/include/net/syncppp.h | |||
@@ -43,8 +43,6 @@ struct sppp | |||
43 | u32 pp_rseq; /* remote sequence number */ | 43 | u32 pp_rseq; /* remote sequence number */ |
44 | struct slcp lcp; /* LCP params */ | 44 | struct slcp lcp; /* LCP params */ |
45 | struct sipcp ipcp; /* IPCP params */ | 45 | struct sipcp ipcp; /* IPCP params */ |
46 | u32 ibytes,obytes; /* Bytes in/out */ | ||
47 | u32 ipkts,opkts; /* Packets in/out */ | ||
48 | struct timer_list pp_timer; | 46 | struct timer_list pp_timer; |
49 | struct net_device *pp_if; | 47 | struct net_device *pp_if; |
50 | char pp_link_state; /* Link status */ | 48 | char pp_link_state; /* Link status */ |
diff --git a/net/core/dev.c b/net/core/dev.c index 01993ad74e76..600bb23c4c2e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1939,22 +1939,6 @@ int netif_rx_ni(struct sk_buff *skb) | |||
1939 | 1939 | ||
1940 | EXPORT_SYMBOL(netif_rx_ni); | 1940 | EXPORT_SYMBOL(netif_rx_ni); |
1941 | 1941 | ||
1942 | static inline struct net_device *skb_bond(struct sk_buff *skb) | ||
1943 | { | ||
1944 | struct net_device *dev = skb->dev; | ||
1945 | |||
1946 | if (dev->master) { | ||
1947 | if (skb_bond_should_drop(skb)) { | ||
1948 | kfree_skb(skb); | ||
1949 | return NULL; | ||
1950 | } | ||
1951 | skb->dev = dev->master; | ||
1952 | } | ||
1953 | |||
1954 | return dev; | ||
1955 | } | ||
1956 | |||
1957 | |||
1958 | static void net_tx_action(struct softirq_action *h) | 1942 | static void net_tx_action(struct softirq_action *h) |
1959 | { | 1943 | { |
1960 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | 1944 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
@@ -2181,6 +2165,7 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2181 | { | 2165 | { |
2182 | struct packet_type *ptype, *pt_prev; | 2166 | struct packet_type *ptype, *pt_prev; |
2183 | struct net_device *orig_dev; | 2167 | struct net_device *orig_dev; |
2168 | struct net_device *null_or_orig; | ||
2184 | int ret = NET_RX_DROP; | 2169 | int ret = NET_RX_DROP; |
2185 | __be16 type; | 2170 | __be16 type; |
2186 | 2171 | ||
@@ -2194,10 +2179,14 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2194 | if (!skb->iif) | 2179 | if (!skb->iif) |
2195 | skb->iif = skb->dev->ifindex; | 2180 | skb->iif = skb->dev->ifindex; |
2196 | 2181 | ||
2197 | orig_dev = skb_bond(skb); | 2182 | null_or_orig = NULL; |
2198 | 2183 | orig_dev = skb->dev; | |
2199 | if (!orig_dev) | 2184 | if (orig_dev->master) { |
2200 | return NET_RX_DROP; | 2185 | if (skb_bond_should_drop(skb)) |
2186 | null_or_orig = orig_dev; /* deliver only exact match */ | ||
2187 | else | ||
2188 | skb->dev = orig_dev->master; | ||
2189 | } | ||
2201 | 2190 | ||
2202 | __get_cpu_var(netdev_rx_stat).total++; | 2191 | __get_cpu_var(netdev_rx_stat).total++; |
2203 | 2192 | ||
@@ -2221,7 +2210,8 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2221 | #endif | 2210 | #endif |
2222 | 2211 | ||
2223 | list_for_each_entry_rcu(ptype, &ptype_all, list) { | 2212 | list_for_each_entry_rcu(ptype, &ptype_all, list) { |
2224 | if (!ptype->dev || ptype->dev == skb->dev) { | 2213 | if (ptype->dev == null_or_orig || ptype->dev == skb->dev || |
2214 | ptype->dev == orig_dev) { | ||
2225 | if (pt_prev) | 2215 | if (pt_prev) |
2226 | ret = deliver_skb(skb, pt_prev, orig_dev); | 2216 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2227 | pt_prev = ptype; | 2217 | pt_prev = ptype; |
@@ -2246,7 +2236,8 @@ ncls: | |||
2246 | list_for_each_entry_rcu(ptype, | 2236 | list_for_each_entry_rcu(ptype, |
2247 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 2237 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { |
2248 | if (ptype->type == type && | 2238 | if (ptype->type == type && |
2249 | (!ptype->dev || ptype->dev == skb->dev)) { | 2239 | (ptype->dev == null_or_orig || ptype->dev == skb->dev || |
2240 | ptype->dev == orig_dev)) { | ||
2250 | if (pt_prev) | 2241 | if (pt_prev) |
2251 | ret = deliver_skb(skb, pt_prev, orig_dev); | 2242 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2252 | pt_prev = ptype; | 2243 | pt_prev = ptype; |
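
The netif_receive_skb() rework above folds the old skb_bond() helper into the function and widens the packet_type match in both delivery loops. A hedged restatement of that match as a stand-alone predicate (hypothetical helper, not in the patch): with null_or_orig normally NULL, wildcard handlers still match; when bonding marks the frame "deliver only exact match", null_or_orig is the originating slave and only a handler bound to that device sees the skb.

#include <linux/netdevice.h>

static int example_ptype_match(struct packet_type *pt,
			       struct net_device *skb_dev,
			       struct net_device *orig_dev,
			       struct net_device *null_or_orig)
{
	return pt->dev == null_or_orig ||	/* wildcard, unless suppressed */
	       pt->dev == skb_dev ||		/* bond master after rewrite */
	       pt->dev == orig_dev;		/* the originating slave */
}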
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index b210a88d0960..7f07152bc109 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c | |||
@@ -57,7 +57,6 @@ | |||
57 | #include <linux/vmalloc.h> /* vmalloc, vfree */ | 57 | #include <linux/vmalloc.h> /* vmalloc, vfree */ |
58 | #include <asm/uaccess.h> /* copy_to/from_user */ | 58 | #include <asm/uaccess.h> /* copy_to/from_user */ |
59 | #include <linux/init.h> /* __initfunc et al. */ | 59 | #include <linux/init.h> /* __initfunc et al. */ |
60 | #include <net/syncppp.h> | ||
61 | 60 | ||
62 | #define KMEM_SAFETYZONE 8 | 61 | #define KMEM_SAFETYZONE 8 |
63 | 62 | ||
@@ -567,9 +566,6 @@ static int wanrouter_device_new_if(struct wan_device *wandev, | |||
567 | { | 566 | { |
568 | wanif_conf_t *cnf; | 567 | wanif_conf_t *cnf; |
569 | struct net_device *dev = NULL; | 568 | struct net_device *dev = NULL; |
570 | #ifdef CONFIG_WANPIPE_MULTPPP | ||
571 | struct ppp_device *pppdev=NULL; | ||
572 | #endif | ||
573 | int err; | 569 | int err; |
574 | 570 | ||
575 | if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) | 571 | if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) |
@@ -588,25 +584,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev, | |||
588 | goto out; | 584 | goto out; |
589 | 585 | ||
590 | if (cnf->config_id == WANCONFIG_MPPP) { | 586 | if (cnf->config_id == WANCONFIG_MPPP) { |
591 | #ifdef CONFIG_WANPIPE_MULTPPP | ||
592 | pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); | ||
593 | err = -ENOBUFS; | ||
594 | if (pppdev == NULL) | ||
595 | goto out; | ||
596 | pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); | ||
597 | if (pppdev->dev == NULL) { | ||
598 | kfree(pppdev); | ||
599 | err = -ENOBUFS; | ||
600 | goto out; | ||
601 | } | ||
602 | err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); | ||
603 | dev = pppdev->dev; | ||
604 | #else | ||
605 | printk(KERN_INFO "%s: Wanpipe Mulit-Port PPP support has not been compiled in!\n", | 587 | printk(KERN_INFO "%s: Wanpipe Mulit-Port PPP support has not been compiled in!\n", |
606 | wandev->name); | 588 | wandev->name); |
607 | err = -EPROTONOSUPPORT; | 589 | err = -EPROTONOSUPPORT; |
608 | goto out; | 590 | goto out; |
609 | #endif | ||
610 | } else { | 591 | } else { |
611 | dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); | 592 | dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); |
612 | err = -ENOBUFS; | 593 | err = -ENOBUFS; |
@@ -661,17 +642,9 @@ static int wanrouter_device_new_if(struct wan_device *wandev, | |||
661 | kfree(dev->priv); | 642 | kfree(dev->priv); |
662 | dev->priv = NULL; | 643 | dev->priv = NULL; |
663 | 644 | ||
664 | #ifdef CONFIG_WANPIPE_MULTPPP | ||
665 | if (cnf->config_id == WANCONFIG_MPPP) | ||
666 | kfree(pppdev); | ||
667 | else | ||
668 | kfree(dev); | ||
669 | #else | ||
670 | /* Sync PPP is disabled */ | 645 | /* Sync PPP is disabled */ |
671 | if (cnf->config_id != WANCONFIG_MPPP) | 646 | if (cnf->config_id != WANCONFIG_MPPP) |
672 | kfree(dev); | 647 | kfree(dev); |
673 | #endif | ||
674 | |||
675 | out: | 648 | out: |
676 | kfree(cnf); | 649 | kfree(cnf); |
677 | return err; | 650 | return err; |