 -rw-r--r--  Documentation/devicetree/bindings/net/mdio-mux-gpio.txt | 127
 -rw-r--r--  Documentation/devicetree/bindings/net/mdio-mux.txt | 136
 -rw-r--r--  Documentation/networking/ip-sysctl.txt | 14
 -rw-r--r--  drivers/atm/ambassador.c | 2
 -rw-r--r--  drivers/atm/idt77252.c | 2
 -rw-r--r--  drivers/isdn/capi/capi.c | 50
 -rw-r--r--  drivers/isdn/capi/capidrv.c | 8
 -rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 44
 -rw-r--r--  drivers/isdn/gigaset/capi.c | 118
 -rw-r--r--  drivers/isdn/gigaset/common.c | 59
 -rw-r--r--  drivers/isdn/gigaset/dummyll.c | 2
 -rw-r--r--  drivers/isdn/gigaset/ev-layer.c | 319
 -rw-r--r--  drivers/isdn/gigaset/gigaset.h | 30
 -rw-r--r--  drivers/isdn/gigaset/i4l.c | 12
 -rw-r--r--  drivers/isdn/gigaset/isocdata.c | 12
 -rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 21
 -rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c | 19
 -rw-r--r--  drivers/isdn/hardware/mISDN/avmfritz.c | 5
 -rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 5
 -rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 5
 -rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c | 17
 -rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 5
 -rw-r--r--  drivers/isdn/hardware/mISDN/speedfax.c | 5
 -rw-r--r--  drivers/isdn/hardware/mISDN/w6692.c | 5
 -rw-r--r--  drivers/isdn/mISDN/core.c | 16
 -rw-r--r--  drivers/isdn/mISDN/layer1.c | 36
 -rw-r--r--  drivers/isdn/mISDN/layer2.c | 120
 -rw-r--r--  drivers/isdn/mISDN/tei.c | 72
 -rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 7
 -rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 4
 -rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 7
 -rw-r--r--  drivers/net/ethernet/intel/Kconfig | 8
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 9
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 29
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 8
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 14
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 17
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 15
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 743
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 10
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 52
 -rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 92
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 26
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 67
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 800
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 17
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 13
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 7
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 35
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 46
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 6
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 273
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 45
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 18
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 30
 -rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 12
 -rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 2
 -rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
 -rw-r--r--  drivers/net/phy/Kconfig | 19
 -rw-r--r--  drivers/net/phy/Makefile | 2
 -rw-r--r--  drivers/net/phy/mdio-mux-gpio.c | 142
 -rw-r--r--  drivers/net/phy/mdio-mux.c | 192
 -rw-r--r--  drivers/net/phy/mdio_bus.c | 32
 -rw-r--r--  drivers/net/wimax/i2400m/usb-rx.c | 2
 -rw-r--r--  drivers/of/of_mdio.c | 2
 -rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 2
 -rw-r--r--  include/linux/mISDNhw.h | 3
 -rw-r--r--  include/linux/mISDNif.h | 9
 -rw-r--r--  include/linux/mdio-mux.h | 21
 -rw-r--r--  include/linux/of_mdio.h | 2
 -rw-r--r--  include/linux/skbuff.h | 26
 -rw-r--r--  include/linux/tcp.h | 2
 -rw-r--r--  include/net/tcp.h | 44
 -rw-r--r--  net/core/skbuff.c | 63
 -rw-r--r--  net/core/sock.c | 1
 -rw-r--r--  net/ipv4/ip_sockglue.c | 13
 -rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 10
 -rw-r--r--  net/ipv4/tcp.c | 13
 -rw-r--r--  net/ipv4/tcp_input.c | 287
 -rw-r--r--  net/ipv4/tcp_ipv4.c | 2
 -rw-r--r--  net/ipv4/tcp_minisocks.c | 1
 -rw-r--r--  net/ipv4/tcp_output.c | 5
 -rw-r--r--  net/ipv4/tcp_timer.c | 5
 -rw-r--r--  net/ipv6/tcp_ipv6.c | 2
 -rw-r--r--  net/sched/sch_choke.c | 8
 -rw-r--r--  net/sched/sch_dsmark.c | 3
 -rw-r--r--  net/sched/sch_htb.c | 4
 -rw-r--r--  net/sched/sch_teql.c | 4
 93 files changed, 3345 insertions(+), 1266 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
new file mode 100644
index 000000000000..79384113c2b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
@@ -0,0 +1,127 @@
+Properties for an MDIO bus multiplexer/switch controlled by GPIO pins.
+
+This is a special case of an MDIO bus multiplexer. One or more GPIO
+lines are used to control which child bus is connected.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : mdio-mux-gpio.
+- gpios : GPIO specifiers for each GPIO line. One or more must be specified.
+
+
+Example :
+
+	/* The parent MDIO bus. */
+	smi1: mdio@1180000001900 {
+		compatible = "cavium,octeon-3860-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x11800 0x00001900 0x0 0x40>;
+	};
+
+	/*
+	   An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
+	   pair of GPIO lines. Child busses 2 and 3 populated with 4
+	   PHYs each.
+	 */
+	mdio-mux {
+		compatible = "mdio-mux-gpio";
+		gpios = <&gpio1 3 0>, <&gpio1 4 0>;
+		mdio-parent-bus = <&smi1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mdio@2 {
+			reg = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy11: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy12: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy13: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy14: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+		};
+
+		mdio@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy21: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy22: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy23: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy24: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt
new file mode 100644
index 000000000000..f65606f8d632
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux.txt
@@ -0,0 +1,136 @@
+Common MDIO bus multiplexer/switch properties.
+
+An MDIO bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner. The nodes for an MDIO
+bus multiplexer/switch will have one child node for each child bus.
+
+Required properties:
+- mdio-parent-bus : phandle to the parent MDIO bus.
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- Other properties specific to the multiplexer/switch hardware.
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+
+Example :
+
+	/* The parent MDIO bus. */
+	smi1: mdio@1180000001900 {
+		compatible = "cavium,octeon-3860-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x11800 0x00001900 0x0 0x40>;
+	};
+
+	/*
+	   An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
+	   pair of GPIO lines. Child busses 2 and 3 populated with 4
+	   PHYs each.
+	 */
+	mdio-mux {
+		compatible = "mdio-mux-gpio";
+		gpios = <&gpio1 3 0>, <&gpio1 4 0>;
+		mdio-parent-bus = <&smi1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mdio@2 {
+			reg = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy11: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy12: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy13: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy14: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+		};
+
+		mdio@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy21: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy22: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy23: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy24: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+		};
+	};
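
These bindings are consumed by the common MDIO multiplexer support this series adds (drivers/net/phy/mdio-mux.c, include/linux/mdio-mux.h). A device-specific mux driver only supplies a callback that connects the desired child bus. Below is a minimal sketch modeled on drivers/net/phy/mdio-mux-gpio.c; the names my_mux_* and struct my_mux_state are illustrative, not part of the kernel API, and the exact mdio_mux_init() signature should be checked against the mdio-mux.h hunk of this series.

#include <linux/mdio-mux.h>
#include <linux/platform_device.h>

struct my_mux_state {
	void *mux_handle;
	/* handles needed to drive the select lines would go here */
};

/* Called with the parent bus locked: connect child bus 'desired',
 * then return 0 on success or a negative errno. */
static int my_mux_switch_fn(int current_child, int desired_child, void *data)
{
	struct my_mux_state *s = data;

	if (current_child == desired_child)
		return 0;
	/* ... use s to flip GPIOs / registers selecting desired_child ... */
	return 0;
}

static int my_mux_probe(struct platform_device *pdev)
{
	struct my_mux_state *s;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* mdio_mux_init() walks the child mdio@N nodes seen in the
	 * example above and registers one child mii_bus per node,
	 * invoking my_mux_switch_fn() before each bus access. */
	return mdio_mux_init(&pdev->dev, my_mux_switch_fn,
			     &s->mux_handle, s);
}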
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9b569a2d9c60..34916e792d9d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -190,6 +190,20 @@ tcp_cookie_size - INTEGER
 tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.
 
+tcp_early_retrans - INTEGER
+	Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
+	for triggering fast retransmit when the amount of outstanding data is
+	small and when no previously unsent data can be transmitted (such
+	that limited transmit could be used).
+	Possible values:
+		0 disables ER
+		1 enables ER
+		2 enables ER but delays fast recovery and fast retransmit
+		  by a fourth of RTT. This mitigates falsely triggering
+		  recovery when the network has a small degree of reordering
+		  (less than 3 packets).
+	Default: 2
+
 tcp_ecn - INTEGER
 	Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
 	used when both ends of the TCP flow support it. It is useful to
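
Like its neighbours, the new knob is exposed under /proc/sys/net/ipv4/ (the path follows directly from the sysctl name net.ipv4.tcp_early_retrans). A small illustrative userspace snippet selecting mode 2, with error handling kept minimal:

#include <stdio.h>

int main(void)
{
	/* net.ipv4.tcp_early_retrans maps to this proc file */
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_early_retrans", "w");

	if (!f) {
		perror("tcp_early_retrans");
		return 1;
	}
	fputs("2\n", f);	/* ER with delayed fast retransmit (default) */
	return fclose(f) ? 1 : 0;
}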
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index f8f41e0e8a8c..89b30f32ba68 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
 	}
 	// cast needed as there is no %? for pointer differences
 	PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-		skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
+		skb, skb->head, (long) skb_end_offset(skb));
 	rx.handle = virt_to_bus (skb);
 	rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
 	if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1c052127548c..8974bd2b961e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 	tail = readl(SAR_REG_RAWCT);
 
 	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-				    skb_end_pointer(queue) - queue->head - 16,
+				    skb_end_offset(queue) - 16,
 				    PCI_DMA_FROMDEVICE);
 
 	while (head != tail) {
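
Both ATM hunks above replace the open-coded pointer difference with the skb_end_offset() helper that this series introduces in include/linux/skbuff.h (see the diffstat). A sketch of the helper, reconstructed by analogy with the existing skb_end_pointer() variants it mirrors; the exact placement in the header follows the skbuff.h hunk not shown here:

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;		/* 'end' is already stored as an offset */
}
#else
static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;	/* 'end' is stored as a pointer */
}
#endif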
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index b902794bbf07..38c4bd87b2c9 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -336,11 +336,6 @@ static inline void
 capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
 static inline void capincci_free_minor(struct capincci *np) { }
 
-static inline unsigned int capincci_minor_opencount(struct capincci *np)
-{
-	return 0;
-}
-
 #endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
 
 static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
@@ -372,6 +367,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
 	}
 }
 
+#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
 {
 	struct capincci *np;
@@ -382,7 +378,6 @@ static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
 	return NULL;
 }
 
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 /* -------- handle data queue --------------------------------------- */
 
 static struct sk_buff *
@@ -578,8 +573,8 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
 	struct tty_struct *tty;
 	struct capiminor *mp;
 	u16 datahandle;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 	struct capincci *np;
+#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 
 	mutex_lock(&cdev->lock);
 
@@ -597,6 +592,12 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
 		goto unlock_out;
 	}
 
+#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
+	skb_queue_tail(&cdev->recvqueue, skb);
+	wake_up_interruptible(&cdev->recvwait);
+
+#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
 	np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
 	if (!np) {
 		printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
@@ -605,12 +606,6 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
 		goto unlock_out;
 	}
 
-#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
-	skb_queue_tail(&cdev->recvqueue, skb);
-	wake_up_interruptible(&cdev->recvwait);
-
-#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-
 	mp = np->minorp;
 	if (!mp) {
 		skb_queue_tail(&cdev->recvqueue, skb);
@@ -786,7 +781,6 @@ register_out:
 		return retval;
 
 	case CAPI_GET_VERSION:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -796,11 +790,9 @@ register_out:
 		if (copy_to_user(argp, &data.version,
 				 sizeof(data.version)))
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
 
 	case CAPI_GET_SERIAL:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -810,10 +802,9 @@ register_out:
 		if (copy_to_user(argp, data.serial,
 				 sizeof(data.serial)))
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
+
 	case CAPI_GET_PROFILE:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -837,11 +828,9 @@ register_out:
 		}
 		if (retval)
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
 
 	case CAPI_GET_MANUFACTURER:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -853,8 +842,8 @@ register_out:
 				 sizeof(data.manufacturer)))
 			return -EFAULT;
 
-	}
-	return 0;
+		return 0;
+
 	case CAPI_GET_ERRCODE:
 		data.errcode = cdev->errcode;
 		cdev->errcode = CAPI_NOERROR;
@@ -870,8 +859,7 @@ register_out:
 			return 0;
 		return -ENXIO;
 
-	case CAPI_MANUFACTURER_CMD:
-	{
+	case CAPI_MANUFACTURER_CMD: {
 		struct capi_manufacturer_cmd mcmd;
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -879,8 +867,6 @@ register_out:
 			return -EFAULT;
 		return capi20_manufacturer(mcmd.cmd, mcmd.data);
 	}
-	return 0;
-
 	case CAPI_SET_FLAGS:
 	case CAPI_CLR_FLAGS: {
 		unsigned userflags;
@@ -902,6 +888,11 @@ register_out:
 			return -EFAULT;
 		return 0;
 
+#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
+	case CAPI_NCCI_OPENCOUNT:
+		return 0;
+
+#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 	case CAPI_NCCI_OPENCOUNT: {
 		struct capincci *nccip;
 		unsigned ncci;
@@ -918,7 +909,6 @@ register_out:
 		return count;
 	}
 
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 	case CAPI_NCCI_GETUNIT: {
 		struct capincci *nccip;
 		struct capiminor *mp;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 6f5016b479f8..832bc807ed20 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1593,7 +1593,7 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
 		return capidrv_ioctl(c, card);
 
 	switch (c->command) {
-	case ISDN_CMD_DIAL:{
+	case ISDN_CMD_DIAL: {
 		u8 calling[ISDN_MSNLEN + 3];
 		u8 called[ISDN_MSNLEN + 2];
 
@@ -2072,7 +2072,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
 	card->interface.writebuf_skb = if_sendbuf;
 	card->interface.writecmd = NULL;
 	card->interface.readstat = if_readstat;
-	card->interface.features = ISDN_FEATURE_L2_HDLC |
+	card->interface.features =
+		ISDN_FEATURE_L2_HDLC |
 		ISDN_FEATURE_L2_TRANS |
 		ISDN_FEATURE_L3_TRANS |
 		ISDN_FEATURE_P_UNKNOWN |
@@ -2080,7 +2081,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
 		ISDN_FEATURE_L2_X75UI |
 		ISDN_FEATURE_L2_X75BUI;
 	if (profp->support1 & (1 << 2))
-		card->interface.features |= ISDN_FEATURE_L2_V11096 |
+		card->interface.features |=
+			ISDN_FEATURE_L2_V11096 |
 			ISDN_FEATURE_L2_V11019 |
 			ISDN_FEATURE_L2_V11038;
 	if (profp->support1 & (1 << 8))
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index afa080258bfa..3b9278b333ba 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -410,10 +410,10 @@ static void check_pending(struct bas_cardstate *ucs)
 		if (!(ucs->basstate & BS_RESETTING))
 			ucs->pending = 0;
 		break;
-	/*
-	 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
-	 * and should never end up here
-	 */
+		/*
+		 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
+		 * and should never end up here
+		 */
 	default:
 		dev_warn(&ucs->interface->dev,
 			 "unknown pending request 0x%02x cleared\n",
@@ -877,8 +877,7 @@ static void read_iso_callback(struct urb *urb)
 	for (i = 0; i < BAS_NUMFRAMES; i++) {
 		ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
 		if (unlikely(urb->iso_frame_desc[i].status != 0 &&
-			     urb->iso_frame_desc[i].status !=
-			     -EINPROGRESS))
+			     urb->iso_frame_desc[i].status != -EINPROGRESS))
 			ubc->loststatus = urb->iso_frame_desc[i].status;
 		urb->iso_frame_desc[i].status = 0;
 		urb->iso_frame_desc[i].actual_length = 0;
@@ -2078,16 +2077,14 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
 /* Free hardware dependent part of the B channel structure
  * parameter:
  *	bcs	B channel structure
- * return value:
- *	!=0 on success
  */
-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
 	struct bas_bc_state *ubc = bcs->hw.bas;
 	int i;
 
 	if (!ubc)
-		return 0;
+		return;
 
 	/* kill URBs and tasklets before freeing - better safe than sorry */
 	ubc->running = 0;
@@ -2105,14 +2102,13 @@ static int gigaset_freebcshw(struct bc_state *bcs)
 	kfree(ubc->isooutbuf);
 	kfree(ubc);
 	bcs->hw.bas = NULL;
-	return 1;
 }
 
 /* Initialize hardware dependent part of the B channel structure
  * parameter:
  *	bcs	B channel structure
  * return value:
- *	!=0 on success
+ *	0 on success, error code < 0 on failure
 */
 static int gigaset_initbcshw(struct bc_state *bcs)
 {
@@ -2122,7 +2118,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 	bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
 	if (!ubc) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	ubc->running = 0;
@@ -2139,7 +2135,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 		pr_err("out of memory\n");
 		kfree(ubc);
 		bcs->hw.bas = NULL;
-		return 0;
+		return -ENOMEM;
 	}
 	tasklet_init(&ubc->sent_tasklet,
 		     write_iso_tasklet, (unsigned long) bcs);
@@ -2164,7 +2160,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 	ubc->stolen0s = 0;
 	tasklet_init(&ubc->rcvd_tasklet,
 		     read_iso_tasklet, (unsigned long) bcs);
-	return 1;
+	return 0;
 }
 
 static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -2187,6 +2183,12 @@ static void gigaset_freecshw(struct cardstate *cs)
 	cs->hw.bas = NULL;
 }
 
+/* Initialize hardware dependent part of the cardstate structure
+ * parameter:
+ *	cs	cardstate structure
+ * return value:
+ *	0 on success, error code < 0 on failure
+ */
 static int gigaset_initcshw(struct cardstate *cs)
 {
 	struct bas_cardstate *ucs;
@@ -2194,13 +2196,13 @@ static int gigaset_initcshw(struct cardstate *cs)
 	cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
 	if (!ucs) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 	ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
 	if (!ucs->int_in_buf) {
 		kfree(ucs);
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	ucs->urb_cmd_in = NULL;
@@ -2219,7 +2221,7 @@ static int gigaset_initcshw(struct cardstate *cs)
 	init_waitqueue_head(&ucs->waitqueue);
 	INIT_WORK(&ucs->int_in_wq, int_in_work);
 
-	return 1;
+	return 0;
 }
 
 /* freeurbs
@@ -2379,18 +2381,20 @@ static int gigaset_probe(struct usb_interface *interface,
 	/* save address of controller structure */
 	usb_set_intfdata(interface, cs);
 
-	if (!gigaset_start(cs))
+	rc = gigaset_start(cs);
+	if (rc < 0)
 		goto error;
 
 	return 0;
 
 allocerr:
 	dev_err(cs->dev, "could not allocate URBs\n");
+	rc = -ENOMEM;
 error:
 	freeurbs(cs);
 	usb_set_intfdata(interface, NULL);
 	gigaset_freecs(cs);
-	return -ENODEV;
+	return rc;
 }
 
 /* gigaset_disconnect
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 343b5c80cb7b..27e4a3e21d64 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -14,6 +14,7 @@
 #include "gigaset.h"
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ratelimit.h>
 #include <linux/isdn/capilli.h>
 #include <linux/isdn/capicmd.h>
 #include <linux/isdn/capiutil.h>
@@ -108,51 +109,35 @@ static struct {
 	u8 *bc;
 	u8 *hlc;
 } cip2bchlc[] = {
-	[1] = { "8090A3", NULL },
-	/* Speech (A-law) */
-	[2] = { "8890", NULL },
-	/* Unrestricted digital information */
-	[3] = { "8990", NULL },
-	/* Restricted digital information */
-	[4] = { "9090A3", NULL },
-	/* 3,1 kHz audio (A-law) */
-	[5] = { "9190", NULL },
-	/* 7 kHz audio */
-	[6] = { "9890", NULL },
-	/* Video */
-	[7] = { "88C0C6E6", NULL },
-	/* Packet mode */
-	[8] = { "8890218F", NULL },
-	/* 56 kbit/s rate adaptation */
-	[9] = { "9190A5", NULL },
-	/* Unrestricted digital information with tones/announcements */
-	[16] = { "8090A3", "9181" },
-	/* Telephony */
-	[17] = { "9090A3", "9184" },
-	/* Group 2/3 facsimile */
-	[18] = { "8890", "91A1" },
-	/* Group 4 facsimile Class 1 */
-	[19] = { "8890", "91A4" },
-	/* Teletex service basic and mixed mode
-	   and Group 4 facsimile service Classes II and III */
-	[20] = { "8890", "91A8" },
-	/* Teletex service basic and processable mode */
-	[21] = { "8890", "91B1" },
-	/* Teletex service basic mode */
-	[22] = { "8890", "91B2" },
-	/* International interworking for Videotex */
-	[23] = { "8890", "91B5" },
-	/* Telex */
-	[24] = { "8890", "91B8" },
-	/* Message Handling Systems in accordance with X.400 */
-	[25] = { "8890", "91C1" },
-	/* OSI application in accordance with X.200 */
-	[26] = { "9190A5", "9181" },
-	/* 7 kHz telephony */
-	[27] = { "9190A5", "916001" },
-	/* Video telephony, first connection */
-	[28] = { "8890", "916002" },
-	/* Video telephony, second connection */
+	[1] = { "8090A3", NULL },	/* Speech (A-law) */
+	[2] = { "8890", NULL },		/* Unrestricted digital information */
+	[3] = { "8990", NULL },		/* Restricted digital information */
+	[4] = { "9090A3", NULL },	/* 3,1 kHz audio (A-law) */
+	[5] = { "9190", NULL },		/* 7 kHz audio */
+	[6] = { "9890", NULL },		/* Video */
+	[7] = { "88C0C6E6", NULL },	/* Packet mode */
+	[8] = { "8890218F", NULL },	/* 56 kbit/s rate adaptation */
+	[9] = { "9190A5", NULL },	/* Unrestricted digital information
+					 * with tones/announcements */
+	[16] = { "8090A3", "9181" },	/* Telephony */
+	[17] = { "9090A3", "9184" },	/* Group 2/3 facsimile */
+	[18] = { "8890", "91A1" },	/* Group 4 facsimile Class 1 */
+	[19] = { "8890", "91A4" },	/* Teletex service basic and mixed mode
+					 * and Group 4 facsimile service
+					 * Classes II and III */
+	[20] = { "8890", "91A8" },	/* Teletex service basic and
+					 * processable mode */
+	[21] = { "8890", "91B1" },	/* Teletex service basic mode */
+	[22] = { "8890", "91B2" },	/* International interworking for
+					 * Videotex */
+	[23] = { "8890", "91B5" },	/* Telex */
+	[24] = { "8890", "91B8" },	/* Message Handling Systems
+					 * in accordance with X.400 */
+	[25] = { "8890", "91C1" },	/* OSI application
+					 * in accordance with X.200 */
+	[26] = { "9190A5", "9181" },	/* 7 kHz telephony */
+	[27] = { "9190A5", "916001" },	/* Video telephony, first connection */
+	[28] = { "8890", "916002" },	/* Video telephony, second connection */
 };
 
 /*
@@ -223,10 +208,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl)
 static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
 {
 #ifdef CONFIG_GIGASET_DEBUG
+	/* dump at most 20 messages in 20 secs */
+	static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
 	_cdebbuf *cdb;
 
 	if (!(gigaset_debuglevel & level))
 		return;
+	if (!___ratelimit(&msg_dump_ratelimit, tag))
+		return;
 
 	cdb = capi_cmsg2str(p);
 	if (cdb) {
@@ -1192,7 +1181,9 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
 		confparam[3] = 2;	/* length */
 		capimsg_setu16(confparam, 4, CapiSuccess);
 		break;
-	/* ToDo: add supported services */
+
+	/* ToDo: add supported services */
+
 	default:
 		dev_notice(cs->dev,
 			   "%s: unsupported supplementary service function 0x%04x\n",
@@ -1766,7 +1757,8 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
 
 	/* NCPI parameter: not applicable for B3 Transparent */
 	ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+	send_conf(iif, ap, skb,
+		  (cmsg->NCPI && cmsg->NCPI[0]) ?
 		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
@@ -1882,6 +1874,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
 
 	/* check for active logical connection */
 	if (bcs->apconnstate >= APCONN_ACTIVE) {
+		/* clear it */
+		bcs->apconnstate = APCONN_SETUP;
+
 		/*
 		 * emit DISCONNECT_B3_IND with cause 0x3301
 		 * use separate cmsg structure, as the content of iif->acmsg
@@ -1906,6 +1901,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
 		}
 		capi_cmsg2message(b3cmsg,
 			__skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
+		dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
 		kfree(b3cmsg);
 		capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
 	}
@@ -1966,7 +1962,8 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
 	/* NCPI parameter: not applicable for B3 Transparent */
 	ignore_cstruct_param(cs, cmsg->NCPI,
 			     "DISCONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+	send_conf(iif, ap, skb,
+		  (cmsg->NCPI && cmsg->NCPI[0]) ?
 		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
@@ -2059,12 +2056,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
 }
 
 /*
- * dump unsupported/ignored messages at most twice per minute,
- * some apps send those very frequently
- */
-static unsigned long ignored_msg_dump_time;
-
-/*
  * unsupported CAPI message handler
  */
 static void do_unsupported(struct gigaset_capi_ctr *iif,
@@ -2073,8 +2064,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
 {
 	/* decode message */
 	capi_message2cmsg(&iif->acmsg, skb->data);
-	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
-		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
 	send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
 }
 
@@ -2085,11 +2075,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
 			 struct gigaset_capi_appl *ap,
 			 struct sk_buff *skb)
 {
-	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
-		/* decode message */
-		capi_message2cmsg(&iif->acmsg, skb->data);
-		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	}
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
 	dev_kfree_skb_any(skb);
 }
 
@@ -2358,7 +2346,7 @@ static const struct file_operations gigaset_proc_fops = {
  * @cs: device descriptor structure.
  * @isdnid: device name.
  *
- * Return value: 1 for success, 0 for failure
+ * Return value: 0 on success, error code < 0 on failure
  */
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
@@ -2368,7 +2356,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 	iif = kmalloc(sizeof(*iif), GFP_KERNEL);
 	if (!iif) {
 		pr_err("%s: out of memory\n", __func__);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/* prepare controller structure */
@@ -2392,12 +2380,12 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 	if (rc) {
 		pr_err("attach_capi_ctr failed (%d)\n", rc);
 		kfree(iif);
-		return 0;
+		return rc;
 	}
 
 	cs->iif = iif;
 	cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
-	return 1;
+	return 0;
 }
 
 /**
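
The dump_cmsg() change above swaps a shared printk_timed_ratelimit() timestamp for a dedicated ratelimit_state. The pattern in isolation, as a minimal sketch (my_rs and my_debug_dump are illustrative names, not from the patch):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Budget: at most 20 calls per 20-second window. */
static DEFINE_RATELIMIT_STATE(my_rs, 20 * HZ, 20);

static void my_debug_dump(const char *tag)
{
	/* ___ratelimit() returns nonzero while the budget lasts and
	 * logs a "callbacks suppressed" notice when the window rolls. */
	if (!___ratelimit(&my_rs, tag))
		return;
	pr_debug("%s: message dump goes here\n", tag);
}

Unlike the old global timestamp, a per-site state keeps one noisy caller from silencing diagnostics elsewhere.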
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 76792707f995..aa41485bc594 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -194,13 +194,13 @@ int gigaset_get_channel(struct bc_state *bcs)
 		gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
 			bcs->channel);
 		spin_unlock_irqrestore(&bcs->cs->lock, flags);
-		return 0;
+		return -EBUSY;
 	}
 	++bcs->use_count;
 	bcs->busy = 1;
 	gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
 	spin_unlock_irqrestore(&bcs->cs->lock, flags);
-	return 1;
+	return 0;
 }
 
 struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
@@ -258,7 +258,7 @@ int gigaset_get_channels(struct cardstate *cs)
 			spin_unlock_irqrestore(&cs->lock, flags);
 			gig_dbg(DEBUG_CHANNEL,
 				"could not allocate all channels");
-			return 0;
+			return -EBUSY;
 		}
 	for (i = 0; i < cs->channels; ++i)
 		++cs->bcs[i].use_count;
@@ -266,7 +266,7 @@ int gigaset_get_channels(struct cardstate *cs)
 
 	gig_dbg(DEBUG_CHANNEL, "allocated all channels");
 
-	return 1;
+	return 0;
 }
 
 void gigaset_free_channels(struct cardstate *cs)
@@ -362,7 +362,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
 }
 EXPORT_SYMBOL_GPL(gigaset_add_event);
 
-static void free_strings(struct at_state_t *at_state)
+static void clear_at_state(struct at_state_t *at_state)
 {
 	int i;
 
@@ -372,18 +372,13 @@ static void free_strings(struct at_state_t *at_state)
 	}
 }
 
-static void clear_at_state(struct at_state_t *at_state)
-{
-	free_strings(at_state);
-}
-
-static void dealloc_at_states(struct cardstate *cs)
+static void dealloc_temp_at_states(struct cardstate *cs)
 {
 	struct at_state_t *cur, *next;
 
 	list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
 		list_del(&cur->list);
-		free_strings(cur);
+		clear_at_state(cur);
 		kfree(cur);
 	}
 }
@@ -393,8 +388,7 @@ static void gigaset_freebcs(struct bc_state *bcs)
 	int i;
 
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
-	if (!bcs->cs->ops->freebcshw(bcs))
-		gig_dbg(DEBUG_INIT, "failed");
+	bcs->cs->ops->freebcshw(bcs);
 
 	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
 	clear_at_state(&bcs->at_state);
@@ -512,7 +506,7 @@ void gigaset_freecs(struct cardstate *cs)
 	case 1: /* error when registering to LL */
 		gig_dbg(DEBUG_INIT, "clearing at_state");
 		clear_at_state(&cs->at_state);
-		dealloc_at_states(cs);
+		dealloc_temp_at_states(cs);
 
 		/* fall through */
 	case 0: /* error in basic setup */
@@ -571,6 +565,8 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
  * @inbuf: buffer structure.
  * @src: received data.
  * @numbytes: number of bytes received.
+ *
+ * Return value: !=0 if some data was appended
  */
 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
 		       unsigned numbytes)
@@ -614,8 +610,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
 EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
 
 /* Initialize the b-channel structure */
-static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
-					struct cardstate *cs, int channel)
+static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
+			   int channel)
 {
 	int i;
 
@@ -654,11 +650,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
 	bcs->apconnstate = 0;
 
 	gig_dbg(DEBUG_INIT, "  setting up bcs[%d]->hw", channel);
-	if (cs->ops->initbcshw(bcs))
-		return bcs;
-
-	gig_dbg(DEBUG_INIT, "  failed");
-	return NULL;
+	return cs->ops->initbcshw(bcs);
 }
 
 /**
@@ -757,7 +749,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	cs->cmdbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up iif");
-	if (!gigaset_isdn_regdev(cs, modulename)) {
+	if (gigaset_isdn_regdev(cs, modulename) < 0) {
 		pr_err("error registering ISDN device\n");
 		goto error;
 	}
@@ -765,7 +757,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	make_valid(cs, VALID_ID);
 	++cs->cs_init;
 	gig_dbg(DEBUG_INIT, "setting up hw");
-	if (!cs->ops->initcshw(cs))
+	if (cs->ops->initcshw(cs) < 0)
 		goto error;
 
 	++cs->cs_init;
@@ -779,7 +771,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
 	/* set up channel data structures */
 	for (i = 0; i < channels; ++i) {
 		gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
-		if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
+		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
 			pr_err("could not allocate channel %d data\n", i);
 			goto error;
 		}
@@ -848,8 +840,7 @@ static void cleanup_cs(struct cardstate *cs)
 	cs->mstate = MS_UNINITIALIZED;
 
 	clear_at_state(&cs->at_state);
-	dealloc_at_states(cs);
-	free_strings(&cs->at_state);
+	dealloc_temp_at_states(cs);
 	gigaset_at_init(&cs->at_state, NULL, cs, 0);
 
 	cs->inbuf->inputstate = INS_command;
@@ -875,7 +866,7 @@ static void cleanup_cs(struct cardstate *cs)
 
 	for (i = 0; i < cs->channels; ++i) {
 		gigaset_freebcs(cs->bcs + i);
-		if (!gigaset_initbcs(cs->bcs + i, cs, i))
+		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
 			pr_err("could not allocate channel %d data\n", i);
 	}
 
@@ -896,14 +887,14 @@ static void cleanup_cs(struct cardstate *cs)
  * waiting for completion of the initialization.
 *
 * Return value:
- *	1 - success, 0 - error
+ *	0 on success, error code < 0 on failure
 */
 int gigaset_start(struct cardstate *cs)
 {
 	unsigned long flags;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return 0;
+		return -EBUSY;
 
 	spin_lock_irqsave(&cs->lock, flags);
 	cs->connected = 1;
@@ -927,11 +918,11 @@ int gigaset_start(struct cardstate *cs)
 	wait_event(cs->waitqueue, !cs->waiting);
 
 	mutex_unlock(&cs->mutex);
-	return 1;
+	return 0;
 
 error:
 	mutex_unlock(&cs->mutex);
-	return 0;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(gigaset_start);
 
@@ -943,7 +934,7 @@ EXPORT_SYMBOL_GPL(gigaset_start);
  * waiting for completion of the shutdown.
 *
 * Return value:
- *	0 - success, -1 - error (no device associated)
+ *	0 - success, -ENODEV - error (no device associated)
 */
 int gigaset_shutdown(struct cardstate *cs)
 {
@@ -951,7 +942,7 @@ int gigaset_shutdown(struct cardstate *cs)
 
 	if (!(cs->flags & VALID_MINOR)) {
 		mutex_unlock(&cs->mutex);
-		return -1;
+		return -ENODEV;
 	}
 
 	cs->waiting = 1;
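
The gigaset changes above all follow one conversion: functions that used to return a boolean (nonzero on success) now return 0 on success and a negative errno on failure, so callers can propagate the cause. The shape of the conversion, as a self-contained sketch (struct my_channel and my_get_channel are illustrative, patterned on gigaset_get_channel() above):

#include <linux/errno.h>

struct my_channel {
	int use_count;
};

static int my_get_channel(struct my_channel *ch)
{
	if (ch->use_count)
		return -EBUSY;	/* was: return 0  (boolean "failed") */
	++ch->use_count;
	return 0;		/* was: return 1  (boolean "succeeded") */
}

/* Callers change from  if (!my_get_channel(ch))  to: */
static int my_caller(struct my_channel *ch)
{
	int rc = my_get_channel(ch);

	if (rc < 0)
		return rc;	/* propagate the specific errno */
	/* ... proceed ... */
	return 0;
}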
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
index 19b1c779d50f..570c2d53b84e 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -60,7 +60,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
 
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
-	return 1;
+	return 0;
 }
 
 void gigaset_isdn_unregdev(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 624a8256a77f..2e6963dc740e 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -153,103 +153,104 @@ struct reply_t gigaset_tab_nocid[] =
153 * action, command */ 153 * action, command */
154 154
155/* initialize device, set cid mode if possible */ 155/* initialize device, set cid mode if possible */
156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} }, 156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
157 157
158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}, 158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING}, 159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
160 "+GMR\r"}, 160 "+GMR\r"},
161 161
162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"}, 162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"}, 163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
164 164
165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1}, 165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
166 "^SDLE=0\r"}, 166 "^SDLE=0\r"},
167 {RSP_OK, 108, 108, -1, 104, -1}, 167 {RSP_OK, 108, 108, -1, 104, -1},
168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"}, 168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} }, 169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} }, 170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
171 171
172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0, 172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
173 ACT_HUPMODEM, 173 ACT_HUPMODEM,
174 ACT_TIMEOUT} }, 174 ACT_TIMEOUT} },
175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"}, 175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
176 176
177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"}, 177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} }, 178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
181 181
182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, 182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
     {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },

     {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },

     {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
                                       ACT_INIT} },
     {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
                                      ACT_INIT} },
     {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
                                   ACT_INIT} },
+    {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },

 /* leave dle mode */
     {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
     {RSP_OK, 201, 201, -1, 202, -1},
     {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
     {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
     {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
     {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },

 /* enter dle mode */
     {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
     {RSP_OK, 251, 251, -1, 252, -1},
     {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
     {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
     {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },

 /* incoming call */
     {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },

 /* get cid */
     {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
     {RSP_OK, 301, 301, -1, 302, -1},
     {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
     {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
     {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },

 /* enter cid mode */
     {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
     {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
     {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
     {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },

 /* leave cid mode */
     {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
     {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
     {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
     {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },

 /* abort getting cid */
     {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },

 /* reset */
     {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
     {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
     {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
     {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
     {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },

     {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
     {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
     {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
     {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
     {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
     {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },

 /* misc. */
     {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
     {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
     {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
     {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
     {RSP_LAST}
 };

@@ -261,90 +262,90 @@ struct reply_t gigaset_tab_cid[] =
  * action, command */

 /* dial */
     {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
     {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
     {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
     {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
     {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
     {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
     {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
     {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
     {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
     {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
     {RSP_OK, 608, 608, -1, 609, -1},
     {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
     {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },

     {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
     {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },

 /* optional dialing responses */
     {EV_BC_OPEN, 650, 650, -1, 651, -1},
     {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
     {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
     {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
     {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },

 /* connect */
     {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
     {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
                                                 ACT_NOTIFY_BC_UP} },
     {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
     {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
                                                 ACT_NOTIFY_BC_UP} },
     {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },

 /* remote hangup */
     {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
     {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
     {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },

 /* hangup */
     {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
     {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
     {RSP_OK, 401, 401, -1, 402, 5},
     {RSP_ZVLS, 402, 402, 0, 403, 5},
     {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
     {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
     {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
     {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
     {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },

     {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },

 /* ring */
     {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
     {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
     {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
     {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
     {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
     {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
     {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },

 /*accept icall*/
     {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
     {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
     {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
     {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
     {RSP_OK, 723, 723, -1, 724, 5, {0} },
     {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
     {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
     {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
     {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
     {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
     {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },

     {EV_BC_OPEN, 750, 750, -1, 751, -1},
     {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },

 /* B channel closed (general case) */
     {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },

 /* misc. */
     {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
     {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
     {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
     {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
     {RSP_LAST}
 };

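The two gigaset_tab_* tables above are the driver's entire AT-response state machine: each row names a response or event, the connection-state window in which it applies, an optional parameter filter, the state to enter, a timeout, and the actions to run. As a reading aid, here is a minimal, self-contained sketch of the row-matching step; the field names follow the reply_t layout in gigaset.h, but match_row() is a simplified stand-in for the real engine in ev-layer.c, which also queues the row's actions, emits its command string and arms the timeout.

#define MAXACT 3
#define RSP_LAST -2	/* stand-in terminator; the driver defines its own */

/* mirrors struct reply_t from gigaset.h */
struct reply_t {
	int resp_code;		/* RSP_XXX response or EV_XXX event */
	int min_ConState;	/* -1: any connection state */
	int max_ConState;
	int parameter;		/* -1: any parameter */
	int new_ConState;	/* 0: leave the state unchanged */
	int timeout;		/* timeout for the new state (0: none) */
	int action[MAXACT];	/* ACT_XXX actions, executed in order */
	char *command;		/* AT command to emit, or NULL */
};

/* return the first row matching (code, state, parameter) */
static const struct reply_t *
match_row(const struct reply_t *tab, int code, int state, int param)
{
	const struct reply_t *rep;

	for (rep = tab; rep->resp_code != RSP_LAST; rep++) {
		if (rep->resp_code != code)
			continue;
		if (rep->min_ConState != -1 &&
		    (state < rep->min_ConState || state > rep->max_ConState))
			continue;
		if (rep->parameter != -1 && rep->parameter != param)
			continue;
		return rep;
	}
	return NULL;	/* the catch-all RSP_ANY row usually fires first */
}

Read against the tables: an OK in state 121 matches the {RSP_OK, 121, 121, ...} row, runs ACT_GOTVER and ACT_INIT and drops back to state 0, while the {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} } row added by this patch re-issues the version query when no response arrives at all.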
@@ -648,16 +649,16 @@ static void disconnect(struct at_state_t **at_state_p)
 static inline struct at_state_t *get_free_channel(struct cardstate *cs,
                                                   int cid)
 /* cids: >0: siemens-cid
-         0: without cid
-        -1: no cid assigned yet
-*/
+ *       0: without cid
+ *      -1: no cid assigned yet
+ */
 {
     unsigned long flags;
     int i;
     struct at_state_t *ret;

     for (i = 0; i < cs->channels; ++i)
-        if (gigaset_get_channel(cs->bcs + i)) {
+        if (gigaset_get_channel(cs->bcs + i) >= 0) {
             ret = &cs->bcs[i].at_state;
             ret->cid = cid;
             return ret;
@@ -922,18 +923,18 @@ static void do_stop(struct cardstate *cs)
  * channel >= 0: getting cid for the channel failed
  * channel < 0: entering cid mode failed
  *
- * returns 0 on failure
+ * returns 0 on success, <0 on failure
  */
 static int reinit_and_retry(struct cardstate *cs, int channel)
 {
     int i;

     if (--cs->retry_count <= 0)
-        return 0;
+        return -EFAULT;

     for (i = 0; i < cs->channels; ++i)
         if (cs->bcs[i].at_state.cid > 0)
-            return 0;
+            return -EBUSY;

     if (channel < 0)
         dev_warn(cs->dev,
@@ -944,7 +945,7 @@ static int reinit_and_retry(struct cardstate *cs, int channel)
         cs->bcs[channel].at_state.pending_commands |= PC_CID;
     }
     schedule_init(cs, MS_INIT);
-    return 1;
+    return 0;
 }

 static int at_state_invalid(struct cardstate *cs,
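Several gigaset functions in this series flip from the old boolean convention (non-zero meaning success) to the kernel's usual 0/-errno convention, which is why every call-site test inverts from `if (!f())` to `if (f() < 0)` throughout the patch. A contrived before/after sketch of the pattern; the function names are hypothetical:

/* old style: returns 1 on success, 0 on failure */
if (!do_thing_old(cs))
	handle_failure(cs);

/* new style: returns 0 on success, -EBUSY/-EFAULT/... on failure */
ret = do_thing_new(cs);
if (ret < 0)
	handle_failure(cs);

Beyond uniformity, the new style lets the specific error code travel up to the caller instead of being flattened to a yes/no answer.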
@@ -1015,7 +1016,7 @@ static int do_lock(struct cardstate *cs)
         if (cs->bcs[i].at_state.pending_commands)
             return -EBUSY;

-    if (!gigaset_get_channels(cs))
+    if (gigaset_get_channels(cs) < 0)
         return -EBUSY;

     break;
@@ -1124,7 +1125,7 @@ static void do_action(int action, struct cardstate *cs,
             init_failed(cs, M_UNKNOWN);
             break;
         }
-        if (!reinit_and_retry(cs, -1))
+        if (reinit_and_retry(cs, -1) < 0)
             schedule_init(cs, MS_RECOVER);
         break;
     case ACT_FAILUMODE:
@@ -1267,7 +1268,7 @@ static void do_action(int action, struct cardstate *cs,
     case ACT_FAILCID:
         cs->cur_at_seq = SEQ_NONE;
         channel = cs->curchannel;
-        if (!reinit_and_retry(cs, channel)) {
+        if (reinit_and_retry(cs, channel) < 0) {
             dev_warn(cs->dev,
                      "Could not get a call ID. Cannot dial.\n");
             at_state2 = &cs->bcs[channel].at_state;
@@ -1314,8 +1315,9 @@ static void do_action(int action, struct cardstate *cs,
         s = ev->ptr;

         if (!strcmp(s, "OK")) {
+            /* OK without version string: assume old response */
             *p_genresp = 1;
-            *p_resp_code = RSP_ERROR;
+            *p_resp_code = RSP_NONE;
             break;
         }

@@ -1372,7 +1374,8 @@ static void do_action(int action, struct cardstate *cs,
              ev->parameter, at_state->ConState);
         break;

     /* events from the LL */
+
     case ACT_DIAL:
         start_dial(at_state, ev->ptr, ev->parameter);
         break;
@@ -1385,7 +1388,8 @@ static void do_action(int action, struct cardstate *cs,
         cs->commands_pending = 1;
         break;

     /* hotplug events */
+
     case ACT_STOP:
         do_stop(cs);
         break;
@@ -1393,7 +1397,8 @@ static void do_action(int action, struct cardstate *cs,
         do_start(cs);
         break;

     /* events from the interface */
+
     case ACT_IF_LOCK:
         cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
         cs->waiting = 0;
@@ -1412,7 +1417,8 @@ static void do_action(int action, struct cardstate *cs,
         wake_up(&cs->waitqueue);
         break;

     /* events from the proc file system */
+
     case ACT_PROC_CIDMODE:
         spin_lock_irqsave(&cs->lock, flags);
         if (ev->parameter != cs->cidmode) {
@@ -1431,7 +1437,8 @@ static void do_action(int action, struct cardstate *cs,
         wake_up(&cs->waitqueue);
         break;

     /* events from the hardware drivers */
+
     case ACT_NOTIFY_BC_DOWN:
         bchannel_down(bcs);
         break;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 1dc25131e670..8e2fc8f31d16 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -163,8 +163,8 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
 #define BAS_LOWFRAME	5	/* " " with negative flow control */
 #define BAS_CORRFRAMES	4	/* flow control multiplicator */

-#define BAS_INBUFSIZE	(BAS_MAXFRAME * BAS_NUMFRAMES)
-				/* size of isoc in buf per URB */
+#define BAS_INBUFSIZE	(BAS_MAXFRAME * BAS_NUMFRAMES)	/* size of isoc in buf
+							 * per URB */
 #define BAS_OUTBUFSIZE	4096		/* size of common isoc out buffer */
 #define BAS_OUTBUFPAD	BAS_MAXFRAME	/* size of pad area for isoc out buf */

@@ -471,18 +471,18 @@ struct cardstate {
                                        for */
     int commands_pending;	/* flag(s) in xxx.commands_pending have
                                    been set */
-    struct tasklet_struct event_tasklet;
-            /* tasklet for serializing AT commands.
+    struct tasklet_struct
+        event_tasklet;		/* tasklet for serializing AT commands.
                                  * Scheduled
                                  *   -> for modem reponses (and
                                  *      incoming data for M10x)
                                  *   -> on timeout
                                  *   -> after setting bits in
                                  *      xxx.at_state.pending_command
                                  *      (e.g. command from LL) */
-    struct tasklet_struct write_tasklet;
-            /* tasklet for serial output
+    struct tasklet_struct
+        write_tasklet;		/* tasklet for serial output
                                  * (not used in base driver) */

     /* event queue */
     struct event_t events[MAX_EVENTS];
@@ -583,7 +583,7 @@ struct gigaset_ops {
     int (*initbcshw)(struct bc_state *bcs);

     /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
-    int (*freebcshw)(struct bc_state *bcs);
+    void (*freebcshw)(struct bc_state *bcs);

     /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
     void (*reinitbcshw)(struct bc_state *bcs);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 0f13eb1de657..2d75329007f1 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -229,7 +229,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
         return -EINVAL;
     }
     bcs = cs->bcs + ch;
-    if (!gigaset_get_channel(bcs)) {
+    if (gigaset_get_channel(bcs) < 0) {
         dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
         return -EBUSY;
     }
@@ -618,7 +618,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
  * @cs: device descriptor structure.
  * @isdnid: device name.
  *
- * Return value: 1 for success, 0 for failure
+ * Return value: 0 on success, error code < 0 on failure
  */
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
@@ -627,14 +627,14 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
     iif = kmalloc(sizeof *iif, GFP_KERNEL);
     if (!iif) {
         pr_err("out of memory\n");
-        return 0;
+        return -ENOMEM;
     }

     if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
         >= sizeof iif->id) {
         pr_err("ID too long: %s\n", isdnid);
         kfree(iif);
-        return 0;
+        return -EINVAL;
     }

     iif->owner = THIS_MODULE;
@@ -656,13 +656,13 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
     if (!register_isdn(iif)) {
         pr_err("register_isdn failed\n");
         kfree(iif);
-        return 0;
+        return -EINVAL;
     }

     cs->iif = iif;
     cs->myid = iif->channels;	/* Set my device id */
     cs->hw_hdr_len = HW_HDR_LEN;
-    return 1;
+    return 0;
 }

 /**
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index a351c16705bd..bc29f1d52a2f 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -56,7 +56,7 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)

 /* start writing
  * acquire the write semaphore
- * return true if acquired, false if busy
+ * return 0 if acquired, <0 if busy
  */
 static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
 {
@@ -64,12 +64,12 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
         atomic_inc(&iwb->writesem);
         gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
                 __func__);
-        return 0;
+        return -EBUSY;
     }
     gig_dbg(DEBUG_ISO,
             "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
             __func__, iwb->data[iwb->write], iwb->wbits);
-    return 1;
+    return 0;
 }

 /* finish writing
@@ -158,7 +158,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
     /* no wraparound in valid data */
     if (limit >= write) {
         /* append idle frame */
-        if (!isowbuf_startwrite(iwb))
+        if (isowbuf_startwrite(iwb) < 0)
             return -EBUSY;
         /* write position could have changed */
         write = iwb->write;
@@ -403,7 +403,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
     unsigned char c;

     if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
-        !isowbuf_startwrite(iwb)) {
+        isowbuf_startwrite(iwb) < 0) {
         gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
                 __func__, isowbuf_freebytes(iwb));
         return -EAGAIN;
@@ -457,7 +457,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
         return iwb->write;

     if (isowbuf_freebytes(iwb) < count ||
-        !isowbuf_startwrite(iwb)) {
+        isowbuf_startwrite(iwb) < 0) {
         gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
         return -EAGAIN;
     }
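isowbuf_startwrite() keeps its non-blocking atomic-counter implementation and only changes its reported result. The acquire/rollback pattern can be inferred from the `atomic_inc()` visible in the failure branch above; the acquire via `atomic_dec_and_test()` in this sketch is an assumption based on that rollback:

#include <linux/atomic.h>
#include <linux/errno.h>

/* writesem == 1: buffer free; == 0: a writer is currently active */
static inline int startwrite_sketch(atomic_t *writesem)
{
	if (!atomic_dec_and_test(writesem)) {
		/* lost the race: undo our decrement and report busy */
		atomic_inc(writesem);
		return -EBUSY;
	}
	return 0;	/* acquired; a matching donewrite releases it */
}

Because the counter never blocks, callers such as hdlc_buildframe() and trans_buildframe() simply translate the busy case into -EAGAIN and let the caller retry.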
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 6f3fd4cf4378..8c91fd5eb6fd 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -340,17 +340,16 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 {
     /* unused */
     bcs->hw.ser = NULL;
-    return 1;
+    return 0;
 }

 /*
  * Free B channel structure
  * Called by "gigaset_freebcs" in common.c
  */
-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
     /* unused */
-    return 1;
 }

 /*
@@ -398,7 +397,7 @@ static int gigaset_initcshw(struct cardstate *cs)
     scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
     if (!scs) {
         pr_err("out of memory\n");
-        return 0;
+        return -ENOMEM;
     }
     cs->hw.ser = scs;

@@ -410,13 +409,13 @@ static int gigaset_initcshw(struct cardstate *cs)
         pr_err("error %d registering platform device\n", rc);
         kfree(cs->hw.ser);
         cs->hw.ser = NULL;
-        return 0;
+        return rc;
     }
     dev_set_drvdata(&cs->hw.ser->dev.dev, cs);

     tasklet_init(&cs->write_tasklet,
                  gigaset_modem_fill, (unsigned long) cs);
-    return 1;
+    return 0;
 }

 /*
@@ -503,6 +502,7 @@ static int
 gigaset_tty_open(struct tty_struct *tty)
 {
     struct cardstate *cs;
+    int rc;

     gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");

@@ -515,8 +515,10 @@ gigaset_tty_open(struct tty_struct *tty)

     /* allocate memory for our device state and initialize it */
     cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
-    if (!cs)
+    if (!cs) {
+        rc = -ENODEV;
         goto error;
+    }

     cs->dev = &cs->hw.ser->dev.dev;
     cs->hw.ser->tty = tty;
@@ -530,7 +532,8 @@ gigaset_tty_open(struct tty_struct *tty)
      */
     if (startmode == SM_LOCKED)
         cs->mstate = MS_LOCKED;
-    if (!gigaset_start(cs)) {
+    rc = gigaset_start(cs);
+    if (rc < 0) {
         tasklet_kill(&cs->write_tasklet);
         goto error;
     }
@@ -542,7 +545,7 @@ error:
     gig_dbg(DEBUG_INIT, "Startup of HLL failed");
     tty->disc_data = NULL;
     gigaset_freecs(cs);
-    return -ENODEV;
+    return rc;
 }

 /*
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 049da67f6392..bb12d8051732 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -549,10 +549,9 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
                            0, 0, &buf, 6, 2000);
 }

-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
     /* unused */
-    return 1;
 }

 /* Initialize the b-channel structure */
@@ -560,7 +559,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
 {
     /* unused */
     bcs->hw.usb = NULL;
-    return 1;
+    return 0;
 }

 static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -582,7 +581,7 @@ static int gigaset_initcshw(struct cardstate *cs)
         kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
     if (!ucs) {
         pr_err("out of memory\n");
-        return 0;
+        return -ENOMEM;
     }

     ucs->bchars[0] = 0;
@@ -597,7 +596,7 @@ static int gigaset_initcshw(struct cardstate *cs)
     tasklet_init(&cs->write_tasklet,
                  gigaset_modem_fill, (unsigned long) cs);

-    return 1;
+    return 0;
 }

 /* Send data from current skb to the device. */
@@ -766,9 +765,9 @@ static int gigaset_probe(struct usb_interface *interface,
     if (startmode == SM_LOCKED)
         cs->mstate = MS_LOCKED;

-    if (!gigaset_start(cs)) {
+    retval = gigaset_start(cs);
+    if (retval < 0) {
         tasklet_kill(&cs->write_tasklet);
-        retval = -ENODEV;
         goto error;
     }
     return 0;
@@ -898,8 +897,10 @@ static int __init usb_gigaset_init(void)
     driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
                                 GIGASET_MODULENAME, GIGASET_DEVNAME,
                                 &ops, THIS_MODULE);
-    if (driver == NULL)
+    if (driver == NULL) {
+        result = -ENOMEM;
         goto error;
+    }

     /* register this driver with the USB subsystem */
     result = usb_register(&gigaset_usb_driver);
@@ -915,7 +916,7 @@ error:
     if (driver)
         gigaset_freedriver(driver);
     driver = NULL;
-    return -1;
+    return result;
 }

 /*
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index c0b8c960ee3f..6bf2c58795a3 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -868,7 +868,7 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = MISDN_CTRL_LOOP;
+        cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_LOOP:
         /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -878,6 +878,9 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
         }
         ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
+        break;
     default:
         pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
         ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 43013316b9b0..4c128e4bb5cf 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4161,7 +4161,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = MISDN_CTRL_HFC_OP;
+        cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
         wd_cnt = cq->p1 & 0xf;
@@ -4191,6 +4191,9 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
                    __func__);
         HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
+        break;
     default:
         printk(KERN_WARNING "%s: unknown Op %x\n",
                __func__, cq->op);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e2c83a2d7691..5fe993e2dee9 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1819,7 +1819,7 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
     switch (cq->op) {
     case MISDN_CTRL_GETOP:
         cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
-                 MISDN_CTRL_DISCONNECT;
+                 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_LOOP:
         /* channel 0 disabled loop */
@@ -1896,6 +1896,9 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
         Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
         hc->hw.trm &= 0x7f;	/* disable IOM-loop */
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
+        break;
     default:
         printk(KERN_WARNING "%s: unknown Op %x\n",
                __func__, cq->op);
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 884369f09cad..92d4a78bc0a5 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -603,10 +603,11 @@ isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb)
 }

 static int
-isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para)
+isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para)
 {
     u8 tl = 0;
-    u_long flags;
+    unsigned long flags;
+    int ret = 0;

     switch (cmd) {
     case HW_TESTLOOP:
@@ -626,12 +627,15 @@ isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para)
         }
         spin_unlock_irqrestore(isac->hwlock, flags);
         break;
+    case HW_TIMER3_VALUE:
+        ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff));
+        break;
     default:
         pr_debug("%s: %s unknown command %x %lx\n", isac->name,
                  __func__, cmd, para);
-        return -1;
+        ret = -1;
     }
-    return 0;
+    return ret;
 }

 static int
@@ -1526,7 +1530,7 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = MISDN_CTRL_LOOP;
+        cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_LOOP:
         /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -1536,6 +1540,9 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
         }
         ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
+        break;
     default:
         pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
         ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index c726e09d0981..27998d7188a5 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -837,7 +837,7 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = MISDN_CTRL_LOOP;
+        cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_LOOP:
         /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -847,6 +847,9 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
         }
         ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
+        break;
     default:
         pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
         ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 04689935148b..93f344d74e54 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -224,7 +224,7 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = MISDN_CTRL_LOOP;
+        cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
         break;
     case MISDN_CTRL_LOOP:
         /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -234,6 +234,9 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
         }
         ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
         break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
+        break;
     default:
         pr_info("%s: unknown Op %x\n", sf->name, cq->op);
         ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 2183357f0799..1d044670ff66 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1035,7 +1035,10 @@ channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq)

     switch (cq->op) {
     case MISDN_CTRL_GETOP:
-        cq->op = 0;
+        cq->op = MISDN_CTRL_L1_TIMER3;
+        break;
+    case MISDN_CTRL_L1_TIMER3:
+        ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
         break;
     default:
         pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
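With this series every hardware driver above advertises MISDN_CTRL_L1_TIMER3 in its MISDN_CTRL_GETOP answer and forwards the request value (cq->p1, the T3 supervision time) to its layer-1 code. From userspace the op would be issued through a mISDN control request; the sketch below assumes the IMCTRLREQ ioctl and the mISDN_ctrl_req layout from mISDNif.h, so treat the exact header path and invocation as assumptions:

#include <stdio.h>
#include <sys/ioctl.h>
#include <mISDN/mISDNif.h>	/* struct mISDN_ctrl_req, IMCTRLREQ */

/* request a layer-1 T3 supervision time on an open D-channel socket */
static int set_l1_timer3(int dsock, unsigned int seconds)
{
	struct mISDN_ctrl_req creq = {
		.op = MISDN_CTRL_L1_TIMER3,
		.p1 = seconds,	/* layer1 clamps this to the 5..30 range */
	};

	if (ioctl(dsock, IMCTRLREQ, &creq) < 0) {
		perror("IMCTRLREQ");
		return -1;
	}
	return 0;
}

Drivers that own an ISAC-style chip route the op through their chip ctrl hook (HW_TIMER3_VALUE), while the HFC and W6692 drivers call l1_event() directly; either way the value ends up in the layer1 state machine shown further down.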
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a24530f05db0..c401634c00ec 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -355,6 +355,22 @@ mISDN_unregister_Bprotocol(struct Bprotocol *bp)
 }
 EXPORT_SYMBOL(mISDN_unregister_Bprotocol);

+static const char *msg_no_channel = "<no channel>";
+static const char *msg_no_stack = "<no stack>";
+static const char *msg_no_stackdev = "<no stack device>";
+
+const char *mISDNDevName4ch(struct mISDNchannel *ch)
+{
+    if (!ch)
+        return msg_no_channel;
+    if (!ch->st)
+        return msg_no_stack;
+    if (!ch->st->dev)
+        return msg_no_stackdev;
+    return dev_name(&ch->st->dev->dev);
+};
+EXPORT_SYMBOL(mISDNDevName4ch);
+
 static int
 mISDNInit(void)
 {
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 0fc49b375514..bebc57b72138 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -28,13 +28,15 @@ static u_int *debug;
 struct layer1 {
     u_long Flags;
     struct FsmInst l1m;
-    struct FsmTimer timer;
+    struct FsmTimer timer3;
+    struct FsmTimer timerX;
     int delay;
+    int t3_value;
     struct dchannel *dch;
     dchannel_l1callback *dcb;
 };

-#define TIMER3_VALUE 7000
+#define TIMER3_DEFAULT_VALUE 7000

 static
 struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
@@ -134,7 +136,7 @@ l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
     struct layer1 *l1 = fi->userdata;

     mISDN_FsmChangeState(fi, ST_L1_F3);
-    mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2);
+    mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
     test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
 }

@@ -179,11 +181,11 @@ l1_info4_ind(struct FsmInst *fi, int event, void *arg)
     mISDN_FsmChangeState(fi, ST_L1_F7);
     l1->dcb(l1->dch, INFO3_P8);
     if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
-        mISDN_FsmDelTimer(&l1->timer, 4);
+        mISDN_FsmDelTimer(&l1->timerX, 4);
     if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
         if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
-            mISDN_FsmDelTimer(&l1->timer, 3);
-        mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2);
+            mISDN_FsmDelTimer(&l1->timer3, 3);
+        mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
         test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
     }
 }
@@ -201,7 +203,7 @@ l1_timer3(struct FsmInst *fi, int event, void *arg)
     }
     if (l1->l1m.state != ST_L1_F6) {
         mISDN_FsmChangeState(fi, ST_L1_F3);
-        l1->dcb(l1->dch, HW_POWERUP_REQ);
+        /* do not force anything here, we need send INFO 0 */
     }
 }

@@ -233,8 +235,9 @@ l1_activate_s(struct FsmInst *fi, int event, void *arg)
 {
     struct layer1 *l1 = fi->userdata;

-    mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
+    mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
     test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
+    /* Tell HW to send INFO 1 */
     l1->dcb(l1->dch, HW_RESET_REQ);
 }

@@ -302,7 +305,8 @@ static struct FsmNode L1SFnList[] =

 static void
 release_l1(struct layer1 *l1) {
-    mISDN_FsmDelTimer(&l1->timer, 0);
+    mISDN_FsmDelTimer(&l1->timerX, 0);
+    mISDN_FsmDelTimer(&l1->timer3, 0);
     if (l1->dch)
         l1->dch->l1 = NULL;
     module_put(THIS_MODULE);
@@ -356,6 +360,16 @@ l1_event(struct layer1 *l1, u_int event)
         release_l1(l1);
         break;
     default:
+        if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
+            int val = event & HW_TIMER3_VMASK;
+
+            if (val < 5)
+                val = 5;
+            if (val > 30)
+                val = 30;
+            l1->t3_value = val;
+            break;
+        }
         if (*debug & DEBUG_L1)
             printk(KERN_DEBUG "%s %x unhandled\n",
                    __func__, event);
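The default: branch above doubles as the decoder for the new parametrized event: the request code and its value travel in a single word, split by HW_TIMER3_VMASK, and out-of-range values are clamped rather than rejected. A worked round trip with stand-in mask values (the real definitions live in the mISDN headers, so the constants here are illustrative only):

#include <stdio.h>

#define HW_TIMER3_VALUE	0x0300	/* stand-in: the real value differs */
#define HW_TIMER3_VMASK	0x00FF	/* stand-in: low bits carry the value */

int main(void)
{
	/* encode a request for 40, which is out of range */
	unsigned int event = HW_TIMER3_VALUE | (40 & HW_TIMER3_VMASK);

	/* decode, as l1_event() does in its default branch */
	if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
		unsigned int val = event & HW_TIMER3_VMASK;

		if (val < 5)
			val = 5;
		if (val > 30)
			val = 30;
		printf("t3_value = %u\n", val);	/* 40 clamps to 30 */
	}
	return 0;
}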
@@ -377,13 +391,15 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
     nl1->l1m.fsm = &l1fsm_s;
     nl1->l1m.state = ST_L1_F3;
     nl1->Flags = 0;
+    nl1->t3_value = TIMER3_DEFAULT_VALUE;
     nl1->l1m.debug = *debug & DEBUG_L1_FSM;
     nl1->l1m.userdata = nl1;
     nl1->l1m.userint = 0;
     nl1->l1m.printdebug = l1m_debug;
     nl1->dch = dch;
     nl1->dcb = dcb;
-    mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer);
+    mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
+    mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
     __module_get(THIS_MODULE);
     dch->l1 = nl1;
     return 0;
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 39d7375fa551..0dc8abca1407 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -58,6 +58,8 @@ enum {
     EV_L1_DEACTIVATE,
     EV_L2_T200,
     EV_L2_T203,
+    EV_L2_T200I,
+    EV_L2_T203I,
     EV_L2_SET_OWN_BUSY,
     EV_L2_CLEAR_OWN_BUSY,
     EV_L2_FRAME_ERROR,
@@ -86,6 +88,8 @@ static char *strL2Event[] =
86 "EV_L1_DEACTIVATE", 88 "EV_L1_DEACTIVATE",
87 "EV_L2_T200", 89 "EV_L2_T200",
88 "EV_L2_T203", 90 "EV_L2_T203",
91 "EV_L2_T200I",
92 "EV_L2_T203I",
89 "EV_L2_SET_OWN_BUSY", 93 "EV_L2_SET_OWN_BUSY",
90 "EV_L2_CLEAR_OWN_BUSY", 94 "EV_L2_CLEAR_OWN_BUSY",
91 "EV_L2_FRAME_ERROR", 95 "EV_L2_FRAME_ERROR",
@@ -106,8 +110,8 @@ l2m_debug(struct FsmInst *fi, char *fmt, ...)
     vaf.fmt = fmt;
     vaf.va = &va;

-    printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
-           l2->sapi, l2->tei, &vaf);
+    printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
+           mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);

     va_end(va);
 }
@@ -150,7 +154,8 @@ l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
     mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
     err = l2->up->send(l2->up, skb);
     if (err) {
-        printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+        printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
+               mISDNDevName4ch(&l2->ch), err);
         dev_kfree_skb(skb);
     }
 }
@@ -174,7 +179,8 @@ l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
     memcpy(skb_put(skb, len), arg, len);
     err = l2->up->send(l2->up, skb);
     if (err) {
-        printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+        printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
+               mISDNDevName4ch(&l2->ch), err);
         dev_kfree_skb(skb);
     }
 }
@@ -185,7 +191,8 @@ l2down_skb(struct layer2 *l2, struct sk_buff *skb) {

     ret = l2->ch.recv(l2->ch.peer, skb);
     if (ret && (*debug & DEBUG_L2_RECV))
-        printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
+        printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
+               mISDNDevName4ch(&l2->ch), ret);
     return ret;
 }

@@ -276,12 +283,37 @@ ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
     return ret;
 }

+static void
+l2_timeout(struct FsmInst *fi, int event, void *arg)
+{
+    struct layer2 *l2 = fi->userdata;
+    struct sk_buff *skb;
+    struct mISDNhead *hh;
+
+    skb = mI_alloc_skb(0, GFP_ATOMIC);
+    if (!skb) {
+        printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
+               mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
+               l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
+        return;
+    }
+    hh = mISDN_HEAD_P(skb);
+    hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
+    hh->id = l2->ch.nr;
+    if (*debug & DEBUG_TIMER)
+        printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
+               mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
+               l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
+    if (l2->ch.st)
+        l2->ch.st->own.recv(&l2->ch.st->own, skb);
+}
+
 static int
 l2mgr(struct layer2 *l2, u_int prim, void *arg) {
     long c = (long)arg;

-    printk(KERN_WARNING
-           "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
+    printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
+           mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
     if (test_bit(FLG_LAPD, &l2->flag) &&
         !test_bit(FLG_FIXED_TEI, &l2->flag)) {
         switch (c) {
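Design note on the hunk above: instead of running the layer-2 FSM straight from timer context, l2_timeout() wraps each expiry in a zero-length skb carrying DL_TIMER200_IND or DL_TIMER203_IND and feeds it back through the stack's own receive hook, so timer handling is serialized with ordinary frame processing. The l2_send() hunk further down completes the loop by mapping those indications onto the new internal events; timer_ind() below is a hypothetical wrapper for illustration, since the real code inlines this in l2_send()'s switch:

/* receive side of the loop-back, as added to l2_send() below */
static void timer_ind(struct layer2 *l2, unsigned int prim)
{
	if (prim == DL_TIMER200_IND)
		mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
	else if (prim == DL_TIMER203_IND)
		mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
}

The FsmNode table change below matches: the raw T200/T203 expirations now all land in l2_timeout(), while the original per-state handlers are re-attached to the indirected T200I/T203I events.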
@@ -603,8 +635,8 @@ send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
     else {
         skb = mI_alloc_skb(i, GFP_ATOMIC);
         if (!skb) {
-            printk(KERN_WARNING "%s: can't alloc skbuff\n",
-                   __func__);
+            printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
+                   mISDNDevName4ch(&l2->ch), __func__);
             return;
         }
     }
@@ -1089,8 +1121,8 @@ enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
     tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
     skb = mI_alloc_skb(i, GFP_ATOMIC);
     if (!skb) {
-        printk(KERN_WARNING
-               "isdnl2 can't alloc sbbuff for enquiry_cr\n");
+        printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
+               mISDNDevName4ch(&l2->ch), __func__);
         return;
     }
     memcpy(skb_put(skb, i), tmp, i);
@@ -1150,7 +1182,7 @@ invoke_retransmission(struct layer2 *l2, unsigned int nr)
         else
             printk(KERN_WARNING
                    "%s: windowar[%d] is NULL\n",
-                   __func__, p1);
+                   mISDNDevName4ch(&l2->ch), p1);
         l2->windowar[p1] = NULL;
     }
     mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
@@ -1461,8 +1493,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
     p1 = (l2->vs - l2->va) % 8;
     p1 = (p1 + l2->sow) % l2->window;
     if (l2->windowar[p1]) {
-        printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
-               p1);
+        printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
+               mISDNDevName4ch(&l2->ch), p1);
         dev_kfree_skb(l2->windowar[p1]);
     }
     l2->windowar[p1] = skb;
@@ -1482,12 +1514,14 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
         memcpy(skb_push(nskb, i), header, i);
     else {
         printk(KERN_WARNING
-               "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
+               "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
+               mISDNDevName4ch(&l2->ch), i, p1);
         oskb = nskb;
         nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
         if (!nskb) {
             dev_kfree_skb(oskb);
-            printk(KERN_WARNING "%s: no skb mem\n", __func__);
+            printk(KERN_WARNING "%s: no skb mem in %s\n",
+                   mISDNDevName4ch(&l2->ch), __func__);
             return;
         }
         memcpy(skb_put(nskb, i), header, i);
@@ -1814,11 +1848,16 @@ static struct FsmNode L2FnList[] =
     {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
     {ST_L2_7, EV_L2_I, l2_got_iframe},
     {ST_L2_8, EV_L2_I, l2_got_iframe},
-    {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
-    {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
-    {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
-    {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
-    {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
+    {ST_L2_5, EV_L2_T200, l2_timeout},
+    {ST_L2_6, EV_L2_T200, l2_timeout},
+    {ST_L2_7, EV_L2_T200, l2_timeout},
+    {ST_L2_8, EV_L2_T200, l2_timeout},
+    {ST_L2_7, EV_L2_T203, l2_timeout},
+    {ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
+    {ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
+    {ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
+    {ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
+    {ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
     {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
     {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
     {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
@@ -1858,7 +1897,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
     ptei = *datap++;
     if ((psapi & 1) || !(ptei & 1)) {
         printk(KERN_WARNING
-               "l2 D-channel frame wrong EA0/EA1\n");
+               "%s l2 D-channel frame wrong EA0/EA1\n",
+               mISDNDevName4ch(&l2->ch));
         return ret;
     }
     psapi >>= 2;
@@ -1867,7 +1907,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
         /* not our business */
         if (*debug & DEBUG_L2)
             printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
-                   __func__, psapi, l2->sapi);
+                   mISDNDevName4ch(&l2->ch), psapi,
+                   l2->sapi);
         dev_kfree_skb(skb);
         return 0;
     }
@@ -1875,7 +1916,7 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
         /* not our business */
         if (*debug & DEBUG_L2)
             printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
-                   __func__, ptei, l2->tei);
+                   mISDNDevName4ch(&l2->ch), ptei, l2->tei);
         dev_kfree_skb(skb);
         return 0;
     }
@@ -1916,7 +1957,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
     } else
         c = 'L';
     if (c) {
-        printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
+        printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
+               mISDNDevName4ch(&l2->ch), c);
         mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
     }
     return ret;
@@ -1930,8 +1972,17 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
     int ret = -EINVAL;

     if (*debug & DEBUG_L2_RECV)
-        printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
-               __func__, hh->prim, hh->id, l2->sapi, l2->tei);
+        printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
+               __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
+               l2->sapi, l2->tei);
+    if (hh->prim == DL_INTERN_MSG) {
+        struct mISDNhead *chh = hh + 1; /* saved copy */
+
+        *hh = *chh;
+        if (*debug & DEBUG_L2_RECV)
+            printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
+                   mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
+    }
     switch (hh->prim) {
     case PH_DATA_IND:
         ret = ph_data_indication(l2, hh, skb);
@@ -1987,6 +2038,12 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
         ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
                              skb);
         break;
+    case DL_TIMER200_IND:
+        mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
+        break;
+    case DL_TIMER203_IND:
+        mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
+        break;
     default:
         if (*debug & DEBUG_L2)
             l2m_debug(&l2->l2m, "l2 unknown pr %04x",
@@ -2005,7 +2062,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2005 int ret = -EINVAL; 2062 int ret = -EINVAL;
2006 2063
2007 if (*debug & DEBUG_L2_TEI) 2064 if (*debug & DEBUG_L2_TEI)
2008 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); 2065 printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
2066 mISDNDevName4ch(&l2->ch), cmd, __func__);
2009 switch (cmd) { 2067 switch (cmd) {
2010 case (MDL_ASSIGN_REQ): 2068 case (MDL_ASSIGN_REQ):
2011 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); 2069 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
@@ -2018,7 +2076,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2018 break; 2076 break;
2019 case (MDL_ERROR_RSP): 2077 case (MDL_ERROR_RSP):
2020 /* ETS 300-125 5.3.2.1 Test: TC13010 */ 2078 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2021 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); 2079 printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
2080 mISDNDevName4ch(&l2->ch));
2022 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); 2081 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2023 break; 2082 break;
2024 } 2083 }
@@ -2050,7 +2109,8 @@ l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2050 u_int info; 2109 u_int info;
2051 2110
2052 if (*debug & DEBUG_L2_CTRL) 2111 if (*debug & DEBUG_L2_CTRL)
2053 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); 2112 printk(KERN_DEBUG "%s: %s cmd(%x)\n",
2113 mISDNDevName4ch(ch), __func__, cmd);
2054 2114
2055 switch (cmd) { 2115 switch (cmd) {
2056 case OPEN_CHANNEL: 2116 case OPEN_CHANNEL:
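
The DL_INTERN_MSG branch added to l2_send() above restores a second mISDNhead that the sender stashed directly behind the regular one in the skb's control block; the matching save side is in the mgr_bcast() hunk of tei.c below. A minimal sketch of the restore step, on the assumption that two mISDNhead copies (2 x 8 bytes) fit comfortably in the 48-byte skb->cb, which they do:

	static void unwrap_intern_msg(struct sk_buff *skb)
	{
		struct mISDNhead *hh = mISDN_HEAD_P(skb);
		struct mISDNhead *saved = hh + 1;	/* copy stored by mgr_bcast() */

		*hh = *saved;	/* put the original prim/id back before dispatch */
	}
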
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index ba2bc0c776e2..be88728f1106 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -790,18 +790,23 @@ tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
790static struct layer2 * 790static struct layer2 *
791create_new_tei(struct manager *mgr, int tei, int sapi) 791create_new_tei(struct manager *mgr, int tei, int sapi)
792{ 792{
793 u_long opt = 0; 793 unsigned long opt = 0;
794 u_long flags; 794 unsigned long flags;
795 int id; 795 int id;
796 struct layer2 *l2; 796 struct layer2 *l2;
797 struct channel_req rq;
797 798
798 if (!mgr->up) 799 if (!mgr->up)
799 return NULL; 800 return NULL;
800 if ((tei >= 0) && (tei < 64)) 801 if ((tei >= 0) && (tei < 64))
801 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt); 802 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
802 if (mgr->ch.st->dev->Dprotocols 803 if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) |
803 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 804 (1 << ISDN_P_NT_E1))) {
804 test_and_set_bit(OPTION_L2_PMX, &opt); 805 test_and_set_bit(OPTION_L2_PMX, &opt);
806 rq.protocol = ISDN_P_NT_E1;
807 } else {
808 rq.protocol = ISDN_P_NT_S0;
809 }
805 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi); 810 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
806 if (!l2) { 811 if (!l2) {
807 printk(KERN_WARNING "%s:no memory for layer2\n", __func__); 812 printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
@@ -836,6 +841,14 @@ create_new_tei(struct manager *mgr, int tei, int sapi)
836 l2->ch.recv = mgr->ch.recv; 841 l2->ch.recv = mgr->ch.recv;
837 l2->ch.peer = mgr->ch.peer; 842 l2->ch.peer = mgr->ch.peer;
838 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); 843 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
 844 /* We also need to open L1 here for the manager (refcounting) */
845 rq.adr.dev = mgr->ch.st->dev->id;
846 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
847 if (id < 0) {
848 printk(KERN_WARNING "%s: cannot open L1\n", __func__);
849 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
850 l2 = NULL;
851 }
839 } 852 }
840 return l2; 853 return l2;
841} 854}
@@ -978,10 +991,11 @@ TEIrelease(struct layer2 *l2)
978static int 991static int
979create_teimgr(struct manager *mgr, struct channel_req *crq) 992create_teimgr(struct manager *mgr, struct channel_req *crq)
980{ 993{
981 struct layer2 *l2; 994 struct layer2 *l2;
982 u_long opt = 0; 995 unsigned long opt = 0;
983 u_long flags; 996 unsigned long flags;
984 int id; 997 int id;
998 struct channel_req l1rq;
985 999
986 if (*debug & DEBUG_L2_TEI) 1000 if (*debug & DEBUG_L2_TEI)
987 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", 1001 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
@@ -1016,6 +1030,7 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1016 if (crq->protocol == ISDN_P_LAPD_TE) 1030 if (crq->protocol == ISDN_P_LAPD_TE)
1017 test_and_set_bit(MGR_OPT_USER, &mgr->options); 1031 test_and_set_bit(MGR_OPT_USER, &mgr->options);
1018 } 1032 }
1033 l1rq.adr = crq->adr;
1019 if (mgr->ch.st->dev->Dprotocols 1034 if (mgr->ch.st->dev->Dprotocols
1020 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 1035 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
1021 test_and_set_bit(OPTION_L2_PMX, &opt); 1036 test_and_set_bit(OPTION_L2_PMX, &opt);
@@ -1023,6 +1038,8 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1023 mgr->up = crq->ch; 1038 mgr->up = crq->ch;
1024 id = DL_INFO_L2_CONNECT; 1039 id = DL_INFO_L2_CONNECT;
1025 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id); 1040 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
1041 if (test_bit(MGR_PH_ACTIVE, &mgr->options))
1042 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1026 crq->ch = NULL; 1043 crq->ch = NULL;
1027 if (!list_empty(&mgr->layer2)) { 1044 if (!list_empty(&mgr->layer2)) {
1028 read_lock_irqsave(&mgr->lock, flags); 1045 read_lock_irqsave(&mgr->lock, flags);
@@ -1053,24 +1070,34 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1053 l2->tm->tei_m.fsm = &teifsmu; 1070 l2->tm->tei_m.fsm = &teifsmu;
1054 l2->tm->tei_m.state = ST_TEI_NOP; 1071 l2->tm->tei_m.state = ST_TEI_NOP;
1055 l2->tm->tval = 1000; /* T201 1 sec */ 1072 l2->tm->tval = 1000; /* T201 1 sec */
1073 if (test_bit(OPTION_L2_PMX, &opt))
1074 l1rq.protocol = ISDN_P_TE_E1;
1075 else
1076 l1rq.protocol = ISDN_P_TE_S0;
1056 } else { 1077 } else {
1057 l2->tm->tei_m.fsm = &teifsmn; 1078 l2->tm->tei_m.fsm = &teifsmn;
1058 l2->tm->tei_m.state = ST_TEI_NOP; 1079 l2->tm->tei_m.state = ST_TEI_NOP;
1059 l2->tm->tval = 2000; /* T202 2 sec */ 1080 l2->tm->tval = 2000; /* T202 2 sec */
1081 if (test_bit(OPTION_L2_PMX, &opt))
1082 l1rq.protocol = ISDN_P_NT_E1;
1083 else
1084 l1rq.protocol = ISDN_P_NT_S0;
1060 } 1085 }
1061 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer); 1086 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
1062 write_lock_irqsave(&mgr->lock, flags); 1087 write_lock_irqsave(&mgr->lock, flags);
1063 id = get_free_id(mgr); 1088 id = get_free_id(mgr);
1064 list_add_tail(&l2->list, &mgr->layer2); 1089 list_add_tail(&l2->list, &mgr->layer2);
1065 write_unlock_irqrestore(&mgr->lock, flags); 1090 write_unlock_irqrestore(&mgr->lock, flags);
1066 if (id < 0) { 1091 if (id >= 0) {
1067 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1068 } else {
1069 l2->ch.nr = id; 1092 l2->ch.nr = id;
1070 l2->up->nr = id; 1093 l2->up->nr = id;
1071 crq->ch = &l2->ch; 1094 crq->ch = &l2->ch;
1072 id = 0; 1095 /* We also need to open L1 here for the manager (refcounting) */
1096 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL,
1097 &l1rq);
1073 } 1098 }
1099 if (id < 0)
1100 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1074 return id; 1101 return id;
1075} 1102}
1076 1103
@@ -1096,12 +1123,16 @@ mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
1096 break; 1123 break;
1097 case PH_ACTIVATE_IND: 1124 case PH_ACTIVATE_IND:
1098 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options); 1125 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
1126 if (mgr->up)
1127 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1099 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL); 1128 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
1100 do_send(mgr); 1129 do_send(mgr);
1101 ret = 0; 1130 ret = 0;
1102 break; 1131 break;
1103 case PH_DEACTIVATE_IND: 1132 case PH_DEACTIVATE_IND:
1104 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options); 1133 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
1134 if (mgr->up)
1135 teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL);
1105 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL); 1136 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
1106 ret = 0; 1137 ret = 0;
1107 break; 1138 break;
@@ -1263,7 +1294,7 @@ static int
1263mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) 1294mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1264{ 1295{
1265 struct manager *mgr = container_of(ch, struct manager, bcast); 1296 struct manager *mgr = container_of(ch, struct manager, bcast);
1266 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1297 struct mISDNhead *hhc, *hh = mISDN_HEAD_P(skb);
1267 struct sk_buff *cskb = NULL; 1298 struct sk_buff *cskb = NULL;
1268 struct layer2 *l2; 1299 struct layer2 *l2;
1269 u_long flags; 1300 u_long flags;
@@ -1278,10 +1309,17 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1278 skb = NULL; 1309 skb = NULL;
1279 } else { 1310 } else {
1280 if (!cskb) 1311 if (!cskb)
1281 cskb = skb_copy(skb, GFP_KERNEL); 1312 cskb = skb_copy(skb, GFP_ATOMIC);
1282 } 1313 }
1283 if (cskb) { 1314 if (cskb) {
1284 ret = l2->ch.send(&l2->ch, cskb); 1315 hhc = mISDN_HEAD_P(cskb);
1316 /* save original header behind normal header */
1317 hhc++;
1318 *hhc = *hh;
1319 hhc--;
1320 hhc->prim = DL_INTERN_MSG;
1321 hhc->id = l2->ch.nr;
1322 ret = ch->st->own.recv(&ch->st->own, cskb);
1285 if (ret) { 1323 if (ret) {
1286 if (*debug & DEBUG_SEND_ERR) 1324 if (*debug & DEBUG_SEND_ERR)
1287 printk(KERN_DEBUG 1325 printk(KERN_DEBUG
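
mgr_bcast() above is the save side of that scheme: instead of calling l2->ch.send() on each copy directly, it tags the copy as DL_INTERN_MSG addressed to one channel number and requeues it through the stack's own receive hook, so delivery happens from the stack's context rather than inline from the broadcast path — the skb_copy() switch to GFP_ATOMIC in the same hunk points the same way. A sketch of the wrap step, using only names visible in the hunk:

	static void wrap_intern_msg(struct sk_buff *cskb, int target_nr)
	{
		struct mISDNhead *hhc = mISDN_HEAD_P(cskb);

		*(hhc + 1) = *hhc;		/* save original header behind it */
		hhc->prim = DL_INTERN_MSG;	/* mark as stack-internal */
		hhc->id = target_nr;		/* l2->ch.nr of the target layer2 */
	}
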
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 35b82e00d052..fcf73518f63c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1281,14 +1281,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1281 int port = BP_PORT(bp); 1281 int port = BP_PORT(bp);
1282 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1282 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1283 u32 val = REG_RD(bp, addr); 1283 u32 val = REG_RD(bp, addr);
1284 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1284 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1285 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1285 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1286 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1286 1287
1287 if (msix) { 1288 if (msix) {
1288 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1289 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1289 HC_CONFIG_0_REG_INT_LINE_EN_0); 1290 HC_CONFIG_0_REG_INT_LINE_EN_0);
1290 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1291 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1291 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1292 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1293 if (single_msix)
1294 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1292 } else if (msi) { 1295 } else if (msi) {
1293 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 1296 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1294 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1297 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
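
USING_SINGLE_MSIX_FLAG marks MSI-X operation with a single vector, so the MSI-X branch now additionally sets the single-ISR bit that the MSI path already uses. A sketch of the full decision (the INTx leg is outside the visible context and is an assumption here):

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)	/* one vector: everything via one ISR */
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {		/* legacy INTx (assumed, not shown above) */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	}
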
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 7b06f3592bd7..747f68fa976d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -558,7 +558,7 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
558 be_link_status_update(adapter, link_status); 558 be_link_status_update(adapter, link_status);
559 if (link_speed) 559 if (link_speed)
560 et_speed = link_speed * 10; 560 et_speed = link_speed * 10;
561 else 561 else if (link_status)
562 et_speed = convert_to_et_speed(port_speed); 562 et_speed = convert_to_et_speed(port_speed);
563 } else { 563 } else {
564 et_speed = adapter->phy.forced_port_speed; 564 et_speed = adapter->phy.forced_port_speed;
@@ -618,7 +618,7 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
618 ecmd->supported = adapter->phy.supported; 618 ecmd->supported = adapter->phy.supported;
619 } 619 }
620 620
621 ecmd->duplex = DUPLEX_FULL; 621 ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
622 ecmd->phy_address = adapter->port_num; 622 ecmd->phy_address = adapter->port_num;
623 623
624 return 0; 624 return 0;
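
Both hunks make the ethtool report honest about link state: the port-speed fallback now applies only while link_status is up, and duplex becomes DUPLEX_UNKNOWN without carrier. The resulting rule, sketched as a hypothetical helper (the SPEED_UNKNOWN half is an assumption — the hunk only shows the duplex side):

	static void be_report_link(struct net_device *netdev,
				   struct ethtool_cmd *ecmd, u32 et_speed)
	{
		if (!netif_carrier_ok(netdev)) {
			ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
			ecmd->duplex = DUPLEX_UNKNOWN;
			return;
		}
		ethtool_cmd_speed_set(ecmd, et_speed);
		ecmd->duplex = DUPLEX_FULL;
	}
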
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c8f7b3aace65..6d5d30be0481 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1259,6 +1259,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
1259 skb_checksum_none_assert(skb); 1259 skb_checksum_none_assert(skb);
1260 1260
1261 skb->protocol = eth_type_trans(skb, netdev); 1261 skb->protocol = eth_type_trans(skb, netdev);
1262 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1262 if (netdev->features & NETIF_F_RXHASH) 1263 if (netdev->features & NETIF_F_RXHASH)
1263 skb->rxhash = rxcp->rss_hash; 1264 skb->rxhash = rxcp->rss_hash;
1264 1265
@@ -1315,6 +1316,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1315 skb->len = rxcp->pkt_size; 1316 skb->len = rxcp->pkt_size;
1316 skb->data_len = rxcp->pkt_size; 1317 skb->data_len = rxcp->pkt_size;
1317 skb->ip_summed = CHECKSUM_UNNECESSARY; 1318 skb->ip_summed = CHECKSUM_UNNECESSARY;
1319 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1318 if (adapter->netdev->features & NETIF_F_RXHASH) 1320 if (adapter->netdev->features & NETIF_F_RXHASH)
1319 skb->rxhash = rxcp->rss_hash; 1321 skb->rxhash = rxcp->rss_hash;
1320 1322
@@ -3819,6 +3821,11 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3819 3821
3820 pci_disable_device(pdev); 3822 pci_disable_device(pdev);
3821 3823
3824 /* The error could cause the FW to trigger a flash debug dump.
3825 * Resetting the card while flash dump is in progress
3826 * can cause it not to recover; wait for it to finish
3827 */
3828 ssleep(30);
3822 return PCI_ERS_RESULT_NEED_RESET; 3829 return PCI_ERS_RESULT_NEED_RESET;
3823} 3830}
3824 3831
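
The skb_record_rx_queue() calls added above tag each received skb with the ring it arrived on, which the stack can consult for RPS/RFS steering. The queue index falls out of pointer arithmetic; a minimal sketch of the idiom:

	/* Element pointer minus array base yields the element index -
	 * here the zero-based RX queue number. */
	u16 rxq = rxo - &adapter->rx_obj[0];

	skb_record_rx_queue(skb, rxq);
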
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 74215c05d799..546efe30c9b8 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -193,6 +193,14 @@ config IXGBE
193 To compile this driver as a module, choose M here. The module 193 To compile this driver as a module, choose M here. The module
194 will be called ixgbe. 194 will be called ixgbe.
195 195
196config IXGBE_HWMON
197 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
198 default y
199 depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)
200 ---help---
201 Say Y if you want to expose the thermal sensor data on some of
202 our cards, via a hwmon sysfs interface.
203
196config IXGBE_DCA 204config IXGBE_DCA
197 bool "Direct Cache Access (DCA) Support" 205 bool "Direct Cache Access (DCA) Support"
198 default y 206 default y
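
The expression 'depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)' excludes exactly one configuration: ixgbe built into the kernel while the hwmon core is a module, since built-in code cannot reference symbols that exist only in a module. The bool then guards the sysfs code at compile time; a sketch of the usual stub pattern, with ixgbe_hwmon_init() as a hypothetical name:

	#ifdef CONFIG_IXGBE_HWMON
	int ixgbe_hwmon_init(struct ixgbe_adapter *adapter);	/* real implementation */
	#else
	static inline int ixgbe_hwmon_init(struct ixgbe_adapter *adapter)
	{
		return 0;	/* hwmon support compiled out: succeed as a no-op */
	}
	#endif
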
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a212846a6574..4dd18a1f45d2 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -944,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
944 else 944 else
945 reg |= (1 << 28); 945 reg |= (1 << 28);
946 ew32(TARC(1), reg); 946 ew32(TARC(1), reg);
947
948 /*
949 * Disable IPv6 extension header parsing because some malformed
950 * IPv6 headers can hang the Rx.
951 */
952 reg = er32(RFCTL);
953 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
954 ew32(RFCTL, reg);
947} 955}
948 956
949/** 957/**
@@ -1439,6 +1447,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
1439 /* setup_physical_interface dependent on media type */ 1447 /* setup_physical_interface dependent on media type */
1440 .setup_led = e1000e_setup_led_generic, 1448 .setup_led = e1000e_setup_led_generic,
1441 .config_collision_dist = e1000e_config_collision_dist_generic, 1449 .config_collision_dist = e1000e_config_collision_dist_generic,
1450 .rar_set = e1000e_rar_set_generic,
1442}; 1451};
1443 1452
1444static const struct e1000_phy_operations es2_phy_ops = { 1453static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index d0ea316731fc..36db4df09aed 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
999 **/ 999 **/
1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1001{ 1001{
1002 u32 ctrl, ctrl_ext; 1002 u32 ctrl, ctrl_ext, eecd;
1003 s32 ret_val; 1003 s32 ret_val;
1004 1004
1005 /* 1005 /*
@@ -1072,6 +1072,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1072 */ 1072 */
1073 1073
1074 switch (hw->mac.type) { 1074 switch (hw->mac.type) {
1075 case e1000_82571:
1076 case e1000_82572:
1077 /*
1078 * REQ and GNT bits need to be cleared when using AUTO_RD
1079 * to access the EEPROM.
1080 */
1081 eecd = er32(EECD);
1082 eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
1083 ew32(EECD, eecd);
1084 break;
1075 case e1000_82573: 1085 case e1000_82573:
1076 case e1000_82574: 1086 case e1000_82574:
1077 case e1000_82583: 1087 case e1000_82583:
@@ -1279,6 +1289,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1279 ew32(CTRL_EXT, reg); 1289 ew32(CTRL_EXT, reg);
1280 } 1290 }
1281 1291
1292 /*
1293 * Disable IPv6 extension header parsing because some malformed
1294 * IPv6 headers can hang the Rx.
1295 */
1296 if (hw->mac.type <= e1000_82573) {
1297 reg = er32(RFCTL);
1298 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
1299 ew32(RFCTL, reg);
1300 }
1301
1282 /* PCI-Ex Control Registers */ 1302 /* PCI-Ex Control Registers */
1283 switch (hw->mac.type) { 1303 switch (hw->mac.type) {
1284 case e1000_82574: 1304 case e1000_82574:
@@ -1762,7 +1782,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1762 * incoming packets directed to this port are dropped. 1782 * incoming packets directed to this port are dropped.
1763 * Eventually the LAA will be in RAR[0] and RAR[14]. 1783 * Eventually the LAA will be in RAR[0] and RAR[14].
1764 */ 1784 */
1765 e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); 1785 hw->mac.ops.rar_set(hw, hw->mac.addr,
1786 hw->mac.rar_entry_count - 1);
1766} 1787}
1767 1788
1768/** 1789/**
@@ -1926,6 +1947,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
1926 .setup_led = e1000e_setup_led_generic, 1947 .setup_led = e1000e_setup_led_generic,
1927 .config_collision_dist = e1000e_config_collision_dist_generic, 1948 .config_collision_dist = e1000e_config_collision_dist_generic,
1928 .read_mac_addr = e1000_read_mac_addr_82571, 1949 .read_mac_addr = e1000_read_mac_addr_82571,
1950 .rar_set = e1000e_rar_set_generic,
1929}; 1951};
1930 1952
1931static const struct e1000_phy_operations e82_phy_ops_igp = { 1953static const struct e1000_phy_operations e82_phy_ops_igp = {
@@ -2060,8 +2082,9 @@ const struct e1000_info e1000_82574_info = {
2060 | FLAG_HAS_SMART_POWER_DOWN 2082 | FLAG_HAS_SMART_POWER_DOWN
2061 | FLAG_HAS_AMT 2083 | FLAG_HAS_AMT
2062 | FLAG_HAS_CTRLEXT_ON_LOAD, 2084 | FLAG_HAS_CTRLEXT_ON_LOAD,
2063 .flags2 = FLAG2_CHECK_PHY_HANG 2085 .flags2 = FLAG2_CHECK_PHY_HANG
2064 | FLAG2_DISABLE_ASPM_L0S 2086 | FLAG2_DISABLE_ASPM_L0S
2087 | FLAG2_DISABLE_ASPM_L1
2065 | FLAG2_NO_DISABLE_RX 2088 | FLAG2_NO_DISABLE_RX
2066 | FLAG2_DMA_BURST, 2089 | FLAG2_DMA_BURST,
2067 .pba = 32, 2090 .pba = 32,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 3a5025917163..11c46661af09 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -74,7 +74,9 @@
74#define E1000_WUS_BC E1000_WUFC_BC 74#define E1000_WUS_BC E1000_WUFC_BC
75 75
76/* Extended Device Control */ 76/* Extended Device Control */
77#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
77#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ 78#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
 79#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 80#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
79#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 81#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
80#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 82#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
@@ -573,6 +575,7 @@
573#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ 575#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
574 576
575/* Link Partner Ability Register (Base Page) */ 577/* Link Partner Ability Register (Base Page) */
578#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
576#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ 579#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
577#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 580#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
578 581
@@ -739,6 +742,7 @@
739#define I82577_E_PHY_ID 0x01540050 742#define I82577_E_PHY_ID 0x01540050
740#define I82578_E_PHY_ID 0x004DD040 743#define I82578_E_PHY_ID 0x004DD040
741#define I82579_E_PHY_ID 0x01540090 744#define I82579_E_PHY_ID 0x01540090
745#define I217_E_PHY_ID 0x015400A0
742 746
743/* M88E1000 Specific Registers */ 747/* M88E1000 Specific Registers */
744#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 748#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -850,4 +854,8 @@
850/* SerDes Control */ 854/* SerDes Control */
851#define E1000_GEN_POLL_TIMEOUT 640 855#define E1000_GEN_POLL_TIMEOUT 640
852 856
857/* FW Semaphore */
858#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
859#define E1000_FWSM_WLOCK_MAC_SHIFT 7
860
853#endif /* _E1000_DEFINES_H_ */ 861#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc2067d3f28..6e6fffb34581 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -206,6 +206,7 @@ enum e1000_boards {
206 board_ich10lan, 206 board_ich10lan,
207 board_pchlan, 207 board_pchlan,
208 board_pch2lan, 208 board_pch2lan,
209 board_pch_lpt,
209}; 210};
210 211
211struct e1000_ps_page { 212struct e1000_ps_page {
@@ -528,6 +529,7 @@ extern const struct e1000_info e1000_ich9_info;
528extern const struct e1000_info e1000_ich10_info; 529extern const struct e1000_info e1000_ich10_info;
529extern const struct e1000_info e1000_pch_info; 530extern const struct e1000_info e1000_pch_info;
530extern const struct e1000_info e1000_pch2_info; 531extern const struct e1000_info e1000_pch2_info;
532extern const struct e1000_info e1000_pch_lpt_info;
531extern const struct e1000_info e1000_es2_info; 533extern const struct e1000_info e1000_es2_info;
532 534
533extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, 535extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
@@ -576,7 +578,7 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
576extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 578extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
577 u8 *mc_addr_list, 579 u8 *mc_addr_list,
578 u32 mc_addr_count); 580 u32 mc_addr_count);
579extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 581extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
580extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 582extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
581extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 583extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
582extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); 584extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
@@ -673,11 +675,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
673 return hw->phy.ops.read_reg(hw, offset, data); 675 return hw->phy.ops.read_reg(hw, offset, data);
674} 676}
675 677
678static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
679{
680 return hw->phy.ops.read_reg_locked(hw, offset, data);
681}
682
676static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) 683static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
677{ 684{
678 return hw->phy.ops.write_reg(hw, offset, data); 685 return hw->phy.ops.write_reg(hw, offset, data);
679} 686}
680 687
688static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
689{
690 return hw->phy.ops.write_reg_locked(hw, offset, data);
691}
692
681static inline s32 e1000_get_cable_length(struct e1000_hw *hw) 693static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
682{ 694{
683 return hw->phy.ops.get_cable_length(hw); 695 return hw->phy.ops.get_cable_length(hw);
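
The new e1e_rphy_locked()/e1e_wphy_locked() wrappers parallel e1e_rphy()/e1e_wphy() but call the *_locked PHY ops, which expect the caller to already hold the sw/fw/hw semaphore. The intended discipline, sketched as a hypothetical read-modify-write helper:

	static s32 rmw_phy_reg(struct e1000_hw *hw, u32 reg, u16 set_bits)
	{
		u16 val;
		s32 ret = hw->phy.ops.acquire(hw);	/* take the semaphore once */

		if (ret)
			return ret;
		ret = e1e_rphy_locked(hw, reg, &val);	/* no per-access locking */
		if (!ret)
			ret = e1e_wphy_locked(hw, reg, val | set_bits);
		hw->phy.ops.release(hw);		/* always paired */
		return ret;
	}
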
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 4f1edd9c22f1..d863075df7a4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -773,6 +773,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
773 u32 i; 773 u32 i;
774 u32 toggle; 774 u32 toggle;
775 u32 mask; 775 u32 mask;
776 u32 wlock_mac = 0;
776 777
777 /* 778 /*
778 * The status register is Read Only, so a write should fail. 779 * The status register is Read Only, so a write should fail.
@@ -838,19 +839,31 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
838 case e1000_ich10lan: 839 case e1000_ich10lan:
839 case e1000_pchlan: 840 case e1000_pchlan:
840 case e1000_pch2lan: 841 case e1000_pch2lan:
842 case e1000_pch_lpt:
841 mask |= (1 << 18); 843 mask |= (1 << 18);
842 break; 844 break;
843 default: 845 default:
844 break; 846 break;
845 } 847 }
846 for (i = 0; i < mac->rar_entry_count; i++) 848
849 if (mac->type == e1000_pch_lpt)
850 wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
851 E1000_FWSM_WLOCK_MAC_SHIFT;
852
853 for (i = 0; i < mac->rar_entry_count; i++) {
854 /* Cannot test write-protected SHRAL[n] registers */
855 if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
856 continue;
857
847 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 858 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
848 mask, 0xFFFFFFFF); 859 mask, 0xFFFFFFFF);
860 }
849 861
850 for (i = 0; i < mac->mta_reg_count; i++) 862 for (i = 0; i < mac->mta_reg_count; i++)
851 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 863 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
852 864
853 *data = 0; 865 *data = 0;
866
854 return 0; 867 return 0;
855} 868}
856 869
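
wlock_mac is the FWSM field added in defines.h above. Its semantics, inferred from this loop and from e1000_rar_set_pch_lpt() further down in the ich8lan.c diff: 0 means ME locks nothing, 1 means every SHRA register is locked, and a larger value n leaves RAR indices 1..n (SHRA[0..n-1]) writable by the host. As a sketch:

	static bool shra_writable(struct e1000_hw *hw, u32 rar_index)
	{
		u32 wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
				E1000_FWSM_WLOCK_MAC_SHIFT;

		if (wlock_mac == 1)		/* ME locked all of them */
			return false;
		return (wlock_mac == 0) || (rar_index <= wlock_mac);
	}
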
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 7ca1b68e2e3d..ed5b40985edb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -51,6 +51,7 @@ enum e1e_registers {
51 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 51 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
52 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 52 E1000_FCT = 0x00030, /* Flow Control Type - RW */
53 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 53 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
54 E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
54 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ 55 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
55 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ 56 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
56 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ 57 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
@@ -199,6 +200,14 @@ enum e1e_registers {
199#define E1000_RA (E1000_RAL(0)) 200#define E1000_RA (E1000_RAL(0))
200 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ 201 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
201#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) 202#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
203 E1000_SHRAL_PCH_LPT_BASE = 0x05408,
204#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
 205 E1000_SHRAH_PCH_LPT_BASE = 0x0540C,
 206#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LPT_BASE + ((_n) * 8))
207 E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
208#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
209 E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
210#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
202 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ 211 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
203 E1000_WUC = 0x05800, /* Wakeup Control - RW */ 212 E1000_WUC = 0x05800, /* Wakeup Control - RW */
204 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ 213 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
@@ -401,6 +410,8 @@ enum e1e_registers {
401#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 410#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
402#define E1000_DEV_ID_PCH2_LV_LM 0x1502 411#define E1000_DEV_ID_PCH2_LV_LM 0x1502
403#define E1000_DEV_ID_PCH2_LV_V 0x1503 412#define E1000_DEV_ID_PCH2_LV_V 0x1503
413#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
414#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
404 415
405#define E1000_REVISION_4 4 416#define E1000_REVISION_4 4
406 417
@@ -421,6 +432,7 @@ enum e1000_mac_type {
421 e1000_ich10lan, 432 e1000_ich10lan,
422 e1000_pchlan, 433 e1000_pchlan,
423 e1000_pch2lan, 434 e1000_pch2lan,
435 e1000_pch_lpt,
424}; 436};
425 437
426enum e1000_media_type { 438enum e1000_media_type {
@@ -458,6 +470,7 @@ enum e1000_phy_type {
458 e1000_phy_82578, 470 e1000_phy_82578,
459 e1000_phy_82577, 471 e1000_phy_82577,
460 e1000_phy_82579, 472 e1000_phy_82579,
473 e1000_phy_i217,
461}; 474};
462 475
463enum e1000_bus_width { 476enum e1000_bus_width {
@@ -781,6 +794,7 @@ struct e1000_mac_operations {
781 s32 (*setup_led)(struct e1000_hw *); 794 s32 (*setup_led)(struct e1000_hw *);
782 void (*write_vfta)(struct e1000_hw *, u32, u32); 795 void (*write_vfta)(struct e1000_hw *, u32, u32);
783 void (*config_collision_dist)(struct e1000_hw *); 796 void (*config_collision_dist)(struct e1000_hw *);
797 void (*rar_set)(struct e1000_hw *, u8 *, u32);
784 s32 (*read_mac_addr)(struct e1000_hw *); 798 s32 (*read_mac_addr)(struct e1000_hw *);
785}; 799};
786 800
@@ -965,6 +979,7 @@ struct e1000_dev_spec_ich8lan {
965 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 979 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
966 bool nvm_k1_enabled; 980 bool nvm_k1_enabled;
967 bool eee_disable; 981 bool eee_disable;
982 u16 eee_lp_ability;
968}; 983};
969 984
970struct e1000_hw { 985struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d7fd1e848ddc..bbf70ba367da 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -105,6 +105,9 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
109#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
110
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 111#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 112#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 113#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
@@ -112,6 +115,8 @@
112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 115#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
113 116
114#define E1000_ICH_RAR_ENTRIES 7 117#define E1000_ICH_RAR_ENTRIES 7
118#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
119#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
115 120
116#define PHY_PAGE_SHIFT 5 121#define PHY_PAGE_SHIFT 5
117#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ 122#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
@@ -127,11 +132,18 @@
127 132
128#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ 133#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
129 134
135/* SMBus Control Phy Register */
136#define CV_SMB_CTRL PHY_REG(769, 23)
137#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
138
130/* SMBus Address Phy Register */ 139/* SMBus Address Phy Register */
131#define HV_SMB_ADDR PHY_REG(768, 26) 140#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F 141#define HV_SMB_ADDR_MASK 0x007F
133#define HV_SMB_ADDR_PEC_EN 0x0200 142#define HV_SMB_ADDR_PEC_EN 0x0200
134#define HV_SMB_ADDR_VALID 0x0080 143#define HV_SMB_ADDR_VALID 0x0080
144#define HV_SMB_ADDR_FREQ_MASK 0x1100
145#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
146#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
135 147
136/* PHY Power Management Control */ 148/* PHY Power Management Control */
137#define HV_PM_CTRL PHY_REG(770, 17) 149#define HV_PM_CTRL PHY_REG(770, 17)
@@ -148,11 +160,26 @@
148#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ 160#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
149#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ 161#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
150#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 162#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
163#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
164#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
166
167/* Intel Rapid Start Technology Support */
168#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171#define I217_SxCTRL_MASK 0x1000
172#define I217_CGFREG PHY_REG(772, 29)
173#define I217_CGFREG_MASK 0x0002
174#define I217_MEMPWR PHY_REG(772, 26)
175#define I217_MEMPWR_MASK 0x0010
151 176
152/* Strapping Option Register - RO */ 177/* Strapping Option Register - RO */
153#define E1000_STRAP 0x0000C 178#define E1000_STRAP 0x0000C
154#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 179#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
155#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 180#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
181#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
182#define E1000_STRAP_SMT_FREQ_SHIFT 12
156 183
157/* OEM Bits Phy Register */ 184/* OEM Bits Phy Register */
158#define HV_OEM_BITS PHY_REG(768, 25) 185#define HV_OEM_BITS PHY_REG(768, 25)
@@ -256,6 +283,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
256static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 283static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
257static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 284static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
258static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 285static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
286static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
287static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
259static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 288static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
260static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 289static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
261 290
@@ -284,18 +313,161 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
284#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) 313#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
285#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) 314#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
286 315
287static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) 316/**
317 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
318 * @hw: pointer to the HW structure
319 *
320 * Test access to the PHY registers by reading the PHY ID registers. If
321 * the PHY ID is already known (e.g. resume path) compare it with known ID,
322 * otherwise assume the read PHY ID is correct if it is valid.
323 *
324 * Assumes the sw/fw/hw semaphore is already acquired.
325 **/
326static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
288{ 327{
289 u32 ctrl; 328 u16 phy_reg;
329 u32 phy_id;
290 330
291 ctrl = er32(CTRL); 331 e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
292 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 332 phy_id = (u32)(phy_reg << 16);
293 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 333 e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
294 ew32(CTRL, ctrl); 334 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
295 e1e_flush(); 335
296 udelay(10); 336 if (hw->phy.id) {
297 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 337 if (hw->phy.id == phy_id)
298 ew32(CTRL, ctrl); 338 return true;
339 } else {
340 if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
341 hw->phy.id = phy_id;
342 return true;
343 }
344
345 return false;
346}
347
348/**
349 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
350 * @hw: pointer to the HW structure
351 *
352 * Workarounds/flow necessary for PHY initialization during driver load
353 * and resume paths.
354 **/
355static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
356{
357 u32 mac_reg, fwsm = er32(FWSM);
358 s32 ret_val;
359 u16 phy_reg;
360
361 ret_val = hw->phy.ops.acquire(hw);
362 if (ret_val) {
363 e_dbg("Failed to initialize PHY flow\n");
364 return ret_val;
365 }
366
367 /*
368 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
369 * inaccessible and resetting the PHY is not blocked, toggle the
370 * LANPHYPC Value bit to force the interconnect to PCIe mode.
371 */
372 switch (hw->mac.type) {
373 case e1000_pch_lpt:
374 if (e1000_phy_is_accessible_pchlan(hw))
375 break;
376
377 /*
378 * Before toggling LANPHYPC, see if PHY is accessible by
379 * forcing MAC to SMBus mode first.
380 */
381 mac_reg = er32(CTRL_EXT);
382 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
383 ew32(CTRL_EXT, mac_reg);
384
385 /* fall-through */
386 case e1000_pch2lan:
387 /*
388 * Gate automatic PHY configuration by hardware on
389 * non-managed 82579
390 */
391 if ((hw->mac.type == e1000_pch2lan) &&
392 !(fwsm & E1000_ICH_FWSM_FW_VALID))
393 e1000_gate_hw_phy_config_ich8lan(hw, true);
394
395 if (e1000_phy_is_accessible_pchlan(hw)) {
396 if (hw->mac.type == e1000_pch_lpt) {
397 /* Unforce SMBus mode in PHY */
398 e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
399 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
400 e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
401
402 /* Unforce SMBus mode in MAC */
403 mac_reg = er32(CTRL_EXT);
404 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
405 ew32(CTRL_EXT, mac_reg);
406 }
407 break;
408 }
409
410 /* fall-through */
411 case e1000_pchlan:
412 if ((hw->mac.type == e1000_pchlan) &&
413 (fwsm & E1000_ICH_FWSM_FW_VALID))
414 break;
415
416 if (hw->phy.ops.check_reset_block(hw)) {
417 e_dbg("Required LANPHYPC toggle blocked by ME\n");
418 break;
419 }
420
421 e_dbg("Toggling LANPHYPC\n");
422
423 /* Set Phy Config Counter to 50msec */
424 mac_reg = er32(FEXTNVM3);
425 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
426 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
427 ew32(FEXTNVM3, mac_reg);
428
429 /* Toggle LANPHYPC Value bit */
430 mac_reg = er32(CTRL);
431 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
432 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
433 ew32(CTRL, mac_reg);
434 e1e_flush();
435 udelay(10);
436 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
437 ew32(CTRL, mac_reg);
438 e1e_flush();
439 if (hw->mac.type < e1000_pch_lpt) {
440 msleep(50);
441 } else {
442 u16 count = 20;
443 do {
444 usleep_range(5000, 10000);
445 } while (!(er32(CTRL_EXT) &
446 E1000_CTRL_EXT_LPCD) && count--);
447 }
448 break;
449 default:
450 break;
451 }
452
453 hw->phy.ops.release(hw);
454
455 /*
 456 * Reset the PHY before any access to it. Doing so ensures
457 * that the PHY is in a known good state before we read/write
458 * PHY registers. The generic reset is sufficient here,
459 * because we haven't determined the PHY type yet.
460 */
461 ret_val = e1000e_phy_hw_reset_generic(hw);
462
463 /* Ungate automatic PHY configuration on non-managed 82579 */
464 if ((hw->mac.type == e1000_pch2lan) &&
465 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
466 usleep_range(10000, 20000);
467 e1000_gate_hw_phy_config_ich8lan(hw, false);
468 }
469
470 return ret_val;
299} 471}
300 472
301/** 473/**
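
Two timing details in the hunk above are easy to miss: the PHY configuration counter is first shortened to 50 msec through FEXTNVM3, and on pch_lpt the old fixed msleep(50) after the LANPHYPC toggle is replaced by a bounded poll of the new CTRL_EXT LPCD (power-cycle-done) bit. The poll as a stand-alone sketch:

	/* Wait for the LANPHYPC power cycle to report done, giving up after
	 * roughly 20 * 5-10 ms even if LPCD never sets. */
	static void wait_lanphypc_done(struct e1000_hw *hw)
	{
		u16 count = 20;

		do {
			usleep_range(5000, 10000);
		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
	}
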
@@ -325,70 +497,41 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
325 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 497 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
326 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 498 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
327 499
328 if (!hw->phy.ops.check_reset_block(hw)) { 500 phy->id = e1000_phy_unknown;
329 u32 fwsm = er32(FWSM);
330
331 /*
332 * The MAC-PHY interconnect may still be in SMBus mode after
333 * Sx->S0. If resetting the PHY is not blocked, toggle the
334 * LANPHYPC Value bit to force the interconnect to PCIe mode.
335 */
336 e1000_toggle_lanphypc_value_ich8lan(hw);
337 msleep(50);
338
339 /*
340 * Gate automatic PHY configuration by hardware on
341 * non-managed 82579
342 */
343 if ((hw->mac.type == e1000_pch2lan) &&
344 !(fwsm & E1000_ICH_FWSM_FW_VALID))
345 e1000_gate_hw_phy_config_ich8lan(hw, true);
346
347 /*
348 * Reset the PHY before any access to it. Doing so, ensures
349 * that the PHY is in a known good state before we read/write
350 * PHY registers. The generic reset is sufficient here,
351 * because we haven't determined the PHY type yet.
352 */
353 ret_val = e1000e_phy_hw_reset_generic(hw);
354 if (ret_val)
355 return ret_val;
356 501
357 /* Ungate automatic PHY configuration on non-managed 82579 */ 502 ret_val = e1000_init_phy_workarounds_pchlan(hw);
358 if ((hw->mac.type == e1000_pch2lan) && 503 if (ret_val)
359 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 504 return ret_val;
360 usleep_range(10000, 20000);
361 e1000_gate_hw_phy_config_ich8lan(hw, false);
362 }
363 }
364 505
365 phy->id = e1000_phy_unknown; 506 if (phy->id == e1000_phy_unknown)
366 switch (hw->mac.type) { 507 switch (hw->mac.type) {
367 default: 508 default:
368 ret_val = e1000e_get_phy_id(hw); 509 ret_val = e1000e_get_phy_id(hw);
369 if (ret_val) 510 if (ret_val)
370 return ret_val; 511 return ret_val;
371 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) 512 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
513 break;
514 /* fall-through */
515 case e1000_pch2lan:
516 case e1000_pch_lpt:
517 /*
518 * In case the PHY needs to be in mdio slow mode,
519 * set slow mode and try to get the PHY id again.
520 */
521 ret_val = e1000_set_mdio_slow_mode_hv(hw);
522 if (ret_val)
523 return ret_val;
524 ret_val = e1000e_get_phy_id(hw);
525 if (ret_val)
526 return ret_val;
372 break; 527 break;
373 /* fall-through */ 528 }
374 case e1000_pch2lan:
375 /*
376 * In case the PHY needs to be in mdio slow mode,
377 * set slow mode and try to get the PHY id again.
378 */
379 ret_val = e1000_set_mdio_slow_mode_hv(hw);
380 if (ret_val)
381 return ret_val;
382 ret_val = e1000e_get_phy_id(hw);
383 if (ret_val)
384 return ret_val;
385 break;
386 }
387 phy->type = e1000e_get_phy_type_from_id(phy->id); 529 phy->type = e1000e_get_phy_type_from_id(phy->id);
388 530
389 switch (phy->type) { 531 switch (phy->type) {
390 case e1000_phy_82577: 532 case e1000_phy_82577:
391 case e1000_phy_82579: 533 case e1000_phy_82579:
534 case e1000_phy_i217:
392 phy->ops.check_polarity = e1000_check_polarity_82577; 535 phy->ops.check_polarity = e1000_check_polarity_82577;
393 phy->ops.force_speed_duplex = 536 phy->ops.force_speed_duplex =
394 e1000_phy_force_speed_duplex_82577; 537 e1000_phy_force_speed_duplex_82577;
@@ -573,7 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
573 /* Adaptive IFS supported */ 716 /* Adaptive IFS supported */
574 mac->adaptive_ifs = true; 717 mac->adaptive_ifs = true;
575 718
576 /* LED operations */ 719 /* LED and other operations */
577 switch (mac->type) { 720 switch (mac->type) {
578 case e1000_ich8lan: 721 case e1000_ich8lan:
579 case e1000_ich9lan: 722 case e1000_ich9lan:
@@ -592,8 +735,12 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
592 mac->ops.led_on = e1000_led_on_ich8lan; 735 mac->ops.led_on = e1000_led_on_ich8lan;
593 mac->ops.led_off = e1000_led_off_ich8lan; 736 mac->ops.led_off = e1000_led_off_ich8lan;
594 break; 737 break;
595 case e1000_pchlan:
596 case e1000_pch2lan: 738 case e1000_pch2lan:
739 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
740 mac->ops.rar_set = e1000_rar_set_pch2lan;
741 /* fall-through */
742 case e1000_pch_lpt:
743 case e1000_pchlan:
597 /* check management mode */ 744 /* check management mode */
598 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; 745 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
599 /* ID LED init */ 746 /* ID LED init */
@@ -610,12 +757,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
610 break; 757 break;
611 } 758 }
612 759
760 if (mac->type == e1000_pch_lpt) {
761 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
762 mac->ops.rar_set = e1000_rar_set_pch_lpt;
763 }
764
613 /* Enable PCS Lock-loss workaround for ICH8 */ 765 /* Enable PCS Lock-loss workaround for ICH8 */
614 if (mac->type == e1000_ich8lan) 766 if (mac->type == e1000_ich8lan)
615 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 767 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
616 768
617 /* Gate automatic PHY configuration by hardware on managed 82579 */ 769 /*
618 if ((mac->type == e1000_pch2lan) && 770 * Gate automatic PHY configuration by hardware on managed
771 * 82579 and i217
772 */
773 if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
619 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 774 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
620 e1000_gate_hw_phy_config_ich8lan(hw, true); 775 e1000_gate_hw_phy_config_ich8lan(hw, true);
621 776
@@ -631,22 +786,50 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
631 **/ 786 **/
632static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 787static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
633{ 788{
789 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
634 s32 ret_val = 0; 790 s32 ret_val = 0;
635 u16 phy_reg; 791 u16 phy_reg;
636 792
637 if (hw->phy.type != e1000_phy_82579) 793 if ((hw->phy.type != e1000_phy_82579) &&
794 (hw->phy.type != e1000_phy_i217))
638 return 0; 795 return 0;
639 796
640 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); 797 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
641 if (ret_val) 798 if (ret_val)
642 return ret_val; 799 return ret_val;
643 800
644 if (hw->dev_spec.ich8lan.eee_disable) 801 if (dev_spec->eee_disable)
645 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; 802 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
646 else 803 else
647 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; 804 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
648 805
649 return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); 806 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
807 if (ret_val)
808 return ret_val;
809
810 if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
811 /* Save off link partner's EEE ability */
812 ret_val = hw->phy.ops.acquire(hw);
813 if (ret_val)
814 return ret_val;
815 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
816 I217_EEE_LP_ABILITY);
817 if (ret_val)
818 goto release;
819 e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
820
821 /*
822 * EEE is not supported in 100Half, so ignore partner's EEE
823 * in 100 ability if full-duplex is not advertised.
824 */
825 e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
826 if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
827 dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
828release:
829 hw->phy.ops.release(hw);
830 }
831
832 return 0;
650} 833}
651 834
652/** 835/**
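
The link-partner EEE read above goes through an address/data window rather than a directly mapped register: the EMI register number is first written to I82579_EMI_ADDR, then the value is read back from I82579_EMI_DATA. A minimal sketch of the indirection (the caller must hold the PHY semaphore, as the acquire/release pair in the hunk does):

	static s32 emi_read_locked(struct e1000_hw *hw, u16 emi_reg, u16 *data)
	{
		s32 ret = e1e_wphy_locked(hw, I82579_EMI_ADDR, emi_reg);

		if (ret)
			return ret;
		return e1e_rphy_locked(hw, I82579_EMI_DATA, data);
	}
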
@@ -688,6 +871,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
688 return ret_val; 871 return ret_val;
689 } 872 }
690 873
874 /* Clear link partner's EEE ability */
875 hw->dev_spec.ich8lan.eee_lp_ability = 0;
876
691 if (!link) 877 if (!link)
692 return 0; /* No link detected */ 878 return 0; /* No link detected */
693 879
@@ -783,6 +969,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
783 break; 969 break;
784 case e1000_pchlan: 970 case e1000_pchlan:
785 case e1000_pch2lan: 971 case e1000_pch2lan:
972 case e1000_pch_lpt:
786 rc = e1000_init_phy_params_pchlan(hw); 973 rc = e1000_init_phy_params_pchlan(hw);
787 break; 974 break;
788 default: 975 default:
@@ -968,6 +1155,145 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
968} 1155}
969 1156
970/** 1157/**
1158 * e1000_rar_set_pch2lan - Set receive address register
1159 * @hw: pointer to the HW structure
1160 * @addr: pointer to the receive address
1161 * @index: receive address array register
1162 *
1163 * Sets the receive address array register at index to the address passed
1164 * in by addr. For 82579, RAR[0] is the base address register that is to
1165 * contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
1166 * Use SHRA[0-3] in place of those reserved for ME.
1167 **/
1168static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1169{
1170 u32 rar_low, rar_high;
1171
1172 /*
1173 * HW expects these in little endian so we reverse the byte order
1174 * from network order (big endian) to little endian
1175 */
1176 rar_low = ((u32)addr[0] |
1177 ((u32)addr[1] << 8) |
1178 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1179
1180 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1181
1182 /* If MAC address zero, no need to set the AV bit */
1183 if (rar_low || rar_high)
1184 rar_high |= E1000_RAH_AV;
1185
1186 if (index == 0) {
1187 ew32(RAL(index), rar_low);
1188 e1e_flush();
1189 ew32(RAH(index), rar_high);
1190 e1e_flush();
1191 return;
1192 }
1193
1194 if (index < hw->mac.rar_entry_count) {
1195 s32 ret_val;
1196
1197 ret_val = e1000_acquire_swflag_ich8lan(hw);
1198 if (ret_val)
1199 goto out;
1200
1201 ew32(SHRAL(index - 1), rar_low);
1202 e1e_flush();
1203 ew32(SHRAH(index - 1), rar_high);
1204 e1e_flush();
1205
1206 e1000_release_swflag_ich8lan(hw);
1207
1208 /* verify the register updates */
1209 if ((er32(SHRAL(index - 1)) == rar_low) &&
1210 (er32(SHRAH(index - 1)) == rar_high))
1211 return;
1212
1213 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1214 (index - 1), er32(FWSM));
1215 }
1216
1217out:
1218 e_dbg("Failed to write receive address at index %d\n", index);
1219}
1220
1221/**
1222 * e1000_rar_set_pch_lpt - Set receive address registers
1223 * @hw: pointer to the HW structure
1224 * @addr: pointer to the receive address
1225 * @index: receive address array register
1226 *
1227 * Sets the receive address register array at index to the address passed
1228 * in by addr. For LPT, RAR[0] is the base address register that is to
1229 * contain the MAC address. SHRA[0-10] are the shared receive address
1230 * registers that are shared between the Host and manageability engine (ME).
1231 **/
1232static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1233{
1234 u32 rar_low, rar_high;
1235 u32 wlock_mac;
1236
1237 /*
1238 * HW expects these in little endian so we reverse the byte order
1239 * from network order (big endian) to little endian
1240 */
1241 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1242 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1243
1244 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1245
1246 /* If MAC address zero, no need to set the AV bit */
1247 if (rar_low || rar_high)
1248 rar_high |= E1000_RAH_AV;
1249
1250 if (index == 0) {
1251 ew32(RAL(index), rar_low);
1252 e1e_flush();
1253 ew32(RAH(index), rar_high);
1254 e1e_flush();
1255 return;
1256 }
1257
1258 /*
1259 * The manageability engine (ME) can lock certain SHRAR registers that
1260 * it is using - those registers are unavailable for use.
1261 */
1262 if (index < hw->mac.rar_entry_count) {
1263 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1264 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1265
1266 /* Check if all SHRAR registers are locked */
1267 if (wlock_mac == 1)
1268 goto out;
1269
1270 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1271 s32 ret_val;
1272
1273 ret_val = e1000_acquire_swflag_ich8lan(hw);
1274
1275 if (ret_val)
1276 goto out;
1277
1278 ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1279 e1e_flush();
1280 ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1281 e1e_flush();
1282
1283 e1000_release_swflag_ich8lan(hw);
1284
1285 /* verify the register updates */
1286 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1287 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1288 return;
1289 }
1290 }
1291
1292out:
1293 e_dbg("Failed to write receive address at index %d\n", index);
1294}
1295
1296/**
971 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 1297 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
972 * @hw: pointer to the HW structure 1298 * @hw: pointer to the HW structure
973 * 1299 *
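
Both rar_set variants above end with a read-back compare because a write to a SHRA pair that ME has claimed is silently dropped; only reading the registers back reveals whether it took. The idiom for the 82579 layout (RAR index i maps to SHRA[i-1]), as a sketch:

	static bool shra_write_verified(struct e1000_hw *hw, u32 i,
					u32 rar_low, u32 rar_high)
	{
		ew32(SHRAL(i - 1), rar_low);
		e1e_flush();
		ew32(SHRAH(i - 1), rar_high);
		e1e_flush();
		/* trust only the readback - ME may have discarded the write */
		return (er32(SHRAL(i - 1)) == rar_low) &&
		       (er32(SHRAH(i - 1)) == rar_high);
	}
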
@@ -995,6 +1321,8 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
995{ 1321{
996 u16 phy_data; 1322 u16 phy_data;
997 u32 strap = er32(STRAP); 1323 u32 strap = er32(STRAP);
1324 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1325 E1000_STRAP_SMT_FREQ_SHIFT;
998 s32 ret_val = 0; 1326 s32 ret_val = 0;
999 1327
1000 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1328 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1007,6 +1335,19 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1007 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 1335 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1008 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1336 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1009 1337
1338 if (hw->phy.type == e1000_phy_i217) {
1339 /* Restore SMBus frequency */
1340 if (freq--) {
1341 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1342 phy_data |= (freq & (1 << 0)) <<
1343 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1344 phy_data |= (freq & (1 << 1)) <<
1345 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1346 } else {
1347 e_dbg("Unsupported SMB frequency in PHY\n");
1348 }
1349 }
1350
1010 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 1351 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1011} 1352}
1012 1353
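
The i217 branch above restores a 2-bit SMBus frequency from the STRAP word into two non-adjacent bits of HV_SMB_ADDR (mask 0x1100, i.e. bits 8 and 12). The 'if (freq--)' works because strap value 0 is flagged as unsupported, while 1..3 carry the frequency plus one. The bit splice, unrolled as a sketch:

	static u16 splice_smb_freq(u16 phy_data, u32 strap_freq)
	{
		phy_data &= ~HV_SMB_ADDR_FREQ_MASK;	/* clear bits 12 and 8 */
		if (strap_freq) {			/* 0: nothing to restore */
			u32 f = strap_freq - 1;

			phy_data |= (f & 1) << HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= ((f >> 1) & 1) << HV_SMB_ADDR_FREQ_HIGH_SHIFT;
		}
		return phy_data;
	}
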
@@ -1044,6 +1385,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1044 /* Fall-thru */ 1385 /* Fall-thru */
1045 case e1000_pchlan: 1386 case e1000_pchlan:
1046 case e1000_pch2lan: 1387 case e1000_pch2lan:
1388 case e1000_pch_lpt:
1047 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 1389 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1048 break; 1390 break;
1049 default: 1391 default:
@@ -1063,10 +1405,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1063 * extended configuration before SW configuration 1405 * extended configuration before SW configuration
1064 */ 1406 */
1065 data = er32(EXTCNF_CTRL); 1407 data = er32(EXTCNF_CTRL);
1066 if (!(hw->mac.type == e1000_pch2lan)) { 1408 if ((hw->mac.type < e1000_pch2lan) &&
1067 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 1409 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1068 goto release; 1410 goto release;
1069 }
1070 1411
1071 cnf_size = er32(EXTCNF_SIZE); 1412 cnf_size = er32(EXTCNF_SIZE);
1072 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 1413 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -1077,9 +1418,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1077 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1418 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1078 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1419 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1079 1420
1080 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1421 if (((hw->mac.type == e1000_pchlan) &&
1081 (hw->mac.type == e1000_pchlan)) || 1422 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1082 (hw->mac.type == e1000_pch2lan)) { 1423 (hw->mac.type > e1000_pchlan)) {
1083 /* 1424 /*
1084 * HW configures the SMBus address and LEDs when the 1425 * HW configures the SMBus address and LEDs when the
1085 * OEM and LCD Write Enable bits are set in the NVM. 1426 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1122,8 +1463,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1122 reg_addr &= PHY_REG_MASK; 1463 reg_addr &= PHY_REG_MASK;
1123 reg_addr |= phy_page; 1464 reg_addr |= phy_page;
1124 1465
1125 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, 1466 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1126 reg_data);
1127 if (ret_val) 1467 if (ret_val)
1128 goto release; 1468 goto release;
1129 } 1469 }
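
Many of the hunks that follow are the same mechanical cleanup: open-coded hw->phy.ops.{read,write}_reg_locked calls collapsed into the driver's e1e_rphy_locked/e1e_wphy_locked helpers so more statements fit on one line. Those helpers are presumably thin wrappers along these lines (sketch; the real definitions live in the driver's e1000.h):

	static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
	{
		return hw->phy.ops.read_reg_locked(hw, offset, data);
	}

	static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
	{
		return hw->phy.ops.write_reg_locked(hw, offset, data);
	}
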
@@ -1160,8 +1500,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1160 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 1500 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1161 if (link) { 1501 if (link) {
1162 if (hw->phy.type == e1000_phy_82578) { 1502 if (hw->phy.type == e1000_phy_82578) {
1163 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, 1503 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1164 &status_reg); 1504 &status_reg);
1165 if (ret_val) 1505 if (ret_val)
1166 goto release; 1506 goto release;
1167 1507
@@ -1176,8 +1516,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1176 } 1516 }
1177 1517
1178 if (hw->phy.type == e1000_phy_82577) { 1518 if (hw->phy.type == e1000_phy_82577) {
1179 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, 1519 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1180 &status_reg);
1181 if (ret_val) 1520 if (ret_val)
1182 goto release; 1521 goto release;
1183 1522
@@ -1192,15 +1531,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1192 } 1531 }
1193 1532
1194 /* Link stall fix for link up */ 1533 /* Link stall fix for link up */
1195 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1534 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1196 0x0100);
1197 if (ret_val) 1535 if (ret_val)
1198 goto release; 1536 goto release;
1199 1537
1200 } else { 1538 } else {
1201 /* Link stall fix for link down */ 1539 /* Link stall fix for link down */
1202 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1540 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1203 0x4100);
1204 if (ret_val) 1541 if (ret_val)
1205 goto release; 1542 goto release;
1206 } 1543 }
@@ -1280,14 +1617,14 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1280 u32 mac_reg; 1617 u32 mac_reg;
1281 u16 oem_reg; 1618 u16 oem_reg;
1282 1619
1283 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) 1620 if (hw->mac.type < e1000_pchlan)
1284 return ret_val; 1621 return ret_val;
1285 1622
1286 ret_val = hw->phy.ops.acquire(hw); 1623 ret_val = hw->phy.ops.acquire(hw);
1287 if (ret_val) 1624 if (ret_val)
1288 return ret_val; 1625 return ret_val;
1289 1626
1290 if (!(hw->mac.type == e1000_pch2lan)) { 1627 if (hw->mac.type == e1000_pchlan) {
1291 mac_reg = er32(EXTCNF_CTRL); 1628 mac_reg = er32(EXTCNF_CTRL);
1292 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 1629 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1293 goto release; 1630 goto release;
@@ -1299,7 +1636,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1299 1636
1300 mac_reg = er32(PHY_CTRL); 1637 mac_reg = er32(PHY_CTRL);
1301 1638
1302 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); 1639 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1303 if (ret_val) 1640 if (ret_val)
1304 goto release; 1641 goto release;
1305 1642
@@ -1326,7 +1663,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1326 !hw->phy.ops.check_reset_block(hw)) 1663 !hw->phy.ops.check_reset_block(hw))
1327 oem_reg |= HV_OEM_BITS_RESTART_AN; 1664 oem_reg |= HV_OEM_BITS_RESTART_AN;
1328 1665
1329 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1666 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1330 1667
1331release: 1668release:
1332 hw->phy.ops.release(hw); 1669 hw->phy.ops.release(hw);
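
Note the recurring pattern in this patch of equality tests on hw->mac.type becoming ordered comparisons (<, >, >=). That is only safe because the MAC type enum declares the ICH/PCH generations in chronological order, roughly (abbreviated sketch of the hw.h declaration):

	enum e1000_mac_type {
		/* ... earlier 8254x/8257x entries elided ... */
		e1000_ich8lan,
		e1000_ich9lan,
		e1000_ich10lan,
		e1000_pchlan,
		e1000_pch2lan,
		e1000_pch_lpt,	/* added by this patch */
	};
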
@@ -1422,11 +1759,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1422 ret_val = hw->phy.ops.acquire(hw); 1759 ret_val = hw->phy.ops.acquire(hw);
1423 if (ret_val) 1760 if (ret_val)
1424 return ret_val; 1761 return ret_val;
1425 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); 1762 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1426 if (ret_val) 1763 if (ret_val)
1427 goto release; 1764 goto release;
1428 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, 1765 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1429 phy_data & 0x00FF);
1430release: 1766release:
1431 hw->phy.ops.release(hw); 1767 hw->phy.ops.release(hw);
1432 1768
@@ -1485,7 +1821,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1485 u32 mac_reg; 1821 u32 mac_reg;
1486 u16 i; 1822 u16 i;
1487 1823
1488 if (hw->mac.type != e1000_pch2lan) 1824 if (hw->mac.type < e1000_pch2lan)
1489 return 0; 1825 return 0;
1490 1826
1491 /* disable Rx path while enabling/disabling workaround */ 1827 /* disable Rx path while enabling/disabling workaround */
@@ -1658,20 +1994,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1658 ret_val = hw->phy.ops.acquire(hw); 1994 ret_val = hw->phy.ops.acquire(hw);
1659 if (ret_val) 1995 if (ret_val)
1660 return ret_val; 1996 return ret_val;
1661 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 1997 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
1662 I82579_MSE_THRESHOLD);
1663 if (ret_val) 1998 if (ret_val)
1664 goto release; 1999 goto release;
1665 /* set MSE higher to enable link to stay up when noise is high */ 2000 /* set MSE higher to enable link to stay up when noise is high */
1666 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); 2001 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
1667 if (ret_val) 2002 if (ret_val)
1668 goto release; 2003 goto release;
1669 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2004 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
1670 I82579_MSE_LINK_DOWN);
1671 if (ret_val) 2005 if (ret_val)
1672 goto release; 2006 goto release;
1673 /* drop link after 5 times MSE threshold was reached */ 2007 /* drop link after 5 times MSE threshold was reached */
1674 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); 2008 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
1675release: 2009release:
1676 hw->phy.ops.release(hw); 2010 hw->phy.ops.release(hw);
1677 2011
@@ -1744,7 +2078,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1744{ 2078{
1745 u32 extcnf_ctrl; 2079 u32 extcnf_ctrl;
1746 2080
1747 if (hw->mac.type != e1000_pch2lan) 2081 if (hw->mac.type < e1000_pch2lan)
1748 return; 2082 return;
1749 2083
1750 extcnf_ctrl = er32(EXTCNF_CTRL); 2084 extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -1846,12 +2180,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1846 ret_val = hw->phy.ops.acquire(hw); 2180 ret_val = hw->phy.ops.acquire(hw);
1847 if (ret_val) 2181 if (ret_val)
1848 return ret_val; 2182 return ret_val;
1849 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2183 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
1850 I82579_LPI_UPDATE_TIMER); 2184 I82579_LPI_UPDATE_TIMER);
1851 if (!ret_val) 2185 if (!ret_val)
1852 ret_val = hw->phy.ops.write_reg_locked(hw, 2186 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
1853 I82579_EMI_DATA,
1854 0x1387);
1855 hw->phy.ops.release(hw); 2187 hw->phy.ops.release(hw);
1856 } 2188 }
1857 2189
@@ -3071,8 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3071static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 3403static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3072{ 3404{
3073 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3405 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3074 u16 reg; 3406 u16 kum_cfg;
3075 u32 ctrl, kab; 3407 u32 ctrl, reg;
3076 s32 ret_val; 3408 s32 ret_val;
3077 3409
3078 /* 3410 /*
@@ -3106,12 +3438,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3106 } 3438 }
3107 3439
3108 if (hw->mac.type == e1000_pchlan) { 3440 if (hw->mac.type == e1000_pchlan) {
3109 /* Save the NVM K1 bit setting*/ 3441 /* Save the NVM K1 bit setting */
3110 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg); 3442 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3111 if (ret_val) 3443 if (ret_val)
3112 return ret_val; 3444 return ret_val;
3113 3445
3114 if (reg & E1000_NVM_K1_ENABLE) 3446 if (kum_cfg & E1000_NVM_K1_ENABLE)
3115 dev_spec->nvm_k1_enabled = true; 3447 dev_spec->nvm_k1_enabled = true;
3116 else 3448 else
3117 dev_spec->nvm_k1_enabled = false; 3449 dev_spec->nvm_k1_enabled = false;
@@ -3141,6 +3473,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3141 /* cannot issue a flush here because it hangs the hardware */ 3473 /* cannot issue a flush here because it hangs the hardware */
3142 msleep(20); 3474 msleep(20);
3143 3475
3476 /* Set Phy Config Counter to 50msec */
3477 if (hw->mac.type == e1000_pch2lan) {
3478 reg = er32(FEXTNVM3);
3479 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3480 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3481 ew32(FEXTNVM3, reg);
3482 }
3483
3144 if (!ret_val) 3484 if (!ret_val)
3145 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); 3485 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3146 3486
@@ -3165,9 +3505,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3165 ew32(IMC, 0xffffffff); 3505 ew32(IMC, 0xffffffff);
3166 er32(ICR); 3506 er32(ICR);
3167 3507
3168 kab = er32(KABGTXD); 3508 reg = er32(KABGTXD);
3169 kab |= E1000_KABGTXD_BGSQLBIAS; 3509 reg |= E1000_KABGTXD_BGSQLBIAS;
3170 ew32(KABGTXD, kab); 3510 ew32(KABGTXD, reg);
3171 3511
3172 return 0; 3512 return 0;
3173} 3513}
@@ -3320,6 +3660,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3320 */ 3660 */
3321 reg = er32(RFCTL); 3661 reg = er32(RFCTL);
3322 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 3662 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3663
3664 /*
3665 * Disable IPv6 extension header parsing because some malformed
3666 * IPv6 headers can hang the Rx.
3667 */
3668 if (hw->mac.type == e1000_ich8lan)
3669 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3323 ew32(RFCTL, reg); 3670 ew32(RFCTL, reg);
3324} 3671}
3325 3672
@@ -3370,6 +3717,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3370 ew32(FCTTV, hw->fc.pause_time); 3717 ew32(FCTTV, hw->fc.pause_time);
3371 if ((hw->phy.type == e1000_phy_82578) || 3718 if ((hw->phy.type == e1000_phy_82578) ||
3372 (hw->phy.type == e1000_phy_82579) || 3719 (hw->phy.type == e1000_phy_82579) ||
3720 (hw->phy.type == e1000_phy_i217) ||
3373 (hw->phy.type == e1000_phy_82577)) { 3721 (hw->phy.type == e1000_phy_82577)) {
3374 ew32(FCRTV_PCH, hw->fc.refresh_time); 3722 ew32(FCRTV_PCH, hw->fc.refresh_time);
3375 3723
@@ -3433,6 +3781,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3433 break; 3781 break;
3434 case e1000_phy_82577: 3782 case e1000_phy_82577:
3435 case e1000_phy_82579: 3783 case e1000_phy_82579:
3784 case e1000_phy_i217:
3436 ret_val = e1000_copper_link_setup_82577(hw); 3785 ret_val = e1000_copper_link_setup_82577(hw);
3437 if (ret_val) 3786 if (ret_val)
3438 return ret_val; 3787 return ret_val;
@@ -3679,14 +4028,88 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3679 * the LPLU setting in the NVM or custom setting. For PCH and newer parts, 4028 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3680 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also 4029 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3681 * needs to be written. 4030 * needs to be written.
4031 * Parts that support (and are linked to a partner which support) EEE in
4032 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4033 * than 10Mbps w/o EEE.
3682 **/ 4034 **/
3683void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 4035void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3684{ 4036{
4037 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3685 u32 phy_ctrl; 4038 u32 phy_ctrl;
3686 s32 ret_val; 4039 s32 ret_val;
3687 4040
3688 phy_ctrl = er32(PHY_CTRL); 4041 phy_ctrl = er32(PHY_CTRL);
3689 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4042 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4043 if (hw->phy.type == e1000_phy_i217) {
4044 u16 phy_reg;
4045
4046 ret_val = hw->phy.ops.acquire(hw);
4047 if (ret_val)
4048 goto out;
4049
4050 if (!dev_spec->eee_disable) {
4051 u16 eee_advert;
4052
4053 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
4054 I217_EEE_ADVERTISEMENT);
4055 if (ret_val)
4056 goto release;
4057 e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
4058
4059 /*
4060 * Disable LPLU if both link partners support 100BaseT
4061 * EEE and 100Full is advertised on both ends of the
4062 * link.
4063 */
4064 if ((eee_advert & I217_EEE_100_SUPPORTED) &&
4065 (dev_spec->eee_lp_ability &
4066 I217_EEE_100_SUPPORTED) &&
4067 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4068 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4069 E1000_PHY_CTRL_NOND0A_LPLU);
4070 }
4071
4072 /*
4073 * For i217 Intel Rapid Start Technology support,
4074 * when the system is going into Sx and no manageability engine
4075 * is present, the driver must configure proxy to reset only on
4076 * power good. LPI (Low Power Idle) state must also reset only
4077 * on power good, as well as the MTA (Multicast table array).
4078 * The SMBus release must also be disabled on LCD reset.
4079 */
4080 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4081
4082 /* Enable proxy to reset only on power good. */
4083 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
4084 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4085 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4086
4087 /*
 4088 * Set the LPI (EEE) enable bit so that it resets only
 4089 * on power good.
4090 */
4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 phy_reg |= I217_SxCTRL_MASK;
4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094
4095 /* Disable the SMB release on LCD reset. */
4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
 4097 phy_reg &= ~I217_MEMPWR_MASK;
4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 }
4100
4101 /*
 4102 * Enable the MTA reset for Intel Rapid Start Technology
 4103 * support
4104 */
4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 phy_reg |= I217_CGFREG_MASK;
4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108
4109release:
4110 hw->phy.ops.release(hw);
4111 }
4112out:
3690 ew32(PHY_CTRL, phy_ctrl); 4113 ew32(PHY_CTRL, phy_ctrl);
3691 4114
3692 if (hw->mac.type == e1000_ich8lan) 4115 if (hw->mac.type == e1000_ich8lan)
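
The I82579_EMI_ADDR/I82579_EMI_DATA pairs used here (and in the MSE and LPI timer hunks earlier) are an indirect register window: write the target EMI address through one PHY register, then move the data through the other. A sketch of the read side (helper name hypothetical; the caller must already hold the PHY lock):

	static s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
	{
		s32 ret_val;

		/* select the EMI register ... */
		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, addr);
		if (ret_val)
			return ret_val;
		/* ... then read its contents through the data window */
		return e1e_rphy_locked(hw, I82579_EMI_DATA, data);
	}
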
@@ -3715,44 +4138,61 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3715 * on which PHY resets are not blocked, if the PHY registers cannot be 4138 * on which PHY resets are not blocked, if the PHY registers cannot be
3716 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 4139 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3717 * the PHY. 4140 * the PHY.
4141 * On i217, setup Intel Rapid Start Technology.
3718 **/ 4142 **/
3719void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 4143void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3720{ 4144{
3721 u16 phy_id1, phy_id2;
3722 s32 ret_val; 4145 s32 ret_val;
3723 4146
3724 if ((hw->mac.type != e1000_pch2lan) || 4147 if (hw->mac.type < e1000_pch2lan)
3725 hw->phy.ops.check_reset_block(hw))
3726 return; 4148 return;
3727 4149
3728 ret_val = hw->phy.ops.acquire(hw); 4150 ret_val = e1000_init_phy_workarounds_pchlan(hw);
3729 if (ret_val) { 4151 if (ret_val) {
3730 e_dbg("Failed to acquire PHY semaphore in resume\n"); 4152 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
3731 return; 4153 return;
3732 } 4154 }
3733 4155
3734 /* Test access to the PHY registers by reading the ID regs */ 4156 /*
3735 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); 4157 * For i217 Intel Rapid Start Technology support when the system
3736 if (ret_val) 4158 * is transitioning from Sx and no manageability engine is present,
3737 goto release; 4159 * configure SMBus to restore on reset, disable proxy, and enable
3738 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); 4160 * the reset on MTA (Multicast table array).
3739 if (ret_val) 4161 */
3740 goto release; 4162 if (hw->phy.type == e1000_phy_i217) {
3741 4163 u16 phy_reg;
3742 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3743 (u32)(phy_id2 & PHY_REVISION_MASK)))
3744 goto release;
3745 4164
3746 e1000_toggle_lanphypc_value_ich8lan(hw); 4165 ret_val = hw->phy.ops.acquire(hw);
4166 if (ret_val) {
4167 e_dbg("Failed to setup iRST\n");
4168 return;
4169 }
3747 4170
3748 hw->phy.ops.release(hw); 4171 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
3749 msleep(50); 4172 /*
3750 e1000_phy_hw_reset(hw); 4173 * Restore clear on SMB if no manageability engine
3751 msleep(50); 4174 * is present
3752 return; 4175 */
4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 if (ret_val)
4178 goto release;
4179 phy_reg |= I217_MEMPWR_MASK;
4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
3753 4181
4182 /* Disable Proxy */
4183 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
4184 }
4185 /* Enable reset on MTA */
4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 if (ret_val)
4188 goto release;
4189 phy_reg &= ~I217_CGFREG_MASK;
4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
3754release: 4191release:
3755 hw->phy.ops.release(hw); 4192 if (ret_val)
4193 e_dbg("Error %d in resume workarounds\n", ret_val);
4194 hw->phy.ops.release(hw);
4195 }
3756} 4196}
3757 4197
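
This resume path deliberately mirrors e1000_suspend_workarounds_ich8lan above: the proxy control set on suspend is cleared here, I217_MEMPWR_MASK cleared on suspend is set here, and I217_CGFREG_MASK set on suspend is cleared here. Both paths key off whether a manageability engine owns the PHY, a check that could be factored as (hypothetical helper):

	static bool e1000_me_present(struct e1000_hw *hw)
	{
		return !!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID);
	}
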
3758/** 4198/**
@@ -3993,6 +4433,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3993 /* Clear PHY statistics registers */ 4433 /* Clear PHY statistics registers */
3994 if ((hw->phy.type == e1000_phy_82578) || 4434 if ((hw->phy.type == e1000_phy_82578) ||
3995 (hw->phy.type == e1000_phy_82579) || 4435 (hw->phy.type == e1000_phy_82579) ||
4436 (hw->phy.type == e1000_phy_i217) ||
3996 (hw->phy.type == e1000_phy_82577)) { 4437 (hw->phy.type == e1000_phy_82577)) {
3997 ret_val = hw->phy.ops.acquire(hw); 4438 ret_val = hw->phy.ops.acquire(hw);
3998 if (ret_val) 4439 if (ret_val)
@@ -4037,6 +4478,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
4037 .setup_physical_interface= e1000_setup_copper_link_ich8lan, 4478 .setup_physical_interface= e1000_setup_copper_link_ich8lan,
4038 /* id_led_init dependent on mac type */ 4479 /* id_led_init dependent on mac type */
4039 .config_collision_dist = e1000e_config_collision_dist_generic, 4480 .config_collision_dist = e1000e_config_collision_dist_generic,
4481 .rar_set = e1000e_rar_set_generic,
4040}; 4482};
4041 4483
4042static const struct e1000_phy_operations ich8_phy_ops = { 4484static const struct e1000_phy_operations ich8_phy_ops = {
@@ -4151,3 +4593,22 @@ const struct e1000_info e1000_pch2_info = {
4151 .phy_ops = &ich8_phy_ops, 4593 .phy_ops = &ich8_phy_ops,
4152 .nvm_ops = &ich8_nvm_ops, 4594 .nvm_ops = &ich8_nvm_ops,
4153}; 4595};
4596
4597const struct e1000_info e1000_pch_lpt_info = {
4598 .mac = e1000_pch_lpt,
4599 .flags = FLAG_IS_ICH
4600 | FLAG_HAS_WOL
4601 | FLAG_HAS_CTRLEXT_ON_LOAD
4602 | FLAG_HAS_AMT
4603 | FLAG_HAS_FLASH
4604 | FLAG_HAS_JUMBO_FRAMES
4605 | FLAG_APME_IN_WUC,
4606 .flags2 = FLAG2_HAS_PHY_STATS
4607 | FLAG2_HAS_EEE,
4608 .pba = 26,
4609 .max_hw_frame_size = DEFAULT_JUMBO,
4610 .get_variants = e1000_get_variants_ich8lan,
4611 .mac_ops = &ich8_mac_ops,
4612 .phy_ops = &ich8_phy_ops,
4613 .nvm_ops = &ich8_nvm_ops,
4614};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d8327499305f..026e8b3ab52e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -143,12 +143,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
143 /* Setup the receive address */ 143 /* Setup the receive address */
144 e_dbg("Programming MAC Address into RAR[0]\n"); 144 e_dbg("Programming MAC Address into RAR[0]\n");
145 145
146 e1000e_rar_set(hw, hw->mac.addr, 0); 146 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
147 147
148 /* Zero out the other (rar_entry_count - 1) receive addresses */ 148 /* Zero out the other (rar_entry_count - 1) receive addresses */
149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); 149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
150 for (i = 1; i < rar_count; i++) 150 for (i = 1; i < rar_count; i++)
151 e1000e_rar_set(hw, mac_addr, i); 151 hw->mac.ops.rar_set(hw, mac_addr, i);
152} 152}
153 153
154/** 154/**
@@ -215,13 +215,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
215 * same as the normal permanent MAC address stored by the HW into the 215 * same as the normal permanent MAC address stored by the HW into the
216 * RAR. Do this by mapping this address into RAR0. 216 * RAR. Do this by mapping this address into RAR0.
217 */ 217 */
218 e1000e_rar_set(hw, alt_mac_addr, 0); 218 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
219 219
220 return 0; 220 return 0;
221} 221}
222 222
223/** 223/**
224 * e1000e_rar_set - Set receive address register 224 * e1000e_rar_set_generic - Set receive address register
225 * @hw: pointer to the HW structure 225 * @hw: pointer to the HW structure
226 * @addr: pointer to the receive address 226 * @addr: pointer to the receive address
227 * @index: receive address array register 227 * @index: receive address array register
@@ -229,7 +229,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
229 * Sets the receive address array register at index to the address passed 229 * Sets the receive address array register at index to the address passed
230 * in by addr. 230 * in by addr.
231 **/ 231 **/
232void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 232void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
233{ 233{
234 u32 rar_low, rar_high; 234 u32 rar_low, rar_high;
235 235
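
With rar_set now reached through the mac.ops function pointer (see ich8_mac_ops gaining .rar_set above), ICH/PCH parts can substitute the SHRAR-aware writer shown at the top of this patch. The hookup presumably happens during MAC init, along these lines (sketch; the entry-count constant is illustrative):

	/* e.g. in e1000_init_mac_params_ich8lan() */
	mac->ops.rar_set = e1000e_rar_set_generic;	/* default */
	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; /* illustrative */
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
	}
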
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 140fee14b313..f648299c54ea 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.10.6" DRV_EXTRAVERSION 59#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -79,6 +79,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
79 [board_ich10lan] = &e1000_ich10_info, 79 [board_ich10lan] = &e1000_ich10_info,
80 [board_pchlan] = &e1000_pch_info, 80 [board_pchlan] = &e1000_pch_info,
81 [board_pch2lan] = &e1000_pch2_info, 81 [board_pch2lan] = &e1000_pch2_info,
82 [board_pch_lpt] = &e1000_pch_lpt_info,
82}; 83};
83 84
84struct e1000_reg_info { 85struct e1000_reg_info {
@@ -1084,6 +1085,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
1084 phy_1000t_status, 1085 phy_1000t_status,
1085 phy_ext_status, 1086 phy_ext_status,
1086 pci_status); 1087 pci_status);
1088
1089 /* Suggest workaround for known h/w issue */
1090 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1091 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1087} 1092}
1088 1093
1089/** 1094/**
@@ -2859,8 +2864,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2859 u32 rctl, rfctl; 2864 u32 rctl, rfctl;
2860 u32 pages = 0; 2865 u32 pages = 0;
2861 2866
2862 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2867 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2863 if (hw->mac.type == e1000_pch2lan) { 2868 if (hw->mac.type >= e1000_pch2lan) {
2864 s32 ret_val; 2869 s32 ret_val;
2865 2870
2866 if (adapter->netdev->mtu > ETH_DATA_LEN) 2871 if (adapter->netdev->mtu > ETH_DATA_LEN)
@@ -2935,6 +2940,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2935 /* Enable Extended Status in all Receive Descriptors */ 2940 /* Enable Extended Status in all Receive Descriptors */
2936 rfctl = er32(RFCTL); 2941 rfctl = er32(RFCTL);
2937 rfctl |= E1000_RFCTL_EXTEN; 2942 rfctl |= E1000_RFCTL_EXTEN;
2943 ew32(RFCTL, rfctl);
2938 2944
2939 /* 2945 /*
2940 * 82571 and greater support packet-split where the protocol 2946 * 82571 and greater support packet-split where the protocol
@@ -2960,13 +2966,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2960 if (adapter->rx_ps_pages) { 2966 if (adapter->rx_ps_pages) {
2961 u32 psrctl = 0; 2967 u32 psrctl = 0;
2962 2968
2963 /*
2964 * disable packet split support for IPv6 extension headers,
2965 * because some malformed IPv6 headers can hang the Rx
2966 */
2967 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2968 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2969
2970 /* Enable Packet split descriptors */ 2969 /* Enable Packet split descriptors */
2971 rctl |= E1000_RCTL_DTYP_PS; 2970 rctl |= E1000_RCTL_DTYP_PS;
2972 2971
@@ -3005,7 +3004,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3005 */ 3004 */
3006 } 3005 }
3007 3006
3008 ew32(RFCTL, rfctl);
3009 ew32(RCTL, rctl); 3007 ew32(RCTL, rctl);
3010 /* just started the receive unit, no need to restart */ 3008 /* just started the receive unit, no need to restart */
3011 adapter->flags &= ~FLAG_RX_RESTART_NOW; 3009 adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -3209,7 +3207,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3209 netdev_for_each_uc_addr(ha, netdev) { 3207 netdev_for_each_uc_addr(ha, netdev) {
3210 if (!rar_entries) 3208 if (!rar_entries)
3211 break; 3209 break;
3212 e1000e_rar_set(hw, ha->addr, rar_entries--); 3210 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3213 count++; 3211 count++;
3214 } 3212 }
3215 } 3213 }
@@ -3490,6 +3488,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3490 fc->refresh_time = 0x1000; 3488 fc->refresh_time = 0x1000;
3491 break; 3489 break;
3492 case e1000_pch2lan: 3490 case e1000_pch2lan:
3491 case e1000_pch_lpt:
3493 fc->high_water = 0x05C20; 3492 fc->high_water = 0x05C20;
3494 fc->low_water = 0x05048; 3493 fc->low_water = 0x05048;
3495 fc->pause_time = 0x0650; 3494 fc->pause_time = 0x0650;
@@ -4018,6 +4017,7 @@ static int e1000_close(struct net_device *netdev)
4018static int e1000_set_mac(struct net_device *netdev, void *p) 4017static int e1000_set_mac(struct net_device *netdev, void *p)
4019{ 4018{
4020 struct e1000_adapter *adapter = netdev_priv(netdev); 4019 struct e1000_adapter *adapter = netdev_priv(netdev);
4020 struct e1000_hw *hw = &adapter->hw;
4021 struct sockaddr *addr = p; 4021 struct sockaddr *addr = p;
4022 4022
4023 if (!is_valid_ether_addr(addr->sa_data)) 4023 if (!is_valid_ether_addr(addr->sa_data))
@@ -4026,7 +4026,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4026 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4026 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4027 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4027 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4028 4028
4029 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4029 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4030 4030
4031 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4031 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4032 /* activate the work around */ 4032 /* activate the work around */
@@ -4040,9 +4040,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4040 * are dropped. Eventually the LAA will be in RAR[0] and 4040 * are dropped. Eventually the LAA will be in RAR[0] and
4041 * RAR[14] 4041 * RAR[14]
4042 */ 4042 */
4043 e1000e_rar_set(&adapter->hw, 4043 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4044 adapter->hw.mac.addr, 4044 adapter->hw.mac.rar_entry_count - 1);
4045 adapter->hw.mac.rar_entry_count - 1);
4046 } 4045 }
4047 4046
4048 return 0; 4047 return 0;
@@ -4621,7 +4620,7 @@ link_up:
4621 * reset from the other port. Set the appropriate LAA in RAR[0] 4620 * reset from the other port. Set the appropriate LAA in RAR[0]
4622 */ 4621 */
4623 if (e1000e_get_laa_state_82571(hw)) 4622 if (e1000e_get_laa_state_82571(hw))
4624 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4623 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4625 4624
4626 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4625 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4627 e1000e_check_82574_phy_workaround(adapter); 4626 e1000e_check_82574_phy_workaround(adapter);
@@ -5267,22 +5266,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5267 return -EINVAL; 5266 return -EINVAL;
5268 } 5267 }
5269 5268
5270 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5269 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5271 if ((adapter->hw.mac.type == e1000_pch2lan) && 5270 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5272 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5271 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5273 (new_mtu > ETH_DATA_LEN)) { 5272 (new_mtu > ETH_DATA_LEN)) {
5274 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5273 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5275 return -EINVAL; 5274 return -EINVAL;
5276 } 5275 }
5277 5276
5278 /* 82573 Errata 17 */
5279 if (((adapter->hw.mac.type == e1000_82573) ||
5280 (adapter->hw.mac.type == e1000_82574)) &&
5281 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
5282 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
5283 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
5284 }
5285
5286 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5277 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5287 usleep_range(1000, 2000); 5278 usleep_range(1000, 2000);
5288 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5279 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -5676,7 +5667,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5676 return err; 5667 return err;
5677 } 5668 }
5678 5669
5679 if (hw->mac.type == e1000_pch2lan) 5670 if (hw->mac.type >= e1000_pch2lan)
5680 e1000_resume_workarounds_pchlan(&adapter->hw); 5671 e1000_resume_workarounds_pchlan(&adapter->hw);
5681 5672
5682 e1000e_power_up_phy(adapter); 5673 e1000e_power_up_phy(adapter);
@@ -6575,6 +6566,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6575 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 6566 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6576 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 6567 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6577 6568
6569 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6570 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6571
6578 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6572 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6579}; 6573};
6580MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6574MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index bd5ef64b3003..0334d013bc3c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -639,6 +639,45 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
639} 639}
640 640
641/** 641/**
642 * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
643 * @hw: pointer to the HW structure
644 *
645 * Sets up Master/slave mode
646 **/
647static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
648{
649 s32 ret_val;
650 u16 phy_data;
651
652 /* Resolve Master/Slave mode */
653 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
654 if (ret_val)
655 return ret_val;
656
657 /* load defaults for future use */
658 hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
659 ((phy_data & CR_1000T_MS_VALUE) ?
660 e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
661
662 switch (hw->phy.ms_type) {
663 case e1000_ms_force_master:
664 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
665 break;
666 case e1000_ms_force_slave:
667 phy_data |= CR_1000T_MS_ENABLE;
668 phy_data &= ~(CR_1000T_MS_VALUE);
669 break;
670 case e1000_ms_auto:
671 phy_data &= ~CR_1000T_MS_ENABLE;
672 /* fall-through */
673 default:
674 break;
675 }
676
677 return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
678}
679
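
The two PHY_1000T_CTRL bits decode as: CR_1000T_MS_ENABLE selects manual (1) versus auto-negotiated (0) master/slave resolution, and CR_1000T_MS_VALUE picks master (1) or slave (0) when manual. The original_ms_type ternary above is that truth table compressed; spelled out (sketch):

	enum e1000_ms_type ms;

	if (!(phy_data & CR_1000T_MS_ENABLE))
		ms = e1000_ms_auto;		/* hardware resolves it */
	else if (phy_data & CR_1000T_MS_VALUE)
		ms = e1000_ms_force_master;
	else
		ms = e1000_ms_force_slave;
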
680/**
642 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 681 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
643 * @hw: pointer to the HW structure 682 * @hw: pointer to the HW structure
644 * 683 *
@@ -659,7 +698,11 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
659 /* Enable downshift */ 698 /* Enable downshift */
660 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 699 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
661 700
662 return e1e_wphy(hw, I82577_CFG_REG, phy_data); 701 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
702 if (ret_val)
703 return ret_val;
704
705 return e1000_set_master_slave_mode(hw);
663} 706}
664 707
665/** 708/**
@@ -722,8 +765,24 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
722 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 765 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
723 766
724 /* Enable downshift on BM (disabled by default) */ 767 /* Enable downshift on BM (disabled by default) */
725 if (phy->type == e1000_phy_bm) 768 if (phy->type == e1000_phy_bm) {
769 /* For 82574/82583, first disable then enable downshift */
770 if (phy->id == BME1000_E_PHY_ID_R2) {
771 phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
772 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
773 phy_data);
774 if (ret_val)
775 return ret_val;
776 /* Commit the changes. */
777 ret_val = e1000e_commit_phy(hw);
778 if (ret_val) {
779 e_dbg("Error committing the PHY changes\n");
780 return ret_val;
781 }
782 }
783
726 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; 784 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
785 }
727 786
728 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 787 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
729 if (ret_val) 788 if (ret_val)
@@ -879,31 +938,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
879 return ret_val; 938 return ret_val;
880 } 939 }
881 940
882 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); 941 ret_val = e1000_set_master_slave_mode(hw);
883 if (ret_val)
884 return ret_val;
885
886 /* load defaults for future use */
887 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
888 ((data & CR_1000T_MS_VALUE) ?
889 e1000_ms_force_master :
890 e1000_ms_force_slave) :
891 e1000_ms_auto;
892
893 switch (phy->ms_type) {
894 case e1000_ms_force_master:
895 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
896 break;
897 case e1000_ms_force_slave:
898 data |= CR_1000T_MS_ENABLE;
899 data &= ~(CR_1000T_MS_VALUE);
900 break;
901 case e1000_ms_auto:
902 data &= ~CR_1000T_MS_ENABLE;
903 default:
904 break;
905 }
906 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
907 } 942 }
908 943
909 return ret_val; 944 return ret_val;
@@ -2319,6 +2354,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2319 case I82579_E_PHY_ID: 2354 case I82579_E_PHY_ID:
2320 phy_type = e1000_phy_82579; 2355 phy_type = e1000_phy_82579;
2321 break; 2356 break;
2357 case I217_E_PHY_ID:
2358 phy_type = e1000_phy_i217;
2359 break;
2322 default: 2360 default:
2323 phy_type = e1000_phy_unknown; 2361 phy_type = e1000_phy_unknown;
2324 break; 2362 break;
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 8be1d1b2132e..0708d7eb4668 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8e082f257efb..89cebc812ab9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -331,6 +331,26 @@ struct ixgbe_q_vector {
331 /* for dynamic allocation of rings associated with this q_vector */ 331 /* for dynamic allocation of rings associated with this q_vector */
332 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; 332 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
333}; 333};
334#ifdef CONFIG_IXGBE_HWMON
335
336#define IXGBE_HWMON_TYPE_LOC 0
337#define IXGBE_HWMON_TYPE_TEMP 1
338#define IXGBE_HWMON_TYPE_CAUTION 2
339#define IXGBE_HWMON_TYPE_MAX 3
340
341struct hwmon_attr {
342 struct device_attribute dev_attr;
343 struct ixgbe_hw *hw;
344 struct ixgbe_thermal_diode_data *sensor;
345 char name[12];
346};
347
348struct hwmon_buff {
349 struct device *device;
350 struct hwmon_attr *hwmon_list;
351 unsigned int n_hwmon;
352};
353#endif /* CONFIG_IXGBE_HWMON */
334 354
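
struct hwmon_attr embeds the device_attribute rather than pointing at it, so a sysfs show callback can recover the wrapper (and from it the hw pointer and sensor slot) with container_of. The new ixgbe_sysfs.c presumably does something like this (sketch; the callback name is hypothetical):

	static ssize_t ixgbe_hwmon_show_temp(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
	{
		struct hwmon_attr *ixgbe_attr =
			container_of(attr, struct hwmon_attr, dev_attr);

		/* refresh the reading, then report it in millidegrees */
		ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw);
		return sprintf(buf, "%u\n", ixgbe_attr->sensor->temp * 1000);
	}
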
335/* 355/*
336 * microsecond values for various ITR rates shifted by 2 to fit itr register 356 * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -535,6 +555,10 @@ struct ixgbe_adapter {
535 555
536 u32 timer_event_accumulator; 556 u32 timer_event_accumulator;
537 u32 vferr_refcount; 557 u32 vferr_refcount;
558 struct kobject *info_kobj;
559#ifdef CONFIG_IXGBE_HWMON
560 struct hwmon_buff ixgbe_hwmon_buff;
561#endif /* CONFIG_IXGBE_HWMON */
538}; 562};
539 563
540struct ixgbe_fdir_filter { 564struct ixgbe_fdir_filter {
@@ -635,6 +659,8 @@ extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
635#endif 659#endif
636extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); 660extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
637extern void ixgbe_do_reset(struct net_device *netdev); 661extern void ixgbe_do_reset(struct net_device *netdev);
662extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
663extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
638#ifdef IXGBE_FCOE 664#ifdef IXGBE_FCOE
639extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 665extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
640extern int ixgbe_fso(struct ixgbe_ring *tx_ring, 666extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 56fd46844f65..42537336110c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -324,24 +324,33 @@ out:
324/** 324/**
325 * ixgbe_fc_enable_82598 - Enable flow control 325 * ixgbe_fc_enable_82598 - Enable flow control
326 * @hw: pointer to hardware structure 326 * @hw: pointer to hardware structure
327 * @packetbuf_num: packet buffer number (0-7)
328 * 327 *
329 * Enable flow control according to the current settings. 328 * Enable flow control according to the current settings.
330 **/ 329 **/
331static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 330static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
332{ 331{
333 s32 ret_val = 0; 332 s32 ret_val = 0;
334 u32 fctrl_reg; 333 u32 fctrl_reg;
335 u32 rmcs_reg; 334 u32 rmcs_reg;
336 u32 reg; 335 u32 reg;
336 u32 fcrtl, fcrth;
337 u32 link_speed = 0; 337 u32 link_speed = 0;
338 int i;
338 bool link_up; 339 bool link_up;
339 340
340#ifdef CONFIG_DCB 341 /*
341 if (hw->fc.requested_mode == ixgbe_fc_pfc) 342 * Validate the water mark configuration for packet buffer 0. Zero
343 * water marks indicate that the packet buffer was not configured
344 * and the watermarks for packet buffer 0 should always be configured.
345 */
346 if (!hw->fc.low_water ||
347 !hw->fc.high_water[0] ||
348 !hw->fc.pause_time) {
349 hw_dbg(hw, "Invalid water mark configuration\n");
350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
342 goto out; 351 goto out;
352 }
343 353
344#endif /* CONFIG_DCB */
345 /* 354 /*
346 * On 82598 having Rx FC on causes resets while doing 1G 355 * On 82598 having Rx FC on causes resets while doing 1G
347 * so if it's on turn it off once we know link_speed. For 356 * so if it's on turn it off once we know link_speed. For
@@ -363,9 +372,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
363 } 372 }
364 373
365 /* Negotiate the fc mode to use */ 374 /* Negotiate the fc mode to use */
366 ret_val = ixgbe_fc_autoneg(hw); 375 ixgbe_fc_autoneg(hw);
367 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
368 goto out;
369 376
370 /* Disable any previous flow control settings */ 377 /* Disable any previous flow control settings */
371 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -382,9 +389,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
382 * 2: Tx flow control is enabled (we can send pause frames but 389 * 2: Tx flow control is enabled (we can send pause frames but
383 * we do not support receiving pause frames). 390 * we do not support receiving pause frames).
384 * 3: Both Rx and Tx flow control (symmetric) are enabled. 391 * 3: Both Rx and Tx flow control (symmetric) are enabled.
385#ifdef CONFIG_DCB
386 * 4: Priority Flow Control is enabled.
387#endif
388 * other: Invalid. 392 * other: Invalid.
389 */ 393 */
390 switch (hw->fc.current_mode) { 394 switch (hw->fc.current_mode) {
@@ -417,11 +421,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
417 fctrl_reg |= IXGBE_FCTRL_RFCE; 421 fctrl_reg |= IXGBE_FCTRL_RFCE;
418 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
419 break; 423 break;
420#ifdef CONFIG_DCB
421 case ixgbe_fc_pfc:
422 goto out;
423 break;
424#endif /* CONFIG_DCB */
425 default: 424 default:
426 hw_dbg(hw, "Flow control param set incorrectly\n"); 425 hw_dbg(hw, "Flow control param set incorrectly\n");
427 ret_val = IXGBE_ERR_CONFIG; 426 ret_val = IXGBE_ERR_CONFIG;
@@ -434,29 +433,29 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
434 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
435 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 434 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
436 435
437 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 436 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
438 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
439 reg = hw->fc.low_water << 6;
440 if (hw->fc.send_xon)
441 reg |= IXGBE_FCRTL_XONE;
442
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
444 437
445 reg = hw->fc.high_water[packetbuf_num] << 6; 438 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
446 reg |= IXGBE_FCRTH_FCEN; 439 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
440 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
441 hw->fc.high_water[i]) {
442 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
444 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
445 } else {
446 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
447 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
448 }
447 449
448 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
449 } 450 }
450 451
451 /* Configure pause time (2 TCs per register) */ 452 /* Configure pause time (2 TCs per register) */
452 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 453 reg = hw->fc.pause_time * 0x00010001;
453 if ((packetbuf_num & 1) == 0) 454 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
454 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 455 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
455 else
456 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
457 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
458 456
459 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 457 /* Configure flow control refresh threshold value */
458 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
460 459
461out: 460out:
462 return ret_val; 461 return ret_val;
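
The shift by 10 converts the kilobyte units kept in hw->fc into the byte-granular FCRTL/FCRTH threshold fields (the old 82598 code shifted by only 6). A quick check of the arithmetic, assuming low_water really is in KB:

	u32 low_water = 64;	/* 64 KB, illustrative */
	u32 fcrtl = (low_water << 10) | IXGBE_FCRTL_XONE;
	/* 64 << 10 == 65536 bytes; XONE additionally enables XON frames */
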
@@ -1277,6 +1276,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1277 .set_fw_drv_ver = NULL, 1276 .set_fw_drv_ver = NULL,
1278 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 1277 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1279 .release_swfw_sync = &ixgbe_release_swfw_sync, 1278 .release_swfw_sync = &ixgbe_release_swfw_sync,
1279 .get_thermal_sensor_data = NULL,
1280 .init_thermal_sensor_thresh = NULL,
1280}; 1281};
1281 1282
1282static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1283static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 9c14685358eb..dee64d2703f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2119,6 +2119,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2121 .release_swfw_sync = &ixgbe_release_swfw_sync, 2121 .release_swfw_sync = &ixgbe_release_swfw_sync,
2122 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2123 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2122 2124
2123}; 2125};
2124 2126
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index e59888163a17..c7e51b85b8b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -47,13 +47,6 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
57static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 50static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
58static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 51static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data); 52 u16 words, u16 *data);
@@ -64,6 +57,172 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 57static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
65 58
66/** 59/**
60 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
61 * control
62 * @hw: pointer to hardware structure
63 *
64 * There are several phys that do not support autoneg flow control. This
 65 * function checks the device id to see if the associated phy supports
66 * autoneg flow control.
67 **/
68static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{
70
71 switch (hw->device_id) {
72 case IXGBE_DEV_ID_X540T:
73 return 0;
74 case IXGBE_DEV_ID_82599_T3_LOM:
75 return 0;
76 default:
77 return IXGBE_ERR_FC_NOT_SUPPORTED;
78 }
79}
80
81/**
82 * ixgbe_setup_fc - Set up flow control
83 * @hw: pointer to hardware structure
84 *
85 * Called at init time to set up flow control.
86 **/
87static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
88{
89 s32 ret_val = 0;
90 u32 reg = 0, reg_bp = 0;
91 u16 reg_cu = 0;
92
93 /*
94 * Validate the requested mode. Strict IEEE mode does not allow
95 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
96 */
97 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
98 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
99 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
100 goto out;
101 }
102
103 /*
104 * 10gig parts do not have a word in the EEPROM to determine the
105 * default flow control setting, so we explicitly set it to full.
106 */
107 if (hw->fc.requested_mode == ixgbe_fc_default)
108 hw->fc.requested_mode = ixgbe_fc_full;
109
110 /*
111 * Set up the 1G and 10G flow control advertisement registers so the
112 * HW will be able to do fc autoneg once the cable is plugged in. If
113 * we link at 10G, the 1G advertisement is harmless and vice versa.
114 */
115 switch (hw->phy.media_type) {
116 case ixgbe_media_type_fiber:
117 case ixgbe_media_type_backplane:
118 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
119 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
120 break;
121 case ixgbe_media_type_copper:
122 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
123 MDIO_MMD_AN, &reg_cu);
124 break;
125 default:
126 break;
127 }
128
129 /*
130 * The possible values of fc.requested_mode are:
131 * 0: Flow control is completely disabled
132 * 1: Rx flow control is enabled (we can receive pause frames,
133 * but not send pause frames).
134 * 2: Tx flow control is enabled (we can send pause frames but
135 * we do not support receiving pause frames).
136 * 3: Both Rx and Tx flow control (symmetric) are enabled.
137 * other: Invalid.
138 */
139 switch (hw->fc.requested_mode) {
140 case ixgbe_fc_none:
141 /* Flow control completely disabled by software override. */
142 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
143 if (hw->phy.media_type == ixgbe_media_type_backplane)
144 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
145 IXGBE_AUTOC_ASM_PAUSE);
146 else if (hw->phy.media_type == ixgbe_media_type_copper)
147 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
148 break;
149 case ixgbe_fc_tx_pause:
150 /*
151 * Tx Flow control is enabled, and Rx Flow control is
152 * disabled by software override.
153 */
154 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
155 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
156 if (hw->phy.media_type == ixgbe_media_type_backplane) {
157 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
158 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
159 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
160 reg_cu |= IXGBE_TAF_ASM_PAUSE;
161 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
162 }
163 break;
164 case ixgbe_fc_rx_pause:
165 /*
166 * Rx Flow control is enabled and Tx Flow control is
167 * disabled by software override. Since there really
168 * isn't a way to advertise that we are capable of RX
169 * Pause ONLY, we will advertise that we support both
 170 * symmetric and asymmetric Rx PAUSE; as such, we fall
171 * through to the fc_full statement. Later, we will
172 * disable the adapter's ability to send PAUSE frames.
173 */
174 case ixgbe_fc_full:
175 /* Flow control (both Rx and Tx) is enabled by SW override. */
176 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
177 if (hw->phy.media_type == ixgbe_media_type_backplane)
178 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
179 IXGBE_AUTOC_ASM_PAUSE;
180 else if (hw->phy.media_type == ixgbe_media_type_copper)
181 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
182 break;
183 default:
184 hw_dbg(hw, "Flow control param set incorrectly\n");
185 ret_val = IXGBE_ERR_CONFIG;
186 goto out;
187 break;
188 }
189
190 if (hw->mac.type != ixgbe_mac_X540) {
191 /*
192 * Enable auto-negotiation between the MAC & PHY;
193 * the MAC will advertise clause 37 flow control.
194 */
195 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
196 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
197
198 /* Disable AN timeout */
199 if (hw->fc.strict_ieee)
200 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
201
202 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
203 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
204 }
205
206 /*
207 * AUTOC restart handles negotiation of 1G and 10G on backplane
208 * and copper. There is no need to set the PCS1GCTL register.
209 *
210 */
211 if (hw->phy.media_type == ixgbe_media_type_backplane) {
212 reg_bp |= IXGBE_AUTOC_AN_RESTART;
213 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
214 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
215 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
216 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
217 MDIO_MMD_AN, reg_cu);
218 }
219
220 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
221out:
222 return ret_val;
223}
224
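
Once link is up, ixgbe_fc_autoneg resolves these advertised pause bits against the link partner's. The resolution is roughly the standard clause 37 outcome table; a sketch using the adv/lp parameter names from the forward declarations removed at the top of this file's diff:

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/* both sides advertise symmetric pause */
		if (hw->fc.requested_mode == ixgbe_fc_full)
			hw->fc.current_mode = ixgbe_fc_full;
		else
			hw->fc.current_mode = ixgbe_fc_rx_pause;
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;	/* we pause them */
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;	/* they pause us */
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
	}
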
225/**
67 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 226 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
68 * @hw: pointer to hardware structure 227 * @hw: pointer to hardware structure
69 * 228 *
@@ -95,7 +254,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
95 IXGBE_WRITE_FLUSH(hw); 254 IXGBE_WRITE_FLUSH(hw);
96 255
97 /* Setup flow control */ 256 /* Setup flow control */
98 ixgbe_setup_fc(hw, 0); 257 ixgbe_setup_fc(hw);
99 258
100 /* Clear adapter stopped flag */ 259 /* Clear adapter stopped flag */
101 hw->adapter_stopped = false; 260 hw->adapter_stopped = false;
@@ -1923,30 +2082,36 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1923/** 2082/**
1924 * ixgbe_fc_enable_generic - Enable flow control 2083 * ixgbe_fc_enable_generic - Enable flow control
1925 * @hw: pointer to hardware structure 2084 * @hw: pointer to hardware structure
1926 * @packetbuf_num: packet buffer number (0-7)
1927 * 2085 *
1928 * Enable flow control according to the current settings. 2086 * Enable flow control according to the current settings.
1929 **/ 2087 **/
1930s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 2088s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
1931{ 2089{
1932 s32 ret_val = 0; 2090 s32 ret_val = 0;
1933 u32 mflcn_reg, fccfg_reg; 2091 u32 mflcn_reg, fccfg_reg;
1934 u32 reg; 2092 u32 reg;
1935 u32 fcrtl, fcrth; 2093 u32 fcrtl, fcrth;
2094 int i;
1936 2095
1937#ifdef CONFIG_DCB 2096 /*
1938 if (hw->fc.requested_mode == ixgbe_fc_pfc) 2097 * Validate the water mark configuration for packet buffer 0. Zero
2098 * water marks indicate that the packet buffer was not configured
2099 * and the watermarks for packet buffer 0 should always be configured.
2100 */
2101 if (!hw->fc.low_water ||
2102 !hw->fc.high_water[0] ||
2103 !hw->fc.pause_time) {
2104 hw_dbg(hw, "Invalid water mark configuration\n");
2105 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1939 goto out; 2106 goto out;
2107 }
1940 2108
1941#endif /* CONFIG_DCB */
1942 /* Negotiate the fc mode to use */ 2109 /* Negotiate the fc mode to use */
1943 ret_val = ixgbe_fc_autoneg(hw); 2110 ixgbe_fc_autoneg(hw);
1944 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1945 goto out;
1946 2111
1947 /* Disable any previous flow control settings */ 2112 /* Disable any previous flow control settings */
1948 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2113 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1949 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 2114 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
1950 2115
1951 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2116 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1952 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2117 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
@@ -1959,9 +2124,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 	 * 2:  Tx flow control is enabled (we can send pause frames but
 	 *     we do not support receiving pause frames).
 	 * 3:  Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-	 * 4:  Priority Flow Control is enabled.
-#endif
 	 * other: Invalid.
 	 */
 	switch (hw->fc.current_mode) {
@@ -1994,11 +2156,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 		mflcn_reg |= IXGBE_MFLCN_RFCE;
 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
 		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
 		ret_val = IXGBE_ERR_CONFIG;
@@ -2011,100 +2168,86 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	fcrtl = hw->fc.low_water << 10;
+	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
-	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		fcrth = hw->fc.high_water[packetbuf_num] << 10;
-		fcrth |= IXGBE_FCRTH_FCEN;
-		if (hw->fc.send_xon)
-			fcrtl |= IXGBE_FCRTL_XONE;
-	} else {
-		/*
-		 * If Tx flow control is disabled, set our high water mark
-		 * to Rx FIFO size minus 32 in order prevent Tx switch
-		 * loopback from stalling on DMA.
-		 */
-		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32;
-	}
+	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+			/*
+			 * In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the maximum FCRTH value.  This allows the Tx
+			 * switch to function even under heavy Rx workloads.
+			 */
+			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+		}
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+	}
 
 	/* Configure pause time (2 TCs per register) */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-	if ((packetbuf_num & 1) == 0)
-		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-	else
-		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
 	return ret_val;
 }
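Two details in the rewritten tail above are worth spelling out: multiplying the 16-bit pause_time by 0x00010001 replicates it into both halves of a 32-bit FCTTV register (each register holds the timer for two traffic classes), and FCRTV, the refresh threshold, is half the pause time. A compilable sketch of just that arithmetic (the sample pause_time value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_TRAFFIC_CLASS 8 /* as in the driver */

    int main(void)
    {
            uint16_t pause_time = 0x0680; /* example quanta count */
            uint32_t reg = (uint32_t)pause_time * 0x00010001u;

            for (int i = 0; i < MAX_TRAFFIC_CLASS / 2; i++)
                    printf("FCTTV(%d) = 0x%08X (TCs %d and %d)\n",
                           i, reg, 2 * i, 2 * i + 1);
            printf("FCRTV = 0x%04X\n", pause_time / 2);
            return 0;
    }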
 
 /**
- * ixgbe_fc_autoneg - Configure flow control
+ * ixgbe_negotiate_fc - Negotiate flow control
  * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
  *
- * Compares our advertised flow control capabilities to those advertised by
- * our link partner, and determines the proper flow control mode to use.
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
  **/
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
 {
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	if (hw->fc.disable_fc_autoneg)
-		goto out;
-
-	/*
-	 * AN should have completed when the cable was plugged in.
-	 * Look for reasons to bail out.  Bail out if:
-	 * - FC autoneg is disabled, or if
-	 * - link is not up.
-	 *
-	 * Since we're being called from an LSC, link is already known to be up.
-	 * So use link_up_wait_to_complete=false.
-	 */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (!link_up) {
-		ret_val = IXGBE_ERR_FLOW_CONTROL;
-		goto out;
-	}
-
-	switch (hw->phy.media_type) {
-	/* Autoneg flow control on fiber adapters */
-	case ixgbe_media_type_fiber:
-		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
-			ret_val = ixgbe_fc_autoneg_fiber(hw);
-		break;
-
-	/* Autoneg flow control on backplane adapters */
-	case ixgbe_media_type_backplane:
-		ret_val = ixgbe_fc_autoneg_backplane(hw);
-		break;
-
-	/* Autoneg flow control on copper adapters */
-	case ixgbe_media_type_copper:
-		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
-			ret_val = ixgbe_fc_autoneg_copper(hw);
-		break;
-
-	default:
-		break;
-	}
+	if ((!(adv_reg)) || (!(lp_reg)))
+		return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
-out:
-	if (ret_val == 0) {
-		hw->fc.fc_was_autonegged = true;
+	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+		/*
+		 * Now we need to check if the user selected Rx ONLY
+		 * of pause frames.  In this case, we had to advertise
+		 * FULL flow control because we could not advertise RX
+		 * ONLY. Hence, we must now check to see if we need to
+		 * turn OFF the TRANSMISSION of PAUSE frames.
+		 */
+		if (hw->fc.requested_mode == ixgbe_fc_full) {
+			hw->fc.current_mode = ixgbe_fc_full;
+			hw_dbg(hw, "Flow Control = FULL.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_rx_pause;
+			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
+		}
+	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_tx_pause;
+		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_rx_pause;
+		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
 	} else {
-		hw->fc.fc_was_autonegged = false;
-		hw->fc.current_mode = hw->fc.requested_mode;
+		hw->fc.current_mode = ixgbe_fc_none;
+		hw_dbg(hw, "Flow Control = NONE.\n");
 	}
-	return ret_val;
+	return 0;
 }
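The three else-if arms above implement the IEEE 802.3 pause resolution table over the symmetric/asymmetric advertisement bits. The following standalone model mirrors the same conditions (SYM, ASM and the resolve() helper are illustrative names, not driver symbols):

    #include <stdio.h>
    #include <stdint.h>

    #define SYM 0x1 /* stands in for the adv_sym/lp_sym register bit */
    #define ASM 0x2 /* stands in for the adv_asm/lp_asm register bit */

    static const char *resolve(uint32_t adv, uint32_t lp, int requested_full)
    {
            if (!adv || !lp)
                    return "not negotiated";
            if ((adv & SYM) && (lp & SYM))
                    return requested_full ? "full" : "rx_pause";
            if (!(adv & SYM) && (adv & ASM) && (lp & SYM) && (lp & ASM))
                    return "tx_pause";
            if ((adv & SYM) && (adv & ASM) && !(lp & SYM) && (lp & ASM))
                    return "rx_pause";
            return "none";
    }

    int main(void)
    {
            printf("%s\n", resolve(SYM, SYM, 1));       /* full */
            printf("%s\n", resolve(SYM | ASM, ASM, 0)); /* rx_pause */
            printf("%s\n", resolve(ASM, SYM | ASM, 0)); /* tx_pause */
            return 0;
    }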
 
 /**
@@ -2116,7 +2259,7 @@ out:
 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 {
 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-	s32 ret_val;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
 
 	/*
 	 * On multispeed fiber at 1g, bail out if
@@ -2126,10 +2269,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 
 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
 		goto out;
-	}
 
 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2153,7 +2294,7 @@ out:
 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
 {
 	u32 links2, anlp1_reg, autoc_reg, links;
-	s32 ret_val;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
 
 	/*
 	 * On backplane, bail out if
@@ -2161,21 +2302,13 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
 	 * - we are 82599 and link partner is not AN enabled
 	 */
 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-		hw->fc.fc_was_autonegged = false;
-		hw->fc.current_mode = hw->fc.requested_mode;
-		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
 		goto out;
-	}
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-			hw->fc.fc_was_autonegged = false;
-			hw->fc.current_mode = hw->fc.requested_mode;
-			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
 			goto out;
-		}
 	}
 	/*
 	 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2217,241 +2350,63 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_negotiate_fc - Negotiate flow control
- * @hw: pointer to hardware structure
- * @adv_reg: flow control advertised settings
- * @lp_reg: link partner's flow control settings
- * @adv_sym: symmetric pause bit in advertisement
- * @adv_asm: asymmetric pause bit in advertisement
- * @lp_sym: symmetric pause bit in link partner advertisement
- * @lp_asm: asymmetric pause bit in link partner advertisement
- *
- * Find the intersection between advertised settings and link partner's
- * advertised settings
- **/
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
-			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
-{
-	if ((!(adv_reg)) || (!(lp_reg)))
-		return IXGBE_ERR_FC_NOT_NEGOTIATED;
-
-	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
-		/*
-		 * Now we need to check if the user selected Rx ONLY
-		 * of pause frames.  In this case, we had to advertise
-		 * FULL flow control because we could not advertise RX
-		 * ONLY. Hence, we must now check to see if we need to
-		 * turn OFF the TRANSMISSION of PAUSE frames.
-		 */
-		if (hw->fc.requested_mode == ixgbe_fc_full) {
-			hw->fc.current_mode = ixgbe_fc_full;
-			hw_dbg(hw, "Flow Control = FULL.\n");
-		} else {
-			hw->fc.current_mode = ixgbe_fc_rx_pause;
-			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
-		}
-	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-		hw->fc.current_mode = ixgbe_fc_tx_pause;
-		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-		hw->fc.current_mode = ixgbe_fc_rx_pause;
-		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-	} else {
-		hw->fc.current_mode = ixgbe_fc_none;
-		hw_dbg(hw, "Flow Control = NONE.\n");
-	}
-	return 0;
-}
-
-/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_fc_autoneg - Configure flow control
  * @hw: pointer to hardware structure
  *
- * Called at init time to set up flow control.
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
  **/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
-	u32 reg = 0, reg_bp = 0;
-	u16 reg_cu = 0;
-
-#ifdef CONFIG_DCB
-	if (hw->fc.requested_mode == ixgbe_fc_pfc) {
-		hw->fc.current_mode = hw->fc.requested_mode;
-		goto out;
-	}
-
-#endif /* CONFIG_DCB */
-	/* Validate the packetbuf configuration */
-	if (packetbuf_num < 0 || packetbuf_num > 7) {
-		hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
-		       "is 0-7\n", packetbuf_num);
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	ixgbe_link_speed speed;
+	bool link_up;
 
 	/*
-	 * Validate the water mark configuration.  Zero water marks are invalid
-	 * because it causes the controller to just blast out fc packets.
+	 * AN should have completed when the cable was plugged in.
+	 * Look for reasons to bail out.  Bail out if:
+	 * - FC autoneg is disabled, or if
+	 * - link is not up.
+	 *
+	 * Since we're being called from an LSC, link is already known to be up.
+	 * So use link_up_wait_to_complete=false.
 	 */
-	if (!hw->fc.low_water ||
-	    !hw->fc.high_water[packetbuf_num] ||
-	    !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+	if (hw->fc.disable_fc_autoneg)
 		goto out;
-	}
 
-	/*
-	 * Validate the requested mode.  Strict IEEE mode does not allow
-	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
-	 */
-	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
-		       "IEEE mode\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+	if (!link_up)
 		goto out;
-	}
-
-	/*
-	 * 10gig parts do not have a word in the EEPROM to determine the
-	 * default flow control setting, so we explicitly set it to full.
-	 */
-	if (hw->fc.requested_mode == ixgbe_fc_default)
-		hw->fc.requested_mode = ixgbe_fc_full;
-
-	/*
-	 * Set up the 1G and 10G flow control advertisement registers so the
-	 * HW will be able to do fc autoneg once the cable is plugged in.  If
-	 * we link at 10G, the 1G advertisement is harmless and vice versa.
-	 */
 
 	switch (hw->phy.media_type) {
+	/* Autoneg flow control on fiber adapters */
 	case ixgbe_media_type_fiber:
+		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+			ret_val = ixgbe_fc_autoneg_fiber(hw);
+		break;
+
+	/* Autoneg flow control on backplane adapters */
 	case ixgbe_media_type_backplane:
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		ret_val = ixgbe_fc_autoneg_backplane(hw);
 		break;
 
+	/* Autoneg flow control on copper adapters */
 	case ixgbe_media_type_copper:
-		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-				     MDIO_MMD_AN, &reg_cu);
+		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+			ret_val = ixgbe_fc_autoneg_copper(hw);
 		break;
 
 	default:
-		;
-	}
-
-	/*
-	 * The possible values of fc.requested_mode are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames,
-	 *    but not send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but
-	 *    we do not support receiving pause frames).
-	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-	 * 4: Priority Flow Control is enabled.
-#endif
-	 * other: Invalid.
-	 */
-	switch (hw->fc.requested_mode) {
-	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
-				    IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-				   IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane) {
-			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
-			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
-		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
-			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
-			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
-		}
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-				   IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif /* CONFIG_DCB */
-	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
 		break;
 	}
 
-	if (hw->mac.type != ixgbe_mac_X540) {
-		/*
-		 * Enable auto-negotiation between the MAC & PHY;
-		 * the MAC will advertise clause 37 flow control.
-		 */
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-		/* Disable AN timeout */
-		if (hw->fc.strict_ieee)
-			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-	}
-
-	/*
-	 * AUTOC restart handles negotiation of 1G and 10G on backplane
-	 * and copper. There is no need to set the PCS1GCTL register.
-	 *
-	 */
-	if (hw->phy.media_type == ixgbe_media_type_backplane) {
-		reg_bp |= IXGBE_AUTOC_AN_RESTART;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-		   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
-		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
-				      MDIO_MMD_AN, reg_cu);
-	}
-
-	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
-	return ret_val;
+	if (ret_val == 0) {
+		hw->fc.fc_was_autonegged = true;
+	} else {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
+	}
 }
 
 /**
@@ -3222,28 +3177,6 @@ wwn_prefix_out:
 }
 
 /**
- * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- *                                    control
- * @hw: pointer to hardware structure
- *
- * There are several phys that do not support autoneg flow control. This
- * function check the device id to see if the associated phy supports
- * autoneg flow control.
- **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
-{
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X540T:
-		return 0;
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		return 0;
-	default:
-		return IXGBE_ERR_FC_NOT_SUPPORTED;
-	}
-}
-
-/**
  * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  * @hw: pointer to hardware structure
  * @enable: enable or disable switch for anti-spoofing
@@ -3604,3 +3537,172 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 }
+
+static const u8 ixgbe_emc_temp_data[4] = {
+	IXGBE_EMC_INTERNAL_DATA,
+	IXGBE_EMC_DIODE1_DATA,
+	IXGBE_EMC_DIODE2_DATA,
+	IXGBE_EMC_DIODE3_DATA
+};
+static const u8 ixgbe_emc_therm_limit[4] = {
+	IXGBE_EMC_INTERNAL_THERM_LIMIT,
+	IXGBE_EMC_DIODE1_THERM_LIMIT,
+	IXGBE_EMC_DIODE2_THERM_LIMIT,
+	IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * ixgbe_get_ets_data - Extracts the ETS bit data
+ * @hw: pointer to hardware structure
+ * @ets_cfg: extracted ETS data
+ * @ets_offset: offset of ETS data
+ *
+ * Returns error code.
+ **/
+static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+			      u16 *ets_offset)
+{
+	s32 status = 0;
+
+	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
+	if (status)
+		goto out;
+
+	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
+	if (status)
+		goto out;
+
+	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+out:
+	return status;
+}
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Returns the thermal sensor data structure
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		goto out;
+
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+					     &ets_sensor);
+		if (status)
+			goto out;
+
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0) {
+			status = hw->phy.ops.read_i2c_byte(hw,
+					ixgbe_emc_temp_data[sensor_index],
+					IXGBE_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+			if (status)
+				goto out;
+		}
+	}
+out:
+	return status;
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  therm_limit;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		goto out;
+
+	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+		hw->phy.ops.write_i2c_byte(hw,
+			ixgbe_emc_therm_limit[sensor_index],
+			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+		if (sensor_location == 0)
+			continue;
+
+		data->sensor[i].location = sensor_location;
+		data->sensor[i].caution_thresh = therm_limit;
+		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
+	}
+out:
+	return status;
+}
+
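Both functions above parse the same EEPROM layout: the config word packs the low-threshold delta, the sensor type and the sensor count; each per-sensor word packs a diode index, a location code and the high (caution) threshold. A userspace decode of two made-up words, using the masks this series adds to ixgbe_type.h further below:

    #include <stdio.h>
    #include <stdint.h>

    #define IXGBE_ETS_LTHRES_DELTA_MASK  0x07C0
    #define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
    #define IXGBE_ETS_NUM_SENSORS_MASK   0x0007
    #define IXGBE_ETS_DATA_LOC_MASK      0x3C00
    #define IXGBE_ETS_DATA_LOC_SHIFT     10
    #define IXGBE_ETS_DATA_INDEX_MASK    0x0300
    #define IXGBE_ETS_DATA_INDEX_SHIFT   8
    #define IXGBE_ETS_DATA_HTHRESH_MASK  0x00FF

    int main(void)
    {
            uint16_t ets_cfg = 0x0143;    /* made up: delta 5, 3 sensors */
            uint16_t ets_sensor = 0x0550; /* made up: loc 1, diode 1, limit 0x50 */

            printf("low thresh delta: %u\n",
                   (ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
                   IXGBE_ETS_LTHRES_DELTA_SHIFT);
            printf("num sensors: %u\n", ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
            printf("location %u, diode index %u, therm limit 0x%02X\n",
                   (ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
                   IXGBE_ETS_DATA_LOC_SHIFT,
                   (ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
                   IXGBE_ETS_DATA_INDEX_SHIFT,
                   ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK);
            return 0;
    }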
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d6d34324540c..6222fdb3d3f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -77,8 +77,8 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -107,6 +107,19 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);
 
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+#define IXGBE_EMC_DIODE3_DATA		0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT	0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+
 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
 
 #ifndef writeq
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 888a419dc3d9..65913c5a616e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -278,18 +278,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
 
 	} else {
-		/* X540 devices have a RX bit that should be cleared
-		 * if PFC is disabled on all TCs but PFC features is
-		 * enabled.
-		 */
-		if (hw->mac.type == ixgbe_mac_X540) {
-			reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-			reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-			IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-		}
-
-		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-			hw->mac.ops.fc_enable(hw, i);
+		hw->mac.ops.fc_enable(hw);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 652e4b09546d..2feacf698d9b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -662,6 +662,13 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 		return -ENOMEM;
 	}
 
+	if (pfc->pfc_en) {
+		adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+		adapter->hw.fc.current_mode = ixgbe_fc_pfc;
+	} else {
+		adapter->hw.fc.current_mode = adapter->last_lfc_mode;
+	}
+
 	prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
 	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
 	return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed1b47dc0834..af1a5314b494 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -523,11 +523,17 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
 /**
  * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
  * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
  * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
+static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
+				int v_count, int v_idx,
 				int txr_count, int txr_idx,
 				int rxr_count, int rxr_idx)
 {
@@ -598,7 +604,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
 
 		/* update count and index */
 		txr_count--;
-		txr_idx++;
+		txr_idx += v_count;
 
 		/* push pointer to next ring */
 		ring++;
@@ -641,7 +647,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
 
 		/* update count and index */
 		rxr_count--;
-		rxr_idx++;
+		rxr_idx += v_count;
 
 		/* push pointer to next ring */
 		ring++;
@@ -700,24 +706,23 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 		q_vectors = 1;
 
 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
-		for (; rxr_remaining; v_idx++, q_vectors--) {
-			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-			err = ixgbe_alloc_q_vector(adapter, v_idx,
-						   0, 0, rqpv, rxr_idx);
+		for (; rxr_remaining; v_idx++) {
+			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+						   0, 0, 1, rxr_idx);
 
 			if (err)
 				goto err_out;
 
 			/* update counts and index */
-			rxr_remaining -= rqpv;
-			rxr_idx += rqpv;
+			rxr_remaining--;
+			rxr_idx++;
 		}
 	}
 
-	for (; q_vectors; v_idx++, q_vectors--) {
-		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
-		err = ixgbe_alloc_q_vector(adapter, v_idx,
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
 					   tqpv, txr_idx,
 					   rqpv, rxr_idx);
 
@@ -726,9 +731,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 
 		/* update counts and index */
 		rxr_remaining -= rqpv;
-		rxr_idx += rqpv;
 		txr_remaining -= tqpv;
-		txr_idx += tqpv;
+		rxr_idx++;
+		txr_idx++;
 	}
 
 	return 0;
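The v_count parameter threaded through ixgbe_alloc_q_vector() changes the ring layout from contiguous blocks per vector to an interleaved pattern: each vector starts at its own index and strides by the total vector count. A small model of the resulting assignment (DIV_ROUND_UP is the usual kernel macro; the counts are example values):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int q_vectors = 4, remaining = 10, ridx = 0;

            for (int v_idx = 0; v_idx < q_vectors; v_idx++) {
                    int rqpv = DIV_ROUND_UP(remaining, q_vectors - v_idx);

                    printf("vector %d rings:", v_idx);
                    /* inside a vector, the ring index advances by v_count */
                    for (int r = 0, i = ridx; r < rqpv; r++, i += q_vectors)
                            printf(" %d", i);
                    printf("\n");

                    remaining -= rqpv;
                    ridx++; /* the outer loop advances the start index by one */
            }
            return 0;
    }

With 4 vectors and 10 rings this prints rings {0, 4, 8}, {1, 5, 9}, {2, 6} and {3, 7}, covering every index exactly once.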
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aa29edb950f2..4048c9d35caa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -133,7 +133,7 @@ static struct notifier_block dca_notifier = {
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
 MODULE_PARM_DESC(max_vfs,
-		 "Maximum number of virtual functions to allocate per physical function");
+		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
 #endif /* CONFIG_PCI_IOV */
 
 static unsigned int allow_unsupported_sfp;
@@ -637,7 +637,11 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 			clear_bit(__IXGBE_HANG_CHECK_ARMED,
 				  &adapter->tx_ring[i]->state);
 		return;
-	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
+	} else if (((adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) &&
+		    !(adapter->dcb_cfg.pfc_mode_enable)) ||
+		   ((adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) &&
+		    adapter->ixgbe_ieee_pfc &&
+		    !(adapter->ixgbe_ieee_pfc->pfc_en)))
 		return;
 
 	/* update stats for each tc, only valid with PFC enabled */
@@ -1144,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	 * there isn't much point in holding memory we can't use
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		put_page(page);
+		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
 		bi->page = NULL;
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -4102,7 +4106,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 				DMA_FROM_DEVICE);
 		rx_buffer->dma = 0;
 		if (rx_buffer->page)
-			put_page(rx_buffer->page);
+			__free_pages(rx_buffer->page,
+				     ixgbe_rx_pg_order(rx_ring));
 		rx_buffer->page = NULL;
 	}
 
@@ -4967,9 +4972,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		u64 rsc_count = 0;
 		u64 rsc_flush = 0;
-		for (i = 0; i < 16; i++)
-			adapter->hw_rx_no_dma_resources +=
-				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
 			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
@@ -5072,6 +5074,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
 	case ixgbe_mac_82599EB:
+		for (i = 0; i < 16; i++)
+			adapter->hw_rx_no_dma_resources +=
+				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
 		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5249,7 +5254,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
-	int i;
+	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
 		return;
@@ -5261,14 +5266,12 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 		link_up = true;
 	}
-	if (link_up) {
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-				hw->mac.ops.fc_enable(hw, i);
-		} else {
-			hw->mac.ops.fc_enable(hw, 0);
-		}
-	}
+
+	if (adapter->ixgbe_ieee_pfc)
+		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en))
+		hw->mac.ops.fc_enable(hw);
 
 	if (link_up ||
 	    time_after(jiffies, (adapter->link_check_timeout +
@@ -6778,9 +6781,10 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 	/* The 82599 supports up to 64 VFs per physical function
 	 * but this implementation limits allocation to 63 so that
 	 * basic networking resources are still available to the
-	 * physical function
+	 * physical function.  If the user requests greater than
+	 * 63 VFs then it is an error - reset to default of zero.
 	 */
-	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+	adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
 	ixgbe_enable_sriov(adapter, ii);
 #endif /* CONFIG_PCI_IOV */
 }
@@ -7219,6 +7223,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	e_dev_info("%s\n", ixgbe_default_device_descr);
 	cards_found++;
+
+	if (ixgbe_sysfs_init(adapter))
+		e_err(probe, "failed to allocate sysfs resources\n");
+
 	return 0;
 
 err_register:
@@ -7265,6 +7273,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	}
 
 #endif
+	ixgbe_sysfs_exit(adapter);
+
 #ifdef IXGBE_FCOE
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 		ixgbe_cleanup_fcoe(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 88a58cb08569..39856371acb1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -635,6 +635,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 		}
 		break;
 	case IXGBE_VF_SET_MACVLAN:
+		if (adapter->vfinfo[vf].pf_set_mac) {
+			e_warn(drv, "VF %d requested MACVLAN filter but is "
+				    "administratively denied\n", vf);
+			retval = -1;
+			break;
+		}
 		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
 			IXGBE_VT_MSGINFO_SHIFT;
 		/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
new file mode 100644
index 000000000000..f81c166dc5a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -0,0 +1,273 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_type.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+
+/*
+ * This file provides a sysfs interface to export information from the
+ * driver.  The information presented is READ-ONLY.
+ */
+#ifdef CONFIG_IXGBE_HWMON
+
+/* hwmon callback functions */
+static ssize_t ixgbe_hwmon_show_location(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	return sprintf(buf, "loc%u\n",
+		       ixgbe_attr->sensor->location);
+}
+
+static ssize_t ixgbe_hwmon_show_temp(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw);
+
+	value = ixgbe_attr->sensor->temp;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = ixgbe_attr->sensor->caution_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = ixgbe_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+/*
+ * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
+				unsigned int offset, int type) {
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *ixgbe_attr;
+
+	n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
+	ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+
+	switch (type) {
+	case IXGBE_HWMON_TYPE_LOC:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_label", offset);
+		break;
+	case IXGBE_HWMON_TYPE_TEMP:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_input", offset);
+		break;
+	case IXGBE_HWMON_TYPE_CAUTION:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_max", offset);
+		break;
+	case IXGBE_HWMON_TYPE_MAX:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_crit", offset);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	ixgbe_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
+	ixgbe_attr->hw = &adapter->hw;
+	ixgbe_attr->dev_attr.store = NULL;
+	ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
+	ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+
+	rc = device_create_file(&adapter->pdev->dev,
+				&ixgbe_attr->dev_attr);
+
+	if (rc == 0)
+		++adapter->ixgbe_hwmon_buff.n_hwmon;
+
+	return rc;
+}
+#endif /* CONFIG_IXGBE_HWMON */
+
+static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_HWMON
+	int i;
+#endif /* CONFIG_IXGBE_HWMON */
+
+	if (adapter == NULL)
+		return;
+#ifdef CONFIG_IXGBE_HWMON
+
+	for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(&adapter->pdev->dev,
+			   &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
+
+	if (adapter->ixgbe_hwmon_buff.device)
+		hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
+#endif /* CONFIG_IXGBE_HWMON */
+
+	if (adapter->info_kobj != NULL) {
+		kobject_put(adapter->info_kobj);
+		adapter->info_kobj = NULL;
+	}
+}
+
+/* called from ixgbe_main.c */
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
+{
+	ixgbe_sysfs_del_adapter(adapter);
+}
+
+/* called from ixgbe_main.c */
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_HWMON
+	struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+	unsigned int i;
+	int n_attrs;
+#endif /* CONFIG_IXGBE_HWMON */
+	struct net_device *netdev = adapter->netdev;
+	int rc = 0;
+
+	/* create info kobj and attribute listings in kobj */
+	adapter->info_kobj = kobject_create_and_add("info", &netdev->dev.kobj);
+	if (adapter->info_kobj == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+#ifdef CONFIG_IXGBE_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
+		rc = -EPERM;
+		goto err;
+	}
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	rc = adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw);
+	if (rc)
+		goto err;
+
+	/*
+	 * Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+	n_attrs = IXGBE_MAX_SENSORS * 4;
+	ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+					  GFP_KERNEL);
+	if (!ixgbe_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+	if (IS_ERR(ixgbe_hwmon->device)) {
+		rc = PTR_ERR(ixgbe_hwmon->device);
+		goto err;
+	}
+
+	for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+#endif /* CONFIG_IXGBE_HWMON */
+
+	goto exit;
+
+err:
+	ixgbe_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
+
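Because the attributes are created with device_create_file() on the PCI device, they appear in that device's sysfs directory rather than under the registered hwmon class device. A hypothetical userspace reader (the PCI address is illustrative, and the 0-based temp0_input name follows the snprintf() calls above; adjust both for a real system):

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/temp0_input", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("temperature: %s", buf); /* millidegrees Celsius */
            fclose(f);
            return 0;
    }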
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 4acd9e665b28..5e64c77255e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -112,6 +112,27 @@
 #define IXGBE_I2C_DATA_OUT	0x00000008
 #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
 
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+
+#define IXGBE_MAX_SENSORS		3
+
+struct ixgbe_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
+
 /* Interrupt Registers */
 #define IXGBE_EICR	0x00800
 #define IXGBE_EICS	0x00808
@@ -1678,6 +1699,22 @@ enum {
 #define IXGBE_PBANUM0_PTR	0x15
 #define IXGBE_PBANUM1_PTR	0x16
 #define IXGBE_FREE_SPACE_PTR	0X3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG			0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK	0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT	6
+#define IXGBE_ETS_TYPE_MASK		0x0038
+#define IXGBE_ETS_TYPE_SHIFT		3
+#define IXGBE_ETS_TYPE_EMC		0x000
+#define IXGBE_ETS_TYPE_EMC_SHIFTED	0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK	0x0007
+#define IXGBE_ETS_DATA_LOC_MASK		0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT	10
+#define IXGBE_ETS_DATA_INDEX_MASK	0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT	8
+#define IXGBE_ETS_DATA_HTHRESH_MASK	0x00FF
+
 #define IXGBE_SAN_MAC_ADDR_PTR		0x28
 #define IXGBE_DEVICE_CAPS		0x2C
 #define IXGBE_SERIAL_NUMBER_MAC_ADDR	0x11
@@ -1855,7 +1892,7 @@ enum {
 #define IXGBE_MFLCN_DPF		0x00000002 /* Discard Pause Frame */
 #define IXGBE_MFLCN_RPFCE	0x00000004 /* Receive Priority FC Enable */
 #define IXGBE_MFLCN_RFCE	0x00000008 /* Receive FC Enable */
-#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF0 /* Receive FC Mask */
+#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF4 /* Receive FC Mask */
 
 #define IXGBE_MFLCN_RPFCE_SHIFT	4
 
@@ -2771,10 +2808,12 @@ struct ixgbe_mac_operations {
 	void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
 	/* Flow Control */
-	s32 (*fc_enable)(struct ixgbe_hw *, s32);
+	s32 (*fc_enable)(struct ixgbe_hw *);
 
 	/* Manageability interface */
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2832,6 +2871,7 @@ struct ixgbe_mac_info {
 	bool	orig_link_settings_stored;
 	bool	autotry_restart;
 	u8	flags;
+	struct ixgbe_thermal_sensor_data  thermal_sensor_data;
 };
 
 struct ixgbe_phy_info {
@@ -2941,7 +2981,6 @@ struct ixgbe_info {
 #define IXGBE_ERR_OVERTEMP			-26
 #define IXGBE_ERR_FC_NOT_NEGOTIATED		-27
 #define IXGBE_ERR_FC_NOT_SUPPORTED		-28
-#define IXGBE_ERR_FLOW_CONTROL			-29
 #define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE	-30
 #define IXGBE_ERR_PBA_SECTION			-31
 #define IXGBE_ERR_INVALID_ARGUMENT		-32
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 97a991403bbd..f90ec078ece2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -849,6 +849,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
 	.release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
 	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
 	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
+	.get_thermal_sensor_data = NULL,
+	.init_thermal_sensor_thresh = NULL,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 947b5c830735..e09a6cc633bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -40,6 +40,7 @@
 typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_1GB_FULL	0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL	0x0080
+#define IXGBE_LINK_SPEED_100_FULL	0x0008
 
 #define IXGBE_CTRL_RST		0x04000000 /* Reset (SW) */
 #define IXGBE_RXDCTL_ENABLE	0x02000000 /* Enable specific Rx Queue */
@@ -48,6 +49,7 @@ typedef u32 ixgbe_link_speed;
48#define IXGBE_LINKS_SPEED_82599 0x30000000 49#define IXGBE_LINKS_SPEED_82599 0x30000000
49#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 50#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
50#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 51#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
52#define IXGBE_LINKS_SPEED_100_82599 0x10000000
51 53
52/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 54/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
53#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 55#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 2bfe0d1d7958..e8dddf572d38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -107,10 +107,20 @@ static int ixgbevf_get_settings(struct net_device *netdev,
107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
108 108
109 if (link_up) { 109 if (link_up) {
110 ethtool_cmd_speed_set( 110 __u32 speed = SPEED_10000;
111 ecmd, 111 switch (link_speed) {
112 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 112 case IXGBE_LINK_SPEED_10GB_FULL:
113 SPEED_10000 : SPEED_1000); 113 speed = SPEED_10000;
114 break;
115 case IXGBE_LINK_SPEED_1GB_FULL:
116 speed = SPEED_1000;
117 break;
118 case IXGBE_LINK_SPEED_100_FULL:
119 speed = SPEED_100;
120 break;
121 }
122
123 ethtool_cmd_speed_set(ecmd, speed);
114 ecmd->duplex = DUPLEX_FULL; 124 ecmd->duplex = DUPLEX_FULL;
115 } else { 125 } else {
116 ethtool_cmd_speed_set(ecmd, -1); 126 ethtool_cmd_speed_set(ecmd, -1);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index dfed420a1bf6..0a1b99240d43 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -287,7 +287,7 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
287extern const char ixgbevf_driver_name[]; 287extern const char ixgbevf_driver_name[];
288extern const char ixgbevf_driver_version[]; 288extern const char ixgbevf_driver_version[];
289 289
290extern int ixgbevf_up(struct ixgbevf_adapter *adapter); 290extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_down(struct ixgbevf_adapter *adapter); 291extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); 293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 307611ae831d..f69ec4288b10 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -57,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
57static const char ixgbevf_driver_string[] = 57static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 59
60#define DRV_VERSION "2.2.0-k" 60#define DRV_VERSION "2.6.0-k"
61const char ixgbevf_driver_version[] = DRV_VERSION; 61const char ixgbevf_driver_version[] = DRV_VERSION;
62static char ixgbevf_copyright[] = 62static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2012 Intel Corporation."; 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -1608,13 +1608,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1609} 1609}
1610 1610
1611static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1611static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1612{ 1612{
1613 struct net_device *netdev = adapter->netdev; 1613 struct net_device *netdev = adapter->netdev;
1614 struct ixgbe_hw *hw = &adapter->hw; 1614 struct ixgbe_hw *hw = &adapter->hw;
1615 int i, j = 0; 1615 int i, j = 0;
1616 int num_rx_rings = adapter->num_rx_queues; 1616 int num_rx_rings = adapter->num_rx_queues;
1617 u32 txdctl, rxdctl; 1617 u32 txdctl, rxdctl;
1618 u32 msg[2];
1618 1619
1619 for (i = 0; i < adapter->num_tx_queues; i++) { 1620 for (i = 0; i < adapter->num_tx_queues; i++) {
1620 j = adapter->tx_ring[i].reg_idx; 1621 j = adapter->tx_ring[i].reg_idx;
@@ -1653,6 +1654,10 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1653 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1654 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1654 } 1655 }
1655 1656
1657 msg[0] = IXGBE_VF_SET_LPE;
1658 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1659 hw->mbx.ops.write_posted(hw, msg, 2);
1660
1656 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1661 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1657 ixgbevf_napi_enable_all(adapter); 1662 ixgbevf_napi_enable_all(adapter);
1658 1663
@@ -1667,24 +1672,20 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1667 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1672 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1668 adapter->link_check_timeout = jiffies; 1673 adapter->link_check_timeout = jiffies;
1669 mod_timer(&adapter->watchdog_timer, jiffies); 1674 mod_timer(&adapter->watchdog_timer, jiffies);
1670 return 0;
1671} 1675}
1672 1676
1673int ixgbevf_up(struct ixgbevf_adapter *adapter) 1677void ixgbevf_up(struct ixgbevf_adapter *adapter)
1674{ 1678{
1675 int err;
1676 struct ixgbe_hw *hw = &adapter->hw; 1679 struct ixgbe_hw *hw = &adapter->hw;
1677 1680
1678 ixgbevf_configure(adapter); 1681 ixgbevf_configure(adapter);
1679 1682
1680 err = ixgbevf_up_complete(adapter); 1683 ixgbevf_up_complete(adapter);
1681 1684
1682 /* clear any pending interrupts, may auto mask */ 1685 /* clear any pending interrupts, may auto mask */
1683 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1686 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1684 1687
1685 ixgbevf_irq_enable(adapter, true, true); 1688 ixgbevf_irq_enable(adapter, true, true);
1686
1687 return err;
1688} 1689}
1689 1690
1690/** 1691/**
@@ -2673,9 +2674,7 @@ static int ixgbevf_open(struct net_device *netdev)
2673 */ 2674 */
2674 ixgbevf_map_rings_to_vectors(adapter); 2675 ixgbevf_map_rings_to_vectors(adapter);
2675 2676
2676 err = ixgbevf_up_complete(adapter); 2677 ixgbevf_up_complete(adapter);
2677 if (err)
2678 goto err_up;
2679 2678
2680 /* clear any pending interrupts, may auto mask */ 2679 /* clear any pending interrupts, may auto mask */
2681 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2680 IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -2689,7 +2688,6 @@ static int ixgbevf_open(struct net_device *netdev)
2689 2688
2690err_req_irq: 2689err_req_irq:
2691 ixgbevf_down(adapter); 2690 ixgbevf_down(adapter);
2692err_up:
2693 ixgbevf_free_irq(adapter); 2691 ixgbevf_free_irq(adapter);
2694err_setup_rx: 2692err_setup_rx:
2695 ixgbevf_free_all_rx_resources(adapter); 2693 ixgbevf_free_all_rx_resources(adapter);
@@ -3196,9 +3194,11 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3196 /* must set new MTU before calling down or up */ 3194 /* must set new MTU before calling down or up */
3197 netdev->mtu = new_mtu; 3195 netdev->mtu = new_mtu;
3198 3196
3199 msg[0] = IXGBE_VF_SET_LPE; 3197 if (!netif_running(netdev)) {
3200 msg[1] = max_frame; 3198 msg[0] = IXGBE_VF_SET_LPE;
3201 hw->mbx.ops.write_posted(hw, msg, 2); 3199 msg[1] = max_frame;
3200 hw->mbx.ops.write_posted(hw, msg, 2);
3201 }
3202 3202
3203 if (netif_running(netdev)) 3203 if (netif_running(netdev))
3204 ixgbevf_reinit_locked(adapter); 3204 ixgbevf_reinit_locked(adapter);
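
With this change the VF re-posts its maximum frame size to the PF every time the rings are brought up, and ixgbevf_change_mtu() sends the message directly only while the interface is down; when it is running, the reinit path resends it via ixgbevf_up_complete(). Worked sizes for the IXGBE_VF_SET_LPE payload (illustrative arithmetic only):

/* msg[1] = mtu + ETH_HLEN + ETH_FCS_LEN:
 *   standard MTU: 1500 + 14 + 4 = 1518 bytes
 *   jumbo MTU:    9000 + 14 + 4 = 9018 bytes
 */
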
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 74be7411242a..ec89b86f7ca4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -404,11 +404,17 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
404 else 404 else
405 *link_up = false; 405 *link_up = false;
406 406
407 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 407 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
408 IXGBE_LINKS_SPEED_10G_82599) 408 case IXGBE_LINKS_SPEED_10G_82599:
409 *speed = IXGBE_LINK_SPEED_10GB_FULL; 409 *speed = IXGBE_LINK_SPEED_10GB_FULL;
410 else 410 break;
411 case IXGBE_LINKS_SPEED_1G_82599:
411 *speed = IXGBE_LINK_SPEED_1GB_FULL; 412 *speed = IXGBE_LINK_SPEED_1GB_FULL;
413 break;
414 case IXGBE_LINKS_SPEED_100_82599:
415 *speed = IXGBE_LINK_SPEED_100_FULL;
416 break;
417 }
412 418
413 return 0; 419 return 0;
414} 420}
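
The switch replaces a two-way test that could only report 10G or 1G. Bits 29:28 of the LINKS register carry the speed field (mask IXGBE_LINKS_SPEED_82599, 0x30000000), and the newly handled 01b pattern is 100 Mb/s. A worked decode:

/* Example:
 *   links_reg = 0x50000000
 *   links_reg & 0x30000000 = 0x10000000 -> IXGBE_LINKS_SPEED_100_82599
 * so *speed becomes IXGBE_LINK_SPEED_100_FULL.
 */
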
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ddc95b0ac78d..e559dfa06d6a 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -623,7 +623,7 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
623 mac->rx = NULL; 623 mac->rx = NULL;
624} 624}
625 625
626static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, 626static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
627 const int limit) 627 const int limit)
628{ 628{
629 const struct pasemi_mac *mac = netdev_priv(dev); 629 const struct pasemi_mac *mac = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c99b3b0e2eae..703c8cce2a2c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9838,7 +9838,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9838 goto err_out_release_parent; 9838 goto err_out_release_parent;
9839 } 9839 }
9840 } 9840 }
9841 if (err || dma_mask == DMA_BIT_MASK(32)) { 9841 if (err) {
9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9843 if (err) { 9843 if (err) {
9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
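
After the change the 32-bit DMA mask is applied only when the wider mask attempt has failed, restoring the classic fallback pattern. A generic hedged sketch, not niu-specific code (the error label is illustrative):

/* Wide-then-narrow DMA mask fallback. */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(44));
if (err) {
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto err_out_release_parent;
	}
}
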
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 0e01f4e5cd64..944cdfb80fe4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -135,6 +135,25 @@ config MDIO_OCTEON
135 135
136 If in doubt, say Y. 136 If in doubt, say Y.
137 137
138config MDIO_BUS_MUX
139 tristate
140 depends on OF_MDIO
141 help
142 This module provides a driver framework for MDIO bus
143 multiplexers which connect one of several child MDIO busses
144 to a parent bus. Switching between child busses is done by
145 device specific drivers.
146
147config MDIO_BUS_MUX_GPIO
148 tristate "Support for GPIO controlled MDIO bus multiplexers"
149 depends on OF_GPIO && OF_MDIO
150 select MDIO_BUS_MUX
151 help
152 This module provides a driver for MDIO bus multiplexers that
153 are controlled via GPIO lines. The multiplexer connects one of
154 several child MDIO busses to a parent bus. Child bus
155 selection is under the control of GPIO lines.
156
138endif # PHYLIB 157endif # PHYLIB
139 158
140config MICREL_KS8995MA 159config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b7438b1b94b9..f51af688ef8b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_MICREL_PHY) += micrel.o
25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o 26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
27obj-$(CONFIG_AMD_PHY) += amd.o 27obj-$(CONFIG_AMD_PHY) += amd.o
28obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
29obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
new file mode 100644
index 000000000000..e0cc4ef33dee
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -0,0 +1,142 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/device.h>
11#include <linux/of_mdio.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/phy.h>
15#include <linux/mdio-mux.h>
16#include <linux/of_gpio.h>
17
18#define DRV_VERSION "1.0"
19#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
20
21#define MDIO_MUX_GPIO_MAX_BITS 8
22
23struct mdio_mux_gpio_state {
24 int gpio[MDIO_MUX_GPIO_MAX_BITS];
25 unsigned int num_gpios;
26 void *mux_handle;
27};
28
29static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
30 void *data)
31{
32 int change;
33 unsigned int n;
34 struct mdio_mux_gpio_state *s = data;
35
36 if (current_child == desired_child)
37 return 0;
38
39 change = current_child == -1 ? -1 : current_child ^ desired_child;
40
41 for (n = 0; n < s->num_gpios; n++) {
42 if (change & 1)
43 gpio_set_value_cansleep(s->gpio[n],
44 (desired_child & 1) != 0);
45 change >>= 1;
46 desired_child >>= 1;
47 }
48
49 return 0;
50}
51
52static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
53{
54 enum of_gpio_flags f;
55 struct mdio_mux_gpio_state *s;
56 unsigned int num_gpios;
57 unsigned int n;
58 int r;
59
60 if (!pdev->dev.of_node)
61 return -ENODEV;
62
63 num_gpios = of_gpio_count(pdev->dev.of_node);
64 if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
65 return -ENODEV;
66
67 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
68 if (!s)
69 return -ENOMEM;
70
71 s->num_gpios = num_gpios;
72
73 for (n = 0; n < num_gpios; ) {
74 int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f);
75 if (gpio < 0) {
76 r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio;
77 goto err;
78 }
79 s->gpio[n] = gpio;
80
81 n++;
82
83 r = gpio_request(gpio, "mdio_mux_gpio");
84 if (r)
85 goto err;
86
87 r = gpio_direction_output(gpio, 0);
88 if (r)
89 goto err;
90 }
91
92 r = mdio_mux_init(&pdev->dev,
93 mdio_mux_gpio_switch_fn, &s->mux_handle, s);
94
95 if (r == 0) {
96 pdev->dev.platform_data = s;
97 return 0;
98 }
99err:
100 while (n) {
101 n--;
102 gpio_free(s->gpio[n]);
103 }
104 devm_kfree(&pdev->dev, s);
105 return r;
106}
107
108static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev)
109{
110 struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
111 mdio_mux_uninit(s->mux_handle);
112 return 0;
113}
114
115static struct of_device_id mdio_mux_gpio_match[] = {
116 {
117 .compatible = "mdio-mux-gpio",
118 },
119 {
120 /* Legacy compatible property. */
121 .compatible = "cavium,mdio-mux-sn74cbtlv3253",
122 },
123 {},
124};
125MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);
126
127static struct platform_driver mdio_mux_gpio_driver = {
128 .driver = {
129 .name = "mdio-mux-gpio",
130 .owner = THIS_MODULE,
131 .of_match_table = mdio_mux_gpio_match,
132 },
133 .probe = mdio_mux_gpio_probe,
134 .remove = __devexit_p(mdio_mux_gpio_remove),
135};
136
137module_platform_driver(mdio_mux_gpio_driver);
138
139MODULE_DESCRIPTION(DRV_DESCRIPTION);
140MODULE_VERSION(DRV_VERSION);
141MODULE_AUTHOR("David Daney");
142MODULE_LICENSE("GPL");
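
mdio_mux_gpio_switch_fn() above writes only the GPIO lines whose value actually differs between the current and the desired child, via change = current ^ desired; a current_child of -1 is the sentinel that forces every line to be programmed on the first switch. Worked example:

/* current_child = 2 (0b010), desired_child = 3 (0b011)
 * change = 0b010 ^ 0b011 = 0b001
 * Only bit 0 differs, so only s->gpio[0] is driven (to 1); the
 * unchanged lines skip the potentially slow
 * gpio_set_value_cansleep() call entirely.
 */
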
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
new file mode 100644
index 000000000000..39ea0674dcde
--- /dev/null
+++ b/drivers/net/phy/mdio-mux.c
@@ -0,0 +1,192 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/mdio-mux.h>
11#include <linux/of_mdio.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/phy.h>
15
16#define DRV_VERSION "1.0"
17#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
18
19struct mdio_mux_child_bus;
20
21struct mdio_mux_parent_bus {
22 struct mii_bus *mii_bus;
23 int current_child;
24 int parent_id;
25 void *switch_data;
26 int (*switch_fn)(int current_child, int desired_child, void *data);
27
28 /* List of our children linked through their next fields. */
29 struct mdio_mux_child_bus *children;
30};
31
32struct mdio_mux_child_bus {
33 struct mii_bus *mii_bus;
34 struct mdio_mux_parent_bus *parent;
35 struct mdio_mux_child_bus *next;
36 int bus_number;
37 int phy_irq[PHY_MAX_ADDR];
38};
39
40/*
41 * The parent bus' lock is used to order access to the switch_fn.
42 */
43static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
44{
45 struct mdio_mux_child_bus *cb = bus->priv;
46 struct mdio_mux_parent_bus *pb = cb->parent;
47 int r;
48
49 mutex_lock(&pb->mii_bus->mdio_lock);
50 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
51 if (r)
52 goto out;
53
54 pb->current_child = cb->bus_number;
55
56 r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum);
57out:
58 mutex_unlock(&pb->mii_bus->mdio_lock);
59
60 return r;
61}
62
63/*
64 * The parent bus' lock is used to order access to the switch_fn.
65 */
66static int mdio_mux_write(struct mii_bus *bus, int phy_id,
67 int regnum, u16 val)
68{
69 struct mdio_mux_child_bus *cb = bus->priv;
70 struct mdio_mux_parent_bus *pb = cb->parent;
71
72 int r;
73
74 mutex_lock(&pb->mii_bus->mdio_lock);
75 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
76 if (r)
77 goto out;
78
79 pb->current_child = cb->bus_number;
80
81 r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val);
82out:
83 mutex_unlock(&pb->mii_bus->mdio_lock);
84
85 return r;
86}
87
88static int parent_count;
89
90int mdio_mux_init(struct device *dev,
91 int (*switch_fn)(int cur, int desired, void *data),
92 void **mux_handle,
93 void *data)
94{
95 struct device_node *parent_bus_node;
96 struct device_node *child_bus_node;
97 int r, ret_val;
98 struct mii_bus *parent_bus;
99 struct mdio_mux_parent_bus *pb;
100 struct mdio_mux_child_bus *cb;
101
102 if (!dev->of_node)
103 return -ENODEV;
104
105 parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
106
107 if (!parent_bus_node)
108 return -ENODEV;
109
110 parent_bus = of_mdio_find_bus(parent_bus_node);
111 if (parent_bus == NULL) {
112 ret_val = -EPROBE_DEFER;
113 goto err_parent_bus;
114 }
115
116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
117 if (pb == NULL) {
118 ret_val = -ENOMEM;
119 goto err_parent_bus;
120 }
121
122 pb->switch_data = data;
123 pb->switch_fn = switch_fn;
124 pb->current_child = -1;
125 pb->parent_id = parent_count++;
126 pb->mii_bus = parent_bus;
127
128 ret_val = -ENODEV;
129 for_each_child_of_node(dev->of_node, child_bus_node) {
130 u32 v;
131
132 r = of_property_read_u32(child_bus_node, "reg", &v);
133 if (r)
134 continue;
135
136 cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
137 if (cb == NULL) {
138 dev_err(dev,
139 "Error: Failed to allocate memory for child\n");
140 ret_val = -ENOMEM;
141 break;
142 }
143 cb->bus_number = v;
144 cb->parent = pb;
145 cb->mii_bus = mdiobus_alloc();
146 cb->mii_bus->priv = cb;
147
148 cb->mii_bus->irq = cb->phy_irq;
149 cb->mii_bus->name = "mdio_mux";
150 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
151 pb->parent_id, v);
152 cb->mii_bus->parent = dev;
153 cb->mii_bus->read = mdio_mux_read;
154 cb->mii_bus->write = mdio_mux_write;
155 r = of_mdiobus_register(cb->mii_bus, child_bus_node);
156 if (r) {
157 mdiobus_free(cb->mii_bus);
158 devm_kfree(dev, cb);
159 } else {
160 of_node_get(child_bus_node);
161 cb->next = pb->children;
162 pb->children = cb;
163 }
164 }
165 if (pb->children) {
166 *mux_handle = pb;
167 dev_info(dev, "Version " DRV_VERSION "\n");
168 return 0;
169 }
170err_parent_bus:
171 of_node_put(parent_bus_node);
172 return ret_val;
173}
174EXPORT_SYMBOL_GPL(mdio_mux_init);
175
176void mdio_mux_uninit(void *mux_handle)
177{
178 struct mdio_mux_parent_bus *pb = mux_handle;
179 struct mdio_mux_child_bus *cb = pb->children;
180
181 while (cb) {
182 mdiobus_unregister(cb->mii_bus);
183 mdiobus_free(cb->mii_bus);
184 cb = cb->next;
185 }
186}
187EXPORT_SYMBOL_GPL(mdio_mux_uninit);
188
189MODULE_DESCRIPTION(DRV_DESCRIPTION);
190MODULE_VERSION(DRV_VERSION);
191MODULE_AUTHOR("David Daney");
192MODULE_LICENSE("GPL");
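
A device-specific driver (mdio-mux-gpio above is one) plugs into this framework by handing mdio_mux_init() a switch callback and an opaque state pointer. A minimal hedged consumer sketch; my_state, my_switch_fn and my_probe are hypothetical names and error handling is trimmed:

struct my_state {
	void *mux_handle;
	/* device-specific bus-select state would live here */
};

static int my_switch_fn(int current_child, int desired_child, void *data)
{
	struct my_state *s = data;
	/* program the hardware so desired_child is routed to the parent */
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	struct my_state *s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	return mdio_mux_init(&pdev->dev, my_switch_fn, &s->mux_handle, s);
}
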
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8985cc62cf41..83d5c9f55686 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -88,6 +88,38 @@ static struct class mdio_bus_class = {
88 .dev_release = mdiobus_release, 88 .dev_release = mdiobus_release,
89}; 89};
90 90
91#ifdef CONFIG_OF_MDIO
92/* Helper function for of_mdio_find_bus */
93static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
94{
95 return dev->of_node == mdio_bus_np;
96}
97/**
98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
 99 * @mdio_bus_np: Pointer to the mii_bus device node.
100 *
101 * Returns a pointer to the mii_bus, or NULL if none found.
102 *
103 * Because the association of a device_node and mii_bus is made via
104 * of_mdiobus_register(), the mii_bus cannot be found before it is
105 * registered with of_mdiobus_register().
106 *
107 */
108struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
109{
110 struct device *d;
111
112 if (!mdio_bus_np)
113 return NULL;
114
115 d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np,
116 of_mdio_bus_match);
117
118 return d ? to_mii_bus(d) : NULL;
119}
120EXPORT_SYMBOL(of_mdio_find_bus);
121#endif
122
91/** 123/**
92 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 124 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
93 * @bus: target mii_bus 125 * @bus: target mii_bus
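
of_mdio_find_bus() walks the mdio_bus class comparing each device's of_node against the given node, which is why the of_mdio.c hunk further down starts assigning mdio->dev.of_node before registration: without it, no bus could ever match. A hedged usage sketch, mirroring the mux code above; note that class_find_device() returns its match with a reference held:

struct device_node *np;
struct mii_bus *bus;

np = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
bus = of_mdio_find_bus(np);
if (!bus)
	return -EPROBE_DEFER;	/* parent bus may simply not be registered yet */

/* ... use the bus ...; a caller that is finished with it should
 * drop the reference with put_device(&bus->dev). */
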
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index e3257681e360..b78ee676e102 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -277,7 +277,7 @@ retry:
277 d_printf(1, dev, "RX: size changed to %d, received %d, " 277 d_printf(1, dev, "RX: size changed to %d, received %d, "
278 "copied %d, capacity %ld\n", 278 "copied %d, capacity %ld\n",
279 rx_size, read_size, rx_skb->len, 279 rx_size, read_size, rx_skb->len,
280 (long) (skb_end_pointer(new_skb) - new_skb->head)); 280 (long) skb_end_offset(new_skb));
281 goto retry; 281 goto retry;
282 } 282 }
283 /* In most cases, it happens due to the hardware scheduling a 283 /* In most cases, it happens due to the hardware scheduling a
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 483c0adcad87..2574abde8d99 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -45,6 +45,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
45 for (i=0; i<PHY_MAX_ADDR; i++) 45 for (i=0; i<PHY_MAX_ADDR; i++)
46 mdio->irq[i] = PHY_POLL; 46 mdio->irq[i] = PHY_POLL;
47 47
48 mdio->dev.of_node = np;
49
48 /* Register the MDIO bus */ 50 /* Register the MDIO bus */
49 rc = mdiobus_register(mdio); 51 rc = mdiobus_register(mdio);
50 if (rc) 52 if (rc)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc2fbd5..418ed03d0887 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -344,7 +344,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
344 } 344 }
345 if (unlikely 345 if (unlikely
346 (skb->truesize != 346 (skb->truesize !=
347 sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { 347 sizeof(*skb) + skb_end_offset(skb))) {
348 /* 348 /*
349 printk("TX buffer truesize has been changed\n"); 349 printk("TX buffer truesize has been changed\n");
350 */ 350 */
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 4af841408fb5..de165b54237b 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -135,6 +135,9 @@ extern int create_l1(struct dchannel *, dchannel_l1callback *);
135#define HW_TESTRX_RAW 0x9602 135#define HW_TESTRX_RAW 0x9602
136#define HW_TESTRX_HDLC 0x9702 136#define HW_TESTRX_HDLC 0x9702
137#define HW_TESTRX_OFF 0x9802 137#define HW_TESTRX_OFF 0x9802
138#define HW_TIMER3_IND 0x9902
139#define HW_TIMER3_VALUE 0x9a00
140#define HW_TIMER3_VMASK 0x00FF
138 141
139struct layer1; 142struct layer1;
140extern int l1_event(struct layer1 *, u_int); 143extern int l1_event(struct layer1 *, u_int);
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index b5e7f2202484..ce6e613dff4c 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -37,7 +37,7 @@
37 */ 37 */
38#define MISDN_MAJOR_VERSION 1 38#define MISDN_MAJOR_VERSION 1
39#define MISDN_MINOR_VERSION 1 39#define MISDN_MINOR_VERSION 1
40#define MISDN_RELEASE 21 40#define MISDN_RELEASE 28
41 41
42/* primitives for information exchange 42/* primitives for information exchange
 43 * general format 43 * general format
@@ -115,6 +115,11 @@
115#define MDL_ERROR_IND 0x1F04 115#define MDL_ERROR_IND 0x1F04
116#define MDL_ERROR_RSP 0x5F04 116#define MDL_ERROR_RSP 0x5F04
117 117
118/* intern layer 2 */
119#define DL_TIMER200_IND 0x7004
120#define DL_TIMER203_IND 0x7304
121#define DL_INTERN_MSG 0x7804
122
118/* DL_INFORMATION_IND types */ 123/* DL_INFORMATION_IND types */
119#define DL_INFO_L2_CONNECT 0x0001 124#define DL_INFO_L2_CONNECT 0x0001
120#define DL_INFO_L2_REMOVED 0x0002 125#define DL_INFO_L2_REMOVED 0x0002
@@ -367,6 +372,7 @@ clear_channelmap(u_int nr, u_char *map)
367#define MISDN_CTRL_RX_OFF 0x0100 372#define MISDN_CTRL_RX_OFF 0x0100
368#define MISDN_CTRL_FILL_EMPTY 0x0200 373#define MISDN_CTRL_FILL_EMPTY 0x0200
369#define MISDN_CTRL_GETPEER 0x0400 374#define MISDN_CTRL_GETPEER 0x0400
375#define MISDN_CTRL_L1_TIMER3 0x0800
370#define MISDN_CTRL_HW_FEATURES_OP 0x2000 376#define MISDN_CTRL_HW_FEATURES_OP 0x2000
371#define MISDN_CTRL_HW_FEATURES 0x2001 377#define MISDN_CTRL_HW_FEATURES 0x2001
372#define MISDN_CTRL_HFC_OP 0x4000 378#define MISDN_CTRL_HFC_OP 0x4000
@@ -585,6 +591,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
585extern void set_channel_address(struct mISDNchannel *, u_int, u_int); 591extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
586extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); 592extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
587extern unsigned short mISDN_clock_get(void); 593extern unsigned short mISDN_clock_get(void);
594extern const char *mISDNDevName4ch(struct mISDNchannel *);
588 595
589#endif /* __KERNEL__ */ 596#endif /* __KERNEL__ */
590#endif /* mISDNIF_H */ 597#endif /* mISDNIF_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
new file mode 100644
index 000000000000..a243dbba8659
--- /dev/null
+++ b/include/linux/mdio-mux.h
@@ -0,0 +1,21 @@
1/*
 2 * MDIO bus multiplexer framework.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2011, 2012 Cavium, Inc.
9 */
10#ifndef __LINUX_MDIO_MUX_H
11#define __LINUX_MDIO_MUX_H
12#include <linux/device.h>
13
14int mdio_mux_init(struct device *dev,
15 int (*switch_fn) (int cur, int desired, void *data),
16 void **mux_handle,
17 void *data);
18
19void mdio_mux_uninit(void *mux_handle);
20
21#endif /* __LINUX_MDIO_MUX_H */
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 53b94e025c7c..912c27a0f7ee 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -22,4 +22,6 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
22 void (*hndlr)(struct net_device *), 22 void (*hndlr)(struct net_device *),
23 phy_interface_t iface); 23 phy_interface_t iface);
24 24
25extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
26
25#endif /* __LINUX_OF_MDIO_H */ 27#endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 988fc49667b1..91ad5e227d1d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,11 +645,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
645{ 645{
646 return skb->head + skb->end; 646 return skb->head + skb->end;
647} 647}
648
649static inline unsigned int skb_end_offset(const struct sk_buff *skb)
650{
651 return skb->end;
652}
648#else 653#else
649static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 654static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
650{ 655{
651 return skb->end; 656 return skb->end;
652} 657}
658
659static inline unsigned int skb_end_offset(const struct sk_buff *skb)
660{
661 return skb->end - skb->head;
662}
653#endif 663#endif
654 664
655/* Internal */ 665/* Internal */
@@ -2558,7 +2568,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2558 return false; 2568 return false;
2559 2569
2560 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 2570 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
2561 if (skb_end_pointer(skb) - skb->head < skb_size) 2571 if (skb_end_offset(skb) < skb_size)
2562 return false; 2572 return false;
2563 2573
2564 if (skb_shared(skb) || skb_cloned(skb)) 2574 if (skb_shared(skb) || skb_cloned(skb))
@@ -2566,5 +2576,19 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2566 2576
2567 return true; 2577 return true;
2568} 2578}
2579
2580/**
2581 * skb_head_is_locked - Determine if the skb->head is locked down
2582 * @skb: skb to check
2583 *
 2584 * The head on skbs built around a head frag can be removed if they are
2585 * not cloned. This function returns true if the skb head is locked down
2586 * due to either being allocated via kmalloc, or by being a clone with
2587 * multiple references to the head.
2588 */
2589static inline bool skb_head_is_locked(const struct sk_buff *skb)
2590{
2591 return !skb->head_frag || skb_cloned(skb);
2592}
2569#endif /* __KERNEL__ */ 2593#endif /* __KERNEL__ */
2570#endif /* _LINUX_SKBUFF_H */ 2594#endif /* _LINUX_SKBUFF_H */
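
skb_end_offset() gives call sites one expression that is cheap under either skb layout: with NET_SKBUFF_DATA_USES_OFFSET, skb->end already is the offset; otherwise the pointer subtraction happens once, inside the helper. For skb_head_is_locked(), the decision table follows directly from the definition above:

/* skb_head_is_locked() outcomes:
 *   head_frag  cloned   locked?
 *       0        0       yes   (kmalloc'd head can never be donated)
 *       0        1       yes
 *       1        0       no    (page-frag head with a sole owner)
 *       1        1       yes   (clones still reference the head)
 */
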
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 278af9ea42d4..d9b42c5be088 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -372,6 +372,8 @@ struct tcp_sock {
372 repair : 1, 372 repair : 1,
373 unused : 1; 373 unused : 1;
374 u8 repair_queue; 374 u8 repair_queue;
375 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
376 early_retrans_delayed:1; /* Delayed ER timer installed */
375 377
376/* RTT measurement */ 378/* RTT measurement */
377 u32 srtt; /* smoothed round trip time << 3 */ 379 u32 srtt; /* smoothed round trip time << 3 */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0fb84de6da36..92faa6a7ea97 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -252,6 +252,7 @@ extern int sysctl_tcp_max_ssthresh;
252extern int sysctl_tcp_cookie_size; 252extern int sysctl_tcp_cookie_size;
253extern int sysctl_tcp_thin_linear_timeouts; 253extern int sysctl_tcp_thin_linear_timeouts;
254extern int sysctl_tcp_thin_dupack; 254extern int sysctl_tcp_thin_dupack;
255extern int sysctl_tcp_early_retrans;
255 256
256extern atomic_long_t tcp_memory_allocated; 257extern atomic_long_t tcp_memory_allocated;
257extern struct percpu_counter tcp_sockets_allocated; 258extern struct percpu_counter tcp_sockets_allocated;
@@ -366,13 +367,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
366#define TCP_ECN_DEMAND_CWR 4 367#define TCP_ECN_DEMAND_CWR 4
367#define TCP_ECN_SEEN 8 368#define TCP_ECN_SEEN 8
368 369
369static __inline__ void
370TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
371{
372 if (sysctl_tcp_ecn && th->ece && th->cwr)
373 inet_rsk(req)->ecn_ok = 1;
374}
375
376enum tcp_tw_status { 370enum tcp_tw_status {
377 TCP_TW_SUCCESS = 0, 371 TCP_TW_SUCCESS = 0,
378 TCP_TW_RST = 1, 372 TCP_TW_RST = 1,
@@ -438,7 +432,8 @@ extern int tcp_disconnect(struct sock *sk, int flags);
438 432
439void tcp_connect_init(struct sock *sk); 433void tcp_connect_init(struct sock *sk);
440void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); 434void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
441void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen); 435int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
436 int hdrlen, bool *fragstolen);
442 437
443/* From syncookies.c */ 438/* From syncookies.c */
444extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; 439extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
@@ -499,6 +494,8 @@ extern void tcp_send_delayed_ack(struct sock *sk);
499 494
500/* tcp_input.c */ 495/* tcp_input.c */
501extern void tcp_cwnd_application_limited(struct sock *sk); 496extern void tcp_cwnd_application_limited(struct sock *sk);
497extern void tcp_resume_early_retransmit(struct sock *sk);
498extern void tcp_rearm_rto(struct sock *sk);
502 499
503/* tcp_timer.c */ 500/* tcp_timer.c */
504extern void tcp_init_xmit_timers(struct sock *); 501extern void tcp_init_xmit_timers(struct sock *);
@@ -667,6 +664,22 @@ struct tcp_skb_cb {
667 664
668#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 665#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
669 666
667/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
668 *
669 * If we receive a SYN packet with these bits set, it means a network is
670 * playing bad games with TOS bits. In order to avoid possible false congestion
 671 * notifications, we disable TCP ECN negotiation.
672 */
673static inline void
674TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
675{
676 const struct tcphdr *th = tcp_hdr(skb);
677
678 if (sysctl_tcp_ecn && th->ece && th->cwr &&
679 INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
680 inet_rsk(req)->ecn_ok = 1;
681}
682
670/* Due to TSO, an SKB can be composed of multiple actual 683/* Due to TSO, an SKB can be composed of multiple actual
671 * packets. To keep these tracked properly, we use this. 684 * packets. To keep these tracked properly, we use this.
672 */ 685 */
@@ -797,6 +810,21 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
797 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; 810 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
798} 811}
799 812
813/* TCP early-retransmit (ER) is similar to but more conservative than
814 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
815 */
816static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
817{
818 tp->do_early_retrans = sysctl_tcp_early_retrans &&
819 !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
820 tp->early_retrans_delayed = 0;
821}
822
823static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
824{
825 tp->do_early_retrans = 0;
826}
827
800static inline unsigned int tcp_left_out(const struct tcp_sock *tp) 828static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
801{ 829{
802 return tp->sacked_out + tp->lost_out; 830 return tp->sacked_out + tp->lost_out;
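
The rewritten TCP_ECN_create_request() adds the RFC 3168 section 6.1.1 check: a SYN must arrive Not-ECT, so any ECT or CE codepoint suggests TOS mangling and ECN negotiation is refused. The two low bits of the stored DS field are the codepoint:

/* ECN codepoint in the low two bits of ip_dsfield:
 *   00 -> Not-ECT : ecn_ok may be set (if sysctl, ECE and CWR agree)
 *   01 -> ECT(1)  : refuse negotiation
 *   10 -> ECT(0)  : refuse negotiation
 *   11 -> CE      : refuse negotiation
 */
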
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 52ba2b5e803d..2c35da818ef9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -829,7 +829,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
829struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 829struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
830{ 830{
831 int headerlen = skb_headroom(skb); 831 int headerlen = skb_headroom(skb);
832 unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; 832 unsigned int size = skb_end_offset(skb) + skb->data_len;
833 struct sk_buff *n = alloc_skb(size, gfp_mask); 833 struct sk_buff *n = alloc_skb(size, gfp_mask);
834 834
835 if (!n) 835 if (!n)
@@ -930,9 +930,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
930{ 930{
931 int i; 931 int i;
932 u8 *data; 932 u8 *data;
933 int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; 933 int size = nhead + skb_end_offset(skb) + ntail;
934 long off; 934 long off;
935 bool fastpath;
936 935
937 BUG_ON(nhead < 0); 936 BUG_ON(nhead < 0);
938 937
@@ -941,27 +940,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
941 940
942 size = SKB_DATA_ALIGN(size); 941 size = SKB_DATA_ALIGN(size);
943 942
944 /* Check if we can avoid taking references on fragments if we own
945 * the last reference on skb->head. (see skb_release_data())
946 */
947 if (!skb->cloned)
948 fastpath = true;
949 else {
950 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
951 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
952 }
953
954 if (fastpath && !skb->head_frag &&
955 size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
956 memmove(skb->head + size, skb_shinfo(skb),
957 offsetof(struct skb_shared_info,
958 frags[skb_shinfo(skb)->nr_frags]));
959 memmove(skb->head + nhead, skb->head,
960 skb_tail_pointer(skb) - skb->head);
961 off = nhead;
962 goto adjust_others;
963 }
964
965 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 943 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
966 gfp_mask); 944 gfp_mask);
967 if (!data) 945 if (!data)
@@ -977,9 +955,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
977 skb_shinfo(skb), 955 skb_shinfo(skb),
978 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 956 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
979 957
980 if (fastpath) { 958 /*
981 skb_free_head(skb); 959 * if shinfo is shared we must drop the old head gracefully, but if it
982 } else { 960 * is not we can just drop the old head and let the existing refcount
961 * be since all we did is relocate the values
962 */
963 if (skb_cloned(skb)) {
983 /* copy this zero copy skb frags */ 964 /* copy this zero copy skb frags */
984 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 965 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
985 if (skb_copy_ubufs(skb, gfp_mask)) 966 if (skb_copy_ubufs(skb, gfp_mask))
@@ -992,12 +973,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
992 skb_clone_fraglist(skb); 973 skb_clone_fraglist(skb);
993 974
994 skb_release_data(skb); 975 skb_release_data(skb);
976 } else {
977 skb_free_head(skb);
995 } 978 }
996 off = (data + nhead) - skb->head; 979 off = (data + nhead) - skb->head;
997 980
998 skb->head = data; 981 skb->head = data;
999 skb->head_frag = 0; 982 skb->head_frag = 0;
1000adjust_others:
1001 skb->data += off; 983 skb->data += off;
1002#ifdef NET_SKBUFF_DATA_USES_OFFSET 984#ifdef NET_SKBUFF_DATA_USES_OFFSET
1003 skb->end = size; 985 skb->end = size;
@@ -1699,17 +1681,17 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1699 struct splice_pipe_desc *spd, struct sock *sk) 1681 struct splice_pipe_desc *spd, struct sock *sk)
1700{ 1682{
1701 int seg; 1683 int seg;
1702 bool head_is_linear = !skb->head_frag;
1703 1684
1704 /* map the linear part : 1685 /* map the linear part :
1705 * If skb->head_frag is set, this 'linear' part is backed 1686 * If skb->head_frag is set, this 'linear' part is backed by a
1706 * by a fragment, and we can avoid a copy. 1687 * fragment, and if the head is not shared with any clones then
1688 * we can avoid a copy since we own the head portion of this page.
1707 */ 1689 */
1708 if (__splice_segment(virt_to_page(skb->data), 1690 if (__splice_segment(virt_to_page(skb->data),
1709 (unsigned long) skb->data & (PAGE_SIZE - 1), 1691 (unsigned long) skb->data & (PAGE_SIZE - 1),
1710 skb_headlen(skb), 1692 skb_headlen(skb),
1711 offset, len, skb, spd, 1693 offset, len, skb, spd,
1712 head_is_linear, 1694 skb_head_is_locked(skb),
1713 sk, pipe)) 1695 sk, pipe))
1714 return true; 1696 return true;
1715 1697
@@ -2745,14 +2727,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2745 if (unlikely(!nskb)) 2727 if (unlikely(!nskb))
2746 goto err; 2728 goto err;
2747 2729
2748 hsize = skb_end_pointer(nskb) - nskb->head; 2730 hsize = skb_end_offset(nskb);
2749 if (skb_cow_head(nskb, doffset + headroom)) { 2731 if (skb_cow_head(nskb, doffset + headroom)) {
2750 kfree_skb(nskb); 2732 kfree_skb(nskb);
2751 goto err; 2733 goto err;
2752 } 2734 }
2753 2735
2754 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2736 nskb->truesize += skb_end_offset(nskb) - hsize;
2755 hsize;
2756 skb_release_head_state(nskb); 2737 skb_release_head_state(nskb);
2757 __skb_push(nskb, doffset); 2738 __skb_push(nskb, doffset);
2758 } else { 2739 } else {
@@ -2870,6 +2851,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2870 unsigned int len = skb_gro_len(skb); 2851 unsigned int len = skb_gro_len(skb);
2871 unsigned int offset = skb_gro_offset(skb); 2852 unsigned int offset = skb_gro_offset(skb);
2872 unsigned int headlen = skb_headlen(skb); 2853 unsigned int headlen = skb_headlen(skb);
2854 unsigned int delta_truesize;
2873 2855
2874 if (p->len + len >= 65536) 2856 if (p->len + len >= 65536)
2875 return -E2BIG; 2857 return -E2BIG;
@@ -2899,11 +2881,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2899 frag->page_offset += offset; 2881 frag->page_offset += offset;
2900 skb_frag_size_sub(frag, offset); 2882 skb_frag_size_sub(frag, offset);
2901 2883
2884 /* all fragments truesize : remove (head size + sk_buff) */
2885 delta_truesize = skb->truesize -
2886 SKB_TRUESIZE(skb_end_offset(skb));
2887
2902 skb->truesize -= skb->data_len; 2888 skb->truesize -= skb->data_len;
2903 skb->len -= skb->data_len; 2889 skb->len -= skb->data_len;
2904 skb->data_len = 0; 2890 skb->data_len = 0;
2905 2891
2906 NAPI_GRO_CB(skb)->free = 1; 2892 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
2907 goto done; 2893 goto done;
2908 } else if (skb->head_frag) { 2894 } else if (skb->head_frag) {
2909 int nr_frags = pinfo->nr_frags; 2895 int nr_frags = pinfo->nr_frags;
@@ -2928,6 +2914,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2928 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 2914 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
2929 /* We don't need to clear skbinfo->nr_frags here */ 2915 /* We don't need to clear skbinfo->nr_frags here */
2930 2916
2917 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
2931 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 2918 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
2932 goto done; 2919 goto done;
2933 } else if (skb_gro_len(p) != pinfo->gso_size) 2920 } else if (skb_gro_len(p) != pinfo->gso_size)
@@ -2970,7 +2957,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2970 p = nskb; 2957 p = nskb;
2971 2958
2972merge: 2959merge:
2973 p->truesize += skb->truesize - len; 2960 delta_truesize = skb->truesize;
2974 if (offset > headlen) { 2961 if (offset > headlen) {
2975 unsigned int eat = offset - headlen; 2962 unsigned int eat = offset - headlen;
2976 2963
@@ -2990,7 +2977,7 @@ merge:
2990done: 2977done:
2991 NAPI_GRO_CB(p)->count++; 2978 NAPI_GRO_CB(p)->count++;
2992 p->data_len += len; 2979 p->data_len += len;
2993 p->truesize += len; 2980 p->truesize += delta_truesize;
2994 p->len += len; 2981 p->len += len;
2995 2982
2996 NAPI_GRO_CB(skb)->same_flow = 1; 2983 NAPI_GRO_CB(skb)->same_flow = 1;
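
skb_gro_receive() now charges p an exact per-path truesize delta instead of the flat len: in the frag-only merge the donor keeps just its head, in the stolen-head merge only the aligned struct sk_buff stays behind, and in the fallback merge the whole donor truesize moves over. Illustrative numbers only; real values depend on SKB_DATA_ALIGN and kmalloc sizing on the running kernel:

/* Donor skb with truesize 4096 (illustrative):
 *   frag-only merge : delta = 4096 - SKB_TRUESIZE(skb_end_offset(skb))
 *   stolen head     : delta = 4096 - SKB_DATA_ALIGN(sizeof(struct sk_buff))
 *   fallback merge  : delta = 4096
 * p->truesize += delta, so accounting matches what p now references.
 */
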
diff --git a/net/core/sock.c b/net/core/sock.c
index 1a8835117fd6..b8c818e69c23 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -113,6 +113,7 @@
113#include <linux/user_namespace.h> 113#include <linux/user_namespace.h>
114#include <linux/static_key.h> 114#include <linux/static_key.h>
115#include <linux/memcontrol.h> 115#include <linux/memcontrol.h>
116#include <linux/prefetch.h>
116 117
117#include <asm/uaccess.h> 118#include <asm/uaccess.h>
118 119
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 51c6c672c8aa..0d11f234d615 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -673,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
673 break; 673 break;
674 } else { 674 } else {
675 memset(&mreq, 0, sizeof(mreq)); 675 memset(&mreq, 0, sizeof(mreq));
676 if (optlen >= sizeof(struct in_addr) && 676 if (optlen >= sizeof(struct ip_mreq)) {
677 copy_from_user(&mreq.imr_address, optval, 677 if (copy_from_user(&mreq, optval,
678 sizeof(struct in_addr))) 678 sizeof(struct ip_mreq)))
679 break; 679 break;
680 } else if (optlen >= sizeof(struct in_addr)) {
681 if (copy_from_user(&mreq.imr_address, optval,
682 sizeof(struct in_addr)))
683 break;
684 }
680 } 685 }
681 686
682 if (!mreq.imr_ifindex) { 687 if (!mreq.imr_ifindex) {
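
Before this change the kernel always copied only the first four bytes of the option, so an application passing a full struct ip_mreq had its imr_multiaddr, not its imr_interface, interpreted as the interface address; struct ip_mreq's two fields line up with the first two of struct ip_mreqn, so copying the whole structure fixes that. A userspace sketch of the accepted forms (illustrative, error handling omitted):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static void pick_mcast_if(int fd)
{
	struct in_addr a = { .s_addr = inet_addr("192.0.2.1") };
	struct ip_mreq m = {
		.imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
		.imr_interface.s_addr = inet_addr("192.0.2.1"),
	};

	/* legacy form: the interface address alone */
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &a, sizeof(a));

	/* struct ip_mreq form: with the fix, imr_interface is honoured
	 * instead of the first four bytes (imr_multiaddr) */
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &m, sizeof(m));
}
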
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 33417f84e07f..ef32956ed655 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
27#include <net/tcp_memcontrol.h> 27#include <net/tcp_memcontrol.h>
28 28
29static int zero; 29static int zero;
30static int two = 2;
30static int tcp_retr1_max = 255; 31static int tcp_retr1_max = 255;
31static int ip_local_port_range_min[] = { 1, 1 }; 32static int ip_local_port_range_min[] = { 1, 1 };
32static int ip_local_port_range_max[] = { 65535, 65535 }; 33static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -677,6 +678,15 @@ static struct ctl_table ipv4_table[] = {
677 .proc_handler = proc_dointvec 678 .proc_handler = proc_dointvec
678 }, 679 },
679 { 680 {
681 .procname = "tcp_early_retrans",
682 .data = &sysctl_tcp_early_retrans,
683 .maxlen = sizeof(int),
684 .mode = 0644,
685 .proc_handler = proc_dointvec_minmax,
686 .extra1 = &zero,
687 .extra2 = &two,
688 },
689 {
680 .procname = "udp_mem", 690 .procname = "udp_mem",
681 .data = &sysctl_udp_mem, 691 .data = &sysctl_udp_mem,
682 .maxlen = sizeof(sysctl_udp_mem), 692 .maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9670af341931..c2cff8b62772 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -395,6 +395,7 @@ void tcp_init_sock(struct sock *sk)
395 tp->mss_cache = TCP_MSS_DEFAULT; 395 tp->mss_cache = TCP_MSS_DEFAULT;
396 396
397 tp->reordering = sysctl_tcp_reordering; 397 tp->reordering = sysctl_tcp_reordering;
398 tcp_enable_early_retrans(tp);
398 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 399 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
399 400
400 sk->sk_state = TCP_CLOSE; 401 sk->sk_state = TCP_CLOSE;
@@ -980,8 +981,8 @@ static inline int select_size(const struct sock *sk, bool sg)
980static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 981static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
981{ 982{
982 struct sk_buff *skb; 983 struct sk_buff *skb;
983 struct tcp_skb_cb *cb;
984 struct tcphdr *th; 984 struct tcphdr *th;
985 bool fragstolen;
985 986
986 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); 987 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
987 if (!skb) 988 if (!skb)
@@ -994,14 +995,14 @@ static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
994 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) 995 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
995 goto err_free; 996 goto err_free;
996 997
997 cb = TCP_SKB_CB(skb);
998
999 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 998 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
1000 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 999 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
1001 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 1000 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
1002 1001
1003 tcp_queue_rcv(sk, skb, sizeof(*th)); 1002 if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
1004 1003 WARN_ON_ONCE(fragstolen); /* should not happen */
1004 __kfree_skb(skb);
1005 }
1005 return size; 1006 return size;
1006 1007
1007err_free: 1008err_free:
@@ -2495,6 +2496,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2495 err = -EINVAL; 2496 err = -EINVAL;
2496 else 2497 else
2497 tp->thin_dupack = val; 2498 tp->thin_dupack = val;
2499 if (tp->thin_dupack)
2500 tcp_disable_early_retrans(tp);
2498 break; 2501 break;
2499 2502
2500 case TCP_REPAIR: 2503 case TCP_REPAIR:
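
tcp_queue_rcv() now reports whether the skb was coalesced ("eaten") into the tail of the receive queue, with *fragstolen telling the caller that the skb's page-frag head was absorbed too. tcp_send_rcvq() allocated the skb itself with a kmalloc'd head, so a stolen head is impossible there, hence the WARN_ON_ONCE before the plain free. The general caller contract, as a hedged sketch mirroring how the receive path consumes the flag:

bool fragstolen;
int eaten;

eaten = tcp_queue_rcv(sk, skb, sizeof(struct tcphdr), &fragstolen);
if (eaten)
	/* frees only what the queue tail did not absorb */
	kfree_skb_partial(skb, fragstolen);
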
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 96a631deb4e6..7b2d351f24db 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -99,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
99 99
100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
101int sysctl_tcp_abc __read_mostly; 101int sysctl_tcp_abc __read_mostly;
102int sysctl_tcp_early_retrans __read_mostly = 2;
102 103
103#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 104#define FLAG_DATA 0x01 /* Incoming frame contained data. */
104#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 105#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -906,6 +907,7 @@ static void tcp_init_metrics(struct sock *sk)
906 if (dst_metric(dst, RTAX_REORDERING) && 907 if (dst_metric(dst, RTAX_REORDERING) &&
907 tp->reordering != dst_metric(dst, RTAX_REORDERING)) { 908 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
908 tcp_disable_fack(tp); 909 tcp_disable_fack(tp);
910 tcp_disable_early_retrans(tp);
909 tp->reordering = dst_metric(dst, RTAX_REORDERING); 911 tp->reordering = dst_metric(dst, RTAX_REORDERING);
910 } 912 }
911 913
@@ -988,6 +990,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
988#endif 990#endif
989 tcp_disable_fack(tp); 991 tcp_disable_fack(tp);
990 } 992 }
993
994 if (metric > 0)
995 tcp_disable_early_retrans(tp);
991} 996}
992 997
993/* This must be called before lost_out is incremented */ 998/* This must be called before lost_out is incremented */
@@ -2339,6 +2344,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2339 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2344 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
2340} 2345}
2341 2346
2347static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
2348{
2349 struct tcp_sock *tp = tcp_sk(sk);
2350 unsigned long delay;
2351
2352 /* Delay early retransmit and entering fast recovery for
2353 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
2354 * available, or RTO is scheduled to fire first.
2355 */
2356 if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
2357 return false;
2358
2359 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
2360 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
2361 return false;
2362
2363 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
2364 tp->early_retrans_delayed = 1;
2365 return true;
2366}
2367
2342static inline int tcp_skb_timedout(const struct sock *sk, 2368static inline int tcp_skb_timedout(const struct sock *sk,
2343 const struct sk_buff *skb) 2369 const struct sk_buff *skb)
2344{ 2370{
@@ -2446,7 +2472,7 @@ static inline int tcp_head_timedout(const struct sock *sk)
2446 * Main question: may we further continue forward transmission 2472 * Main question: may we further continue forward transmission
2447 * with the same cwnd? 2473 * with the same cwnd?
2448 */ 2474 */
2449static int tcp_time_to_recover(struct sock *sk) 2475static int tcp_time_to_recover(struct sock *sk, int flag)
2450{ 2476{
2451 struct tcp_sock *tp = tcp_sk(sk); 2477 struct tcp_sock *tp = tcp_sk(sk);
2452 __u32 packets_out; 2478 __u32 packets_out;
@@ -2492,6 +2518,16 @@ static int tcp_time_to_recover(struct sock *sk)
2492 tcp_is_sack(tp) && !tcp_send_head(sk)) 2518 tcp_is_sack(tp) && !tcp_send_head(sk))
2493 return 1; 2519 return 1;
2494 2520
2521 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
2522 * retransmissions due to small network reorderings, we implement
2523 * Mitigation A.3 in the RFC and delay the retransmission for a short
2524 * interval if appropriate.
2525 */
2526 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
2527 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
2528 !tcp_may_send_now(sk))
2529 return !tcp_pause_early_retransmit(sk, flag);
2530
2495 return 0; 2531 return 0;
2496} 2532}
2497 2533
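
Worked numbers for tcp_pause_early_retransmit() above: tp->srtt stores the smoothed RTT shifted left by 3, so srtt >> 5 is RTT/4, floored at 2 ms by the max_t():

/* Assuming HZ = 1000 (1 jiffy = 1 ms):
 *   RTT = 40 ms -> tp->srtt = 320 -> 320 >> 5 = 10 -> delay = 10 ms
 *   RTT =  4 ms -> tp->srtt =  32 ->  32 >> 5 =  1 -> delay =  2 ms (floor)
 * The pause is skipped entirely if the RTO would fire sooner.
 */
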
@@ -3022,6 +3058,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
3022 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 3058 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
3023} 3059}
3024 3060
3061static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3062{
3063 struct tcp_sock *tp = tcp_sk(sk);
3064 int mib_idx;
3065
3066 if (tcp_is_reno(tp))
3067 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3068 else
3069 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3070
3071 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3072
3073 tp->high_seq = tp->snd_nxt;
3074 tp->prior_ssthresh = 0;
3075 tp->undo_marker = tp->snd_una;
3076 tp->undo_retrans = tp->retrans_out;
3077
3078 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3079 if (!ece_ack)
3080 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3081 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
3082 TCP_ECN_queue_cwr(tp);
3083 }
3084
3085 tp->bytes_acked = 0;
3086 tp->snd_cwnd_cnt = 0;
3087 tp->prior_cwnd = tp->snd_cwnd;
3088 tp->prr_delivered = 0;
3089 tp->prr_out = 0;
3090 tcp_set_ca_state(sk, TCP_CA_Recovery);
3091}
3092
3025/* Process an event, which can update packets-in-flight not trivially. 3093/* Process an event, which can update packets-in-flight not trivially.
3026 * Main goal of this function is to calculate new estimate for left_out, 3094 * Main goal of this function is to calculate new estimate for left_out,
3027 * taking into account both packets sitting in receiver's buffer and 3095 * taking into account both packets sitting in receiver's buffer and
@@ -3041,7 +3109,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3041 struct tcp_sock *tp = tcp_sk(sk); 3109 struct tcp_sock *tp = tcp_sk(sk);
3042 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 3110 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
3043 (tcp_fackets_out(tp) > tp->reordering)); 3111 (tcp_fackets_out(tp) > tp->reordering));
3044 int fast_rexmit = 0, mib_idx; 3112 int fast_rexmit = 0;
3045 3113
3046 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 3114 if (WARN_ON(!tp->packets_out && tp->sacked_out))
3047 tp->sacked_out = 0; 3115 tp->sacked_out = 0;
@@ -3125,7 +3193,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3125 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3193 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3126 tcp_try_undo_dsack(sk); 3194 tcp_try_undo_dsack(sk);
3127 3195
3128 if (!tcp_time_to_recover(sk)) { 3196 if (!tcp_time_to_recover(sk, flag)) {
3129 tcp_try_to_open(sk, flag); 3197 tcp_try_to_open(sk, flag);
3130 return; 3198 return;
3131 } 3199 }
@@ -3142,32 +3210,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3142 } 3210 }
3143 3211
3144 /* Otherwise enter Recovery state */ 3212 /* Otherwise enter Recovery state */
3145 3213 tcp_enter_recovery(sk, (flag & FLAG_ECE));
3146 if (tcp_is_reno(tp))
3147 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3148 else
3149 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3150
3151 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3152
3153 tp->high_seq = tp->snd_nxt;
3154 tp->prior_ssthresh = 0;
3155 tp->undo_marker = tp->snd_una;
3156 tp->undo_retrans = tp->retrans_out;
3157
3158 if (icsk->icsk_ca_state < TCP_CA_CWR) {
3159 if (!(flag & FLAG_ECE))
3160 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3161 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
3162 TCP_ECN_queue_cwr(tp);
3163 }
3164
3165 tp->bytes_acked = 0;
3166 tp->snd_cwnd_cnt = 0;
3167 tp->prior_cwnd = tp->snd_cwnd;
3168 tp->prr_delivered = 0;
3169 tp->prr_out = 0;
3170 tcp_set_ca_state(sk, TCP_CA_Recovery);
3171 fast_rexmit = 1; 3214 fast_rexmit = 1;
3172 } 3215 }
3173 3216
@@ -3249,16 +3292,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 /* Restart timer after forward progress on connection.
  * RFC2988 recommends to restart timer to now+rto.
  */
-static void tcp_rearm_rto(struct sock *sk)
+void tcp_rearm_rto(struct sock *sk)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+		u32 rto = inet_csk(sk)->icsk_rto;
+		/* Offset the time elapsed after installing regular RTO */
+		if (tp->early_retrans_delayed) {
+			struct sk_buff *skb = tcp_write_queue_head(sk);
+			const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+			s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+			/* delta may not be positive if the socket is locked
+			 * when the delayed ER timer fires and is rescheduled.
+			 */
+			if (delta > 0)
+				rto = delta;
+		}
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+					  TCP_RTO_MAX);
 	}
+	tp->early_retrans_delayed = 0;
+}
+
+/* This function is called when the delayed ER timer fires. TCP enters
+ * fast recovery and performs fast-retransmit.
+ */
+void tcp_resume_early_retransmit(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tcp_rearm_rto(sk);
+
+	/* Stop if ER is disabled after the delayed ER timer is scheduled */
+	if (!tp->do_early_retrans)
+		return;
+
+	tcp_enter_recovery(sk, false);
+	tcp_update_scoreboard(sk, 1);
+	tcp_xmit_retransmit_queue(sk);
 }
 
 /* If we get here, the whole TSO packet has not been acked. */
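The (s32)(rto_time_stamp - tcp_time_stamp) idiom above stays correct across jiffies wraparound. A standalone sketch of the same arithmetic (plain userspace C, hypothetical tick values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t when = 0xfffffff0u;	/* TCP_SKB_CB(skb)->when, pre-wrap */
	uint32_t rto  = 0x20;		/* icsk_rto, in ticks */
	uint32_t now  = 0x00000008u;	/* tcp_time_stamp, post-wrap */

	/* Unsigned arithmetic wraps modulo 2^32, so the deadline becomes
	 * 0x10; the signed cast then recovers the true distance from
	 * 'now': 0x10 - 0x08 = 8 ticks still to go.
	 */
	int32_t delta = (int32_t)(when + rto - now);

	printf("ticks left until RTO: %d\n", delta > 0 ? (int)delta : 0);
	return 0;
}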
@@ -3707,6 +3781,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
+	if (tp->early_retrans_delayed)
+		tcp_rearm_rto(sk);
+
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -4455,6 +4532,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
  * @sk: socket
  * @to: prior buffer
  * @from: buffer to add in queue
+ * @fragstolen: pointer to boolean
  *
  * Before queueing skb @from after @to, try to merge them
  * to reduce overall memory use and queue lengths, if cost is small.
@@ -4467,59 +4545,82 @@ static bool tcp_try_coalesce(struct sock *sk,
 				 struct sk_buff *from,
 				 bool *fragstolen)
 {
-	int delta, len = from->len;
+	int i, delta, len = from->len;
 
 	*fragstolen = false;
-	if (tcp_hdr(from)->fin)
+
+	if (tcp_hdr(from)->fin || skb_cloned(to))
 		return false;
+
 	if (len <= skb_tailroom(to)) {
 		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
-merge:
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
-		TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
-		TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
-		return true;
+		goto merge;
 	}
 
 	if (skb_has_frag_list(to) || skb_has_frag_list(from))
 		return false;
 
-	if (skb_headlen(from) == 0 &&
-	    (skb_shinfo(to)->nr_frags +
-	     skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) {
-		WARN_ON_ONCE(from->head_frag);
-		delta = from->truesize - ksize(from->head) -
-			SKB_DATA_ALIGN(sizeof(struct sk_buff));
-
-		WARN_ON_ONCE(delta < len);
-copyfrags:
-		memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
-		       skb_shinfo(from)->frags,
-		       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
-		skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
-		skb_shinfo(from)->nr_frags = 0;
-		to->truesize += delta;
-		atomic_add(delta, &sk->sk_rmem_alloc);
-		sk_mem_charge(sk, delta);
-		to->len += len;
-		to->data_len += len;
-		goto merge;
-	}
-	if (from->head_frag) {
+	if (skb_headlen(from) != 0) {
 		struct page *page;
 		unsigned int offset;
 
-		if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
 			return false;
+
+		if (skb_head_is_locked(from))
+			return false;
+
+		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
 		page = virt_to_head_page(from->head);
 		offset = from->data - (unsigned char *)page_address(page);
+
 		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
 				   page, offset, skb_headlen(from));
 		*fragstolen = true;
-		delta = len; /* we dont know real truesize... */
-		goto copyfrags;
+	} else {
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+			return false;
+
+		delta = from->truesize -
+			SKB_TRUESIZE(skb_end_pointer(from) - from->head);
 	}
-	return false;
+
+	WARN_ON_ONCE(delta < len);
+
+	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+	       skb_shinfo(from)->frags,
+	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+	if (!skb_cloned(from))
+		skb_shinfo(from)->nr_frags = 0;
+
+	/* if the skb is cloned this does nothing since we set nr_frags to 0 */
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+		skb_frag_ref(from, i);
+
+	to->truesize += delta;
+	atomic_add(delta, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, delta);
+	to->len += len;
+	to->data_len += len;
+
+merge:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+	return true;
+}
+
+static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+	if (head_stolen)
+		kmem_cache_free(skbuff_head_cache, skb);
+	else
+		__kfree_skb(skb);
 }
 
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
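On the two delta computations above: when the linear head is stolen (skb_headlen() != 0 and the head sits in a page fragment), 'to' absorbs everything 'from' accounted for except the struct sk_buff itself, while in the frags-only case the head buffer stays with 'from', so its true cost is subtracted via SKB_TRUESIZE. For reference, the macro as defined in include/linux/skbuff.h of this era (quoted as a sketch; check the header for the authoritative form):

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))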
@@ -4565,10 +4666,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
-			if (fragstolen)
-				kmem_cache_free(skbuff_head_cache, skb);
-			else
-				__kfree_skb(skb);
+			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
 
@@ -4645,6 +4743,22 @@ end:
 	skb_set_owner_r(skb, sk);
 }
 
+int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+		  bool *fragstolen)
+{
+	int eaten;
+	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
+
+	__skb_pull(skb, hdrlen);
+	eaten = (tail &&
+		 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
+	tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	if (!eaten) {
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+		skb_set_owner_r(skb, sk);
+	}
+	return eaten;
+}
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
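Caller contract for the new helper, mirrored by the call sites in the hunks below (illustrative snippet, not a hunk from this patch): a return of 1 means the payload was merged into the queue tail and the caller still owns the now-empty skb; 0 means the skb itself was queued.

	bool fragstolen;
	int eaten = tcp_queue_rcv(sk, skb, tcp_header_len, &fragstolen);

	if (eaten)
		kfree_skb_partial(skb, fragstolen); /* head may be recycled */
	else
		sk->sk_data_ready(sk, 0);	    /* skb now in rcv queue */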
@@ -4691,20 +4805,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	}
 
 	if (eaten <= 0) {
-		struct sk_buff *tail;
 queue_and_out:
 		if (eaten < 0 &&
 		    tcp_try_rmem_schedule(sk, skb->truesize))
 			goto drop;
 
-		tail = skb_peek_tail(&sk->sk_receive_queue);
-		eaten = (tail &&
-			 tcp_try_coalesce(sk, tail, skb,
-					  &fragstolen)) ? 1 : 0;
-		if (eaten <= 0) {
-			skb_set_owner_r(skb, sk);
-			__skb_queue_tail(&sk->sk_receive_queue, skb);
-		}
+		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
 	}
 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 	if (skb->len)
@@ -4727,12 +4833,9 @@ queue_and_out:
 
 	tcp_fast_path_check(sk);
 
-	if (eaten > 0) {
-		if (fragstolen)
-			kmem_cache_free(skbuff_head_cache, skb);
-		else
-			__kfree_skb(skb);
-	} else if (!sock_flag(sk, SOCK_DEAD))
+	if (eaten > 0)
+		kfree_skb_partial(skb, fragstolen);
+	else if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, 0);
 	return;
 }
@@ -5402,14 +5505,6 @@ discard:
 	return 0;
 }
 
-void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen)
-{
-	__skb_pull(skb, hdrlen);
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
-	skb_set_owner_r(skb, sk);
-	tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-}
-
 /*
  * TCP receive function for the ESTABLISHED state.
  *
@@ -5518,6 +5613,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	} else {
 		int eaten = 0;
 		int copied_early = 0;
+		bool fragstolen = false;
 
 		if (tp->copied_seq == tp->rcv_nxt &&
 		    len - tcp_header_len <= tp->ucopy.len) {
@@ -5575,7 +5671,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
-			tcp_queue_rcv(sk, skb, tcp_header_len);
+			eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
+					      &fragstolen);
 		}
 
 		tcp_event_data_recv(sk, skb);
@@ -5597,7 +5694,7 @@ no_ack:
 	else
 #endif
 	if (eaten)
-		__kfree_skb(skb);
+		kfree_skb_partial(skb, fragstolen);
 	else
 		sk->sk_data_ready(sk, 0);
 	return 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cf97e9821d76..4ff5e1f70d16 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1368,7 +1368,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, tcp_hdr(skb));
+		TCP_ECN_create_request(req, skb);
 
 	if (want_cookie) {
 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3cabafb5cdd1..6f6a91832826 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	newtp->sacked_out = 0;
 	newtp->fackets_out = 0;
 	newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	tcp_enable_early_retrans(newtp);
 
 	/* So many TCP implementations out there (incorrectly) count the
 	 * initial SYN frame in their delayed-ACK and congestion control
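tcp_enable_early_retrans() is defined elsewhere in this series; a minimal sketch, assuming it merely latches a new net.ipv4.tcp_early_retrans sysctl per connection:

static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	/* Assumed body: snapshot the sysctl so a runtime change does not
	 * flip behaviour on an already-established connection.
	 */
	tp->do_early_retrans = sysctl_tcp_early_retrans;
}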
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 834e89fc541b..d94733009923 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -78,9 +78,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
 		tp->frto_counter = 3;
 
 	tp->packets_out += tcp_skb_pcount(skb);
-	if (!prior_packets)
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+	if (!prior_packets || tp->early_retrans_delayed)
+		tcp_rearm_rto(sk);
 }
 
 /* SND.NXT, if window was not shrunk.
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02c2f16..e911e6c523ec 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (tp->early_retrans_delayed) {
+		tcp_resume_early_retransmit(sk);
+		return;
+	}
+
 	if (!tp->packets_out)
 		goto out;
 
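The hunk above completes the timer sharing: the delayed-ER "timer" is simply ICSK_TIME_RETRANS armed with a shorter timeout while tp->early_retrans_delayed is set. The resulting flow, as C-comment pseudocode grounded in the hunks of this patch:

/*
 * tcp_retransmit_timer()
 *   tp->early_retrans_delayed set?
 *     yes -> tcp_resume_early_retransmit()
 *              tcp_rearm_rto()               re-install the real RTO,
 *                                            minus the time already elapsed
 *              tcp_enter_recovery(sk, false) unless do_early_retrans was
 *                                            cleared in the meantime
 *              tcp_xmit_retransmit_queue()   perform the fast retransmit
 *     no  -> regular RTO handling
 */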
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b210969834..078d039e8fd2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1140,7 +1140,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
 	treq->loc_addr = ipv6_hdr(skb)->daddr;
 	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, tcp_hdr(skb));
+		TCP_ECN_create_request(req, skb);
 
 	treq->iif = sk->sk_bound_dev_if;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 81445cc8196f..cc37dd52ecf9 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	q->stats.pdrop++;
-	sch->qstats.drops++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
+	return qdisc_drop(skb, sch);
 
- congestion_drop:
+congestion_drop:
 	qdisc_drop(skb, sch);
 	return NET_XMIT_CN;
 
- other_drop:
+other_drop:
 	if (ret & __NET_XMIT_BYPASS)
 		sch->qstats.drops++;
 	kfree_skb(skb);
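This and the following three scheduler hunks all collapse the same open-coded drop sequence onto qdisc_drop(). For reference, the helper they rely on, as defined in include/net/sch_generic.h at this point (shown as a sketch; consult the header for the authoritative definition):

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

Note that sch_choke's congestion_drop path keeps its explicit qdisc_drop() call because it must return NET_XMIT_CN rather than the helper's NET_XMIT_DROP.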
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 389b856c6653..3886365cc207 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 
 drop:
-	kfree_skb(skb);
-	sch->qstats.drops++;
+	qdisc_drop(skb, sch);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2ea6f196e3c8..acae5b0e3849 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		__skb_queue_tail(&q->direct_queue, skb);
 		q->direct_pkts++;
 	} else {
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_DROP;
+		return qdisc_drop(skb, sch);
 	}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45326599fda3..ca0c29695d51 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
 
-	kfree_skb(skb);
-	sch->qstats.drops++;
-	return NET_XMIT_DROP;
+	return qdisc_drop(skb, sch);
 }
 
 static struct sk_buff *